Unity3D Shader Series: Stroke (Outline) Effect

1 Introduction

To summarize, the main ways to implement a stroke (outline) effect are:
① Normal extension + ZTest Always
② Normal extension + Cull Front
③ Normal extension + ZWrite Off
④ Normal extension + Stencil test
⑤ Screen post-processing

2 How to extend vertices along the normal

The basic principle of normal extension is simple: render the model twice. In the first pass, push the vertices outward along their normals and draw the mesh in the stroke color; in the second pass, render the model normally. The second pass covers the first, and since it has no normal extension, only the middle part gets covered, leaving the extended rim visible as the stroke.
This is like a painter layering paint: at the same position, the color painted later covers the color painted earlier.
The main problem is therefore how to guarantee that the second Pass always covers the first Pass. The following methods all work; ② and ③ are used a little more often:
Method ①: the second Pass uses ZTest Always
Method ②: the first Pass uses Cull Front
Method ③: the first Pass uses ZWrite Off
Method ④: use the stencil test

2.1 Normal extension + ZTest Always

2.1.1 Code

To ensure that the second Pass covers the first Pass, the easiest way is to make the depth test always pass, i.e. use ZTest Always. But ZTest Always brings quite a few problems, which we will discuss in detail in the next subsection.

The pseudo code is as follows:

// render with stroke color first
Pass
{
    ...
    // vertex shader: vertices are extended along the normal 
    v2f vert ( appdata v )
    {
        v2f o;
        v.vertex.xy += normalize(v.normal) * _OutlineWidth;
        o.vertex = UnityObjectToClipPos(v.vertex);
        return o;
    }

    // Fragment shader: draw stroke color directly 
    fixed4 frag ( v2f i ) : SV_Target
    {
        return _OutlineColor;
    }
}

// re-render normally
Pass
{
    // Ensure that this Pass will be rendered
    ZTest Always
    // ...
}

The complete code is as follows:

Shader "LaoWang/Outline_Example01"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        _OutlineWidth ("Outline width", Range(0.01, 4)) = 0.01
        _OutlineColor ("Outline Color", color) = (1.0, 1.0, 1.0, 1.0)
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" "Queue"="Geometry"}
        LOD 100

        Pass
        {
            CGPROGRAM

            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float3 normal : NORMAL;
            };

            struct v2f
            {
                float4 vertex : SV_POSITION;
            };

            float _OutlineWidth;
            fixed4 _OutlineColor;

            v2f vert (appdata v)
            {
                v2f o;
                v.vertex.xy += normalize(v.normal) * _OutlineWidth;
                o.vertex = UnityObjectToClipPos(v.vertex);
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                return _OutlineColor;
            }

            ENDCG
        }

        Pass
        {
            ZTest Always
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"
            #include "Lighting.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
            float4 _MainTex_ST;

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.uv = TRANSFORM_TEX(v.uv, _MainTex);
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                fixed4 color = tex2D(_MainTex, i.uv);
                return fixed4(color.rgb, 1.0);
            }
            ENDCG
        }
    }
}

2.1.2 Problems

Since the second Pass uses ZTest Always, two problems appear.
① The model can show through itself.
Interestingly, only part of the mesh shows through. Why only part of it? I have not figured this out either. When the GPU draws a model, how is the rendering order of the triangles within the same mesh determined? If you know the answer, please share it, thanks.
② The object always renders on top of everything else.
For these reasons, basically no one uses this method.

2.2 Normal extension + Cull Front

2.2.1 Code

The principle is similar to the previous section, except that instead of using ZTest Always to make the second Pass cover the first, the first Pass uses Cull Front: it renders only the back faces of the model and pushes them outward along their normals. Since back faces generally lie behind front faces (their depth values are larger), the second Pass naturally covers the middle part.

Shader "LaoWang/Outline_CullFront"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        _OutlineWidth ("Outline width", Range(0.01, 4)) = 0.01
        _OutlineColor ("Outline Color", color) = (1.0, 1.0, 1.0, 1.0)
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" "Queue"="Geometry"}
        LOD 100

        Pass
        {
            Cull Front
            CGPROGRAM

            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float3 normal : NORMAL;
            };

            struct v2f
            {
                float4 vertex : SV_POSITION;
            };

            float _OutlineWidth;
            fixed4 _OutlineColor;

            v2f vert (appdata v)
            {
                v2f o;
                v.vertex.xy += normalize(v.normal) * _OutlineWidth;
                o.vertex = UnityObjectToClipPos(v.vertex);
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                return _OutlineColor;
            }

            ENDCG
        }

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"
            #include "Lighting.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
            float4 _MainTex_ST;

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.uv = TRANSFORM_TEX(v.uv, _MainTex);
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                fixed4 color = tex2D(_MainTex, i.uv);
                return fixed4(color.rgb, 1.0);
            }
            ENDCG
        }
    }
}

The effect is shown in the figure.
Although this approach does not work particularly well on the Robot Kyle model, it is sufficient for most other models.
Ordinary stroke effects generally use this method.

2.2.2 Improvement points

① Keep the stroke width at the same screen-space proportion regardless of camera distance or viewing angle.
As shown in the figure.
The cause of the problem is that we extend the vertices in model space by a fixed distance; with a perspective camera, the stroke on parts of the model near the camera therefore looks thicker, while the stroke on distant parts looks thinner.
The solution is to extend the vertices along the normal direction in homogeneous clip space instead of model space.
The key question then becomes: how do we obtain the normal direction in homogeneous clip space?
The matrix that transforms vertices from model space to homogeneous clip space is MVP. Can we transform the normal directly with the MVP matrix? No. A normal must be transformed with the inverse transpose of the transformation matrix, i.e. ((MVP)^-1)^T here.
Why must the normal use the inverse transpose rather than the transformation matrix itself? Mainly to guarantee that, when there is non-uniform scaling, the transformed normal is still perpendicular to the surface. If there is no non-uniform scaling, only rotation, the normal can be transformed with the matrix directly. (For a detailed derivation, see Section 4.7 "Normal Transformation" of "Unity Shader Introduction Essentials".)
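A brief sketch of the standard argument (my own addition, not from the original article): let $T$ be a tangent vector and $N$ the normal, so $N \cdot T = 0$. Tangents transform with the matrix $M$ ($T' = MT$); suppose normals transform with some matrix $G$ ($N' = GN$). Requiring the transformed vectors to stay perpendicular gives

$$ N'^{\,T} T' = (GN)^{T}(MT) = N^{T}(G^{T}M)\,T = 0, $$

which holds for every tangent $T$ exactly when $G^{T}M = I$, i.e. $G = (M^{-1})^{T}$, the inverse transpose of $M$.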
Does the MVP matrix contain only rotation? No. The P matrix, i.e. the transformation from view space to homogeneous clip space, necessarily contains non-uniform scaling. So we need the inverse transpose of MVP here.
Back to the original question: how do we obtain the inverse transpose of MVP? Unfortunately, Unity's built-in shader variables do not include it. To get it exactly, you would have to compute it on the C# side and pass it to the shader. In practice, though, we do not need that much precision; an approximation is enough. There are two common approximations.
The first is to approximate with the MVP matrix itself.

v2f vert (appdata v)
{
    o.vertex = UnityObjectToClipPos(v.vertex);
    float3 clipNormal = mul((float3x3) UNITY_MATRIX_VP, mul((float3x3) UNITY_MATRIX_M, v.normal));
    o.vertex.xy += normalize(clipNormal).xy * _OutlineWidth;
}

The second is to approximate with (the inverse transpose of MV) * P. Why the inverse transpose of MV? Because Unity happens to provide exactly this matrix as a built-in variable, UNITY_MATRIX_IT_MV.

v2f vert (appdata v)
{
    v2f o;
    o.vertex = UnityObjectToClipPos(v.vertex);
    // Transform the normal to view space with the inverse transpose of MV, then to clip space with P
    float3 viewNormal = mul((float3x3)UNITY_MATRIX_IT_MV, v.normal);
    float2 clipNormal = mul((float2x2)UNITY_MATRIX_P, viewNormal.xy);
    o.vertex.xy += normalize(clipNormal) * _OutlineWidth;
    return o;
}

The results are compared below; as you can see, the two approximations give very similar effects.

2.3 Normal extension + ZWrite Off

2.3.1 Code

The logic is also very simple: since the first Pass turns off depth writing, the second Pass is guaranteed to pass the depth test, so the second Pass will cover the first Pass.

Shader "LaoWang/Outline_ZWriteOff"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        _OutlineWidth ("Outline width", Range(0.01, 4)) = 0.01
        _OutlineColor ("Outline Color", color) = (1.0, 1.0, 1.0, 1.0)
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" "Queue"="Geometry"}
        LOD 100

        Pass
        {
            ZWrite Off

            CGPROGRAM

            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float3 normal : NORMAL;
            };

            struct v2f
            {
                float4 vertex : SV_POSITION;
            };

            float _OutlineWidth;
            fixed4 _OutlineColor;

            v2f vert (appdata v)
            {
                v2f o;
                //v.vertex.xy += normalize(v.normal) * _OutlineWidth;
                //o.vertex = UnityObjectToClipPos(v.vertex);

                o.vertex = UnityObjectToClipPos(v.vertex);
                float3 clipNormal = mul((float3x3) UNITY_MATRIX_VP, mul((float3x3) UNITY_MATRIX_M, v.normal));
                o.vertex.xy += normalize(clipNormal).xy * _OutlineWidth;

                //o.vertex = UnityObjectToClipPos(v.vertex);
                //float3 viewNormal = mul((float3x3)UNITY_MATRIX_IT_MV, v.normal);
                //float2 clipNormal = mul((float2x2)UNITY_MATRIX_P, viewNormal.xy);
                //o.vertex.xy += normalize(clipNormal) * _OutlineWidth;
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                return _OutlineColor;
            }

            ENDCG
        }

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"
            #include "Lighting.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
            float4 _MainTex_ST;

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.uv = TRANSFORM_TEX(v.uv, _MainTex);
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                fixed4 color = tex2D(_MainTex, i.uv);
                return fixed4(color.rgb, 1.0);
            }
            ENDCG
        }
    }
}

The effect is as follows; you can see that this method gives the best result on the Robot Kyle model.

2.3.2 Problems

Turning off ZWrite causes two problems. Details follow.
We add a ground plane to the scene and give it a new Standard shader material.
We will then find that the part of the stroke that overlaps the floor disappears.
The Frame Debugger shows that the floor is drawn last. Because depth writing is disabled while drawing the stroke, the floor passes the depth test and its color overwrites the stroke color.
The fix is simple: render our model last. How do we control that? By controlling the render queue, which we already covered in "[Unity3D Shader Series of Perspective Effects XRay]", so I won't repeat it here.
We set the render queue to "Geometry+1", which means our model is rendered after the other opaque objects.
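As a small aside (my own sketch, not from the original article): besides writing "Queue"="Geometry+1" in the shader's Tags, the same adjustment can be made from C# on the material. The class name here is hypothetical.

using UnityEngine;
using UnityEngine.Rendering;

// Sketch: push the outlined object's material to Geometry+1 (2001),
// so it is rendered after the other opaque objects.
public class OutlineQueueSetter : MonoBehaviour
{
    void Start()
    {
        var rend = GetComponent<Renderer>();
        rend.material.renderQueue = (int)RenderQueue.Geometry + 1;   // 2000 + 1
    }
}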
After adjustment, the stroke effect is normal.
But a problem remains even after this adjustment. For example, duplicate the stroked model and place one copy behind the other. We will then find that the overlapping part of the two models has no stroke.
Again, the Frame Debugger tells us why.
As the figure shows, the front object is drawn first and the back object afterwards, so drawing the back object overwrites the stroke of the front object.
To understand the fix, we need a piece of background knowledge: when Unity renders opaque objects that share the same render queue value, it renders them front to back (closest to the camera first); when it renders transparent objects that share the same render queue value, it renders them back to front (farthest from the camera first).
Why? For opaque objects, rendering the near ones first lets hardware features such as Early-Z reduce overdraw. For transparent objects, depth writing has to be turned off, so the far ones must be rendered before the near ones to make the blended color correct.
Therefore, if we want the overlapping region of the two models to keep its stroke, the back object has to be rendered before the front one, which means changing the render queue to Transparent. But even this does not completely solve the problem, because the "distance to the camera" mentioned above is actually rather vague. Which distance is it: the distance from the object's world-space position to the camera, or from some vertex of the object to the camera? Unity does not officially say.

2.4 Normal extension + stencil test

First render the object normally and write 1 into the stencil buffer. Then render the stroke with normal extension, drawing it only where the stencil buffer value is still 0.
The pseudo-code is as follows:

Pass 
{ 
    //  Write the stencil buffer as 1 
    Stencil 
    { 
        Ref  1 
        Comp  Always 
        Pass  Replace 
    }

    //  render normally 
    ... 
}

Pass
{
    Stencil
    {
        Ref 0
        Comp Equal
    }
    ZWrite Off

    //  Rendering strokes 
    //  Vertex shader normal extension 
    ... 
}

The complete code is as follows:

Shader "LaoWang/Outline_StencilTest"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        _OutlineWidth ("Outline width", Range(0.01, 4)) = 0.01
        _OutlineColor ("Outline Color", color) = (1.0, 1.0, 1.0, 1.0)
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" "Queue"="Geometry+1"}
        LOD 100

        Pass
        {
            Stencil
            {
                Ref 1
                Comp Always
                Pass Replace
            }

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"
            #include "Lighting.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
            float4 _MainTex_ST;

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.uv = TRANSFORM_TEX(v.uv, _MainTex);
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                fixed4 color = tex2D(_MainTex, i.uv);
                return fixed4(color.rgb, 1.0);
            }
            ENDCG
        }

        Pass
        {
            Stencil
            {
                Ref 0
                Comp Equal
            }

            ZWrite  Off

            CGPROGRAM

            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float3 normal : NORMAL;
            };

            struct v2f
            {
                float4 vertex : SV_POSITION;
            };

            float _OutlineWidth;
            fixed4 _OutlineColor;

            v2f vert (appdata v)
            {
                v2f o;
                //v.vertex.xy += normalize(v.normal) * _OutlineWidth;
                //o.vertex = UnityObjectToClipPos(v.vertex);

                o.vertex = UnityObjectToClipPos(v.vertex);
                float3 clipNormal = mul((float3x3) UNITY_MATRIX_VP, mul((float3x3) UNITY_MATRIX_M, v.normal));
                o.vertex.xy += normalize(clipNormal).xy * _OutlineWidth;

                //o.vertex = UnityObjectToClipPos(v.vertex);
                //float3 viewNormal = mul((float3x3)UNITY_MATRIX_IT_MV, v.normal);
                //float2 clipNormal = mul((float2x2)UNITY_MATRIX_P, viewNormal.xy);
                //o.vertex.xy += normalize(clipNormal) * _OutlineWidth;
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                return _OutlineColor;
            }

            ENDCG
        }
    }
}

The effect is as follows.
This method also has the problem that the overlapping part of two models gets no stroke, and because the suppression comes from the stencil test itself, this particular problem cannot be fixed within this approach.

2.5 Problems with strokes based on normal extension

Strokes implemented with normal extension share a common problem: where the normals are not continuous, the stroke breaks apart.
To solve this, you need to write a tool that smooths the vertex normals, stores the smoothed normals in the vertex color data, and then uses the smoothed normals for the extension (a rough sketch of such a tool follows below).
You can refer to this article, which implements such a normal-smoothing tool.
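For reference only, a minimal sketch of the idea, assuming the smoothed normal is baked into the vertex colors; this is not the tool from the referenced article, and the class and method names are my own.

using System.Collections.Generic;
using UnityEngine;

// Sketch: average the normals of vertices that share a position
// and bake the result into the mesh's vertex colors for the outline pass to read.
public static class SmoothNormalBaker
{
    public static void Bake(Mesh mesh)
    {
        var vertices = mesh.vertices;
        var normals = mesh.normals;

        // Group vertex indices by position so split vertices along hard edges are merged.
        var groups = new Dictionary<Vector3, List<int>>();
        for (int i = 0; i < vertices.Length; ++i)
        {
            if (!groups.TryGetValue(vertices[i], out var list))
                groups[vertices[i]] = list = new List<int>();
            list.Add(i);
        }

        var colors = new Color[vertices.Length];
        foreach (var group in groups.Values)
        {
            Vector3 smoothed = Vector3.zero;
            foreach (int i in group)
                smoothed += normals[i];
            smoothed.Normalize();

            // Remap from [-1, 1] to [0, 1] so the direction survives the color channel.
            foreach (int i in group)
                colors[i] = new Color(smoothed.x * 0.5f + 0.5f,
                                      smoothed.y * 0.5f + 0.5f,
                                      smoothed.z * 0.5f + 0.5f, 1f);
        }
        mesh.colors = colors;
    }
}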

3 Screen post-processing approaches

There are generally two ways to implement strokes with screen post-processing: one uses Unity's camera shader replacement technique, the other uses rendering commands (a CommandBuffer).

3.1 Camera shader replacement

I did not implement this method in full, but I did look into it; here is the general idea.

3.1.1 Shader Replacement Technology

What is the camera's shader replacement technique?
It sounds fancy, but the essence is not complicated: the scene is rendered again by a camera, but during this render the objects in the scene (not necessarily all of them; code can restrict rendering to a subset) no longer use their own shaders and are instead shaded with one specific shader (the same shader for every rendered object).
In code, it corresponds to the two methods of the Camera class below.

public void RenderWithShader(Shader shader, string replacementTag);
public void SetReplacementShader(Shader shader, string replacementTag);

RenderWithShader only takes effect for the single frame in which it is called.
After SetReplacementShader is called, the camera keeps rendering with the specified shader until the code explicitly calls ResetReplacementShader.

public void ResetReplacementShader();
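As a small usage sketch (my own example, not from the original article), the two calls are typically paired, for instance in OnEnable/OnDisable; the class name is hypothetical.

using UnityEngine;

// Sketch: use replacement rendering while this component is active,
// and restore normal rendering when it is disabled.
public class ReplacementToggle : MonoBehaviour
{
    public Shader replacementShader;

    void OnEnable()
    {
        Camera.main.SetReplacementShader(replacementShader, "RenderType");
    }

    void OnDisable()
    {
        Camera.main.ResetReplacementShader();
    }
}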

A word on the parameters of the two methods.
The first parameter is the shader the camera will use when rendering.
The second parameter is the tag the camera matches against; it is usually set to RenderType.
An example will make it clear at a glance.
We call it like this:

Camera.main.SetReplacementShader(Shader.Find("LaoWang/CameraReplace"), "RenderType");

The corresponding Shader code is as follows:

Shader "LaoWang/CameraReplace"
{
    SubShader
    {
        // Used when rendering opaque objects (shaders whose RenderType tag is Opaque)
        Tags { "RenderType" = "Opaque" }

        Pass
        {
            // ...
        }
    }

    SubShader
    {
        // Used when rendering transparent objects (shaders whose RenderType tag is Transparent)
        Tags { "RenderType" = "Transparent" }

        Pass
        {
            // ...
        }
    }
}

Our main camera will then traverse all objects in the scene when rendering. If the RenderType tag of an object's own shader is Opaque, the camera renders it with the Pass in the first SubShader of "LaoWang/CameraReplace"; if the RenderType tag is Transparent, it uses the Pass in the second SubShader; and if the tag is neither Opaque nor Transparent, the object is not rendered at all.
Does the tag we match on have to be RenderType? No. We usually use RenderType simply because Unity's built-in shaders all carry this tag.
Unity's camera depth texture and depth-normals texture are actually generated with this technique. If you ever find that an object is missing when you output the camera's depth texture, the likely cause is that the RenderType tag of that object's shader is missing or set incorrectly.
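For instance (a standard call, shown here only as a sketch), the depth texture is requested like this; any object whose shader lacks a proper RenderType tag will simply be absent from it:

// Ask the main camera to also render a depth texture
Camera.main.depthTextureMode |= DepthTextureMode.Depth;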

3.1.2 Stroke ideas

① Create an extra camera; use Camera.CopyFrom to copy the main camera's parameters and give it the same position and rotation (a setup sketch follows after this list).
② Set the extra camera's Culling Mask so that it only sees the objects to be outlined.
③ Create a RenderTexture, say rt. Its width and height can match the screen, but a full screen-sized texture uses a lot of memory, so if that is unnecessary a smaller resolution is recommended.
④ Set the extra camera's Target Texture to the RenderTexture created in step ③.
⑤ Create a script that inherits from MonoBehaviour, implement the OnRenderImage(RenderTexture source, RenderTexture destination) method in it, and attach it to the main camera.
⑥ Create a new shader for the extra camera to render with: set its RenderType tag to Opaque and output only a solid color in the fragment shader. This shader is the one used for the extra camera's shader replacement.
⑦ Have the extra camera call SetReplacementShader, passing the shader created in step ⑥ as the first parameter and RenderType as the second:

SetReplacementShader(Shader.Find(""), "RenderType");

⑧ In OnRenderImage, first apply a Gaussian blur to rt; the blur expands the silhouette of the objects seen by the extra camera. Compare the blurred image with the original rt to extract the stroke, then composite the stroke over the image seen by the main camera.
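For reference, a minimal setup sketch for steps ① through ④ and ⑦, under my own assumptions (the class name, the "OutlineObjects" layer name, and the field names are hypothetical and not from the original article):

using UnityEngine;

// Sketch of the extra-camera setup described in the list above.
public class OutlineCameraSetup : MonoBehaviour
{
    public Camera mainCamera;
    public Shader replacementShader;      // the solid-color shader from step ⑥
    private Camera m_ExtraCamera;
    private RenderTexture m_Rt;

    void OnEnable()
    {
        // ① copy the main camera's parameters, position and rotation
        m_ExtraCamera = new GameObject("OutlineCamera").AddComponent<Camera>();
        m_ExtraCamera.CopyFrom(mainCamera);
        m_ExtraCamera.transform.SetPositionAndRotation(
            mainCamera.transform.position, mainCamera.transform.rotation);

        // ② only render the layer that holds the outlined objects
        m_ExtraCamera.cullingMask = LayerMask.GetMask("OutlineObjects");
        m_ExtraCamera.clearFlags = CameraClearFlags.SolidColor;
        m_ExtraCamera.backgroundColor = Color.clear;

        // ③ ④ render into an off-screen texture instead of the screen
        m_Rt = RenderTexture.GetTemporary(Screen.width, Screen.height);
        m_ExtraCamera.targetTexture = m_Rt;

        // ⑦ replace every object's shader by matching the RenderType tag
        m_ExtraCamera.SetReplacementShader(replacementShader, "RenderType");
    }

    void OnDisable()
    {
        if (m_Rt != null) RenderTexture.ReleaseTemporary(m_Rt);
        if (m_ExtraCamera != null) Destroy(m_ExtraCamera.gameObject);
    }
}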

3.1.3 Problems

This approach has several problems. First, an extra camera has to be created and managed. A Camera is a fairly heavyweight object in Unity's scene management; behind it lies a series of complex operations such as frustum culling and sorting, which is wasteful when all we need is to draw a few simple objects. Second, the objects to be drawn need a dedicated layer; if their layer is already taken by other objects of the same kind, this is inconvenient to manage. Third, the first rendering step is separated from the remaining steps: the final output has to happen on the main camera, which means scripts must be maintained on both cameras.

3.2 Render Command

3.2.1 Ideas

The idea of using rendering commands to implement the stroke is the same as with the camera shader replacement technique, except that instead of using an additional camera, the extra rendering step is handled directly with a CommandBuffer.

3.2.2 Command Buffer

This is implemented mainly through CommandBuffer, which wraps the underlying graphics APIs such as OpenGL and DirectX and predefines a series of rendering commands, making it very convenient to use.
See the official documentation and the detailed API reference.
Let me walk through the code we use for the stroke.
First, instantiate a CommandBuffer and set its name to "Render Outline"; the name mainly makes it easy to locate the corresponding rendering step in the Frame Debugger.

m_RenderCommand = new CommandBuffer
{
    name = "Render Outline"
};

Create the material that the CommandBuffer will render with.

m_OutlineMaterial = new Material(Shader.Find(OutlineShader));

Then clear the CommandBuffer's render target, and use DrawRenderer to add the objects that need the extra rendering to the CommandBuffer's queue.
The sample code below draws each object with the first Pass of m_OutlineMaterial (the second 0 in the parameters; the first 0 is the submesh index).

// Add rendering tasks to the CommandBuffer in order
m_RenderCommand.ClearRenderTarget(true, true, Color.clear);
for (int i = 0; i < OutlineObjects.Length; ++i)
{
    m_RenderCommand.DrawRenderer(OutlineObjects[i], m_OutlineMaterial, 0, 0);
}

Then create a render texture and use Graphics.SetRenderTarget to make it the current render target, followed by a call to Graphics.ExecuteCommandBuffer.
This executes the commands recorded in the CommandBuffer, and the result lands in the newly created render texture.

m_OutlineMaterial.SetColor("_OutlineColor", outlineColor);
RenderTexture outlineColorRt = RenderTexture.GetTemporary(Screen.width, Screen.height);
Graphics.SetRenderTarget(outlineColorRt);
Graphics.ExecuteCommandBuffer(m_RenderCommand);

When the CommandBuffer is no longer needed, clear its recorded commands.

m_RenderCommand.Clear();

3.2.3 Complete code

PostEffectOutline.cs

using UnityEngine;
using UnityEngine.Rendering;

[DisallowMultipleComponent]
[RequireComponent(typeof(Camera))]
public class PostEffectOutline : MonoBehaviour
{
    private const string OutlineShader = "LaoWang/PostEffect/Outline";

    private Material m_OutlineMaterial;
    private CommandBuffer m_RenderCommand;
    public Renderer[] OutlineObjects;

    public Color outlineColor = Color.red;

    [Range(1, 8)]
    public int downSampleScale = 2;                  // downsampling scale
    [Range(0, 4)]
    public int blurIterations = 1;                   // Gaussian blur iterations
    [Range(0.2f, 3.0f)]
    public float blurSpread = 0.6f;                  // Gaussian blur spread

    private void Awake()
    {
        m_RenderCommand = new CommandBuffer
        {
            name = "Render Outline"
        };

        m_OutlineMaterial = new Material(Shader.Find(OutlineShader));
    }

    void OnEnable()
    {
        // Add rendering tasks to the CommandBuffer in order
        m_RenderCommand.ClearRenderTarget(true, true, Color.clear);
        for (int i = 0; i < OutlineObjects.Length; ++i)
        {
            m_RenderCommand.DrawRenderer(OutlineObjects[i], m_OutlineMaterial, 0, 0);
        }
    }

    void OnDisable()
    {
        m_RenderCommand.Clear();
    }

    void OnDestroy()
    {
        m_RenderCommand.Clear();
    }

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        //1. Draw the stroke color
        m_OutlineMaterial.SetColor("_OutlineColor", outlineColor);
        RenderTexture outlineColorRt = RenderTexture.GetTemporary(Screen.width, Screen.height);
        Graphics.SetRenderTarget(outlineColorRt);
        Graphics.ExecuteCommandBuffer(m_RenderCommand);
        // for testing
        //Graphics.Blit(outlineColorRt, destination);
        //RenderTexture.ReleaseTemporary(outlineColorRt);

        //2. Downsample
        int rtW = Screen.width >> downSampleScale;
        int rtH = Screen.height >> downSampleScale;
        RenderTexture blurRt = RenderTexture.GetTemporary(rtW, rtH);
        blurRt.filterMode = FilterMode.Bilinear;
        Graphics.Blit(outlineColorRt, blurRt);

        //3. Gaussian blur
        RenderTexture blurTemp = RenderTexture.GetTemporary(rtW, rtH);
        for (int i = 0; i < blurIterations; ++i)
        {
            m_OutlineMaterial.SetFloat("_BlurSize", 1.0f + i * blurSpread);
            // vertical blur (shader pass 1)
            Graphics.Blit(blurRt, blurTemp, m_OutlineMaterial, 1);
            // horizontal blur (shader pass 2)
            Graphics.Blit(blurTemp, blurRt, m_OutlineMaterial, 2);
        }

        // for testing 
        //Graphics.Blit(blurRt, destination);

        m_OutlineMaterial.SetTexture("_OutlineColorTex", outlineColorRt);
        m_OutlineMaterial.SetTexture("_BlurTex", blurRt);
        Graphics.Blit(source, destination, m_OutlineMaterial, 3);

        RenderTexture.ReleaseTemporary(outlineColorRt);
        RenderTexture.ReleaseTemporary(blurRt);
        RenderTexture.ReleaseTemporary(blurTemp);
    }
}

PostEffect_Outline.shader

Shader "LaoWang/PostEffect/Outline"
{
    Properties
    {
        _OutlineColor ("Outline Color", color) = (1.0, 0, 0, 1.0)
        _MainTex ("Texture", 2D) = "white" {}
        _BlurSize ("Blur Size", float) = 1.0
    }
    SubShader
    {
        Tags { "RenderType" = "Opaque" }

        CGINCLUDE

        #include "UnityCG.cginc"

        fixed4 _OutlineColor;
        sampler2D _MainTex;
        half4 _MainTex_TexelSize;
        float _BlurSize;

        struct v2f
        {
            float4 pos : SV_POSITION;
            half2 uv[5] : TEXCOORD0;
        };

        v2f vertBlurVertical(appdata_img v)
        {
            v2f o;
            o.pos = UnityObjectToClipPos(v.vertex);

            half2 uv = v.texcoord;

            o.uv[0] = uv; 
            o.uv[1] = uv + float2(0.0, _MainTex_TexelSize.y * 1.0) * _BlurSize; 
            o.uv[2] = uv - float2(0.0, _MainTex_TexelSize.y * 1.0) * _BlurSize; 
            o.uv[3] = uv + float2(0.0, _MainTex_TexelSize.y * 2.0) * _BlurSize; 
            o.uv[4] = uv - float2(0.0, _MainTex_TexelSize.y * 2.0) * _BlurSize;

            return  o; 
        }

        v2f vertBlurHorizontal(appdata_img v)
        {
            v2f o;
            o.pos = UnityObjectToClipPos(v.vertex);

            half2 uv = v.texcoord;

            o.uv[0] = uv;
            o.uv[1] = uv + float2(_MainTex_TexelSize.x * 1.0, 0.0) * _BlurSize;
            o.uv[2] = uv - float2(_MainTex_TexelSize.x * 1.0, 0.0) * _BlurSize;
            o.uv[3] = uv + float2(_MainTex_TexelSize.x * 2.0, 0.0) * _BlurSize;
            o.uv[4] = uv - float2(_MainTex_TexelSize.x * 2.0, 0.0) * _BlurSize;

            return  o; 
        }

        fixed4 fragBlur(v2f i) : SV_Target
        {
            float weight[3] = {0.4026, 0.2442, 0.0545};
            fixed3 sum = tex2D(_MainTex, i.uv[0]).rgb * weight[0];

            for(int it = 1; it < 3; it++)
            {
                sum += tex2D(_MainTex, i.uv[it*2 - 1]).rgb * weight[it];
                sum += tex2D(_MainTex, i.uv[it*2]).rgb * weight[it];
            }

            return fixed4(sum, 1.0);
        }

        ENDCG

        ZTest Always 
        Cull Off 
        ZWrite Off

        pass
        {
            CGPROGRAM

            #pragma vertex vert
            #pragma fragment frag

            v2f_img vert (appdata_img v)
            {
                v2f_img o;
                o.pos = UnityObjectToClipPos(v.vertex);
                o.uv = v.texcoord;
                return o;
            }

            fixed4 frag (v2f_img i) : SV_Target
            {
                return _OutlineColor;
            }

            ENDCG
        }

        pass
        {
            NAME "GAUSSIAN_BLUR_VERTICAL"

            CGPROGRAM

            #pragma vertex vertBlurVertical
            #pragma fragment fragBlur

            ENDCG
        }

        pass
        {
            NAME "GAUSSIAN_BLUR_HORIZONTAL"

            CGPROGRAM

            #pragma vertex vertBlurHorizontal
            #pragma fragment fragBlur

            ENDCG
        }

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment fragOutline

            sampler2D _BlurTex, _OutlineColorTex;
            half4 _BlurTex_TexelSize, _OutlineColorTex_TexelSize;

            v2f_img vert (appdata_img v)
            {
                v2f_img o;
                o.pos = UnityObjectToClipPos(v.vertex);
                o.uv = v.texcoord;
                return o;
            }

            fixed4 fragOutline(v2f_img i) : SV_Target
            {
                fixed4 scene = tex2D(_MainTex, i.uv);
                fixed4 blur = tex2D(_BlurTex, i.uv);
                fixed4 outlineColor = tex2D(_OutlineColorTex, i.uv);
                fixed4 outline = blur - outlineColor;

                fixed4 final = scene * (1 - all(outline.rgb)) + _OutlineColor * any(outline.rgb);
                return final;
            }

            ENDCG
        }
    }
    FallBack off
}

4 Complete project

This article is also available on the blogger's personal blog.
Link: https://pan.baidu.com/s/1AhKWJxMaQI89vAwv8RE-vQ
Extraction code: xaaz
