Unity Rendering Principles (10): Unity Basic Textures (Single Textures and Normal Textures)

The original purpose of texturing is to use an image to control the appearance of a model. With texture mapping, we can "glue" an image onto the surface of the model and control the model's color per texel (texture element, a term used to distinguish it from a screen pixel).

When modeling, artists usually use UV unwrapping tools in the modeling software to store texture coordinates on each vertex. Texture coordinates define the 2D position in the texture that corresponds to the vertex. These coordinates are usually represented by a 2D variable (u, v), where u is the horizontal coordinate and v is the vertical coordinate, so texture coordinates are also called uv coordinates.

Texture sizes vary, for example 256 x 256, but uv coordinates are generally normalized to the range [0, 1]. Note that the texture coordinates used for sampling are not necessarily within [0, 1]; in fact, coordinates outside this range can be very useful. Closely related is the texture's wrap mode, which determines how the rendering engine samples the texture when it encounters coordinates outside [0, 1].

Single texture

We usually use a texture in place of an object's diffuse color. Let's walk through the mapping process of a single texture with a simple, annotated example:

First, we declare some properties required for the texture.

Shader "Unlit/SingleTexture"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
_Color("Color Tint", Color) = (1,1,1,1)
_Specular("Specular", Color) = (1,1,1,1)
_Gloss("Gloss", Range(8.0, 256)) = 20
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100

Pass
{
Tags { "LightMode" = "UniversalForward" }

CGPROGRAM
#include "Lighting.cginc"
#pragma vertex vert
#pragma fragment frag

fixed4 _Color;
sampler2D _MainTex;
//Each texture has a corresponding set of implicit properties; {texture name}_ST holds the texture's scale (xy) and offset (zw)
float4 _MainTex_ST;
fixed4 _Specular;
float _Gloss;

struct a2v
{
//This is a float4 because xyzw is a homogeneous coordinate: w is 1 for a point and 0 for a vector
float4 vertex : POSITION;
float3 normal : NORMAL;
float4 texcoord : TEXCOORD0;
};

//For the semantics available to fragment shaders, see: https://docs.unity3d.com/cn/2019.4/Manual/SL-ShaderSemantics.html
struct v2f
{
float4 pos : SV_POSITION;
float3 worldNormal : TEXCOORD0;
float3 worldPos : TEXCOORD1;
float2 uv : TEXCOORD2;
};

v2f vert(a2v v)
{
v2f o;

//Get the coordinates of the vertex in the clipping space
o.pos = UnityObjectToClipPos(v.vertex);
//Transform the normal direction to world space (it is re-normalized in the fragment shader after interpolation)
o.worldNormal = UnityObjectToWorldNormal(v.normal);
//Get the vertex coordinates in the world coordinate system
o.worldPos = mul(unity_ObjectToWorld, v.vertex).xyz;
//Apply the texture's scale and offset to get the sampling uv (equivalent to the TRANSFORM_TEX(v.texcoord, _MainTex) macro)
o.uv = v.texcoord.xy * _MainTex_ST.xy + _MainTex_ST.zw;

return o;
}

fixed4 frag(v2f i) : SV_Target
{
//normalize the direction of the normal under the world coordinate system
fixed3 worldNormal = normalize(i.worldNormal);
//Get and normalize the direction of the point to the light source in the world coordinate system
fixed3 worldLightDir = normalize(UnityWorldSpaceLightDir(i.worldPos));

//Sample the texture and tint it with our custom color to get the albedo (reflected color)
fixed3 albedo = tex2D(_MainTex, i.uv).rgb * _Color.rgb;

//Combine the ambient light with the albedo
fixed3 ambient = UNITY_LIGHTMODEL_AMBIENT.xyz * albedo;

//get diffuse light
fixed3 diffuse = _LightColor0.rgb * albedo * max(0, dot(worldNormal, worldLightDir));

//Get and normalize the viewing direction of the point to the camera in world coordinates
fixed3 viewDir = normalize(UnityWorldSpaceViewDir(i.worldPos));
fixed3 halfDir = normalize(worldLightDir + viewDir);
//get specular reflection
fixed3 specular = _LightColor0.rgb * _Specular.rgb * pow(max(0, dot(worldNormal, halfDir)), _Gloss);

return fixed4(ambient + diffuse + specular, 1.0);
}
ENDCG
}
}

Fallback "Specular"
}

Properties of Unity Textures

After we import the image into Unity, there are many properties to choose from.

Let's pick out a few of the more important ones:

  • Texture Type. Options include Texture (the default), Normal Map, and Cube Map; here we are using the default Texture type.
  • Wrap Mode. Determines how the texture is tiled when texture coordinates fall outside the range [0, 1]. There are two options. One is Repeat: in this mode, if a texture coordinate exceeds 1, its integer part is discarded and only the fractional part is used for sampling, so the texture repeats endlessly. The other is Clamp: coordinates greater than 1 are clamped to 1 and coordinates less than 0 are clamped to 0 (a minimal shader sketch emulating both modes appears after this list).
  • Filter Mode. Determines which filtering method is used when the texture is stretched or shrunk by a transformation. There are three options: Point, Bilinear, and Trilinear. The filtering quality improves in that order, but so does the cost (Trilinear needs mipmap results; if mipmaps are not enabled, Trilinear and Bilinear give the same result).
  • Mipmap. If a texture is shrunk on screen, multiple texels in the original texture map to a single target pixel. This case is more complicated because we often need to deal with aliasing. One solution is mipmapping, which applies filtering in advance to produce a series of progressively smaller images that form an image pyramid, each level being a downsampled version of the previous one. At runtime the required pixels can then be fetched quickly, a typical space-for-time trade-off.
  • Read/Write Enabled. If you want to read the texture from code at runtime, you need to enable this option, but remember that once it is enabled the memory occupied by the texture doubles.
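
For intuition, here is a minimal fragment-shader sketch (assuming the _MainTex sampler and the v2f struct with a float2 uv field from the shader above) that emulates the two wrap modes in code: Repeat keeps only the fractional part of the uv, while Clamp restricts it to [0, 1]:

fixed4 fragRepeat(v2f i) : SV_Target
{
//frac discards the integer part (e.g. 1.25 becomes 0.25), so the texture tiles
return tex2D(_MainTex, frac(i.uv));
}

fixed4 fragClamp(v2f i) : SV_Target
{
//saturate clamps the uv to [0, 1] (e.g. 1.25 becomes 1.0), so the edge texels are stretched
return tex2D(_MainTex, saturate(i.uv));
}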

Bump mapping

Another common application of textures is bump mapping. The purpose of bump mapping is to use a texture to modify the normals of the model's surface in order to give the model more detail. This method does not change the model's vertex positions; it only makes the surface look uneven, so flaws can still be seen along the model's silhouette.

There are two main approaches to bump mapping: one is to use a height map to simulate surface displacement and derive a modified normal from it, which is called height mapping; the other is to store the surface normals directly in a normal texture, which is called normal mapping.

Height texture

The first approach implements bump mapping with a height map. The height map stores intensity values that represent the local elevation of the model's surface: the lighter the color, the more the surface bulges outward at that location, and the darker the color, the more it is recessed. The advantage of this method is that it is very intuitive; the disadvantage is that the computation is more involved, because the surface normal cannot be read directly at runtime but has to be derived from the grayscale values of neighboring pixels, which costs more performance.
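
As an illustration, here is a minimal sketch that derives a tangent-space normal from a height map by comparing the heights of neighboring texels. The _HeightMap sampler, its _HeightMap_TexelSize property (Unity fills this in automatically, with 1/width and 1/height in xy), and the _HeightScale strength parameter are assumptions for this example, not part of the shaders in this article:

//Sample the heights of the four neighbors (left, right, down, up)
float hL = tex2D(_HeightMap, uv - float2(_HeightMap_TexelSize.x, 0)).r;
float hR = tex2D(_HeightMap, uv + float2(_HeightMap_TexelSize.x, 0)).r;
float hD = tex2D(_HeightMap, uv - float2(0, _HeightMap_TexelSize.y)).r;
float hU = tex2D(_HeightMap, uv + float2(0, _HeightMap_TexelSize.y)).r;
//(hR - hL) and (hU - hD) approximate the surface gradient; _HeightScale absorbs the texel-size
//factor and controls how strong the bumps appear
float3 normalTS = normalize(float3((hL - hR) * _HeightScale, (hD - hU) * _HeightScale, 1.0));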

Height maps are often used together with normal maps to provide additional information about surface irregularities; in practice, we usually use normal maps to modify the lighting.

Normal texture

A normal texture stores the surface normal direction. Since the components of the normal direction range over [-1, 1] while the components of a pixel range over [0, 1], we need a mapping; the commonly used mapping is

$$pixel = \frac{normal + 1}{2}$$

This means that after we sample the normal texture in the shader, we still need to map the result back to obtain the original normal direction. That inverse mapping is simply the inverse function of the mapping above:

$$normal = pixel \times 2 - 1$$
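
In shader code the inverse mapping looks like the sketch below (assuming the _BumpMap sampler and uv from the shaders later in this article). Note that when a texture is imported with the Normal map type, Unity may store it in a platform-specific compressed format such as DXT5nm, which is why the full shaders below use the built-in UnpackNormal helper from UnityCG.cginc instead of doing the remapping by hand:

fixed4 packedNormal = tex2D(_BumpMap, i.uv);
//Manual remapping: only valid when the texture really stores (normal + 1) / 2 in its RGB channels
fixed3 manualNormal = packedNormal.rgb * 2.0 - 1.0;
//The built-in helper handles both the plain RGB case and compressed formats such as DXT5nm
fixed3 unpackedNormal = UnpackNormal(packedNormal);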

Since a direction is always relative to some coordinate space, in which coordinate space are the normals in a normal texture stored? The normals built into the model are defined in model space, so a straightforward idea is to store the modified surface normals in model space directly in a texture; this is called an object-space normal map. In practice, however, we usually use another coordinate space, the tangent space of each model vertex, to store the normals. Every vertex of the model has its own tangent space: its origin is the vertex itself, its z-axis is the vertex normal, its x-axis is the vertex tangent, and its y-axis is the cross product of the other two, also called the bitangent or binormal. A texture stored this way is called a tangent-space normal map.

A normal texture in model space looks colorful, because all the normals share the same coordinate space (model space) and each point stores a different direction: some are (0, 1, 0), which after mapping and storing in the texture corresponds to the light green RGB (0.5, 1, 0.5), while others are (0, -1, 0), which corresponds to the purple (0.5, 0, 0.5). A normal texture in tangent space, on the other hand, looks almost entirely light blue, because each normal lives in a different coordinate space, namely its vertex's own tangent space. Such a texture really stores, for every point, the perturbation of the normal within its own tangent space. In other words, if a point's normal direction is unchanged, then in its tangent space the new normal is simply the z-axis direction (0, 0, 1), which maps to light blue when stored in the texture.

Comparison of normal textures in different spaces

In general, a normal texture in model space matches people's intuition better, and the texture itself is intuitive and easy to adjust, with different normal directions shown as different colors. Yet artists prefer normal textures in tangent space. Why is that?

In fact, normals can be stored in any coordinate space, even world space, but the point is that we never want just the normal itself; the subsequent lighting calculation is the real goal, and whichever space we choose, we have to convert the other quantities into a matching space. For example, if the normals are stored in tangent space and we light in world space, we need to convert the normal direction sampled from the normal texture from tangent space to world space.

In general, the advantages of storing normals in model space are as follows:

  • Simple to implement and more intuitive. We don't even need the model's original normal and tangent data, so there is less computation, and generating the texture is simple. Generating a normal texture in tangent space, by contrast, requires a continuous texture mapping to get good results, because the model's tangents generally follow the uv directions.
  • Fewer visible discontinuities at texture seams and sharp corners, giving smoother boundaries. This is because a model-space normal texture stores all normal information in the same coordinate space, so the normals obtained by interpolation across a boundary change smoothly. The normals in a tangent-space normal texture depend on the direction of the texture coordinates, so visible seams may appear at edges or sharp corners.

But tangent space has more advantages:

  • Higher degree of freedom. A normal texture in model space records absolute normal information and can only be used on the model it was created for; applying it to another model gives completely wrong results. A normal texture in tangent space records relative normal information, which means that even if the texture is applied to a completely different mesh, a reasonable result can still be obtained.
  • UV animation is possible. For example, we can scroll a texture's uv coordinates to achieve a moving-bumps effect; doing the same with a model-space normal texture gives completely wrong results, for the same reason as above. This kind of uv animation is often used on objects such as water and volcanic rock.
  • Reusable normal textures. For example, for a brick, we can use only one normal texture for all 6 faces.
  • Compressible. Since the z component of a normal stored in a tangent-space normal texture is always positive, we can store only the x and y components and derive z from them (see the sketch after this list).
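
A minimal sketch of that reconstruction, assuming tangentNormal.xy has already been read from the normal texture and scaled (as in the tangent-space shader later in this article): because a unit-length tangent-space normal always points away from the surface, z is non-negative and can be recovered from x and y:

//z = sqrt(1 - x*x - y*y); saturate guards against negative values caused by scaling or precision
tangentNormal.z = sqrt(1.0 - saturate(dot(tangentNormal.xy, tangentNormal.xy)));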

Practice

When computing the lighting model we need all direction vectors to be in the same coordinate space. Since the normals stored in a normal texture are directions in tangent space, we usually have two options: one is to perform the lighting calculation in tangent space, in which case we need to transform the light direction and view direction into tangent space; the other is to perform the lighting calculation in world space, in which case we need to transform the sampled normal direction into world space and then compute with the light direction and view direction in world space.

In terms of efficiency, the first method is usually better than the second, because we can complete the transformation of the light direction and view direction in the vertex shader, whereas the second method must sample the normal texture first, so the transformation has to happen in the fragment shader, which means an extra matrix operation per fragment. From a generality point of view, however, the second method is better, because sometimes we need to perform other calculations in world space. For example, when using a Cubemap for environment mapping, we need the reflection direction in world space to sample the Cubemap.

Calculating in tangent space

The basic idea is to sample the tangent-space normal from the texture in the fragment shader, and then compute the final lighting result together with the view direction and light direction, also expressed in tangent space.

To do this, we first need to transform the view direction and light direction from model space to tangent space in the vertex shader; that is, we need the transformation matrix from model space to tangent space. The inverse of this matrix, the transformation from tangent space to model space, is very easy to build: in the vertex shader we simply arrange the tangent (x-axis), bitangent (y-axis), and normal (z-axis) as its columns. And since a transformation consisting only of rotation (translation does not affect directions) has an inverse equal to its transpose, the matrix from model space to tangent space is the transpose of the matrix from tangent space to model space; in other words, we arrange the tangent (x-axis), bitangent (y-axis), and normal (z-axis) as its rows.
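
Written out, with $T$, $B$, and $N$ the tangent, bitangent, and normal expressed in model space as column vectors:

$$M_{tangent \to model} = \begin{pmatrix} T & B & N \end{pmatrix}, \qquad M_{model \to tangent} = M_{tangent \to model}^{-1} = M_{tangent \to model}^{T} = \begin{pmatrix} T^{T} \\ B^{T} \\ N^{T} \end{pmatrix}$$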

Shader "Unlit/NormalMapTangentSpace"
{
Properties
{
_Color("Color Tint", Color) = (1,1,1,1)
_MainTex("Main Tex", 2D) = "white" {}
_BumpMap("Normal Map", 2D) = "bump" {}
_BumpScale("Bump Scale", Float) = 1.0
_Specular("Specular", Color) = (1,1,1,1)
_Gloss("Gloss", Range(8.0, 256)) = 20
}
SubShader
{
Tags {"RenderType" = "Opaque"}
LOD 100

Pass
{
Tags{"LightMode" = "UniversalForward"}
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "Lighting.cginc"

fixed4 _Color;
sampler2D _MainTex;
float4 _MainTex_ST;
sampler2D _BumpMap;
float4 _BumpMap_ST;
float _BumpScale;
fixed4 _Specular;
float _Gloss;

struct a2v
{
float4 vertex: POSITION;
float3 normal: NORMAL;
float4 tangent: TANGENT;
float4 texcoord: TEXCOORD0;
};

struct v2f
{
float4 pos: SV_POSITION;
float4 uv: TEXCOORD0;
float3 lightDir: TEXCOORD1;
float3 viewDir: TEXCOORD2;
};

v2f vert(a2v v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.uv.xy = v.texcoord.xy * _MainTex_ST.xy + _MainTex_ST.zw;
o.uv.zw = v.texcoord.xy * _BumpMap_ST.xy + _BumpMap_ST.zw;

TANGENT_SPACE_ROTATION;
//The TANGENT_SPACE_ROTATION macro is equivalent to embedding the following two lines of code:
//    float3 binormal = cross( v.normal, v.tangent.xyz ) * v.tangent.w;
//    float3x3 rotation = float3x3( v.tangent.xyz, binormal, v.normal );
//It defines the rotation matrix that transforms a vector from object space to tangent space.


o.lightDir = mul(rotation, ObjSpaceLightDir(v.vertex)).xyz;
o.viewDir = mul(rotation, ObjSpaceViewDir(v.vertex)).xyz;
return o;
}

fixed4 frag(v2f i): SV_Target
{
fixed3 tangentLightDir = normalize(i.lightDir);
fixed3 tangentViewDir = normalize(i.viewDir);

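//Sample the normal texture and unpack it to a tangent-space normal; _BumpScale scales the xy
//perturbation and z is rebuilt so the normal stays (approximately) unit length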
fixed4 packedNormal = tex2D(_BumpMap, i.uv.zw);
fixed3 tangentNormal = UnpackNormal(packedNormal);
tangentNormal.xy *= _BumpScale;
tangentNormal.z = sqrt(1.0 - saturate(dot(tangentNormal.xy, tangentNormal.xy)));

fixed3 albedo = tex2D(_MainTex, i.uv.xy).rgb * _Color.rgb;

fixed3 ambient = UNITY_LIGHTMODEL_AMBIENT.xyz * albedo;

fixed3 diffuse = _LightColor0.rgb * albedo * max(0, dot(tangentNormal, tangentLightDir));

fixed3 halfDir = normalize(tangentLightDir + tangentViewDir);

fixed3 specular = _LightColor0.rgb * _Specular.rgb * pow(max(0, dot(tangentNormal, halfDir)), _Gloss);

return fixed4(ambient + diffuse + specular, 1.0);
}
ENDCG
}
}

Fallback "Specular"
}

Computing in world space

Here we need to transform the normal direction from tangent space to world space in the fragment shader. The basic idea of this method is to compute the tangent-space-to-world-space transformation matrix in the vertex shader and pass it to the fragment shader. This matrix can be built from the world-space tangent, bitangent, and normal of the vertex. Finally, in the fragment shader we only need to use it to transform the normal read from the normal texture from tangent space to world space.

Although this method requires more computation, we need it in situations such as using a Cubemap for environment mapping.

Shader "Unlit/NormalMapWorldSpace"
{
Properties
{
_Color("Color Tint", Color) = (1,1,1,1)
_MainTex("Main Tex", 2D) = "white" {}
_BumpMap("Normal Map", 2D) = "bump" {}
_BumpScale("Bump Scale", Float) = 1.0
_Specular("Specular", Color) = (1,1,1,1)
_Gloss("Gloss", Range(8.0, 256)) = 20
}
SubShader
{
Tags {"RenderType" = "Opaque"}
LOD 100

Pass
{
Tags{"LightMode" = "UniversalForward"}
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "Lighting.cginc"

fixed4 _Color;
sampler2D _MainTex;
float4 _MainTex_ST;
sampler2D _BumpMap;
float4 _BumpMap_ST;
float _BumpScale;
fixed4 _Specular;
float _Gloss;

struct a2v
{
float4 vertex: POSITION;
float3 normal: NORMAL;
float4 tangent: TANGENT;
float4 texcoord: TEXCOORD0;
};

struct v2f
{
float4 pos: SV_POSITION;
float4 uv: TEXCOORD0;
float4 TtoW0: TEXCOORD1;
float4 TtoW1: TEXCOORD2;
float4 TtoW2: TEXCOORD3;
};

v2f vert(a2v v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.uv.xy = v.texcoord.xy * _MainTex_ST.xy + _MainTex_ST.zw;
o.uv.zw = v.texcoord.xy * _BumpMap_ST.xy + _BumpMap_ST.zw;

float3 worldPos = mul(unity_ObjectToWorld, v.vertex).xyz;
fixed3 worldNormal = UnityObjectToWorldNormal(v.normal);
fixed3 worldTangent = UnityObjectToWorldDir(v.tangent.xyz);
fixed3 worldBinormal = cross(worldNormal, worldTangent) * v.tangent.w;

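//Pack the rows of the tangent-to-world matrix into TtoW0/1/2 (xyz), and store the world position
//in the w components so that no extra interpolator is needed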
o.TtoW0 = float4(worldTangent.x, worldBinormal.x, worldNormal.x, worldPos.x);
o.TtoW1 = float4(worldTangent.y, worldBinormal.y, worldNormal.y, worldPos.y);
o.TtoW2 = float4(worldTangent.z, worldBinormal.z, worldNormal.z, worldPos.z);

return o;
}

fixed4 frag(v2f i): SV_Target
{
float3 worldPos = float3(i.TtoW0.w, i.TtoW1.w, i.TtoW2.w);
fixed3 lightDir = normalize(UnityWorldSpaceLightDir(worldPos));
fixed3 viewDir = normalize(UnityWorldSpaceViewDir(worldPos));

fixed4 packedNormal = tex2D(_BumpMap, i.uv.zw);
fixed3 bump = UnpackNormal(packedNormal);
bump.xy *= _BumpScale;
bump.z = sqrt(1.0 - saturate(dot(bump.xy, bump.xy)));

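//Transform the normal from tangent space to world space; each dot product evaluates one row of
//the tangent-to-world matrix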
bump = normalize(half3(dot(i.TtoW0.xyz, bump), dot(i.TtoW1.xyz, bump), dot(i.TtoW2.xyz, bump)));

fixed3 albedo = tex2D(_MainTex, i.uv.xy).rgb * _Color.rgb;

fixed3 ambient = UNITY_LIGHTMODEL_AMBIENT.xyz * albedo;

fixed3 diffuse = _LightColor0.rgb * albedo * max(0, dot(bump, lightDir));

fixed3 halfDir = normalize(lightDir + viewDir);

fixed3 specular = _LightColor0.rgb * _Specular.rgb * pow(max(0, dot(bump, halfDir)), _Gloss);

return fixed4(ambient + diffuse + specular, 1.0);
}
ENDCG
}
}

Fallback "Specular"
}