3D Game Engine Series, Part 10


    About the author: Jiang Xuewei, technical partner at an IT company, senior IT lecturer, community expert, guest editor, best-selling author, and holder of national invention patents. Published books include 《手把手教你架构3D游戏引擎》 (Publishing House of Electronics Industry) and 《Unity3D实战核心技术详解》 (Publishing House of Electronics Industry).

    Course videos: http://edu.csdn.net/lecturer/144

    Deferred rendering is used very widely in PC game development, and because its results are so good it is popular with many developers. These techniques are now publicly available, so as developers we should at least understand how they work; plenty of sample code can be found online. The goal here is simply to introduce how deferred rendering is used. I use this technique in the engine I develop myself, and the Unity3D engine uses it as well, although because of its performance cost on mobile platforms it is rarely chosen for mobile projects.

    Deferred rendering is itself a graphics algorithm aimed at scene rendering. It is sometimes described as a post-processing style of rendering: the scene is first rendered into intermediate buffers, and deferred passes then process those buffers before the final image is presented on screen. Let's get straight to the point:

    In Unity3D, the deferred rendering pipeline runs in two stages: a G-Buffer stage and a lighting (Lighting) stage. Here we follow the traditional approach; many articles online describe it as four stages, but our Deferred Lighting variant uses three, as the pseudocode below shows:

    Step 1: for each object { fill the G-Buffer }
    Step 2: for each light { lighting pass }
    Step 3: for each object { shading pass }

    The lighting pass in step 2 is the most important part. Mathematics plays an important role in game development, and implementing these algorithms is not something that happens overnight, but it is worth persevering. The Lighting Pass equations are presented below:

    The lighting pass sits at the core of the Deferred Lighting framework, so I want to analyze it first. Once the lighting pass is expressed clearly, the information the G-Buffer needs to store, and the information the shading pass can obtain, become clear as well.

    Starting from a physically based BRDF, the overall shading equation of the lighting model is:
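    The original post showed this equation as an image that is no longer available. A plausible reconstruction, assuming the normalized Blinn-Phong model that this kind of Deferred Lighting derivation is usually built on (diffuse color c_diff, specular color c_spec, shininess α, normal n, light direction l, half vector h, light color c_light), is:

    \[ c \;=\; \Big( c_{\text{diff}} \;+\; c_{\text{spec}}\,\frac{\alpha + 2}{8}\,(n \cdot h)^{\alpha} \Big) \otimes c_{\text{light}}\,(n \cdot l) \]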

    With N light sources, the lighting response of each pixel is:
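    Again the image is missing; under the same assumed model, summing the per-light term over the N lights gives:

    \[ c_{\text{total}} \;=\; \sum_{k=1}^{N} \Big( c_{\text{diff}} \;+\; c_{\text{spec}}\,\frac{\alpha + 2}{8}\,(n \cdot h_k)^{\alpha} \Big) \otimes c_{\text{light}_k}\,(n \cdot l_k) \]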

    For Deferred shading, each shading pass simply evaluates one term of this sum and accumulates it into the frame buffer.

    For Deferred lighting, on the other hand, the formula needs to be rearranged:
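    The rearranged form (again a reconstruction of the missing image, under the same assumptions) pulls the material colors out of the per-light sums:

    \[ c_{\text{total}} \;=\; c_{\text{diff}} \otimes \sum_{k=1}^{N} c_{\text{light}_k}\,(n \cdot l_k) \;+\; c_{\text{spec}} \otimes \sum_{k=1}^{N} \frac{\alpha + 2}{8}\,(n \cdot h_k)^{\alpha}\, c_{\text{light}_k}\,(n \cdot l_k) \]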

    Because c_diff is only applied in the final shading pass, the diffuse and specular contributions must be accumulated separately in every light pass for the result to stay correct:
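    In other words, each light pass would have to output two 3-channel quantities, shown here as a reconstruction of the missing formula:

    \[ \text{diffuse accumulation: } \sum_{k} c_{\text{light}_k}\,(n \cdot l_k), \qquad \text{specular accumulation: } \sum_{k} \frac{\alpha + 2}{8}\,(n \cdot h_k)^{\alpha}\, c_{\text{light}_k}\,(n \cdot l_k) \]

    which would require six channels in total.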

    To fit both diffuse and specular into a single 4-channel buffer, the specular color has to be sacrificed, keeping only its luminance, and c_spec is likewise simplified to a scalar. The lighting pass computation therefore becomes:
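    A reconstruction of the missing formula under these assumptions, with lum() denoting luminance and the scalar specular sum stored in the fourth channel:

    \[ \text{RGB: } \sum_{k} c_{\text{light}_k}\,(n \cdot l_k), \qquad \text{A: } \sum_{k} \frac{\alpha + 2}{8}\,(n \cdot h_k)^{\alpha}\, \operatorname{lum}(c_{\text{light}_k})\,(n \cdot l_k) \]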

    The formulas above cover the diffuse and specular terms of the lighting pass. What remains is filling and reading the G-Buffer. The complete shader code for this deferred rendering setup is shown below:

// Matrices
float4x4 g_matWorld : World;
float4x4 g_matWorldView : WorldView;
float4x4 g_matWorldViewProj : WorldViewProjection;
float4x4 g_matProjInvert : ViewProjectionInverse;
float4x4 g_matTextureProj : TextureProjection;

// Light
float4 g_LightAmbient;
float4 g_LightColor;
float4 g_LightPosition;
float4 g_LightPositionViewSpace;
float g_LightRadius;
float g_OneOverSqrLightRadius;

// Material
float g_MaterialAmbient;
float g_MaterialEmissive;
float g_MaterialDiffuse;
float g_MaterialSpecular;
float g_MaterialShininess;

// Textures
texture g_TextureDiffuse;
texture g_TextureCubeNormalization;

// Render targets
texture g_TextureSrcColor;
texture g_TextureSrcPosition;
texture g_TextureSrcNormal;
texture g_TextureSrcMaterial;

// ---------------------------------------------------------------
// Samplers
// ---------------------------------------------------------------
sampler g_SamplerDiffuse = sampler_state
{
    Texture = (g_TextureDiffuse);
    MinFilter = Linear;
    MagFilter = Linear;
    MipFilter = Linear;
};

sampler g_SamplerCubeNormalization = sampler_state
{
    Texture = (g_TextureCubeNormalization);
    MinFilter = Linear;
    MagFilter = Linear;
    MipFilter = None;
    AddressU = Clamp;
    AddressV = Clamp;
};

sampler g_SamplerSrcPosition = sampler_state
{
    Texture = (g_TextureSrcPosition);
    MinFilter = Point;
    MagFilter = Point;
    MipFilter = None;
    AddressU = Clamp;
    AddressV = Clamp;
};

sampler g_SamplerSrcNormal = sampler_state
{
    Texture = (g_TextureSrcNormal);
    MinFilter = Point;
    MagFilter = Point;
    MipFilter = None;
    AddressU = Clamp;
    AddressV = Clamp;
};

sampler g_SamplerSrcColor = sampler_state
{
    Texture = (g_TextureSrcColor);
    MinFilter = Point;
    MagFilter = Point;
    MipFilter = None;
    AddressU = Clamp;
    AddressV = Clamp;
};

sampler g_SamplerSrcMaterial = sampler_state
{
    Texture = (g_TextureSrcMaterial);
    MinFilter = Point;
    MagFilter = Point;
    MipFilter = None;
    AddressU = Clamp;
    AddressV = Clamp;
};

// ---------------------------------------------------------------
// Utilities
// ---------------------------------------------------------------
float3 cubeNormalize(float3 normal)
{
    return texCUBE(g_SamplerCubeNormalization, normal) * 2 - 1;
}

half getAttenuation(half3 lightVec)
{
    return saturate(1 - dot(lightVec, lightVec) * g_OneOverSqrLightRadius);
}

float3 F32_3xI8_Compress(float f)
{
    float u = floor(f * 256.0);
    float res_u = f * 256.0 - u;
    float v = floor(res_u * 256.0);
    float res_v = res_u * 256.0 - v;
    float w = floor(res_v * 256.0);
    return (1 / 256.0 * float3(u, v, w));
}

float F32_3xI8_Decompress(float3 vec)
{
    return (vec.x + vec.y * 1.0 / 256.0 + vec.z * 1.0 / 65536.0f);
}

// ---------------------------------------------------------------
// Geometry rendering (to 4 RTs)
// ---------------------------------------------------------------
float4 VS_Geometry(float4 pos : POSITION) : POSITION
{
    return mul(pos, g_matWorldViewProj);
}

// ---------------------------------------------------------------
// Render to 4 Render Targets
// ---------------------------------------------------------------
struct VSOUTPUT_RenderToRTs
{
    float4 pos      : POSITION;
    float4 worldPos : TEXCOORD0;
    float2 texCoord : TEXCOORD1;
    float3 normal   : TEXCOORD2;
};

struct PSOUTPUT_RenderToRTs
{
    float4 color    : COLOR0;
    float4 position : COLOR1;
    float4 normal   : COLOR2;
    float4 material : COLOR3;
};

VSOUTPUT_RenderToRTs VS_RenderToRTs(float4 pos : POSITION, float3 normal : NORMAL, float2 texCoord : TEXCOORD0)
{
    VSOUTPUT_RenderToRTs vsRenderToRTs = (VSOUTPUT_RenderToRTs)0;
    vsRenderToRTs.pos = mul(pos, g_matWorldViewProj);
    vsRenderToRTs.worldPos = vsRenderToRTs.pos;
    vsRenderToRTs.texCoord = texCoord;
    vsRenderToRTs.normal = mul(normal, (float3x3)g_matWorldView);
    return vsRenderToRTs;
}

PSOUTPUT_RenderToRTs PS_RenderToRTs(VSOUTPUT_RenderToRTs vsRenderToRTs)
{
    PSOUTPUT_RenderToRTs psRenderToRTs = (PSOUTPUT_RenderToRTs)0;
    psRenderToRTs.color = tex2D(g_SamplerDiffuse, vsRenderToRTs.texCoord);
    psRenderToRTs.position = float4(F32_3xI8_Compress(vsRenderToRTs.worldPos.z / vsRenderToRTs.worldPos.w), 1);
    psRenderToRTs.normal = float4((normalize(vsRenderToRTs.normal) + 1) * 0.5, 0);
    psRenderToRTs.material = float4(g_MaterialAmbient, g_MaterialDiffuse, g_MaterialSpecular, g_MaterialShininess / 255);
    return psRenderToRTs;
}

// ---------------------------------------------------------------
// Ambient lighting
// ---------------------------------------------------------------
struct VSOUTPUT_Ambient
{
    float4 pos : POSITION;
    float2 tex : TEXCOORD0;
};

VSOUTPUT_Ambient VS_Ambient(float4 pos : POSITION, float2 tex : TEXCOORD0)
{
    VSOUTPUT_Ambient vsAmbient = (VSOUTPUT_Ambient)0;
    vsAmbient.pos = mul(pos, g_matWorldViewProj);
    vsAmbient.tex = tex;
    return vsAmbient;
}

float4 PS_Ambient(VSOUTPUT_Ambient vsAmbient) : COLOR0
{
    return tex2D(g_SamplerSrcColor, vsAmbient.tex) * g_LightAmbient;
}

// ---------------------------------------------------------------
// Diffuse lighting
// ---------------------------------------------------------------
struct VSOUTPUT_Diffuse
{
    float4 pos     : POSITION;
    float4 pos2    : TEXCOORD0;
    float4 posProj : TEXCOORD1;
};

VSOUTPUT_Diffuse VS_Diffuse(float4 inPos : POSITION)
{
    VSOUTPUT_Diffuse Out = (VSOUTPUT_Diffuse)0;
    float4 pos = mul(inPos, g_matWorldViewProj);
    Out.pos = pos;
    Out.pos2 = pos;
    Out.posProj = mul(pos, g_matTextureProj);
    return Out;
}

float4 PS_Diffuse(VSOUTPUT_Diffuse In) : COLOR0
{
    // Get depth, normal (unbias), color and material from 4 render targets
    // Use projected texture lookups
    half depth = F32_3xI8_Decompress(tex2Dproj(g_SamplerSrcPosition, In.posProj));
    half4 normal = tex2Dproj(g_SamplerSrcNormal, In.posProj) * 2 - 1;
    half4 color = tex2Dproj(g_SamplerSrcColor, In.posProj);
    half4 material = tex2Dproj(g_SamplerSrcMaterial, In.posProj);

    // Reconstruct original world space position
    half4 position = mul(half4(In.pos2.xy / In.pos2.w, depth, 1), g_matProjInvert);
    position.xyz /= position.w;

    // Calculate diffuse component
    half3 lightVec = g_LightPositionViewSpace - position;
    half3 lightDir = normalize(lightVec);
    half diff = saturate(dot(normal, lightDir)) * material.y;
    half attenuation = getAttenuation(lightVec);
    return g_LightColor * color * diff * attenuation;
}

//--------------------------------------------------------------------------------------
// Techniques
//--------------------------------------------------------------------------------------
technique techRenderDeferred
{
    pass GeometryPass
    {
        VertexShader = compile vs_1_1 VS_Geometry();
        PixelShader = null;
        ZEnable = true;
        ZWriteEnable = true;
        StencilEnable = false;
        AlphaBlendEnable = false;
        ColorWriteEnable = Alpha;
    }

    pass RenderToRTsPass
    {
        VertexShader = compile vs_2_0 VS_RenderToRTs();
        PixelShader = compile ps_2_0 PS_RenderToRTs();
        ZWriteEnable = false;
        ColorWriteEnable = 0x0000000F; // Red | Green | Blue | Alpha
    }

    pass AmbientPass
    {
        VertexShader = compile vs_2_0 VS_Ambient();
        PixelShader = compile ps_2_0 PS_Ambient();
        ZWriteEnable = false;
        ZEnable = false;
        AlphaBlendEnable = true;
        SrcBlend = One;
        DestBlend = One;
    }

    pass DiffusePass
    {
        VertexShader = compile vs_1_1 VS_Diffuse();
        PixelShader = compile ps_2_0 PS_Diffuse();
        ZWriteEnable = false;
        ZEnable = false;
        AlphaBlendEnable = true;
        SrcBlend = One;
        DestBlend = One;
        CullMode = CW;
        ColorWriteEnable = 0x0000000F;
    }
}

    The deferred rendering result produced by this shader is shown below:

    Summary:

    The essence of games lies in graphics algorithms. As game developers we must master the underlying mathematics and understand how the common post-processing algorithms work. Many developers think writing gameplay logic is enough, but even gameplay code ends up calling into these algorithmic interfaces, and when you use an interface you should understand the principle behind it; doing so is a great help in improving your own skills.

