Learn ComputeShader 07 Post Processing
This time we will use a compute shader to post-process the camera's image.
For post-processing we need to grab the image the camera has rendered, hand it to a compute shader for processing on the GPU, and then write the processed result back out as the final image.
First, create a post-processing base class called BasePP.
Start by declaring the fields we will need.
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

[RequireComponent(typeof(Camera))]
public class BasePP : MonoBehaviour
{
    public ComputeShader shader = null;
    protected string kernelName = "CSMain";
    protected Vector2Int texSize = new Vector2Int(0, 0);
    protected Vector2Int groupSize = new Vector2Int();
    protected Camera thisCamera;
    protected RenderTexture output = null;
    protected RenderTexture renderedSource = null;
    protected int kernelHandle = -1;
    protected bool init = false;

    protected virtual void Init()
    {
        if (!SystemInfo.supportsComputeShaders)
        {
            Debug.LogError("It seems your target Hardware does not support Compute Shaders.");
            return;
        }
        if (!shader)
        {
            Debug.LogError("No shader");
            return;
        }
        kernelHandle = shader.FindKernel(kernelName);
        thisCamera = GetComponent<Camera>();
        if (!thisCamera)
        {
            Debug.LogError("Object has no Camera");
            return;
        }
        CreateTextures();
        init = true;
    }

    protected void ClearTexture(ref RenderTexture textureToClear)
    {
        if (null != textureToClear)
        {
            textureToClear.Release();
            textureToClear = null;
        }
    }

    protected virtual void ClearTextures()
    {
        ClearTexture(ref output);
        ClearTexture(ref renderedSource);
    }

    protected void CreateTexture(ref RenderTexture textureToMake, int divide = 1)
    {
        textureToMake = new RenderTexture(texSize.x / divide, texSize.y / divide, 0);
        textureToMake.enableRandomWrite = true;
        textureToMake.Create();
    }

    protected virtual void CreateTextures()
    {
    }

    protected virtual void OnEnable()
    {
        Init();
    }

    protected virtual void OnDisable()
    {
        ClearTextures();
        init = false;
    }

    protected virtual void OnDestroy()
    {
        ClearTextures();
        init = false;
    }

    protected virtual void DispatchWithSource(ref RenderTexture source, ref RenderTexture destination)
    {
    }

    protected void CheckResolution(out bool resChange)
    {
        resChange = false;
    }

    protected virtual void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (!init || shader == null)
        {
            Graphics.Blit(source, destination);
        }
        else
        {
            CheckResolution(out _);
            DispatchWithSource(ref source, ref destination);
        }
    }
}
Init() ends by creating the textures, but the CreateTextures function above is still an empty stub, so let's fill it in.
First set the texture width and height from the camera, then query the kernel's thread group size in x and y and use it to compute how many groups are needed to cover the whole texture. The third argument of GetKernelThreadGroupSizes is passed as out _, which ignores the thread group size on the z axis; a discard (_) is the C# way of saying we are not interested in an output value and do not need it. Finally, create the two textures and bind them to the shader.
protected virtual void CreateTextures()
{
    texSize.x = thisCamera.pixelWidth;   // width and height of the camera's viewport in pixels
    texSize.y = thisCamera.pixelHeight;
    if (shader)
    {
        uint x, y;
        shader.GetKernelThreadGroupSizes(kernelHandle, out x, out y, out _);
        groupSize.x = Mathf.CeilToInt((float)texSize.x / (float)x);
        groupSize.y = Mathf.CeilToInt((float)texSize.y / (float)y);
    }
    CreateTexture(ref output);
    CreateTexture(ref renderedSource);
    shader.SetTexture(kernelHandle, "source", renderedSource);
    shader.SetTexture(kernelHandle, "output", output);
}
Next we need to get hold of the rendered image and pass it to the GPU side.
OnRenderImage(RenderTexture source, RenderTexture destination) receives the camera's rendered image in source; after running our processing on it, we write the result to destination.
OnRenderImage is a special Unity callback used to post-process the image after the camera has finished rendering. It is invoked automatically at the very end of the camera's render pass; you never call it yourself, the engine calls it as part of the rendering pipeline.
protected virtual void OnRenderImage(RenderTexture source, RenderTexture destination)
{
    if (!init || shader == null)
    {
        Graphics.Blit(source, destination);
    }
    else
    {
        CheckResolution(out _);
        DispatchWithSource(ref source, ref destination);
    }
}
Next we flesh out the CheckResolution and DispatchWithSource functions.
If texSize no longer matches the camera's pixel dimensions (for example after a resolution change), CreateTextures is run again to rebuild the textures at the new size.
protected void CheckResolution(out bool resChange)
{
    resChange = false;
    if (texSize.x != thisCamera.pixelWidth || texSize.y != thisCamera.pixelHeight)
    {
        resChange = true;
        CreateTextures();
    }
}
First blit the camera's image into renderedSource; the shader reads from the texture we bound at init time, so the fresh image has to be copied into it each frame. Then dispatch the shader's thread groups, and finally blit the processed output texture into destination.
protected virtual void DispatchWithSource(ref RenderTexture source, ref RenderTexture destination)
{
    Graphics.Blit(source, renderedSource);
    shader.Dispatch(kernelHandle, groupSize.x, groupSize.y, 1);
    Graphics.Blit(output, destination);
}
Next, let's write a simple post-processing example.
The compute shader simply multiplies the source color by a shade factor to adjust the image brightness; besides the kernel it only needs the source and output textures and the shade float declared at the top:
#pragma kernel Highlight
Texture2D<float4> source;
RWTexture2D<float4> output;
float shade;
[numthreads(8, 8, 1)]
void Highlight(uint3 id : SV_DispatchThreadID)
{
    float4 srcColor = source[id.xy];
    float4 color = srcColor * shade;
    output[id.xy] = color;
}
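On the C# side, a small component derived from BasePP is enough to drive this kernel. The following is only a minimal sketch of what such a component could look like; the class name SimpleHighlight and the default slider value are made up, while the kernel name Highlight and the shade variable are the ones used above.
using UnityEngine;

// Hypothetical driver component for the Highlight kernel (sketch, not from the article).
public class SimpleHighlight : BasePP
{
    [Range(0.0f, 1.0f)]
    public float shade = 0.5f;          // brightness factor read by the kernel

    protected override void OnEnable()
    {
        kernelName = "Highlight";       // look up the Highlight kernel instead of the default CSMain
        base.OnEnable();
    }

    protected override void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (shader) shader.SetFloat("shade", shade);   // push the current shade value every frame
        base.OnRenderImage(source, destination);
    }
}
Attach the component to the camera and assign the compute shader in the Inspector; OnRenderImage then runs the kernel automatically each frame.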
Result:
Next we want the area around the character to stay highlighted while the rest of the image is still darkened.
First, on the CPU side, get the character's position, convert it to screen coordinates, and pass it to the shader.
protected override void OnRenderImage(RenderTexture source, RenderTexture destination)
{
    if (!init || shader == null)
    {
        Graphics.Blit(source, destination);
    }
    else
    {
        if (trackedObject && thisCamera)
        {
            // convert the tracked object's world position to screen coordinates
            Vector3 pos = thisCamera.WorldToScreenPoint(trackedObject.position);
            center.x = pos.x;
            center.y = pos.y;
            shader.SetVector("center", center);
        }
        bool resChange = false;
        CheckResolution(out resChange);
        if (resChange) SetProperties();
        DispatchWithSource(ref source, ref destination);
    }
}
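The trackedObject, center, radius, edgeWidth and shade fields and the SetProperties helper used above live on this same component but are not shown in the article; the following is only a rough sketch of what they might look like, with the default values and attributes being assumptions.
// Hypothetical sketch of the fields and SetProperties helper used by the override above.
public Transform trackedObject;                 // object to keep highlighted
protected Vector4 center;                       // tracked object's position in screen space

public float radius = 100.0f;                   // highlight radius in pixels (assumed default)
public float edgeWidth = 30.0f;                 // soft edge width in pixels (assumed default)
[Range(0.0f, 1.0f)]
public float shade = 0.5f;                      // darkening factor outside the circle

protected void SetProperties()
{
    shader.SetFloat("radius", radius);
    shader.SetFloat("edgeWidth", edgeWidth);
    shader.SetFloat("shade", shade);
}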
In the shader we check whether each pixel lies near the character and interpolate the color based on its distance to the character: beyond the radius the pixel gets the fully darkened color, inside the radius it keeps the original, highlighted color.
// in addition to source, output and shade, the shader now needs the highlight parameters:
float4 center;
float radius;
float edgeWidth;

float inCircle(float2 pt, float2 center, float radius, float edgeWidth)
{
    float len = length(pt - center);
    return 1.0 - smoothstep(radius - edgeWidth, radius, len);
}

[numthreads(8, 8, 1)]
void Highlight(uint3 id : SV_DispatchThreadID)
{
    float4 srcColor = source[id.xy];
    float4 shadedSrcColor = srcColor * shade;
    float highlight = inCircle((float2)id.xy, center.xy, radius, edgeWidth);
    float4 color = lerp(shadedSrcColor, srcColor, highlight);
    output[id.xy] = color;
}
Result: