It took me a whole evening to finally understand the Blur effect in Unity's Image Effects. It is actually quite simple; it just took a while to click.
Principle: two one-dimensional [1*7] Gaussian kernels are used, one applied along the x direction and one along the y direction. Gaussian filtering is what produces the blur.
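Why two 1D passes are enough (standard separable-filter reasoning, not something specific to this package): a 2D Gaussian factors as G(x, y) = g(x) * g(y), so blurring with a full 7*7 kernel (49 samples per pixel) gives the same result as blurring once with the 1*7 kernel along x and once along y, which is only 7 + 7 = 14 samples per pixel.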
Parameters of the JS script:
Down Sample: downsamples the image received in OnRenderImage, i.e. the texture to be processed is made smaller, which helps the shader run faster (see the worked example after this list).
Blur Size: the spacing between neighbouring samples when the Gaussian kernel is applied. The larger it is, the farther apart the samples and the blurrier the image, but too large a value causes visible artifacts.
Blur Iterations: number of iterations. More iterations give a stronger blur but cost more.
Blur Type: selects one of two shaders; the second is an optimized version of the first, but the visual difference is small.
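A quick worked example (numbers of my own, based on the code analysed below): with Down Sample = 2, a 1920*1080 source is blitted into a 480*270 temporary texture (width >> 2, height >> 2), and widthMod = 1 / (1 << 2) = 0.25, so the shader's initial sample spacing becomes blurSize * 0.25 texels of that smaller texture.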
Code analysis:
function OnRenderImage (source : RenderTexture, destination : RenderTexture) {
    if (CheckResources() == false) {
        Graphics.Blit (source, destination);
        return;
    }

    // Reciprocal of the downsample factor; used to scale the sample spacing
    // so that it matches the downsampled texture.
    var widthMod : float = 1.0f / (1.0f * (1 << downsample));
    // blurMaterial.SetVector ("_Parameter", Vector4 (blurSize * widthMod, -blurSize * widthMod, 0.0f, 0.0f));

    source.filterMode = FilterMode.Bilinear;
    var rtW : int = source.width >> downsample;    // >> is a cheap way to divide by 2^downsample
    var rtH : int = source.height >> downsample;

    var rt : RenderTexture = RenderTexture.GetTemporary (rtW, rtH, 0, source.format);
    rt.filterMode = FilterMode.Bilinear;

    // Shader pass 0: downsample the image and apply a simple blur at the same time.
    Graphics.Blit (source, rt, blurMaterial, 0);

    // Choosing a different BlurType selects a different pair of shader passes.
    var passOffs = blurType == BlurType.StandardGauss ? 0 : 2;

    for (var i : int = 0; i < blurIterations; i++) {
        var iterationOffs : float = (i * 1.0f);
        // _Parameter.x holds the spacing between neighbouring samples; it grows with each iteration.
        blurMaterial.SetVector ("_Parameter",
            Vector4 (blurSize * widthMod + iterationOffs, -blurSize * widthMod - iterationOffs, 0.0f, 0.0f));

        // vertical blur
        var rt2 : RenderTexture = RenderTexture.GetTemporary (rtW, rtH, 0, source.format);
        rt2.filterMode = FilterMode.Bilinear;
        Graphics.Blit (rt, rt2, blurMaterial, 1 + passOffs);    // shader pass 1 or 3
        RenderTexture.ReleaseTemporary (rt);
        rt = rt2;

        // horizontal blur
        rt2 = RenderTexture.GetTemporary (rtW, rtH, 0, source.format);
        rt2.filterMode = FilterMode.Bilinear;
        Graphics.Blit (rt, rt2, blurMaterial, 2 + passOffs);    // shader pass 2 or 4
        RenderTexture.ReleaseTemporary (rt);
        rt = rt2;
    }

    Graphics.Blit (rt, destination);
    RenderTexture.ReleaseTemporary (rt);
}
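To make the comment about _Parameter.x concrete (my own numbers): with blurSize = 3 and downsample = 1, widthMod = 0.5, so iteration 0 samples with a spacing of 3 * 0.5 = 1.5 texels, iteration 1 with 1.5 + 1 = 2.5 texels, iteration 2 with 3.5 texels, and so on; each iteration spreads the blur a little wider.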
Next, the shader file.
First, the five passes, each of which is used by one of the Graphics.Blit calls in the script above (a summary of which Blit call uses which pass follows the listing).
SubShader {
    ZTest Off Cull Off ZWrite Off Blend Off
    Fog { Mode off }

    // 0
    Pass {
        CGPROGRAM
        #pragma vertex vert4Tap
        #pragma fragment fragDownsample
        #pragma fragmentoption ARB_precision_hint_fastest
        ENDCG
    }

    // 1
    Pass {
        ZTest Always
        Cull Off

        CGPROGRAM
        #pragma vertex vertBlurVertical
        #pragma fragment fragBlur8
        #pragma fragmentoption ARB_precision_hint_fastest
        ENDCG
    }

    // 2
    Pass {
        ZTest Always
        Cull Off

        CGPROGRAM
        #pragma vertex vertBlurHorizontal
        #pragma fragment fragBlur8
        #pragma fragmentoption ARB_precision_hint_fastest
        ENDCG
    }

    // alternate blur
    // 3
    Pass {
        ZTest Always
        Cull Off

        CGPROGRAM
        #pragma vertex vertBlurVerticalSGX
        #pragma fragment fragBlurSGX
        #pragma fragmentoption ARB_precision_hint_fastest
        ENDCG
    }

    // 4
    Pass {
        ZTest Always
        Cull Off

        CGPROGRAM
        #pragma vertex vertBlurHorizontalSGX
        #pragma fragment fragBlurSGX
        #pragma fragmentoption ARB_precision_hint_fastest
        ENDCG
    }
}
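For reference, the Blit calls in the script map onto these passes as follows (this falls straight out of the code above):
Graphics.Blit (source, rt, blurMaterial, 0) — pass 0, downsample plus simple blur.
Graphics.Blit (rt, rt2, blurMaterial, 1 + passOffs) — pass 1 (standard) or pass 3 (the alternate type), vertical blur.
Graphics.Blit (rt, rt2, blurMaterial, 2 + passOffs) — pass 2 (standard) or pass 4 (the alternate type), horizontal blur.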
Pass 0: downsamples the image while applying a simple blur. Four taps around the pixel are averaged, and since the texture is sampled with bilinear filtering each tap itself already blends neighbouring texels.
v2f_tap vert4Tap ( appdata_img v )
{
    v2f_tap o;
    o.pos = mul (UNITY_MATRIX_MVP, v.vertex);

    // Sample points around the pixel
    o.uv20 = v.texcoord + _MainTex_TexelSize.xy;
    o.uv21 = v.texcoord + _MainTex_TexelSize.xy * half2(-0.5h, -0.5h);
    o.uv22 = v.texcoord + _MainTex_TexelSize.xy * half2(0.5h, -0.5h);
    o.uv23 = v.texcoord + _MainTex_TexelSize.xy * half2(-0.5h, 0.5h);

    return o;
}

fixed4 fragDownsample ( v2f_tap i ) : SV_Target
{
    fixed4 color = tex2D (_MainTex, i.uv20);
    color += tex2D (_MainTex, i.uv21);
    color += tex2D (_MainTex, i.uv22);
    color += tex2D (_MainTex, i.uv23);
    return color / 4;    // average of the four taps
}
The remaining passes, 1/2 and 3/4, each apply the Gaussian filter along one axis: passes 1 and 3 vertically (y), passes 2 and 4 horizontally (x).
First, the Gaussian kernel:
static const half4 curve4[7] = { half4(0.0205,0.0205,0.0205,0), half4(0.0855,0.0855,0.0855,0), half4(0.232,0.232,0.232,0),
    half4(0.324,0.324,0.324,1), half4(0.232,0.232,0.232,0), half4(0.0855,0.0855,0.0855,0), half4(0.0205,0.0205,0.0205,0) };
This is the [1*7] kernel: the centre pixel plus three pixels on each side, seven samples in total, are weighted and summed to produce the new pixel value.
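As a sanity check (my own arithmetic, not from the original source): the weights sum to 2 * (0.0205 + 0.0855 + 0.232) + 0.324 = 0.676 + 0.324 = 1.0, so the filter preserves overall brightness.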
Passes 1 and 2 differ only in their vertex functions: one computes the sample offset along the horizontal direction, the other along the vertical.
v2f_withBlurCoords8 vertBlurHorizontal (appdata_img v)
{
    v2f_withBlurCoords8 o;
    o.pos = mul (UNITY_MATRIX_MVP, v.vertex);

    o.uv = half4(v.texcoord.xy, 1, 1);
    o.offs = _MainTex_TexelSize.xy * half2(1.0, 0.0) * _Parameter.x;    // offset between samples, horizontal

    return o;
}

v2f_withBlurCoords8 vertBlurVertical (appdata_img v)
{
    v2f_withBlurCoords8 o;
    o.pos = mul (UNITY_MATRIX_MVP, v.vertex);

    o.uv = half4(v.texcoord.xy, 1, 1);
    o.offs = _MainTex_TexelSize.xy * half2(0.0, 1.0) * _Parameter.x;    // offset between samples, vertical

    return o;
}

half4 fragBlur8 ( v2f_withBlurCoords8 i ) : SV_Target
{
    half2 uv = i.uv.xy;
    half2 netFilterWidth = i.offs;
    // Step back three sample widths from the centre, i.e. start at the leftmost
    // (or topmost) sample, and accumulate the weighted sum from there.
    half2 coords = uv - netFilterWidth * 3.0;

    half4 color = 0;
    for( int l = 0; l < 7; l++ )
    {
        half4 tap = tex2D(_MainTex, coords);
        color += tap * curve4[l];      // multiply the sample by its weight
        coords += netFilterWidth;      // move to the next sample
    }
    return color;
}
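Written out (my own summary of the loop above): the seven samples land at uv - 3*offs, uv - 2*offs, ..., uv + 3*offs and are weighted by curve4[0] through curve4[6], so the outermost samples get 0.0205 and the centre gets 0.324.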
In passes 1 and 2 each uv is only a float2, while a register can hold a full float4, so one float4 can pack two sample coordinates. In addition, the per-sample coordinate computation done in the fragment shader can be moved into the vertex shader. That gives the following variant:
v2f_withBlurCoordsSGX vertBlurHorizontalSGX (appdata_img v)
{
    v2f_withBlurCoordsSGX o;
    o.pos = mul (UNITY_MATRIX_MVP, v.vertex);

    o.uv = v.texcoord.xy;
    half2 netFilterWidth = _MainTex_TexelSize.xy * half2(1.0, 0.0) * _Parameter.x;
    half4 coords = -netFilterWidth.xyxy * 3.0;

    // Compute the coordinates of the three samples on each side of the pixel
    o.offs[0] = v.texcoord.xyxy + coords * half4(1.0h, 1.0h, -1.0h, -1.0h);
    coords += netFilterWidth.xyxy;
    o.offs[1] = v.texcoord.xyxy + coords * half4(1.0h, 1.0h, -1.0h, -1.0h);
    coords += netFilterWidth.xyxy;
    o.offs[2] = v.texcoord.xyxy + coords * half4(1.0h, 1.0h, -1.0h, -1.0h);

    return o;
}

v2f_withBlurCoordsSGX vertBlurVerticalSGX (appdata_img v)
{
    v2f_withBlurCoordsSGX o;
    o.pos = mul (UNITY_MATRIX_MVP, v.vertex);

    o.uv = half4(v.texcoord.xy, 1, 1);
    half2 netFilterWidth = _MainTex_TexelSize.xy * half2(0.0, 1.0) * _Parameter.x;
    half4 coords = -netFilterWidth.xyxy * 3.0;

    // Compute the coordinates of the three samples above and below the pixel
    o.offs[0] = v.texcoord.xyxy + coords * half4(1.0h, 1.0h, -1.0h, -1.0h);
    coords += netFilterWidth.xyxy;
    o.offs[1] = v.texcoord.xyxy + coords * half4(1.0h, 1.0h, -1.0h, -1.0h);
    coords += netFilterWidth.xyxy;
    o.offs[2] = v.texcoord.xyxy + coords * half4(1.0h, 1.0h, -1.0h, -1.0h);

    return o;
}

half4 fragBlurSGX ( v2f_withBlurCoordsSGX i ) : SV_Target
{
    half2 uv = i.uv.xy;

    half4 color = tex2D(_MainTex, i.uv) * curve4[3];    // centre sample times the centre weight

    for( int l = 0; l < 3; l++ )
    {
        half4 tapA = tex2D(_MainTex, i.offs[l].xy);
        half4 tapB = tex2D(_MainTex, i.offs[l].zw);
        color += (tapA + tapB) * curve4[l];    // the kernel is symmetric, so both taps share one weight
    }

    return color;
}
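To spell out the packing (my reading of the code above): offs[0].xy and offs[0].zw are the pair of samples at -3 and +3 steps from the pixel, weighted by curve4[0] = 0.0205; offs[1] is the pair at plus/minus 2 steps with curve4[1] = 0.0855; offs[2] is the pair at plus/minus 1 step with curve4[2] = 0.232; and the centre sample uses curve4[3] = 0.324. Same seven samples and weights as fragBlur8, but the coordinates come precomputed from the vertex shader.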
Conclusion:
After some experimenting, downSample = 1 and blurIterations = 2 with an adjusted blurSize gives a good-looking result at a reasonable cost. However, even with blurSize = 0 the image is still blurred, so to animate a transition from fully sharp to blurred the code probably needs a small adjustment.
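One possible adjustment (a rough sketch of my own, not the shipped code): bypass the effect completely when blurSize is 0, so the image stays sharp and another script can animate blurSize upward over time. The early-out below reuses only identifiers that already appear above; everything else is illustrative.

function OnRenderImage (source : RenderTexture, destination : RenderTexture) {
    // When the blur is fully "off", skip the downsample pass as well;
    // otherwise pass 0 alone still softens the image.
    if (CheckResources() == false || blurSize <= 0.0f) {
        Graphics.Blit (source, destination);    // pass the image through untouched
        return;
    }
    // ... the downsample + iteration code shown earlier ...
}

A driver script could then tween the value with something like blurSize = Mathf.Lerp(0.0f, targetSize, t), where targetSize and t are just placeholder names for whatever the animation needs.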