author     Andy Nichols <andy.nichols@qt.io>      2017-12-07 10:56:06 +0100
committer  Tomi Korpipää <tomi.korpipaa@qt.io>    2017-12-14 06:56:38 +0000
commit     f28c93051d3bb9a800a4a3a04209aea5f16ab1a9 (patch)
tree       9ca98aa63d57dea1adb6313f9a745a39caf1278d /src/Runtime/res/effectlib/screenSpaceDO.glsllib
parent     611f47ded9a119edb3a8bed39387958f5fc3a655 (diff)
Convert tabs to spaces and remove trailing whitespace from effectlib

This was done to allow syncing the effectlib shaders in 3D Studio with the effectlib shaders in the 2.0 runtime, where they had already been sanitized. This makes the diffing process easier, and also brings things into line with the Qt coding standard.

Change-Id: I0c45dcb7110cc31a24cce24d72b53e1d451b701b
Reviewed-by: Tomi Korpipää <tomi.korpipaa@qt.io>
Reviewed-by: Miikka Heikkinen <miikka.heikkinen@qt.io>
Diffstat (limited to 'src/Runtime/res/effectlib/screenSpaceDO.glsllib')
-rw-r--r--   src/Runtime/res/effectlib/screenSpaceDO.glsllib   42
1 file changed, 21 insertions, 21 deletions
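
The change itself is a purely mechanical cleanup; the commit does not say which tool performed it. As a rough sketch only, the same sanitization could be scripted along these lines (the 4-space tab width and the effectlib glob are assumptions, not taken from the commit):

#!/usr/bin/env python3
# Hypothetical sketch of the tabs-to-spaces / trailing-whitespace cleanup
# described by the commit subject. The commit does not state the tooling
# actually used; the 4-space tab width and the directory glob are assumptions.
from pathlib import Path

TAB_WIDTH = 4  # assumed tab width

def sanitize(path: Path) -> None:
    lines = path.read_text(encoding="utf-8").splitlines()
    # Expand tabs to spaces and strip trailing whitespace on every line.
    cleaned = [line.expandtabs(TAB_WIDTH).rstrip() for line in lines]
    path.write_text("\n".join(cleaned) + "\n", encoding="utf-8")

if __name__ == "__main__":
    for shader in Path("src/Runtime/res/effectlib").glob("*.glsllib"):
        sanitize(shader)
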
diff --git a/src/Runtime/res/effectlib/screenSpaceDO.glsllib b/src/Runtime/res/effectlib/screenSpaceDO.glsllib
index 2a4a6080..2ccc69fc 100644
--- a/src/Runtime/res/effectlib/screenSpaceDO.glsllib
+++ b/src/Runtime/res/effectlib/screenSpaceDO.glsllib
@@ -37,7 +37,7 @@ vec3 getViewSpacePos( sampler2D depthSampler, vec2 camProps, vec2 UV, vec4 UvToE
{
float sampleDepth = getDepthValue( texture(depthSampler, UV), camProps );
sampleDepth = depthValueToLinearDistance( sampleDepth, camProps );
-
+
vec2 scaledUV = (UV * UvToEye.xy) + UvToEye.zw;
return vec3(scaledUV * sampleDepth, sampleDepth);
}
@@ -46,15 +46,15 @@ float shadowOcclusion(sampler2D depthSampler, vec3 lightDir, vec3 worldPos, mat4
{
vec3 viewPos = getViewSpacePos( depthSampler, camProps, ( gl_FragCoord.xy * aoScreen.zw ), UvToEye );
float depth = viewPos.z;
-
+
// Get the screen-space UV
vec2 centerUV = gl_FragCoord.xy * aoScreen.zw;
-
+
float screenDist = shadowParams.y * 3.1415926535 * aoScreen.y / viewPos.z;
if (screenDist < 1.0) { return 1.0; }
-
+
vec3 viewL = normalize( (viewMat * vec4(lightDir, 0)).xyz );
-
+
float steps = min( screenDist, 20.0 );
int maxCt = int(steps);
float step = 3.1415926535 * shadowParams.y / float(maxCt);
@@ -64,30 +64,30 @@ float shadowOcclusion(sampler2D depthSampler, vec3 lightDir, vec3 worldPos, mat4
{
vec3 ray = lightDir * step * float(i);
vec3 samplePos = worldPos - ray;
-
+
vec4 smpUV = viewProj * vec4(samplePos, 1.0);
smpUV /= smpUV.w;
smpUV.xy = (smpUV.xy + 1.0) * 0.5;
-
+
vec3 testPos = getViewSpacePos( depthSampler, camProps, smpUV.xy, UvToEye );
testPos.z += shadowParams.w;
vec3 testVec = normalize(viewPos - testPos);
testVec -= viewL;
float isBehind = clamp( testVec.z, 0.0, 1.0 );
- float diff = (testPos.z - depth) / shadowParams.y;
+ float diff = (testPos.z - depth) / shadowParams.y;
ret -= isBehind * (1.0 / (1.0 + diff * diff));
}
-
+
ret /= float(maxCt); // divide by number of samples;
// Blend between soft and hard based on softness param
// NOTE : the 0.72974 is actually an gamma-inverted 0.5 (assuming gamma 2.2)
// Would not need this if we linearized color instead.
float hardCut = (ret <= 0.72974) ? 0.0 : 1.0;
ret = shadowParams.z * ret + (1.0 - shadowParams.z) * hardCut;
-
+
// Blend between full and no occlusion based on strength param
ret = shadowParams.x * ret + (1.0 - shadowParams.x);
-
+
return ret;
}
@@ -96,7 +96,7 @@ float shadowOcclusion(sampler2D depthSampler, vec3 lightDir, vec3 worldPos, mat4
float glossyOcclusionBasis(sampler2D depthSampler, mat3 tanFrame, vec3 worldPos, mat4 viewProj, vec3 viewDir, vec4 shadowParams, vec2 camProps, float roughness)
{
float ret = 16.0;
-
+
float kernel[16];
kernel[0] = 0.5; kernel[1] = 0.25;
kernel[2] = 0.75; kernel[3] = 0.125;
@@ -114,30 +114,30 @@ float glossyOcclusionBasis(sampler2D depthSampler, mat3 tanFrame, vec3 worldPos,
ivec2 iCoords = ivec2( gl_FragCoord.xy );
float depth = getDepthValue( texelFetch(depthSampler, iCoords, 0) );
depth = depthValueToLinearDistance( depth, camProps );
-
+
for( int i = 0; i < 16; ++i )
{
- vec3 localDir;
+ vec3 localDir;
float phi = 6.28318530718 * (kernel[i] + phiShift);
float cosTheta = sqrt( float(i+1) / 33.0);
localDir.z = sqrt(1.0 - cosTheta*cosTheta) * normFac;
localDir.x = cos(phi) * cosTheta;
localDir.y = sin(phi) * cosTheta;
-
+
localDir = normalize(localDir);
-
+
vec3 halfDir = tanFrame[0]*localDir.x + tanFrame[1]*localDir.y + tanFrame[2]*localDir.z;
vec3 ray = reflect( -viewDir, halfDir ) * shadowParams.x;
-
+
vec4 samplePos = vec4( worldPos + ray, 1.0 );
-
+
vec4 sampleProj = viewProj * samplePos;
sampleProj /= sampleProj.w;
sampleProj.xy = (sampleProj.xy + 1.0) * 0.5;
float sampleDepth = getDepthValue( texture(depthSampler, sampleProj.xy) );
sampleDepth = depthValueToLinearDistance( sampleDepth, camProps );
-
+
// Occlusion is applied based on a Cauchy distribution filter
// But with a "dead zone" for the very close samples. By subtracting it from 16,
// which represents no occlusion (16/16 = 1), we let nearby occluders have a
@@ -147,9 +147,9 @@ float glossyOcclusionBasis(sampler2D depthSampler, mat3 tanFrame, vec3 worldPos,
float occlDist = 4.0 * max(depth - sampleDepth - shadowParams.y, 0.0) / shadowParams.x;
float occlFactor = 1.0 / ( 1.0 + occlDist*occlDist*0.04 );
occlFactor -= 1.0 / ( 1.0 + occlDist*occlDist*4.0 );
- ret -= min(2.0 * occlFactor, 1.0);
+ ret -= min(2.0 * occlFactor, 1.0);
}
-
+
ret /= 16.0; // divide by number of samples;
return ret;
}
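
Two of the numeric choices visible in the diff can be sanity-checked outside the shader. The NOTE in shadowOcclusion says the 0.72974 hard-cut threshold is a gamma-inverted 0.5 (assuming gamma 2.2), and glossyOcclusionBasis attenuates occlusion with a wide Cauchy term minus a narrow one so that the closest samples fall in a "dead zone". A small Python sketch reproducing both checks (the sample distances below are arbitrary, chosen only for illustration):

# 0.5 gamma-encoded with gamma 2.2, i.e. 0.5 ** (1 / 2.2), is ~0.72974,
# matching the hard-cut threshold used in shadowOcclusion.
gamma = 2.2
print(round(0.5 ** (1.0 / gamma), 5))  # ~0.72974

# Shape of the glossyOcclusionBasis falloff: a wide Cauchy (Lorentzian)
# term minus a narrow one, so occlusion is ~0 at zero distance ("dead
# zone"), peaks at moderate distances, and tapers off far away.
def occl_factor(occl_dist: float) -> float:
    return (1.0 / (1.0 + occl_dist * occl_dist * 0.04)
            - 1.0 / (1.0 + occl_dist * occl_dist * 4.0))

for d in (0.0, 0.5, 1.0, 2.0, 4.0, 8.0):  # arbitrary distances for illustration
    print(d, round(occl_factor(d), 4))
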