// SHADE_Y3/Assets/Shaders/SSAO_CS.glsl

#version 450
#pragma vscode_glsllint_stage : comp
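// Hemisphere sample kernel size and the rotation noise tile:
// NUM_ROTATIONS = ROTATION_KERNEL_W * ROTATION_KERNEL_H random rotation vectors.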
const uint NUM_SAMPLES = 64;
const uint NUM_ROTATIONS = 16;
const int ROTATION_KERNEL_W = 4;
const int ROTATION_KERNEL_H = 4;
// can perhaps pass in as push constant.
const float RADIUS = 0.2f;
const float BIAS = 0.0025f;
layout(local_size_x = 16, local_size_y = 16) in;
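// View-space position and normal inputs (presumably written by an earlier geometry pass)
// and the single-channel AO result, replicated across all four channels on store.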
layout(set = 3, binding = 0, rgba32f) uniform image2D positions;
layout(set = 3, binding = 1, rgba32f) uniform image2D normals;
layout(set = 3, binding = 2, rgba32f) uniform image2D outputImage;
// SSAO kernel data: tangent-space hemisphere sample offsets and a small noise
// texture of random rotation vectors that is tiled across the screen
layout(std430, set = 4, binding = 0) buffer SSAOData
{
    vec4 samples[NUM_SAMPLES];
} ssaoData;
layout (set = 4, binding = 1) uniform sampler2D noiseTexture;
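// Camera matrices; only projMat is used here, to project view-space sample points to clip space.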
layout(set = 2, binding = 0) uniform CameraData
{
    vec4 position;
    mat4 vpMat;
    mat4 viewMat;
    mat4 projMat;
} cameraData;
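// Each invocation computes the ambient occlusion term for one texel: sample a
// hemisphere of points around the fragment's view-space position, project each
// sample back to screen space, and count how many are occluded by the stored geometry.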
void main()
{
    // Dimensions of the SSAO output image
    ivec2 ssaoSize = imageSize (outputImage);
    // Global thread ID; one invocation per output texel
    ivec2 globalThread = ivec2 (gl_GlobalInvocationID.xy);
    // Guard against dispatches whose size does not divide the image evenly
    if (any (greaterThanEqual (globalThread, ssaoSize)))
        return;
    // Load the view-space position and normal for this texel
    vec3 viewSpacePos = imageLoad (positions, globalThread).rgb;
    vec3 viewSpaceNormal = normalize (imageLoad (normals, globalThread).rgb);
    // Get the noise dimensions. This should be 4x4
    vec2 noiseDim = vec2 (textureSize (noiseTexture, 0));
    // Get normalized thread UV coordinates
    vec2 threadUV = vec2 (globalThread) / vec2 (ssaoSize);
    // Tile the noise texture across the screen and sample a random rotation vector
    vec2 noiseUV = threadUV * (vec2 (ssaoSize) / noiseDim);
    vec3 randomVec = texture (noiseTexture, noiseUV).rgb;
    // Gram-Schmidt: build an orthonormal basis around the normal, rotated by randomVec
    vec3 tangent = normalize (randomVec - (viewSpaceNormal * dot (viewSpaceNormal, randomVec)));
    vec3 bitangent = normalize (cross (tangent, viewSpaceNormal));
    // Matrix to transform the kernel samples from tangent space to view space
    mat3 TBN = mat3 (tangent, bitangent, viewSpaceNormal);
    float occlusion = 0.0f;
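    // For each kernel sample: offset the fragment position, project the offset point
    // back to screen space, and compare the depth stored there against the sample's depth.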
    for (int i = 0; i < NUM_SAMPLES; ++i)
    {
        // Rotate the tangent-space kernel sample into view space
        vec3 displacementVector = TBN * ssaoData.samples[i].rgb;
        // Offset the current view-space position by the sample, scaled by RADIUS
        displacementVector = viewSpacePos + displacementVector * RADIUS;
        // Now we take that offset position and bring it to clip space
        vec4 offsetPos = vec4 (displacementVector, 1.0f);
        offsetPos = cameraData.projMat * offsetPos;
        // then we do perspective division
        offsetPos.xyz /= offsetPos.w;
        // and bring it from [-1, 1] to screen coordinates
        offsetPos.xyz = ((offsetPos.xyz * 0.5f) + 0.5f);
        offsetPos.xy *= vec2 (ssaoSize.xy);
        // Read the stored view-space depth at that screen position
        float sampleDepth = imageLoad (positions, ivec2 (offsetPos.xy)).z;
        // Skip texels with no geometry
        if (sampleDepth == 0.0f)
            continue;
        // Fade out samples that land outside the hemisphere radius
        float rangeCheck = smoothstep (0.0f, 1.0f, RADIUS / abs (viewSpacePos.z - sampleDepth));
        // If the sampled fragment is in front of the offset sample, it occludes this fragment
        occlusion += (sampleDepth <= displacementVector.z - BIAS ? 1.0f : 0.0f) * rangeCheck;
    }
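    // Average over the kernel and invert so 1.0 = fully visible, 0.0 = fully occluded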
    occlusion = 1.0f - (occlusion / float(NUM_SAMPLES));
    // Store the scalar result into the output image, replicated across all channels
    imageStore (outputImage, globalThread, vec4 (occlusion));
}