TiledDeferredLighting.bsl

#include "$ENGINE$\GBuffer.bslinc"
#include "$ENGINE$\LightingCommon.bslinc"

Parameters =
{
    Sampler2D gGBufferASamp : alias("gGBufferATex");
    Sampler2D gGBufferBSamp : alias("gGBufferBTex");
    Sampler2D gDepthBufferSamp : alias("gDepthBufferTex");

    Texture2D gGBufferATex : auto("GBufferA");
    Texture2D gGBufferBTex : auto("GBufferB");
    Texture2D gDepthBufferTex : auto("GBufferDepth");
};

Technique
 : inherits("GBuffer")
 : inherits("LightingCommon") =
{
    Language = "HLSL11";

    Pass =
    {
        Compute =
        {
            // Arbitrary limit, increase if needed
            #define MAX_LIGHTS 512

            SamplerState gGBufferASamp : register(s0);
            SamplerState gGBufferBSamp : register(s1);
            SamplerState gDepthBufferSamp : register(s2);

            Texture2D gGBufferATex : register(t0);
            Texture2D gGBufferBTex : register(t1);
            Texture2D gDepthBufferTex : register(t2);
            // Decodes raw G-buffer values into a SurfaceData structure
            SurfaceData decodeGBuffer(float4 GBufferAData, float4 GBufferBData, float deviceZ)
            {
                SurfaceData output;

                output.albedo.xyz = GBufferAData.xyz;
                output.albedo.w = 1.0f;
                output.worldNormal = GBufferBData * float4(2, 2, 2, 1) - float4(1, 1, 1, 0);
                output.worldNormal.xyz = normalize(output.worldNormal.xyz);
                output.depth = convertFromDeviceZ(deviceZ);

                return output;
            }

            // Samples and decodes the G-buffer at the provided UV coordinates (mip 0)
            SurfaceData getGBufferData(float2 uv)
            {
                float4 GBufferAData = gGBufferATex.SampleLevel(gGBufferASamp, uv, 0);
                float4 GBufferBData = gGBufferBTex.SampleLevel(gGBufferBSamp, uv, 0);
                float deviceZ = gDepthBufferTex.SampleLevel(gDepthBufferSamp, uv, 0).r;

                return decodeGBuffer(GBufferAData, GBufferBData, deviceZ);
            }

            // Loads and decodes the G-buffer at the provided pixel coordinates
            SurfaceData getGBufferData(uint2 pixelPos)
            {
                float4 GBufferAData = gGBufferATex.Load(int3(pixelPos, 0));
                float4 GBufferBData = gGBufferBTex.Load(int3(pixelPos, 0));
                float deviceZ = gDepthBufferTex.Load(int3(pixelPos, 0)).r;

                return decodeGBuffer(GBufferAData, GBufferBData, deviceZ);
            }
            StructuredBuffer<LightData> gLights : register(t3);
            RWTexture2D<float4> gOutput : register(u0);

            cbuffer Params : register(b0)
            {
                // Offsets at which specific light types begin in the gLights buffer
                // Directional lights are assumed to start at 0
                // x - offset to point lights, y - offset to spot lights, z - total number of lights
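                // Example (illustrative): with 2 directional, 5 radial and 3 spot lights in the buffer,
                // gLightOffsets would be (2, 7, 10) - directional lights occupy indices [0, 2), radial
                // lights [2, 7) and spot lights [7, 10)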
                uint3 gLightOffsets;
            }
            groupshared uint sTileMinZ;
            groupshared uint sTileMaxZ;
            groupshared uint sNumLightsPerType[2];
            groupshared uint sTotalNumLights;
            groupshared uint sLightIndices[MAX_LIGHTS];

            [numthreads(TILE_SIZE, TILE_SIZE, 1)]
            void main(
                uint3 groupId : SV_GroupID,
                uint3 groupThreadId : SV_GroupThreadID,
                uint3 dispatchThreadId : SV_DispatchThreadID,
                uint threadIndex : SV_GroupIndex)
            {
                uint2 pixelPos = dispatchThreadId.xy + gViewportRectangle.xy;
                float deviceZ = gDepthBufferTex.Load(int3(pixelPos, 0)).r;
                float depth = convertFromDeviceZ(deviceZ);

                // Set initial values
                if(threadIndex == 0)
                {
                    sTileMinZ = 0x7F7FFFFF; // Bit pattern of FLT_MAX
                    sTileMaxZ = 0;
                    sNumLightsPerType[0] = 0;
                    sNumLightsPerType[1] = 0;
                    sTotalNumLights = 0;
                }
                GroupMemoryBarrierWithGroupSync();

                // Determine minimum and maximum depth values
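                // asuint() preserves ordering for non-negative floats, so integer atomics can be used for a
                // floating-point min/max; view-space depth is negative here (camera looks down -Z), hence -depth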
                InterlockedMin(sTileMinZ, asuint(-depth));
                InterlockedMax(sTileMaxZ, asuint(-depth));

                GroupMemoryBarrierWithGroupSync();

                float minTileZ = asfloat(sTileMinZ);
                float maxTileZ = asfloat(sTileMaxZ);
                // Create a frustum for the current tile
                // First determine the scale of a tile compared to the viewport
                float2 tileScale = gViewportRectangle.zw / float2(TILE_SIZE, TILE_SIZE);

                // Now we need to use that scale to scale down the frustum.
                // Assume a projection matrix:
                // A, 0, C, 0
                // 0, B, D, 0
                // 0, 0, Q, QN
                // 0, 0, -1, 0
                //
                // Where A = 2*n / (r - l)
                // and C = (r + l) / (r - l)
                //
                // Q & QN are used for the Z value, which we don't need to scale. B & D are the equivalents for the
                // Y value, so we'll only consider the X values (A & C) from now on.
                //
                // Both A and C are inversely proportional to the size of the frustum (r - l). A larger scale means the
                // tiles are that much smaller than the viewport, so as our scale increases, (r - l) decreases,
                // which means A & C as a whole increase. Therefore:
                // A' = A * tileScale.x
                // C' = C * tileScale.x

                // Aside from scaling, we also need to offset the frustum to the center of the tile.
                // For this we calculate a bias value which we add to the C & D terms (the ones that control
                // the offset in the projection matrix).
                float2 tileBias = tileScale - 1 - groupId.xy * 2;

                // This yields a bias in the range [-(tileScale - 1), tileScale - 1]. Every second bias value is skipped,
                // as it would correspond to a point in-between two tiles, overlapping existing frusta.
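                // Illustrative example (assuming a 1920x1080 viewport and TILE_SIZE = 16): tileScale.x = 120, so
                // tileBias.x runs from 119 for the leftmost column of tiles (groupId.x = 0) down to -119 for the
                // rightmost one (groupId.x = 119), centering each scaled frustum over its own tile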
                float At = gMatProj[0][0] * tileScale.x;
                float Ctt = gMatProj[0][2] * tileScale.x - tileBias.x;

                float Bt = gMatProj[1][1] * tileScale.y;
                float Dtt = gMatProj[1][2] * tileScale.y + tileBias.y;

                // Extract left/right/top/bottom frustum planes from the scaled projection matrix. This is the usual
                // row-combination (Gribb/Hartmann) extraction; the .w component is 0 because, for a perspective
                // projection, these planes pass through the view-space origin.
                // Note: This could be done on the CPU, since the planes are shared by all threads in a tile and only
                // change when the projection matrix changes.
                float4 frustumPlanes[6];
                frustumPlanes[0] = float4(At, 0.0f, gMatProj[3][2] + Ctt, 0.0f);
                frustumPlanes[1] = float4(-At, 0.0f, gMatProj[3][2] - Ctt, 0.0f);
                frustumPlanes[2] = float4(0.0f, -Bt, gMatProj[3][2] - Dtt, 0.0f);
                frustumPlanes[3] = float4(0.0f, Bt, gMatProj[3][2] + Dtt, 0.0f);
                // Normalize
                [unroll]
                for (uint i = 0; i < 4; ++i)
                    frustumPlanes[i] *= rcp(length(frustumPlanes[i].xyz));

                // Generate near/far frustum planes
                // Note: d gets negated in the plane equation (n.p + d), which is why its sign is the opposite of
                // what you might intuitively expect
                frustumPlanes[4] = float4(0.0f, 0.0f, -1.0f, -minTileZ);
                frustumPlanes[5] = float4(0.0f, 0.0f, 1.0f, maxTileZ);
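                // For example, dot() of the near plane (0, 0, -1, -minTileZ) with a view-space position (w = 1)
                // gives -z - minTileZ; since visible view-space z is negative, -z is the distance from the camera,
                // so the test used below (dist >= -lightRadius) only rejects lights whose sphere lies entirely
                // closer than minTileZ. The far plane mirrors this for maxTileZ.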
                // Generate world position
                float2 screenUv = ((float2)pixelPos + 0.5f) / (float2)gViewportRectangle.zw;
                float2 clipSpacePos = (screenUv - gClipToUVScaleOffset.zw) / gClipToUVScaleOffset.xy;
                // x, y are now in clip space, z, w are in view space
                // We multiply them by a special inverse view-projection matrix, which had the projection entries that affect
                // z, w eliminated (since they are already in view space)
                // Note: The multiply by depth should be avoided when using an orthographic projection
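                // (With the projection above, w_clip = -z_view, so multiplying the post-divide x, y by -depth
                // recovers the pre-divide clip-space values)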
                float4 mixedSpacePos = float4(clipSpacePos.xy * -depth, depth, 1);
                float4 worldPosition4D = mul(gMatScreenToWorld, mixedSpacePos);
                float3 worldPosition = worldPosition4D.xyz / worldPosition4D.w;
                // Find radial & spot lights overlapping the tile
                for(uint type = 0; type < 2; type++)
                {
                    uint lightOffset = threadIndex + gLightOffsets[type];
                    uint lightsEnd = gLightOffsets[type + 1];
                    // Each of the TILE_SIZE * TILE_SIZE threads in the group tests a disjoint subset of the lights
                    for (uint i = lightOffset; i < lightsEnd && i < MAX_LIGHTS; i += TILE_SIZE * TILE_SIZE)
                    {
                        float4 lightPosition = mul(gMatView, float4(gLights[i].position, 1.0f));
                        float lightRadius = gLights[i].radius;

                        // Note: The cull method can have false positives. With large light bounds and small tiles there
                        // can be quite a lot of them. Consider adding an extra heuristic, such as a separating-plane check.
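                        // One possible refinement (not implemented here) would be to build a view-space AABB for the
                        // tile from its corner rays and depth bounds and reject spheres that don't intersect it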
                        bool lightInTile = true;

                        // First check the side planes, as this will cull the majority of the lights
                        [unroll]
                        for (uint j = 0; j < 4; ++j)
                        {
                            float dist = dot(frustumPlanes[j], lightPosition);
                            lightInTile = lightInTile && (dist >= -lightRadius);
                        }

                        // Make sure to do an actual branch, since it's quite likely an entire warp will have the same value
                        [branch]
                        if (lightInTile)
                        {
                            bool inDepthRange = true;

                            // Check near/far planes
                            [unroll]
                            for (uint j = 4; j < 6; ++j)
                            {
                                float dist = dot(frustumPlanes[j], lightPosition);
                                inDepthRange = inDepthRange && (dist >= -lightRadius);
                            }
                            // The light overlaps the tile, add it to the list
                            [branch]
                            if (inDepthRange)
                            {
                                InterlockedAdd(sNumLightsPerType[type], 1U);

                                uint idx;
                                InterlockedAdd(sTotalNumLights, 1U, idx);
                                sLightIndices[idx] = i;
                            }
                        }
                    }

                    // Sync so that all indices of the current light type are written before the next type starts.
                    // This keeps sLightIndices grouped by type, which the shading loops below rely on.
                    GroupMemoryBarrierWithGroupSync();
                }
                GroupMemoryBarrierWithGroupSync();

                // Note: This unnecessarily samples depth again
                SurfaceData surfaceData = getGBufferData(pixelPos);

                float3 lightAccumulator = 0;
                float alpha = 0.0f;
                if(surfaceData.worldNormal.w > 0.0f)
                {
                    // Directional lights (not tile-culled, always evaluated)
                    for(uint i = 0; i < gLightOffsets[0]; ++i)
                        lightAccumulator += getDirLightContibution(surfaceData, gLights[i]);

                    // Radial (point) lights that passed the tile culling above
                    for(uint i = 0; i < sNumLightsPerType[0]; ++i)
                    {
                        uint lightIdx = sLightIndices[i];
                        lightAccumulator += getPointLightContribution(worldPosition, surfaceData, gLights[lightIdx]);
                    }

                    // Spot lights that passed the tile culling above
                    for(uint i = sNumLightsPerType[0]; i < sTotalNumLights; ++i)
                    {
                        uint lightIdx = sLightIndices[i];
                        lightAccumulator += getSpotLightContribution(worldPosition, surfaceData, gLights[lightIdx]);
                    }

                    alpha = 1.0f;
                }
                float3 diffuse = surfaceData.albedo.xyz / PI; // TODO - Add better lighting model later
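                // The divide by PI makes the Lambertian diffuse term energy conserving: the cosine-weighted
                // hemisphere integral of albedo / PI equals albedo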
                uint2 viewportMax = gViewportRectangle.xy + gViewportRectangle.zw;

                // Ignore pixels out of valid range
                if (all(pixelPos < viewportMax))
                    gOutput[pixelPos] = float4(gOutput[pixelPos].xyz + diffuse * lightAccumulator, alpha);
            }
        };
    };
};
Technique
 : inherits("GBuffer")
 : inherits("LightingCommon") =
{
    Language = "GLSL";

    Pass =
    {
        Compute =
        {
            layout (local_size_x = TILE_SIZE, local_size_y = TILE_SIZE) in;

            layout(binding = 1) uniform sampler2D gGBufferATex;
            layout(binding = 2) uniform sampler2D gGBufferBTex;
            layout(binding = 3) uniform sampler2D gDepthBufferTex;
            SurfaceData decodeGBuffer(vec4 GBufferAData, vec4 GBufferBData, float deviceZ)
            {
                SurfaceData surfaceData;

                surfaceData.albedo.xyz = GBufferAData.xyz;
                surfaceData.albedo.w = 1.0f;
                surfaceData.worldNormal = GBufferBData * vec4(2, 2, 2, 1) - vec4(1, 1, 1, 0);
                surfaceData.worldNormal.xyz = normalize(surfaceData.worldNormal.xyz);
                surfaceData.depth = convertFromDeviceZ(deviceZ);

                return surfaceData;
            }

            SurfaceData getGBufferData(vec2 uv)
            {
                vec4 GBufferAData = textureLod(gGBufferATex, uv, 0);
                vec4 GBufferBData = textureLod(gGBufferBTex, uv, 0);
                float deviceZ = textureLod(gDepthBufferTex, uv, 0).r;

                return decodeGBuffer(GBufferAData, GBufferBData, deviceZ);
            }

            SurfaceData getGBufferData(ivec2 pixelPos)
            {
                vec4 GBufferAData = texelFetch(gGBufferATex, pixelPos, 0);
                vec4 GBufferBData = texelFetch(gGBufferBTex, pixelPos, 0);
                float deviceZ = texelFetch(gDepthBufferTex, pixelPos, 0).r;

                return decodeGBuffer(GBufferAData, GBufferBData, deviceZ);
            }
            layout(std430, binding = 4) buffer gLights
            {
                LightData[] gLightsData;
            };

            layout(binding = 5, rgba16f) uniform image2D gOutput;

            layout(binding = 6, std140) uniform Params
            {
                // Offsets at which specific light types begin in the gLights buffer
                // Directional lights are assumed to start at 0
                // x - offset to point lights, y - offset to spot lights, z - total number of lights
                uvec3 gLightOffsets;
            };
            void main()
            {
                ivec2 pixelPos = ivec2(gl_GlobalInvocationID.xy) + gViewportRectangle.xy;
                SurfaceData surfaceData = getGBufferData(pixelPos);

                float alpha = 0.0f;
                vec3 lightAccumulator = vec3(0, 0, 0);
                if(surfaceData.worldNormal.w > 0.0f)
                {
                    vec2 screenUv = (vec2(pixelPos) + 0.5f) / vec2(gViewportRectangle.zw);
                    vec2 clipSpacePos = (screenUv - gClipToUVScaleOffset.zw) / gClipToUVScaleOffset.xy;

                    // x, y are now in clip space, z, w are in view space
                    // We multiply them by a special inverse view-projection matrix, which had the projection entries that affect
                    // z, w eliminated (since they are already in view space)
                    // Note: The multiply by depth should be avoided when using an orthographic projection
                    vec4 mixedSpacePos = vec4(clipSpacePos.xy * -surfaceData.depth, surfaceData.depth, 1);
                    vec4 worldPosition4D = gMatScreenToWorld * mixedSpacePos;
                    vec3 worldPosition = worldPosition4D.xyz / worldPosition4D.w;
                    for(uint i = 0; i < gLightOffsets.x; i++)
                    {
                        LightData data = gLightsData[i];
                        lightAccumulator += getDirLightContibution(surfaceData, data);
                    }

                    for(uint i = gLightOffsets.x; i < gLightOffsets.y; i++)
                    {
                        LightData data = gLightsData[i];
                        lightAccumulator += getPointLightContribution(worldPosition, surfaceData, data);
                    }

                    for(uint i = gLightOffsets.y; i < gLightOffsets.z; i++)
                    {
                        LightData data = gLightsData[i];
                        lightAccumulator += getSpotLightContribution(worldPosition, surfaceData, data);
                    }

                    alpha = 1.0f;
                }
                vec3 diffuse = surfaceData.albedo.xyz / PI; // TODO - Add better lighting model later
                uvec2 viewportMax = gViewportRectangle.xy + gViewportRectangle.zw;

                // Ignore pixels out of valid range
                if (all(lessThan(pixelPos, ivec2(viewportMax))))
                {
                    vec4 existingValue = imageLoad(gOutput, pixelPos);
                    imageStore(gOutput, pixelPos, vec4(diffuse * lightAccumulator + existingValue.xyz, alpha));
                }
            }
        };
    };
};