///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2016, Intel Corporation
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
// the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// File changes (yyyy-mm-dd)
// 2016-09-07: [email protected]: first commit
// 2020-12-05: clayjohn: convert to Vulkan and Godot
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

#[compute]

#version 450

#VERSION_DEFINES

layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;

layout(push_constant, std430) uniform Params {
	vec2 pixel_size;
	float z_far;
	float z_near;
	bool orthogonal;
	float radius_sq;
	uvec2 pad;
}
params;
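
// Note on the push constants, as inferred from their use below: pixel_size is the texel size of
// source_depth (1.0 / source resolution) used to build the gather UV, z_near/z_far are the camera
// clip planes used to linearize depth, orthogonal switches between the orthographic and perspective
// linearization paths, radius_sq is the squared effect radius that drives the depth-aware mip
// averaging, and pad only keeps the block size a multiple of 16 bytes.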

layout(set = 0, binding = 0) uniform sampler2D source_depth;

layout(r16f, set = 1, binding = 0) uniform restrict writeonly image2DArray dest_image0; //rename

#ifdef GENERATE_MIPS
layout(r16f, set = 2, binding = 0) uniform restrict writeonly image2DArray dest_image1;
layout(r16f, set = 2, binding = 1) uniform restrict writeonly image2DArray dest_image2;
layout(r16f, set = 2, binding = 2) uniform restrict writeonly image2DArray dest_image3;
#ifdef GENERATE_FULL_MIPS
layout(r16f, set = 2, binding = 3) uniform restrict writeonly image2DArray dest_image4;
#endif
#endif

vec4 screen_space_to_view_space_depth(vec4 p_depth) {
	if (params.orthogonal) {
		vec4 depth = p_depth * 2.0 - 1.0;
		return -(depth * (params.z_far - params.z_near) - (params.z_far + params.z_near)) / 2.0;
	}

	float depth_linearize_mul = params.z_near;
	float depth_linearize_add = params.z_far;

	// Optimized version of "-cameraClipNear / (cameraClipFar - projDepth * (cameraClipFar - cameraClipNear)) * cameraClipFar"
	// Set your depth_linearize_mul and depth_linearize_add to:
	// depth_linearize_mul = ( cameraClipFar * cameraClipNear) / ( cameraClipFar - cameraClipNear );
	// depth_linearize_add = cameraClipFar / ( cameraClipFar - cameraClipNear );

	return depth_linearize_mul / (depth_linearize_add - p_depth);
}
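
// For reference, a derivation sketch behind the comment above (not part of the original notes):
// with n = cameraClipNear, f = cameraClipFar and d = projDepth, dividing numerator and denominator
// by (f - n) gives
//   n * f / (f - d * (f - n)) = (n * f / (f - n)) / (f / (f - n) - d),
// i.e. mul / (add - d) with mul = (f * n) / (f - n) and add = f / (f - n), up to the sign
// convention chosen for view-space Z. The shader itself feeds params.z_near and params.z_far
// straight into mul/add, so the caller is presumably expected to pass values already prepared
// for this simplified form.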

float screen_space_to_view_space_depth(float p_depth) {
	if (params.orthogonal) {
		float depth = p_depth * 2.0 - 1.0;
		return -(depth * (params.z_far - params.z_near) - (params.z_far + params.z_near)) / 2.0;
	}

	float depth_linearize_mul = params.z_near;
	float depth_linearize_add = params.z_far;

	return depth_linearize_mul / (depth_linearize_add - p_depth);
}

#ifdef GENERATE_MIPS
shared float depth_buffer[4][8][8];
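
// Depth-aware average used when building the mip chain: instead of a plain box filter, the closest
// (smallest) of the four linear depths is found and each sample is weighted by how near it is to
// that closest depth, with a quadratic falloff scaled by the effect radius. Samples much farther
// away than the radius contribute little, which keeps depth discontinuities from bleeding across
// the downsampled mips.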
float mip_smart_average(vec4 p_depths) {
	float closest = min(min(p_depths.x, p_depths.y), min(p_depths.z, p_depths.w));
	float falloff_sq = -1.0f / params.radius_sq;
	vec4 dists = p_depths - closest.xxxx;
	vec4 weights = clamp(dists * dists * falloff_sq + 1.0, 0.0, 1.0);
	return dot(weights, p_depths) / dot(weights, vec4(1.0, 1.0, 1.0, 1.0));
}
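
// Deinterleaves a 2x2 block of linear depths into the four quarter-resolution slices of
// dest_image0, then cooperatively builds the smaller mips: each pass halves the set of live
// threads in the 8x8 group, averages four values spaced progressively further apart in the shared
// depth_buffer with mip_smart_average, and writes the result to dest_image1/2/3 (and, with
// GENERATE_FULL_MIPS, carries a single sample on to dest_image4).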
void prepare_depths_and_mips(vec4 p_samples, uvec2 p_output_coord, uvec2 p_gtid) {
	p_samples = screen_space_to_view_space_depth(p_samples);

	depth_buffer[0][p_gtid.x][p_gtid.y] = p_samples.w;
	depth_buffer[1][p_gtid.x][p_gtid.y] = p_samples.z;
	depth_buffer[2][p_gtid.x][p_gtid.y] = p_samples.x;
	depth_buffer[3][p_gtid.x][p_gtid.y] = p_samples.y;

	imageStore(dest_image0, ivec3(p_output_coord.x, p_output_coord.y, 0), vec4(p_samples.w));
	imageStore(dest_image0, ivec3(p_output_coord.x, p_output_coord.y, 1), vec4(p_samples.z));
	imageStore(dest_image0, ivec3(p_output_coord.x, p_output_coord.y, 2), vec4(p_samples.x));
	imageStore(dest_image0, ivec3(p_output_coord.x, p_output_coord.y, 3), vec4(p_samples.y));

	uint depth_array_index = 2 * (p_gtid.y % 2) + (p_gtid.x % 2);
	uvec2 depth_array_offset = ivec2(p_gtid.x % 2, p_gtid.y % 2);
	ivec2 buffer_coord = ivec2(p_gtid) - ivec2(depth_array_offset);

	p_output_coord /= 2;

	groupMemoryBarrier();
	barrier();

	// if (still_alive) <-- all threads alive here
	{
		float sample_00 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 0];
		float sample_01 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 1];
		float sample_10 = depth_buffer[depth_array_index][buffer_coord.x + 1][buffer_coord.y + 0];
		float sample_11 = depth_buffer[depth_array_index][buffer_coord.x + 1][buffer_coord.y + 1];

		float avg = mip_smart_average(vec4(sample_00, sample_01, sample_10, sample_11));
		imageStore(dest_image1, ivec3(p_output_coord.x, p_output_coord.y, depth_array_index), vec4(avg));

		depth_buffer[depth_array_index][buffer_coord.x][buffer_coord.y] = avg;
	}

	bool still_alive = p_gtid.x % 4 == depth_array_offset.x && p_gtid.y % 4 == depth_array_offset.y;

	p_output_coord /= 2;

	groupMemoryBarrier();
	barrier();

	if (still_alive) {
		float sample_00 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 0];
		float sample_01 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 2];
		float sample_10 = depth_buffer[depth_array_index][buffer_coord.x + 2][buffer_coord.y + 0];
		float sample_11 = depth_buffer[depth_array_index][buffer_coord.x + 2][buffer_coord.y + 2];

		float avg = mip_smart_average(vec4(sample_00, sample_01, sample_10, sample_11));
		imageStore(dest_image2, ivec3(p_output_coord.x, p_output_coord.y, depth_array_index), vec4(avg));

		depth_buffer[depth_array_index][buffer_coord.x][buffer_coord.y] = avg;
	}

	still_alive = p_gtid.x % 8 == depth_array_offset.x && p_gtid.y % 8 == depth_array_offset.y;

	p_output_coord /= 2;

	groupMemoryBarrier();
	barrier();

	if (still_alive) {
		float sample_00 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 0];
		float sample_01 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 4];
		float sample_10 = depth_buffer[depth_array_index][buffer_coord.x + 4][buffer_coord.y + 0];
		float sample_11 = depth_buffer[depth_array_index][buffer_coord.x + 4][buffer_coord.y + 4];

		float avg = mip_smart_average(vec4(sample_00, sample_01, sample_10, sample_11));
		imageStore(dest_image3, ivec3(p_output_coord.x, p_output_coord.y, depth_array_index), vec4(avg));

#ifndef GENERATE_FULL_MIPS
	}
#else
		depth_buffer[depth_array_index][buffer_coord.x][buffer_coord.y] = avg;
	}

	still_alive = p_gtid.x % 16 == depth_array_offset.x && p_gtid.y % 16 == depth_array_offset.y;

	p_output_coord /= 2;

	if (still_alive) {
		// Use the previous average, not ideal, but still not bad.
		float sample_00 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 0];

		imageStore(dest_image4, ivec3(p_output_coord.x, p_output_coord.y, depth_array_index), vec4(sample_00));
	}
#endif
}
#else

#ifndef USE_HALF_BUFFERS
void prepare_depths(vec4 p_samples, uvec2 p_tid) {
	p_samples = screen_space_to_view_space_depth(p_samples);

	imageStore(dest_image0, ivec3(p_tid, 0), vec4(p_samples.w));
	imageStore(dest_image0, ivec3(p_tid, 1), vec4(p_samples.z));
	imageStore(dest_image0, ivec3(p_tid, 2), vec4(p_samples.x));
	imageStore(dest_image0, ivec3(p_tid, 3), vec4(p_samples.y));
}
#endif

#endif
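
// Each invocation produces one output texel per depth slice. In the default path that covers a
// 2x2 block of the source depth (4x4 when USE_HALF_SIZE is defined), deinterleaved so that
// neighbouring full-resolution pixels land in different layers of the destination array; the
// half-buffer variant only writes slices 0 and 3, trading quality for bandwidth.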
void main() {
#ifdef USE_HALF_BUFFERS
	// Half buffers means that we divide depth into two half res buffers (we only capture 1/4 of pixels).
#ifdef USE_HALF_SIZE
	float sample_00 = texelFetch(source_depth, ivec2(4 * gl_GlobalInvocationID.x + 0, 4 * gl_GlobalInvocationID.y + 0), 0).x;
	float sample_11 = texelFetch(source_depth, ivec2(4 * gl_GlobalInvocationID.x + 2, 4 * gl_GlobalInvocationID.y + 2), 0).x;
#else
	float sample_00 = texelFetch(source_depth, ivec2(2 * gl_GlobalInvocationID.x + 0, 2 * gl_GlobalInvocationID.y + 0), 0).x;
	float sample_11 = texelFetch(source_depth, ivec2(2 * gl_GlobalInvocationID.x + 1, 2 * gl_GlobalInvocationID.y + 1), 0).x;
#endif

	sample_00 = screen_space_to_view_space_depth(sample_00);
	sample_11 = screen_space_to_view_space_depth(sample_11);

	imageStore(dest_image0, ivec3(gl_GlobalInvocationID.xy, 0), vec4(sample_00));
	imageStore(dest_image0, ivec3(gl_GlobalInvocationID.xy, 3), vec4(sample_11));
#else //!USE_HALF_BUFFERS
#ifdef USE_HALF_SIZE
	ivec2 depth_buffer_coord = 4 * ivec2(gl_GlobalInvocationID.xy);
	ivec2 output_coord = ivec2(gl_GlobalInvocationID);

	vec2 uv = (vec2(depth_buffer_coord) + 0.5f) * params.pixel_size;
	vec4 samples;
	samples.x = textureLodOffset(source_depth, uv, 0, ivec2(0, 2)).x;
	samples.y = textureLodOffset(source_depth, uv, 0, ivec2(2, 2)).x;
	samples.z = textureLodOffset(source_depth, uv, 0, ivec2(2, 0)).x;
	samples.w = textureLodOffset(source_depth, uv, 0, ivec2(0, 0)).x;
#else
	ivec2 depth_buffer_coord = 2 * ivec2(gl_GlobalInvocationID.xy);
	ivec2 output_coord = ivec2(gl_GlobalInvocationID);

	vec2 uv = (vec2(depth_buffer_coord) + 0.5f) * params.pixel_size;
	vec4 samples = textureGather(source_depth, uv);
#endif //USE_HALF_SIZE
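
	// Note on the sample ordering above (an observation, not part of the original comments):
	// textureGather returns the 2x2 footprint with .w at texel offset (0, 0), .z at (1, 0),
	// .x at (0, 1) and .y at (1, 1), which is the same layout the USE_HALF_SIZE branch rebuilds
	// with explicit offsets (just spaced two texels apart), and it is why prepare_depths* store
	// the components to the array layers in w, z, x, y order.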

#ifdef GENERATE_MIPS
	prepare_depths_and_mips(samples, output_coord, gl_LocalInvocationID.xy);
#else
	prepare_depths(samples, gl_GlobalInvocationID.xy);
#endif
#endif //USE_HALF_BUFFERS
}