// iron_gpu.c

#include "iron_gpu.h"
#include <iron_system.h>
#include <string.h>
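
// Platform-independent GPU frontend. This file owns the shared constant
// buffer, deferred resource destruction and render-target bookkeeping;
// the *_internal functions are provided by the active graphics backend.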

static gpu_buffer_t constant_buffer;
static bool gpu_thrown = false;
static gpu_texture_t textures_to_destroy[128];
static gpu_buffer_t buffers_to_destroy[128];
static gpu_pipeline_t pipelines_to_destroy[128];
static int textures_to_destroy_count = 0;
static int buffers_to_destroy_count = 0;
static int pipelines_to_destroy_count = 0;
int constant_buffer_index = 0;
int draw_calls = 0;
int draw_calls_last = 0;
bool gpu_in_use = false;
gpu_texture_t *current_textures[GPU_MAX_TEXTURES] = {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL};
gpu_texture_t *current_render_targets[8] = {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL};
int current_render_targets_count = 0;
gpu_texture_t *current_depth_buffer = NULL;
gpu_pipeline_t *current_pipeline = NULL;
gpu_texture_t framebuffers[GPU_FRAMEBUFFER_COUNT];
gpu_texture_t framebuffer_depth;
int framebuffer_index = 0;
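
// gpu_init locks the first constant-buffer slice right away, so the
// gpu_set_* uniform setters can write before the first draw is issued.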
void gpu_init(int depth_buffer_bits, bool vsync) {
    gpu_init_internal(depth_buffer_bits, vsync);
    gpu_constant_buffer_init(&constant_buffer, GPU_CONSTANT_BUFFER_SIZE * GPU_CONSTANT_BUFFER_MULTIPLE);
    gpu_constant_buffer_lock(&constant_buffer, 0, GPU_CONSTANT_BUFFER_SIZE);
}
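
// gpu_begin transitions the previous pass's targets back to shader-resource
// state, selects the new targets (NULL means the window framebuffer) and
// moves them into render-target state before clearing.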
void gpu_begin(gpu_texture_t **targets, int count, gpu_texture_t *depth_buffer, gpu_clear_t flags, unsigned color, float depth) {
    if (gpu_in_use && !gpu_thrown) {
        gpu_thrown = true;
        iron_log("End before you begin");
    }
    gpu_in_use = true;

    if (current_render_targets_count > 0 && current_render_targets[0] != &framebuffers[framebuffer_index]) {
        for (int i = 0; i < current_render_targets_count; ++i) {
            gpu_barrier(current_render_targets[i], GPU_TEXTURE_STATE_SHADER_RESOURCE);
        }
    }
    if (current_depth_buffer != NULL) {
        gpu_barrier(current_depth_buffer, GPU_TEXTURE_STATE_SHADER_RESOURCE);
    }

    if (targets == NULL) {
        current_render_targets[0] = &framebuffers[framebuffer_index];
        current_render_targets_count = 1;
        current_depth_buffer = framebuffer_depth.width > 0 ? &framebuffer_depth : NULL;
    }
    else {
        for (int i = 0; i < count; ++i) {
            current_render_targets[i] = targets[i];
        }
        current_render_targets_count = count;
        current_depth_buffer = depth_buffer;
    }

    for (int i = 0; i < current_render_targets_count; ++i) {
        gpu_barrier(current_render_targets[i], GPU_TEXTURE_STATE_RENDER_TARGET);
    }
    if (current_depth_buffer != NULL) {
        gpu_barrier(current_depth_buffer, GPU_TEXTURE_STATE_RENDER_TARGET_DEPTH);
    }
    gpu_begin_internal(flags, color, depth);
}
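
// Each draw consumes one GPU_CONSTANT_BUFFER_SIZE slice of the shared
// constant buffer. When the draws of this frame plus the previous one
// would exhaust the GPU_CONSTANT_BUFFER_MULTIPLE slices, the queue is
// flushed with gpu_execute_and_wait before slice 0 is reused.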
void gpu_draw() {
    if (current_pipeline == NULL || current_pipeline->impl.pipeline == NULL) {
        return;
    }
    gpu_constant_buffer_unlock(&constant_buffer);
    gpu_set_constant_buffer(&constant_buffer, constant_buffer_index * GPU_CONSTANT_BUFFER_SIZE, GPU_CONSTANT_BUFFER_SIZE);
    gpu_draw_internal();
    constant_buffer_index++;
    if (constant_buffer_index >= GPU_CONSTANT_BUFFER_MULTIPLE) {
        constant_buffer_index = 0;
    }
    draw_calls++;
    if (draw_calls + draw_calls_last >= GPU_CONSTANT_BUFFER_MULTIPLE) {
        draw_calls = draw_calls_last = constant_buffer_index = 0;
        gpu_execute_and_wait();
    }
    gpu_constant_buffer_lock(&constant_buffer, constant_buffer_index * GPU_CONSTANT_BUFFER_SIZE, GPU_CONSTANT_BUFFER_SIZE);
}

void gpu_end() {
    if (!gpu_in_use && !gpu_thrown) {
        gpu_thrown = true;
        iron_log("Begin before you end");
    }
    gpu_in_use = false;
    gpu_end_internal();
}
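
// Drains the deferred-destruction queues; called after every present and
// whenever a queue fills up (once the GPU has been drained).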
void gpu_cleanup() {
    while (textures_to_destroy_count > 0) {
        textures_to_destroy_count--;
        gpu_texture_destroy_internal(&textures_to_destroy[textures_to_destroy_count]);
    }
    while (buffers_to_destroy_count > 0) {
        buffers_to_destroy_count--;
        gpu_buffer_destroy_internal(&buffers_to_destroy[buffers_to_destroy_count]);
    }
    while (pipelines_to_destroy_count > 0) {
        pipelines_to_destroy_count--;
        gpu_pipeline_destroy_internal(&pipelines_to_destroy[pipelines_to_destroy_count]);
    }
}

void gpu_present() {
    gpu_present_internal();
    draw_calls_last = draw_calls;
    draw_calls = 0;
    gpu_cleanup();
}

void gpu_resize(int width, int height) {
    if (width == 0 || height == 0) {
        return;
    }
    if (width == framebuffers[0].width && height == framebuffers[0].height) {
        return;
    }
    gpu_resize_internal(width, height);
}
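
// Uniform setters write straight into the currently locked constant-buffer
// slice; `location` is a byte offset into constant_buffer.data.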
void gpu_set_int(int location, int value) {
    int *ints = (int *)(&constant_buffer.data[location]);
    ints[0] = value;
}

void gpu_set_int2(int location, int value1, int value2) {
    int *ints = (int *)(&constant_buffer.data[location]);
    ints[0] = value1;
    ints[1] = value2;
}

void gpu_set_int3(int location, int value1, int value2, int value3) {
    int *ints = (int *)(&constant_buffer.data[location]);
    ints[0] = value1;
    ints[1] = value2;
    ints[2] = value3;
}

void gpu_set_int4(int location, int value1, int value2, int value3, int value4) {
    int *ints = (int *)(&constant_buffer.data[location]);
    ints[0] = value1;
    ints[1] = value2;
    ints[2] = value3;
    ints[3] = value4;
}

void gpu_set_ints(int location, int *values, int count) {
    int *ints = (int *)(&constant_buffer.data[location]);
    for (int i = 0; i < count; ++i) {
        ints[i] = values[i];
    }
}

void gpu_set_float(int location, float value) {
    float *floats = (float *)(&constant_buffer.data[location]);
    floats[0] = value;
}

void gpu_set_float2(int location, float value1, float value2) {
    float *floats = (float *)(&constant_buffer.data[location]);
    floats[0] = value1;
    floats[1] = value2;
}

void gpu_set_float3(int location, float value1, float value2, float value3) {
    float *floats = (float *)(&constant_buffer.data[location]);
    floats[0] = value1;
    floats[1] = value2;
    floats[2] = value3;
}

void gpu_set_float4(int location, float value1, float value2, float value3, float value4) {
    float *floats = (float *)(&constant_buffer.data[location]);
    floats[0] = value1;
    floats[1] = value2;
    floats[2] = value3;
    floats[3] = value4;
}

void gpu_set_floats(int location, f32_array_t *values) {
    float *floats = (float *)(&constant_buffer.data[location]);
    for (int i = 0; i < values->length; ++i) {
        floats[i] = values->buffer[i];
    }
}

void gpu_set_bool(int location, bool value) {
    int *ints = (int *)(&constant_buffer.data[location]);
    ints[0] = value ? 1 : 0;
}
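
// Matrix rows are stored with a stride of four floats so that each row
// starts on a 16-byte boundary, matching constant-buffer packing rules.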
static void gpu_internal_set_matrix3(int offset, iron_matrix3x3_t *value) {
    float *floats = (float *)(&constant_buffer.data[offset]);
    for (int y = 0; y < 3; ++y) {
        for (int x = 0; x < 3; ++x) {
            floats[x + y * 4] = iron_matrix3x3_get(value, x, y);
        }
    }
}

static void gpu_internal_set_matrix4(int offset, iron_matrix4x4_t *value) {
    float *floats = (float *)(&constant_buffer.data[offset]);
    for (int y = 0; y < 4; ++y) {
        for (int x = 0; x < 4; ++x) {
            floats[x + y * 4] = iron_matrix4x4_get(value, x, y);
        }
    }
}

void gpu_set_matrix3(int location, iron_matrix3x3_t value) {
    if (gpu_transpose_mat) {
        iron_matrix3x3_t m = value;
        iron_matrix3x3_transpose(&m);
        gpu_internal_set_matrix3(location, &m);
    }
    else {
        gpu_internal_set_matrix3(location, &value);
    }
}

void gpu_set_matrix4(int location, iron_matrix4x4_t value) {
    if (gpu_transpose_mat) {
        iron_matrix4x4_t m = value;
        iron_matrix4x4_transpose(&m);
        gpu_internal_set_matrix4(location, &m);
    }
    else {
        gpu_internal_set_matrix4(location, &value);
    }
}

void gpu_vertex_structure_add(gpu_vertex_structure_t *structure, const char *name, gpu_vertex_data_t data) {
    structure->elements[structure->size].name = name;
    structure->elements[structure->size].data = data;
    structure->size++;
}
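
// Pipeline defaults: no culling, depth test always with writes off, opaque
// blending (ONE/ZERO) and all color channels writable on one RGBA32 attachment.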
void gpu_pipeline_init(gpu_pipeline_t *pipe) {
    pipe->input_layout = NULL;
    pipe->vertex_shader = NULL;
    pipe->fragment_shader = NULL;
    pipe->cull_mode = GPU_CULL_MODE_NONE;
    pipe->depth_write = false;
    pipe->depth_mode = GPU_COMPARE_MODE_ALWAYS;
    pipe->blend_source = GPU_BLEND_ONE;
    pipe->blend_destination = GPU_BLEND_ZERO;
    pipe->alpha_blend_source = GPU_BLEND_ONE;
    pipe->alpha_blend_destination = GPU_BLEND_ZERO;
    for (int i = 0; i < 8; ++i) {
        pipe->color_write_mask_red[i] = true;
        pipe->color_write_mask_green[i] = true;
        pipe->color_write_mask_blue[i] = true;
        pipe->color_write_mask_alpha[i] = true;
        pipe->color_attachment[i] = GPU_TEXTURE_FORMAT_RGBA32;
    }
    pipe->color_attachment_count = 1;
    pipe->depth_attachment_bits = 0;
    memset(&pipe->impl, 0, sizeof(gpu_pipeline_impl_t));
}
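
// The window framebuffers are created at window size; a zero-sized
// framebuffer_depth marks the depth buffer as absent.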
void gpu_create_framebuffers(int depth_buffer_bits) {
    for (int i = 0; i < GPU_FRAMEBUFFER_COUNT; ++i) {
        gpu_render_target_init2(&framebuffers[i], iron_window_width(), iron_window_height(), GPU_TEXTURE_FORMAT_RGBA32, i);
    }
    if (depth_buffer_bits > 0) {
        gpu_render_target_init(&framebuffer_depth, iron_window_width(), iron_window_height(), GPU_TEXTURE_FORMAT_D32);
    }
    else {
        framebuffer_depth.width = framebuffer_depth.height = 0;
    }
}
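
// Destruction is deferred: the resource is copied into a queue and released
// in gpu_cleanup, once the GPU can no longer be using it. A full queue
// forces a drain via gpu_execute_and_wait.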
void gpu_texture_destroy(gpu_texture_t *texture) {
    textures_to_destroy[textures_to_destroy_count] = *texture;
    textures_to_destroy_count++;
    if (textures_to_destroy_count >= 128) {
        gpu_execute_and_wait();
        gpu_cleanup();
    }
}
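
// Switching pipelines invalidates the cached texture bindings.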
void gpu_set_pipeline(gpu_pipeline_t *pipeline) {
    current_pipeline = pipeline;
    for (int i = 0; i < GPU_MAX_TEXTURES; ++i) {
        current_textures[i] = NULL;
    }
    if (pipeline->impl.pipeline == NULL) {
        return;
    }
    gpu_set_pipeline_internal(pipeline);
}

void gpu_pipeline_destroy(gpu_pipeline_t *pipeline) {
    pipelines_to_destroy[pipelines_to_destroy_count] = *pipeline;
    pipelines_to_destroy_count++;
    if (pipelines_to_destroy_count >= 128) {
        gpu_execute_and_wait();
        gpu_cleanup();
    }
}

void gpu_buffer_destroy(gpu_buffer_t *buffer) {
    buffers_to_destroy[buffers_to_destroy_count] = *buffer;
    buffers_to_destroy_count++;
    if (buffers_to_destroy_count >= 128) {
        gpu_execute_and_wait();
        gpu_cleanup();
    }
}

int gpu_vertex_data_size(gpu_vertex_data_t data) {
    switch (data) {
    case GPU_VERTEX_DATA_F32_1X:
        return 1 * 4;
    case GPU_VERTEX_DATA_F32_2X:
        return 2 * 4;
    case GPU_VERTEX_DATA_F32_3X:
        return 3 * 4;
    case GPU_VERTEX_DATA_F32_4X:
        return 4 * 4;
    case GPU_VERTEX_DATA_I16_2X_NORM:
        return 2 * 2;
    case GPU_VERTEX_DATA_I16_4X_NORM:
        return 4 * 2;
    default:
        return 0; // Unknown format; avoids falling off the end of a non-void function
    }
}

int gpu_vertex_struct_size(gpu_vertex_structure_t *s) {
    int size = 0;
    for (int i = 0; i < s->size; ++i) {
        size += gpu_vertex_data_size(s->elements[i].data);
    }
    return size;
}

int gpu_texture_format_size(gpu_texture_format_t format) {
    switch (format) {
    case GPU_TEXTURE_FORMAT_RGBA128:
        return 16;
    case GPU_TEXTURE_FORMAT_RGBA64:
        return 8;
    case GPU_TEXTURE_FORMAT_R16:
        return 2;
    case GPU_TEXTURE_FORMAT_R8:
        return 1;
    default:
        return 4;
    }
}
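
// Raytracing glue: a single pipeline, one acceleration structure and a small
// constant buffer shared by the _gpu_raytrace_* wrappers below.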
static gpu_buffer_t rt_constant_buffer;
static gpu_raytrace_pipeline_t rt_pipeline;
static gpu_raytrace_acceleration_structure_t rt_accel;
static bool rt_created = false;
static bool rt_accel_created = false;
static const int rt_constant_buffer_size = 24;

void _gpu_raytrace_init(buffer_t *shader) {
    if (rt_created) {
        gpu_buffer_destroy(&rt_constant_buffer);
        gpu_raytrace_pipeline_destroy(&rt_pipeline);
    }
    rt_created = true;
    gpu_constant_buffer_init(&rt_constant_buffer, rt_constant_buffer_size * 4);
    gpu_raytrace_pipeline_init(&rt_pipeline, shader->buffer, (int)shader->length, &rt_constant_buffer);
}

void _gpu_raytrace_as_init() {
    if (rt_accel_created) {
        gpu_raytrace_acceleration_structure_destroy(&rt_accel);
    }
    rt_accel_created = true;
    gpu_raytrace_acceleration_structure_init(&rt_accel);
}

void _gpu_raytrace_as_add(struct gpu_buffer *vb, gpu_buffer_t *ib, iron_matrix4x4_t transform) {
    gpu_raytrace_acceleration_structure_add(&rt_accel, vb, ib, transform);
}

void _gpu_raytrace_as_build(struct gpu_buffer *vb_full, gpu_buffer_t *ib_full) {
    gpu_raytrace_acceleration_structure_build(&rt_accel, vb_full, ib_full);
}

void _gpu_raytrace_dispatch_rays(gpu_texture_t *render_target, buffer_t *buffer) {
    float *cb = (float *)buffer->buffer;
    gpu_constant_buffer_lock(&rt_constant_buffer, 0, rt_constant_buffer.count);
    for (int i = 0; i < rt_constant_buffer_size; ++i) {
        float *floats = (float *)(&rt_constant_buffer.data[i * 4]);
        floats[0] = cb[i];
    }
    gpu_constant_buffer_unlock(&rt_constant_buffer);
    gpu_raytrace_set_acceleration_structure(&rt_accel);
    gpu_raytrace_set_pipeline(&rt_pipeline);
    gpu_raytrace_set_target(render_target);
    gpu_raytrace_dispatch_rays();
}
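
// A minimal frame, as suggested by this file's API (a sketch only; the
// clear-flag names and the constant-buffer offset are illustrative, and a
// pipeline plus vertex/index buffers must have been created and bound
// elsewhere):
//
//   gpu_init(24, true);
//   while (running) {
//       gpu_begin(NULL, 0, NULL, GPU_CLEAR_COLOR | GPU_CLEAR_DEPTH, 0xff000000, 1.0f);
//       gpu_set_pipeline(&pipeline);
//       gpu_set_matrix4(0, mvp);
//       gpu_draw();
//       gpu_end();
//       gpu_present();
//   }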