// iron_gpu.c

#include "iron_gpu.h"
#include <iron_system.h>

static gpu_buffer_t constant_buffer;
static bool gpu_thrown = false;
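
// Resources queued for deferred destruction; the queues are flushed at the end
// of gpu_present so nothing is freed while the frame that references it may
// still be in flight.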
static gpu_texture_t textures_to_destroy[128];
static gpu_buffer_t buffers_to_destroy[128];
static gpu_pipeline_t pipelines_to_destroy[32];
static int textures_to_destroy_count = 0;
static int buffers_to_destroy_count = 0;
static int pipelines_to_destroy_count = 0;

int constant_buffer_index = 0;
int draw_calls = 0;
int draw_calls_last = 0;
bool gpu_in_use = false;
gpu_texture_t *current_render_targets[8] = {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL};
int current_render_targets_count = 0;
gpu_texture_t *current_depth_buffer = NULL;
gpu_texture_t framebuffers[GPU_FRAMEBUFFER_COUNT];
gpu_texture_t framebuffer_depth;
int framebuffer_index = 0;
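
// Initializes the backend and creates one large constant buffer that is used
// as a ring of GPU_CONSTANT_BUFFER_MULTIPLE slices, one slice per draw call.
// The first slice is locked immediately so the gpu_set_* calls below can
// write into it.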
void gpu_init(int depth_buffer_bits, bool vsync) {
	gpu_init_internal(depth_buffer_bits, vsync);
	gpu_constant_buffer_init(&constant_buffer, GPU_CONSTANT_BUFFER_SIZE * GPU_CONSTANT_BUFFER_MULTIPLE);
	gpu_constant_buffer_lock(&constant_buffer, 0, GPU_CONSTANT_BUFFER_SIZE);
}
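
// Begins a render pass. With targets == NULL, rendering goes to the current
// swapchain framebuffer (plus its depth buffer, if one was created). A minimal
// frame sketch (the flag and clear values are illustrative, not taken from
// this file):
//
//   gpu_begin(NULL, 0, NULL, flags, /* color */ 0xff000000, /* depth */ 1.0f);
//   // ... gpu_set_* uploads and gpu_draw calls ...
//   gpu_end();
//   gpu_present();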
void gpu_begin(gpu_texture_t **targets, int count, gpu_texture_t *depth_buffer, unsigned flags, unsigned color, float depth) {
	if (gpu_in_use && !gpu_thrown) {
		gpu_thrown = true;
		iron_log("End before you begin");
	}
	gpu_in_use = true;

	// Transition the previous pass's targets back to shader-resource state so
	// they can be sampled, unless the previous target was the swapchain
	// framebuffer itself.
	if (current_render_targets_count > 0 && current_render_targets[0] != &framebuffers[framebuffer_index]) {
		for (int i = 0; i < current_render_targets_count; ++i) {
			gpu_barrier(current_render_targets[i], GPU_TEXTURE_STATE_SHADER_RESOURCE);
		}
	}
	if (current_depth_buffer != NULL) {
		gpu_barrier(current_depth_buffer, GPU_TEXTURE_STATE_SHADER_RESOURCE);
	}

	if (targets == NULL) {
		// Default to the swapchain framebuffer; width == 0 marks "no depth buffer".
		current_render_targets[0] = &framebuffers[framebuffer_index];
		current_render_targets_count = 1;
		current_depth_buffer = framebuffer_depth.width > 0 ? &framebuffer_depth : NULL;
	}
	else {
		for (int i = 0; i < count; ++i) {
			current_render_targets[i] = targets[i];
		}
		current_render_targets_count = count;
		current_depth_buffer = depth_buffer;
	}

	// Transition the new targets into render-target state before drawing.
	for (int i = 0; i < current_render_targets_count; ++i) {
		gpu_barrier(current_render_targets[i], GPU_TEXTURE_STATE_RENDER_TARGET);
	}
	if (current_depth_buffer != NULL) {
		gpu_barrier(current_depth_buffer, GPU_TEXTURE_STATE_RENDER_TARGET_DEPTH);
	}

	gpu_begin_internal(flags, color, depth);
}
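
// Issues one draw call. Each draw gets its own GPU_CONSTANT_BUFFER_SIZE slice
// of the shared constant buffer: unlock the slice that gpu_set_* wrote into,
// bind it, draw, then lock the next slice. When this frame's draws plus the
// previous frame's would exhaust the ring, everything submitted so far is
// executed and waited on before the ring restarts at slice 0.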
void gpu_draw() {
	gpu_constant_buffer_unlock(&constant_buffer);
	gpu_set_constant_buffer(&constant_buffer, constant_buffer_index * GPU_CONSTANT_BUFFER_SIZE, GPU_CONSTANT_BUFFER_SIZE);
	gpu_draw_internal();
	constant_buffer_index++;
	if (constant_buffer_index >= GPU_CONSTANT_BUFFER_MULTIPLE) {
		constant_buffer_index = 0;
	}
	draw_calls++;
	if (draw_calls + draw_calls_last >= GPU_CONSTANT_BUFFER_MULTIPLE) {
		draw_calls = draw_calls_last = constant_buffer_index = 0;
		gpu_execute_and_wait();
	}
	gpu_constant_buffer_lock(&constant_buffer, constant_buffer_index * GPU_CONSTANT_BUFFER_SIZE, GPU_CONSTANT_BUFFER_SIZE);
}

void gpu_end() {
	if (!gpu_in_use && !gpu_thrown) {
		gpu_thrown = true;
		iron_log("Begin before you end");
	}
	gpu_in_use = false;
	gpu_end_internal();
}
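
// Presents the frame, rolls the draw-call counters over, and destroys any
// resources that were queued for destruction while the frame was being built.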
void gpu_present() {
	gpu_present_internal();
	draw_calls_last = draw_calls;
	draw_calls = 0;
	while (textures_to_destroy_count > 0) {
		textures_to_destroy_count--;
		gpu_texture_destroy_internal(&textures_to_destroy[textures_to_destroy_count]);
	}
	while (buffers_to_destroy_count > 0) {
		buffers_to_destroy_count--;
		gpu_buffer_destroy_internal(&buffers_to_destroy[buffers_to_destroy_count]);
	}
	while (pipelines_to_destroy_count > 0) {
		pipelines_to_destroy_count--;
		gpu_pipeline_destroy_internal(&pipelines_to_destroy[pipelines_to_destroy_count]);
	}
}

void gpu_resize(int width, int height) {
	if (width == 0 || height == 0) {
		return;
	}
	if (width == framebuffers[0].width && height == framebuffers[0].height) {
		return;
	}
	gpu_resize_internal(width, height);
}
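
// The gpu_set_* family writes raw values into the currently locked slice of
// the constant buffer at byte offset `location`; offsets come from the
// shader's uniform layout. A sketch (the offsets below are made up for
// illustration):
//
//   gpu_set_matrix4(0, mvp);          // bytes 0..63
//   gpu_set_float4(64, r, g, b, a);   // bytes 64..79
//   gpu_set_int(80, frame_index);     // bytes 80..83
//   gpu_draw();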
void gpu_set_int(int location, int value) {
	int *ints = (int *)(&constant_buffer.data[location]);
	ints[0] = value;
}

void gpu_set_int2(int location, int value1, int value2) {
	int *ints = (int *)(&constant_buffer.data[location]);
	ints[0] = value1;
	ints[1] = value2;
}

void gpu_set_int3(int location, int value1, int value2, int value3) {
	int *ints = (int *)(&constant_buffer.data[location]);
	ints[0] = value1;
	ints[1] = value2;
	ints[2] = value3;
}

void gpu_set_int4(int location, int value1, int value2, int value3, int value4) {
	int *ints = (int *)(&constant_buffer.data[location]);
	ints[0] = value1;
	ints[1] = value2;
	ints[2] = value3;
	ints[3] = value4;
}

void gpu_set_ints(int location, int *values, int count) {
	int *ints = (int *)(&constant_buffer.data[location]);
	for (int i = 0; i < count; ++i) {
		ints[i] = values[i];
	}
}

void gpu_set_float(int location, float value) {
	float *floats = (float *)(&constant_buffer.data[location]);
	floats[0] = value;
}

void gpu_set_float2(int location, float value1, float value2) {
	float *floats = (float *)(&constant_buffer.data[location]);
	floats[0] = value1;
	floats[1] = value2;
}

void gpu_set_float3(int location, float value1, float value2, float value3) {
	float *floats = (float *)(&constant_buffer.data[location]);
	floats[0] = value1;
	floats[1] = value2;
	floats[2] = value3;
}

void gpu_set_float4(int location, float value1, float value2, float value3, float value4) {
	float *floats = (float *)(&constant_buffer.data[location]);
	floats[0] = value1;
	floats[1] = value2;
	floats[2] = value3;
	floats[3] = value4;
}

void gpu_set_floats(int location, f32_array_t *values) {
	float *floats = (float *)(&constant_buffer.data[location]);
	for (int i = 0; i < values->length; ++i) {
		floats[i] = values->buffer[i];
	}
}

void gpu_set_bool(int location, bool value) {
	// Bools are stored as 4-byte ints to match shader constant layout.
	int *ints = (int *)(&constant_buffer.data[location]);
	ints[0] = value ? 1 : 0;
}
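
// Matrix rows are written with a stride of 4 floats, so a 3x3 matrix occupies
// three 16-byte rows; this matches typical constant-buffer alignment rules
// (e.g. std140 / HLSL cbuffers), where each matrix row is padded to a vec4.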
static void gpu_internal_set_matrix3(int offset, iron_matrix3x3_t *value) {
	float *floats = (float *)(&constant_buffer.data[offset]);
	for (int y = 0; y < 3; ++y) {
		for (int x = 0; x < 3; ++x) {
			floats[x + y * 4] = iron_matrix3x3_get(value, x, y);
		}
	}
}

static void gpu_internal_set_matrix4(int offset, iron_matrix4x4_t *value) {
	float *floats = (float *)(&constant_buffer.data[offset]);
	for (int y = 0; y < 4; ++y) {
		for (int x = 0; x < 4; ++x) {
			floats[x + y * 4] = iron_matrix4x4_get(value, x, y);
		}
	}
}

void gpu_set_matrix3(int location, iron_matrix3x3_t value) {
	// gpu_transpose_mat is presumably set by backends whose API expects the
	// opposite matrix majority.
	if (gpu_transpose_mat) {
		iron_matrix3x3_t m = value;
		iron_matrix3x3_transpose(&m);
		gpu_internal_set_matrix3(location, &m);
	}
	else {
		gpu_internal_set_matrix3(location, &value);
	}
}

void gpu_set_matrix4(int location, iron_matrix4x4_t value) {
	if (gpu_transpose_mat) {
		iron_matrix4x4_t m = value;
		iron_matrix4x4_transpose(&m);
		gpu_internal_set_matrix4(location, &m);
	}
	else {
		gpu_internal_set_matrix4(location, &value);
	}
}
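
// Appends one attribute to a vertex layout. Example (a sketch; assumes the
// structure is zero-initialized so `size` starts at 0):
//
//   gpu_vertex_structure_t s = {0};
//   gpu_vertex_structure_add(&s, "pos", GPU_VERTEX_DATA_F32_3X);
//   gpu_vertex_structure_add(&s, "tex", GPU_VERTEX_DATA_F32_2X);
//   // gpu_vertex_struct_size(&s) == 20 bytes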
void gpu_vertex_structure_add(gpu_vertex_structure_t *structure, const char *name, gpu_vertex_data_t data) {
	structure->elements[structure->size].name = name;
	structure->elements[structure->size].data = data;
	structure->size++;
}
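
// Fills a pipeline with pass-through defaults: no culling, no depth test or
// write, opaque blending (source ONE, destination ZERO) and a single RGBA32
// color attachment. Callers override what they need, e.g. (the depth-mode
// enum value below is an assumption; shaders come from elsewhere):
//
//   gpu_pipeline_t pipe;
//   gpu_pipeline_init(&pipe);
//   pipe.input_layout = &s;       // from gpu_vertex_structure_add above
//   pipe.vertex_shader = &vs;
//   pipe.fragment_shader = &fs;
//   pipe.depth_write = true;
//   pipe.depth_mode = GPU_COMPARE_MODE_LESS; // assumed enum value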
void gpu_pipeline_init(gpu_pipeline_t *pipe) {
	pipe->input_layout = NULL;
	pipe->vertex_shader = NULL;
	pipe->fragment_shader = NULL;
	pipe->cull_mode = GPU_CULL_MODE_NEVER;
	pipe->depth_write = false;
	pipe->depth_mode = GPU_COMPARE_MODE_ALWAYS;
	pipe->blend_source = GPU_BLEND_ONE;
	pipe->blend_destination = GPU_BLEND_ZERO;
	pipe->alpha_blend_source = GPU_BLEND_ONE;
	pipe->alpha_blend_destination = GPU_BLEND_ZERO;
	for (int i = 0; i < 8; ++i) {
		pipe->color_write_mask_red[i] = true;
		pipe->color_write_mask_green[i] = true;
		pipe->color_write_mask_blue[i] = true;
		pipe->color_write_mask_alpha[i] = true;
		pipe->color_attachment[i] = GPU_TEXTURE_FORMAT_RGBA32;
	}
	pipe->color_attachment_count = 1;
	pipe->depth_attachment_bits = 0;
}

void gpu_create_framebuffers(int depth_buffer_bits) {
	for (int i = 0; i < GPU_FRAMEBUFFER_COUNT; ++i) {
		gpu_render_target_init2(&framebuffers[i], iron_window_width(), iron_window_height(), GPU_TEXTURE_FORMAT_RGBA32, i);
	}
	if (depth_buffer_bits > 0) {
		gpu_render_target_init(&framebuffer_depth, iron_window_width(), iron_window_height(), GPU_TEXTURE_FORMAT_D32);
	}
	else {
		// A zero size marks the depth buffer as absent (see gpu_begin).
		framebuffer_depth.width = framebuffer_depth.height = 0;
	}
}
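
// Destruction is deferred: the resource is copied into a queue and the actual
// teardown happens in gpu_present, likely so nothing is freed while a command
// list that still references it is in flight. Note that the fixed queue
// capacities (128 textures/buffers, 32 pipelines) are not bounds-checked.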
void gpu_texture_destroy(gpu_texture_t *texture) {
	textures_to_destroy[textures_to_destroy_count] = *texture;
	textures_to_destroy_count++;
}

void gpu_pipeline_destroy(gpu_pipeline_t *pipeline) {
	pipelines_to_destroy[pipelines_to_destroy_count] = *pipeline;
	pipelines_to_destroy_count++;
}

void gpu_buffer_destroy(gpu_buffer_t *buffer) {
	buffers_to_destroy[buffers_to_destroy_count] = *buffer;
	buffers_to_destroy_count++;
}

int gpu_vertex_data_size(gpu_vertex_data_t data) {
	switch (data) {
	case GPU_VERTEX_DATA_F32_1X:
		return 1 * 4;
	case GPU_VERTEX_DATA_F32_2X:
		return 2 * 4;
	case GPU_VERTEX_DATA_F32_3X:
		return 3 * 4;
	case GPU_VERTEX_DATA_F32_4X:
		return 4 * 4;
	case GPU_VERTEX_DATA_I16_2X_NORM:
		return 2 * 2;
	case GPU_VERTEX_DATA_I16_4X_NORM:
		return 4 * 2;
	}
	return 0; // unreachable for valid enum values; avoids falling off a non-void function
}

int gpu_vertex_struct_size(gpu_vertex_structure_t *s) {
	int size = 0;
	for (int i = 0; i < s->size; ++i) {
		size += gpu_vertex_data_size(s->elements[i].data);
	}
	return size;
}

int gpu_texture_format_size(gpu_texture_format_t format) {
	switch (format) {
	case GPU_TEXTURE_FORMAT_RGBA128:
		return 16;
	case GPU_TEXTURE_FORMAT_RGBA64:
		return 8;
	case GPU_TEXTURE_FORMAT_R16:
		return 2;
	case GPU_TEXTURE_FORMAT_R8:
		return 1;
	default:
		// Remaining formats (RGBA32, D32, ...) are 4 bytes per pixel.
		return 4;
	}
}