// webgpu_gpu.c — WebGPU (Emscripten) backend for the iron_gpu interface.
  1. #include <stdlib.h>
  2. #include <assert.h>
  3. #include <string.h>
  4. #include <webgpu/webgpu.h>
  5. #include <iron_gpu.h>
  6. #include <iron_math.h>
  7. #include <iron_system.h>
// Global backend state. Single-threaded: one device/queue and one in-flight
// frame (encoder + pass) at a time.
bool gpu_transpose_mat = false;
int renderTargetWidth;
int renderTargetHeight;
int newRenderTargetWidth;
int newRenderTargetHeight;
WGPUDevice device;          // acquired from the browser via Emscripten
WGPUQueue queue;            // default queue of `device`
WGPUSwapChain swapChain;    // canvas swap chain (BGRA8Unorm)
WGPUCommandEncoder encoder; // command encoder for the current frame
WGPURenderPassEncoder pass; // render pass for the current frame
int indexCount;             // index count of the last-bound index buffer
gpu_buffer_t *gpu_internal_current_vertex_buffer = NULL;
gpu_buffer_t *gpu_internal_current_index_buffer = NULL;
  21. void gpu_destroy() {}
  22. void gpu_init_internal(int depth_bits, bool vsync) {
  23. newRenderTargetWidth = renderTargetWidth = iron_window_width();
  24. newRenderTargetHeight = renderTargetHeight = iron_window_height();
  25. device = emscripten_webgpu_get_device();
  26. queue = wgpuDeviceGetQueue(device);
  27. WGPUSurfaceDescriptorFromCanvasHTMLSelector canvasDesc;
  28. memset(&canvasDesc, 0, sizeof(canvasDesc));
  29. canvasDesc.selector = "canvas";
  30. WGPUSurfaceDescriptor surfDesc;
  31. memset(&surfDesc, 0, sizeof(surfDesc));
  32. surfDesc.nextInChain = &canvasDesc;
  33. WGPUInstance instance = 0;
  34. WGPUSurface surface = wgpuInstanceCreateSurface(instance, &surfDesc);
  35. WGPUSwapChainDescriptor scDesc;
  36. memset(&scDesc, 0, sizeof(scDesc));
  37. scDesc.usage = WGPUTextureUsage_RenderAttachment;
  38. scDesc.format = WGPUTextureFormat_BGRA8Unorm;
  39. scDesc.width = iron_window_width();
  40. scDesc.height = iron_window_height();
  41. scDesc.presentMode = WGPUPresentMode_Fifo;
  42. swapChain = wgpuDeviceCreateSwapChain(device, surface, &scDesc);
  43. }
  44. void gpu_begin_internal(struct gpu_texture **targets, int count, gpu_texture_t *depth_buffer, unsigned flags, unsigned color, float depth) {
  45. WGPUCommandEncoderDescriptor ceDesc;
  46. memset(&ceDesc, 0, sizeof(ceDesc));
  47. encoder = wgpuDeviceCreateCommandEncoder(device, &ceDesc);
  48. WGPURenderPassColorAttachment attachment;
  49. memset(&attachment, 0, sizeof(attachment));
  50. attachment.view = wgpuSwapChainGetCurrentTextureView(swapChain);;
  51. attachment.loadOp = WGPULoadOp_Clear;
  52. attachment.storeOp = WGPUStoreOp_Store;
  53. WGPUColor color = {0, 0, 0, 1};
  54. attachment.clearValue = color;
  55. WGPURenderPassDescriptor passDesc;
  56. memset(&passDesc, 0, sizeof(passDesc));
  57. passDesc.colorAttachmentCount = 1;
  58. passDesc.colorAttachments = &attachment;
  59. pass = wgpuCommandEncoderBeginRenderPass(encoder, &passDesc);
  60. }
  61. void gpu_end_internal() {
  62. wgpuRenderPassEncoderEnd(pass);
  63. WGPUCommandBufferDescriptor cbDesc;
  64. memset(&cbDesc, 0, sizeof(cbDesc));
  65. WGPUCommandBuffer commands = wgpuCommandEncoderFinish(encoder, &cbDesc);
  66. wgpuQueueSubmit(queue, 1, &commands);
  67. }
// No-op: the browser presents the swap chain automatically after each frame.
void gpu_present_internal() {
}
  70. bool gpu_raytrace_supported() {
  71. return false;
  72. }
  73. void gpu_vertex_buffer_init(gpu_buffer_t *buffer, int count, gpu_vertex_structure_t *structure) {
  74. buffer->count = count;
  75. buffer->stride = 0;
  76. for (int i = 0; i < structure->size; ++i) {
  77. buffer->stride += gpu_vertex_data_size(structure->elements[i].data);
  78. }
  79. }
  80. float *gpu_vertex_buffer_lock(gpu_buffer_t *buffer) {
  81. WGPUBufferDescriptor bDesc;
  82. memset(&bDesc, 0, sizeof(bDesc));
  83. bDesc.size = buffer->count * buffer->stride * sizeof(float);
  84. bDesc.usage = WGPUBufferUsage_Vertex | WGPUBufferUsage_CopyDst;
  85. bDesc.mappedAtCreation = true;
  86. buffer->impl.buffer = wgpuDeviceCreateBuffer(device, &bDesc);
  87. return wgpuBufferGetMappedRange(buffer->impl.buffer, 0, bDesc.size);
  88. }
  89. void gpu_vertex_buffer_unlock(gpu_buffer_t *buffer) {
  90. wgpuBufferUnmap(buffer->impl.buffer);
  91. }
// Constant (uniform) buffers are not yet implemented for the WebGPU backend;
// gpu_pipeline_compile also creates pipelines with zero bind group layouts.
void gpu_constant_buffer_init(gpu_buffer_t *buffer, int size) {}
void gpu_constant_buffer_destroy(gpu_buffer_t *buffer) {}
void gpu_constant_buffer_lock(gpu_buffer_t *buffer, int start, int count) {}
void gpu_constant_buffer_unlock(gpu_buffer_t *buffer) {}
  96. void gpu_index_buffer_init(gpu_buffer_t *buffer, int count) {
  97. buffer->count = count;
  98. }
  99. void gpu_buffer_destroy(gpu_buffer_t *buffer) {}
  100. void *gpu_index_buffer_lock(gpu_buffer_t *buffer) {
  101. int start = 0;
  102. int count = buffer->count;
  103. WGPUBufferDescriptor bDesc;
  104. memset(&bDesc, 0, sizeof(bDesc));
  105. bDesc.size = count * 4;
  106. bDesc.usage = WGPUBufferUsage_Index | WGPUBufferUsage_CopyDst;
  107. bDesc.mappedAtCreation = true;
  108. buffer->impl.buffer = wgpuDeviceCreateBuffer(device, &bDesc);
  109. return wgpuBufferGetMappedRange(buffer->impl.buffer, start * 4, bDesc.size);
  110. }
  111. void gpu_index_buffer_unlock(gpu_buffer_t *buffer) {
  112. wgpuBufferUnmap(buffer->impl.buffer);
  113. }
// Texture upload from CPU pixels is not yet implemented for the WebGPU backend.
void gpu_texture_init_from_bytes(gpu_texture_t *texture, void *data, int width, int height, gpu_texture_format_t format) {}
// No-op. TODO(review): release the underlying WGPUTexture here.
void gpu_texture_destroy(gpu_texture_t *texture) {}
  116. void gpu_render_target_init(gpu_texture_t *target, int width, int height, gpu_texture_format_t format) {
  117. target->width = target->width = width;
  118. target->height = target->height = height;
  119. target->state = GPU_TEXTURE_STATE_RENDER_TARGET;
  120. target->data = NULL;
  121. }
  122. void gpu_render_target_init_framebuffer(gpu_texture_t *target, int width, int height, gpu_texture_format_t format) {}
  123. void gpu_pipeline_compile(gpu_pipeline_t *pipe) {
  124. WGPUColorTargetState csDesc;
  125. memset(&csDesc, 0, sizeof(csDesc));
  126. csDesc.format = WGPUTextureFormat_BGRA8Unorm;
  127. csDesc.writeMask = WGPUColorWriteMask_All;
  128. WGPUBlendState blend;
  129. memset(&blend, 0, sizeof(blend));
  130. blend.color.operation = WGPUBlendOperation_Add;
  131. blend.color.srcFactor = WGPUBlendFactor_One;
  132. blend.color.dstFactor = WGPUBlendFactor_Zero;
  133. blend.alpha.operation = WGPUBlendOperation_Add;
  134. blend.alpha.srcFactor = WGPUBlendFactor_One;
  135. blend.alpha.dstFactor = WGPUBlendFactor_Zero;
  136. csDesc.blend = &blend;
  137. WGPUPipelineLayoutDescriptor plDesc;
  138. memset(&plDesc, 0, sizeof(plDesc));
  139. plDesc.bindGroupLayoutCount = 0;
  140. plDesc.bindGroupLayouts = NULL;
  141. WGPUVertexAttribute vaDesc[8];
  142. memset(&vaDesc[0], 0, sizeof(vaDesc[0]) * 8);
  143. uint64_t offset = 0;
  144. for (int i = 0; i < pipe->input_layout->size; ++i) {
  145. vaDesc[i].shaderLocation = i;
  146. vaDesc[i].offset = offset;
  147. offset += gpu_vertex_data_size(pipe->input_layout->elements[i].data);
  148. switch (pipe->input_layout->elements[i].data) {
  149. case GPU_VERTEX_DATA_F32_1X:
  150. vaDesc[i].format = WGPUVertexFormat_Float32;
  151. break;
  152. case GPU_VERTEX_DATA_F32_2X:
  153. vaDesc[i].format = WGPUVertexFormat_Float32x2;
  154. break;
  155. case GPU_VERTEX_DATA_F32_3X:
  156. vaDesc[i].format = WGPUVertexFormat_Float32x3;
  157. break;
  158. case GPU_VERTEX_DATA_F32_4X:
  159. vaDesc[i].format = WGPUVertexFormat_Float32x4;
  160. break;
  161. case GPU_VERTEX_DATA_I16_2X_NORM:
  162. vaDesc[i].format = WGPUVertexFormat_Snorm16x2;
  163. break;
  164. case GPU_VERTEX_DATA_I16_4X_NORM:
  165. vaDesc[i].format = WGPUVertexFormat_Snorm16x4;
  166. break;
  167. }
  168. }
  169. WGPUVertexBufferLayout vbDesc;
  170. memset(&vbDesc, 0, sizeof(vbDesc));
  171. vbDesc.arrayStride = offset;
  172. vbDesc.attributeCount = pipe->input_layout->size;
  173. vbDesc.attributes = &vaDesc[0];
  174. WGPUVertexState vsDest;
  175. memset(&vsDest, 0, sizeof(vsDest));
  176. vsDest.module = pipe->vertex_shader->impl.module;
  177. vsDest.entryPoint = "main";
  178. vsDest.bufferCount = 1;
  179. vsDest.buffers = &vbDesc;
  180. WGPUFragmentState fragmentDest;
  181. memset(&fragmentDest, 0, sizeof(fragmentDest));
  182. fragmentDest.module = pipe->fragment_shader->impl.module;
  183. fragmentDest.entryPoint = "main";
  184. fragmentDest.targetCount = 1;
  185. fragmentDest.targets = &csDesc;
  186. WGPUPrimitiveState rsDesc;
  187. memset(&rsDesc, 0, sizeof(rsDesc));
  188. rsDesc.topology = WGPUPrimitiveTopology_TriangleList;
  189. rsDesc.stripIndexFormat = WGPUIndexFormat_Uint32;
  190. rsDesc.frontFace = WGPUFrontFace_CW;
  191. rsDesc.cullMode = WGPUCullMode_None;
  192. WGPUMultisampleState multisample;
  193. memset(&multisample, 0, sizeof(multisample));
  194. multisample.count = 1;
  195. multisample.mask = 0xffffffff;
  196. multisample.alphaToCoverageEnabled = false;
  197. WGPURenderPipelineDescriptor rpDesc;
  198. memset(&rpDesc, 0, sizeof(rpDesc));
  199. rpDesc.layout = wgpuDeviceCreatePipelineLayout(device, &plDesc);
  200. rpDesc.fragment = &fragmentDest;
  201. rpDesc.vertex = vsDest;
  202. rpDesc.multisample = multisample;
  203. rpDesc.primitive = rsDesc;
  204. pipe->impl.pipeline = wgpuDeviceCreateRenderPipeline(device, &rpDesc);
  205. }
  206. void gpu_shader_init(gpu_shader_t *shader, const void *source, size_t length, gpu_shader_type_t type) {
  207. WGPUShaderModuleSPIRVDescriptor smSpirvDesc;
  208. memset(&smSpirvDesc, 0, sizeof(smSpirvDesc));
  209. smSpirvDesc.chain.sType = WGPUSType_ShaderModuleSPIRVDescriptor;
  210. smSpirvDesc.codeSize = length / 4;
  211. smSpirvDesc.code = source;
  212. WGPUShaderModuleDescriptor smDesc;
  213. memset(&smDesc, 0, sizeof(smDesc));
  214. smDesc.nextInChain = &smSpirvDesc;
  215. shader->impl.module = wgpuDeviceCreateShaderModule(device, &smDesc);
  216. }
  217. void gpu_shader_destroy(gpu_shader_t *shader) {}
  218. void gpu_destroy() {}
  219. void gpu_barrier(gpu_texture_t *renderTarget, int state_after) {}
  220. void gpu_draw_internal() {
  221. wgpuRenderPassEncoderDrawIndexed(pass, indexCount, 1, 0, 0, 0);
  222. }
// Viewport/scissor control is not yet implemented for the WebGPU backend.
// TODO(review): forward to wgpuRenderPassEncoderSetViewport /
// wgpuRenderPassEncoderSetScissorRect.
void gpu_viewport(int x, int y, int width, int height) {}
void gpu_scissor(int x, int y, int width, int height) {}
void gpu_disable_scissor() {}
  226. void gpu_set_pipeline(struct gpu_pipeline *pipeline) {
  227. wgpuRenderPassEncoderSetPipeline(pass, pipeline->impl.pipeline);
  228. }
  229. void gpu_set_pipeline_layout() {}
  230. void gpu_set_vertex_buffer(struct gpu_buffer *buffer) {
  231. uint64_t size = buffer->count * buffer->stride;
  232. wgpuRenderPassEncoderSetVertexBuffer(pass, 0, buffer->impl.buffer, 0, size);
  233. }
  234. void gpu_set_index_buffer(struct gpu_buffer *buffer) {
  235. indexCount = buffer->count;
  236. uint64_t size = buffer->count * sizeof(int);
  237. wgpuRenderPassEncoderSetIndexBuffer(pass, buffer->impl.buffer, WGPUIndexFormat_Uint32, 0, size);
  238. }
// Render-target pixel readback is not yet implemented for the WebGPU backend.
void gpu_get_render_target_pixels(gpu_texture_t *render_target, uint8_t *data) {}
// No-op synchronization stubs: submission happens in gpu_end_internal and the
// browser schedules the work.
void gpu_wait() {}
void gpu_execute_and_wait() {}
// No-op: constant buffers and texture binding are not yet implemented;
// pipelines are created with zero bind group layouts.
void gpu_set_constant_buffer(struct gpu_buffer *buffer, int offset, size_t size) {}
void gpu_set_texture(int unit, gpu_texture_t *texture) {}