luboslenco 3 weeks ago
parent
commit
a85f0131b1

+ 2 - 6
base/sources/backends/direct3d12_gpu.c

@@ -150,7 +150,7 @@ void gpu_barrier(gpu_texture_t *render_target, gpu_texture_state_t state_after)
 }
 
 void gpu_destroy() {
-	gpu_wait();
+	wait_for_fence(fence, fence_value, fence_event);
 	for (int i = 0; i < GPU_FRAMEBUFFER_COUNT; ++i) {
 		gpu_texture_destroy_internal(&framebuffers[i]);
 	}
@@ -411,16 +411,12 @@ void gpu_end_internal() {
 	current_render_targets_count = 0;
 }
 
-void gpu_wait() {
-	wait_for_fence(fence, fence_value, fence_event);
-}
-
 void gpu_execute_and_wait() {
 	command_list->lpVtbl->Close(command_list);
 	ID3D12CommandList *command_lists[] = {(ID3D12CommandList *)command_list};
 	queue->lpVtbl->ExecuteCommandLists(queue, 1, command_lists);
 	queue->lpVtbl->Signal(queue, fence, ++fence_value);
-	gpu_wait();
+	wait_for_fence(fence, fence_value, fence_event);
 	command_allocator->lpVtbl->Reset(command_allocator);
 	command_list->lpVtbl->Reset(command_list, command_allocator, NULL);
 

+ 1 - 5
base/sources/backends/metal_gpu.m

@@ -234,17 +234,13 @@ void gpu_end_internal() {
 	current_render_targets_count = 0;
 }
 
-void gpu_wait() {
-	[command_buffer waitUntilCompleted];
-}
-
 void gpu_execute_and_wait() {
 	if (gpu_in_use) {
 		[command_encoder endEncoding];
 	}
 
 	[command_buffer commit];
-	gpu_wait();
+	[command_buffer waitUntilCompleted];
 	id<MTLCommandQueue> queue = get_metal_queue();
 	command_buffer = [queue commandBuffer];
 

+ 2 - 6
base/sources/backends/vulkan_gpu.c

@@ -1128,10 +1128,6 @@ void gpu_end_internal() {
 	}
 }
 
-void gpu_wait() {
-	vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
-}
-
 void gpu_execute_and_wait() {
 	if (gpu_in_use) {
 		vkCmdEndRendering(command_buffer);
@@ -1145,7 +1141,7 @@ void gpu_execute_and_wait() {
 		.pCommandBuffers = &command_buffer,
 	};
 	vkQueueSubmit(queue, 1, &submit_info, fence);
-	gpu_wait();
+	vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
 
 	vkResetCommandBuffer(command_buffer, 0);
 	VkCommandBufferBeginInfo begin_info = {
@@ -1182,7 +1178,7 @@ void gpu_present_internal() {
 		.pWaitDstStageMask = (VkPipelineStageFlags[]){VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT},
 	};
 	vkQueueSubmit(queue, 1, &submit_info, fence);
-	gpu_wait();
+	vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
 
 	VkPresentInfoKHR present = {
 		.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,

+ 0 - 1
base/sources/backends/webgpu_gpu.c

@@ -285,7 +285,6 @@ void gpu_set_index_buffer(struct gpu_buffer *buffer) {
 }
 
 void gpu_get_render_target_pixels(gpu_texture_t *render_target, uint8_t *data) {}
-void gpu_wait() {}
 void gpu_execute_and_wait() {}
 void gpu_set_constant_buffer(struct gpu_buffer *buffer, int offset, size_t size) {}
 void gpu_set_texture(int unit, gpu_texture_t *texture) {}

+ 0 - 1
base/sources/iron_gpu.h

@@ -148,7 +148,6 @@ void gpu_begin(gpu_texture_t **targets, int count, gpu_texture_t *depth_buffer,
 void gpu_begin_internal(gpu_texture_t **targets, int count, gpu_texture_t *depth_buffer, unsigned flags, unsigned color, float depth);
 void gpu_end(void);
 void gpu_end_internal(void);
-void gpu_wait(void);
 void gpu_execute_and_wait(void);
 void gpu_present(void);
 void gpu_present_internal(void);