Bladeren bron

Vulkan fixes

luboslenco 4 maanden geleden
bovenliggende
commit
dfd6ef7499

+ 1 - 1
armorlab/sources/make_mesh.ts

@@ -198,6 +198,6 @@ function make_mesh_run(data: material_t, layer_pass: i32 = 0): node_shader_conte
 
 	parser_material_finalize(con_mesh);
 	con_mesh.data.shader_from_source = true;
-	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_mesh.data.vertex_shader), ADDRESS(con_mesh.data.fragment_shader));
+	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_mesh.data.vertex_shader), ADDRESS(con_mesh.data.fragment_shader), ADDRESS(con_mesh.data._.vertex_shader_size), ADDRESS(con_mesh.data._.fragment_shader_size));
 	return con_mesh;
 }

+ 2 - 2
armorlab/sources/make_paint.ts

@@ -59,7 +59,7 @@ function make_paint_run(data: material_t, matcon: material_context_t): node_shad
 		node_shader_write_frag(kong, "output[2] = sample_lod(texpaint_pack, sampler_linear, tex_coord_inp, 0.0);");
 		node_shader_write_frag(kong, "output[3].rg = tex_coord_inp.xy;");
 		con_paint.data.shader_from_source = true;
-		gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_paint.data.vertex_shader), ADDRESS(con_paint.data.fragment_shader));
+		gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_paint.data.vertex_shader), ADDRESS(con_paint.data.fragment_shader), ADDRESS(con_paint.data._.vertex_shader_size), ADDRESS(con_paint.data._.fragment_shader_size));
 		return con_paint;
 	}
 
@@ -162,7 +162,7 @@ function make_paint_run(data: material_t, matcon: material_context_t): node_shad
 	parser_material_finalize(con_paint);
 	parser_material_sample_keep_aspect = false;
 	con_paint.data.shader_from_source = true;
-	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_paint.data.vertex_shader), ADDRESS(con_paint.data.fragment_shader));
+	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_paint.data.vertex_shader), ADDRESS(con_paint.data.fragment_shader), ADDRESS(con_paint.data._.vertex_shader_size), ADDRESS(con_paint.data._.fragment_shader_size));
 
 	return con_paint;
 }

+ 1 - 0
armorpaint/shaders/layer_merge.kong

@@ -234,6 +234,7 @@ fun layer_merge_frag(input: vert_out): float4 {
 				str),
 			max(col0.a, cola.a));
 	}
+
 	*/
 
 	return float4(1, 1, 1, 1);

+ 2 - 2
armorpaint/sources/make_mesh.ts

@@ -346,7 +346,7 @@ function make_mesh_run(data: material_t, layer_pass: i32 = 0): node_shader_conte
 			node_shader_write_frag(kong, "output[2] = float4(occlusion, roughness, metallic, height);");
 			parser_material_finalize(con_mesh);
 			con_mesh.data.shader_from_source = true;
-			gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_mesh.data.vertex_shader), ADDRESS(con_mesh.data.fragment_shader));
+			gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_mesh.data.vertex_shader), ADDRESS(con_mesh.data.fragment_shader), ADDRESS(con_mesh.data._.vertex_shader_size), ADDRESS(con_mesh.data._.fragment_shader_size));
 			return con_mesh;
 		}
 
@@ -492,7 +492,7 @@ function make_mesh_run(data: material_t, layer_pass: i32 = 0): node_shader_conte
 	parser_material_finalize(con_mesh);
 
 	con_mesh.data.shader_from_source = true;
-	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_mesh.data.vertex_shader), ADDRESS(con_mesh.data.fragment_shader));
+	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_mesh.data.vertex_shader), ADDRESS(con_mesh.data.fragment_shader), ADDRESS(con_mesh.data._.vertex_shader_size), ADDRESS(con_mesh.data._.fragment_shader_size));
 
 	return con_mesh;
 }

+ 1 - 1
armorpaint/sources/make_mesh_preview.ts

@@ -148,7 +148,7 @@ function make_mesh_preview_run(data: material_t, matcon: material_context_t): no
 	///end
 
 	con_mesh.data.shader_from_source = true;
-	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_mesh.data.vertex_shader), ADDRESS(con_mesh.data.fragment_shader));
+	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_mesh.data.vertex_shader), ADDRESS(con_mesh.data.fragment_shader), ADDRESS(con_mesh.data._.vertex_shader_size), ADDRESS(con_mesh.data._.fragment_shader_size));
 
 	return con_mesh;
 }

+ 1 - 1
armorpaint/sources/make_node_preview.ts

@@ -76,7 +76,7 @@ function make_node_preview_run(data: material_t, matcon: material_context_t, nod
 	parser_material_finalize(con_mesh);
 
 	con_mesh.data.shader_from_source = true;
-	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_mesh.data.vertex_shader), ADDRESS(con_mesh.data.fragment_shader));
+	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_mesh.data.vertex_shader), ADDRESS(con_mesh.data.fragment_shader), ADDRESS(con_mesh.data._.vertex_shader_size), ADDRESS(con_mesh.data._.fragment_shader_size));
 
 	return con_mesh;
 }

+ 3 - 3
armorpaint/sources/make_paint.ts

@@ -63,7 +63,7 @@ function make_paint_run(data: material_t, matcon: material_context_t): node_shad
 		// Init raytraced bake
 		make_bake_position_normal(kong);
 		con_paint.data.shader_from_source = true;
-		gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_paint.data.vertex_shader), ADDRESS(con_paint.data.fragment_shader));
+		gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_paint.data.vertex_shader), ADDRESS(con_paint.data.fragment_shader), ADDRESS(con_paint.data._.vertex_shader_size), ADDRESS(con_paint.data._.fragment_shader_size));
 		return con_paint;
 	}
 
@@ -74,7 +74,7 @@ function make_paint_run(data: material_t, matcon: material_context_t): node_shad
 	if (context_raw.tool == workspace_tool_t.COLORID || context_raw.tool == workspace_tool_t.PICKER || context_raw.tool == workspace_tool_t.MATERIAL) {
 		make_colorid_picker_run(kong);
 		con_paint.data.shader_from_source = true;
-		gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_paint.data.vertex_shader), ADDRESS(con_paint.data.fragment_shader));
+		gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_paint.data.vertex_shader), ADDRESS(con_paint.data.fragment_shader), ADDRESS(con_paint.data._.vertex_shader_size), ADDRESS(con_paint.data._.fragment_shader_size));
 		return con_paint;
 	}
 
@@ -493,7 +493,7 @@ function make_paint_run(data: material_t, matcon: material_context_t): node_shad
 	parser_material_triplanar = false;
 	parser_material_sample_keep_aspect = false;
 	con_paint.data.shader_from_source = true;
-	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_paint.data.vertex_shader), ADDRESS(con_paint.data.fragment_shader));
+	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_paint.data.vertex_shader), ADDRESS(con_paint.data.fragment_shader), ADDRESS(con_paint.data._.vertex_shader_size), ADDRESS(con_paint.data._.fragment_shader_size));
 
 	return con_paint;
 }

+ 2 - 2
armorsculpt/sources/make_mesh.ts

@@ -238,7 +238,7 @@ function make_mesh_run(data: material_t, layer_pass: i32 = 0): node_shader_conte
 			node_shader_write_frag(kong, "output[2] = float4(occlusion, roughness, metallic, height);");
 			parser_material_finalize(con_mesh);
 			con_mesh.data.shader_from_source = true;
-			gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_mesh.data.vertex_shader), ADDRESS(con_mesh.data.fragment_shader));
+			gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_mesh.data.vertex_shader), ADDRESS(con_mesh.data.fragment_shader), ADDRESS(con_mesh.data._.vertex_shader_size), ADDRESS(con_mesh.data._.fragment_shader_size));
 			return con_mesh;
 		}
 
@@ -322,7 +322,7 @@ function make_mesh_run(data: material_t, layer_pass: i32 = 0): node_shader_conte
 
 	parser_material_finalize(con_mesh);
 	con_mesh.data.shader_from_source = true;
-	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_mesh.data.vertex_shader), ADDRESS(con_mesh.data.fragment_shader));
+	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_mesh.data.vertex_shader), ADDRESS(con_mesh.data.fragment_shader), ADDRESS(con_mesh.data._.vertex_shader_size), ADDRESS(con_mesh.data._.fragment_shader_size));
 	return con_mesh;
 }
 

+ 1 - 1
armorsculpt/sources/make_mesh_preview.ts

@@ -153,7 +153,7 @@ function make_mesh_preview_run(data: material_t, matcon: material_context_t): no
 	///end
 
 	con_mesh.data.shader_from_source = true;
-	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_paint.data.vertex_shader), ADDRESS(con_paint.data.fragment_shader));
+	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_paint.data.vertex_shader), ADDRESS(con_paint.data.fragment_shader), ADDRESS(con_paint.data._.vertex_shader_size), ADDRESS(con_paint.data._.fragment_shader_size));
 
 	return con_mesh;
 }

+ 1 - 1
armorsculpt/sources/make_sculpt.ts

@@ -87,7 +87,7 @@ function make_sculpt_run(data: material_t, matcon: material_context_t): node_sha
 
 	parser_material_finalize(con_paint);
 	con_paint.data.shader_from_source = true;
-	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_paint.data.vertex_shader), ADDRESS(con_paint.data.fragment_shader));
+	gpu_create_shaders_from_kong(node_shader_get(kong), ADDRESS(con_paint.data.vertex_shader), ADDRESS(con_paint.data.fragment_shader), ADDRESS(con_paint.data._.vertex_shader_size), ADDRESS(con_paint.data._.fragment_shader_size));
 
 	return con_paint;
 }

+ 0 - 4
base/sources/backends/direct3d12_gpu.c

@@ -1536,10 +1536,6 @@ void iron_gpu_vertex_buffer_unlock(iron_gpu_buffer_t *buffer) {
 	buffer->impl.uploadBuffer->lpVtbl->Unmap(buffer->impl.uploadBuffer, 0, &range);
 }
 
-int iron_gpu_internal_vertex_buffer_set(iron_gpu_buffer_t *buffer) {
-	return 0;
-}
-
 int iron_gpu_vertex_buffer_count(iron_gpu_buffer_t *buffer) {
 	return buffer->myCount;
 }

+ 111 - 341
base/sources/backends/vulkan_gpu.c

@@ -15,7 +15,6 @@
 #include <iron_system.h>
 #include "vulkan_gpu.h"
 
-#define MAX_THINGS 256
 #define MAX_DESCRIPTOR_SETS 1024
 #define MAX_PRESENT_MODES 256
 #define GET_INSTANCE_PROC_ADDR(instance, entrypoint)                                                                                                           \
@@ -26,16 +25,6 @@
 		}                                                                                                                                                      \
 	}
 
-struct indexed_name {
-	uint32_t id;
-	char *name;
-};
-
-struct indexed_index {
-	uint32_t id;
-	uint32_t value;
-};
-
 struct descriptor_set {
 	int id;
 	bool in_use;
@@ -43,14 +32,9 @@ struct descriptor_set {
 	VkDescriptorSet set;
 };
 
-typedef struct inst {
-	iron_matrix4x4_t m;
-	int i;
-} inst_t;
-
 bool iron_gpu_transpose_mat = true;
 
-static iron_gpu_texture_t *vulkan_textures[16] = {
+static iron_gpu_texture_t *current_textures[16] = {
 	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
 };
@@ -79,17 +63,6 @@ static VkDescriptorPool descriptor_pool;
 static struct descriptor_set descriptor_sets[MAX_DESCRIPTOR_SETS] = {0};
 static int descriptor_sets_count = 0;
 
-static struct indexed_name names[MAX_THINGS];
-static uint32_t names_size = 0;
-static struct indexed_name member_names[MAX_THINGS];
-static uint32_t member_names_size = 0;
-static struct indexed_index locs[MAX_THINGS];
-static uint32_t locs_size = 0;
-static struct indexed_index bindings[MAX_THINGS];
-static uint32_t bindings_size = 0;
-static struct indexed_index offsets[MAX_THINGS];
-static uint32_t offsets_size = 0;
-
 static bool began = false;
 static VkPhysicalDeviceProperties gpu_props;
 static VkQueueFamilyProperties *queue_props;
@@ -106,6 +79,69 @@ VkBool32 iron_vulkan_get_physical_device_presentation_support(VkPhysicalDevice p
 VkResult iron_vulkan_create_surface(VkInstance instance, VkSurfaceKHR *surface);
 void gpu_internal_resize(int, int);
 
+static int format_size(VkFormat format) {
+	switch (format) {
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+		return 16;
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+		return 8;
+	case VK_FORMAT_R16_SFLOAT:
+		return 2;
+	case VK_FORMAT_R8_UNORM:
+		return 1;
+	default:
+		return 4;
+	}
+}
+
+static VkCullModeFlagBits convert_cull_mode(iron_gpu_cull_mode_t cull_mode) {
+	switch (cull_mode) {
+	case IRON_GPU_CULL_MODE_CLOCKWISE:
+		return VK_CULL_MODE_BACK_BIT;
+	case IRON_GPU_CULL_MODE_COUNTERCLOCKWISE:
+		return VK_CULL_MODE_FRONT_BIT;
+	case IRON_GPU_CULL_MODE_NEVER:
+	default:
+		return VK_CULL_MODE_NONE;
+	}
+}
+
+static VkCompareOp convert_compare_mode(iron_gpu_compare_mode_t compare) {
+	switch (compare) {
+	default:
+	case IRON_GPU_COMPARE_MODE_ALWAYS:
+		return VK_COMPARE_OP_ALWAYS;
+	case IRON_GPU_COMPARE_MODE_NEVER:
+		return VK_COMPARE_OP_NEVER;
+	case IRON_GPU_COMPARE_MODE_LESS:
+		return VK_COMPARE_OP_LESS;
+	}
+}
+
+static VkBlendFactor convert_blend_factor(iron_gpu_blending_factor_t factor) {
+	switch (factor) {
+	case IRON_GPU_BLEND_ONE:
+		return VK_BLEND_FACTOR_ONE;
+	case IRON_GPU_BLEND_ZERO:
+		return VK_BLEND_FACTOR_ZERO;
+	case IRON_GPU_BLEND_SOURCE_ALPHA:
+		return VK_BLEND_FACTOR_SRC_ALPHA;
+	case IRON_GPU_BLEND_DEST_ALPHA:
+		return VK_BLEND_FACTOR_DST_ALPHA;
+	case IRON_GPU_BLEND_INV_SOURCE_ALPHA:
+		return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+	case IRON_GPU_BLEND_INV_DEST_ALPHA:
+		return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
+	}
+}
+
+static VkBlendOp convert_blend_operation(iron_gpu_blending_operation_t op) {
+	switch (op) {
+	case IRON_GPU_BLENDOP_ADD:
+		return VK_BLEND_OP_ADD;
+	}
+}
+
 VkBool32 vkDebugUtilsMessengerCallbackEXT(
 	VkDebugUtilsMessageSeverityFlagBitsEXT message_severity,
 	VkDebugUtilsMessageTypeFlagsEXT message_types,
@@ -141,7 +177,6 @@ void setup_init_cmd() {
 			.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
 			.commandBufferCount = 1,
 		};
-
 		vkAllocateCommandBuffers(vk_ctx.device, &cmd, &vk_ctx.setup_cmd);
 
 		VkCommandBufferBeginInfo cmd_buf_info = {
@@ -150,7 +185,6 @@ void setup_init_cmd() {
 			.flags = 0,
 			.pInheritanceInfo = NULL,
 		};
-
 		vkBeginCommandBuffer(vk_ctx.setup_cmd, &cmd_buf_info);
 	}
 }
@@ -1210,21 +1244,12 @@ bool iron_vulkan_internal_get_size(int *width, int *height) {
 	}
 }
 
-bool iron_gpu_raytrace_supported() {
-#ifdef IRON_ANDROID
-	return false;
-#else
-	return true;
-#endif
-}
-
 int iron_gpu_max_bound_textures(void) {
 	VkPhysicalDeviceProperties props;
 	vkGetPhysicalDeviceProperties(vk_ctx.gpu, &props);
 	return props.limits.maxPerStageDescriptorSamplers;
 }
 
-
 static void end_pass(iron_gpu_command_list_t *list) {
 	if (in_render_pass) {
 		vkCmdEndRenderPass(list->impl._buffer);
@@ -1232,22 +1257,7 @@ static void end_pass(iron_gpu_command_list_t *list) {
 	}
 
 	for (int i = 0; i < 16; ++i) {
-		vulkan_textures[i] = NULL;
-	}
-}
-
-static int format_size(VkFormat format) {
-	switch (format) {
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-		return 16;
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-		return 8;
-	case VK_FORMAT_R16_SFLOAT:
-		return 2;
-	case VK_FORMAT_R8_UNORM:
-		return 1;
-	default:
-		return 4;
+		current_textures[i] = NULL;
 	}
 }
 
@@ -1444,9 +1454,11 @@ void iron_gpu_command_list_end(iron_gpu_command_list_t *list) {
 	vkQueueSubmit(vk_ctx.queue, 1, &submit_info, list->impl.fence);
 }
 
-void iron_gpu_command_list_render_target_to_framebuffer_barrier(iron_gpu_command_list_t *list, struct iron_gpu_texture *renderTarget) {}
+void iron_gpu_command_list_render_target_to_framebuffer_barrier(iron_gpu_command_list_t *list, struct iron_gpu_texture *renderTarget) {
+}
 
-void iron_gpu_command_list_framebuffer_to_render_target_barrier(iron_gpu_command_list_t *list, struct iron_gpu_texture *renderTarget) {}
+void iron_gpu_command_list_framebuffer_to_render_target_barrier(iron_gpu_command_list_t *list, struct iron_gpu_texture *renderTarget) {
+}
 
 void iron_gpu_command_list_draw(iron_gpu_command_list_t *list) {
 	int start = 0;
@@ -1792,11 +1804,14 @@ void iron_gpu_command_list_set_render_targets(iron_gpu_command_list_t *list, str
 	vkCmdClearAttachments(list->impl._buffer, count, attachments, 1, &clearRect);
 }
 
-void iron_gpu_command_list_upload_index_buffer(iron_gpu_command_list_t *list, struct iron_gpu_buffer *buffer) {}
+void iron_gpu_command_list_upload_index_buffer(iron_gpu_command_list_t *list, struct iron_gpu_buffer *buffer) {
+}
 
-void iron_gpu_command_list_upload_vertex_buffer(iron_gpu_command_list_t *list, struct iron_gpu_buffer *buffer) {}
+void iron_gpu_command_list_upload_vertex_buffer(iron_gpu_command_list_t *list, struct iron_gpu_buffer *buffer) {
+}
 
-void iron_gpu_command_list_upload_texture(iron_gpu_command_list_t *list, struct iron_gpu_texture *texture) {}
+void iron_gpu_command_list_upload_texture(iron_gpu_command_list_t *list, struct iron_gpu_texture *texture) {
+}
 
 void iron_gpu_command_list_get_render_target_pixels(iron_gpu_command_list_t *list, iron_gpu_texture_t *render_target, uint8_t *data) {
 	VkFormat format = render_target->impl.format;
@@ -1880,7 +1895,7 @@ void iron_gpu_command_list_render_target_to_texture_barrier(iron_gpu_command_lis
 int calc_descriptor_id(void) {
 	int texture_count = 0;
 	for (int i = 0; i < 16; ++i) {
-		if (vulkan_textures[i] != NULL) {
+		if (current_textures[i] != NULL) {
 			texture_count++;
 		}
 	}
@@ -1895,14 +1910,14 @@ static int write_tex_descs(VkDescriptorImageInfo *tex_descs) {
 
 	int texture_count = 0;
 	for (int i = 0; i < 16; ++i) {
-		if (vulkan_textures[i] != NULL) {
+		if (current_textures[i] != NULL) {
 			// tex_descs[i].sampler = vulkan_samplers[i];
-			if (vulkan_textures[i]->impl.stage_depth == i) {
-				tex_descs[i].imageView = vulkan_textures[i]->impl.depthView;
-				vulkan_textures[i]->impl.stage_depth = -1;
+			if (current_textures[i]->impl.stage_depth == i) {
+				tex_descs[i].imageView = current_textures[i]->impl.depthView;
+				current_textures[i]->impl.stage_depth = -1;
 			}
 			else {
-				tex_descs[i].imageView = vulkan_textures[i]->impl.view;
+				tex_descs[i].imageView = current_textures[i]->impl.view;
 			}
 			texture_count++;
 		}
@@ -1911,12 +1926,6 @@ static int write_tex_descs(VkDescriptorImageInfo *tex_descs) {
 	return texture_count;
 }
 
-static bool textures_changed(struct descriptor_set *set) {
-	VkDescriptorImageInfo tex_desc[16];
-	write_tex_descs(tex_desc);
-	return memcmp(&tex_desc, &set->tex_desc, sizeof(tex_desc)) != 0;
-}
-
 static void update_textures(struct descriptor_set *set) {
 	memset(&set->tex_desc, 0, sizeof(set->tex_desc));
 
@@ -1934,7 +1943,7 @@ static void update_textures(struct descriptor_set *set) {
 		writes[i].pImageInfo = &set->tex_desc[i];
 	}
 
-	if (vulkan_textures[0] != NULL) {
+	if (current_textures[0] != NULL) {
 		vkUpdateDescriptorSets(vk_ctx.device, texture_count, writes, 0, NULL);
 	}
 }
@@ -1948,11 +1957,6 @@ VkDescriptorSet get_descriptor_set() {
 				update_textures(&descriptor_sets[i]);
 				return descriptor_sets[i].set;
 			}
-			else {
-				// if (!textures_changed(&descriptor_sets[i])) {
-				// 	return descriptor_sets[i].set;
-				// }
-			}
 		}
 	}
 
@@ -1986,14 +1990,14 @@ VkDescriptorSet get_descriptor_set() {
 
 	int texture_count = 0;
 	for (int i = 0; i < 16; ++i) {
-		if (vulkan_textures[i] != NULL) {
+		if (current_textures[i] != NULL) {
 			// tex_desc[i].sampler = vulkan_samplers[i];
-			if (vulkan_textures[i]->impl.stage_depth == i) {
-				tex_desc[i].imageView = vulkan_textures[i]->impl.depthView;
-				vulkan_textures[i]->impl.stage_depth = -1;
+			if (current_textures[i]->impl.stage_depth == i) {
+				tex_desc[i].imageView = current_textures[i]->impl.depthView;
+				current_textures[i]->impl.stage_depth = -1;
 			}
 			else {
-				tex_desc[i].imageView = vulkan_textures[i]->impl.view;
+				tex_desc[i].imageView = current_textures[i]->impl.view;
 			}
 			texture_count++;
 		}
@@ -2026,7 +2030,7 @@ VkDescriptorSet get_descriptor_set() {
 		writes[i].pImageInfo = &tex_desc[i - 2];
 	}
 
-	if (vulkan_textures[0] != NULL) {
+	if (current_textures[0] != NULL) {
 		if (vk_ctx.vertex_uniform_buffer != NULL && vk_ctx.fragment_uniform_buffer != NULL) {
 			vkUpdateDescriptorSets(vk_ctx.device, 2 + texture_count, writes, 0, NULL);
 		}
@@ -2064,190 +2068,12 @@ void iron_gpu_command_list_wait(iron_gpu_command_list_t *list) {
 
 void iron_gpu_command_list_set_texture(iron_gpu_command_list_t *list, iron_gpu_texture_unit_t unit, iron_gpu_texture_t *texture) {
 	texture->impl.stage = unit.offset;
-	vulkan_textures[unit.offset] = texture;
+	current_textures[unit.offset] = texture;
 }
 
 void iron_gpu_command_list_set_texture_from_render_target_depth(iron_gpu_command_list_t *list, iron_gpu_texture_unit_t unit, iron_gpu_texture_t *target) {
 	target->impl.stage_depth = unit.offset;
-	vulkan_textures[unit.offset] = target;
-}
-
-static bool has_number(iron_internal_named_number *named_numbers, const char *name) {
-	for (int i = 0; i < IRON_INTERNAL_NAMED_NUMBER_COUNT; ++i) {
-		if (strcmp(named_numbers[i].name, name) == 0) {
-			return true;
-		}
-	}
-	return false;
-}
-
-static uint32_t find_number(iron_internal_named_number *named_numbers, const char *name) {
-	for (int i = 0; i < IRON_INTERNAL_NAMED_NUMBER_COUNT; ++i) {
-		if (strcmp(named_numbers[i].name, name) == 0) {
-			return named_numbers[i].number;
-		}
-	}
-	return -1;
-}
-
-static void set_number(iron_internal_named_number *named_numbers, const char *name, uint32_t number) {
-	for (int i = 0; i < IRON_INTERNAL_NAMED_NUMBER_COUNT; ++i) {
-		if (strcmp(named_numbers[i].name, name) == 0) {
-			named_numbers[i].number = number;
-			return;
-		}
-	}
-
-	for (int i = 0; i < IRON_INTERNAL_NAMED_NUMBER_COUNT; ++i) {
-		if (named_numbers[i].name[0] == 0) {
-			strcpy(named_numbers[i].name, name);
-			named_numbers[i].number = number;
-			return;
-		}
-	}
-}
-
-static void add_name(uint32_t id, char *name) {
-	names[names_size].id = id;
-	names[names_size].name = name;
-	++names_size;
-}
-
-static char *find_name(uint32_t id) {
-	for (uint32_t i = 0; i < names_size; ++i) {
-		if (names[i].id == id) {
-			return names[i].name;
-		}
-	}
-	return NULL;
-}
-
-static void add_member_name(uint32_t id, char *name) {
-	member_names[member_names_size].id = id;
-	member_names[member_names_size].name = name;
-	++member_names_size;
-}
-
-static char *find_member_name(uint32_t id) {
-	for (uint32_t i = 0; i < member_names_size; ++i) {
-		if (member_names[i].id == id) {
-			return member_names[i].name;
-		}
-	}
-	return NULL;
-}
-
-static void add_location(uint32_t id, uint32_t location) {
-	locs[locs_size].id = id;
-	locs[locs_size].value = location;
-	++locs_size;
-}
-
-static void add_binding(uint32_t id, uint32_t binding) {
-	bindings[bindings_size].id = id;
-	bindings[bindings_size].value = binding;
-	++bindings_size;
-}
-
-static void add_offset(uint32_t id, uint32_t offset) {
-	offsets[offsets_size].id = id;
-	offsets[offsets_size].value = offset;
-	++offsets_size;
-}
-
-static void parse_shader(uint32_t *shader_source, int shader_length, iron_internal_named_number *locations, iron_internal_named_number *textureBindings,
-                         iron_internal_named_number *uniformOffsets) {
-	names_size = 0;
-	member_names_size = 0;
-	locs_size = 0;
-	bindings_size = 0;
-	offsets_size = 0;
-
-	uint32_t *spirv = (uint32_t *)shader_source;
-	int spirvsize = shader_length / 4;
-	int index = 0;
-
-	uint32_t magicNumber = spirv[index++];
-	uint32_t version = spirv[index++];
-	uint32_t generator = spirv[index++];
-	uint32_t bound = spirv[index++];
-	index++;
-
-	while (index < spirvsize) {
-		int wordCount = spirv[index] >> 16;
-		uint32_t opcode = spirv[index] & 0xffff;
-
-		uint32_t *operands = wordCount > 1 ? &spirv[index + 1] : NULL;
-		uint32_t length = wordCount - 1;
-
-		switch (opcode) {
-		case 5: { // OpName
-			uint32_t id = operands[0];
-			char *string = (char *)&operands[1];
-			add_name(id, string);
-			break;
-		}
-		case 6: { // OpMemberName
-			uint32_t type = operands[0];
-			char *name = find_name(type);
-			if (name != NULL && strcmp(name, "_k_global_uniform_buffer_type") == 0) {
-				uint32_t member = operands[1];
-				char *string = (char *)&operands[2];
-				add_member_name(member, string);
-			}
-			break;
-		}
-		case 71: { // OpDecorate
-			uint32_t id = operands[0];
-			uint32_t decoration = operands[1];
-			if (decoration == 30) { // location
-				uint32_t location = operands[2];
-				add_location(id, location);
-			}
-			if (decoration == 33) { // binding
-				uint32_t binding = operands[2];
-				add_binding(id, binding);
-			}
-			break;
-		}
-		case 72: { // OpMemberDecorate
-			uint32_t type = operands[0];
-			char *name = find_name(type);
-			if (name != NULL && strcmp(name, "_k_global_uniform_buffer_type") == 0) {
-				uint32_t member = operands[1];
-				uint32_t decoration = operands[2];
-				if (decoration == 35) { // offset
-					uint32_t offset = operands[3];
-					add_offset(member, offset);
-				}
-			}
-			break;
-		}
-		}
-
-		index += wordCount;
-	}
-
-	for (uint32_t i = 0; i < locs_size; ++i) {
-		char *name = find_name(locs[i].id);
-		if (name != NULL) {
-			set_number(locations, name, locs[i].value);
-		}
-	}
-
-	for (uint32_t i = 0; i < bindings_size; ++i) {
-		char *name = find_name(bindings[i].id);
-		if (name != NULL) {
-			set_number(textureBindings, name, bindings[i].value);
-		}
-	}
-
-	for (uint32_t i = 0; i < offsets_size; ++i) {
-		char *name = find_member_name(offsets[i].id);
-		if (name != NULL) {
-			set_number(uniformOffsets, name, offsets[i].value);
-		}
-	}
+	current_textures[unit.offset] = target;
 }
 
 static VkShaderModule create_shader_module(const void *code, size_t size) {
@@ -2264,16 +2090,6 @@ static VkShaderModule create_shader_module(const void *code, size_t size) {
 	return module;
 }
 
-static VkShaderModule prepare_vs(VkShaderModule *vert_shader_module, iron_gpu_shader_t *vertex_shader) {
-	*vert_shader_module = create_shader_module(vertex_shader->impl.source, vertex_shader->impl.length);
-	return *vert_shader_module;
-}
-
-static VkShaderModule prepare_fs(VkShaderModule *frag_shader_module, iron_gpu_shader_t *fragment_shader) {
-	*frag_shader_module = create_shader_module(fragment_shader->impl.source, fragment_shader->impl.length);
-	return *frag_shader_module;
-}
-
 static VkFormat convert_image_format(iron_image_format_t format) {
 	switch (format) {
 	case IRON_IMAGE_FORMAT_RGBA128:
@@ -2305,74 +2121,22 @@ void iron_gpu_pipeline_destroy(iron_gpu_pipeline_t *pipeline) {
 
 iron_gpu_constant_location_t iron_gpu_pipeline_get_constant_location(iron_gpu_pipeline_t *pipeline, const char *name) {
 	iron_gpu_constant_location_t location;
-	location.impl.vertexOffset = -1;
-	if (has_number(pipeline->impl.vertexOffsets, name)) {
-		location.impl.vertexOffset = find_number(pipeline->impl.vertexOffsets, name);
-	}
 	return location;
 }
 
 iron_gpu_texture_unit_t iron_gpu_pipeline_get_texture_unit(iron_gpu_pipeline_t *pipeline, const char *name) {
 	iron_gpu_texture_unit_t unit;
-	unit.offset = -1;
 	return unit;
 }
 
-static VkCullModeFlagBits convert_cull_mode(iron_gpu_cull_mode_t cull_mode) {
-	switch (cull_mode) {
-	case IRON_GPU_CULL_MODE_CLOCKWISE:
-		return VK_CULL_MODE_BACK_BIT;
-	case IRON_GPU_CULL_MODE_COUNTERCLOCKWISE:
-		return VK_CULL_MODE_FRONT_BIT;
-	case IRON_GPU_CULL_MODE_NEVER:
-	default:
-		return VK_CULL_MODE_NONE;
-	}
-}
-
-static VkCompareOp convert_compare_mode(iron_gpu_compare_mode_t compare) {
-	switch (compare) {
-	default:
-	case IRON_GPU_COMPARE_MODE_ALWAYS:
-		return VK_COMPARE_OP_ALWAYS;
-	case IRON_GPU_COMPARE_MODE_NEVER:
-		return VK_COMPARE_OP_NEVER;
-	case IRON_GPU_COMPARE_MODE_LESS:
-		return VK_COMPARE_OP_LESS;
-	}
-}
-
-static VkBlendFactor convert_blend_factor(iron_gpu_blending_factor_t factor) {
-	switch (factor) {
-	case IRON_GPU_BLEND_ONE:
-		return VK_BLEND_FACTOR_ONE;
-	case IRON_GPU_BLEND_ZERO:
-		return VK_BLEND_FACTOR_ZERO;
-	case IRON_GPU_BLEND_SOURCE_ALPHA:
-		return VK_BLEND_FACTOR_SRC_ALPHA;
-	case IRON_GPU_BLEND_DEST_ALPHA:
-		return VK_BLEND_FACTOR_DST_ALPHA;
-	case IRON_GPU_BLEND_INV_SOURCE_ALPHA:
-		return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
-	case IRON_GPU_BLEND_INV_DEST_ALPHA:
-		return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
-	}
-}
-
-static VkBlendOp convert_blend_operation(iron_gpu_blending_operation_t op) {
-	switch (op) {
-	case IRON_GPU_BLENDOP_ADD:
-		return VK_BLEND_OP_ADD;
-	}
-}
-
 void iron_gpu_pipeline_compile(iron_gpu_pipeline_t *pipeline) {
 	memset(pipeline->impl.vertexLocations, 0, sizeof(iron_internal_named_number) * IRON_INTERNAL_NAMED_NUMBER_COUNT);
 	memset(pipeline->impl.vertexOffsets, 0, sizeof(iron_internal_named_number) * IRON_INTERNAL_NAMED_NUMBER_COUNT);
 	memset(pipeline->impl.vertexTextureBindings, 0, sizeof(iron_internal_named_number) * IRON_INTERNAL_NAMED_NUMBER_COUNT);
 	memset(pipeline->impl.fragmentTextureBindings, 0, sizeof(iron_internal_named_number) * IRON_INTERNAL_NAMED_NUMBER_COUNT);
-	parse_shader((uint32_t *)pipeline->vertex_shader->impl.source, pipeline->vertex_shader->impl.length, pipeline->impl.vertexLocations,
-	             pipeline->impl.vertexTextureBindings, pipeline->impl.vertexOffsets);
+
+	// parse_shader((uint32_t *)pipeline->vertex_shader->impl.source, pipeline->vertex_shader->impl.length, pipeline->impl.vertexLocations,
+	            //  pipeline->impl.vertexTextureBindings, pipeline->impl.vertexOffsets);
 
 	VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {
 		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
@@ -2422,7 +2186,7 @@ void iron_gpu_pipeline_compile(iron_gpu_pipeline_t *pipeline) {
 		iron_gpu_vertex_element_t element = pipeline->input_layout->elements[i];
 
 		vi_attrs[attr].binding = 0;
-		vi_attrs[attr].location = find_number(pipeline->impl.vertexLocations, element.name);
+		vi_attrs[attr].location = i;
 		vi_attrs[attr].offset = offset;
 		offset += iron_gpu_vertex_data_size(element.data);
 
@@ -2523,12 +2287,14 @@ void iron_gpu_pipeline_compile(iron_gpu_pipeline_t *pipeline) {
 
 	shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
 	shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
-	shaderStages[0].module = prepare_vs(&pipeline->impl.vert_shader_module, pipeline->vertex_shader);
+	pipeline->impl.vert_shader_module = create_shader_module(pipeline->vertex_shader->impl.source, pipeline->vertex_shader->impl.length);
+	shaderStages[0].module = pipeline->impl.vert_shader_module;
 	shaderStages[0].pName = vs_main;
 
 	shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
 	shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
-	shaderStages[1].module = prepare_fs(&pipeline->impl.frag_shader_module, pipeline->fragment_shader);
+	pipeline->impl.frag_shader_module = create_shader_module(pipeline->fragment_shader->impl.source, pipeline->fragment_shader->impl.length);
+	shaderStages[1].module = pipeline->impl.frag_shader_module;
 	shaderStages[1].pName = fs_main;
 
 	pipeline_info.pVertexInputState = &vi;
@@ -3296,12 +3062,7 @@ void iron_gpu_vertex_buffer_init(iron_gpu_buffer_t *buffer, int vertexCount, iro
 	vkBindBufferMemory(vk_ctx.device, buffer->impl.buf, buffer->impl.mem, 0);
 }
 
-static void unset_vertex_buffer(iron_gpu_buffer_t *buffer) {
-
-}
-
 void iron_gpu_vertex_buffer_destroy(iron_gpu_buffer_t *buffer) {
-	unset_vertex_buffer(buffer);
 	vkFreeMemory(vk_ctx.device, buffer->impl.mem, NULL);
 	vkDestroyBuffer(vk_ctx.device, buffer->impl.buf, NULL);
 }
@@ -3317,10 +3078,6 @@ void iron_gpu_vertex_buffer_unlock(iron_gpu_buffer_t *buffer) {
 	vkUnmapMemory(vk_ctx.device, buffer->impl.mem);
 }
 
-int iron_gpu_internal_vertex_buffer_set(iron_gpu_buffer_t *buffer) {
-	return 0;
-}
-
 int iron_gpu_vertex_buffer_count(iron_gpu_buffer_t *buffer) {
 	return buffer->myCount;
 }
@@ -3467,6 +3224,11 @@ static const char *raygen_shader_name = "raygeneration";
 static const char *closesthit_shader_name = "closesthit";
 static const char *miss_shader_name = "miss";
 
+typedef struct inst {
+	iron_matrix4x4_t m;
+	int i;
+} inst_t;
+
 static VkDescriptorPool raytrace_descriptor_pool;
 static iron_gpu_raytrace_acceleration_structure_t *accel;
 static iron_gpu_raytrace_pipeline_t *pipeline;
@@ -3500,6 +3262,14 @@ static PFN_vkCmdBuildAccelerationStructuresKHR _vkCmdBuildAccelerationStructures
 static PFN_vkDestroyAccelerationStructureKHR _vkDestroyAccelerationStructureKHR = NULL;
 static PFN_vkCmdTraceRaysKHR _vkCmdTraceRaysKHR = NULL;
 
+bool iron_gpu_raytrace_supported() {
+	#ifdef IRON_ANDROID
+		return false;
+	#else
+		return true;
+	#endif
+}
+
 void iron_gpu_raytrace_pipeline_init(iron_gpu_raytrace_pipeline_t *pipeline, iron_gpu_command_list_t *command_list, void *ray_shader, int ray_shader_size,
                                  struct iron_gpu_buffer *constant_buffer) {
 	output = NULL;

+ 5 - 10
base/sources/iron.h

@@ -996,7 +996,7 @@ extern size_t fragment_inputs_size;
 extern size_t vertex_functions_size;
 extern size_t fragment_functions_size;
 
-void gpu_create_shaders_from_kong(char *kong, char **vs, char **fs) {
+void gpu_create_shaders_from_kong(char *kong, char **vs, char **fs, int *vs_size, int *fs_size) {
 	next_variable_id = 1;
 	allocated_globals_size = 0;
 	next_function_index = 0;
@@ -1038,17 +1038,15 @@ void gpu_create_shaders_from_kong(char *kong, char **vs, char **fs) {
 	#else
 
 	transform(TRANSFORM_FLAG_ONE_COMPONENT_SWIZZLE | TRANSFORM_FLAG_BINARY_UNIFY_LENGTH);
-	int vs_size;
-	int fs_size;
-	spirv_export2(vs, fs, &vs_size, &fs_size, false);
+	spirv_export2(vs, fs, vs_size, fs_size, false);
 
 	#endif
 }
 
 #endif
 
-iron_gpu_shader_t *gpu_create_shader_from_source(string_t *source, iron_gpu_shader_type_t shader_type) {
-	iron_gpu_shader_t *shader = NULL;
+iron_gpu_shader_t *gpu_create_shader_from_source(string_t *source, int source_size, iron_gpu_shader_type_t shader_type) {
+	iron_gpu_shader_t *shader = (iron_gpu_shader_t *)malloc(sizeof(iron_gpu_shader_t));
 	char *temp_string_s = shader_type == IRON_GPU_SHADER_TYPE_VERTEX ? temp_string_vs : temp_string_fs;
 
 	#ifdef WITH_D3DCOMPILER
@@ -1066,20 +1064,17 @@ iron_gpu_shader_t *gpu_create_shader_from_source(string_t *source, iron_gpu_shad
 	}
 
 	int size = shader_buffer->lpVtbl->GetBufferSize(shader_buffer);
-	shader = (iron_gpu_shader_t *)malloc(sizeof(iron_gpu_shader_t));
 	iron_gpu_shader_init(shader, (char *)shader_buffer->lpVtbl->GetBufferPointer(shader_buffer), size, shader_type);
 	shader_buffer->lpVtbl->Release(shader_buffer);
 
 	#elif defined(IRON_METAL)
 
 	strcpy(temp_string_s, source);
-	shader = (iron_gpu_shader_t *)malloc(sizeof(iron_gpu_shader_t));
 	iron_gpu_shader_init(shader, temp_string_s, strlen(temp_string_s), shader_type);
 
 	#elif defined(IRON_VULKAN)
 
-	shader = (iron_gpu_shader_t *)malloc(sizeof(iron_gpu_shader_t));
-	// iron_gpu_shader_init(shader, source, length, shader_type);
+	iron_gpu_shader_init(shader, source, source_size, shader_type);
 
 	#endif
 

+ 0 - 1
base/sources/iron_gpu.h

@@ -219,7 +219,6 @@ float *iron_gpu_vertex_buffer_lock(iron_gpu_buffer_t *buffer);
 void iron_gpu_vertex_buffer_unlock(iron_gpu_buffer_t *buffer);
 int iron_gpu_vertex_buffer_count(iron_gpu_buffer_t *buffer);
 int iron_gpu_vertex_buffer_stride(iron_gpu_buffer_t *buffer);
-int iron_gpu_internal_vertex_buffer_set(iron_gpu_buffer_t *buffer);
 void gpu_set_vertex_buffer(iron_gpu_buffer_t *buffer);
 void iron_gpu_constant_buffer_init(iron_gpu_buffer_t *buffer, int size);
 void iron_gpu_constant_buffer_destroy(iron_gpu_buffer_t *buffer);

+ 1 - 1
base/sources/ts/iron/iron.ts

@@ -222,7 +222,7 @@ declare function iron_gpu_vertex_buffer_unlock(buffer: any): void;
 declare function gpu_set_vertex_buffer(buffer: any): void;
 declare function gpu_draw(): void;
 declare function gpu_create_shader(data: buffer_t, type: i32): iron_gpu_shader_t;
-declare function gpu_create_shader_from_source(source: string, shader_type: shader_type_t): iron_gpu_shader_t;
+declare function gpu_create_shader_from_source(source: string, source_size: i32, shader_type: shader_type_t): iron_gpu_shader_t;
 declare function iron_gpu_shader_destroy(shader: iron_gpu_shader_t): void;
 declare function gpu_create_pipeline(): any;
 declare function gpu_delete_pipeline(pipeline: any): void;

+ 2 - 0
base/sources/ts/iron/scene.ts

@@ -598,6 +598,8 @@ type shader_context_runtime_t = {
 	constants?: iron_gpu_constant_location_t[];
 	tex_units?: iron_gpu_texture_unit_t[];
 	structure?: iron_gpu_vertex_structure_t;
+	vertex_shader_size?: i32;
+	fragment_shader_size?: i32;
 };
 
 type vertex_element_t = {

+ 18 - 17
base/sources/ts/iron/shader_data.ts

@@ -133,8 +133,8 @@ function shader_context_compile(raw: shader_context_t): shader_context_t {
 
 	// Shaders
 	if (raw.shader_from_source) {
-		raw._.pipe_state.vertex_shader = gpu_create_shader_from_source(raw.vertex_shader, shader_type_t.VERTEX);
-		raw._.pipe_state.fragment_shader = gpu_create_shader_from_source(raw.fragment_shader, shader_type_t.FRAGMENT);
+		raw._.pipe_state.vertex_shader = gpu_create_shader_from_source(raw.vertex_shader, raw._.vertex_shader_size, shader_type_t.VERTEX);
+		raw._.pipe_state.fragment_shader = gpu_create_shader_from_source(raw.fragment_shader, raw._.fragment_shader_size, shader_type_t.FRAGMENT);
 
 		// Shader compile error
 		if (raw._.pipe_state.vertex_shader == null || raw._.pipe_state.fragment_shader == null) {
@@ -143,6 +143,7 @@ function shader_context_compile(raw: shader_context_t): shader_context_t {
 	}
 	else {
 		///if arm_embed
+
 		raw._.pipe_state.fragment_shader = sys_get_shader(raw.fragment_shader);
 		raw._.pipe_state.vertex_shader = sys_get_shader(raw.vertex_shader);
 
@@ -158,18 +159,18 @@ function shader_context_compile(raw: shader_context_t): shader_context_t {
 	return shader_context_finish_compile(raw);
 }
 
-///if arm_metal
+///if arm_direct3d12
 
 function shader_context_type_size(t: string): i32 {
 	if (t == "int") return 4;
 	if (t == "float") return 4;
 	if (t == "vec2") return 8;
-	if (t == "vec3") return 16;
+	if (t == "vec3") return 12;
 	if (t == "vec4") return 16;
 	if (t == "mat3") return 48;
 	if (t == "mat4") return 64;
 	if (t == "float2") return 8;
-	if (t == "float3") return 16;
+	if (t == "float3") return 12;
 	if (t == "float4") return 16;
 	if (t == "float3x3") return 48;
 	if (t == "float4x4") return 64;
@@ -177,10 +178,14 @@ function shader_context_type_size(t: string): i32 {
 }
 
 function shader_context_type_pad(offset: i32, size: i32): i32 {
-	if (size > 16) {
-		size = 16;
+	let r: i32 = offset % 16;
+	if (r == 0) {
+		return 0;
 	}
-	return (size - (offset % size)) % size;
+	if (size >= 16 || r + size > 16) {
+		return 16 - r;
+	}
+	return 0;
 }
 
 ///else
@@ -189,12 +194,12 @@ function shader_context_type_size(t: string): i32 {
 	if (t == "int") return 4;
 	if (t == "float") return 4;
 	if (t == "vec2") return 8;
-	if (t == "vec3") return 12;
+	if (t == "vec3") return 16;
 	if (t == "vec4") return 16;
 	if (t == "mat3") return 48;
 	if (t == "mat4") return 64;
 	if (t == "float2") return 8;
-	if (t == "float3") return 12;
+	if (t == "float3") return 16;
 	if (t == "float4") return 16;
 	if (t == "float3x3") return 48;
 	if (t == "float4x4") return 64;
@@ -202,14 +207,10 @@ function shader_context_type_size(t: string): i32 {
 }
 
 function shader_context_type_pad(offset: i32, size: i32): i32 {
-	let r: i32 = offset % 16;
-	if (r == 0) {
-		return 0;
-	}
-	if (size >= 16 || r + size > 16) {
-		return 16 - r;
+	if (size > 16) {
+		size = 16;
 	}
-	return 0;
+	return (size - (offset % size)) % size;
 }
 
 ///end