浏览代码

Fix ubsan reported errors in rendering

This allows the TPS demo to run without any ubsan reports from the
rendering code.
HP van Braam 7 月之前
父节点
当前提交
062d74bb9c

+ 1 - 1
drivers/d3d12/rendering_context_driver_d3d12.cpp

@@ -184,7 +184,7 @@ Error RenderingContextDriverD3D12::_initialize_devices() {
 
 
 		Device &device = driver_devices[i];
 		Device &device = driver_devices[i];
 		device.name = desc.Description;
 		device.name = desc.Description;
-		device.vendor = Vendor(desc.VendorId);
+		device.vendor = desc.VendorId;
 		device.workarounds = Workarounds();
 		device.workarounds = Workarounds();
 
 
 		if (desc.Flags & DXGI_ADAPTER_FLAG_SOFTWARE) {
 		if (desc.Flags & DXGI_ADAPTER_FLAG_SOFTWARE) {

+ 1 - 1
drivers/metal/rendering_context_driver_metal.mm

@@ -52,7 +52,7 @@ Error RenderingContextDriverMetal::initialize() {
 	}
 	}
 #endif
 #endif
 	device.type = DEVICE_TYPE_INTEGRATED_GPU;
 	device.type = DEVICE_TYPE_INTEGRATED_GPU;
-	device.vendor = VENDOR_APPLE;
+	device.vendor = Vendor::VENDOR_APPLE;
 	device.workarounds = Workarounds();
 	device.workarounds = Workarounds();
 
 
 	MetalDeviceProperties props(metal_device);
 	MetalDeviceProperties props(metal_device);

+ 2 - 2
drivers/vulkan/rendering_context_driver_vulkan.cpp

@@ -843,7 +843,7 @@ Error RenderingContextDriverVulkan::_initialize_devices() {
 
 
 		Device &driver_device = driver_devices[i];
 		Device &driver_device = driver_devices[i];
 		driver_device.name = String::utf8(props.deviceName);
 		driver_device.name = String::utf8(props.deviceName);
-		driver_device.vendor = Vendor(props.vendorID);
+		driver_device.vendor = props.vendorID;
 		driver_device.type = DeviceType(props.deviceType);
 		driver_device.type = DeviceType(props.deviceType);
 		driver_device.workarounds = Workarounds();
 		driver_device.workarounds = Workarounds();
 
 
@@ -880,7 +880,7 @@ void RenderingContextDriverVulkan::_check_driver_workarounds(const VkPhysicalDev
 	// This bug was fixed in driver version 512.503.0, so we only enabled it on devices older than this.
 	// This bug was fixed in driver version 512.503.0, so we only enabled it on devices older than this.
 	//
 	//
 	r_device.workarounds.avoid_compute_after_draw =
 	r_device.workarounds.avoid_compute_after_draw =
-			r_device.vendor == VENDOR_QUALCOMM &&
+			r_device.vendor == Vendor::VENDOR_QUALCOMM &&
 			p_device_properties.deviceID >= 0x6000000 && // Adreno 6xx
 			p_device_properties.deviceID >= 0x6000000 && // Adreno 6xx
 			p_device_properties.driverVersion < VK_MAKE_VERSION(512, 503, 0) &&
 			p_device_properties.driverVersion < VK_MAKE_VERSION(512, 503, 0) &&
 			r_device.name.find("Turnip") < 0;
 			r_device.name.find("Turnip") < 0;

+ 2 - 2
drivers/vulkan/rendering_device_driver_vulkan.cpp

@@ -1898,10 +1898,10 @@ RDD::TextureID RenderingDeviceDriverVulkan::texture_create_shared(TextureID p_or
 				vkGetPhysicalDeviceFormatProperties(physical_device, RD_TO_VK_FORMAT[p_view.format], &properties);
 				vkGetPhysicalDeviceFormatProperties(physical_device, RD_TO_VK_FORMAT[p_view.format], &properties);
 				const VkFormatFeatureFlags &supported_flags = owner_tex_info->vk_create_info.tiling == VK_IMAGE_TILING_LINEAR ? properties.linearTilingFeatures : properties.optimalTilingFeatures;
 				const VkFormatFeatureFlags &supported_flags = owner_tex_info->vk_create_info.tiling == VK_IMAGE_TILING_LINEAR ? properties.linearTilingFeatures : properties.optimalTilingFeatures;
 				if ((usage_info->usage & VK_IMAGE_USAGE_STORAGE_BIT) && !(supported_flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
 				if ((usage_info->usage & VK_IMAGE_USAGE_STORAGE_BIT) && !(supported_flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
-					usage_info->usage &= ~VK_IMAGE_USAGE_STORAGE_BIT;
+					usage_info->usage &= ~uint32_t(VK_IMAGE_USAGE_STORAGE_BIT);
 				}
 				}
 				if ((usage_info->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) && !(supported_flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
 				if ((usage_info->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) && !(supported_flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
-					usage_info->usage &= ~VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+					usage_info->usage &= ~uint32_t(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
 				}
 				}
 			}
 			}
 
 

+ 3 - 3
servers/rendering/renderer_rd/forward_clustered/render_forward_clustered.cpp

@@ -2099,7 +2099,7 @@ void RenderForwardClustered::_render_scene(RenderDataRD *p_render_data, const Co
 				}
 				}
 			}
 			}
 
 
-			uint32_t opaque_color_pass_flags = using_motion_pass ? (color_pass_flags & ~COLOR_PASS_FLAG_MOTION_VECTORS) : color_pass_flags;
+			uint32_t opaque_color_pass_flags = using_motion_pass ? (color_pass_flags & ~uint32_t(COLOR_PASS_FLAG_MOTION_VECTORS)) : color_pass_flags;
 			RID opaque_framebuffer = using_motion_pass ? rb_data->get_color_pass_fb(opaque_color_pass_flags) : color_framebuffer;
 			RID opaque_framebuffer = using_motion_pass ? rb_data->get_color_pass_fb(opaque_color_pass_flags) : color_framebuffer;
 			RenderListParameters render_list_params(render_list[RENDER_LIST_OPAQUE].elements.ptr(), render_list[RENDER_LIST_OPAQUE].element_info.ptr(), render_list[RENDER_LIST_OPAQUE].elements.size(), reverse_cull, PASS_MODE_COLOR, opaque_color_pass_flags, rb_data.is_null(), p_render_data->directional_light_soft_shadows, rp_uniform_set, get_debug_draw_mode() == RS::VIEWPORT_DEBUG_DRAW_WIREFRAME, Vector2(), p_render_data->scene_data->lod_distance_multiplier, p_render_data->scene_data->screen_mesh_lod_threshold, p_render_data->scene_data->view_count, 0, base_specialization);
 			RenderListParameters render_list_params(render_list[RENDER_LIST_OPAQUE].elements.ptr(), render_list[RENDER_LIST_OPAQUE].element_info.ptr(), render_list[RENDER_LIST_OPAQUE].elements.size(), reverse_cull, PASS_MODE_COLOR, opaque_color_pass_flags, rb_data.is_null(), p_render_data->directional_light_soft_shadows, rp_uniform_set, get_debug_draw_mode() == RS::VIEWPORT_DEBUG_DRAW_WIREFRAME, Vector2(), p_render_data->scene_data->lod_distance_multiplier, p_render_data->scene_data->screen_mesh_lod_threshold, p_render_data->scene_data->view_count, 0, base_specialization);
 			_render_list_with_draw_list(&render_list_params, opaque_framebuffer, RD::DrawFlags(load_color ? RD::DRAW_DEFAULT_ALL : RD::DRAW_CLEAR_COLOR_ALL) | (depth_pre_pass ? RD::DRAW_DEFAULT_ALL : RD::DRAW_CLEAR_DEPTH), c, 0.0f);
 			_render_list_with_draw_list(&render_list_params, opaque_framebuffer, RD::DrawFlags(load_color ? RD::DRAW_DEFAULT_ALL : RD::DRAW_CLEAR_COLOR_ALL) | (depth_pre_pass ? RD::DRAW_DEFAULT_ALL : RD::DRAW_CLEAR_DEPTH), c, 0.0f);
@@ -2293,10 +2293,10 @@ void RenderForwardClustered::_render_scene(RenderDataRD *p_render_data, const Co
 	_setup_environment(p_render_data, is_reflection_probe, screen_size, p_default_bg_color, false);
 	_setup_environment(p_render_data, is_reflection_probe, screen_size, p_default_bg_color, false);
 
 
 	{
 	{
-		uint32_t transparent_color_pass_flags = (color_pass_flags | COLOR_PASS_FLAG_TRANSPARENT) & ~(COLOR_PASS_FLAG_SEPARATE_SPECULAR);
+		uint32_t transparent_color_pass_flags = (color_pass_flags | uint32_t(COLOR_PASS_FLAG_TRANSPARENT)) & ~uint32_t(COLOR_PASS_FLAG_SEPARATE_SPECULAR);
 		if (using_motion_pass) {
 		if (using_motion_pass) {
 			// Motion vectors on transparent draw calls are not required when using the reactive mask.
 			// Motion vectors on transparent draw calls are not required when using the reactive mask.
-			transparent_color_pass_flags &= ~(COLOR_PASS_FLAG_MOTION_VECTORS);
+			transparent_color_pass_flags &= ~uint32_t(COLOR_PASS_FLAG_MOTION_VECTORS);
 		}
 		}
 
 
 		RID alpha_framebuffer = rb_data.is_valid() ? rb_data->get_color_pass_fb(transparent_color_pass_flags) : color_only_framebuffer;
 		RID alpha_framebuffer = rb_data.is_valid() ? rb_data->get_color_pass_fb(transparent_color_pass_flags) : color_only_framebuffer;

+ 15 - 15
servers/rendering/renderer_rd/forward_clustered/scene_shader_forward_clustered.cpp

@@ -222,27 +222,27 @@ RS::ShaderNativeSourceCode SceneShaderForwardClustered::ShaderData::get_native_s
 	}
 	}
 }
 }
 
 
-SceneShaderForwardClustered::ShaderVersion SceneShaderForwardClustered::ShaderData::_get_shader_version(PipelineVersion p_pipeline_version, uint32_t p_color_pass_flags, bool p_ubershader) const {
-	uint32_t ubershader_base = p_ubershader ? SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL : 0;
+uint16_t SceneShaderForwardClustered::ShaderData::_get_shader_version(PipelineVersion p_pipeline_version, uint32_t p_color_pass_flags, bool p_ubershader) const {
+	uint32_t ubershader_base = p_ubershader ? ShaderVersion::SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL : 0;
 	switch (p_pipeline_version) {
 	switch (p_pipeline_version) {
 		case PIPELINE_VERSION_DEPTH_PASS:
 		case PIPELINE_VERSION_DEPTH_PASS:
-			return ShaderVersion(SHADER_VERSION_DEPTH_PASS + ubershader_base);
+			return ShaderVersion::SHADER_VERSION_DEPTH_PASS + ubershader_base;
 		case PIPELINE_VERSION_DEPTH_PASS_DP:
 		case PIPELINE_VERSION_DEPTH_PASS_DP:
-			return ShaderVersion(SHADER_VERSION_DEPTH_PASS_DP + ubershader_base);
+			return ShaderVersion::SHADER_VERSION_DEPTH_PASS_DP + ubershader_base;
 		case PIPELINE_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS:
 		case PIPELINE_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS:
-			return ShaderVersion(SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS + ubershader_base);
+			return ShaderVersion::SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS + ubershader_base;
 		case PIPELINE_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI:
 		case PIPELINE_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI:
-			return ShaderVersion(SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI + ubershader_base);
+			return ShaderVersion::SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI + ubershader_base;
 		case PIPELINE_VERSION_DEPTH_PASS_MULTIVIEW:
 		case PIPELINE_VERSION_DEPTH_PASS_MULTIVIEW:
-			return ShaderVersion(SHADER_VERSION_DEPTH_PASS_MULTIVIEW + ubershader_base);
+			return ShaderVersion::SHADER_VERSION_DEPTH_PASS_MULTIVIEW + ubershader_base;
 		case PIPELINE_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_MULTIVIEW:
 		case PIPELINE_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_MULTIVIEW:
-			return ShaderVersion(SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_MULTIVIEW + ubershader_base);
+			return ShaderVersion::SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_MULTIVIEW + ubershader_base;
 		case PIPELINE_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI_MULTIVIEW:
 		case PIPELINE_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI_MULTIVIEW:
-			return ShaderVersion(SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI_MULTIVIEW + ubershader_base);
+			return ShaderVersion::SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI_MULTIVIEW + ubershader_base;
 		case PIPELINE_VERSION_DEPTH_PASS_WITH_MATERIAL:
 		case PIPELINE_VERSION_DEPTH_PASS_WITH_MATERIAL:
-			return ShaderVersion(SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL + SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL);
+			return ShaderVersion::SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL + ShaderVersion::SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL;
 		case PIPELINE_VERSION_DEPTH_PASS_WITH_SDF:
 		case PIPELINE_VERSION_DEPTH_PASS_WITH_SDF:
-			return ShaderVersion(SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL + SHADER_VERSION_DEPTH_PASS_WITH_SDF);
+			return ShaderVersion::SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL + ShaderVersion::SHADER_VERSION_DEPTH_PASS_WITH_SDF;
 		case PIPELINE_VERSION_COLOR_PASS: {
 		case PIPELINE_VERSION_COLOR_PASS: {
 			int shader_flags = 0;
 			int shader_flags = 0;
 
 
@@ -266,11 +266,11 @@ SceneShaderForwardClustered::ShaderVersion SceneShaderForwardClustered::ShaderDa
 				shader_flags |= SHADER_COLOR_PASS_FLAG_MULTIVIEW;
 				shader_flags |= SHADER_COLOR_PASS_FLAG_MULTIVIEW;
 			}
 			}
 
 
-			return ShaderVersion(SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL + SHADER_VERSION_COLOR_PASS + shader_flags);
+			return ShaderVersion::SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL + ShaderVersion::SHADER_VERSION_COLOR_PASS + shader_flags;
 		} break;
 		} break;
 		default: {
 		default: {
 			DEV_ASSERT(false && "Unknown pipeline version.");
 			DEV_ASSERT(false && "Unknown pipeline version.");
-			return ShaderVersion(0);
+			return 0;
 		} break;
 		} break;
 	}
 	}
 }
 }
@@ -404,7 +404,7 @@ RD::PolygonCullMode SceneShaderForwardClustered::ShaderData::get_cull_mode_from_
 	return cull_mode_rd_table[p_cull_variant][cull_mode];
 	return cull_mode_rd_table[p_cull_variant][cull_mode];
 }
 }
 
 
-RID SceneShaderForwardClustered::ShaderData::_get_shader_variant(ShaderVersion p_shader_version) const {
+RID SceneShaderForwardClustered::ShaderData::_get_shader_variant(uint16_t p_shader_version) const {
 	if (version.is_valid()) {
 	if (version.is_valid()) {
 		MutexLock lock(SceneShaderForwardClustered::singleton_mutex);
 		MutexLock lock(SceneShaderForwardClustered::singleton_mutex);
 		ERR_FAIL_NULL_V(SceneShaderForwardClustered::singleton, RID());
 		ERR_FAIL_NULL_V(SceneShaderForwardClustered::singleton, RID());
@@ -426,7 +426,7 @@ RID SceneShaderForwardClustered::ShaderData::get_shader_variant(PipelineVersion
 
 
 uint64_t SceneShaderForwardClustered::ShaderData::get_vertex_input_mask(PipelineVersion p_pipeline_version, uint32_t p_color_pass_flags, bool p_ubershader) {
 uint64_t SceneShaderForwardClustered::ShaderData::get_vertex_input_mask(PipelineVersion p_pipeline_version, uint32_t p_color_pass_flags, bool p_ubershader) {
 	// Vertex input masks require knowledge of the shader. Since querying the shader can be expensive due to high contention and the necessary mutex, we cache the result instead.
 	// Vertex input masks require knowledge of the shader. Since querying the shader can be expensive due to high contention and the necessary mutex, we cache the result instead.
-	ShaderVersion shader_version = _get_shader_version(p_pipeline_version, p_color_pass_flags, p_ubershader);
+	uint16_t shader_version = _get_shader_version(p_pipeline_version, p_color_pass_flags, p_ubershader);
 	uint64_t input_mask = vertex_input_masks[shader_version].load(std::memory_order_relaxed);
 	uint64_t input_mask = vertex_input_masks[shader_version].load(std::memory_order_relaxed);
 	if (input_mask == 0) {
 	if (input_mask == 0) {
 		RID shader_rid = _get_shader_variant(shader_version);
 		RID shader_rid = _get_shader_variant(shader_version);

+ 16 - 15
servers/rendering/renderer_rd/forward_clustered/scene_shader_forward_clustered.h

@@ -50,18 +50,19 @@ public:
 		SHADER_GROUP_ADVANCED_MULTIVIEW,
 		SHADER_GROUP_ADVANCED_MULTIVIEW,
 	};
 	};
 
 
-	enum ShaderVersion {
-		SHADER_VERSION_DEPTH_PASS,
-		SHADER_VERSION_DEPTH_PASS_DP,
-		SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS,
-		SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI,
-		SHADER_VERSION_DEPTH_PASS_MULTIVIEW,
-		SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_MULTIVIEW,
-		SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI_MULTIVIEW,
-		SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL,
-		SHADER_VERSION_DEPTH_PASS_WITH_SDF,
-		SHADER_VERSION_COLOR_PASS,
-		SHADER_VERSION_MAX
+	// Not an enum because these values are constants that are processed as numbers
+	// to arrive at a unique version for a particular shader.
+	struct ShaderVersion {
+		constexpr static uint16_t SHADER_VERSION_DEPTH_PASS = 0;
+		constexpr static uint16_t SHADER_VERSION_DEPTH_PASS_DP = 1;
+		constexpr static uint16_t SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS = 2;
+		constexpr static uint16_t SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI = 3;
+		constexpr static uint16_t SHADER_VERSION_DEPTH_PASS_MULTIVIEW = 4;
+		constexpr static uint16_t SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_MULTIVIEW = 5;
+		constexpr static uint16_t SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI_MULTIVIEW = 6;
+		constexpr static uint16_t SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL = 7;
+		constexpr static uint16_t SHADER_VERSION_DEPTH_PASS_WITH_SDF = 8;
+		constexpr static uint16_t SHADER_VERSION_COLOR_PASS = 9;
 	};
 	};
 
 
 	enum ShaderColorPassFlags {
 	enum ShaderColorPassFlags {
@@ -205,7 +206,7 @@ public:
 
 
 		RID version;
 		RID version;
 
 
-		static const uint32_t VERTEX_INPUT_MASKS_SIZE = SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL + SHADER_VERSION_COLOR_PASS + SHADER_COLOR_PASS_FLAG_COUNT;
+		static const uint32_t VERTEX_INPUT_MASKS_SIZE = ShaderVersion::SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL + ShaderVersion::SHADER_VERSION_COLOR_PASS + SHADER_COLOR_PASS_FLAG_COUNT;
 		std::atomic<uint64_t> vertex_input_masks[VERTEX_INPUT_MASKS_SIZE] = {};
 		std::atomic<uint64_t> vertex_input_masks[VERTEX_INPUT_MASKS_SIZE] = {};
 
 
 		Vector<ShaderCompiler::GeneratedCode::Texture> texture_uniforms;
 		Vector<ShaderCompiler::GeneratedCode::Texture> texture_uniforms;
@@ -281,8 +282,8 @@ public:
 		virtual bool is_animated() const;
 		virtual bool is_animated() const;
 		virtual bool casts_shadows() const;
 		virtual bool casts_shadows() const;
 		virtual RS::ShaderNativeSourceCode get_native_source_code() const;
 		virtual RS::ShaderNativeSourceCode get_native_source_code() const;
-		ShaderVersion _get_shader_version(PipelineVersion p_pipeline_version, uint32_t p_color_pass_flags, bool p_ubershader) const;
-		RID _get_shader_variant(ShaderVersion p_shader_version) const;
+		uint16_t _get_shader_version(PipelineVersion p_pipeline_version, uint32_t p_color_pass_flags, bool p_ubershader) const;
+		RID _get_shader_variant(uint16_t p_shader_version) const;
 		void _clear_vertex_input_mask_cache();
 		void _clear_vertex_input_mask_cache();
 		RID get_shader_variant(PipelineVersion p_pipeline_version, uint32_t p_color_pass_flags, bool p_ubershader) const;
 		RID get_shader_variant(PipelineVersion p_pipeline_version, uint32_t p_color_pass_flags, bool p_ubershader) const;
 		uint64_t get_vertex_input_mask(PipelineVersion p_pipeline_version, uint32_t p_color_pass_flags, bool p_ubershader);
 		uint64_t get_vertex_input_mask(PipelineVersion p_pipeline_version, uint32_t p_color_pass_flags, bool p_ubershader);

+ 1 - 1
servers/rendering/renderer_rd/renderer_canvas_render_rd.h

@@ -138,7 +138,7 @@ class RendererCanvasRenderRD : public RendererCanvasRender {
 		uint32_t hash() const {
 		uint32_t hash() const {
 			uint32_t h = hash_murmur3_one_32(variant);
 			uint32_t h = hash_murmur3_one_32(variant);
 			h = hash_murmur3_one_32(framebuffer_format_id, h);
 			h = hash_murmur3_one_32(framebuffer_format_id, h);
-			h = hash_murmur3_one_32(vertex_format_id, h);
+			h = hash_murmur3_one_64((uint64_t)vertex_format_id, h);
 			h = hash_murmur3_one_32(render_primitive, h);
 			h = hash_murmur3_one_32(render_primitive, h);
 			h = hash_murmur3_one_32(shader_specialization.packed_0, h);
 			h = hash_murmur3_one_32(shader_specialization.packed_0, h);
 			h = hash_murmur3_one_32(lcd_blend, h);
 			h = hash_murmur3_one_32(lcd_blend, h);

+ 15 - 15
servers/rendering/renderer_scene_cull.cpp

@@ -359,7 +359,7 @@ void RendererSceneCull::_instance_unpair(Instance *p_A, Instance *p_B) {
 
 
 			if (geom->lightmap_captures.is_empty() && A->scenario && A->array_index >= 0) {
 			if (geom->lightmap_captures.is_empty() && A->scenario && A->array_index >= 0) {
 				InstanceData &idata = A->scenario->instance_data[A->array_index];
 				InstanceData &idata = A->scenario->instance_data[A->array_index];
-				idata.flags &= ~uint32_t(InstanceData::FLAG_LIGHTMAP_CAPTURE);
+				idata.flags &= ~InstanceData::FLAG_LIGHTMAP_CAPTURE;
 			}
 			}
 
 
 			lightmap_data->geometries.erase(A);
 			lightmap_data->geometries.erase(A);
@@ -532,7 +532,7 @@ void RendererSceneCull::_instance_update_mesh_instance(Instance *p_instance) con
 			if (p_instance->mesh_instance.is_valid()) {
 			if (p_instance->mesh_instance.is_valid()) {
 				idata.flags |= InstanceData::FLAG_USES_MESH_INSTANCE;
 				idata.flags |= InstanceData::FLAG_USES_MESH_INSTANCE;
 			} else {
 			} else {
-				idata.flags &= ~uint32_t(InstanceData::FLAG_USES_MESH_INSTANCE);
+				idata.flags &= ~InstanceData::FLAG_USES_MESH_INSTANCE;
 			}
 			}
 		}
 		}
 	}
 	}
@@ -1232,7 +1232,7 @@ void RendererSceneCull::instance_set_ignore_culling(RID p_instance, bool p_enabl
 		if (instance->ignore_all_culling) {
 		if (instance->ignore_all_culling) {
 			idata.flags |= InstanceData::FLAG_IGNORE_ALL_CULLING;
 			idata.flags |= InstanceData::FLAG_IGNORE_ALL_CULLING;
 		} else {
 		} else {
-			idata.flags &= ~uint32_t(InstanceData::FLAG_IGNORE_ALL_CULLING);
+			idata.flags &= ~InstanceData::FLAG_IGNORE_ALL_CULLING;
 		}
 		}
 	}
 	}
 }
 }
@@ -1324,7 +1324,7 @@ void RendererSceneCull::instance_geometry_set_flag(RID p_instance, RS::InstanceF
 				if (instance->baked_light) {
 				if (instance->baked_light) {
 					idata.flags |= InstanceData::FLAG_USES_BAKED_LIGHT;
 					idata.flags |= InstanceData::FLAG_USES_BAKED_LIGHT;
 				} else {
 				} else {
-					idata.flags &= ~uint32_t(InstanceData::FLAG_USES_BAKED_LIGHT);
+					idata.flags &= ~InstanceData::FLAG_USES_BAKED_LIGHT;
 				}
 				}
 			}
 			}
 
 
@@ -1364,7 +1364,7 @@ void RendererSceneCull::instance_geometry_set_flag(RID p_instance, RS::InstanceF
 				if (instance->redraw_if_visible) {
 				if (instance->redraw_if_visible) {
 					idata.flags |= InstanceData::FLAG_REDRAW_IF_VISIBLE;
 					idata.flags |= InstanceData::FLAG_REDRAW_IF_VISIBLE;
 				} else {
 				} else {
-					idata.flags &= ~uint32_t(InstanceData::FLAG_REDRAW_IF_VISIBLE);
+					idata.flags &= ~InstanceData::FLAG_REDRAW_IF_VISIBLE;
 				}
 				}
 			}
 			}
 
 
@@ -1377,7 +1377,7 @@ void RendererSceneCull::instance_geometry_set_flag(RID p_instance, RS::InstanceF
 				if (instance->ignore_occlusion_culling) {
 				if (instance->ignore_occlusion_culling) {
 					idata.flags |= InstanceData::FLAG_IGNORE_OCCLUSION_CULLING;
 					idata.flags |= InstanceData::FLAG_IGNORE_OCCLUSION_CULLING;
 				} else {
 				} else {
-					idata.flags &= ~uint32_t(InstanceData::FLAG_IGNORE_OCCLUSION_CULLING);
+					idata.flags &= ~InstanceData::FLAG_IGNORE_OCCLUSION_CULLING;
 				}
 				}
 			}
 			}
 		} break;
 		} break;
@@ -1398,13 +1398,13 @@ void RendererSceneCull::instance_geometry_set_cast_shadows_setting(RID p_instanc
 		if (instance->cast_shadows != RS::SHADOW_CASTING_SETTING_OFF) {
 		if (instance->cast_shadows != RS::SHADOW_CASTING_SETTING_OFF) {
 			idata.flags |= InstanceData::FLAG_CAST_SHADOWS;
 			idata.flags |= InstanceData::FLAG_CAST_SHADOWS;
 		} else {
 		} else {
-			idata.flags &= ~uint32_t(InstanceData::FLAG_CAST_SHADOWS);
+			idata.flags &= ~InstanceData::FLAG_CAST_SHADOWS;
 		}
 		}
 
 
 		if (instance->cast_shadows == RS::SHADOW_CASTING_SETTING_SHADOWS_ONLY) {
 		if (instance->cast_shadows == RS::SHADOW_CASTING_SETTING_SHADOWS_ONLY) {
 			idata.flags |= InstanceData::FLAG_CAST_SHADOWS_ONLY;
 			idata.flags |= InstanceData::FLAG_CAST_SHADOWS_ONLY;
 		} else {
 		} else {
-			idata.flags &= ~uint32_t(InstanceData::FLAG_CAST_SHADOWS_ONLY);
+			idata.flags &= ~InstanceData::FLAG_CAST_SHADOWS_ONLY;
 		}
 		}
 	}
 	}
 
 
@@ -2967,7 +2967,7 @@ void RendererSceneCull::_scene_cull(CullData &cull_data, InstanceCullResult &cul
 							}
 							}
 							cull_data.cull->lock.unlock();
 							cull_data.cull->lock.unlock();
 
 
-							idata.flags &= ~uint32_t(InstanceData::FLAG_REFLECTION_PROBE_DIRTY);
+							idata.flags &= ~InstanceData::FLAG_REFLECTION_PROBE_DIRTY;
 						}
 						}
 
 
 						if (RSG::light_storage->reflection_probe_instance_has_reflection(RID::from_uint64(idata.instance_data_rid))) {
 						if (RSG::light_storage->reflection_probe_instance_has_reflection(RID::from_uint64(idata.instance_data_rid))) {
@@ -3056,7 +3056,7 @@ void RendererSceneCull::_scene_cull(CullData &cull_data, InstanceCullResult &cul
 
 
 						ERR_FAIL_NULL(geom->geometry_instance);
 						ERR_FAIL_NULL(geom->geometry_instance);
 						geom->geometry_instance->pair_light_instances(instance_pair_buffer, idx);
 						geom->geometry_instance->pair_light_instances(instance_pair_buffer, idx);
-						idata.flags &= ~uint32_t(InstanceData::FLAG_GEOM_LIGHTING_DIRTY);
+						idata.flags &= ~InstanceData::FLAG_GEOM_LIGHTING_DIRTY;
 					}
 					}
 
 
 					if (idata.flags & InstanceData::FLAG_GEOM_PROJECTOR_SOFTSHADOW_DIRTY) {
 					if (idata.flags & InstanceData::FLAG_GEOM_PROJECTOR_SOFTSHADOW_DIRTY) {
@@ -3066,7 +3066,7 @@ void RendererSceneCull::_scene_cull(CullData &cull_data, InstanceCullResult &cul
 						cull_data.cull->lock.lock();
 						cull_data.cull->lock.lock();
 						geom->geometry_instance->set_softshadow_projector_pairing(geom->softshadow_count > 0, geom->projector_count > 0);
 						geom->geometry_instance->set_softshadow_projector_pairing(geom->softshadow_count > 0, geom->projector_count > 0);
 						cull_data.cull->lock.unlock();
 						cull_data.cull->lock.unlock();
-						idata.flags &= ~uint32_t(InstanceData::FLAG_GEOM_PROJECTOR_SOFTSHADOW_DIRTY);
+						idata.flags &= ~InstanceData::FLAG_GEOM_PROJECTOR_SOFTSHADOW_DIRTY;
 					}
 					}
 
 
 					if (geometry_instance_pair_mask & (1 << RS::INSTANCE_REFLECTION_PROBE) && (idata.flags & InstanceData::FLAG_GEOM_REFLECTION_DIRTY)) {
 					if (geometry_instance_pair_mask & (1 << RS::INSTANCE_REFLECTION_PROBE) && (idata.flags & InstanceData::FLAG_GEOM_REFLECTION_DIRTY)) {
@@ -3084,7 +3084,7 @@ void RendererSceneCull::_scene_cull(CullData &cull_data, InstanceCullResult &cul
 
 
 						ERR_FAIL_NULL(geom->geometry_instance);
 						ERR_FAIL_NULL(geom->geometry_instance);
 						geom->geometry_instance->pair_reflection_probe_instances(instance_pair_buffer, idx);
 						geom->geometry_instance->pair_reflection_probe_instances(instance_pair_buffer, idx);
-						idata.flags &= ~uint32_t(InstanceData::FLAG_GEOM_REFLECTION_DIRTY);
+						idata.flags &= ~InstanceData::FLAG_GEOM_REFLECTION_DIRTY;
 					}
 					}
 
 
 					if (geometry_instance_pair_mask & (1 << RS::INSTANCE_DECAL) && (idata.flags & InstanceData::FLAG_GEOM_DECAL_DIRTY)) {
 					if (geometry_instance_pair_mask & (1 << RS::INSTANCE_DECAL) && (idata.flags & InstanceData::FLAG_GEOM_DECAL_DIRTY)) {
@@ -3103,7 +3103,7 @@ void RendererSceneCull::_scene_cull(CullData &cull_data, InstanceCullResult &cul
 						ERR_FAIL_NULL(geom->geometry_instance);
 						ERR_FAIL_NULL(geom->geometry_instance);
 						geom->geometry_instance->pair_decal_instances(instance_pair_buffer, idx);
 						geom->geometry_instance->pair_decal_instances(instance_pair_buffer, idx);
 
 
-						idata.flags &= ~uint32_t(InstanceData::FLAG_GEOM_DECAL_DIRTY);
+						idata.flags &= ~InstanceData::FLAG_GEOM_DECAL_DIRTY;
 					}
 					}
 
 
 					if (idata.flags & InstanceData::FLAG_GEOM_VOXEL_GI_DIRTY) {
 					if (idata.flags & InstanceData::FLAG_GEOM_VOXEL_GI_DIRTY) {
@@ -3121,7 +3121,7 @@ void RendererSceneCull::_scene_cull(CullData &cull_data, InstanceCullResult &cul
 						ERR_FAIL_NULL(geom->geometry_instance);
 						ERR_FAIL_NULL(geom->geometry_instance);
 						geom->geometry_instance->pair_voxel_gi_instances(instance_pair_buffer, idx);
 						geom->geometry_instance->pair_voxel_gi_instances(instance_pair_buffer, idx);
 
 
-						idata.flags &= ~uint32_t(InstanceData::FLAG_GEOM_VOXEL_GI_DIRTY);
+						idata.flags &= ~InstanceData::FLAG_GEOM_VOXEL_GI_DIRTY;
 					}
 					}
 
 
 					if ((idata.flags & InstanceData::FLAG_LIGHTMAP_CAPTURE) && idata.instance->last_frame_pass != frame_number && !idata.instance->lightmap_target_sh.is_empty() && !idata.instance->lightmap_sh.is_empty()) {
 					if ((idata.flags & InstanceData::FLAG_LIGHTMAP_CAPTURE) && idata.instance->last_frame_pass != frame_number && !idata.instance->lightmap_target_sh.is_empty() && !idata.instance->lightmap_sh.is_empty()) {
@@ -3981,7 +3981,7 @@ void RendererSceneCull::render_probes() {
 				ERR_FAIL_NULL(geom->geometry_instance);
 				ERR_FAIL_NULL(geom->geometry_instance);
 				geom->geometry_instance->pair_voxel_gi_instances(instance_pair_buffer, idx);
 				geom->geometry_instance->pair_voxel_gi_instances(instance_pair_buffer, idx);
 
 
-				ins->scenario->instance_data[ins->array_index].flags &= ~uint32_t(InstanceData::FLAG_GEOM_VOXEL_GI_DIRTY);
+				ins->scenario->instance_data[ins->array_index].flags &= ~InstanceData::FLAG_GEOM_VOXEL_GI_DIRTY;
 			}
 			}
 
 
 			ERR_FAIL_NULL(geom->geometry_instance);
 			ERR_FAIL_NULL(geom->geometry_instance);

+ 1 - 1
servers/rendering/renderer_scene_cull.h

@@ -259,7 +259,7 @@ public:
 	struct InstanceData {
 	struct InstanceData {
 		// Store instance pointer as well as common instance processing information,
 		// Store instance pointer as well as common instance processing information,
 		// to make processing more cache friendly.
 		// to make processing more cache friendly.
-		enum Flags {
+		enum Flags : uint32_t {
 			FLAG_BASE_TYPE_MASK = 0xFF,
 			FLAG_BASE_TYPE_MASK = 0xFF,
 			FLAG_CAST_SHADOWS = (1 << 8),
 			FLAG_CAST_SHADOWS = (1 << 8),
 			FLAG_CAST_SHADOWS_ONLY = (1 << 9),
 			FLAG_CAST_SHADOWS_ONLY = (1 << 9),

+ 15 - 11
servers/rendering/rendering_context_driver.h

@@ -52,16 +52,20 @@ public:
 	void window_destroy(DisplayServer::WindowID p_window);
 	void window_destroy(DisplayServer::WindowID p_window);
 
 
 public:
 public:
-	enum Vendor {
-		VENDOR_UNKNOWN = 0x0,
-		VENDOR_AMD = 0x1002,
-		VENDOR_IMGTEC = 0x1010,
-		VENDOR_APPLE = 0x106B,
-		VENDOR_NVIDIA = 0x10DE,
-		VENDOR_ARM = 0x13B5,
-		VENDOR_MICROSOFT = 0x1414,
-		VENDOR_QUALCOMM = 0x5143,
-		VENDOR_INTEL = 0x8086
+	// Not an enum as these values are matched against values returned by
+	// the various drivers, which report them in uint32_t. Casting to an
+	// enum value is dangerous in this case as we don't actually know what
+	// range the driver is reporting a value in.
+	struct Vendor {
+		constexpr static uint32_t VENDOR_UNKNOWN = 0x0;
+		constexpr static uint32_t VENDOR_AMD = 0x1002;
+		constexpr static uint32_t VENDOR_IMGTEC = 0x1010;
+		constexpr static uint32_t VENDOR_APPLE = 0x106B;
+		constexpr static uint32_t VENDOR_NVIDIA = 0x10DE;
+		constexpr static uint32_t VENDOR_ARM = 0x13B5;
+		constexpr static uint32_t VENDOR_MICROSOFT = 0x1414;
+		constexpr static uint32_t VENDOR_QUALCOMM = 0x5143;
+		constexpr static uint32_t VENDOR_INTEL = 0x8086;
 	};
 	};
 
 
 	enum DeviceType {
 	enum DeviceType {
@@ -79,7 +83,7 @@ public:
 
 
 	struct Device {
 	struct Device {
 		String name = "Unknown";
 		String name = "Unknown";
-		Vendor vendor = VENDOR_UNKNOWN;
+		uint32_t vendor = Vendor::VENDOR_UNKNOWN;
 		DeviceType type = DEVICE_TYPE_OTHER;
 		DeviceType type = DEVICE_TYPE_OTHER;
 		Workarounds workarounds;
 		Workarounds workarounds;
 	};
 	};

+ 9 - 9
servers/rendering/rendering_device.cpp

@@ -53,21 +53,21 @@
 
 
 static String _get_device_vendor_name(const RenderingContextDriver::Device &p_device) {
 static String _get_device_vendor_name(const RenderingContextDriver::Device &p_device) {
 	switch (p_device.vendor) {
 	switch (p_device.vendor) {
-		case RenderingContextDriver::VENDOR_AMD:
+		case RenderingContextDriver::Vendor::VENDOR_AMD:
 			return "AMD";
 			return "AMD";
-		case RenderingContextDriver::VENDOR_IMGTEC:
+		case RenderingContextDriver::Vendor::VENDOR_IMGTEC:
 			return "ImgTec";
 			return "ImgTec";
-		case RenderingContextDriver::VENDOR_APPLE:
+		case RenderingContextDriver::Vendor::VENDOR_APPLE:
 			return "Apple";
 			return "Apple";
-		case RenderingContextDriver::VENDOR_NVIDIA:
+		case RenderingContextDriver::Vendor::VENDOR_NVIDIA:
 			return "NVIDIA";
 			return "NVIDIA";
-		case RenderingContextDriver::VENDOR_ARM:
+		case RenderingContextDriver::Vendor::VENDOR_ARM:
 			return "ARM";
 			return "ARM";
-		case RenderingContextDriver::VENDOR_MICROSOFT:
+		case RenderingContextDriver::Vendor::VENDOR_MICROSOFT:
 			return "Microsoft";
 			return "Microsoft";
-		case RenderingContextDriver::VENDOR_QUALCOMM:
+		case RenderingContextDriver::Vendor::VENDOR_QUALCOMM:
 			return "Qualcomm";
 			return "Qualcomm";
-		case RenderingContextDriver::VENDOR_INTEL:
+		case RenderingContextDriver::Vendor::VENDOR_INTEL:
 			return "Intel";
 			return "Intel";
 		default:
 		default:
 			return "Unknown";
 			return "Unknown";
@@ -2371,7 +2371,7 @@ RDD::RenderPassID RenderingDevice::_render_pass_create(RenderingDeviceDriver *p_
 	}
 	}
 
 
 	LocalVector<RDD::Attachment> attachments;
 	LocalVector<RDD::Attachment> attachments;
-	LocalVector<int> attachment_remap;
+	LocalVector<uint32_t> attachment_remap;
 
 
 	for (int i = 0; i < p_attachments.size(); i++) {
 	for (int i = 0; i < p_attachments.size(); i++) {
 		if (p_attachments[i].usage_flags == AttachmentFormat::UNUSED_ATTACHMENT) {
 		if (p_attachments[i].usage_flags == AttachmentFormat::UNUSED_ATTACHMENT) {