Browse Source

Add DX12 device creation

Panagiotis Christopoulos Charitos 1 year ago
parent
commit
c9c2e7c307

+ 3 - 1
AnKi/Gr/CMakeLists.txt

@@ -48,6 +48,8 @@ elseif(DIRECTX)
 
 	set(backend_sources  ${backend_sources} ${dxsources})
 	set(backend_headers ${backend_headers} ${dxheaders})
+
+	set(extra_libs ${extra_libs} d3d12 dxgi)
 endif()
 
 # Have 2 libraries. The AnKiGrCommon is the bare minimum for the AnKiShaderCompiler to work. Don't have
@@ -58,7 +60,7 @@ target_compile_definitions(AnKiGrCommon PRIVATE -DANKI_SOURCE_FILE)
 target_link_libraries(AnKiGrCommon AnKiUtil) # Only depend on Util
 
 if(ANKI_DLSS)
-	set(extra_libs AnKiNgx)
+	set(extra_libs ${extra_libs} AnKiNgx)
 endif()
 
 add_library(AnKiGr ${backend_sources} ${backend_headers})

+ 10 - 0
AnKi/Gr/Common.h

@@ -834,6 +834,16 @@ enum class VrsRate : U8
 };
 ANKI_ENUM_ALLOW_NUMERIC_OPERATIONS(VrsRate)
 
+/// Backend-agnostic GPU queue types (used by both the Vulkan and D3D backends).
+enum class GpuQueueType : U8
+{
+	kGeneral,
+	kCompute,
+
+	kCount,
+	kFirst = 0
+};
+ANKI_ENUM_ALLOW_NUMERIC_OPERATIONS(GpuQueueType)
+
 /// Clear values for textures or attachments.
 class ClearValue
 {

+ 1 - 6
AnKi/Gr/D3D/D3DCommandBuffer.cpp

@@ -21,7 +21,7 @@ CommandBuffer* CommandBuffer::newInstance(const CommandBufferInitInfo& init)
 	return impl;
 }
 
-void CommandBuffer::flush(ConstWeakArray<FencePtr> waitFences, FencePtr* signalFence)
+void CommandBuffer::endRecording()
 {
 	ANKI_ASSERT(!"TODO");
 }
@@ -328,11 +328,6 @@ void CommandBuffer::endPipelineQuery(PipelineQuery* query)
 	ANKI_ASSERT(!"TODO");
 }
 
-void CommandBuffer::pushSecondLevelCommandBuffers(ConstWeakArray<CommandBuffer*> cmdbs)
-{
-	ANKI_ASSERT(!"TODO");
-}
-
 void CommandBuffer::resetTimestampQueries(ConstWeakArray<TimestampQuery*> queries)
 {
 	ANKI_ASSERT(!"TODO");

+ 55 - 0
AnKi/Gr/D3D/D3DCommon.h

@@ -7,11 +7,24 @@
 
 #include <AnKi/Gr/Common.h>
 #include <AnKi/Util/Logger.h>
+#include <string>
+#include <locale>
+#include <codecvt>
 
+#ifndef WIN32_LEAN_AND_MEAN
+#	define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers.
+#endif
+
+#include <windows.h>
 #include <d3d12.h>
 #include <dxgi1_6.h>
+#include <D3Dcompiler.h>
+#include <DirectXMath.h>
+#include <wrl.h>
 #include <AnKi/Util/CleanupWindows.h>
 
+using Microsoft::WRL::ComPtr;
+
 namespace anki {
 
 #define ANKI_D3D_LOGI(...) ANKI_LOG("D3D", kNormal, __VA_ARGS__)
@@ -20,4 +33,46 @@ namespace anki {
 #define ANKI_D3D_LOGF(...) ANKI_LOG("D3D", kFatal, __VA_ARGS__)
 #define ANKI_D3D_LOGV(...) ANKI_LOG("D3D", kVerbose, __VA_ARGS__)
 
+#define ANKI_D3D_SELF(class_) class_& self = *static_cast<class_*>(this)
+#define ANKI_D3D_SELF_CONST(class_) const class_& self = *static_cast<const class_*>(this)
+
+#define ANKI_D3D_CHECKF(x) \
+	do \
+	{ \
+		HRESULT rez; \
+		if((rez = (x)) < 0) [[unlikely]] \
+		{ \
+			ANKI_D3D_LOGF("D3D function failed (HRESULT: %l): %s", rez, #x); \
+		} \
+	} while(0)
+
+#define ANKI_D3D_CHECK(x) \
+	do \
+	{ \
+		HRESULT rez; \
+		if((rez = (x)) < 0) [[unlikely]] \
+		{ \
+			ANKI_D3D_LOGE("D3D function failed (HRESULT: %l): %s", rez, #x); \
+			return Error::kFunctionFailed; \
+		} \
+	} while(0)
+
+/// Convert a wide (UTF-16) string to a UTF-8 std::string.
+/// Uses WideCharToMultiByte because std::codecvt_utf8/std::wstring_convert are deprecated since C++17.
+inline std::string ws2s(const std::wstring& wstr)
+{
+	std::string out;
+	if(!wstr.empty())
+	{
+		// First call computes the required byte count, second call performs the conversion.
+		const int len = WideCharToMultiByte(CP_UTF8, 0, wstr.c_str(), int(wstr.length()), nullptr, 0, nullptr, nullptr);
+		out.resize(size_t(len));
+		WideCharToMultiByte(CP_UTF8, 0, wstr.c_str(), int(wstr.length()), out.data(), len, nullptr, nullptr);
+	}
+	return out;
+}
+
+/// Release a COM interface (if non-null) and reset the pointer to nullptr so double-release is harmless.
+template<typename T>
+void safeRelease(T*& p)
+{
+	if(p)
+	{
+		p->Release();
+		p = nullptr;
+	}
+}
+
 } // end namespace anki

+ 175 - 2
AnKi/Gr/D3D/D3DGrManager.cpp

@@ -18,10 +18,21 @@
 #include <AnKi/Gr/D3D/D3DGrUpscaler.h>
 #include <AnKi/Gr/D3D/D3DTimestampQuery.h>
 
+#include <AnKi/ShaderCompiler/Common.h>
+
+#if ANKI_WINDOWING_SYSTEM_SDL
+#	include <AnKi/Window/NativeWindowSdl.h>
+#	include <SDL_syswm.h>
+#endif
+
 namespace anki {
 
 BoolCVar g_validationCVar(CVarSubsystem::kGr, "Validation", false, "Enable or not validation");
+static BoolCVar g_gpuValidationCVar(CVarSubsystem::kGr, "GpuValidation", false, "Enable or not GPU validation");
 BoolCVar g_vsyncCVar(CVarSubsystem::kGr, "Vsync", false, "Enable or not vsync");
+BoolCVar g_debugMarkersCVar(CVarSubsystem::kGr, "DebugMarkers", false, "Enable or not debug markers");
+BoolCVar g_meshShadersCVar(CVarSubsystem::kGr, "MeshShaders", false, "Enable or not mesh shaders");
+static NumericCVar<U8> g_deviceCVar(CVarSubsystem::kGr, "Device", 0, 0, 16, "Choose an available device. Devices are sorted by performance");
 
 template<>
 template<>
@@ -60,8 +71,8 @@ GrManager::~GrManager()
 
 Error GrManager::init(GrManagerInitInfo& inf)
 {
-	ANKI_ASSERT(!"TODO");
-	return Error::kNone;
+	ANKI_D3D_SELF(GrManagerImpl);
+	return self.initInternal(inf);
 }
 
 TexturePtr GrManager::acquireNextPresentableTexture()
@@ -120,4 +131,166 @@ ANKI_NEW_GR_OBJECT(GrUpscaler)
 #undef ANKI_NEW_GR_OBJECT
 #undef ANKI_NEW_GR_OBJECT_NO_INIT_INFO
 
+GrManagerImpl::~GrManagerImpl()
+{
+	// All teardown is delegated to destroy()
+	destroy();
+}
+
+void GrManager::submit(WeakArray<CommandBuffer*> cmdbs, WeakArray<Fence*> waitFences, FencePtr* signalFence)
+{
+	// Unimplemented placeholder for the D3D backend
+	ANKI_ASSERT(!"TODO");
+}
+
+/// Initialize the D3D backend: debug layers, adapter selection, device, queues and the swapchain.
+Error GrManagerImpl::initInternal(const GrManagerInitInfo& init)
+{
+	ANKI_D3D_LOGI("Initializing D3D backend");
+
+	GrMemoryPool::allocateSingleton(init.m_allocCallback, init.m_allocCallbackUserData);
+
+	// Validation
+	UINT dxgiFactoryFlags = 0;
+	if(g_validationCVar.get())
+	{
+		ComPtr<ID3D12Debug> debugInterface;
+		ANKI_D3D_CHECK(D3D12GetDebugInterface(IID_PPV_ARGS(&debugInterface)));
+
+		// Without this call the debug layer is fetched but never actually enabled
+		debugInterface->EnableDebugLayer();
+
+		dxgiFactoryFlags |= DXGI_CREATE_FACTORY_DEBUG;
+
+		if(g_gpuValidationCVar.get())
+		{
+			// GPU-based validation requires the debug layer to be enabled first
+			ComPtr<ID3D12Debug1> debugInterface1;
+			ANKI_D3D_CHECK(debugInterface->QueryInterface(IID_PPV_ARGS(&debugInterface1)));
+
+			debugInterface1->SetEnableGPUBasedValidation(true);
+		}
+	}
+
+	ComPtr<IDXGIFactory4> factory2;
+	ANKI_D3D_CHECK(CreateDXGIFactory2(dxgiFactoryFlags, IID_PPV_ARGS(&factory2)));
+	ComPtr<IDXGIFactory6> factory6;
+	ANKI_D3D_CHECK(factory2->QueryInterface(IID_PPV_ARGS(&factory6)));
+
+	// Get adapters, sorted by performance preference
+	struct Adapter
+	{
+		ComPtr<IDXGIAdapter1> m_adapter;
+		DXGI_ADAPTER_DESC1 m_descr;
+	};
+
+	GrDynamicArray<Adapter> adapters;
+	ComPtr<IDXGIAdapter1> pAdapter;
+	UINT adapterIdx = 0;
+	while(factory6->EnumAdapterByGpuPreference(adapterIdx, DXGI_GPU_PREFERENCE_HIGH_PERFORMANCE, IID_PPV_ARGS(&pAdapter)) != DXGI_ERROR_NOT_FOUND)
+	{
+		Adapter& a = *adapters.emplaceBack();
+		a.m_adapter = pAdapter;
+		pAdapter->GetDesc1(&a.m_descr);
+
+		++adapterIdx;
+	}
+
+	if(adapters.getSize() == 0) [[unlikely]]
+	{
+		// Bail out now: getSize() - 1 below would underflow (U32) on an empty array
+		ANKI_D3D_LOGE("Couldn't find any D3D12 adapters");
+		return Error::kFunctionFailed;
+	}
+
+	const U32 chosenPhysDevIdx = min<U32>(g_deviceCVar.get(), adapters.getSize() - 1);
+
+	ANKI_D3D_LOGI("Physical devices:");
+	for(U32 i = 0; i < adapters.getSize(); ++i)
+	{
+		ANKI_D3D_LOGI((i == chosenPhysDevIdx) ? "\t(Selected) %s" : "\t%s", ws2s(&adapters[i].m_descr.Description[0]).c_str());
+	}
+
+	// Find vendor. Subgroup sizes are the per-vendor wave widths (not queryable through DXGI)
+	switch(adapters[chosenPhysDevIdx].m_descr.VendorId)
+	{
+	case 0x13B5:
+		m_capabilities.m_gpuVendor = GpuVendor::kArm;
+		m_capabilities.m_minSubgroupSize = 16;
+		m_capabilities.m_maxSubgroupSize = 16;
+		break;
+	case 0x10DE:
+		m_capabilities.m_gpuVendor = GpuVendor::kNvidia;
+		m_capabilities.m_minSubgroupSize = 32;
+		m_capabilities.m_maxSubgroupSize = 32;
+		break;
+	case 0x1002:
+	case 0x1022:
+		m_capabilities.m_gpuVendor = GpuVendor::kAMD;
+		m_capabilities.m_minSubgroupSize = 32;
+		m_capabilities.m_maxSubgroupSize = 64;
+		break;
+	case 0x8086:
+		m_capabilities.m_gpuVendor = GpuVendor::kIntel;
+		m_capabilities.m_minSubgroupSize = 8;
+		m_capabilities.m_maxSubgroupSize = 32;
+		break;
+	case 0x5143:
+		m_capabilities.m_gpuVendor = GpuVendor::kQualcomm;
+		m_capabilities.m_minSubgroupSize = 64;
+		m_capabilities.m_maxSubgroupSize = 128;
+		break;
+	default:
+		m_capabilities.m_gpuVendor = GpuVendor::kUnknown;
+		// Choose something really low
+		m_capabilities.m_minSubgroupSize = 8;
+		m_capabilities.m_maxSubgroupSize = 8;
+	}
+	ANKI_D3D_LOGI("Vendor identified as %s", &kGPUVendorStrings[m_capabilities.m_gpuVendor][0]);
+
+	// Create device
+	ANKI_D3D_CHECK(D3D12CreateDevice(adapters[chosenPhysDevIdx].m_adapter.Get(), D3D_FEATURE_LEVEL_12_2, IID_PPV_ARGS(&m_device)));
+
+	// Create queues
+	{
+		D3D12_COMMAND_QUEUE_DESC queueDesc = {};
+		queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
+		queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
+		ANKI_D3D_CHECK(m_device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&m_queues[GpuQueueType::kGeneral])));
+		queueDesc.Type = D3D12_COMMAND_LIST_TYPE_COMPUTE;
+		ANKI_D3D_CHECK(m_device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&m_queues[GpuQueueType::kCompute])));
+	}
+
+	// Create swapchain
+	{
+		const NativeWindowSdl& window = static_cast<NativeWindowSdl&>(NativeWindow::getSingleton());
+
+		SDL_SysWMinfo wmInfo;
+		SDL_VERSION(&wmInfo.version);
+		if(!SDL_GetWindowWMInfo(window.m_sdlWindow, &wmInfo))
+		{
+			// On failure wmInfo is not populated and hwnd would be garbage
+			ANKI_D3D_LOGE("SDL_GetWindowWMInfo() failed: %s", SDL_GetError());
+			return Error::kFunctionFailed;
+		}
+		const HWND hwnd = wmInfo.info.win.window;
+
+		// Describe and create the swap chain.
+		DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
+		swapChainDesc.BufferCount = kMaxFramesInFlight;
+		swapChainDesc.Width = window.getWidth();
+		swapChainDesc.Height = window.getHeight();
+		swapChainDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
+		swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
+		swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
+		swapChainDesc.SampleDesc.Count = 1;
+
+		ComPtr<IDXGISwapChain1> swapChain;
+		// Swap chain needs the queue so that it can force a flush on it.
+		ANKI_D3D_CHECK(factory2->CreateSwapChainForHwnd(m_queues[GpuQueueType::kGeneral], hwnd, &swapChainDesc, nullptr, nullptr, &swapChain));
+
+		// Check the result: a failed QueryInterface would leave m_swapchain null
+		ANKI_D3D_CHECK(swapChain->QueryInterface(IID_PPV_ARGS(&m_swapchain)));
+
+		// Does not support fullscreen transitions.
+		factory2->MakeWindowAssociation(hwnd, DXGI_MWA_NO_ALT_ENTER);
+
+		m_backbufferIdx = m_swapchain->GetCurrentBackBufferIndex();
+	}
+
+	return Error::kNone;
+}
+
+void GrManagerImpl::destroy()
+{
+	ANKI_D3D_LOGI("Destroying D3D backend");
+
+	// The swapchain references the general queue, so release it before the queues and the device
+	safeRelease(m_swapchain);
+	for(GpuQueueType qtype : EnumIterable<GpuQueueType>())
+	{
+		safeRelease(m_queues[qtype]);
+	}
+	safeRelease(m_device);
+
+	GrMemoryPool::freeSingleton();
+}
+
 } // end namespace anki

+ 9 - 8
AnKi/Gr/D3D/D3DGrManager.h

@@ -21,17 +21,18 @@ public:
 	{
 	}
 
-	~GrManagerImpl()
-	{
-	}
+	~GrManagerImpl();
 
-	Error init(const GrManagerInitInfo& cfg)
-	{
-		ANKI_ASSERT(0);
-		return Error::kNone;
-	}
+	Error initInternal(const GrManagerInitInfo& cfg);
 
 private:
+	ID3D12Device* m_device = nullptr;
+	Array<ID3D12CommandQueue*, U32(GpuQueueType::kCount)> m_queues = {};
+	IDXGISwapChain3* m_swapchain = nullptr;
+
+	U32 m_backbufferIdx = 0;
+
+	void destroy();
 };
 /// @}
 

+ 9 - 9
AnKi/Gr/Vulkan/VkCommandBufferFactory.cpp

@@ -11,17 +11,17 @@ namespace anki {
 
 static StatCounter g_commandBufferCountStatVar(StatCategory::kMisc, "CommandBufferCount", StatFlag::kNone);
 
-static VulkanQueueType getQueueTypeFromCommandBufferFlags(CommandBufferFlag flags, const VulkanQueueFamilies& queueFamilies)
+static GpuQueueType getQueueTypeFromCommandBufferFlags(CommandBufferFlag flags, const VulkanQueueFamilies& queueFamilies)
 {
 	ANKI_ASSERT(!!(flags & CommandBufferFlag::kGeneralWork) ^ !!(flags & CommandBufferFlag::kComputeWork));
-	if(!(flags & CommandBufferFlag::kGeneralWork) && queueFamilies[VulkanQueueType::kCompute] != kMaxU32)
+	if(!(flags & CommandBufferFlag::kGeneralWork) && queueFamilies[GpuQueueType::kCompute] != kMaxU32)
 	{
-		return VulkanQueueType::kCompute;
+		return GpuQueueType::kCompute;
 	}
 	else
 	{
-		ANKI_ASSERT(queueFamilies[VulkanQueueType::kGeneral] != kMaxU32);
-		return VulkanQueueType::kGeneral;
+		ANKI_ASSERT(queueFamilies[GpuQueueType::kGeneral] != kMaxU32);
+		return GpuQueueType::kGeneral;
 	}
 }
 
@@ -65,7 +65,7 @@ void MicroCommandBuffer::reset()
 
 Error CommandBufferThreadAllocator::init()
 {
-	for(VulkanQueueType qtype : EnumIterable<VulkanQueueType>())
+	for(GpuQueueType qtype : EnumIterable<GpuQueueType>())
 	{
 		if(m_factory->m_queueFamilies[qtype] == kMaxU32)
 		{
@@ -87,7 +87,7 @@ void CommandBufferThreadAllocator::destroy()
 {
 	for(U32 smallBatch = 0; smallBatch < 2; ++smallBatch)
 	{
-		for(VulkanQueueType queue : EnumIterable<VulkanQueueType>())
+		for(GpuQueueType queue : EnumIterable<GpuQueueType>())
 		{
 			m_recyclers[smallBatch][queue].destroy();
 		}
@@ -108,7 +108,7 @@ Error CommandBufferThreadAllocator::newCommandBuffer(CommandBufferFlag cmdbFlags
 	ANKI_ASSERT(!!(cmdbFlags & CommandBufferFlag::kComputeWork) ^ !!(cmdbFlags & CommandBufferFlag::kGeneralWork));
 
 	const Bool smallBatch = !!(cmdbFlags & CommandBufferFlag::kSmallBatch);
-	const VulkanQueueType queue = getQueueTypeFromCommandBufferFlags(cmdbFlags, m_factory->m_queueFamilies);
+	const GpuQueueType queue = getQueueTypeFromCommandBufferFlags(cmdbFlags, m_factory->m_queueFamilies);
 
 	MicroObjectRecycler<MicroCommandBuffer>& recycler = m_recyclers[smallBatch][queue];
 
@@ -175,7 +175,7 @@ void CommandBufferFactory::destroy()
 	{
 		for(U32 smallBatch = 0; smallBatch < 2; ++smallBatch)
 		{
-			for(VulkanQueueType queue : EnumIterable<VulkanQueueType>())
+			for(GpuQueueType queue : EnumIterable<GpuQueueType>())
 			{
 				talloc->m_recyclers[smallBatch][queue].trimCache();
 			}

+ 5 - 5
AnKi/Gr/Vulkan/VkCommandBufferFactory.h

@@ -93,9 +93,9 @@ public:
 		return m_flags;
 	}
 
-	VulkanQueueType getVulkanQueueType() const
+	GpuQueueType getVulkanQueueType() const
 	{
-		ANKI_ASSERT(m_queue != VulkanQueueType::kCount);
+		ANKI_ASSERT(m_queue != GpuQueueType::kCount);
 		return m_queue;
 	}
 
@@ -118,7 +118,7 @@ private:
 	CommandBufferThreadAllocator* m_threadAlloc;
 	mutable Atomic<I32> m_refcount = {0};
 	CommandBufferFlag m_flags = CommandBufferFlag::kNone;
-	VulkanQueueType m_queue = VulkanQueueType::kCount;
+	GpuQueueType m_queue = GpuQueueType::kCount;
 
 	void reset();
 
@@ -184,13 +184,13 @@ public:
 private:
 	CommandBufferFactory* m_factory;
 	ThreadId m_tid;
-	Array<VkCommandPool, U(VulkanQueueType::kCount)> m_pools = {};
+	Array<VkCommandPool, U(GpuQueueType::kCount)> m_pools = {};
 
 #if ANKI_EXTRA_CHECKS
 	Atomic<U32> m_createdCmdbs = {0};
 #endif
 
-	Array2d<MicroObjectRecycler<MicroCommandBuffer>, 2, U(VulkanQueueType::kCount)> m_recyclers;
+	Array2d<MicroObjectRecycler<MicroCommandBuffer>, 2, U(GpuQueueType::kCount)> m_recyclers;
 };
 
 /// Command bufffer object recycler.

+ 1 - 11
AnKi/Gr/Vulkan/VkCommon.h

@@ -102,17 +102,7 @@ enum class VulkanExtensions : U64
 };
 ANKI_ENUM_ALLOW_NUMERIC_OPERATIONS(VulkanExtensions)
 
-enum class VulkanQueueType : U8
-{
-	kGeneral,
-	kCompute,
-
-	kCount,
-	kFirst = 0
-};
-ANKI_ENUM_ALLOW_NUMERIC_OPERATIONS(VulkanQueueType)
-
-using VulkanQueueFamilies = Array<U32, U32(VulkanQueueType::kCount)>;
+using VulkanQueueFamilies = Array<U32, U32(GpuQueueType::kCount)>;
 
 /// @name Constants
 /// @{

+ 9 - 9
AnKi/Gr/Vulkan/VkGrManager.cpp

@@ -308,7 +308,7 @@ Error GrManagerImpl::initInternal(const GrManagerInitInfo& init)
 	ANKI_CHECK(initSurface());
 	ANKI_CHECK(initDevice());
 
-	for(VulkanQueueType qtype : EnumIterable<VulkanQueueType>())
+	for(GpuQueueType qtype : EnumIterable<GpuQueueType>())
 	{
 		if(m_queueFamilyIndices[qtype] != kMaxU32)
 		{
@@ -775,17 +775,17 @@ Error GrManagerImpl::initDevice()
 		{
 			if((queueInfos[i].queueFlags & GENERAL_QUEUE_FLAGS) == GENERAL_QUEUE_FLAGS)
 			{
-				m_queueFamilyIndices[VulkanQueueType::kGeneral] = i;
+				m_queueFamilyIndices[GpuQueueType::kGeneral] = i;
 			}
 			else if((queueInfos[i].queueFlags & VK_QUEUE_COMPUTE_BIT) && !(queueInfos[i].queueFlags & VK_QUEUE_GRAPHICS_BIT))
 			{
 				// This must be the async compute
-				m_queueFamilyIndices[VulkanQueueType::kCompute] = i;
+				m_queueFamilyIndices[GpuQueueType::kCompute] = i;
 			}
 		}
 	}
 
-	if(m_queueFamilyIndices[VulkanQueueType::kGeneral] == kMaxU32)
+	if(m_queueFamilyIndices[GpuQueueType::kGeneral] == kMaxU32)
 	{
 		ANKI_VK_LOGE("Couldn't find a queue family with graphics+compute+transfer+present. "
 					 "Something is wrong");
@@ -794,10 +794,10 @@ Error GrManagerImpl::initDevice()
 
 	if(!g_asyncComputeCVar.get())
 	{
-		m_queueFamilyIndices[VulkanQueueType::kCompute] = kMaxU32;
+		m_queueFamilyIndices[GpuQueueType::kCompute] = kMaxU32;
 	}
 
-	if(m_queueFamilyIndices[VulkanQueueType::kCompute] == kMaxU32)
+	if(m_queueFamilyIndices[GpuQueueType::kCompute] == kMaxU32)
 	{
 		ANKI_VK_LOGW("Couldn't find an async compute queue. Will try to use the general queue instead");
 	}
@@ -807,13 +807,13 @@ Error GrManagerImpl::initDevice()
 	}
 
 	const F32 priority = 1.0f;
-	Array<VkDeviceQueueCreateInfo, U32(VulkanQueueType::kCount)> q = {};
+	Array<VkDeviceQueueCreateInfo, U32(GpuQueueType::kCount)> q = {};
 
 	VkDeviceCreateInfo ci = {};
 	ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
 	ci.pQueueCreateInfos = &q[0];
 
-	for(VulkanQueueType qtype : EnumIterable<VulkanQueueType>())
+	for(GpuQueueType qtype : EnumIterable<GpuQueueType>())
 	{
 		if(m_queueFamilyIndices[qtype] != kMaxU32)
 		{
@@ -1605,7 +1605,7 @@ void GrManagerImpl::flushCommandBuffers(WeakArray<MicroCommandBuffer*> cmdbs, Bo
 	// Command buffers
 	Array<VkCommandBuffer, 16> handles;
 	submit.pCommandBuffers = handles.getBegin();
-	VulkanQueueType queueType = cmdbs[0]->getVulkanQueueType();
+	GpuQueueType queueType = cmdbs[0]->getVulkanQueueType();
 	for(MicroCommandBuffer* cmdb : cmdbs)
 	{
 		handles[submit.commandBufferCount] = cmdb->getHandle();

+ 3 - 3
AnKi/Gr/Vulkan/VkGrManager.h

@@ -46,7 +46,7 @@ public:
 
 	ConstWeakArray<U32> getQueueFamilies() const
 	{
-		const Bool hasAsyncCompute = m_queueFamilyIndices[VulkanQueueType::kCompute] != kMaxU32;
+		const Bool hasAsyncCompute = m_queueFamilyIndices[GpuQueueType::kCompute] != kMaxU32;
 		return (hasAsyncCompute) ? m_queueFamilyIndices : ConstWeakArray<U32>(&m_queueFamilyIndices[0], 1);
 	}
 
@@ -216,7 +216,7 @@ private:
 	VulkanExtensions m_extensions = VulkanExtensions::kNone;
 	VkDevice m_device = VK_NULL_HANDLE;
 	VulkanQueueFamilies m_queueFamilyIndices = {kMaxU32, kMaxU32};
-	Array<VkQueue, U32(VulkanQueueType::kCount)> m_queues = {};
+	Array<VkQueue, U32(GpuQueueType::kCount)> m_queues = {};
 	Mutex m_globalMtx;
 
 	VkPhysicalDeviceProperties2 m_devProps = {};
@@ -239,7 +239,7 @@ private:
 		/// Signaled by the submit that renders to the default FB. Present waits for it.
 		MicroSemaphorePtr m_renderSemaphore;
 
-		VulkanQueueType m_queueWroteToSwapchainImage = VulkanQueueType::kCount;
+		GpuQueueType m_queueWroteToSwapchainImage = GpuQueueType::kCount;
 	};
 
 	VkSurfaceKHR m_surface = VK_NULL_HANDLE;

+ 14 - 1
Tests/Gr/Gr.cpp

@@ -428,7 +428,20 @@ static void setAccelerationStructureBarrier(CommandBufferPtr cmdb, AccelerationS
 	cmdb->setPipelineBarrier({}, {}, {&barrier, 1});
 }
 
-ANKI_TEST(Gr, GrManager){COMMON_BEGIN() COMMON_END()}
+ANKI_TEST(Gr, GrManager)
+{
+	// Smoke test: bring up and tear down the GrManager with validation enabled
+	g_validationCVar.set(true);
+
+	DefaultMemoryPool::allocateSingleton(allocAligned, nullptr);
+	g_win = createWindow();
+	ANKI_TEST_EXPECT_NO_ERR(Input::allocateSingleton().init());
+	g_gr = createGrManager(g_win);
+
+	// Free in reverse order of creation
+	GrManager::freeSingleton();
+	Input::freeSingleton();
+	NativeWindow::freeSingleton();
+	DefaultMemoryPool::freeSingleton();
+}
 
 ANKI_TEST(Gr, Shader)
 {