
Add Webgpu backend (#2132)

* WebGPU first draft (shaderc bin version 8)

* WebGPU scripts
Hugo Amnov 5 years ago
parent
revision
b62302631e

+ 144 - 0
3rdparty/webgpu/include/webgpu/EnumClassBitmasks.h

@@ -0,0 +1,144 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_ENUM_CLASS_BITMASKS_H_
+#define DAWN_ENUM_CLASS_BITMASKS_H_
+
+#include <type_traits>
+
+namespace wgpu {
+
+    template <typename T>
+    struct IsDawnBitmask {
+        static constexpr bool enable = false;
+    };
+
+    template <typename T, typename Enable = void>
+    struct LowerBitmask {
+        static constexpr bool enable = false;
+    };
+
+    template <typename T>
+    struct LowerBitmask<T, typename std::enable_if<IsDawnBitmask<T>::enable>::type> {
+        static constexpr bool enable = true;
+        using type = T;
+        constexpr static T Lower(T t) {
+            return t;
+        }
+    };
+
+    template <typename T>
+    struct BoolConvertible {
+        using Integral = typename std::underlying_type<T>::type;
+
+        constexpr BoolConvertible(Integral value) : value(value) {
+        }
+        constexpr operator bool() const {
+            return value != 0;
+        }
+        constexpr operator T() const {
+            return static_cast<T>(value);
+        }
+
+        Integral value;
+    };
+
+    template <typename T>
+    struct LowerBitmask<BoolConvertible<T>> {
+        static constexpr bool enable = true;
+        using type = T;
+        static constexpr type Lower(BoolConvertible<T> t) {
+            return t;
+        }
+    };
+
+    template <typename T1,
+              typename T2,
+              typename = typename std::enable_if<LowerBitmask<T1>::enable &&
+                                                 LowerBitmask<T2>::enable>::type>
+    constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator|(T1 left, T2 right) {
+        using T = typename LowerBitmask<T1>::type;
+        using Integral = typename std::underlying_type<T>::type;
+        return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) |
+               static_cast<Integral>(LowerBitmask<T2>::Lower(right));
+    }
+
+    template <typename T1,
+              typename T2,
+              typename = typename std::enable_if<LowerBitmask<T1>::enable &&
+                                                 LowerBitmask<T2>::enable>::type>
+    constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator&(T1 left, T2 right) {
+        using T = typename LowerBitmask<T1>::type;
+        using Integral = typename std::underlying_type<T>::type;
+        return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) &
+               static_cast<Integral>(LowerBitmask<T2>::Lower(right));
+    }
+
+    template <typename T1,
+              typename T2,
+              typename = typename std::enable_if<LowerBitmask<T1>::enable &&
+                                                 LowerBitmask<T2>::enable>::type>
+    constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator^(T1 left, T2 right) {
+        using T = typename LowerBitmask<T1>::type;
+        using Integral = typename std::underlying_type<T>::type;
+        return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) ^
+               static_cast<Integral>(LowerBitmask<T2>::Lower(right));
+    }
+
+    template <typename T1>
+    constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator~(T1 t) {
+        using T = typename LowerBitmask<T1>::type;
+        using Integral = typename std::underlying_type<T>::type;
+        return ~static_cast<Integral>(LowerBitmask<T1>::Lower(t));
+    }
+
+    template <typename T,
+              typename T2,
+              typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
+                                                 LowerBitmask<T2>::enable>::type>
+    constexpr T& operator&=(T& l, T2 right) {
+        T r = LowerBitmask<T2>::Lower(right);
+        l = l & r;
+        return l;
+    }
+
+    template <typename T,
+              typename T2,
+              typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
+                                                 LowerBitmask<T2>::enable>::type>
+    constexpr T& operator|=(T& l, T2 right) {
+        T r = LowerBitmask<T2>::Lower(right);
+        l = l | r;
+        return l;
+    }
+
+    template <typename T,
+              typename T2,
+              typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
+                                                 LowerBitmask<T2>::enable>::type>
+    constexpr T& operator^=(T& l, T2 right) {
+        T r = LowerBitmask<T2>::Lower(right);
+        l = l ^ r;
+        return l;
+    }
+
+    template <typename T>
+    constexpr bool HasZeroOrOneBits(T value) {
+        using Integral = typename std::underlying_type<T>::type;
+        return (static_cast<Integral>(value) & (static_cast<Integral>(value) - 1)) == 0;
+    }
+
+}  // namespace wgpu
+
+#endif  // DAWN_ENUM_CLASS_BITMASKS_H_
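EnumClassBitmasks.h only defines the templated operators; an enum class opts in by specializing wgpu::IsDawnBitmask for its type (the generated C++ bindings do the same for their flag types). Below is a minimal sketch of that opt-in pattern using a hypothetical MyUsage enum, which is not part of the header above; the include path assumes 3rdparty/webgpu/include is on the compiler's include path.

// Sketch only: MyUsage is a hypothetical flag enum used for illustration.
#include <cstdint>
#include "webgpu/EnumClassBitmasks.h"

namespace wgpu {

    enum class MyUsage : uint32_t {
        None  = 0x00000000,
        Read  = 0x00000001,
        Write = 0x00000002,
    };

    // Opt-in: enables the |, &, ^, ~ and compound-assignment operators for MyUsage.
    template <>
    struct IsDawnBitmask<MyUsage> {
        static constexpr bool enable = true;
    };

}  // namespace wgpu

int main() {
    // operator| returns a BoolConvertible that implicitly converts back to MyUsage.
    wgpu::MyUsage usage = wgpu::MyUsage::Read | wgpu::MyUsage::Write;

    // operator& also converts to bool, so flag tests read naturally.
    bool writable = static_cast<bool>(usage & wgpu::MyUsage::Write);
    return writable ? 0 : 1;
}

The BoolConvertible return type is what lets the same expression be used both as a combined flag value and as a truth test without C-style casts.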

+ 1207 - 0
3rdparty/webgpu/include/webgpu/webgpu.h

@@ -0,0 +1,1207 @@
+// BSD 3-Clause License
+//
+// Copyright (c) 2019, "WebGPU native" developers
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the copyright holder nor the names of its
+//    contributors may be used to endorse or promote products derived from
+//    this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef WEBGPU_H_
+#define WEBGPU_H_
+
+#if defined(WGPU_SHARED_LIBRARY)
+#    if defined(_WIN32)
+#        if defined(WGPU_IMPLEMENTATION)
+#            define WGPU_EXPORT __declspec(dllexport)
+#        else
+#            define WGPU_EXPORT __declspec(dllimport)
+#        endif
+#    else  // defined(_WIN32)
+#        if defined(WGPU_IMPLEMENTATION)
+#            define WGPU_EXPORT __attribute__((visibility("default")))
+#        else
+#            define WGPU_EXPORT
+#        endif
+#    endif  // defined(_WIN32)
+#else       // defined(WGPU_SHARED_LIBRARY)
+#    define WGPU_EXPORT
+#endif  // defined(WGPU_SHARED_LIBRARY)
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+
+#define WGPU_WHOLE_SIZE (0xffffffffffffffffULL)
+
+typedef uint32_t WGPUFlags;
+
+typedef struct WGPUBindGroupImpl* WGPUBindGroup;
+typedef struct WGPUBindGroupLayoutImpl* WGPUBindGroupLayout;
+typedef struct WGPUBufferImpl* WGPUBuffer;
+typedef struct WGPUCommandBufferImpl* WGPUCommandBuffer;
+typedef struct WGPUCommandEncoderImpl* WGPUCommandEncoder;
+typedef struct WGPUComputePassEncoderImpl* WGPUComputePassEncoder;
+typedef struct WGPUComputePipelineImpl* WGPUComputePipeline;
+typedef struct WGPUDeviceImpl* WGPUDevice;
+typedef struct WGPUFenceImpl* WGPUFence;
+typedef struct WGPUInstanceImpl* WGPUInstance;
+typedef struct WGPUPipelineLayoutImpl* WGPUPipelineLayout;
+typedef struct WGPUQueueImpl* WGPUQueue;
+typedef struct WGPURenderBundleImpl* WGPURenderBundle;
+typedef struct WGPURenderBundleEncoderImpl* WGPURenderBundleEncoder;
+typedef struct WGPURenderPassEncoderImpl* WGPURenderPassEncoder;
+typedef struct WGPURenderPipelineImpl* WGPURenderPipeline;
+typedef struct WGPUSamplerImpl* WGPUSampler;
+typedef struct WGPUShaderModuleImpl* WGPUShaderModule;
+typedef struct WGPUSurfaceImpl* WGPUSurface;
+typedef struct WGPUSwapChainImpl* WGPUSwapChain;
+typedef struct WGPUTextureImpl* WGPUTexture;
+typedef struct WGPUTextureViewImpl* WGPUTextureView;
+
+typedef enum WGPUAdapterType {
+    WGPUAdapterType_DiscreteGPU = 0x00000000,
+    WGPUAdapterType_IntegratedGPU = 0x00000001,
+    WGPUAdapterType_CPU = 0x00000002,
+    WGPUAdapterType_Unknown = 0x00000003,
+    WGPUAdapterType_Force32 = 0x7FFFFFFF
+} WGPUAdapterType;
+
+typedef enum WGPUAddressMode {
+    WGPUAddressMode_Repeat = 0x00000000,
+    WGPUAddressMode_MirrorRepeat = 0x00000001,
+    WGPUAddressMode_ClampToEdge = 0x00000002,
+    WGPUAddressMode_Force32 = 0x7FFFFFFF
+} WGPUAddressMode;
+
+typedef enum WGPUBackendType {
+    WGPUBackendType_Null = 0x00000000,
+    WGPUBackendType_D3D11 = 0x00000001,
+    WGPUBackendType_D3D12 = 0x00000002,
+    WGPUBackendType_Metal = 0x00000003,
+    WGPUBackendType_Vulkan = 0x00000004,
+    WGPUBackendType_OpenGL = 0x00000005,
+    WGPUBackendType_OpenGLES = 0x00000006,
+    WGPUBackendType_Force32 = 0x7FFFFFFF
+} WGPUBackendType;
+
+typedef enum WGPUBindingType {
+    WGPUBindingType_UniformBuffer = 0x00000000,
+    WGPUBindingType_StorageBuffer = 0x00000001,
+    WGPUBindingType_ReadonlyStorageBuffer = 0x00000002,
+    WGPUBindingType_Sampler = 0x00000003,
+    WGPUBindingType_ComparisonSampler = 0x00000004,
+    WGPUBindingType_SampledTexture = 0x00000005,
+    WGPUBindingType_StorageTexture = 0x00000006,
+    WGPUBindingType_ReadonlyStorageTexture = 0x00000007,
+    WGPUBindingType_WriteonlyStorageTexture = 0x00000008,
+    WGPUBindingType_Force32 = 0x7FFFFFFF
+} WGPUBindingType;
+
+typedef enum WGPUBlendFactor {
+    WGPUBlendFactor_Zero = 0x00000000,
+    WGPUBlendFactor_One = 0x00000001,
+    WGPUBlendFactor_SrcColor = 0x00000002,
+    WGPUBlendFactor_OneMinusSrcColor = 0x00000003,
+    WGPUBlendFactor_SrcAlpha = 0x00000004,
+    WGPUBlendFactor_OneMinusSrcAlpha = 0x00000005,
+    WGPUBlendFactor_DstColor = 0x00000006,
+    WGPUBlendFactor_OneMinusDstColor = 0x00000007,
+    WGPUBlendFactor_DstAlpha = 0x00000008,
+    WGPUBlendFactor_OneMinusDstAlpha = 0x00000009,
+    WGPUBlendFactor_SrcAlphaSaturated = 0x0000000A,
+    WGPUBlendFactor_BlendColor = 0x0000000B,
+    WGPUBlendFactor_OneMinusBlendColor = 0x0000000C,
+    WGPUBlendFactor_Force32 = 0x7FFFFFFF
+} WGPUBlendFactor;
+
+typedef enum WGPUBlendOperation {
+    WGPUBlendOperation_Add = 0x00000000,
+    WGPUBlendOperation_Subtract = 0x00000001,
+    WGPUBlendOperation_ReverseSubtract = 0x00000002,
+    WGPUBlendOperation_Min = 0x00000003,
+    WGPUBlendOperation_Max = 0x00000004,
+    WGPUBlendOperation_Force32 = 0x7FFFFFFF
+} WGPUBlendOperation;
+
+typedef enum WGPUBufferMapAsyncStatus {
+    WGPUBufferMapAsyncStatus_Success = 0x00000000,
+    WGPUBufferMapAsyncStatus_Error = 0x00000001,
+    WGPUBufferMapAsyncStatus_Unknown = 0x00000002,
+    WGPUBufferMapAsyncStatus_DeviceLost = 0x00000003,
+    WGPUBufferMapAsyncStatus_Force32 = 0x7FFFFFFF
+} WGPUBufferMapAsyncStatus;
+
+typedef enum WGPUCompareFunction {
+    WGPUCompareFunction_Undefined = 0x00000000,
+    WGPUCompareFunction_Never = 0x00000001,
+    WGPUCompareFunction_Less = 0x00000002,
+    WGPUCompareFunction_LessEqual = 0x00000003,
+    WGPUCompareFunction_Greater = 0x00000004,
+    WGPUCompareFunction_GreaterEqual = 0x00000005,
+    WGPUCompareFunction_Equal = 0x00000006,
+    WGPUCompareFunction_NotEqual = 0x00000007,
+    WGPUCompareFunction_Always = 0x00000008,
+    WGPUCompareFunction_Force32 = 0x7FFFFFFF
+} WGPUCompareFunction;
+
+typedef enum WGPUCullMode {
+    WGPUCullMode_None = 0x00000000,
+    WGPUCullMode_Front = 0x00000001,
+    WGPUCullMode_Back = 0x00000002,
+    WGPUCullMode_Force32 = 0x7FFFFFFF
+} WGPUCullMode;
+
+typedef enum WGPUErrorFilter {
+    WGPUErrorFilter_None = 0x00000000,
+    WGPUErrorFilter_Validation = 0x00000001,
+    WGPUErrorFilter_OutOfMemory = 0x00000002,
+    WGPUErrorFilter_Force32 = 0x7FFFFFFF
+} WGPUErrorFilter;
+
+typedef enum WGPUErrorType {
+    WGPUErrorType_NoError = 0x00000000,
+    WGPUErrorType_Validation = 0x00000001,
+    WGPUErrorType_OutOfMemory = 0x00000002,
+    WGPUErrorType_Unknown = 0x00000003,
+    WGPUErrorType_DeviceLost = 0x00000004,
+    WGPUErrorType_Force32 = 0x7FFFFFFF
+} WGPUErrorType;
+
+typedef enum WGPUFenceCompletionStatus {
+    WGPUFenceCompletionStatus_Success = 0x00000000,
+    WGPUFenceCompletionStatus_Error = 0x00000001,
+    WGPUFenceCompletionStatus_Unknown = 0x00000002,
+    WGPUFenceCompletionStatus_DeviceLost = 0x00000003,
+    WGPUFenceCompletionStatus_Force32 = 0x7FFFFFFF
+} WGPUFenceCompletionStatus;
+
+typedef enum WGPUFilterMode {
+    WGPUFilterMode_Nearest = 0x00000000,
+    WGPUFilterMode_Linear = 0x00000001,
+    WGPUFilterMode_Force32 = 0x7FFFFFFF
+} WGPUFilterMode;
+
+typedef enum WGPUFrontFace {
+    WGPUFrontFace_CCW = 0x00000000,
+    WGPUFrontFace_CW = 0x00000001,
+    WGPUFrontFace_Force32 = 0x7FFFFFFF
+} WGPUFrontFace;
+
+typedef enum WGPUIndexFormat {
+    WGPUIndexFormat_Uint16 = 0x00000000,
+    WGPUIndexFormat_Uint32 = 0x00000001,
+    WGPUIndexFormat_Force32 = 0x7FFFFFFF
+} WGPUIndexFormat;
+
+typedef enum WGPUInputStepMode {
+    WGPUInputStepMode_Vertex = 0x00000000,
+    WGPUInputStepMode_Instance = 0x00000001,
+    WGPUInputStepMode_Force32 = 0x7FFFFFFF
+} WGPUInputStepMode;
+
+typedef enum WGPULoadOp {
+    WGPULoadOp_Clear = 0x00000000,
+    WGPULoadOp_Load = 0x00000001,
+    WGPULoadOp_Force32 = 0x7FFFFFFF
+} WGPULoadOp;
+
+typedef enum WGPUPresentMode {
+    WGPUPresentMode_Immediate = 0x00000000,
+    WGPUPresentMode_Mailbox = 0x00000001,
+    WGPUPresentMode_Fifo = 0x00000002,
+    WGPUPresentMode_Force32 = 0x7FFFFFFF
+} WGPUPresentMode;
+
+typedef enum WGPUPrimitiveTopology {
+    WGPUPrimitiveTopology_PointList = 0x00000000,
+    WGPUPrimitiveTopology_LineList = 0x00000001,
+    WGPUPrimitiveTopology_LineStrip = 0x00000002,
+    WGPUPrimitiveTopology_TriangleList = 0x00000003,
+    WGPUPrimitiveTopology_TriangleStrip = 0x00000004,
+    WGPUPrimitiveTopology_Force32 = 0x7FFFFFFF
+} WGPUPrimitiveTopology;
+
+typedef enum WGPUSType {
+    WGPUSType_Invalid = 0x00000000,
+    WGPUSType_SurfaceDescriptorFromMetalLayer = 0x00000001,
+    WGPUSType_SurfaceDescriptorFromWindowsHWND = 0x00000002,
+    WGPUSType_SurfaceDescriptorFromXlib = 0x00000003,
+    WGPUSType_SurfaceDescriptorFromHTMLCanvasId = 0x00000004,
+    WGPUSType_ShaderModuleSPIRVDescriptor = 0x00000005,
+    WGPUSType_ShaderModuleWGSLDescriptor = 0x00000006,
+    WGPUSType_SamplerDescriptorDummyAnisotropicFiltering = 0x00000007,
+    WGPUSType_RenderPipelineDescriptorDummyExtension = 0x00000008,
+    WGPUSType_Force32 = 0x7FFFFFFF
+} WGPUSType;
+
+typedef enum WGPUStencilOperation {
+    WGPUStencilOperation_Keep = 0x00000000,
+    WGPUStencilOperation_Zero = 0x00000001,
+    WGPUStencilOperation_Replace = 0x00000002,
+    WGPUStencilOperation_Invert = 0x00000003,
+    WGPUStencilOperation_IncrementClamp = 0x00000004,
+    WGPUStencilOperation_DecrementClamp = 0x00000005,
+    WGPUStencilOperation_IncrementWrap = 0x00000006,
+    WGPUStencilOperation_DecrementWrap = 0x00000007,
+    WGPUStencilOperation_Force32 = 0x7FFFFFFF
+} WGPUStencilOperation;
+
+typedef enum WGPUStoreOp {
+    WGPUStoreOp_Store = 0x00000000,
+    WGPUStoreOp_Clear = 0x00000001,
+    WGPUStoreOp_Force32 = 0x7FFFFFFF
+} WGPUStoreOp;
+
+typedef enum WGPUTextureAspect {
+    WGPUTextureAspect_All = 0x00000000,
+    WGPUTextureAspect_StencilOnly = 0x00000001,
+    WGPUTextureAspect_DepthOnly = 0x00000002,
+    WGPUTextureAspect_Force32 = 0x7FFFFFFF
+} WGPUTextureAspect;
+
+typedef enum WGPUTextureComponentType {
+    WGPUTextureComponentType_Float = 0x00000000,
+    WGPUTextureComponentType_Sint = 0x00000001,
+    WGPUTextureComponentType_Uint = 0x00000002,
+    WGPUTextureComponentType_Force32 = 0x7FFFFFFF
+} WGPUTextureComponentType;
+
+typedef enum WGPUTextureDimension {
+    WGPUTextureDimension_1D = 0x00000000,
+    WGPUTextureDimension_2D = 0x00000001,
+    WGPUTextureDimension_3D = 0x00000002,
+    WGPUTextureDimension_Force32 = 0x7FFFFFFF
+} WGPUTextureDimension;
+
+typedef enum WGPUTextureFormat {
+    WGPUTextureFormat_Undefined = 0x00000000,
+    WGPUTextureFormat_R8Unorm = 0x00000001,
+    WGPUTextureFormat_R8Snorm = 0x00000002,
+    WGPUTextureFormat_R8Uint = 0x00000003,
+    WGPUTextureFormat_R8Sint = 0x00000004,
+    WGPUTextureFormat_R16Uint = 0x00000005,
+    WGPUTextureFormat_R16Sint = 0x00000006,
+    WGPUTextureFormat_R16Float = 0x00000007,
+    WGPUTextureFormat_RG8Unorm = 0x00000008,
+    WGPUTextureFormat_RG8Snorm = 0x00000009,
+    WGPUTextureFormat_RG8Uint = 0x0000000A,
+    WGPUTextureFormat_RG8Sint = 0x0000000B,
+    WGPUTextureFormat_R32Float = 0x0000000C,
+    WGPUTextureFormat_R32Uint = 0x0000000D,
+    WGPUTextureFormat_R32Sint = 0x0000000E,
+    WGPUTextureFormat_RG16Uint = 0x0000000F,
+    WGPUTextureFormat_RG16Sint = 0x00000010,
+    WGPUTextureFormat_RG16Float = 0x00000011,
+    WGPUTextureFormat_RGBA8Unorm = 0x00000012,
+    WGPUTextureFormat_RGBA8UnormSrgb = 0x00000013,
+    WGPUTextureFormat_RGBA8Snorm = 0x00000014,
+    WGPUTextureFormat_RGBA8Uint = 0x00000015,
+    WGPUTextureFormat_RGBA8Sint = 0x00000016,
+    WGPUTextureFormat_BGRA8Unorm = 0x00000017,
+    WGPUTextureFormat_BGRA8UnormSrgb = 0x00000018,
+    WGPUTextureFormat_RGB10A2Unorm = 0x00000019,
+    WGPUTextureFormat_RG11B10Float = 0x0000001A,
+    WGPUTextureFormat_RG32Float = 0x0000001B,
+    WGPUTextureFormat_RG32Uint = 0x0000001C,
+    WGPUTextureFormat_RG32Sint = 0x0000001D,
+    WGPUTextureFormat_RGBA16Uint = 0x0000001E,
+    WGPUTextureFormat_RGBA16Sint = 0x0000001F,
+    WGPUTextureFormat_RGBA16Float = 0x00000020,
+    WGPUTextureFormat_RGBA32Float = 0x00000021,
+    WGPUTextureFormat_RGBA32Uint = 0x00000022,
+    WGPUTextureFormat_RGBA32Sint = 0x00000023,
+    WGPUTextureFormat_Depth32Float = 0x00000024,
+    WGPUTextureFormat_Depth24Plus = 0x00000025,
+    WGPUTextureFormat_Depth24PlusStencil8 = 0x00000026,
+    WGPUTextureFormat_BC1RGBAUnorm = 0x00000027,
+    WGPUTextureFormat_BC1RGBAUnormSrgb = 0x00000028,
+    WGPUTextureFormat_BC2RGBAUnorm = 0x00000029,
+    WGPUTextureFormat_BC2RGBAUnormSrgb = 0x0000002A,
+    WGPUTextureFormat_BC3RGBAUnorm = 0x0000002B,
+    WGPUTextureFormat_BC3RGBAUnormSrgb = 0x0000002C,
+    WGPUTextureFormat_BC4RUnorm = 0x0000002D,
+    WGPUTextureFormat_BC4RSnorm = 0x0000002E,
+    WGPUTextureFormat_BC5RGUnorm = 0x0000002F,
+    WGPUTextureFormat_BC5RGSnorm = 0x00000030,
+    WGPUTextureFormat_BC6HRGBUfloat = 0x00000031,
+    WGPUTextureFormat_BC6HRGBSfloat = 0x00000032,
+    WGPUTextureFormat_BC7RGBAUnorm = 0x00000033,
+    WGPUTextureFormat_BC7RGBAUnormSrgb = 0x00000034,
+    WGPUTextureFormat_Force32 = 0x7FFFFFFF
+} WGPUTextureFormat;
+
+typedef enum WGPUTextureViewDimension {
+    WGPUTextureViewDimension_Undefined = 0x00000000,
+    WGPUTextureViewDimension_1D = 0x00000001,
+    WGPUTextureViewDimension_2D = 0x00000002,
+    WGPUTextureViewDimension_2DArray = 0x00000003,
+    WGPUTextureViewDimension_Cube = 0x00000004,
+    WGPUTextureViewDimension_CubeArray = 0x00000005,
+    WGPUTextureViewDimension_3D = 0x00000006,
+    WGPUTextureViewDimension_Force32 = 0x7FFFFFFF
+} WGPUTextureViewDimension;
+
+typedef enum WGPUVertexFormat {
+    WGPUVertexFormat_UChar2 = 0x00000000,
+    WGPUVertexFormat_UChar4 = 0x00000001,
+    WGPUVertexFormat_Char2 = 0x00000002,
+    WGPUVertexFormat_Char4 = 0x00000003,
+    WGPUVertexFormat_UChar2Norm = 0x00000004,
+    WGPUVertexFormat_UChar4Norm = 0x00000005,
+    WGPUVertexFormat_Char2Norm = 0x00000006,
+    WGPUVertexFormat_Char4Norm = 0x00000007,
+    WGPUVertexFormat_UShort2 = 0x00000008,
+    WGPUVertexFormat_UShort4 = 0x00000009,
+    WGPUVertexFormat_Short2 = 0x0000000A,
+    WGPUVertexFormat_Short4 = 0x0000000B,
+    WGPUVertexFormat_UShort2Norm = 0x0000000C,
+    WGPUVertexFormat_UShort4Norm = 0x0000000D,
+    WGPUVertexFormat_Short2Norm = 0x0000000E,
+    WGPUVertexFormat_Short4Norm = 0x0000000F,
+    WGPUVertexFormat_Half2 = 0x00000010,
+    WGPUVertexFormat_Half4 = 0x00000011,
+    WGPUVertexFormat_Float = 0x00000012,
+    WGPUVertexFormat_Float2 = 0x00000013,
+    WGPUVertexFormat_Float3 = 0x00000014,
+    WGPUVertexFormat_Float4 = 0x00000015,
+    WGPUVertexFormat_UInt = 0x00000016,
+    WGPUVertexFormat_UInt2 = 0x00000017,
+    WGPUVertexFormat_UInt3 = 0x00000018,
+    WGPUVertexFormat_UInt4 = 0x00000019,
+    WGPUVertexFormat_Int = 0x0000001A,
+    WGPUVertexFormat_Int2 = 0x0000001B,
+    WGPUVertexFormat_Int3 = 0x0000001C,
+    WGPUVertexFormat_Int4 = 0x0000001D,
+    WGPUVertexFormat_Force32 = 0x7FFFFFFF
+} WGPUVertexFormat;
+
+typedef enum WGPUBufferUsage {
+    WGPUBufferUsage_None = 0x00000000,
+    WGPUBufferUsage_MapRead = 0x00000001,
+    WGPUBufferUsage_MapWrite = 0x00000002,
+    WGPUBufferUsage_CopySrc = 0x00000004,
+    WGPUBufferUsage_CopyDst = 0x00000008,
+    WGPUBufferUsage_Index = 0x00000010,
+    WGPUBufferUsage_Vertex = 0x00000020,
+    WGPUBufferUsage_Uniform = 0x00000040,
+    WGPUBufferUsage_Storage = 0x00000080,
+    WGPUBufferUsage_Indirect = 0x00000100,
+    WGPUBufferUsage_Force32 = 0x7FFFFFFF
+} WGPUBufferUsage;
+typedef WGPUFlags WGPUBufferUsageFlags;
+
+typedef enum WGPUColorWriteMask {
+    WGPUColorWriteMask_None = 0x00000000,
+    WGPUColorWriteMask_Red = 0x00000001,
+    WGPUColorWriteMask_Green = 0x00000002,
+    WGPUColorWriteMask_Blue = 0x00000004,
+    WGPUColorWriteMask_Alpha = 0x00000008,
+    WGPUColorWriteMask_All = 0x0000000F,
+    WGPUColorWriteMask_Force32 = 0x7FFFFFFF
+} WGPUColorWriteMask;
+typedef WGPUFlags WGPUColorWriteMaskFlags;
+
+typedef enum WGPUShaderStage {
+    WGPUShaderStage_None = 0x00000000,
+    WGPUShaderStage_Vertex = 0x00000001,
+    WGPUShaderStage_Fragment = 0x00000002,
+    WGPUShaderStage_Compute = 0x00000004,
+    WGPUShaderStage_Force32 = 0x7FFFFFFF
+} WGPUShaderStage;
+typedef WGPUFlags WGPUShaderStageFlags;
+
+typedef enum WGPUTextureUsage {
+    WGPUTextureUsage_None = 0x00000000,
+    WGPUTextureUsage_CopySrc = 0x00000001,
+    WGPUTextureUsage_CopyDst = 0x00000002,
+    WGPUTextureUsage_Sampled = 0x00000004,
+    WGPUTextureUsage_Storage = 0x00000008,
+    WGPUTextureUsage_OutputAttachment = 0x00000010,
+    WGPUTextureUsage_Present = 0x00000020,
+    WGPUTextureUsage_Force32 = 0x7FFFFFFF
+} WGPUTextureUsage;
+typedef WGPUFlags WGPUTextureUsageFlags;
+
+
+typedef struct WGPUChainedStruct {
+    struct WGPUChainedStruct const * next;
+    WGPUSType sType;
+} WGPUChainedStruct;
+
+typedef struct WGPUAdapterProperties {
+    WGPUChainedStruct const * nextInChain;
+    uint32_t deviceID;
+    uint32_t vendorID;
+    char const * name;
+    WGPUAdapterType adapterType;
+    WGPUBackendType backendType;
+} WGPUAdapterProperties;
+
+typedef struct WGPUBindGroupEntry {
+    uint32_t binding;
+    WGPUBuffer buffer;
+    uint64_t offset;
+    uint64_t size;
+    WGPUSampler sampler;
+    WGPUTextureView textureView;
+} WGPUBindGroupEntry;
+
+typedef struct WGPUBindGroupLayoutEntry {
+    uint32_t binding;
+    WGPUShaderStageFlags visibility;
+    WGPUBindingType type;
+    bool hasDynamicOffset;
+    bool multisampled;
+    WGPUTextureViewDimension textureDimension;
+    WGPUTextureViewDimension viewDimension;
+    WGPUTextureComponentType textureComponentType;
+    WGPUTextureFormat storageTextureFormat;
+} WGPUBindGroupLayoutEntry;
+
+typedef struct WGPUBlendDescriptor {
+    WGPUBlendOperation operation;
+    WGPUBlendFactor srcFactor;
+    WGPUBlendFactor dstFactor;
+} WGPUBlendDescriptor;
+
+typedef struct WGPUBufferCopyView {
+    WGPUChainedStruct const * nextInChain;
+    WGPUBuffer buffer;
+    uint64_t offset;
+    uint32_t rowPitch;
+    uint32_t imageHeight;
+    uint32_t bytesPerRow;
+    uint32_t rowsPerImage;
+} WGPUBufferCopyView;
+
+typedef struct WGPUBufferDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+    WGPUBufferUsageFlags usage;
+    uint64_t size;
+} WGPUBufferDescriptor;
+
+typedef struct WGPUColor {
+    float r;
+    float g;
+    float b;
+    float a;
+} WGPUColor;
+
+typedef struct WGPUCommandBufferDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+} WGPUCommandBufferDescriptor;
+
+typedef struct WGPUCommandEncoderDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+} WGPUCommandEncoderDescriptor;
+
+typedef struct WGPUComputePassDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+} WGPUComputePassDescriptor;
+
+typedef struct WGPUCreateBufferMappedResult {
+    WGPUBuffer buffer;
+    uint64_t dataLength;
+    void * data;
+} WGPUCreateBufferMappedResult;
+
+typedef struct WGPUDeviceProperties {
+    bool textureCompressionBC;
+} WGPUDeviceProperties;
+
+typedef struct WGPUExtent3D {
+    uint32_t width;
+    uint32_t height;
+    uint32_t depth;
+} WGPUExtent3D;
+
+typedef struct WGPUFenceDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+    uint64_t initialValue;
+} WGPUFenceDescriptor;
+
+typedef struct WGPUInstanceDescriptor {
+    WGPUChainedStruct const * nextInChain;
+} WGPUInstanceDescriptor;
+
+typedef struct WGPUOrigin3D {
+    uint32_t x;
+    uint32_t y;
+    uint32_t z;
+} WGPUOrigin3D;
+
+typedef struct WGPUPipelineLayoutDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+    uint32_t bindGroupLayoutCount;
+    WGPUBindGroupLayout const * bindGroupLayouts;
+} WGPUPipelineLayoutDescriptor;
+
+typedef struct WGPUProgrammableStageDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    WGPUShaderModule module;
+    char const * entryPoint;
+} WGPUProgrammableStageDescriptor;
+
+typedef struct WGPURasterizationStateDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    WGPUFrontFace frontFace;
+    WGPUCullMode cullMode;
+    int32_t depthBias;
+    float depthBiasSlopeScale;
+    float depthBiasClamp;
+} WGPURasterizationStateDescriptor;
+
+typedef struct WGPURenderBundleDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+} WGPURenderBundleDescriptor;
+
+typedef struct WGPURenderBundleEncoderDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+    uint32_t colorFormatsCount;
+    WGPUTextureFormat const * colorFormats;
+    WGPUTextureFormat depthStencilFormat;
+    uint32_t sampleCount;
+} WGPURenderBundleEncoderDescriptor;
+
+typedef struct WGPURenderPassDepthStencilAttachmentDescriptor {
+    WGPUTextureView attachment;
+    WGPULoadOp depthLoadOp;
+    WGPUStoreOp depthStoreOp;
+    float clearDepth;
+    WGPULoadOp stencilLoadOp;
+    WGPUStoreOp stencilStoreOp;
+    uint32_t clearStencil;
+} WGPURenderPassDepthStencilAttachmentDescriptor;
+
+typedef struct WGPUSamplerDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+    WGPUAddressMode addressModeU;
+    WGPUAddressMode addressModeV;
+    WGPUAddressMode addressModeW;
+    WGPUFilterMode magFilter;
+    WGPUFilterMode minFilter;
+    WGPUFilterMode mipmapFilter;
+    float lodMinClamp;
+    float lodMaxClamp;
+    WGPUCompareFunction compare;
+} WGPUSamplerDescriptor;
+
+typedef struct WGPUSamplerDescriptorDummyAnisotropicFiltering {
+    WGPUChainedStruct chain;
+    float maxAnisotropy;
+} WGPUSamplerDescriptorDummyAnisotropicFiltering;
+
+typedef struct WGPUShaderModuleDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+    uint32_t codeSize;
+    uint32_t const * code;
+} WGPUShaderModuleDescriptor;
+
+typedef struct WGPUShaderModuleSPIRVDescriptor {
+    WGPUChainedStruct chain;
+    uint32_t codeSize;
+    uint32_t const * code;
+} WGPUShaderModuleSPIRVDescriptor;
+
+typedef struct WGPUShaderModuleWGSLDescriptor {
+    WGPUChainedStruct chain;
+    char const * source;
+} WGPUShaderModuleWGSLDescriptor;
+
+typedef struct WGPUStencilStateFaceDescriptor {
+    WGPUCompareFunction compare;
+    WGPUStencilOperation failOp;
+    WGPUStencilOperation depthFailOp;
+    WGPUStencilOperation passOp;
+} WGPUStencilStateFaceDescriptor;
+
+typedef struct WGPUSurfaceDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+} WGPUSurfaceDescriptor;
+
+typedef struct WGPUSurfaceDescriptorFromHTMLCanvasId {
+    WGPUChainedStruct chain;
+    char const * id;
+} WGPUSurfaceDescriptorFromHTMLCanvasId;
+
+typedef struct WGPUSurfaceDescriptorFromMetalLayer {
+    WGPUChainedStruct chain;
+    void * layer;
+} WGPUSurfaceDescriptorFromMetalLayer;
+
+typedef struct WGPUSurfaceDescriptorFromWindowsHWND {
+    WGPUChainedStruct chain;
+    void * hinstance;
+    void * hwnd;
+} WGPUSurfaceDescriptorFromWindowsHWND;
+
+typedef struct WGPUSurfaceDescriptorFromXlib {
+    WGPUChainedStruct chain;
+    void * display;
+    uint32_t window;
+} WGPUSurfaceDescriptorFromXlib;
+
+typedef struct WGPUSwapChainDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+    WGPUTextureUsageFlags usage;
+    WGPUTextureFormat format;
+    uint32_t width;
+    uint32_t height;
+    WGPUPresentMode presentMode;
+    uint64_t implementation;
+} WGPUSwapChainDescriptor;
+
+typedef struct WGPUTextureViewDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+    WGPUTextureFormat format;
+    WGPUTextureViewDimension dimension;
+    uint32_t baseMipLevel;
+    uint32_t mipLevelCount;
+    uint32_t baseArrayLayer;
+    uint32_t arrayLayerCount;
+    WGPUTextureAspect aspect;
+} WGPUTextureViewDescriptor;
+
+typedef struct WGPUVertexAttributeDescriptor {
+    WGPUVertexFormat format;
+    uint64_t offset;
+    uint32_t shaderLocation;
+} WGPUVertexAttributeDescriptor;
+
+typedef struct WGPUBindGroupDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+    WGPUBindGroupLayout layout;
+    uint32_t bindingCount;
+    WGPUBindGroupEntry const * bindings;
+    uint32_t entryCount;
+    WGPUBindGroupEntry const * entries;
+} WGPUBindGroupDescriptor;
+
+typedef struct WGPUBindGroupLayoutDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+    uint32_t bindingCount;
+    WGPUBindGroupLayoutEntry const * bindings;
+    uint32_t entryCount;
+    WGPUBindGroupLayoutEntry const * entries;
+} WGPUBindGroupLayoutDescriptor;
+
+typedef struct WGPUColorStateDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    WGPUTextureFormat format;
+    WGPUBlendDescriptor alphaBlend;
+    WGPUBlendDescriptor colorBlend;
+    WGPUColorWriteMaskFlags writeMask;
+} WGPUColorStateDescriptor;
+
+typedef struct WGPUComputePipelineDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+    WGPUPipelineLayout layout;
+    WGPUProgrammableStageDescriptor computeStage;
+} WGPUComputePipelineDescriptor;
+
+typedef struct WGPUDepthStencilStateDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    WGPUTextureFormat format;
+    bool depthWriteEnabled;
+    WGPUCompareFunction depthCompare;
+    WGPUStencilStateFaceDescriptor stencilFront;
+    WGPUStencilStateFaceDescriptor stencilBack;
+    uint32_t stencilReadMask;
+    uint32_t stencilWriteMask;
+} WGPUDepthStencilStateDescriptor;
+
+typedef struct WGPURenderPassColorAttachmentDescriptor {
+    WGPUTextureView attachment;
+    WGPUTextureView resolveTarget;
+    WGPULoadOp loadOp;
+    WGPUStoreOp storeOp;
+    WGPUColor clearColor;
+} WGPURenderPassColorAttachmentDescriptor;
+
+typedef struct WGPURenderPipelineDescriptorDummyExtension {
+    WGPUChainedStruct chain;
+    WGPUProgrammableStageDescriptor dummyStage;
+} WGPURenderPipelineDescriptorDummyExtension;
+
+typedef struct WGPUTextureCopyView {
+    WGPUChainedStruct const * nextInChain;
+    WGPUTexture texture;
+    uint32_t mipLevel;
+    uint32_t arrayLayer;
+    WGPUOrigin3D origin;
+} WGPUTextureCopyView;
+
+typedef struct WGPUTextureDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+    WGPUTextureUsageFlags usage;
+    WGPUTextureDimension dimension;
+    WGPUExtent3D size;
+    uint32_t arrayLayerCount;
+    WGPUTextureFormat format;
+    uint32_t mipLevelCount;
+    uint32_t sampleCount;
+} WGPUTextureDescriptor;
+
+typedef struct WGPUVertexBufferLayoutDescriptor {
+    uint64_t arrayStride;
+    WGPUInputStepMode stepMode;
+    uint32_t attributeCount;
+    WGPUVertexAttributeDescriptor const * attributes;
+} WGPUVertexBufferLayoutDescriptor;
+
+typedef struct WGPURenderPassDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+    uint32_t colorAttachmentCount;
+    WGPURenderPassColorAttachmentDescriptor const * colorAttachments;
+    WGPURenderPassDepthStencilAttachmentDescriptor const * depthStencilAttachment;
+} WGPURenderPassDescriptor;
+
+typedef struct WGPUVertexStateDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    WGPUIndexFormat indexFormat;
+    uint32_t vertexBufferCount;
+    WGPUVertexBufferLayoutDescriptor const * vertexBuffers;
+} WGPUVertexStateDescriptor;
+
+typedef struct WGPURenderPipelineDescriptor {
+    WGPUChainedStruct const * nextInChain;
+    char const * label;
+    WGPUPipelineLayout layout;
+    WGPUProgrammableStageDescriptor vertexStage;
+    WGPUProgrammableStageDescriptor const * fragmentStage;
+    WGPUVertexStateDescriptor const * vertexState;
+    WGPUPrimitiveTopology primitiveTopology;
+    WGPURasterizationStateDescriptor const * rasterizationState;
+    uint32_t sampleCount;
+    WGPUDepthStencilStateDescriptor const * depthStencilState;
+    uint32_t colorStateCount;
+    WGPUColorStateDescriptor const * colorStates;
+    uint32_t sampleMask;
+    bool alphaToCoverageEnabled;
+} WGPURenderPipelineDescriptor;
+
+
+// TODO(dawn:22): Remove this once users use the "Entry" version.
+typedef WGPUBindGroupEntry WGPUBindGroupBinding;
+typedef WGPUBindGroupLayoutEntry WGPUBindGroupLayoutBinding;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef void (*WGPUBufferMapReadCallback)(WGPUBufferMapAsyncStatus status, void const * data, uint64_t dataLength, void * userdata);
+typedef void (*WGPUBufferMapWriteCallback)(WGPUBufferMapAsyncStatus status, void * data, uint64_t dataLength, void * userdata);
+typedef void (*WGPUDeviceLostCallback)(char const * message, void * userdata);
+typedef void (*WGPUErrorCallback)(WGPUErrorType type, char const * message, void * userdata);
+typedef void (*WGPUFenceOnCompletionCallback)(WGPUFenceCompletionStatus status, void * userdata);
+
+typedef void (*WGPUProc)(void);
+
+#if !defined(WGPU_SKIP_PROCS)
+
+typedef WGPUInstance (*WGPUProcCreateInstance)(WGPUInstanceDescriptor const * descriptor);
+typedef WGPUProc (*WGPUProcGetProcAddress)(WGPUDevice device, char const * procName);
+
+// Procs of BindGroup
+typedef void (*WGPUProcBindGroupReference)(WGPUBindGroup bindGroup);
+typedef void (*WGPUProcBindGroupRelease)(WGPUBindGroup bindGroup);
+
+// Procs of BindGroupLayout
+typedef void (*WGPUProcBindGroupLayoutReference)(WGPUBindGroupLayout bindGroupLayout);
+typedef void (*WGPUProcBindGroupLayoutRelease)(WGPUBindGroupLayout bindGroupLayout);
+
+// Procs of Buffer
+typedef void (*WGPUProcBufferDestroy)(WGPUBuffer buffer);
+typedef void (*WGPUProcBufferMapReadAsync)(WGPUBuffer buffer, WGPUBufferMapReadCallback callback, void * userdata);
+typedef void (*WGPUProcBufferMapWriteAsync)(WGPUBuffer buffer, WGPUBufferMapWriteCallback callback, void * userdata);
+typedef void (*WGPUProcBufferSetSubData)(WGPUBuffer buffer, uint64_t start, uint64_t count, void const * data);
+typedef void (*WGPUProcBufferUnmap)(WGPUBuffer buffer);
+typedef void (*WGPUProcBufferReference)(WGPUBuffer buffer);
+typedef void (*WGPUProcBufferRelease)(WGPUBuffer buffer);
+
+// Procs of CommandBuffer
+typedef void (*WGPUProcCommandBufferReference)(WGPUCommandBuffer commandBuffer);
+typedef void (*WGPUProcCommandBufferRelease)(WGPUCommandBuffer commandBuffer);
+
+// Procs of CommandEncoder
+typedef WGPUComputePassEncoder (*WGPUProcCommandEncoderBeginComputePass)(WGPUCommandEncoder commandEncoder, WGPUComputePassDescriptor const * descriptor);
+typedef WGPURenderPassEncoder (*WGPUProcCommandEncoderBeginRenderPass)(WGPUCommandEncoder commandEncoder, WGPURenderPassDescriptor const * descriptor);
+typedef void (*WGPUProcCommandEncoderCopyBufferToBuffer)(WGPUCommandEncoder commandEncoder, WGPUBuffer source, uint64_t sourceOffset, WGPUBuffer destination, uint64_t destinationOffset, uint64_t size);
+typedef void (*WGPUProcCommandEncoderCopyBufferToTexture)(WGPUCommandEncoder commandEncoder, WGPUBufferCopyView const * source, WGPUTextureCopyView const * destination, WGPUExtent3D const * copySize);
+typedef void (*WGPUProcCommandEncoderCopyTextureToBuffer)(WGPUCommandEncoder commandEncoder, WGPUTextureCopyView const * source, WGPUBufferCopyView const * destination, WGPUExtent3D const * copySize);
+typedef void (*WGPUProcCommandEncoderCopyTextureToTexture)(WGPUCommandEncoder commandEncoder, WGPUTextureCopyView const * source, WGPUTextureCopyView const * destination, WGPUExtent3D const * copySize);
+typedef WGPUCommandBuffer (*WGPUProcCommandEncoderFinish)(WGPUCommandEncoder commandEncoder, WGPUCommandBufferDescriptor const * descriptor);
+typedef void (*WGPUProcCommandEncoderInsertDebugMarker)(WGPUCommandEncoder commandEncoder, char const * groupLabel);
+typedef void (*WGPUProcCommandEncoderPopDebugGroup)(WGPUCommandEncoder commandEncoder);
+typedef void (*WGPUProcCommandEncoderPushDebugGroup)(WGPUCommandEncoder commandEncoder, char const * groupLabel);
+typedef void (*WGPUProcCommandEncoderReference)(WGPUCommandEncoder commandEncoder);
+typedef void (*WGPUProcCommandEncoderRelease)(WGPUCommandEncoder commandEncoder);
+
+// Procs of ComputePassEncoder
+typedef void (*WGPUProcComputePassEncoderDispatch)(WGPUComputePassEncoder computePassEncoder, uint32_t x, uint32_t y, uint32_t z);
+typedef void (*WGPUProcComputePassEncoderDispatchIndirect)(WGPUComputePassEncoder computePassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
+typedef void (*WGPUProcComputePassEncoderEndPass)(WGPUComputePassEncoder computePassEncoder);
+typedef void (*WGPUProcComputePassEncoderInsertDebugMarker)(WGPUComputePassEncoder computePassEncoder, char const * groupLabel);
+typedef void (*WGPUProcComputePassEncoderPopDebugGroup)(WGPUComputePassEncoder computePassEncoder);
+typedef void (*WGPUProcComputePassEncoderPushDebugGroup)(WGPUComputePassEncoder computePassEncoder, char const * groupLabel);
+typedef void (*WGPUProcComputePassEncoderSetBindGroup)(WGPUComputePassEncoder computePassEncoder, uint32_t groupIndex, WGPUBindGroup group, uint32_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
+typedef void (*WGPUProcComputePassEncoderSetPipeline)(WGPUComputePassEncoder computePassEncoder, WGPUComputePipeline pipeline);
+typedef void (*WGPUProcComputePassEncoderReference)(WGPUComputePassEncoder computePassEncoder);
+typedef void (*WGPUProcComputePassEncoderRelease)(WGPUComputePassEncoder computePassEncoder);
+
+// Procs of ComputePipeline
+typedef WGPUBindGroupLayout (*WGPUProcComputePipelineGetBindGroupLayout)(WGPUComputePipeline computePipeline, uint32_t groupIndex);
+typedef void (*WGPUProcComputePipelineReference)(WGPUComputePipeline computePipeline);
+typedef void (*WGPUProcComputePipelineRelease)(WGPUComputePipeline computePipeline);
+
+// Procs of Device
+typedef WGPUBindGroup (*WGPUProcDeviceCreateBindGroup)(WGPUDevice device, WGPUBindGroupDescriptor const * descriptor);
+typedef WGPUBindGroupLayout (*WGPUProcDeviceCreateBindGroupLayout)(WGPUDevice device, WGPUBindGroupLayoutDescriptor const * descriptor);
+typedef WGPUBuffer (*WGPUProcDeviceCreateBuffer)(WGPUDevice device, WGPUBufferDescriptor const * descriptor);
+typedef WGPUCreateBufferMappedResult (*WGPUProcDeviceCreateBufferMapped)(WGPUDevice device, WGPUBufferDescriptor const * descriptor);
+typedef WGPUCommandEncoder (*WGPUProcDeviceCreateCommandEncoder)(WGPUDevice device, WGPUCommandEncoderDescriptor const * descriptor);
+typedef WGPUComputePipeline (*WGPUProcDeviceCreateComputePipeline)(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor);
+typedef WGPUPipelineLayout (*WGPUProcDeviceCreatePipelineLayout)(WGPUDevice device, WGPUPipelineLayoutDescriptor const * descriptor);
+typedef WGPUQueue (*WGPUProcDeviceCreateQueue)(WGPUDevice device);
+typedef WGPURenderBundleEncoder (*WGPUProcDeviceCreateRenderBundleEncoder)(WGPUDevice device, WGPURenderBundleEncoderDescriptor const * descriptor);
+typedef WGPURenderPipeline (*WGPUProcDeviceCreateRenderPipeline)(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor);
+typedef WGPUSampler (*WGPUProcDeviceCreateSampler)(WGPUDevice device, WGPUSamplerDescriptor const * descriptor);
+typedef WGPUShaderModule (*WGPUProcDeviceCreateShaderModule)(WGPUDevice device, WGPUShaderModuleDescriptor const * descriptor);
+typedef WGPUSwapChain (*WGPUProcDeviceCreateSwapChain)(WGPUDevice device, WGPUSurface surface, WGPUSwapChainDescriptor const * descriptor);
+typedef WGPUTexture (*WGPUProcDeviceCreateTexture)(WGPUDevice device, WGPUTextureDescriptor const * descriptor);
+typedef WGPUQueue (*WGPUProcDeviceGetDefaultQueue)(WGPUDevice device);
+typedef void (*WGPUProcDeviceInjectError)(WGPUDevice device, WGPUErrorType type, char const * message);
+typedef void (*WGPUProcDeviceLoseForTesting)(WGPUDevice device);
+typedef bool (*WGPUProcDevicePopErrorScope)(WGPUDevice device, WGPUErrorCallback callback, void * userdata);
+typedef void (*WGPUProcDevicePushErrorScope)(WGPUDevice device, WGPUErrorFilter filter);
+typedef void (*WGPUProcDeviceSetDeviceLostCallback)(WGPUDevice device, WGPUDeviceLostCallback callback, void * userdata);
+typedef void (*WGPUProcDeviceSetUncapturedErrorCallback)(WGPUDevice device, WGPUErrorCallback callback, void * userdata);
+typedef void (*WGPUProcDeviceTick)(WGPUDevice device);
+typedef void (*WGPUProcDeviceReference)(WGPUDevice device);
+typedef void (*WGPUProcDeviceRelease)(WGPUDevice device);
+
+// Procs of Fence
+typedef uint64_t (*WGPUProcFenceGetCompletedValue)(WGPUFence fence);
+typedef void (*WGPUProcFenceOnCompletion)(WGPUFence fence, uint64_t value, WGPUFenceOnCompletionCallback callback, void * userdata);
+typedef void (*WGPUProcFenceReference)(WGPUFence fence);
+typedef void (*WGPUProcFenceRelease)(WGPUFence fence);
+
+// Procs of Instance
+typedef WGPUSurface (*WGPUProcInstanceCreateSurface)(WGPUInstance instance, WGPUSurfaceDescriptor const * descriptor);
+typedef void (*WGPUProcInstanceReference)(WGPUInstance instance);
+typedef void (*WGPUProcInstanceRelease)(WGPUInstance instance);
+
+// Procs of PipelineLayout
+typedef void (*WGPUProcPipelineLayoutReference)(WGPUPipelineLayout pipelineLayout);
+typedef void (*WGPUProcPipelineLayoutRelease)(WGPUPipelineLayout pipelineLayout);
+
+// Procs of Queue
+typedef WGPUFence (*WGPUProcQueueCreateFence)(WGPUQueue queue, WGPUFenceDescriptor const * descriptor);
+typedef void (*WGPUProcQueueSignal)(WGPUQueue queue, WGPUFence fence, uint64_t signalValue);
+typedef void (*WGPUProcQueueSubmit)(WGPUQueue queue, uint32_t commandCount, WGPUCommandBuffer const * commands);
+typedef void (*WGPUProcQueueReference)(WGPUQueue queue);
+typedef void (*WGPUProcQueueRelease)(WGPUQueue queue);
+
+// Procs of RenderBundle
+typedef void (*WGPUProcRenderBundleReference)(WGPURenderBundle renderBundle);
+typedef void (*WGPUProcRenderBundleRelease)(WGPURenderBundle renderBundle);
+
+// Procs of RenderBundleEncoder
+typedef void (*WGPUProcRenderBundleEncoderDraw)(WGPURenderBundleEncoder renderBundleEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
+typedef void (*WGPUProcRenderBundleEncoderDrawIndexed)(WGPURenderBundleEncoder renderBundleEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance);
+typedef void (*WGPUProcRenderBundleEncoderDrawIndexedIndirect)(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
+typedef void (*WGPUProcRenderBundleEncoderDrawIndirect)(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
+typedef WGPURenderBundle (*WGPUProcRenderBundleEncoderFinish)(WGPURenderBundleEncoder renderBundleEncoder, WGPURenderBundleDescriptor const * descriptor);
+typedef void (*WGPUProcRenderBundleEncoderInsertDebugMarker)(WGPURenderBundleEncoder renderBundleEncoder, char const * groupLabel);
+typedef void (*WGPUProcRenderBundleEncoderPopDebugGroup)(WGPURenderBundleEncoder renderBundleEncoder);
+typedef void (*WGPUProcRenderBundleEncoderPushDebugGroup)(WGPURenderBundleEncoder renderBundleEncoder, char const * groupLabel);
+typedef void (*WGPUProcRenderBundleEncoderSetBindGroup)(WGPURenderBundleEncoder renderBundleEncoder, uint32_t groupIndex, WGPUBindGroup group, uint32_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
+typedef void (*WGPUProcRenderBundleEncoderSetIndexBuffer)(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer buffer, uint64_t offset, uint64_t size);
+typedef void (*WGPUProcRenderBundleEncoderSetPipeline)(WGPURenderBundleEncoder renderBundleEncoder, WGPURenderPipeline pipeline);
+typedef void (*WGPUProcRenderBundleEncoderSetVertexBuffer)(WGPURenderBundleEncoder renderBundleEncoder, uint32_t slot, WGPUBuffer buffer, uint64_t offset, uint64_t size);
+typedef void (*WGPUProcRenderBundleEncoderReference)(WGPURenderBundleEncoder renderBundleEncoder);
+typedef void (*WGPUProcRenderBundleEncoderRelease)(WGPURenderBundleEncoder renderBundleEncoder);
+
+// Procs of RenderPassEncoder
+typedef void (*WGPUProcRenderPassEncoderDraw)(WGPURenderPassEncoder renderPassEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
+typedef void (*WGPUProcRenderPassEncoderDrawIndexed)(WGPURenderPassEncoder renderPassEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance);
+typedef void (*WGPUProcRenderPassEncoderDrawIndexedIndirect)(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
+typedef void (*WGPUProcRenderPassEncoderDrawIndirect)(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
+typedef void (*WGPUProcRenderPassEncoderEndPass)(WGPURenderPassEncoder renderPassEncoder);
+typedef void (*WGPUProcRenderPassEncoderExecuteBundles)(WGPURenderPassEncoder renderPassEncoder, uint32_t bundlesCount, WGPURenderBundle const * bundles);
+typedef void (*WGPUProcRenderPassEncoderInsertDebugMarker)(WGPURenderPassEncoder renderPassEncoder, char const * groupLabel);
+typedef void (*WGPUProcRenderPassEncoderPopDebugGroup)(WGPURenderPassEncoder renderPassEncoder);
+typedef void (*WGPUProcRenderPassEncoderPushDebugGroup)(WGPURenderPassEncoder renderPassEncoder, char const * groupLabel);
+typedef void (*WGPUProcRenderPassEncoderSetBindGroup)(WGPURenderPassEncoder renderPassEncoder, uint32_t groupIndex, WGPUBindGroup group, uint32_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
+typedef void (*WGPUProcRenderPassEncoderSetBlendColor)(WGPURenderPassEncoder renderPassEncoder, WGPUColor const * color);
+typedef void (*WGPUProcRenderPassEncoderSetIndexBuffer)(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer buffer, uint64_t offset, uint64_t size);
+typedef void (*WGPUProcRenderPassEncoderSetPipeline)(WGPURenderPassEncoder renderPassEncoder, WGPURenderPipeline pipeline);
+typedef void (*WGPUProcRenderPassEncoderSetScissorRect)(WGPURenderPassEncoder renderPassEncoder, uint32_t x, uint32_t y, uint32_t width, uint32_t height);
+typedef void (*WGPUProcRenderPassEncoderSetStencilReference)(WGPURenderPassEncoder renderPassEncoder, uint32_t reference);
+typedef void (*WGPUProcRenderPassEncoderSetVertexBuffer)(WGPURenderPassEncoder renderPassEncoder, uint32_t slot, WGPUBuffer buffer, uint64_t offset, uint64_t size);
+typedef void (*WGPUProcRenderPassEncoderSetViewport)(WGPURenderPassEncoder renderPassEncoder, float x, float y, float width, float height, float minDepth, float maxDepth);
+typedef void (*WGPUProcRenderPassEncoderReference)(WGPURenderPassEncoder renderPassEncoder);
+typedef void (*WGPUProcRenderPassEncoderRelease)(WGPURenderPassEncoder renderPassEncoder);
+
+// Procs of RenderPipeline
+typedef WGPUBindGroupLayout (*WGPUProcRenderPipelineGetBindGroupLayout)(WGPURenderPipeline renderPipeline, uint32_t groupIndex);
+typedef void (*WGPUProcRenderPipelineReference)(WGPURenderPipeline renderPipeline);
+typedef void (*WGPUProcRenderPipelineRelease)(WGPURenderPipeline renderPipeline);
+
+// Procs of Sampler
+typedef void (*WGPUProcSamplerReference)(WGPUSampler sampler);
+typedef void (*WGPUProcSamplerRelease)(WGPUSampler sampler);
+
+// Procs of ShaderModule
+typedef void (*WGPUProcShaderModuleReference)(WGPUShaderModule shaderModule);
+typedef void (*WGPUProcShaderModuleRelease)(WGPUShaderModule shaderModule);
+
+// Procs of Surface
+typedef void (*WGPUProcSurfaceReference)(WGPUSurface surface);
+typedef void (*WGPUProcSurfaceRelease)(WGPUSurface surface);
+
+// Procs of SwapChain
+typedef void (*WGPUProcSwapChainConfigure)(WGPUSwapChain swapChain, WGPUTextureFormat format, WGPUTextureUsageFlags allowedUsage, uint32_t width, uint32_t height);
+typedef WGPUTextureView (*WGPUProcSwapChainGetCurrentTextureView)(WGPUSwapChain swapChain);
+typedef void (*WGPUProcSwapChainPresent)(WGPUSwapChain swapChain);
+typedef void (*WGPUProcSwapChainReference)(WGPUSwapChain swapChain);
+typedef void (*WGPUProcSwapChainRelease)(WGPUSwapChain swapChain);
+
+// Procs of Texture
+typedef WGPUTextureView (*WGPUProcTextureCreateView)(WGPUTexture texture, WGPUTextureViewDescriptor const * descriptor);
+typedef void (*WGPUProcTextureDestroy)(WGPUTexture texture);
+typedef void (*WGPUProcTextureReference)(WGPUTexture texture);
+typedef void (*WGPUProcTextureRelease)(WGPUTexture texture);
+
+// Procs of TextureView
+typedef void (*WGPUProcTextureViewReference)(WGPUTextureView textureView);
+typedef void (*WGPUProcTextureViewRelease)(WGPUTextureView textureView);
+
+#endif  // !defined(WGPU_SKIP_PROCS)
+
+#if !defined(WGPU_SKIP_DECLARATIONS)
+
+WGPU_EXPORT WGPUInstance wgpuCreateInstance(WGPUInstanceDescriptor const * descriptor);
+WGPU_EXPORT WGPUProc wgpuGetProcAddress(WGPUDevice device, char const * procName);
+
+// Methods of BindGroup
+WGPU_EXPORT void wgpuBindGroupReference(WGPUBindGroup bindGroup);
+WGPU_EXPORT void wgpuBindGroupRelease(WGPUBindGroup bindGroup);
+
+// Methods of BindGroupLayout
+WGPU_EXPORT void wgpuBindGroupLayoutReference(WGPUBindGroupLayout bindGroupLayout);
+WGPU_EXPORT void wgpuBindGroupLayoutRelease(WGPUBindGroupLayout bindGroupLayout);
+
+// Methods of Buffer
+WGPU_EXPORT void wgpuBufferDestroy(WGPUBuffer buffer);
+WGPU_EXPORT void wgpuBufferMapReadAsync(WGPUBuffer buffer, WGPUBufferMapReadCallback callback, void * userdata);
+WGPU_EXPORT void wgpuBufferMapWriteAsync(WGPUBuffer buffer, WGPUBufferMapWriteCallback callback, void * userdata);
+WGPU_EXPORT void wgpuBufferSetSubData(WGPUBuffer buffer, uint64_t start, uint64_t count, void const * data);
+WGPU_EXPORT void wgpuBufferUnmap(WGPUBuffer buffer);
+WGPU_EXPORT void wgpuBufferReference(WGPUBuffer buffer);
+WGPU_EXPORT void wgpuBufferRelease(WGPUBuffer buffer);
+
+// Methods of CommandBuffer
+WGPU_EXPORT void wgpuCommandBufferReference(WGPUCommandBuffer commandBuffer);
+WGPU_EXPORT void wgpuCommandBufferRelease(WGPUCommandBuffer commandBuffer);
+
+// Methods of CommandEncoder
+WGPU_EXPORT WGPUComputePassEncoder wgpuCommandEncoderBeginComputePass(WGPUCommandEncoder commandEncoder, WGPUComputePassDescriptor const * descriptor);
+WGPU_EXPORT WGPURenderPassEncoder wgpuCommandEncoderBeginRenderPass(WGPUCommandEncoder commandEncoder, WGPURenderPassDescriptor const * descriptor);
+WGPU_EXPORT void wgpuCommandEncoderCopyBufferToBuffer(WGPUCommandEncoder commandEncoder, WGPUBuffer source, uint64_t sourceOffset, WGPUBuffer destination, uint64_t destinationOffset, uint64_t size);
+WGPU_EXPORT void wgpuCommandEncoderCopyBufferToTexture(WGPUCommandEncoder commandEncoder, WGPUBufferCopyView const * source, WGPUTextureCopyView const * destination, WGPUExtent3D const * copySize);
+WGPU_EXPORT void wgpuCommandEncoderCopyTextureToBuffer(WGPUCommandEncoder commandEncoder, WGPUTextureCopyView const * source, WGPUBufferCopyView const * destination, WGPUExtent3D const * copySize);
+WGPU_EXPORT void wgpuCommandEncoderCopyTextureToTexture(WGPUCommandEncoder commandEncoder, WGPUTextureCopyView const * source, WGPUTextureCopyView const * destination, WGPUExtent3D const * copySize);
+WGPU_EXPORT WGPUCommandBuffer wgpuCommandEncoderFinish(WGPUCommandEncoder commandEncoder, WGPUCommandBufferDescriptor const * descriptor);
+WGPU_EXPORT void wgpuCommandEncoderInsertDebugMarker(WGPUCommandEncoder commandEncoder, char const * groupLabel);
+WGPU_EXPORT void wgpuCommandEncoderPopDebugGroup(WGPUCommandEncoder commandEncoder);
+WGPU_EXPORT void wgpuCommandEncoderPushDebugGroup(WGPUCommandEncoder commandEncoder, char const * groupLabel);
+WGPU_EXPORT void wgpuCommandEncoderReference(WGPUCommandEncoder commandEncoder);
+WGPU_EXPORT void wgpuCommandEncoderRelease(WGPUCommandEncoder commandEncoder);
+
+// Methods of ComputePassEncoder
+WGPU_EXPORT void wgpuComputePassEncoderDispatch(WGPUComputePassEncoder computePassEncoder, uint32_t x, uint32_t y, uint32_t z);
+WGPU_EXPORT void wgpuComputePassEncoderDispatchIndirect(WGPUComputePassEncoder computePassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
+WGPU_EXPORT void wgpuComputePassEncoderEndPass(WGPUComputePassEncoder computePassEncoder);
+WGPU_EXPORT void wgpuComputePassEncoderInsertDebugMarker(WGPUComputePassEncoder computePassEncoder, char const * groupLabel);
+WGPU_EXPORT void wgpuComputePassEncoderPopDebugGroup(WGPUComputePassEncoder computePassEncoder);
+WGPU_EXPORT void wgpuComputePassEncoderPushDebugGroup(WGPUComputePassEncoder computePassEncoder, char const * groupLabel);
+WGPU_EXPORT void wgpuComputePassEncoderSetBindGroup(WGPUComputePassEncoder computePassEncoder, uint32_t groupIndex, WGPUBindGroup group, uint32_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
+WGPU_EXPORT void wgpuComputePassEncoderSetPipeline(WGPUComputePassEncoder computePassEncoder, WGPUComputePipeline pipeline);
+WGPU_EXPORT void wgpuComputePassEncoderReference(WGPUComputePassEncoder computePassEncoder);
+WGPU_EXPORT void wgpuComputePassEncoderRelease(WGPUComputePassEncoder computePassEncoder);
+
+// Methods of ComputePipeline
+WGPU_EXPORT WGPUBindGroupLayout wgpuComputePipelineGetBindGroupLayout(WGPUComputePipeline computePipeline, uint32_t groupIndex);
+WGPU_EXPORT void wgpuComputePipelineReference(WGPUComputePipeline computePipeline);
+WGPU_EXPORT void wgpuComputePipelineRelease(WGPUComputePipeline computePipeline);
+
+// Methods of Device
+WGPU_EXPORT WGPUBindGroup wgpuDeviceCreateBindGroup(WGPUDevice device, WGPUBindGroupDescriptor const * descriptor);
+WGPU_EXPORT WGPUBindGroupLayout wgpuDeviceCreateBindGroupLayout(WGPUDevice device, WGPUBindGroupLayoutDescriptor const * descriptor);
+WGPU_EXPORT WGPUBuffer wgpuDeviceCreateBuffer(WGPUDevice device, WGPUBufferDescriptor const * descriptor);
+WGPU_EXPORT WGPUCreateBufferMappedResult wgpuDeviceCreateBufferMapped(WGPUDevice device, WGPUBufferDescriptor const * descriptor);
+WGPU_EXPORT WGPUCommandEncoder wgpuDeviceCreateCommandEncoder(WGPUDevice device, WGPUCommandEncoderDescriptor const * descriptor);
+WGPU_EXPORT WGPUComputePipeline wgpuDeviceCreateComputePipeline(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor);
+WGPU_EXPORT WGPUPipelineLayout wgpuDeviceCreatePipelineLayout(WGPUDevice device, WGPUPipelineLayoutDescriptor const * descriptor);
+WGPU_EXPORT WGPUQueue wgpuDeviceCreateQueue(WGPUDevice device);
+WGPU_EXPORT WGPURenderBundleEncoder wgpuDeviceCreateRenderBundleEncoder(WGPUDevice device, WGPURenderBundleEncoderDescriptor const * descriptor);
+WGPU_EXPORT WGPURenderPipeline wgpuDeviceCreateRenderPipeline(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor);
+WGPU_EXPORT WGPUSampler wgpuDeviceCreateSampler(WGPUDevice device, WGPUSamplerDescriptor const * descriptor);
+WGPU_EXPORT WGPUShaderModule wgpuDeviceCreateShaderModule(WGPUDevice device, WGPUShaderModuleDescriptor const * descriptor);
+WGPU_EXPORT WGPUSwapChain wgpuDeviceCreateSwapChain(WGPUDevice device, WGPUSurface surface, WGPUSwapChainDescriptor const * descriptor);
+WGPU_EXPORT WGPUTexture wgpuDeviceCreateTexture(WGPUDevice device, WGPUTextureDescriptor const * descriptor);
+WGPU_EXPORT WGPUQueue wgpuDeviceGetDefaultQueue(WGPUDevice device);
+WGPU_EXPORT void wgpuDeviceInjectError(WGPUDevice device, WGPUErrorType type, char const * message);
+WGPU_EXPORT void wgpuDeviceLoseForTesting(WGPUDevice device);
+WGPU_EXPORT bool wgpuDevicePopErrorScope(WGPUDevice device, WGPUErrorCallback callback, void * userdata);
+WGPU_EXPORT void wgpuDevicePushErrorScope(WGPUDevice device, WGPUErrorFilter filter);
+WGPU_EXPORT void wgpuDeviceSetDeviceLostCallback(WGPUDevice device, WGPUDeviceLostCallback callback, void * userdata);
+WGPU_EXPORT void wgpuDeviceSetUncapturedErrorCallback(WGPUDevice device, WGPUErrorCallback callback, void * userdata);
+WGPU_EXPORT void wgpuDeviceTick(WGPUDevice device);
+WGPU_EXPORT void wgpuDeviceReference(WGPUDevice device);
+WGPU_EXPORT void wgpuDeviceRelease(WGPUDevice device);
+
+// Methods of Fence
+WGPU_EXPORT uint64_t wgpuFenceGetCompletedValue(WGPUFence fence);
+WGPU_EXPORT void wgpuFenceOnCompletion(WGPUFence fence, uint64_t value, WGPUFenceOnCompletionCallback callback, void * userdata);
+WGPU_EXPORT void wgpuFenceReference(WGPUFence fence);
+WGPU_EXPORT void wgpuFenceRelease(WGPUFence fence);
+
+// Methods of Instance
+WGPU_EXPORT WGPUSurface wgpuInstanceCreateSurface(WGPUInstance instance, WGPUSurfaceDescriptor const * descriptor);
+WGPU_EXPORT void wgpuInstanceReference(WGPUInstance instance);
+WGPU_EXPORT void wgpuInstanceRelease(WGPUInstance instance);
+
+// Methods of PipelineLayout
+WGPU_EXPORT void wgpuPipelineLayoutReference(WGPUPipelineLayout pipelineLayout);
+WGPU_EXPORT void wgpuPipelineLayoutRelease(WGPUPipelineLayout pipelineLayout);
+
+// Methods of Queue
+WGPU_EXPORT WGPUFence wgpuQueueCreateFence(WGPUQueue queue, WGPUFenceDescriptor const * descriptor);
+WGPU_EXPORT void wgpuQueueSignal(WGPUQueue queue, WGPUFence fence, uint64_t signalValue);
+WGPU_EXPORT void wgpuQueueSubmit(WGPUQueue queue, uint32_t commandCount, WGPUCommandBuffer const * commands);
+WGPU_EXPORT void wgpuQueueReference(WGPUQueue queue);
+WGPU_EXPORT void wgpuQueueRelease(WGPUQueue queue);
+
+// Methods of RenderBundle
+WGPU_EXPORT void wgpuRenderBundleReference(WGPURenderBundle renderBundle);
+WGPU_EXPORT void wgpuRenderBundleRelease(WGPURenderBundle renderBundle);
+
+// Methods of RenderBundleEncoder
+WGPU_EXPORT void wgpuRenderBundleEncoderDraw(WGPURenderBundleEncoder renderBundleEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
+WGPU_EXPORT void wgpuRenderBundleEncoderDrawIndexed(WGPURenderBundleEncoder renderBundleEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance);
+WGPU_EXPORT void wgpuRenderBundleEncoderDrawIndexedIndirect(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
+WGPU_EXPORT void wgpuRenderBundleEncoderDrawIndirect(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
+WGPU_EXPORT WGPURenderBundle wgpuRenderBundleEncoderFinish(WGPURenderBundleEncoder renderBundleEncoder, WGPURenderBundleDescriptor const * descriptor);
+WGPU_EXPORT void wgpuRenderBundleEncoderInsertDebugMarker(WGPURenderBundleEncoder renderBundleEncoder, char const * groupLabel);
+WGPU_EXPORT void wgpuRenderBundleEncoderPopDebugGroup(WGPURenderBundleEncoder renderBundleEncoder);
+WGPU_EXPORT void wgpuRenderBundleEncoderPushDebugGroup(WGPURenderBundleEncoder renderBundleEncoder, char const * groupLabel);
+WGPU_EXPORT void wgpuRenderBundleEncoderSetBindGroup(WGPURenderBundleEncoder renderBundleEncoder, uint32_t groupIndex, WGPUBindGroup group, uint32_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
+WGPU_EXPORT void wgpuRenderBundleEncoderSetIndexBuffer(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer buffer, uint64_t offset, uint64_t size);
+WGPU_EXPORT void wgpuRenderBundleEncoderSetPipeline(WGPURenderBundleEncoder renderBundleEncoder, WGPURenderPipeline pipeline);
+WGPU_EXPORT void wgpuRenderBundleEncoderSetVertexBuffer(WGPURenderBundleEncoder renderBundleEncoder, uint32_t slot, WGPUBuffer buffer, uint64_t offset, uint64_t size);
+WGPU_EXPORT void wgpuRenderBundleEncoderReference(WGPURenderBundleEncoder renderBundleEncoder);
+WGPU_EXPORT void wgpuRenderBundleEncoderRelease(WGPURenderBundleEncoder renderBundleEncoder);
+
+// Methods of RenderPassEncoder
+WGPU_EXPORT void wgpuRenderPassEncoderDraw(WGPURenderPassEncoder renderPassEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
+WGPU_EXPORT void wgpuRenderPassEncoderDrawIndexed(WGPURenderPassEncoder renderPassEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance);
+WGPU_EXPORT void wgpuRenderPassEncoderDrawIndexedIndirect(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
+WGPU_EXPORT void wgpuRenderPassEncoderDrawIndirect(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
+WGPU_EXPORT void wgpuRenderPassEncoderEndPass(WGPURenderPassEncoder renderPassEncoder);
+WGPU_EXPORT void wgpuRenderPassEncoderExecuteBundles(WGPURenderPassEncoder renderPassEncoder, uint32_t bundlesCount, WGPURenderBundle const * bundles);
+WGPU_EXPORT void wgpuRenderPassEncoderInsertDebugMarker(WGPURenderPassEncoder renderPassEncoder, char const * groupLabel);
+WGPU_EXPORT void wgpuRenderPassEncoderPopDebugGroup(WGPURenderPassEncoder renderPassEncoder);
+WGPU_EXPORT void wgpuRenderPassEncoderPushDebugGroup(WGPURenderPassEncoder renderPassEncoder, char const * groupLabel);
+WGPU_EXPORT void wgpuRenderPassEncoderSetBindGroup(WGPURenderPassEncoder renderPassEncoder, uint32_t groupIndex, WGPUBindGroup group, uint32_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
+WGPU_EXPORT void wgpuRenderPassEncoderSetBlendColor(WGPURenderPassEncoder renderPassEncoder, WGPUColor const * color);
+WGPU_EXPORT void wgpuRenderPassEncoderSetIndexBuffer(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer buffer, uint64_t offset, uint64_t size);
+WGPU_EXPORT void wgpuRenderPassEncoderSetPipeline(WGPURenderPassEncoder renderPassEncoder, WGPURenderPipeline pipeline);
+WGPU_EXPORT void wgpuRenderPassEncoderSetScissorRect(WGPURenderPassEncoder renderPassEncoder, uint32_t x, uint32_t y, uint32_t width, uint32_t height);
+WGPU_EXPORT void wgpuRenderPassEncoderSetStencilReference(WGPURenderPassEncoder renderPassEncoder, uint32_t reference);
+WGPU_EXPORT void wgpuRenderPassEncoderSetVertexBuffer(WGPURenderPassEncoder renderPassEncoder, uint32_t slot, WGPUBuffer buffer, uint64_t offset, uint64_t size);
+WGPU_EXPORT void wgpuRenderPassEncoderSetViewport(WGPURenderPassEncoder renderPassEncoder, float x, float y, float width, float height, float minDepth, float maxDepth);
+WGPU_EXPORT void wgpuRenderPassEncoderReference(WGPURenderPassEncoder renderPassEncoder);
+WGPU_EXPORT void wgpuRenderPassEncoderRelease(WGPURenderPassEncoder renderPassEncoder);
+
+// Methods of RenderPipeline
+WGPU_EXPORT WGPUBindGroupLayout wgpuRenderPipelineGetBindGroupLayout(WGPURenderPipeline renderPipeline, uint32_t groupIndex);
+WGPU_EXPORT void wgpuRenderPipelineReference(WGPURenderPipeline renderPipeline);
+WGPU_EXPORT void wgpuRenderPipelineRelease(WGPURenderPipeline renderPipeline);
+
+// Methods of Sampler
+WGPU_EXPORT void wgpuSamplerReference(WGPUSampler sampler);
+WGPU_EXPORT void wgpuSamplerRelease(WGPUSampler sampler);
+
+// Methods of ShaderModule
+WGPU_EXPORT void wgpuShaderModuleReference(WGPUShaderModule shaderModule);
+WGPU_EXPORT void wgpuShaderModuleRelease(WGPUShaderModule shaderModule);
+
+// Methods of Surface
+WGPU_EXPORT void wgpuSurfaceReference(WGPUSurface surface);
+WGPU_EXPORT void wgpuSurfaceRelease(WGPUSurface surface);
+
+// Methods of SwapChain
+WGPU_EXPORT void wgpuSwapChainConfigure(WGPUSwapChain swapChain, WGPUTextureFormat format, WGPUTextureUsageFlags allowedUsage, uint32_t width, uint32_t height);
+WGPU_EXPORT WGPUTextureView wgpuSwapChainGetCurrentTextureView(WGPUSwapChain swapChain);
+WGPU_EXPORT void wgpuSwapChainPresent(WGPUSwapChain swapChain);
+WGPU_EXPORT void wgpuSwapChainReference(WGPUSwapChain swapChain);
+WGPU_EXPORT void wgpuSwapChainRelease(WGPUSwapChain swapChain);
+
+// Methods of Texture
+WGPU_EXPORT WGPUTextureView wgpuTextureCreateView(WGPUTexture texture, WGPUTextureViewDescriptor const * descriptor);
+WGPU_EXPORT void wgpuTextureDestroy(WGPUTexture texture);
+WGPU_EXPORT void wgpuTextureReference(WGPUTexture texture);
+WGPU_EXPORT void wgpuTextureRelease(WGPUTexture texture);
+
+// Methods of TextureView
+WGPU_EXPORT void wgpuTextureViewReference(WGPUTextureView textureView);
+WGPU_EXPORT void wgpuTextureViewRelease(WGPUTextureView textureView);
+
+#endif  // !defined(WGPU_SKIP_DECLARATIONS)
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // WEBGPU_H_
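
The C entry points above follow a uniform create/encode/submit pattern, with the per-object Reference/Release pairs managing lifetime. A minimal sketch of recording and submitting an empty command buffer, assuming the application has already obtained a WGPUDevice named `device` from its platform layer (device creation is not among the declarations above):

    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); // default descriptor
    wgpuCommandEncoderPushDebugGroup(encoder, "frame");
    // ... record copies, passes and debug markers here ...
    wgpuCommandEncoderPopDebugGroup(encoder);

    WGPUCommandBuffer commands = wgpuCommandEncoderFinish(encoder, nullptr);
    WGPUQueue queue = wgpuDeviceGetDefaultQueue(device);
    wgpuQueueSubmit(queue, 1, &commands);

    // Handles are reference counted; release what is no longer needed.
    wgpuCommandBufferRelease(commands);
    wgpuCommandEncoderRelease(encoder);
    wgpuQueueRelease(queue);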

+ 1273 - 0
3rdparty/webgpu/include/webgpu/webgpu_cpp.h

@@ -0,0 +1,1273 @@
+
+#ifndef WEBGPU_CPP_H_
+#define WEBGPU_CPP_H_
+
+#include "webgpu/webgpu.h"
+#include "webgpu/EnumClassBitmasks.h"
+
+namespace wgpu {
+
+    static constexpr uint64_t kWholeSize = WGPU_WHOLE_SIZE;
+
+    enum class AdapterType : uint32_t {
+        DiscreteGPU = 0x00000000,
+        IntegratedGPU = 0x00000001,
+        CPU = 0x00000002,
+        Unknown = 0x00000003,
+    };
+
+    enum class AddressMode : uint32_t {
+        Repeat = 0x00000000,
+        MirrorRepeat = 0x00000001,
+        ClampToEdge = 0x00000002,
+    };
+
+    enum class BackendType : uint32_t {
+        Null = 0x00000000,
+        D3D11 = 0x00000001,
+        D3D12 = 0x00000002,
+        Metal = 0x00000003,
+        Vulkan = 0x00000004,
+        OpenGL = 0x00000005,
+        OpenGLES = 0x00000006,
+    };
+
+    enum class BindingType : uint32_t {
+        UniformBuffer = 0x00000000,
+        StorageBuffer = 0x00000001,
+        ReadonlyStorageBuffer = 0x00000002,
+        Sampler = 0x00000003,
+        ComparisonSampler = 0x00000004,
+        SampledTexture = 0x00000005,
+        StorageTexture = 0x00000006,
+        ReadonlyStorageTexture = 0x00000007,
+        WriteonlyStorageTexture = 0x00000008,
+    };
+
+    enum class BlendFactor : uint32_t {
+        Zero = 0x00000000,
+        One = 0x00000001,
+        SrcColor = 0x00000002,
+        OneMinusSrcColor = 0x00000003,
+        SrcAlpha = 0x00000004,
+        OneMinusSrcAlpha = 0x00000005,
+        DstColor = 0x00000006,
+        OneMinusDstColor = 0x00000007,
+        DstAlpha = 0x00000008,
+        OneMinusDstAlpha = 0x00000009,
+        SrcAlphaSaturated = 0x0000000A,
+        BlendColor = 0x0000000B,
+        OneMinusBlendColor = 0x0000000C,
+    };
+
+    enum class BlendOperation : uint32_t {
+        Add = 0x00000000,
+        Subtract = 0x00000001,
+        ReverseSubtract = 0x00000002,
+        Min = 0x00000003,
+        Max = 0x00000004,
+    };
+
+    enum class BufferMapAsyncStatus : uint32_t {
+        Success = 0x00000000,
+        Error = 0x00000001,
+        Unknown = 0x00000002,
+        DeviceLost = 0x00000003,
+    };
+
+    enum class CompareFunction : uint32_t {
+        Undefined = 0x00000000,
+        Never = 0x00000001,
+        Less = 0x00000002,
+        LessEqual = 0x00000003,
+        Greater = 0x00000004,
+        GreaterEqual = 0x00000005,
+        Equal = 0x00000006,
+        NotEqual = 0x00000007,
+        Always = 0x00000008,
+    };
+
+    enum class CullMode : uint32_t {
+        None = 0x00000000,
+        Front = 0x00000001,
+        Back = 0x00000002,
+    };
+
+    enum class ErrorFilter : uint32_t {
+        None = 0x00000000,
+        Validation = 0x00000001,
+        OutOfMemory = 0x00000002,
+    };
+
+    enum class ErrorType : uint32_t {
+        NoError = 0x00000000,
+        Validation = 0x00000001,
+        OutOfMemory = 0x00000002,
+        Unknown = 0x00000003,
+        DeviceLost = 0x00000004,
+    };
+
+    enum class FenceCompletionStatus : uint32_t {
+        Success = 0x00000000,
+        Error = 0x00000001,
+        Unknown = 0x00000002,
+        DeviceLost = 0x00000003,
+    };
+
+    enum class FilterMode : uint32_t {
+        Nearest = 0x00000000,
+        Linear = 0x00000001,
+    };
+
+    enum class FrontFace : uint32_t {
+        CCW = 0x00000000,
+        CW = 0x00000001,
+    };
+
+    enum class IndexFormat : uint32_t {
+        Uint16 = 0x00000000,
+        Uint32 = 0x00000001,
+    };
+
+    enum class InputStepMode : uint32_t {
+        Vertex = 0x00000000,
+        Instance = 0x00000001,
+    };
+
+    enum class LoadOp : uint32_t {
+        Clear = 0x00000000,
+        Load = 0x00000001,
+    };
+
+    enum class PresentMode : uint32_t {
+        Immediate = 0x00000000,
+        Mailbox = 0x00000001,
+        Fifo = 0x00000002,
+    };
+
+    enum class PrimitiveTopology : uint32_t {
+        PointList = 0x00000000,
+        LineList = 0x00000001,
+        LineStrip = 0x00000002,
+        TriangleList = 0x00000003,
+        TriangleStrip = 0x00000004,
+    };
+
+    enum class SType : uint32_t {
+        Invalid = 0x00000000,
+        SurfaceDescriptorFromMetalLayer = 0x00000001,
+        SurfaceDescriptorFromWindowsHWND = 0x00000002,
+        SurfaceDescriptorFromXlib = 0x00000003,
+        SurfaceDescriptorFromHTMLCanvasId = 0x00000004,
+        ShaderModuleSPIRVDescriptor = 0x00000005,
+        ShaderModuleWGSLDescriptor = 0x00000006,
+        SamplerDescriptorDummyAnisotropicFiltering = 0x00000007,
+        RenderPipelineDescriptorDummyExtension = 0x00000008,
+    };
+
+    enum class StencilOperation : uint32_t {
+        Keep = 0x00000000,
+        Zero = 0x00000001,
+        Replace = 0x00000002,
+        Invert = 0x00000003,
+        IncrementClamp = 0x00000004,
+        DecrementClamp = 0x00000005,
+        IncrementWrap = 0x00000006,
+        DecrementWrap = 0x00000007,
+    };
+
+    enum class StoreOp : uint32_t {
+        Store = 0x00000000,
+        Clear = 0x00000001,
+    };
+
+    enum class TextureAspect : uint32_t {
+        All = 0x00000000,
+        StencilOnly = 0x00000001,
+        DepthOnly = 0x00000002,
+    };
+
+    enum class TextureComponentType : uint32_t {
+        Float = 0x00000000,
+        Sint = 0x00000001,
+        Uint = 0x00000002,
+    };
+
+    enum class TextureDimension : uint32_t {
+        e1D = 0x00000000,
+        e2D = 0x00000001,
+        e3D = 0x00000002,
+    };
+
+    enum class TextureFormat : uint32_t {
+        Undefined = 0x00000000,
+        R8Unorm = 0x00000001,
+        R8Snorm = 0x00000002,
+        R8Uint = 0x00000003,
+        R8Sint = 0x00000004,
+        R16Uint = 0x00000005,
+        R16Sint = 0x00000006,
+        R16Float = 0x00000007,
+        RG8Unorm = 0x00000008,
+        RG8Snorm = 0x00000009,
+        RG8Uint = 0x0000000A,
+        RG8Sint = 0x0000000B,
+        R32Float = 0x0000000C,
+        R32Uint = 0x0000000D,
+        R32Sint = 0x0000000E,
+        RG16Uint = 0x0000000F,
+        RG16Sint = 0x00000010,
+        RG16Float = 0x00000011,
+        RGBA8Unorm = 0x00000012,
+        RGBA8UnormSrgb = 0x00000013,
+        RGBA8Snorm = 0x00000014,
+        RGBA8Uint = 0x00000015,
+        RGBA8Sint = 0x00000016,
+        BGRA8Unorm = 0x00000017,
+        BGRA8UnormSrgb = 0x00000018,
+        RGB10A2Unorm = 0x00000019,
+        RG11B10Float = 0x0000001A,
+        RG32Float = 0x0000001B,
+        RG32Uint = 0x0000001C,
+        RG32Sint = 0x0000001D,
+        RGBA16Uint = 0x0000001E,
+        RGBA16Sint = 0x0000001F,
+        RGBA16Float = 0x00000020,
+        RGBA32Float = 0x00000021,
+        RGBA32Uint = 0x00000022,
+        RGBA32Sint = 0x00000023,
+        Depth32Float = 0x00000024,
+        Depth24Plus = 0x00000025,
+        Depth24PlusStencil8 = 0x00000026,
+        BC1RGBAUnorm = 0x00000027,
+        BC1RGBAUnormSrgb = 0x00000028,
+        BC2RGBAUnorm = 0x00000029,
+        BC2RGBAUnormSrgb = 0x0000002A,
+        BC3RGBAUnorm = 0x0000002B,
+        BC3RGBAUnormSrgb = 0x0000002C,
+        BC4RUnorm = 0x0000002D,
+        BC4RSnorm = 0x0000002E,
+        BC5RGUnorm = 0x0000002F,
+        BC5RGSnorm = 0x00000030,
+        BC6HRGBUfloat = 0x00000031,
+        BC6HRGBSfloat = 0x00000032,
+        BC7RGBAUnorm = 0x00000033,
+        BC7RGBAUnormSrgb = 0x00000034,
+    };
+
+    enum class TextureViewDimension : uint32_t {
+        Undefined = 0x00000000,
+        e1D = 0x00000001,
+        e2D = 0x00000002,
+        e2DArray = 0x00000003,
+        Cube = 0x00000004,
+        CubeArray = 0x00000005,
+        e3D = 0x00000006,
+    };
+
+    enum class VertexFormat : uint32_t {
+        UChar2 = 0x00000000,
+        UChar4 = 0x00000001,
+        Char2 = 0x00000002,
+        Char4 = 0x00000003,
+        UChar2Norm = 0x00000004,
+        UChar4Norm = 0x00000005,
+        Char2Norm = 0x00000006,
+        Char4Norm = 0x00000007,
+        UShort2 = 0x00000008,
+        UShort4 = 0x00000009,
+        Short2 = 0x0000000A,
+        Short4 = 0x0000000B,
+        UShort2Norm = 0x0000000C,
+        UShort4Norm = 0x0000000D,
+        Short2Norm = 0x0000000E,
+        Short4Norm = 0x0000000F,
+        Half2 = 0x00000010,
+        Half4 = 0x00000011,
+        Float = 0x00000012,
+        Float2 = 0x00000013,
+        Float3 = 0x00000014,
+        Float4 = 0x00000015,
+        UInt = 0x00000016,
+        UInt2 = 0x00000017,
+        UInt3 = 0x00000018,
+        UInt4 = 0x00000019,
+        Int = 0x0000001A,
+        Int2 = 0x0000001B,
+        Int3 = 0x0000001C,
+        Int4 = 0x0000001D,
+    };
+
+
+    enum class BufferUsage : uint32_t {
+        None = 0x00000000,
+        MapRead = 0x00000001,
+        MapWrite = 0x00000002,
+        CopySrc = 0x00000004,
+        CopyDst = 0x00000008,
+        Index = 0x00000010,
+        Vertex = 0x00000020,
+        Uniform = 0x00000040,
+        Storage = 0x00000080,
+        Indirect = 0x00000100,
+    };
+
+    enum class ColorWriteMask : uint32_t {
+        None = 0x00000000,
+        Red = 0x00000001,
+        Green = 0x00000002,
+        Blue = 0x00000004,
+        Alpha = 0x00000008,
+        All = 0x0000000F,
+    };
+
+    enum class ShaderStage : uint32_t {
+        None = 0x00000000,
+        Vertex = 0x00000001,
+        Fragment = 0x00000002,
+        Compute = 0x00000004,
+    };
+
+    enum class TextureUsage : uint32_t {
+        None = 0x00000000,
+        CopySrc = 0x00000001,
+        CopyDst = 0x00000002,
+        Sampled = 0x00000004,
+        Storage = 0x00000008,
+        OutputAttachment = 0x00000010,
+        Present = 0x00000020,
+    };
+
+
+    template<>
+    struct IsDawnBitmask<BufferUsage> {
+        static constexpr bool enable = true;
+    };
+
+    template<>
+    struct IsDawnBitmask<ColorWriteMask> {
+        static constexpr bool enable = true;
+    };
+
+    template<>
+    struct IsDawnBitmask<ShaderStage> {
+        static constexpr bool enable = true;
+    };
+
+    template<>
+    struct IsDawnBitmask<TextureUsage> {
+        static constexpr bool enable = true;
+    };
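
The four IsDawnBitmask specializations above are what opt BufferUsage, ColorWriteMask, ShaderStage and TextureUsage into the bitwise operators declared in webgpu/EnumClassBitmasks.h (included at the top of this header). A brief usage sketch:

    // Flag enums marked with IsDawnBitmask compose and test without casts.
    wgpu::BufferUsage staging = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
    if (staging & wgpu::BufferUsage::MapRead) {
        // the result of & converts to bool, so masks can be tested directly
    }
    wgpu::TextureUsage rt = wgpu::TextureUsage::OutputAttachment | wgpu::TextureUsage::Sampled;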
+
+
+    using Proc = WGPUProc;
+    using BufferMapReadCallback = WGPUBufferMapReadCallback;
+    using BufferMapWriteCallback = WGPUBufferMapWriteCallback;
+    using DeviceLostCallback = WGPUDeviceLostCallback;
+    using ErrorCallback = WGPUErrorCallback;
+    using FenceOnCompletionCallback = WGPUFenceOnCompletionCallback;
+
+    class BindGroup;
+    class BindGroupLayout;
+    class Buffer;
+    class CommandBuffer;
+    class CommandEncoder;
+    class ComputePassEncoder;
+    class ComputePipeline;
+    class Device;
+    class Fence;
+    class Instance;
+    class PipelineLayout;
+    class Queue;
+    class RenderBundle;
+    class RenderBundleEncoder;
+    class RenderPassEncoder;
+    class RenderPipeline;
+    class Sampler;
+    class ShaderModule;
+    class Surface;
+    class SwapChain;
+    class Texture;
+    class TextureView;
+
+    struct AdapterProperties;
+    struct BindGroupEntry;
+    struct BindGroupLayoutEntry;
+    struct BlendDescriptor;
+    struct BufferCopyView;
+    struct BufferDescriptor;
+    struct Color;
+    struct CommandBufferDescriptor;
+    struct CommandEncoderDescriptor;
+    struct ComputePassDescriptor;
+    struct CreateBufferMappedResult;
+    struct DeviceProperties;
+    struct Extent3D;
+    struct FenceDescriptor;
+    struct InstanceDescriptor;
+    struct Origin3D;
+    struct PipelineLayoutDescriptor;
+    struct ProgrammableStageDescriptor;
+    struct RasterizationStateDescriptor;
+    struct RenderBundleDescriptor;
+    struct RenderBundleEncoderDescriptor;
+    struct RenderPassDepthStencilAttachmentDescriptor;
+    struct SamplerDescriptor;
+    struct SamplerDescriptorDummyAnisotropicFiltering;
+    struct ShaderModuleDescriptor;
+    struct ShaderModuleSPIRVDescriptor;
+    struct ShaderModuleWGSLDescriptor;
+    struct StencilStateFaceDescriptor;
+    struct SurfaceDescriptor;
+    struct SurfaceDescriptorFromHTMLCanvasId;
+    struct SurfaceDescriptorFromMetalLayer;
+    struct SurfaceDescriptorFromWindowsHWND;
+    struct SurfaceDescriptorFromXlib;
+    struct SwapChainDescriptor;
+    struct TextureViewDescriptor;
+    struct VertexAttributeDescriptor;
+    struct BindGroupDescriptor;
+    struct BindGroupLayoutDescriptor;
+    struct ColorStateDescriptor;
+    struct ComputePipelineDescriptor;
+    struct DepthStencilStateDescriptor;
+    struct RenderPassColorAttachmentDescriptor;
+    struct RenderPipelineDescriptorDummyExtension;
+    struct TextureCopyView;
+    struct TextureDescriptor;
+    struct VertexBufferLayoutDescriptor;
+    struct RenderPassDescriptor;
+    struct VertexStateDescriptor;
+    struct RenderPipelineDescriptor;
+
+    template<typename Derived, typename CType>
+    class ObjectBase {
+      public:
+        ObjectBase() = default;
+        ObjectBase(CType handle): mHandle(handle) {
+            if (mHandle) Derived::WGPUReference(mHandle);
+        }
+        ~ObjectBase() {
+            if (mHandle) Derived::WGPURelease(mHandle);
+        }
+
+        ObjectBase(ObjectBase const& other)
+            : ObjectBase(other.Get()) {
+        }
+        Derived& operator=(ObjectBase const& other) {
+            if (&other != this) {
+                if (mHandle) Derived::WGPURelease(mHandle);
+                mHandle = other.mHandle;
+                if (mHandle) Derived::WGPUReference(mHandle);
+            }
+
+            return static_cast<Derived&>(*this);
+        }
+
+        ObjectBase(ObjectBase&& other) {
+            mHandle = other.mHandle;
+            other.mHandle = 0;
+        }
+        Derived& operator=(ObjectBase&& other) {
+            if (&other != this) {
+                if (mHandle) Derived::WGPURelease(mHandle);
+                mHandle = other.mHandle;
+                other.mHandle = 0;
+            }
+
+            return static_cast<Derived&>(*this);
+        }
+
+        ObjectBase(std::nullptr_t) {}
+        Derived& operator=(std::nullptr_t) {
+            if (mHandle != nullptr) {
+                Derived::WGPURelease(mHandle);
+                mHandle = nullptr;
+            }
+            return static_cast<Derived&>(*this);
+        }
+
+        bool operator==(std::nullptr_t) const {
+            return mHandle == nullptr;
+        }
+        bool operator!=(std::nullptr_t) const {
+            return mHandle != nullptr;
+        }
+
+        explicit operator bool() const {
+            return mHandle != nullptr;
+        }
+        CType Get() const {
+            return mHandle;
+        }
+        CType Release() {
+            CType result = mHandle;
+            mHandle = 0;
+            return result;
+        }
+        static Derived Acquire(CType handle) {
+            Derived result;
+            result.mHandle = handle;
+            return result;
+        }
+
+      protected:
+        CType mHandle = nullptr;
+    };
+
+
+
+    class BindGroup : public ObjectBase<BindGroup, WGPUBindGroup> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+
+      private:
+        friend ObjectBase<BindGroup, WGPUBindGroup>;
+        static void WGPUReference(WGPUBindGroup handle);
+        static void WGPURelease(WGPUBindGroup handle);
+    };
+
+    class BindGroupLayout : public ObjectBase<BindGroupLayout, WGPUBindGroupLayout> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+
+      private:
+        friend ObjectBase<BindGroupLayout, WGPUBindGroupLayout>;
+        static void WGPUReference(WGPUBindGroupLayout handle);
+        static void WGPURelease(WGPUBindGroupLayout handle);
+    };
+
+    class Buffer : public ObjectBase<Buffer, WGPUBuffer> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+        void Destroy() const;
+        void MapReadAsync(BufferMapReadCallback callback, void * userdata) const;
+        void MapWriteAsync(BufferMapWriteCallback callback, void * userdata) const;
+        void SetSubData(uint64_t start, uint64_t count, void const * data) const;
+        void Unmap() const;
+
+      private:
+        friend ObjectBase<Buffer, WGPUBuffer>;
+        static void WGPUReference(WGPUBuffer handle);
+        static void WGPURelease(WGPUBuffer handle);
+    };
+
+    class CommandBuffer : public ObjectBase<CommandBuffer, WGPUCommandBuffer> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+
+      private:
+        friend ObjectBase<CommandBuffer, WGPUCommandBuffer>;
+        static void WGPUReference(WGPUCommandBuffer handle);
+        static void WGPURelease(WGPUCommandBuffer handle);
+    };
+
+    class CommandEncoder : public ObjectBase<CommandEncoder, WGPUCommandEncoder> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+        ComputePassEncoder BeginComputePass(ComputePassDescriptor const * descriptor = nullptr) const;
+        RenderPassEncoder BeginRenderPass(RenderPassDescriptor const * descriptor) const;
+        void CopyBufferToBuffer(Buffer const& source, uint64_t sourceOffset, Buffer const& destination, uint64_t destinationOffset, uint64_t size) const;
+        void CopyBufferToTexture(BufferCopyView const * source, TextureCopyView const * destination, Extent3D const * copySize) const;
+        void CopyTextureToBuffer(TextureCopyView const * source, BufferCopyView const * destination, Extent3D const * copySize) const;
+        void CopyTextureToTexture(TextureCopyView const * source, TextureCopyView const * destination, Extent3D const * copySize) const;
+        CommandBuffer Finish(CommandBufferDescriptor const * descriptor = nullptr) const;
+        void InsertDebugMarker(char const * groupLabel) const;
+        void PopDebugGroup() const;
+        void PushDebugGroup(char const * groupLabel) const;
+
+      private:
+        friend ObjectBase<CommandEncoder, WGPUCommandEncoder>;
+        static void WGPUReference(WGPUCommandEncoder handle);
+        static void WGPURelease(WGPUCommandEncoder handle);
+    };
+
+    class ComputePassEncoder : public ObjectBase<ComputePassEncoder, WGPUComputePassEncoder> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+        void Dispatch(uint32_t x, uint32_t y = 1, uint32_t z = 1) const;
+        void DispatchIndirect(Buffer const& indirectBuffer, uint64_t indirectOffset) const;
+        void EndPass() const;
+        void InsertDebugMarker(char const * groupLabel) const;
+        void PopDebugGroup() const;
+        void PushDebugGroup(char const * groupLabel) const;
+        void SetBindGroup(uint32_t groupIndex, BindGroup const& group, uint32_t dynamicOffsetCount = 0, uint32_t const * dynamicOffsets = nullptr) const;
+        void SetPipeline(ComputePipeline const& pipeline) const;
+
+      private:
+        friend ObjectBase<ComputePassEncoder, WGPUComputePassEncoder>;
+        static void WGPUReference(WGPUComputePassEncoder handle);
+        static void WGPURelease(WGPUComputePassEncoder handle);
+    };
+
+    class ComputePipeline : public ObjectBase<ComputePipeline, WGPUComputePipeline> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+        BindGroupLayout GetBindGroupLayout(uint32_t groupIndex) const;
+
+      private:
+        friend ObjectBase<ComputePipeline, WGPUComputePipeline>;
+        static void WGPUReference(WGPUComputePipeline handle);
+        static void WGPURelease(WGPUComputePipeline handle);
+    };
+
+    class Device : public ObjectBase<Device, WGPUDevice> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+        BindGroup CreateBindGroup(BindGroupDescriptor const * descriptor) const;
+        BindGroupLayout CreateBindGroupLayout(BindGroupLayoutDescriptor const * descriptor) const;
+        Buffer CreateBuffer(BufferDescriptor const * descriptor) const;
+        CreateBufferMappedResult CreateBufferMapped(BufferDescriptor const * descriptor) const;
+        CommandEncoder CreateCommandEncoder(CommandEncoderDescriptor const * descriptor = nullptr) const;
+        ComputePipeline CreateComputePipeline(ComputePipelineDescriptor const * descriptor) const;
+        PipelineLayout CreatePipelineLayout(PipelineLayoutDescriptor const * descriptor) const;
+        Queue CreateQueue() const;
+        RenderBundleEncoder CreateRenderBundleEncoder(RenderBundleEncoderDescriptor const * descriptor) const;
+        RenderPipeline CreateRenderPipeline(RenderPipelineDescriptor const * descriptor) const;
+        Sampler CreateSampler(SamplerDescriptor const * descriptor) const;
+        ShaderModule CreateShaderModule(ShaderModuleDescriptor const * descriptor) const;
+        SwapChain CreateSwapChain(Surface const& surface, SwapChainDescriptor const * descriptor) const;
+        Texture CreateTexture(TextureDescriptor const * descriptor) const;
+        Queue GetDefaultQueue() const;
+        void InjectError(ErrorType type, char const * message) const;
+        void LoseForTesting() const;
+        bool PopErrorScope(ErrorCallback callback, void * userdata) const;
+        void PushErrorScope(ErrorFilter filter) const;
+        void SetDeviceLostCallback(DeviceLostCallback callback, void * userdata) const;
+        void SetUncapturedErrorCallback(ErrorCallback callback, void * userdata) const;
+        void Tick() const;
+
+      private:
+        friend ObjectBase<Device, WGPUDevice>;
+        static void WGPUReference(WGPUDevice handle);
+        static void WGPURelease(WGPUDevice handle);
+    };
+
+    class Fence : public ObjectBase<Fence, WGPUFence> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+        uint64_t GetCompletedValue() const;
+        void OnCompletion(uint64_t value, FenceOnCompletionCallback callback, void * userdata) const;
+
+      private:
+        friend ObjectBase<Fence, WGPUFence>;
+        static void WGPUReference(WGPUFence handle);
+        static void WGPURelease(WGPUFence handle);
+    };
+
+    class Instance : public ObjectBase<Instance, WGPUInstance> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+        Surface CreateSurface(SurfaceDescriptor const * descriptor) const;
+
+      private:
+        friend ObjectBase<Instance, WGPUInstance>;
+        static void WGPUReference(WGPUInstance handle);
+        static void WGPURelease(WGPUInstance handle);
+    };
+
+    class PipelineLayout : public ObjectBase<PipelineLayout, WGPUPipelineLayout> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+
+      private:
+        friend ObjectBase<PipelineLayout, WGPUPipelineLayout>;
+        static void WGPUReference(WGPUPipelineLayout handle);
+        static void WGPURelease(WGPUPipelineLayout handle);
+    };
+
+    class Queue : public ObjectBase<Queue, WGPUQueue> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+        Fence CreateFence(FenceDescriptor const * descriptor) const;
+        void Signal(Fence const& fence, uint64_t signalValue) const;
+        void Submit(uint32_t commandCount, CommandBuffer const * commands) const;
+
+      private:
+        friend ObjectBase<Queue, WGPUQueue>;
+        static void WGPUReference(WGPUQueue handle);
+        static void WGPURelease(WGPUQueue handle);
+    };
+
+    class RenderBundle : public ObjectBase<RenderBundle, WGPURenderBundle> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+
+      private:
+        friend ObjectBase<RenderBundle, WGPURenderBundle>;
+        static void WGPUReference(WGPURenderBundle handle);
+        static void WGPURelease(WGPURenderBundle handle);
+    };
+
+    class RenderBundleEncoder : public ObjectBase<RenderBundleEncoder, WGPURenderBundleEncoder> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+        void Draw(uint32_t vertexCount, uint32_t instanceCount = 1, uint32_t firstVertex = 0, uint32_t firstInstance = 0) const;
+        void DrawIndexed(uint32_t indexCount, uint32_t instanceCount = 1, uint32_t firstIndex = 0, int32_t baseVertex = 0, uint32_t firstInstance = 0) const;
+        void DrawIndexedIndirect(Buffer const& indirectBuffer, uint64_t indirectOffset) const;
+        void DrawIndirect(Buffer const& indirectBuffer, uint64_t indirectOffset) const;
+        RenderBundle Finish(RenderBundleDescriptor const * descriptor = nullptr) const;
+        void InsertDebugMarker(char const * groupLabel) const;
+        void PopDebugGroup() const;
+        void PushDebugGroup(char const * groupLabel) const;
+        void SetBindGroup(uint32_t groupIndex, BindGroup const& group, uint32_t dynamicOffsetCount = 0, uint32_t const * dynamicOffsets = nullptr) const;
+        void SetIndexBuffer(Buffer const& buffer, uint64_t offset = 0, uint64_t size = 0) const;
+        void SetPipeline(RenderPipeline const& pipeline) const;
+        void SetVertexBuffer(uint32_t slot, Buffer const& buffer, uint64_t offset = 0, uint64_t size = 0) const;
+
+      private:
+        friend ObjectBase<RenderBundleEncoder, WGPURenderBundleEncoder>;
+        static void WGPUReference(WGPURenderBundleEncoder handle);
+        static void WGPURelease(WGPURenderBundleEncoder handle);
+    };
+
+    class RenderPassEncoder : public ObjectBase<RenderPassEncoder, WGPURenderPassEncoder> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+        void Draw(uint32_t vertexCount, uint32_t instanceCount = 1, uint32_t firstVertex = 0, uint32_t firstInstance = 0) const;
+        void DrawIndexed(uint32_t indexCount, uint32_t instanceCount = 1, uint32_t firstIndex = 0, int32_t baseVertex = 0, uint32_t firstInstance = 0) const;
+        void DrawIndexedIndirect(Buffer const& indirectBuffer, uint64_t indirectOffset) const;
+        void DrawIndirect(Buffer const& indirectBuffer, uint64_t indirectOffset) const;
+        void EndPass() const;
+        void ExecuteBundles(uint32_t bundlesCount, RenderBundle const * bundles) const;
+        void InsertDebugMarker(char const * groupLabel) const;
+        void PopDebugGroup() const;
+        void PushDebugGroup(char const * groupLabel) const;
+        void SetBindGroup(uint32_t groupIndex, BindGroup const& group, uint32_t dynamicOffsetCount = 0, uint32_t const * dynamicOffsets = nullptr) const;
+        void SetBlendColor(Color const * color) const;
+        void SetIndexBuffer(Buffer const& buffer, uint64_t offset = 0, uint64_t size = 0) const;
+        void SetPipeline(RenderPipeline const& pipeline) const;
+        void SetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height) const;
+        void SetStencilReference(uint32_t reference) const;
+        void SetVertexBuffer(uint32_t slot, Buffer const& buffer, uint64_t offset = 0, uint64_t size = 0) const;
+        void SetViewport(float x, float y, float width, float height, float minDepth, float maxDepth) const;
+
+      private:
+        friend ObjectBase<RenderPassEncoder, WGPURenderPassEncoder>;
+        static void WGPUReference(WGPURenderPassEncoder handle);
+        static void WGPURelease(WGPURenderPassEncoder handle);
+    };
+
+    class RenderPipeline : public ObjectBase<RenderPipeline, WGPURenderPipeline> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+        BindGroupLayout GetBindGroupLayout(uint32_t groupIndex) const;
+
+      private:
+        friend ObjectBase<RenderPipeline, WGPURenderPipeline>;
+        static void WGPUReference(WGPURenderPipeline handle);
+        static void WGPURelease(WGPURenderPipeline handle);
+    };
+
+    class Sampler : public ObjectBase<Sampler, WGPUSampler> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+
+      private:
+        friend ObjectBase<Sampler, WGPUSampler>;
+        static void WGPUReference(WGPUSampler handle);
+        static void WGPURelease(WGPUSampler handle);
+    };
+
+    class ShaderModule : public ObjectBase<ShaderModule, WGPUShaderModule> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+
+      private:
+        friend ObjectBase<ShaderModule, WGPUShaderModule>;
+        static void WGPUReference(WGPUShaderModule handle);
+        static void WGPURelease(WGPUShaderModule handle);
+    };
+
+    class Surface : public ObjectBase<Surface, WGPUSurface> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+
+      private:
+        friend ObjectBase<Surface, WGPUSurface>;
+        static void WGPUReference(WGPUSurface handle);
+        static void WGPURelease(WGPUSurface handle);
+    };
+
+    class SwapChain : public ObjectBase<SwapChain, WGPUSwapChain> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+        void Configure(TextureFormat format, TextureUsage allowedUsage, uint32_t width, uint32_t height) const;
+        TextureView GetCurrentTextureView() const;
+        void Present() const;
+
+      private:
+        friend ObjectBase<SwapChain, WGPUSwapChain>;
+        static void WGPUReference(WGPUSwapChain handle);
+        static void WGPURelease(WGPUSwapChain handle);
+    };
+
+    class Texture : public ObjectBase<Texture, WGPUTexture> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+        TextureView CreateView(TextureViewDescriptor const * descriptor = nullptr) const;
+        void Destroy() const;
+
+      private:
+        friend ObjectBase<Texture, WGPUTexture>;
+        static void WGPUReference(WGPUTexture handle);
+        static void WGPURelease(WGPUTexture handle);
+    };
+
+    class TextureView : public ObjectBase<TextureView, WGPUTextureView> {
+      public:
+        using ObjectBase::ObjectBase;
+        using ObjectBase::operator=;
+
+
+      private:
+        friend ObjectBase<TextureView, WGPUTextureView>;
+        static void WGPUReference(WGPUTextureView handle);
+        static void WGPURelease(WGPUTextureView handle);
+    };
+
+
+    Instance CreateInstance(InstanceDescriptor const * descriptor = nullptr);
+    Proc GetProcAddress(Device const& device, const char* procName);
+
+    struct ChainedStruct {
+        ChainedStruct const * nextInChain = nullptr;
+        SType sType = SType::Invalid;
+    };
+
+    struct AdapterProperties {
+        ChainedStruct const * nextInChain = nullptr;
+        uint32_t deviceID;
+        uint32_t vendorID;
+        char const * name;
+        AdapterType adapterType;
+        BackendType backendType;
+    };
+
+    struct BindGroupEntry {
+        uint32_t binding;
+        Buffer buffer;
+        uint64_t offset = 0;
+        uint64_t size;
+        Sampler sampler;
+        TextureView textureView;
+    };
+
+    struct BindGroupLayoutEntry {
+        uint32_t binding;
+        ShaderStage visibility;
+        BindingType type;
+        bool hasDynamicOffset = false;
+        bool multisampled = false;
+        TextureViewDimension textureDimension = TextureViewDimension::Undefined;
+        TextureViewDimension viewDimension = TextureViewDimension::Undefined;
+        TextureComponentType textureComponentType = TextureComponentType::Float;
+        TextureFormat storageTextureFormat = TextureFormat::Undefined;
+    };
+
+    struct BlendDescriptor {
+        BlendOperation operation = BlendOperation::Add;
+        BlendFactor srcFactor = BlendFactor::One;
+        BlendFactor dstFactor = BlendFactor::Zero;
+    };
+
+    struct BufferCopyView {
+        ChainedStruct const * nextInChain = nullptr;
+        Buffer buffer;
+        uint64_t offset = 0;
+        uint32_t rowPitch = 0;
+        uint32_t imageHeight = 0;
+        uint32_t bytesPerRow = 0;
+        uint32_t rowsPerImage = 0;
+    };
+
+    struct BufferDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+        BufferUsage usage;
+        uint64_t size;
+    };
+
+    struct Color {
+        float r;
+        float g;
+        float b;
+        float a;
+    };
+
+    struct CommandBufferDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+    };
+
+    struct CommandEncoderDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+    };
+
+    struct ComputePassDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+    };
+
+    struct CreateBufferMappedResult {
+        Buffer buffer;
+        uint64_t dataLength;
+        void * data;
+    };
+
+    struct DeviceProperties {
+        bool textureCompressionBC = false;
+    };
+
+    struct Extent3D {
+        uint32_t width;
+        uint32_t height;
+        uint32_t depth;
+    };
+
+    struct FenceDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+        uint64_t initialValue = 0;
+    };
+
+    struct InstanceDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+    };
+
+    struct Origin3D {
+        uint32_t x = 0;
+        uint32_t y = 0;
+        uint32_t z = 0;
+    };
+
+    struct PipelineLayoutDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+        uint32_t bindGroupLayoutCount;
+        BindGroupLayout const * bindGroupLayouts;
+    };
+
+    struct ProgrammableStageDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        ShaderModule module;
+        char const * entryPoint;
+    };
+
+    struct RasterizationStateDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        FrontFace frontFace = FrontFace::CCW;
+        CullMode cullMode = CullMode::None;
+        int32_t depthBias = 0;
+        float depthBiasSlopeScale = 0.0f;
+        float depthBiasClamp = 0.0f;
+    };
+
+    struct RenderBundleDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+    };
+
+    struct RenderBundleEncoderDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+        uint32_t colorFormatsCount;
+        TextureFormat const * colorFormats;
+        TextureFormat depthStencilFormat = TextureFormat::Undefined;
+        uint32_t sampleCount = 1;
+    };
+
+    struct RenderPassDepthStencilAttachmentDescriptor {
+        TextureView attachment;
+        LoadOp depthLoadOp;
+        StoreOp depthStoreOp;
+        float clearDepth;
+        LoadOp stencilLoadOp;
+        StoreOp stencilStoreOp;
+        uint32_t clearStencil = 0;
+    };
+
+    struct SamplerDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+        AddressMode addressModeU = AddressMode::ClampToEdge;
+        AddressMode addressModeV = AddressMode::ClampToEdge;
+        AddressMode addressModeW = AddressMode::ClampToEdge;
+        FilterMode magFilter = FilterMode::Nearest;
+        FilterMode minFilter = FilterMode::Nearest;
+        FilterMode mipmapFilter = FilterMode::Nearest;
+        float lodMinClamp = 0.0f;
+        float lodMaxClamp = 1000.0f;
+        CompareFunction compare = CompareFunction::Undefined;
+    };
+
+    struct SamplerDescriptorDummyAnisotropicFiltering : ChainedStruct {
+        SamplerDescriptorDummyAnisotropicFiltering() {
+            sType = SType::SamplerDescriptorDummyAnisotropicFiltering;
+        }
+        alignas(ChainedStruct) float maxAnisotropy;
+    };
+
+    struct ShaderModuleDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+        uint32_t codeSize = 0;
+        uint32_t const * code = nullptr;
+    };
+
+    struct ShaderModuleSPIRVDescriptor : ChainedStruct {
+        ShaderModuleSPIRVDescriptor() {
+            sType = SType::ShaderModuleSPIRVDescriptor;
+        }
+        alignas(ChainedStruct) uint32_t codeSize;
+        uint32_t const * code;
+    };
+
+    struct ShaderModuleWGSLDescriptor : ChainedStruct {
+        ShaderModuleWGSLDescriptor() {
+            sType = SType::ShaderModuleWGSLDescriptor;
+        }
+        alignas(ChainedStruct) char const * source;
+    };
+
+    struct StencilStateFaceDescriptor {
+        CompareFunction compare = CompareFunction::Always;
+        StencilOperation failOp = StencilOperation::Keep;
+        StencilOperation depthFailOp = StencilOperation::Keep;
+        StencilOperation passOp = StencilOperation::Keep;
+    };
+
+    struct SurfaceDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+    };
+
+    struct SurfaceDescriptorFromHTMLCanvasId : ChainedStruct {
+        SurfaceDescriptorFromHTMLCanvasId() {
+            sType = SType::SurfaceDescriptorFromHTMLCanvasId;
+        }
+        alignas(ChainedStruct) char const * id;
+    };
+
+    struct SurfaceDescriptorFromMetalLayer : ChainedStruct {
+        SurfaceDescriptorFromMetalLayer() {
+            sType = SType::SurfaceDescriptorFromMetalLayer;
+        }
+        alignas(ChainedStruct) void * layer;
+    };
+
+    struct SurfaceDescriptorFromWindowsHWND : ChainedStruct {
+        SurfaceDescriptorFromWindowsHWND() {
+            sType = SType::SurfaceDescriptorFromWindowsHWND;
+        }
+        alignas(ChainedStruct) void * hinstance;
+        void * hwnd;
+    };
+
+    struct SurfaceDescriptorFromXlib : ChainedStruct {
+        SurfaceDescriptorFromXlib() {
+            sType = SType::SurfaceDescriptorFromXlib;
+        }
+        alignas(ChainedStruct) void * display;
+        uint32_t window;
+    };
+
+    struct SwapChainDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+        TextureUsage usage;
+        TextureFormat format;
+        uint32_t width;
+        uint32_t height;
+        PresentMode presentMode;
+        uint64_t implementation = 0;
+    };
+
+    struct TextureViewDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+        TextureFormat format = TextureFormat::Undefined;
+        TextureViewDimension dimension = TextureViewDimension::Undefined;
+        uint32_t baseMipLevel = 0;
+        uint32_t mipLevelCount = 0;
+        uint32_t baseArrayLayer = 0;
+        uint32_t arrayLayerCount = 0;
+        TextureAspect aspect = TextureAspect::All;
+    };
+
+    struct VertexAttributeDescriptor {
+        VertexFormat format;
+        uint64_t offset;
+        uint32_t shaderLocation;
+    };
+
+    struct BindGroupDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+        BindGroupLayout layout;
+        uint32_t bindingCount = 0;
+        BindGroupEntry const * bindings;
+        uint32_t entryCount = 0;
+        BindGroupEntry const * entries;
+    };
+
+    struct BindGroupLayoutDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+        uint32_t bindingCount = 0;
+        BindGroupLayoutEntry const * bindings;
+        uint32_t entryCount = 0;
+        BindGroupLayoutEntry const * entries;
+    };
+
+    struct ColorStateDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        TextureFormat format;
+        BlendDescriptor alphaBlend;
+        BlendDescriptor colorBlend;
+        ColorWriteMask writeMask = ColorWriteMask::All;
+    };
+
+    struct ComputePipelineDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+        PipelineLayout layout;
+        ProgrammableStageDescriptor computeStage;
+    };
+
+    struct DepthStencilStateDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        TextureFormat format;
+        bool depthWriteEnabled = false;
+        CompareFunction depthCompare = CompareFunction::Always;
+        StencilStateFaceDescriptor stencilFront;
+        StencilStateFaceDescriptor stencilBack;
+        uint32_t stencilReadMask = 0xFFFFFFFF;
+        uint32_t stencilWriteMask = 0xFFFFFFFF;
+    };
+
+    struct RenderPassColorAttachmentDescriptor {
+        TextureView attachment;
+        TextureView resolveTarget;
+        LoadOp loadOp;
+        StoreOp storeOp;
+        Color clearColor;
+    };
+
+    struct RenderPipelineDescriptorDummyExtension : ChainedStruct {
+        RenderPipelineDescriptorDummyExtension() {
+            sType = SType::RenderPipelineDescriptorDummyExtension;
+        }
+        alignas(ChainedStruct) ProgrammableStageDescriptor dummyStage;
+    };
+
+    struct TextureCopyView {
+        ChainedStruct const * nextInChain = nullptr;
+        Texture texture;
+        uint32_t mipLevel = 0;
+        uint32_t arrayLayer = 0;
+        Origin3D origin;
+    };
+
+    struct TextureDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+        TextureUsage usage;
+        TextureDimension dimension = TextureDimension::e2D;
+        Extent3D size;
+        uint32_t arrayLayerCount = 1;
+        TextureFormat format;
+        uint32_t mipLevelCount = 1;
+        uint32_t sampleCount = 1;
+    };
+
+    struct VertexBufferLayoutDescriptor {
+        uint64_t arrayStride;
+        InputStepMode stepMode = InputStepMode::Vertex;
+        uint32_t attributeCount;
+        VertexAttributeDescriptor const * attributes;
+    };
+
+    struct RenderPassDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+        uint32_t colorAttachmentCount;
+        RenderPassColorAttachmentDescriptor const * colorAttachments;
+        RenderPassDepthStencilAttachmentDescriptor const * depthStencilAttachment = nullptr;
+    };
+
+    struct VertexStateDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        IndexFormat indexFormat = IndexFormat::Uint32;
+        uint32_t vertexBufferCount = 0;
+        VertexBufferLayoutDescriptor const * vertexBuffers;
+    };
+
+    struct RenderPipelineDescriptor {
+        ChainedStruct const * nextInChain = nullptr;
+        char const * label = nullptr;
+        PipelineLayout layout;
+        ProgrammableStageDescriptor vertexStage;
+        ProgrammableStageDescriptor const * fragmentStage = nullptr;
+        VertexStateDescriptor const * vertexState = nullptr;
+        PrimitiveTopology primitiveTopology;
+        RasterizationStateDescriptor const * rasterizationState = nullptr;
+        uint32_t sampleCount = 1;
+        DepthStencilStateDescriptor const * depthStencilState = nullptr;
+        uint32_t colorStateCount;
+        ColorStateDescriptor const * colorStates;
+        uint32_t sampleMask = 0xFFFFFFFF;
+        bool alphaToCoverageEnabled = false;
+    };
+
+
+    // TODO(dawn:22): Remove this once users use the "Entry" version.
+    using BindGroupBinding = BindGroupEntry;
+    using BindGroupLayoutBinding = BindGroupLayoutEntry;
+
+}  // namespace wgpu
+
+#endif // WEBGPU_CPP_H_
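
These wrappers mirror the C entry points one-to-one while adding RAII reference counting through ObjectBase: the converting constructor and copies add a reference, the destructor releases, and Acquire() adopts a raw handle without adding a reference. A minimal sketch, assuming `device` is a wgpu::Device already provided by the application:

    wgpu::BufferDescriptor desc;   // label/nextInChain default to nullptr as declared above
    desc.size  = 256;
    desc.usage = wgpu::BufferUsage::Vertex | wgpu::BufferUsage::CopyDst;
    wgpu::Buffer vertexBuffer = device.CreateBuffer(&desc);       // wraps wgpuDeviceCreateBuffer

    wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); // default (nullptr) descriptor
    wgpu::CommandBuffer commands = encoder.Finish();
    device.GetDefaultQueue().Submit(1, &commands);
    // No explicit Release calls: each wrapper releases its handle when it goes out of scope,
    // and Get()/Release()/Acquire() are available where raw WGPU* handles must cross the boundary.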

+ 1762 - 0
3rdparty/webgpu/webgpu_cpp.cpp

@@ -0,0 +1,1762 @@
+
+#include "dawn/webgpu_cpp.h"
+
+namespace wgpu {
+
+
+    static_assert(sizeof(AdapterType) == sizeof(WGPUAdapterType), "sizeof mismatch for AdapterType");
+    static_assert(alignof(AdapterType) == alignof(WGPUAdapterType), "alignof mismatch for AdapterType");
+
+    static_assert(static_cast<uint32_t>(AdapterType::DiscreteGPU) == WGPUAdapterType_DiscreteGPU, "value mismatch for AdapterType::DiscreteGPU");
+    static_assert(static_cast<uint32_t>(AdapterType::IntegratedGPU) == WGPUAdapterType_IntegratedGPU, "value mismatch for AdapterType::IntegratedGPU");
+    static_assert(static_cast<uint32_t>(AdapterType::CPU) == WGPUAdapterType_CPU, "value mismatch for AdapterType::CPU");
+    static_assert(static_cast<uint32_t>(AdapterType::Unknown) == WGPUAdapterType_Unknown, "value mismatch for AdapterType::Unknown");
+
+
+    static_assert(sizeof(AddressMode) == sizeof(WGPUAddressMode), "sizeof mismatch for AddressMode");
+    static_assert(alignof(AddressMode) == alignof(WGPUAddressMode), "alignof mismatch for AddressMode");
+
+    static_assert(static_cast<uint32_t>(AddressMode::Repeat) == WGPUAddressMode_Repeat, "value mismatch for AddressMode::Repeat");
+    static_assert(static_cast<uint32_t>(AddressMode::MirrorRepeat) == WGPUAddressMode_MirrorRepeat, "value mismatch for AddressMode::MirrorRepeat");
+    static_assert(static_cast<uint32_t>(AddressMode::ClampToEdge) == WGPUAddressMode_ClampToEdge, "value mismatch for AddressMode::ClampToEdge");
+
+
+    static_assert(sizeof(BackendType) == sizeof(WGPUBackendType), "sizeof mismatch for BackendType");
+    static_assert(alignof(BackendType) == alignof(WGPUBackendType), "alignof mismatch for BackendType");
+
+    static_assert(static_cast<uint32_t>(BackendType::Null) == WGPUBackendType_Null, "value mismatch for BackendType::Null");
+    static_assert(static_cast<uint32_t>(BackendType::D3D11) == WGPUBackendType_D3D11, "value mismatch for BackendType::D3D11");
+    static_assert(static_cast<uint32_t>(BackendType::D3D12) == WGPUBackendType_D3D12, "value mismatch for BackendType::D3D12");
+    static_assert(static_cast<uint32_t>(BackendType::Metal) == WGPUBackendType_Metal, "value mismatch for BackendType::Metal");
+    static_assert(static_cast<uint32_t>(BackendType::Vulkan) == WGPUBackendType_Vulkan, "value mismatch for BackendType::Vulkan");
+    static_assert(static_cast<uint32_t>(BackendType::OpenGL) == WGPUBackendType_OpenGL, "value mismatch for BackendType::OpenGL");
+    static_assert(static_cast<uint32_t>(BackendType::OpenGLES) == WGPUBackendType_OpenGLES, "value mismatch for BackendType::OpenGLES");
+
+
+    static_assert(sizeof(BindingType) == sizeof(WGPUBindingType), "sizeof mismatch for BindingType");
+    static_assert(alignof(BindingType) == alignof(WGPUBindingType), "alignof mismatch for BindingType");
+
+    static_assert(static_cast<uint32_t>(BindingType::UniformBuffer) == WGPUBindingType_UniformBuffer, "value mismatch for BindingType::UniformBuffer");
+    static_assert(static_cast<uint32_t>(BindingType::StorageBuffer) == WGPUBindingType_StorageBuffer, "value mismatch for BindingType::StorageBuffer");
+    static_assert(static_cast<uint32_t>(BindingType::ReadonlyStorageBuffer) == WGPUBindingType_ReadonlyStorageBuffer, "value mismatch for BindingType::ReadonlyStorageBuffer");
+    static_assert(static_cast<uint32_t>(BindingType::Sampler) == WGPUBindingType_Sampler, "value mismatch for BindingType::Sampler");
+    static_assert(static_cast<uint32_t>(BindingType::ComparisonSampler) == WGPUBindingType_ComparisonSampler, "value mismatch for BindingType::ComparisonSampler");
+    static_assert(static_cast<uint32_t>(BindingType::SampledTexture) == WGPUBindingType_SampledTexture, "value mismatch for BindingType::SampledTexture");
+    static_assert(static_cast<uint32_t>(BindingType::StorageTexture) == WGPUBindingType_StorageTexture, "value mismatch for BindingType::StorageTexture");
+    static_assert(static_cast<uint32_t>(BindingType::ReadonlyStorageTexture) == WGPUBindingType_ReadonlyStorageTexture, "value mismatch for BindingType::ReadonlyStorageTexture");
+    static_assert(static_cast<uint32_t>(BindingType::WriteonlyStorageTexture) == WGPUBindingType_WriteonlyStorageTexture, "value mismatch for BindingType::WriteonlyStorageTexture");
+
+
+    static_assert(sizeof(BlendFactor) == sizeof(WGPUBlendFactor), "sizeof mismatch for BlendFactor");
+    static_assert(alignof(BlendFactor) == alignof(WGPUBlendFactor), "alignof mismatch for BlendFactor");
+
+    static_assert(static_cast<uint32_t>(BlendFactor::Zero) == WGPUBlendFactor_Zero, "value mismatch for BlendFactor::Zero");
+    static_assert(static_cast<uint32_t>(BlendFactor::One) == WGPUBlendFactor_One, "value mismatch for BlendFactor::One");
+    static_assert(static_cast<uint32_t>(BlendFactor::SrcColor) == WGPUBlendFactor_SrcColor, "value mismatch for BlendFactor::SrcColor");
+    static_assert(static_cast<uint32_t>(BlendFactor::OneMinusSrcColor) == WGPUBlendFactor_OneMinusSrcColor, "value mismatch for BlendFactor::OneMinusSrcColor");
+    static_assert(static_cast<uint32_t>(BlendFactor::SrcAlpha) == WGPUBlendFactor_SrcAlpha, "value mismatch for BlendFactor::SrcAlpha");
+    static_assert(static_cast<uint32_t>(BlendFactor::OneMinusSrcAlpha) == WGPUBlendFactor_OneMinusSrcAlpha, "value mismatch for BlendFactor::OneMinusSrcAlpha");
+    static_assert(static_cast<uint32_t>(BlendFactor::DstColor) == WGPUBlendFactor_DstColor, "value mismatch for BlendFactor::DstColor");
+    static_assert(static_cast<uint32_t>(BlendFactor::OneMinusDstColor) == WGPUBlendFactor_OneMinusDstColor, "value mismatch for BlendFactor::OneMinusDstColor");
+    static_assert(static_cast<uint32_t>(BlendFactor::DstAlpha) == WGPUBlendFactor_DstAlpha, "value mismatch for BlendFactor::DstAlpha");
+    static_assert(static_cast<uint32_t>(BlendFactor::OneMinusDstAlpha) == WGPUBlendFactor_OneMinusDstAlpha, "value mismatch for BlendFactor::OneMinusDstAlpha");
+    static_assert(static_cast<uint32_t>(BlendFactor::SrcAlphaSaturated) == WGPUBlendFactor_SrcAlphaSaturated, "value mismatch for BlendFactor::SrcAlphaSaturated");
+    static_assert(static_cast<uint32_t>(BlendFactor::BlendColor) == WGPUBlendFactor_BlendColor, "value mismatch for BlendFactor::BlendColor");
+    static_assert(static_cast<uint32_t>(BlendFactor::OneMinusBlendColor) == WGPUBlendFactor_OneMinusBlendColor, "value mismatch for BlendFactor::OneMinusBlendColor");
+
+
+    static_assert(sizeof(BlendOperation) == sizeof(WGPUBlendOperation), "sizeof mismatch for BlendOperation");
+    static_assert(alignof(BlendOperation) == alignof(WGPUBlendOperation), "alignof mismatch for BlendOperation");
+
+    static_assert(static_cast<uint32_t>(BlendOperation::Add) == WGPUBlendOperation_Add, "value mismatch for BlendOperation::Add");
+    static_assert(static_cast<uint32_t>(BlendOperation::Subtract) == WGPUBlendOperation_Subtract, "value mismatch for BlendOperation::Subtract");
+    static_assert(static_cast<uint32_t>(BlendOperation::ReverseSubtract) == WGPUBlendOperation_ReverseSubtract, "value mismatch for BlendOperation::ReverseSubtract");
+    static_assert(static_cast<uint32_t>(BlendOperation::Min) == WGPUBlendOperation_Min, "value mismatch for BlendOperation::Min");
+    static_assert(static_cast<uint32_t>(BlendOperation::Max) == WGPUBlendOperation_Max, "value mismatch for BlendOperation::Max");
+
+
+    static_assert(sizeof(BufferMapAsyncStatus) == sizeof(WGPUBufferMapAsyncStatus), "sizeof mismatch for BufferMapAsyncStatus");
+    static_assert(alignof(BufferMapAsyncStatus) == alignof(WGPUBufferMapAsyncStatus), "alignof mismatch for BufferMapAsyncStatus");
+
+    static_assert(static_cast<uint32_t>(BufferMapAsyncStatus::Success) == WGPUBufferMapAsyncStatus_Success, "value mismatch for BufferMapAsyncStatus::Success");
+    static_assert(static_cast<uint32_t>(BufferMapAsyncStatus::Error) == WGPUBufferMapAsyncStatus_Error, "value mismatch for BufferMapAsyncStatus::Error");
+    static_assert(static_cast<uint32_t>(BufferMapAsyncStatus::Unknown) == WGPUBufferMapAsyncStatus_Unknown, "value mismatch for BufferMapAsyncStatus::Unknown");
+    static_assert(static_cast<uint32_t>(BufferMapAsyncStatus::DeviceLost) == WGPUBufferMapAsyncStatus_DeviceLost, "value mismatch for BufferMapAsyncStatus::DeviceLost");
+
+
+    static_assert(sizeof(CompareFunction) == sizeof(WGPUCompareFunction), "sizeof mismatch for CompareFunction");
+    static_assert(alignof(CompareFunction) == alignof(WGPUCompareFunction), "alignof mismatch for CompareFunction");
+
+    static_assert(static_cast<uint32_t>(CompareFunction::Undefined) == WGPUCompareFunction_Undefined, "value mismatch for CompareFunction::Undefined");
+    static_assert(static_cast<uint32_t>(CompareFunction::Never) == WGPUCompareFunction_Never, "value mismatch for CompareFunction::Never");
+    static_assert(static_cast<uint32_t>(CompareFunction::Less) == WGPUCompareFunction_Less, "value mismatch for CompareFunction::Less");
+    static_assert(static_cast<uint32_t>(CompareFunction::LessEqual) == WGPUCompareFunction_LessEqual, "value mismatch for CompareFunction::LessEqual");
+    static_assert(static_cast<uint32_t>(CompareFunction::Greater) == WGPUCompareFunction_Greater, "value mismatch for CompareFunction::Greater");
+    static_assert(static_cast<uint32_t>(CompareFunction::GreaterEqual) == WGPUCompareFunction_GreaterEqual, "value mismatch for CompareFunction::GreaterEqual");
+    static_assert(static_cast<uint32_t>(CompareFunction::Equal) == WGPUCompareFunction_Equal, "value mismatch for CompareFunction::Equal");
+    static_assert(static_cast<uint32_t>(CompareFunction::NotEqual) == WGPUCompareFunction_NotEqual, "value mismatch for CompareFunction::NotEqual");
+    static_assert(static_cast<uint32_t>(CompareFunction::Always) == WGPUCompareFunction_Always, "value mismatch for CompareFunction::Always");
+
+
+    static_assert(sizeof(CullMode) == sizeof(WGPUCullMode), "sizeof mismatch for CullMode");
+    static_assert(alignof(CullMode) == alignof(WGPUCullMode), "alignof mismatch for CullMode");
+
+    static_assert(static_cast<uint32_t>(CullMode::None) == WGPUCullMode_None, "value mismatch for CullMode::None");
+    static_assert(static_cast<uint32_t>(CullMode::Front) == WGPUCullMode_Front, "value mismatch for CullMode::Front");
+    static_assert(static_cast<uint32_t>(CullMode::Back) == WGPUCullMode_Back, "value mismatch for CullMode::Back");
+
+
+    static_assert(sizeof(ErrorFilter) == sizeof(WGPUErrorFilter), "sizeof mismatch for ErrorFilter");
+    static_assert(alignof(ErrorFilter) == alignof(WGPUErrorFilter), "alignof mismatch for ErrorFilter");
+
+    static_assert(static_cast<uint32_t>(ErrorFilter::None) == WGPUErrorFilter_None, "value mismatch for ErrorFilter::None");
+    static_assert(static_cast<uint32_t>(ErrorFilter::Validation) == WGPUErrorFilter_Validation, "value mismatch for ErrorFilter::Validation");
+    static_assert(static_cast<uint32_t>(ErrorFilter::OutOfMemory) == WGPUErrorFilter_OutOfMemory, "value mismatch for ErrorFilter::OutOfMemory");
+
+
+    static_assert(sizeof(ErrorType) == sizeof(WGPUErrorType), "sizeof mismatch for ErrorType");
+    static_assert(alignof(ErrorType) == alignof(WGPUErrorType), "alignof mismatch for ErrorType");
+
+    static_assert(static_cast<uint32_t>(ErrorType::NoError) == WGPUErrorType_NoError, "value mismatch for ErrorType::NoError");
+    static_assert(static_cast<uint32_t>(ErrorType::Validation) == WGPUErrorType_Validation, "value mismatch for ErrorType::Validation");
+    static_assert(static_cast<uint32_t>(ErrorType::OutOfMemory) == WGPUErrorType_OutOfMemory, "value mismatch for ErrorType::OutOfMemory");
+    static_assert(static_cast<uint32_t>(ErrorType::Unknown) == WGPUErrorType_Unknown, "value mismatch for ErrorType::Unknown");
+    static_assert(static_cast<uint32_t>(ErrorType::DeviceLost) == WGPUErrorType_DeviceLost, "value mismatch for ErrorType::DeviceLost");
+
+
+    static_assert(sizeof(FenceCompletionStatus) == sizeof(WGPUFenceCompletionStatus), "sizeof mismatch for FenceCompletionStatus");
+    static_assert(alignof(FenceCompletionStatus) == alignof(WGPUFenceCompletionStatus), "alignof mismatch for FenceCompletionStatus");
+
+    static_assert(static_cast<uint32_t>(FenceCompletionStatus::Success) == WGPUFenceCompletionStatus_Success, "value mismatch for FenceCompletionStatus::Success");
+    static_assert(static_cast<uint32_t>(FenceCompletionStatus::Error) == WGPUFenceCompletionStatus_Error, "value mismatch for FenceCompletionStatus::Error");
+    static_assert(static_cast<uint32_t>(FenceCompletionStatus::Unknown) == WGPUFenceCompletionStatus_Unknown, "value mismatch for FenceCompletionStatus::Unknown");
+    static_assert(static_cast<uint32_t>(FenceCompletionStatus::DeviceLost) == WGPUFenceCompletionStatus_DeviceLost, "value mismatch for FenceCompletionStatus::DeviceLost");
+
+
+    static_assert(sizeof(FilterMode) == sizeof(WGPUFilterMode), "sizeof mismatch for FilterMode");
+    static_assert(alignof(FilterMode) == alignof(WGPUFilterMode), "alignof mismatch for FilterMode");
+
+    static_assert(static_cast<uint32_t>(FilterMode::Nearest) == WGPUFilterMode_Nearest, "value mismatch for FilterMode::Nearest");
+    static_assert(static_cast<uint32_t>(FilterMode::Linear) == WGPUFilterMode_Linear, "value mismatch for FilterMode::Linear");
+
+
+    static_assert(sizeof(FrontFace) == sizeof(WGPUFrontFace), "sizeof mismatch for FrontFace");
+    static_assert(alignof(FrontFace) == alignof(WGPUFrontFace), "alignof mismatch for FrontFace");
+
+    static_assert(static_cast<uint32_t>(FrontFace::CCW) == WGPUFrontFace_CCW, "value mismatch for FrontFace::CCW");
+    static_assert(static_cast<uint32_t>(FrontFace::CW) == WGPUFrontFace_CW, "value mismatch for FrontFace::CW");
+
+
+    static_assert(sizeof(IndexFormat) == sizeof(WGPUIndexFormat), "sizeof mismatch for IndexFormat");
+    static_assert(alignof(IndexFormat) == alignof(WGPUIndexFormat), "alignof mismatch for IndexFormat");
+
+    static_assert(static_cast<uint32_t>(IndexFormat::Uint16) == WGPUIndexFormat_Uint16, "value mismatch for IndexFormat::Uint16");
+    static_assert(static_cast<uint32_t>(IndexFormat::Uint32) == WGPUIndexFormat_Uint32, "value mismatch for IndexFormat::Uint32");
+
+
+    static_assert(sizeof(InputStepMode) == sizeof(WGPUInputStepMode), "sizeof mismatch for InputStepMode");
+    static_assert(alignof(InputStepMode) == alignof(WGPUInputStepMode), "alignof mismatch for InputStepMode");
+
+    static_assert(static_cast<uint32_t>(InputStepMode::Vertex) == WGPUInputStepMode_Vertex, "value mismatch for InputStepMode::Vertex");
+    static_assert(static_cast<uint32_t>(InputStepMode::Instance) == WGPUInputStepMode_Instance, "value mismatch for InputStepMode::Instance");
+
+
+    static_assert(sizeof(LoadOp) == sizeof(WGPULoadOp), "sizeof mismatch for LoadOp");
+    static_assert(alignof(LoadOp) == alignof(WGPULoadOp), "alignof mismatch for LoadOp");
+
+    static_assert(static_cast<uint32_t>(LoadOp::Clear) == WGPULoadOp_Clear, "value mismatch for LoadOp::Clear");
+    static_assert(static_cast<uint32_t>(LoadOp::Load) == WGPULoadOp_Load, "value mismatch for LoadOp::Load");
+
+
+    static_assert(sizeof(PresentMode) == sizeof(WGPUPresentMode), "sizeof mismatch for PresentMode");
+    static_assert(alignof(PresentMode) == alignof(WGPUPresentMode), "alignof mismatch for PresentMode");
+
+    static_assert(static_cast<uint32_t>(PresentMode::Immediate) == WGPUPresentMode_Immediate, "value mismatch for PresentMode::Immediate");
+    static_assert(static_cast<uint32_t>(PresentMode::Mailbox) == WGPUPresentMode_Mailbox, "value mismatch for PresentMode::Mailbox");
+    static_assert(static_cast<uint32_t>(PresentMode::Fifo) == WGPUPresentMode_Fifo, "value mismatch for PresentMode::Fifo");
+
+
+    static_assert(sizeof(PrimitiveTopology) == sizeof(WGPUPrimitiveTopology), "sizeof mismatch for PrimitiveTopology");
+    static_assert(alignof(PrimitiveTopology) == alignof(WGPUPrimitiveTopology), "alignof mismatch for PrimitiveTopology");
+
+    static_assert(static_cast<uint32_t>(PrimitiveTopology::PointList) == WGPUPrimitiveTopology_PointList, "value mismatch for PrimitiveTopology::PointList");
+    static_assert(static_cast<uint32_t>(PrimitiveTopology::LineList) == WGPUPrimitiveTopology_LineList, "value mismatch for PrimitiveTopology::LineList");
+    static_assert(static_cast<uint32_t>(PrimitiveTopology::LineStrip) == WGPUPrimitiveTopology_LineStrip, "value mismatch for PrimitiveTopology::LineStrip");
+    static_assert(static_cast<uint32_t>(PrimitiveTopology::TriangleList) == WGPUPrimitiveTopology_TriangleList, "value mismatch for PrimitiveTopology::TriangleList");
+    static_assert(static_cast<uint32_t>(PrimitiveTopology::TriangleStrip) == WGPUPrimitiveTopology_TriangleStrip, "value mismatch for PrimitiveTopology::TriangleStrip");
+
+
+    static_assert(sizeof(SType) == sizeof(WGPUSType), "sizeof mismatch for SType");
+    static_assert(alignof(SType) == alignof(WGPUSType), "alignof mismatch for SType");
+
+    static_assert(static_cast<uint32_t>(SType::Invalid) == WGPUSType_Invalid, "value mismatch for SType::Invalid");
+    static_assert(static_cast<uint32_t>(SType::SurfaceDescriptorFromMetalLayer) == WGPUSType_SurfaceDescriptorFromMetalLayer, "value mismatch for SType::SurfaceDescriptorFromMetalLayer");
+    static_assert(static_cast<uint32_t>(SType::SurfaceDescriptorFromWindowsHWND) == WGPUSType_SurfaceDescriptorFromWindowsHWND, "value mismatch for SType::SurfaceDescriptorFromWindowsHWND");
+    static_assert(static_cast<uint32_t>(SType::SurfaceDescriptorFromXlib) == WGPUSType_SurfaceDescriptorFromXlib, "value mismatch for SType::SurfaceDescriptorFromXlib");
+    static_assert(static_cast<uint32_t>(SType::SurfaceDescriptorFromHTMLCanvasId) == WGPUSType_SurfaceDescriptorFromHTMLCanvasId, "value mismatch for SType::SurfaceDescriptorFromHTMLCanvasId");
+    static_assert(static_cast<uint32_t>(SType::ShaderModuleSPIRVDescriptor) == WGPUSType_ShaderModuleSPIRVDescriptor, "value mismatch for SType::ShaderModuleSPIRVDescriptor");
+    static_assert(static_cast<uint32_t>(SType::ShaderModuleWGSLDescriptor) == WGPUSType_ShaderModuleWGSLDescriptor, "value mismatch for SType::ShaderModuleWGSLDescriptor");
+    static_assert(static_cast<uint32_t>(SType::SamplerDescriptorDummyAnisotropicFiltering) == WGPUSType_SamplerDescriptorDummyAnisotropicFiltering, "value mismatch for SType::SamplerDescriptorDummyAnisotropicFiltering");
+    static_assert(static_cast<uint32_t>(SType::RenderPipelineDescriptorDummyExtension) == WGPUSType_RenderPipelineDescriptorDummyExtension, "value mismatch for SType::RenderPipelineDescriptorDummyExtension");
+
+
+    static_assert(sizeof(StencilOperation) == sizeof(WGPUStencilOperation), "sizeof mismatch for StencilOperation");
+    static_assert(alignof(StencilOperation) == alignof(WGPUStencilOperation), "alignof mismatch for StencilOperation");
+
+    static_assert(static_cast<uint32_t>(StencilOperation::Keep) == WGPUStencilOperation_Keep, "value mismatch for StencilOperation::Keep");
+    static_assert(static_cast<uint32_t>(StencilOperation::Zero) == WGPUStencilOperation_Zero, "value mismatch for StencilOperation::Zero");
+    static_assert(static_cast<uint32_t>(StencilOperation::Replace) == WGPUStencilOperation_Replace, "value mismatch for StencilOperation::Replace");
+    static_assert(static_cast<uint32_t>(StencilOperation::Invert) == WGPUStencilOperation_Invert, "value mismatch for StencilOperation::Invert");
+    static_assert(static_cast<uint32_t>(StencilOperation::IncrementClamp) == WGPUStencilOperation_IncrementClamp, "value mismatch for StencilOperation::IncrementClamp");
+    static_assert(static_cast<uint32_t>(StencilOperation::DecrementClamp) == WGPUStencilOperation_DecrementClamp, "value mismatch for StencilOperation::DecrementClamp");
+    static_assert(static_cast<uint32_t>(StencilOperation::IncrementWrap) == WGPUStencilOperation_IncrementWrap, "value mismatch for StencilOperation::IncrementWrap");
+    static_assert(static_cast<uint32_t>(StencilOperation::DecrementWrap) == WGPUStencilOperation_DecrementWrap, "value mismatch for StencilOperation::DecrementWrap");
+
+
+    static_assert(sizeof(StoreOp) == sizeof(WGPUStoreOp), "sizeof mismatch for StoreOp");
+    static_assert(alignof(StoreOp) == alignof(WGPUStoreOp), "alignof mismatch for StoreOp");
+
+    static_assert(static_cast<uint32_t>(StoreOp::Store) == WGPUStoreOp_Store, "value mismatch for StoreOp::Store");
+    static_assert(static_cast<uint32_t>(StoreOp::Clear) == WGPUStoreOp_Clear, "value mismatch for StoreOp::Clear");
+
+
+    static_assert(sizeof(TextureAspect) == sizeof(WGPUTextureAspect), "sizeof mismatch for TextureAspect");
+    static_assert(alignof(TextureAspect) == alignof(WGPUTextureAspect), "alignof mismatch for TextureAspect");
+
+    static_assert(static_cast<uint32_t>(TextureAspect::All) == WGPUTextureAspect_All, "value mismatch for TextureAspect::All");
+    static_assert(static_cast<uint32_t>(TextureAspect::StencilOnly) == WGPUTextureAspect_StencilOnly, "value mismatch for TextureAspect::StencilOnly");
+    static_assert(static_cast<uint32_t>(TextureAspect::DepthOnly) == WGPUTextureAspect_DepthOnly, "value mismatch for TextureAspect::DepthOnly");
+
+
+    static_assert(sizeof(TextureComponentType) == sizeof(WGPUTextureComponentType), "sizeof mismatch for TextureComponentType");
+    static_assert(alignof(TextureComponentType) == alignof(WGPUTextureComponentType), "alignof mismatch for TextureComponentType");
+
+    static_assert(static_cast<uint32_t>(TextureComponentType::Float) == WGPUTextureComponentType_Float, "value mismatch for TextureComponentType::Float");
+    static_assert(static_cast<uint32_t>(TextureComponentType::Sint) == WGPUTextureComponentType_Sint, "value mismatch for TextureComponentType::Sint");
+    static_assert(static_cast<uint32_t>(TextureComponentType::Uint) == WGPUTextureComponentType_Uint, "value mismatch for TextureComponentType::Uint");
+
+
+    static_assert(sizeof(TextureDimension) == sizeof(WGPUTextureDimension), "sizeof mismatch for TextureDimension");
+    static_assert(alignof(TextureDimension) == alignof(WGPUTextureDimension), "alignof mismatch for TextureDimension");
+
+    static_assert(static_cast<uint32_t>(TextureDimension::e1D) == WGPUTextureDimension_1D, "value mismatch for TextureDimension::e1D");
+    static_assert(static_cast<uint32_t>(TextureDimension::e2D) == WGPUTextureDimension_2D, "value mismatch for TextureDimension::e2D");
+    static_assert(static_cast<uint32_t>(TextureDimension::e3D) == WGPUTextureDimension_3D, "value mismatch for TextureDimension::e3D");
+
+
+    static_assert(sizeof(TextureFormat) == sizeof(WGPUTextureFormat), "sizeof mismatch for TextureFormat");
+    static_assert(alignof(TextureFormat) == alignof(WGPUTextureFormat), "alignof mismatch for TextureFormat");
+
+    static_assert(static_cast<uint32_t>(TextureFormat::Undefined) == WGPUTextureFormat_Undefined, "value mismatch for TextureFormat::Undefined");
+    static_assert(static_cast<uint32_t>(TextureFormat::R8Unorm) == WGPUTextureFormat_R8Unorm, "value mismatch for TextureFormat::R8Unorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::R8Snorm) == WGPUTextureFormat_R8Snorm, "value mismatch for TextureFormat::R8Snorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::R8Uint) == WGPUTextureFormat_R8Uint, "value mismatch for TextureFormat::R8Uint");
+    static_assert(static_cast<uint32_t>(TextureFormat::R8Sint) == WGPUTextureFormat_R8Sint, "value mismatch for TextureFormat::R8Sint");
+    static_assert(static_cast<uint32_t>(TextureFormat::R16Uint) == WGPUTextureFormat_R16Uint, "value mismatch for TextureFormat::R16Uint");
+    static_assert(static_cast<uint32_t>(TextureFormat::R16Sint) == WGPUTextureFormat_R16Sint, "value mismatch for TextureFormat::R16Sint");
+    static_assert(static_cast<uint32_t>(TextureFormat::R16Float) == WGPUTextureFormat_R16Float, "value mismatch for TextureFormat::R16Float");
+    static_assert(static_cast<uint32_t>(TextureFormat::RG8Unorm) == WGPUTextureFormat_RG8Unorm, "value mismatch for TextureFormat::RG8Unorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::RG8Snorm) == WGPUTextureFormat_RG8Snorm, "value mismatch for TextureFormat::RG8Snorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::RG8Uint) == WGPUTextureFormat_RG8Uint, "value mismatch for TextureFormat::RG8Uint");
+    static_assert(static_cast<uint32_t>(TextureFormat::RG8Sint) == WGPUTextureFormat_RG8Sint, "value mismatch for TextureFormat::RG8Sint");
+    static_assert(static_cast<uint32_t>(TextureFormat::R32Float) == WGPUTextureFormat_R32Float, "value mismatch for TextureFormat::R32Float");
+    static_assert(static_cast<uint32_t>(TextureFormat::R32Uint) == WGPUTextureFormat_R32Uint, "value mismatch for TextureFormat::R32Uint");
+    static_assert(static_cast<uint32_t>(TextureFormat::R32Sint) == WGPUTextureFormat_R32Sint, "value mismatch for TextureFormat::R32Sint");
+    static_assert(static_cast<uint32_t>(TextureFormat::RG16Uint) == WGPUTextureFormat_RG16Uint, "value mismatch for TextureFormat::RG16Uint");
+    static_assert(static_cast<uint32_t>(TextureFormat::RG16Sint) == WGPUTextureFormat_RG16Sint, "value mismatch for TextureFormat::RG16Sint");
+    static_assert(static_cast<uint32_t>(TextureFormat::RG16Float) == WGPUTextureFormat_RG16Float, "value mismatch for TextureFormat::RG16Float");
+    static_assert(static_cast<uint32_t>(TextureFormat::RGBA8Unorm) == WGPUTextureFormat_RGBA8Unorm, "value mismatch for TextureFormat::RGBA8Unorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::RGBA8UnormSrgb) == WGPUTextureFormat_RGBA8UnormSrgb, "value mismatch for TextureFormat::RGBA8UnormSrgb");
+    static_assert(static_cast<uint32_t>(TextureFormat::RGBA8Snorm) == WGPUTextureFormat_RGBA8Snorm, "value mismatch for TextureFormat::RGBA8Snorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::RGBA8Uint) == WGPUTextureFormat_RGBA8Uint, "value mismatch for TextureFormat::RGBA8Uint");
+    static_assert(static_cast<uint32_t>(TextureFormat::RGBA8Sint) == WGPUTextureFormat_RGBA8Sint, "value mismatch for TextureFormat::RGBA8Sint");
+    static_assert(static_cast<uint32_t>(TextureFormat::BGRA8Unorm) == WGPUTextureFormat_BGRA8Unorm, "value mismatch for TextureFormat::BGRA8Unorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::BGRA8UnormSrgb) == WGPUTextureFormat_BGRA8UnormSrgb, "value mismatch for TextureFormat::BGRA8UnormSrgb");
+    static_assert(static_cast<uint32_t>(TextureFormat::RGB10A2Unorm) == WGPUTextureFormat_RGB10A2Unorm, "value mismatch for TextureFormat::RGB10A2Unorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::RG11B10Float) == WGPUTextureFormat_RG11B10Float, "value mismatch for TextureFormat::RG11B10Float");
+    static_assert(static_cast<uint32_t>(TextureFormat::RG32Float) == WGPUTextureFormat_RG32Float, "value mismatch for TextureFormat::RG32Float");
+    static_assert(static_cast<uint32_t>(TextureFormat::RG32Uint) == WGPUTextureFormat_RG32Uint, "value mismatch for TextureFormat::RG32Uint");
+    static_assert(static_cast<uint32_t>(TextureFormat::RG32Sint) == WGPUTextureFormat_RG32Sint, "value mismatch for TextureFormat::RG32Sint");
+    static_assert(static_cast<uint32_t>(TextureFormat::RGBA16Uint) == WGPUTextureFormat_RGBA16Uint, "value mismatch for TextureFormat::RGBA16Uint");
+    static_assert(static_cast<uint32_t>(TextureFormat::RGBA16Sint) == WGPUTextureFormat_RGBA16Sint, "value mismatch for TextureFormat::RGBA16Sint");
+    static_assert(static_cast<uint32_t>(TextureFormat::RGBA16Float) == WGPUTextureFormat_RGBA16Float, "value mismatch for TextureFormat::RGBA16Float");
+    static_assert(static_cast<uint32_t>(TextureFormat::RGBA32Float) == WGPUTextureFormat_RGBA32Float, "value mismatch for TextureFormat::RGBA32Float");
+    static_assert(static_cast<uint32_t>(TextureFormat::RGBA32Uint) == WGPUTextureFormat_RGBA32Uint, "value mismatch for TextureFormat::RGBA32Uint");
+    static_assert(static_cast<uint32_t>(TextureFormat::RGBA32Sint) == WGPUTextureFormat_RGBA32Sint, "value mismatch for TextureFormat::RGBA32Sint");
+    static_assert(static_cast<uint32_t>(TextureFormat::Depth32Float) == WGPUTextureFormat_Depth32Float, "value mismatch for TextureFormat::Depth32Float");
+    static_assert(static_cast<uint32_t>(TextureFormat::Depth24Plus) == WGPUTextureFormat_Depth24Plus, "value mismatch for TextureFormat::Depth24Plus");
+    static_assert(static_cast<uint32_t>(TextureFormat::Depth24PlusStencil8) == WGPUTextureFormat_Depth24PlusStencil8, "value mismatch for TextureFormat::Depth24PlusStencil8");
+    static_assert(static_cast<uint32_t>(TextureFormat::BC1RGBAUnorm) == WGPUTextureFormat_BC1RGBAUnorm, "value mismatch for TextureFormat::BC1RGBAUnorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::BC1RGBAUnormSrgb) == WGPUTextureFormat_BC1RGBAUnormSrgb, "value mismatch for TextureFormat::BC1RGBAUnormSrgb");
+    static_assert(static_cast<uint32_t>(TextureFormat::BC2RGBAUnorm) == WGPUTextureFormat_BC2RGBAUnorm, "value mismatch for TextureFormat::BC2RGBAUnorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::BC2RGBAUnormSrgb) == WGPUTextureFormat_BC2RGBAUnormSrgb, "value mismatch for TextureFormat::BC2RGBAUnormSrgb");
+    static_assert(static_cast<uint32_t>(TextureFormat::BC3RGBAUnorm) == WGPUTextureFormat_BC3RGBAUnorm, "value mismatch for TextureFormat::BC3RGBAUnorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::BC3RGBAUnormSrgb) == WGPUTextureFormat_BC3RGBAUnormSrgb, "value mismatch for TextureFormat::BC3RGBAUnormSrgb");
+    static_assert(static_cast<uint32_t>(TextureFormat::BC4RUnorm) == WGPUTextureFormat_BC4RUnorm, "value mismatch for TextureFormat::BC4RUnorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::BC4RSnorm) == WGPUTextureFormat_BC4RSnorm, "value mismatch for TextureFormat::BC4RSnorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::BC5RGUnorm) == WGPUTextureFormat_BC5RGUnorm, "value mismatch for TextureFormat::BC5RGUnorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::BC5RGSnorm) == WGPUTextureFormat_BC5RGSnorm, "value mismatch for TextureFormat::BC5RGSnorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::BC6HRGBUfloat) == WGPUTextureFormat_BC6HRGBUfloat, "value mismatch for TextureFormat::BC6HRGBUfloat");
+    static_assert(static_cast<uint32_t>(TextureFormat::BC6HRGBSfloat) == WGPUTextureFormat_BC6HRGBSfloat, "value mismatch for TextureFormat::BC6HRGBSfloat");
+    static_assert(static_cast<uint32_t>(TextureFormat::BC7RGBAUnorm) == WGPUTextureFormat_BC7RGBAUnorm, "value mismatch for TextureFormat::BC7RGBAUnorm");
+    static_assert(static_cast<uint32_t>(TextureFormat::BC7RGBAUnormSrgb) == WGPUTextureFormat_BC7RGBAUnormSrgb, "value mismatch for TextureFormat::BC7RGBAUnormSrgb");
+
+
+    static_assert(sizeof(TextureViewDimension) == sizeof(WGPUTextureViewDimension), "sizeof mismatch for TextureViewDimension");
+    static_assert(alignof(TextureViewDimension) == alignof(WGPUTextureViewDimension), "alignof mismatch for TextureViewDimension");
+
+    static_assert(static_cast<uint32_t>(TextureViewDimension::Undefined) == WGPUTextureViewDimension_Undefined, "value mismatch for TextureViewDimension::Undefined");
+    static_assert(static_cast<uint32_t>(TextureViewDimension::e1D) == WGPUTextureViewDimension_1D, "value mismatch for TextureViewDimension::e1D");
+    static_assert(static_cast<uint32_t>(TextureViewDimension::e2D) == WGPUTextureViewDimension_2D, "value mismatch for TextureViewDimension::e2D");
+    static_assert(static_cast<uint32_t>(TextureViewDimension::e2DArray) == WGPUTextureViewDimension_2DArray, "value mismatch for TextureViewDimension::e2DArray");
+    static_assert(static_cast<uint32_t>(TextureViewDimension::Cube) == WGPUTextureViewDimension_Cube, "value mismatch for TextureViewDimension::Cube");
+    static_assert(static_cast<uint32_t>(TextureViewDimension::CubeArray) == WGPUTextureViewDimension_CubeArray, "value mismatch for TextureViewDimension::CubeArray");
+    static_assert(static_cast<uint32_t>(TextureViewDimension::e3D) == WGPUTextureViewDimension_3D, "value mismatch for TextureViewDimension::e3D");
+
+
+    static_assert(sizeof(VertexFormat) == sizeof(WGPUVertexFormat), "sizeof mismatch for VertexFormat");
+    static_assert(alignof(VertexFormat) == alignof(WGPUVertexFormat), "alignof mismatch for VertexFormat");
+
+    static_assert(static_cast<uint32_t>(VertexFormat::UChar2) == WGPUVertexFormat_UChar2, "value mismatch for VertexFormat::UChar2");
+    static_assert(static_cast<uint32_t>(VertexFormat::UChar4) == WGPUVertexFormat_UChar4, "value mismatch for VertexFormat::UChar4");
+    static_assert(static_cast<uint32_t>(VertexFormat::Char2) == WGPUVertexFormat_Char2, "value mismatch for VertexFormat::Char2");
+    static_assert(static_cast<uint32_t>(VertexFormat::Char4) == WGPUVertexFormat_Char4, "value mismatch for VertexFormat::Char4");
+    static_assert(static_cast<uint32_t>(VertexFormat::UChar2Norm) == WGPUVertexFormat_UChar2Norm, "value mismatch for VertexFormat::UChar2Norm");
+    static_assert(static_cast<uint32_t>(VertexFormat::UChar4Norm) == WGPUVertexFormat_UChar4Norm, "value mismatch for VertexFormat::UChar4Norm");
+    static_assert(static_cast<uint32_t>(VertexFormat::Char2Norm) == WGPUVertexFormat_Char2Norm, "value mismatch for VertexFormat::Char2Norm");
+    static_assert(static_cast<uint32_t>(VertexFormat::Char4Norm) == WGPUVertexFormat_Char4Norm, "value mismatch for VertexFormat::Char4Norm");
+    static_assert(static_cast<uint32_t>(VertexFormat::UShort2) == WGPUVertexFormat_UShort2, "value mismatch for VertexFormat::UShort2");
+    static_assert(static_cast<uint32_t>(VertexFormat::UShort4) == WGPUVertexFormat_UShort4, "value mismatch for VertexFormat::UShort4");
+    static_assert(static_cast<uint32_t>(VertexFormat::Short2) == WGPUVertexFormat_Short2, "value mismatch for VertexFormat::Short2");
+    static_assert(static_cast<uint32_t>(VertexFormat::Short4) == WGPUVertexFormat_Short4, "value mismatch for VertexFormat::Short4");
+    static_assert(static_cast<uint32_t>(VertexFormat::UShort2Norm) == WGPUVertexFormat_UShort2Norm, "value mismatch for VertexFormat::UShort2Norm");
+    static_assert(static_cast<uint32_t>(VertexFormat::UShort4Norm) == WGPUVertexFormat_UShort4Norm, "value mismatch for VertexFormat::UShort4Norm");
+    static_assert(static_cast<uint32_t>(VertexFormat::Short2Norm) == WGPUVertexFormat_Short2Norm, "value mismatch for VertexFormat::Short2Norm");
+    static_assert(static_cast<uint32_t>(VertexFormat::Short4Norm) == WGPUVertexFormat_Short4Norm, "value mismatch for VertexFormat::Short4Norm");
+    static_assert(static_cast<uint32_t>(VertexFormat::Half2) == WGPUVertexFormat_Half2, "value mismatch for VertexFormat::Half2");
+    static_assert(static_cast<uint32_t>(VertexFormat::Half4) == WGPUVertexFormat_Half4, "value mismatch for VertexFormat::Half4");
+    static_assert(static_cast<uint32_t>(VertexFormat::Float) == WGPUVertexFormat_Float, "value mismatch for VertexFormat::Float");
+    static_assert(static_cast<uint32_t>(VertexFormat::Float2) == WGPUVertexFormat_Float2, "value mismatch for VertexFormat::Float2");
+    static_assert(static_cast<uint32_t>(VertexFormat::Float3) == WGPUVertexFormat_Float3, "value mismatch for VertexFormat::Float3");
+    static_assert(static_cast<uint32_t>(VertexFormat::Float4) == WGPUVertexFormat_Float4, "value mismatch for VertexFormat::Float4");
+    static_assert(static_cast<uint32_t>(VertexFormat::UInt) == WGPUVertexFormat_UInt, "value mismatch for VertexFormat::UInt");
+    static_assert(static_cast<uint32_t>(VertexFormat::UInt2) == WGPUVertexFormat_UInt2, "value mismatch for VertexFormat::UInt2");
+    static_assert(static_cast<uint32_t>(VertexFormat::UInt3) == WGPUVertexFormat_UInt3, "value mismatch for VertexFormat::UInt3");
+    static_assert(static_cast<uint32_t>(VertexFormat::UInt4) == WGPUVertexFormat_UInt4, "value mismatch for VertexFormat::UInt4");
+    static_assert(static_cast<uint32_t>(VertexFormat::Int) == WGPUVertexFormat_Int, "value mismatch for VertexFormat::Int");
+    static_assert(static_cast<uint32_t>(VertexFormat::Int2) == WGPUVertexFormat_Int2, "value mismatch for VertexFormat::Int2");
+    static_assert(static_cast<uint32_t>(VertexFormat::Int3) == WGPUVertexFormat_Int3, "value mismatch for VertexFormat::Int3");
+    static_assert(static_cast<uint32_t>(VertexFormat::Int4) == WGPUVertexFormat_Int4, "value mismatch for VertexFormat::Int4");
+
+
+
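+    // Bitmask flag checks: the wgpu:: flag enums must line up with the corresponding
+    // WGPU*Flags typedefs in size, alignment, and bit values.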
+    static_assert(sizeof(BufferUsage) == sizeof(WGPUBufferUsageFlags), "sizeof mismatch for BufferUsage");
+    static_assert(alignof(BufferUsage) == alignof(WGPUBufferUsageFlags), "alignof mismatch for BufferUsage");
+
+    static_assert(static_cast<uint32_t>(BufferUsage::None) == WGPUBufferUsage_None, "value mismatch for BufferUsage::None");
+    static_assert(static_cast<uint32_t>(BufferUsage::MapRead) == WGPUBufferUsage_MapRead, "value mismatch for BufferUsage::MapRead");
+    static_assert(static_cast<uint32_t>(BufferUsage::MapWrite) == WGPUBufferUsage_MapWrite, "value mismatch for BufferUsage::MapWrite");
+    static_assert(static_cast<uint32_t>(BufferUsage::CopySrc) == WGPUBufferUsage_CopySrc, "value mismatch for BufferUsage::CopySrc");
+    static_assert(static_cast<uint32_t>(BufferUsage::CopyDst) == WGPUBufferUsage_CopyDst, "value mismatch for BufferUsage::CopyDst");
+    static_assert(static_cast<uint32_t>(BufferUsage::Index) == WGPUBufferUsage_Index, "value mismatch for BufferUsage::Index");
+    static_assert(static_cast<uint32_t>(BufferUsage::Vertex) == WGPUBufferUsage_Vertex, "value mismatch for BufferUsage::Vertex");
+    static_assert(static_cast<uint32_t>(BufferUsage::Uniform) == WGPUBufferUsage_Uniform, "value mismatch for BufferUsage::Uniform");
+    static_assert(static_cast<uint32_t>(BufferUsage::Storage) == WGPUBufferUsage_Storage, "value mismatch for BufferUsage::Storage");
+    static_assert(static_cast<uint32_t>(BufferUsage::Indirect) == WGPUBufferUsage_Indirect, "value mismatch for BufferUsage::Indirect");
+
+
+    static_assert(sizeof(ColorWriteMask) == sizeof(WGPUColorWriteMaskFlags), "sizeof mismatch for ColorWriteMask");
+    static_assert(alignof(ColorWriteMask) == alignof(WGPUColorWriteMaskFlags), "alignof mismatch for ColorWriteMask");
+
+    static_assert(static_cast<uint32_t>(ColorWriteMask::None) == WGPUColorWriteMask_None, "value mismatch for ColorWriteMask::None");
+    static_assert(static_cast<uint32_t>(ColorWriteMask::Red) == WGPUColorWriteMask_Red, "value mismatch for ColorWriteMask::Red");
+    static_assert(static_cast<uint32_t>(ColorWriteMask::Green) == WGPUColorWriteMask_Green, "value mismatch for ColorWriteMask::Green");
+    static_assert(static_cast<uint32_t>(ColorWriteMask::Blue) == WGPUColorWriteMask_Blue, "value mismatch for ColorWriteMask::Blue");
+    static_assert(static_cast<uint32_t>(ColorWriteMask::Alpha) == WGPUColorWriteMask_Alpha, "value mismatch for ColorWriteMask::Alpha");
+    static_assert(static_cast<uint32_t>(ColorWriteMask::All) == WGPUColorWriteMask_All, "value mismatch for ColorWriteMask::All");
+
+
+    static_assert(sizeof(ShaderStage) == sizeof(WGPUShaderStageFlags), "sizeof mismatch for ShaderStage");
+    static_assert(alignof(ShaderStage) == alignof(WGPUShaderStageFlags), "alignof mismatch for ShaderStage");
+
+    static_assert(static_cast<uint32_t>(ShaderStage::None) == WGPUShaderStage_None, "value mismatch for ShaderStage::None");
+    static_assert(static_cast<uint32_t>(ShaderStage::Vertex) == WGPUShaderStage_Vertex, "value mismatch for ShaderStage::Vertex");
+    static_assert(static_cast<uint32_t>(ShaderStage::Fragment) == WGPUShaderStage_Fragment, "value mismatch for ShaderStage::Fragment");
+    static_assert(static_cast<uint32_t>(ShaderStage::Compute) == WGPUShaderStage_Compute, "value mismatch for ShaderStage::Compute");
+
+
+    static_assert(sizeof(TextureUsage) == sizeof(WGPUTextureUsageFlags), "sizeof mismatch for TextureUsage");
+    static_assert(alignof(TextureUsage) == alignof(WGPUTextureUsageFlags), "alignof mismatch for TextureUsage");
+
+    static_assert(static_cast<uint32_t>(TextureUsage::None) == WGPUTextureUsage_None, "value mismatch for TextureUsage::None");
+    static_assert(static_cast<uint32_t>(TextureUsage::CopySrc) == WGPUTextureUsage_CopySrc, "value mismatch for TextureUsage::CopySrc");
+    static_assert(static_cast<uint32_t>(TextureUsage::CopyDst) == WGPUTextureUsage_CopyDst, "value mismatch for TextureUsage::CopyDst");
+    static_assert(static_cast<uint32_t>(TextureUsage::Sampled) == WGPUTextureUsage_Sampled, "value mismatch for TextureUsage::Sampled");
+    static_assert(static_cast<uint32_t>(TextureUsage::Storage) == WGPUTextureUsage_Storage, "value mismatch for TextureUsage::Storage");
+    static_assert(static_cast<uint32_t>(TextureUsage::OutputAttachment) == WGPUTextureUsage_OutputAttachment, "value mismatch for TextureUsage::OutputAttachment");
+    static_assert(static_cast<uint32_t>(TextureUsage::Present) == WGPUTextureUsage_Present, "value mismatch for TextureUsage::Present");
+
+
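+    // Struct layout checks: sizeof, alignof, and member offsets of the C++ structs
+    // must match their WGPU C equivalents so pointers can be reinterpreted across the boundary.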
+    static_assert(sizeof(ChainedStruct) == sizeof(WGPUChainedStruct),
+            "sizeof mismatch for ChainedStruct");
+    static_assert(alignof(ChainedStruct) == alignof(WGPUChainedStruct),
+            "alignof mismatch for ChainedStruct");
+    static_assert(offsetof(ChainedStruct, nextInChain) == offsetof(WGPUChainedStruct, next),
+            "offsetof mismatch for ChainedStruct::nextInChain");
+    static_assert(offsetof(ChainedStruct, sType) == offsetof(WGPUChainedStruct, sType),
+            "offsetof mismatch for ChainedStruct::sType");
+
+
+    static_assert(sizeof(AdapterProperties) == sizeof(WGPUAdapterProperties), "sizeof mismatch for AdapterProperties");
+    static_assert(alignof(AdapterProperties) == alignof(WGPUAdapterProperties), "alignof mismatch for AdapterProperties");
+
+    static_assert(offsetof(AdapterProperties, nextInChain) == offsetof(WGPUAdapterProperties, nextInChain),
+            "offsetof mismatch for AdapterProperties::nextInChain");
+    static_assert(offsetof(AdapterProperties, deviceID) == offsetof(WGPUAdapterProperties, deviceID),
+            "offsetof mismatch for AdapterProperties::deviceID");
+    static_assert(offsetof(AdapterProperties, vendorID) == offsetof(WGPUAdapterProperties, vendorID),
+            "offsetof mismatch for AdapterProperties::vendorID");
+    static_assert(offsetof(AdapterProperties, name) == offsetof(WGPUAdapterProperties, name),
+            "offsetof mismatch for AdapterProperties::name");
+    static_assert(offsetof(AdapterProperties, adapterType) == offsetof(WGPUAdapterProperties, adapterType),
+            "offsetof mismatch for AdapterProperties::adapterType");
+    static_assert(offsetof(AdapterProperties, backendType) == offsetof(WGPUAdapterProperties, backendType),
+            "offsetof mismatch for AdapterProperties::backendType");
+
+
+    static_assert(sizeof(BindGroupEntry) == sizeof(WGPUBindGroupEntry), "sizeof mismatch for BindGroupEntry");
+    static_assert(alignof(BindGroupEntry) == alignof(WGPUBindGroupEntry), "alignof mismatch for BindGroupEntry");
+
+    static_assert(offsetof(BindGroupEntry, binding) == offsetof(WGPUBindGroupEntry, binding),
+            "offsetof mismatch for BindGroupEntry::binding");
+    static_assert(offsetof(BindGroupEntry, buffer) == offsetof(WGPUBindGroupEntry, buffer),
+            "offsetof mismatch for BindGroupEntry::buffer");
+    static_assert(offsetof(BindGroupEntry, offset) == offsetof(WGPUBindGroupEntry, offset),
+            "offsetof mismatch for BindGroupEntry::offset");
+    static_assert(offsetof(BindGroupEntry, size) == offsetof(WGPUBindGroupEntry, size),
+            "offsetof mismatch for BindGroupEntry::size");
+    static_assert(offsetof(BindGroupEntry, sampler) == offsetof(WGPUBindGroupEntry, sampler),
+            "offsetof mismatch for BindGroupEntry::sampler");
+    static_assert(offsetof(BindGroupEntry, textureView) == offsetof(WGPUBindGroupEntry, textureView),
+            "offsetof mismatch for BindGroupEntry::textureView");
+
+
+    static_assert(sizeof(BindGroupLayoutEntry) == sizeof(WGPUBindGroupLayoutEntry), "sizeof mismatch for BindGroupLayoutEntry");
+    static_assert(alignof(BindGroupLayoutEntry) == alignof(WGPUBindGroupLayoutEntry), "alignof mismatch for BindGroupLayoutEntry");
+
+    static_assert(offsetof(BindGroupLayoutEntry, binding) == offsetof(WGPUBindGroupLayoutEntry, binding),
+            "offsetof mismatch for BindGroupLayoutEntry::binding");
+    static_assert(offsetof(BindGroupLayoutEntry, visibility) == offsetof(WGPUBindGroupLayoutEntry, visibility),
+            "offsetof mismatch for BindGroupLayoutEntry::visibility");
+    static_assert(offsetof(BindGroupLayoutEntry, type) == offsetof(WGPUBindGroupLayoutEntry, type),
+            "offsetof mismatch for BindGroupLayoutEntry::type");
+    static_assert(offsetof(BindGroupLayoutEntry, hasDynamicOffset) == offsetof(WGPUBindGroupLayoutEntry, hasDynamicOffset),
+            "offsetof mismatch for BindGroupLayoutEntry::hasDynamicOffset");
+    static_assert(offsetof(BindGroupLayoutEntry, multisampled) == offsetof(WGPUBindGroupLayoutEntry, multisampled),
+            "offsetof mismatch for BindGroupLayoutEntry::multisampled");
+    static_assert(offsetof(BindGroupLayoutEntry, textureDimension) == offsetof(WGPUBindGroupLayoutEntry, textureDimension),
+            "offsetof mismatch for BindGroupLayoutEntry::textureDimension");
+    static_assert(offsetof(BindGroupLayoutEntry, viewDimension) == offsetof(WGPUBindGroupLayoutEntry, viewDimension),
+            "offsetof mismatch for BindGroupLayoutEntry::viewDimension");
+    static_assert(offsetof(BindGroupLayoutEntry, textureComponentType) == offsetof(WGPUBindGroupLayoutEntry, textureComponentType),
+            "offsetof mismatch for BindGroupLayoutEntry::textureComponentType");
+    static_assert(offsetof(BindGroupLayoutEntry, storageTextureFormat) == offsetof(WGPUBindGroupLayoutEntry, storageTextureFormat),
+            "offsetof mismatch for BindGroupLayoutEntry::storageTextureFormat");
+
+
+    static_assert(sizeof(BlendDescriptor) == sizeof(WGPUBlendDescriptor), "sizeof mismatch for BlendDescriptor");
+    static_assert(alignof(BlendDescriptor) == alignof(WGPUBlendDescriptor), "alignof mismatch for BlendDescriptor");
+
+    static_assert(offsetof(BlendDescriptor, operation) == offsetof(WGPUBlendDescriptor, operation),
+            "offsetof mismatch for BlendDescriptor::operation");
+    static_assert(offsetof(BlendDescriptor, srcFactor) == offsetof(WGPUBlendDescriptor, srcFactor),
+            "offsetof mismatch for BlendDescriptor::srcFactor");
+    static_assert(offsetof(BlendDescriptor, dstFactor) == offsetof(WGPUBlendDescriptor, dstFactor),
+            "offsetof mismatch for BlendDescriptor::dstFactor");
+
+
+    static_assert(sizeof(BufferCopyView) == sizeof(WGPUBufferCopyView), "sizeof mismatch for BufferCopyView");
+    static_assert(alignof(BufferCopyView) == alignof(WGPUBufferCopyView), "alignof mismatch for BufferCopyView");
+
+    static_assert(offsetof(BufferCopyView, nextInChain) == offsetof(WGPUBufferCopyView, nextInChain),
+            "offsetof mismatch for BufferCopyView::nextInChain");
+    static_assert(offsetof(BufferCopyView, buffer) == offsetof(WGPUBufferCopyView, buffer),
+            "offsetof mismatch for BufferCopyView::buffer");
+    static_assert(offsetof(BufferCopyView, offset) == offsetof(WGPUBufferCopyView, offset),
+            "offsetof mismatch for BufferCopyView::offset");
+    static_assert(offsetof(BufferCopyView, rowPitch) == offsetof(WGPUBufferCopyView, rowPitch),
+            "offsetof mismatch for BufferCopyView::rowPitch");
+    static_assert(offsetof(BufferCopyView, imageHeight) == offsetof(WGPUBufferCopyView, imageHeight),
+            "offsetof mismatch for BufferCopyView::imageHeight");
+    static_assert(offsetof(BufferCopyView, bytesPerRow) == offsetof(WGPUBufferCopyView, bytesPerRow),
+            "offsetof mismatch for BufferCopyView::bytesPerRow");
+    static_assert(offsetof(BufferCopyView, rowsPerImage) == offsetof(WGPUBufferCopyView, rowsPerImage),
+            "offsetof mismatch for BufferCopyView::rowsPerImage");
+
+
+    static_assert(sizeof(BufferDescriptor) == sizeof(WGPUBufferDescriptor), "sizeof mismatch for BufferDescriptor");
+    static_assert(alignof(BufferDescriptor) == alignof(WGPUBufferDescriptor), "alignof mismatch for BufferDescriptor");
+
+    static_assert(offsetof(BufferDescriptor, nextInChain) == offsetof(WGPUBufferDescriptor, nextInChain),
+            "offsetof mismatch for BufferDescriptor::nextInChain");
+    static_assert(offsetof(BufferDescriptor, label) == offsetof(WGPUBufferDescriptor, label),
+            "offsetof mismatch for BufferDescriptor::label");
+    static_assert(offsetof(BufferDescriptor, usage) == offsetof(WGPUBufferDescriptor, usage),
+            "offsetof mismatch for BufferDescriptor::usage");
+    static_assert(offsetof(BufferDescriptor, size) == offsetof(WGPUBufferDescriptor, size),
+            "offsetof mismatch for BufferDescriptor::size");
+
+
+    static_assert(sizeof(Color) == sizeof(WGPUColor), "sizeof mismatch for Color");
+    static_assert(alignof(Color) == alignof(WGPUColor), "alignof mismatch for Color");
+
+    static_assert(offsetof(Color, r) == offsetof(WGPUColor, r),
+            "offsetof mismatch for Color::r");
+    static_assert(offsetof(Color, g) == offsetof(WGPUColor, g),
+            "offsetof mismatch for Color::g");
+    static_assert(offsetof(Color, b) == offsetof(WGPUColor, b),
+            "offsetof mismatch for Color::b");
+    static_assert(offsetof(Color, a) == offsetof(WGPUColor, a),
+            "offsetof mismatch for Color::a");
+
+
+    static_assert(sizeof(CommandBufferDescriptor) == sizeof(WGPUCommandBufferDescriptor), "sizeof mismatch for CommandBufferDescriptor");
+    static_assert(alignof(CommandBufferDescriptor) == alignof(WGPUCommandBufferDescriptor), "alignof mismatch for CommandBufferDescriptor");
+
+    static_assert(offsetof(CommandBufferDescriptor, nextInChain) == offsetof(WGPUCommandBufferDescriptor, nextInChain),
+            "offsetof mismatch for CommandBufferDescriptor::nextInChain");
+    static_assert(offsetof(CommandBufferDescriptor, label) == offsetof(WGPUCommandBufferDescriptor, label),
+            "offsetof mismatch for CommandBufferDescriptor::label");
+
+
+    static_assert(sizeof(CommandEncoderDescriptor) == sizeof(WGPUCommandEncoderDescriptor), "sizeof mismatch for CommandEncoderDescriptor");
+    static_assert(alignof(CommandEncoderDescriptor) == alignof(WGPUCommandEncoderDescriptor), "alignof mismatch for CommandEncoderDescriptor");
+
+    static_assert(offsetof(CommandEncoderDescriptor, nextInChain) == offsetof(WGPUCommandEncoderDescriptor, nextInChain),
+            "offsetof mismatch for CommandEncoderDescriptor::nextInChain");
+    static_assert(offsetof(CommandEncoderDescriptor, label) == offsetof(WGPUCommandEncoderDescriptor, label),
+            "offsetof mismatch for CommandEncoderDescriptor::label");
+
+
+    static_assert(sizeof(ComputePassDescriptor) == sizeof(WGPUComputePassDescriptor), "sizeof mismatch for ComputePassDescriptor");
+    static_assert(alignof(ComputePassDescriptor) == alignof(WGPUComputePassDescriptor), "alignof mismatch for ComputePassDescriptor");
+
+    static_assert(offsetof(ComputePassDescriptor, nextInChain) == offsetof(WGPUComputePassDescriptor, nextInChain),
+            "offsetof mismatch for ComputePassDescriptor::nextInChain");
+    static_assert(offsetof(ComputePassDescriptor, label) == offsetof(WGPUComputePassDescriptor, label),
+            "offsetof mismatch for ComputePassDescriptor::label");
+
+
+    static_assert(sizeof(CreateBufferMappedResult) == sizeof(WGPUCreateBufferMappedResult), "sizeof mismatch for CreateBufferMappedResult");
+    static_assert(alignof(CreateBufferMappedResult) == alignof(WGPUCreateBufferMappedResult), "alignof mismatch for CreateBufferMappedResult");
+
+    static_assert(offsetof(CreateBufferMappedResult, buffer) == offsetof(WGPUCreateBufferMappedResult, buffer),
+            "offsetof mismatch for CreateBufferMappedResult::buffer");
+    static_assert(offsetof(CreateBufferMappedResult, dataLength) == offsetof(WGPUCreateBufferMappedResult, dataLength),
+            "offsetof mismatch for CreateBufferMappedResult::dataLength");
+    static_assert(offsetof(CreateBufferMappedResult, data) == offsetof(WGPUCreateBufferMappedResult, data),
+            "offsetof mismatch for CreateBufferMappedResult::data");
+
+
+    static_assert(sizeof(DeviceProperties) == sizeof(WGPUDeviceProperties), "sizeof mismatch for DeviceProperties");
+    static_assert(alignof(DeviceProperties) == alignof(WGPUDeviceProperties), "alignof mismatch for DeviceProperties");
+
+    static_assert(offsetof(DeviceProperties, textureCompressionBC) == offsetof(WGPUDeviceProperties, textureCompressionBC),
+            "offsetof mismatch for DeviceProperties::textureCompressionBC");
+
+
+    static_assert(sizeof(Extent3D) == sizeof(WGPUExtent3D), "sizeof mismatch for Extent3D");
+    static_assert(alignof(Extent3D) == alignof(WGPUExtent3D), "alignof mismatch for Extent3D");
+
+    static_assert(offsetof(Extent3D, width) == offsetof(WGPUExtent3D, width),
+            "offsetof mismatch for Extent3D::width");
+    static_assert(offsetof(Extent3D, height) == offsetof(WGPUExtent3D, height),
+            "offsetof mismatch for Extent3D::height");
+    static_assert(offsetof(Extent3D, depth) == offsetof(WGPUExtent3D, depth),
+            "offsetof mismatch for Extent3D::depth");
+
+
+    static_assert(sizeof(FenceDescriptor) == sizeof(WGPUFenceDescriptor), "sizeof mismatch for FenceDescriptor");
+    static_assert(alignof(FenceDescriptor) == alignof(WGPUFenceDescriptor), "alignof mismatch for FenceDescriptor");
+
+    static_assert(offsetof(FenceDescriptor, nextInChain) == offsetof(WGPUFenceDescriptor, nextInChain),
+            "offsetof mismatch for FenceDescriptor::nextInChain");
+    static_assert(offsetof(FenceDescriptor, label) == offsetof(WGPUFenceDescriptor, label),
+            "offsetof mismatch for FenceDescriptor::label");
+    static_assert(offsetof(FenceDescriptor, initialValue) == offsetof(WGPUFenceDescriptor, initialValue),
+            "offsetof mismatch for FenceDescriptor::initialValue");
+
+
+    static_assert(sizeof(InstanceDescriptor) == sizeof(WGPUInstanceDescriptor), "sizeof mismatch for InstanceDescriptor");
+    static_assert(alignof(InstanceDescriptor) == alignof(WGPUInstanceDescriptor), "alignof mismatch for InstanceDescriptor");
+
+    static_assert(offsetof(InstanceDescriptor, nextInChain) == offsetof(WGPUInstanceDescriptor, nextInChain),
+            "offsetof mismatch for InstanceDescriptor::nextInChain");
+
+
+    static_assert(sizeof(Origin3D) == sizeof(WGPUOrigin3D), "sizeof mismatch for Origin3D");
+    static_assert(alignof(Origin3D) == alignof(WGPUOrigin3D), "alignof mismatch for Origin3D");
+
+    static_assert(offsetof(Origin3D, x) == offsetof(WGPUOrigin3D, x),
+            "offsetof mismatch for Origin3D::x");
+    static_assert(offsetof(Origin3D, y) == offsetof(WGPUOrigin3D, y),
+            "offsetof mismatch for Origin3D::y");
+    static_assert(offsetof(Origin3D, z) == offsetof(WGPUOrigin3D, z),
+            "offsetof mismatch for Origin3D::z");
+
+
+    static_assert(sizeof(PipelineLayoutDescriptor) == sizeof(WGPUPipelineLayoutDescriptor), "sizeof mismatch for PipelineLayoutDescriptor");
+    static_assert(alignof(PipelineLayoutDescriptor) == alignof(WGPUPipelineLayoutDescriptor), "alignof mismatch for PipelineLayoutDescriptor");
+
+    static_assert(offsetof(PipelineLayoutDescriptor, nextInChain) == offsetof(WGPUPipelineLayoutDescriptor, nextInChain),
+            "offsetof mismatch for PipelineLayoutDescriptor::nextInChain");
+    static_assert(offsetof(PipelineLayoutDescriptor, label) == offsetof(WGPUPipelineLayoutDescriptor, label),
+            "offsetof mismatch for PipelineLayoutDescriptor::label");
+    static_assert(offsetof(PipelineLayoutDescriptor, bindGroupLayoutCount) == offsetof(WGPUPipelineLayoutDescriptor, bindGroupLayoutCount),
+            "offsetof mismatch for PipelineLayoutDescriptor::bindGroupLayoutCount");
+    static_assert(offsetof(PipelineLayoutDescriptor, bindGroupLayouts) == offsetof(WGPUPipelineLayoutDescriptor, bindGroupLayouts),
+            "offsetof mismatch for PipelineLayoutDescriptor::bindGroupLayouts");
+
+
+    static_assert(sizeof(ProgrammableStageDescriptor) == sizeof(WGPUProgrammableStageDescriptor), "sizeof mismatch for ProgrammableStageDescriptor");
+    static_assert(alignof(ProgrammableStageDescriptor) == alignof(WGPUProgrammableStageDescriptor), "alignof mismatch for ProgrammableStageDescriptor");
+
+    static_assert(offsetof(ProgrammableStageDescriptor, nextInChain) == offsetof(WGPUProgrammableStageDescriptor, nextInChain),
+            "offsetof mismatch for ProgrammableStageDescriptor::nextInChain");
+    static_assert(offsetof(ProgrammableStageDescriptor, module) == offsetof(WGPUProgrammableStageDescriptor, module),
+            "offsetof mismatch for ProgrammableStageDescriptor::module");
+    static_assert(offsetof(ProgrammableStageDescriptor, entryPoint) == offsetof(WGPUProgrammableStageDescriptor, entryPoint),
+            "offsetof mismatch for ProgrammableStageDescriptor::entryPoint");
+
+
+    static_assert(sizeof(RasterizationStateDescriptor) == sizeof(WGPURasterizationStateDescriptor), "sizeof mismatch for RasterizationStateDescriptor");
+    static_assert(alignof(RasterizationStateDescriptor) == alignof(WGPURasterizationStateDescriptor), "alignof mismatch for RasterizationStateDescriptor");
+
+    static_assert(offsetof(RasterizationStateDescriptor, nextInChain) == offsetof(WGPURasterizationStateDescriptor, nextInChain),
+            "offsetof mismatch for RasterizationStateDescriptor::nextInChain");
+    static_assert(offsetof(RasterizationStateDescriptor, frontFace) == offsetof(WGPURasterizationStateDescriptor, frontFace),
+            "offsetof mismatch for RasterizationStateDescriptor::frontFace");
+    static_assert(offsetof(RasterizationStateDescriptor, cullMode) == offsetof(WGPURasterizationStateDescriptor, cullMode),
+            "offsetof mismatch for RasterizationStateDescriptor::cullMode");
+    static_assert(offsetof(RasterizationStateDescriptor, depthBias) == offsetof(WGPURasterizationStateDescriptor, depthBias),
+            "offsetof mismatch for RasterizationStateDescriptor::depthBias");
+    static_assert(offsetof(RasterizationStateDescriptor, depthBiasSlopeScale) == offsetof(WGPURasterizationStateDescriptor, depthBiasSlopeScale),
+            "offsetof mismatch for RasterizationStateDescriptor::depthBiasSlopeScale");
+    static_assert(offsetof(RasterizationStateDescriptor, depthBiasClamp) == offsetof(WGPURasterizationStateDescriptor, depthBiasClamp),
+            "offsetof mismatch for RasterizationStateDescriptor::depthBiasClamp");
+
+
+    static_assert(sizeof(RenderBundleDescriptor) == sizeof(WGPURenderBundleDescriptor), "sizeof mismatch for RenderBundleDescriptor");
+    static_assert(alignof(RenderBundleDescriptor) == alignof(WGPURenderBundleDescriptor), "alignof mismatch for RenderBundleDescriptor");
+
+    static_assert(offsetof(RenderBundleDescriptor, nextInChain) == offsetof(WGPURenderBundleDescriptor, nextInChain),
+            "offsetof mismatch for RenderBundleDescriptor::nextInChain");
+    static_assert(offsetof(RenderBundleDescriptor, label) == offsetof(WGPURenderBundleDescriptor, label),
+            "offsetof mismatch for RenderBundleDescriptor::label");
+
+
+    static_assert(sizeof(RenderBundleEncoderDescriptor) == sizeof(WGPURenderBundleEncoderDescriptor), "sizeof mismatch for RenderBundleEncoderDescriptor");
+    static_assert(alignof(RenderBundleEncoderDescriptor) == alignof(WGPURenderBundleEncoderDescriptor), "alignof mismatch for RenderBundleEncoderDescriptor");
+
+    static_assert(offsetof(RenderBundleEncoderDescriptor, nextInChain) == offsetof(WGPURenderBundleEncoderDescriptor, nextInChain),
+            "offsetof mismatch for RenderBundleEncoderDescriptor::nextInChain");
+    static_assert(offsetof(RenderBundleEncoderDescriptor, label) == offsetof(WGPURenderBundleEncoderDescriptor, label),
+            "offsetof mismatch for RenderBundleEncoderDescriptor::label");
+    static_assert(offsetof(RenderBundleEncoderDescriptor, colorFormatsCount) == offsetof(WGPURenderBundleEncoderDescriptor, colorFormatsCount),
+            "offsetof mismatch for RenderBundleEncoderDescriptor::colorFormatsCount");
+    static_assert(offsetof(RenderBundleEncoderDescriptor, colorFormats) == offsetof(WGPURenderBundleEncoderDescriptor, colorFormats),
+            "offsetof mismatch for RenderBundleEncoderDescriptor::colorFormats");
+    static_assert(offsetof(RenderBundleEncoderDescriptor, depthStencilFormat) == offsetof(WGPURenderBundleEncoderDescriptor, depthStencilFormat),
+            "offsetof mismatch for RenderBundleEncoderDescriptor::depthStencilFormat");
+    static_assert(offsetof(RenderBundleEncoderDescriptor, sampleCount) == offsetof(WGPURenderBundleEncoderDescriptor, sampleCount),
+            "offsetof mismatch for RenderBundleEncoderDescriptor::sampleCount");
+
+
+    static_assert(sizeof(RenderPassDepthStencilAttachmentDescriptor) == sizeof(WGPURenderPassDepthStencilAttachmentDescriptor), "sizeof mismatch for RenderPassDepthStencilAttachmentDescriptor");
+    static_assert(alignof(RenderPassDepthStencilAttachmentDescriptor) == alignof(WGPURenderPassDepthStencilAttachmentDescriptor), "alignof mismatch for RenderPassDepthStencilAttachmentDescriptor");
+
+    static_assert(offsetof(RenderPassDepthStencilAttachmentDescriptor, attachment) == offsetof(WGPURenderPassDepthStencilAttachmentDescriptor, attachment),
+            "offsetof mismatch for RenderPassDepthStencilAttachmentDescriptor::attachment");
+    static_assert(offsetof(RenderPassDepthStencilAttachmentDescriptor, depthLoadOp) == offsetof(WGPURenderPassDepthStencilAttachmentDescriptor, depthLoadOp),
+            "offsetof mismatch for RenderPassDepthStencilAttachmentDescriptor::depthLoadOp");
+    static_assert(offsetof(RenderPassDepthStencilAttachmentDescriptor, depthStoreOp) == offsetof(WGPURenderPassDepthStencilAttachmentDescriptor, depthStoreOp),
+            "offsetof mismatch for RenderPassDepthStencilAttachmentDescriptor::depthStoreOp");
+    static_assert(offsetof(RenderPassDepthStencilAttachmentDescriptor, clearDepth) == offsetof(WGPURenderPassDepthStencilAttachmentDescriptor, clearDepth),
+            "offsetof mismatch for RenderPassDepthStencilAttachmentDescriptor::clearDepth");
+    static_assert(offsetof(RenderPassDepthStencilAttachmentDescriptor, stencilLoadOp) == offsetof(WGPURenderPassDepthStencilAttachmentDescriptor, stencilLoadOp),
+            "offsetof mismatch for RenderPassDepthStencilAttachmentDescriptor::stencilLoadOp");
+    static_assert(offsetof(RenderPassDepthStencilAttachmentDescriptor, stencilStoreOp) == offsetof(WGPURenderPassDepthStencilAttachmentDescriptor, stencilStoreOp),
+            "offsetof mismatch for RenderPassDepthStencilAttachmentDescriptor::stencilStoreOp");
+    static_assert(offsetof(RenderPassDepthStencilAttachmentDescriptor, clearStencil) == offsetof(WGPURenderPassDepthStencilAttachmentDescriptor, clearStencil),
+            "offsetof mismatch for RenderPassDepthStencilAttachmentDescriptor::clearStencil");
+
+
+    static_assert(sizeof(SamplerDescriptor) == sizeof(WGPUSamplerDescriptor), "sizeof mismatch for SamplerDescriptor");
+    static_assert(alignof(SamplerDescriptor) == alignof(WGPUSamplerDescriptor), "alignof mismatch for SamplerDescriptor");
+
+    static_assert(offsetof(SamplerDescriptor, nextInChain) == offsetof(WGPUSamplerDescriptor, nextInChain),
+            "offsetof mismatch for SamplerDescriptor::nextInChain");
+    static_assert(offsetof(SamplerDescriptor, label) == offsetof(WGPUSamplerDescriptor, label),
+            "offsetof mismatch for SamplerDescriptor::label");
+    static_assert(offsetof(SamplerDescriptor, addressModeU) == offsetof(WGPUSamplerDescriptor, addressModeU),
+            "offsetof mismatch for SamplerDescriptor::addressModeU");
+    static_assert(offsetof(SamplerDescriptor, addressModeV) == offsetof(WGPUSamplerDescriptor, addressModeV),
+            "offsetof mismatch for SamplerDescriptor::addressModeV");
+    static_assert(offsetof(SamplerDescriptor, addressModeW) == offsetof(WGPUSamplerDescriptor, addressModeW),
+            "offsetof mismatch for SamplerDescriptor::addressModeW");
+    static_assert(offsetof(SamplerDescriptor, magFilter) == offsetof(WGPUSamplerDescriptor, magFilter),
+            "offsetof mismatch for SamplerDescriptor::magFilter");
+    static_assert(offsetof(SamplerDescriptor, minFilter) == offsetof(WGPUSamplerDescriptor, minFilter),
+            "offsetof mismatch for SamplerDescriptor::minFilter");
+    static_assert(offsetof(SamplerDescriptor, mipmapFilter) == offsetof(WGPUSamplerDescriptor, mipmapFilter),
+            "offsetof mismatch for SamplerDescriptor::mipmapFilter");
+    static_assert(offsetof(SamplerDescriptor, lodMinClamp) == offsetof(WGPUSamplerDescriptor, lodMinClamp),
+            "offsetof mismatch for SamplerDescriptor::lodMinClamp");
+    static_assert(offsetof(SamplerDescriptor, lodMaxClamp) == offsetof(WGPUSamplerDescriptor, lodMaxClamp),
+            "offsetof mismatch for SamplerDescriptor::lodMaxClamp");
+    static_assert(offsetof(SamplerDescriptor, compare) == offsetof(WGPUSamplerDescriptor, compare),
+            "offsetof mismatch for SamplerDescriptor::compare");
+
+
+    static_assert(sizeof(SamplerDescriptorDummyAnisotropicFiltering) == sizeof(WGPUSamplerDescriptorDummyAnisotropicFiltering), "sizeof mismatch for SamplerDescriptorDummyAnisotropicFiltering");
+    static_assert(alignof(SamplerDescriptorDummyAnisotropicFiltering) == alignof(WGPUSamplerDescriptorDummyAnisotropicFiltering), "alignof mismatch for SamplerDescriptorDummyAnisotropicFiltering");
+
+    static_assert(offsetof(SamplerDescriptorDummyAnisotropicFiltering, maxAnisotropy) == offsetof(WGPUSamplerDescriptorDummyAnisotropicFiltering, maxAnisotropy),
+            "offsetof mismatch for SamplerDescriptorDummyAnisotropicFiltering::maxAnisotropy");
+
+
+    static_assert(sizeof(ShaderModuleDescriptor) == sizeof(WGPUShaderModuleDescriptor), "sizeof mismatch for ShaderModuleDescriptor");
+    static_assert(alignof(ShaderModuleDescriptor) == alignof(WGPUShaderModuleDescriptor), "alignof mismatch for ShaderModuleDescriptor");
+
+    static_assert(offsetof(ShaderModuleDescriptor, nextInChain) == offsetof(WGPUShaderModuleDescriptor, nextInChain),
+            "offsetof mismatch for ShaderModuleDescriptor::nextInChain");
+    static_assert(offsetof(ShaderModuleDescriptor, label) == offsetof(WGPUShaderModuleDescriptor, label),
+            "offsetof mismatch for ShaderModuleDescriptor::label");
+    static_assert(offsetof(ShaderModuleDescriptor, codeSize) == offsetof(WGPUShaderModuleDescriptor, codeSize),
+            "offsetof mismatch for ShaderModuleDescriptor::codeSize");
+    static_assert(offsetof(ShaderModuleDescriptor, code) == offsetof(WGPUShaderModuleDescriptor, code),
+            "offsetof mismatch for ShaderModuleDescriptor::code");
+
+
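+    // ShaderModuleSPIRVDescriptor and ShaderModuleWGSLDescriptor are chained alternatives for
+    // supplying shader code: SPIR-V as a (codeSize, code) word array, or WGSL as a source string.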
+    static_assert(sizeof(ShaderModuleSPIRVDescriptor) == sizeof(WGPUShaderModuleSPIRVDescriptor), "sizeof mismatch for ShaderModuleSPIRVDescriptor");
+    static_assert(alignof(ShaderModuleSPIRVDescriptor) == alignof(WGPUShaderModuleSPIRVDescriptor), "alignof mismatch for ShaderModuleSPIRVDescriptor");
+
+    static_assert(offsetof(ShaderModuleSPIRVDescriptor, codeSize) == offsetof(WGPUShaderModuleSPIRVDescriptor, codeSize),
+            "offsetof mismatch for ShaderModuleSPIRVDescriptor::codeSize");
+    static_assert(offsetof(ShaderModuleSPIRVDescriptor, code) == offsetof(WGPUShaderModuleSPIRVDescriptor, code),
+            "offsetof mismatch for ShaderModuleSPIRVDescriptor::code");
+
+
+    static_assert(sizeof(ShaderModuleWGSLDescriptor) == sizeof(WGPUShaderModuleWGSLDescriptor), "sizeof mismatch for ShaderModuleWGSLDescriptor");
+    static_assert(alignof(ShaderModuleWGSLDescriptor) == alignof(WGPUShaderModuleWGSLDescriptor), "alignof mismatch for ShaderModuleWGSLDescriptor");
+
+    static_assert(offsetof(ShaderModuleWGSLDescriptor, source) == offsetof(WGPUShaderModuleWGSLDescriptor, source),
+            "offsetof mismatch for ShaderModuleWGSLDescriptor::source");
+
+
+    static_assert(sizeof(StencilStateFaceDescriptor) == sizeof(WGPUStencilStateFaceDescriptor), "sizeof mismatch for StencilStateFaceDescriptor");
+    static_assert(alignof(StencilStateFaceDescriptor) == alignof(WGPUStencilStateFaceDescriptor), "alignof mismatch for StencilStateFaceDescriptor");
+
+    static_assert(offsetof(StencilStateFaceDescriptor, compare) == offsetof(WGPUStencilStateFaceDescriptor, compare),
+            "offsetof mismatch for StencilStateFaceDescriptor::compare");
+    static_assert(offsetof(StencilStateFaceDescriptor, failOp) == offsetof(WGPUStencilStateFaceDescriptor, failOp),
+            "offsetof mismatch for StencilStateFaceDescriptor::failOp");
+    static_assert(offsetof(StencilStateFaceDescriptor, depthFailOp) == offsetof(WGPUStencilStateFaceDescriptor, depthFailOp),
+            "offsetof mismatch for StencilStateFaceDescriptor::depthFailOp");
+    static_assert(offsetof(StencilStateFaceDescriptor, passOp) == offsetof(WGPUStencilStateFaceDescriptor, passOp),
+            "offsetof mismatch for StencilStateFaceDescriptor::passOp");
+
+
+    static_assert(sizeof(SurfaceDescriptor) == sizeof(WGPUSurfaceDescriptor), "sizeof mismatch for SurfaceDescriptor");
+    static_assert(alignof(SurfaceDescriptor) == alignof(WGPUSurfaceDescriptor), "alignof mismatch for SurfaceDescriptor");
+
+    static_assert(offsetof(SurfaceDescriptor, nextInChain) == offsetof(WGPUSurfaceDescriptor, nextInChain),
+            "offsetof mismatch for SurfaceDescriptor::nextInChain");
+    static_assert(offsetof(SurfaceDescriptor, label) == offsetof(WGPUSurfaceDescriptor, label),
+            "offsetof mismatch for SurfaceDescriptor::label");
+
+
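+    // The platform-specific surface descriptors below (HTML canvas id, CAMetalLayer, Win32
+    // HWND/HINSTANCE, Xlib display/window) extend SurfaceDescriptor through its nextInChain
+    // pointer; only their platform payload members are asserted here.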
+    static_assert(sizeof(SurfaceDescriptorFromHTMLCanvasId) == sizeof(WGPUSurfaceDescriptorFromHTMLCanvasId), "sizeof mismatch for SurfaceDescriptorFromHTMLCanvasId");
+    static_assert(alignof(SurfaceDescriptorFromHTMLCanvasId) == alignof(WGPUSurfaceDescriptorFromHTMLCanvasId), "alignof mismatch for SurfaceDescriptorFromHTMLCanvasId");
+
+    static_assert(offsetof(SurfaceDescriptorFromHTMLCanvasId, id) == offsetof(WGPUSurfaceDescriptorFromHTMLCanvasId, id),
+            "offsetof mismatch for SurfaceDescriptorFromHTMLCanvasId::id");
+
+
+    static_assert(sizeof(SurfaceDescriptorFromMetalLayer) == sizeof(WGPUSurfaceDescriptorFromMetalLayer), "sizeof mismatch for SurfaceDescriptorFromMetalLayer");
+    static_assert(alignof(SurfaceDescriptorFromMetalLayer) == alignof(WGPUSurfaceDescriptorFromMetalLayer), "alignof mismatch for SurfaceDescriptorFromMetalLayer");
+
+    static_assert(offsetof(SurfaceDescriptorFromMetalLayer, layer) == offsetof(WGPUSurfaceDescriptorFromMetalLayer, layer),
+            "offsetof mismatch for SurfaceDescriptorFromMetalLayer::layer");
+
+
+    static_assert(sizeof(SurfaceDescriptorFromWindowsHWND) == sizeof(WGPUSurfaceDescriptorFromWindowsHWND), "sizeof mismatch for SurfaceDescriptorFromWindowsHWND");
+    static_assert(alignof(SurfaceDescriptorFromWindowsHWND) == alignof(WGPUSurfaceDescriptorFromWindowsHWND), "alignof mismatch for SurfaceDescriptorFromWindowsHWND");
+
+    static_assert(offsetof(SurfaceDescriptorFromWindowsHWND, hinstance) == offsetof(WGPUSurfaceDescriptorFromWindowsHWND, hinstance),
+            "offsetof mismatch for SurfaceDescriptorFromWindowsHWND::hinstance");
+    static_assert(offsetof(SurfaceDescriptorFromWindowsHWND, hwnd) == offsetof(WGPUSurfaceDescriptorFromWindowsHWND, hwnd),
+            "offsetof mismatch for SurfaceDescriptorFromWindowsHWND::hwnd");
+
+
+    static_assert(sizeof(SurfaceDescriptorFromXlib) == sizeof(WGPUSurfaceDescriptorFromXlib), "sizeof mismatch for SurfaceDescriptorFromXlib");
+    static_assert(alignof(SurfaceDescriptorFromXlib) == alignof(WGPUSurfaceDescriptorFromXlib), "alignof mismatch for SurfaceDescriptorFromXlib");
+
+    static_assert(offsetof(SurfaceDescriptorFromXlib, display) == offsetof(WGPUSurfaceDescriptorFromXlib, display),
+            "offsetof mismatch for SurfaceDescriptorFromXlib::display");
+    static_assert(offsetof(SurfaceDescriptorFromXlib, window) == offsetof(WGPUSurfaceDescriptorFromXlib, window),
+            "offsetof mismatch for SurfaceDescriptorFromXlib::window");
+
+
+    static_assert(sizeof(SwapChainDescriptor) == sizeof(WGPUSwapChainDescriptor), "sizeof mismatch for SwapChainDescriptor");
+    static_assert(alignof(SwapChainDescriptor) == alignof(WGPUSwapChainDescriptor), "alignof mismatch for SwapChainDescriptor");
+
+    static_assert(offsetof(SwapChainDescriptor, nextInChain) == offsetof(WGPUSwapChainDescriptor, nextInChain),
+            "offsetof mismatch for SwapChainDescriptor::nextInChain");
+    static_assert(offsetof(SwapChainDescriptor, label) == offsetof(WGPUSwapChainDescriptor, label),
+            "offsetof mismatch for SwapChainDescriptor::label");
+    static_assert(offsetof(SwapChainDescriptor, usage) == offsetof(WGPUSwapChainDescriptor, usage),
+            "offsetof mismatch for SwapChainDescriptor::usage");
+    static_assert(offsetof(SwapChainDescriptor, format) == offsetof(WGPUSwapChainDescriptor, format),
+            "offsetof mismatch for SwapChainDescriptor::format");
+    static_assert(offsetof(SwapChainDescriptor, width) == offsetof(WGPUSwapChainDescriptor, width),
+            "offsetof mismatch for SwapChainDescriptor::width");
+    static_assert(offsetof(SwapChainDescriptor, height) == offsetof(WGPUSwapChainDescriptor, height),
+            "offsetof mismatch for SwapChainDescriptor::height");
+    static_assert(offsetof(SwapChainDescriptor, presentMode) == offsetof(WGPUSwapChainDescriptor, presentMode),
+            "offsetof mismatch for SwapChainDescriptor::presentMode");
+    static_assert(offsetof(SwapChainDescriptor, implementation) == offsetof(WGPUSwapChainDescriptor, implementation),
+            "offsetof mismatch for SwapChainDescriptor::implementation");
+
+
+    static_assert(sizeof(TextureViewDescriptor) == sizeof(WGPUTextureViewDescriptor), "sizeof mismatch for TextureViewDescriptor");
+    static_assert(alignof(TextureViewDescriptor) == alignof(WGPUTextureViewDescriptor), "alignof mismatch for TextureViewDescriptor");
+
+    static_assert(offsetof(TextureViewDescriptor, nextInChain) == offsetof(WGPUTextureViewDescriptor, nextInChain),
+            "offsetof mismatch for TextureViewDescriptor::nextInChain");
+    static_assert(offsetof(TextureViewDescriptor, label) == offsetof(WGPUTextureViewDescriptor, label),
+            "offsetof mismatch for TextureViewDescriptor::label");
+    static_assert(offsetof(TextureViewDescriptor, format) == offsetof(WGPUTextureViewDescriptor, format),
+            "offsetof mismatch for TextureViewDescriptor::format");
+    static_assert(offsetof(TextureViewDescriptor, dimension) == offsetof(WGPUTextureViewDescriptor, dimension),
+            "offsetof mismatch for TextureViewDescriptor::dimension");
+    static_assert(offsetof(TextureViewDescriptor, baseMipLevel) == offsetof(WGPUTextureViewDescriptor, baseMipLevel),
+            "offsetof mismatch for TextureViewDescriptor::baseMipLevel");
+    static_assert(offsetof(TextureViewDescriptor, mipLevelCount) == offsetof(WGPUTextureViewDescriptor, mipLevelCount),
+            "offsetof mismatch for TextureViewDescriptor::mipLevelCount");
+    static_assert(offsetof(TextureViewDescriptor, baseArrayLayer) == offsetof(WGPUTextureViewDescriptor, baseArrayLayer),
+            "offsetof mismatch for TextureViewDescriptor::baseArrayLayer");
+    static_assert(offsetof(TextureViewDescriptor, arrayLayerCount) == offsetof(WGPUTextureViewDescriptor, arrayLayerCount),
+            "offsetof mismatch for TextureViewDescriptor::arrayLayerCount");
+    static_assert(offsetof(TextureViewDescriptor, aspect) == offsetof(WGPUTextureViewDescriptor, aspect),
+            "offsetof mismatch for TextureViewDescriptor::aspect");
+
+
+    static_assert(sizeof(VertexAttributeDescriptor) == sizeof(WGPUVertexAttributeDescriptor), "sizeof mismatch for VertexAttributeDescriptor");
+    static_assert(alignof(VertexAttributeDescriptor) == alignof(WGPUVertexAttributeDescriptor), "alignof mismatch for VertexAttributeDescriptor");
+
+    static_assert(offsetof(VertexAttributeDescriptor, format) == offsetof(WGPUVertexAttributeDescriptor, format),
+            "offsetof mismatch for VertexAttributeDescriptor::format");
+    static_assert(offsetof(VertexAttributeDescriptor, offset) == offsetof(WGPUVertexAttributeDescriptor, offset),
+            "offsetof mismatch for VertexAttributeDescriptor::offset");
+    static_assert(offsetof(VertexAttributeDescriptor, shaderLocation) == offsetof(WGPUVertexAttributeDescriptor, shaderLocation),
+            "offsetof mismatch for VertexAttributeDescriptor::shaderLocation");
+
+
+    static_assert(sizeof(BindGroupDescriptor) == sizeof(WGPUBindGroupDescriptor), "sizeof mismatch for BindGroupDescriptor");
+    static_assert(alignof(BindGroupDescriptor) == alignof(WGPUBindGroupDescriptor), "alignof mismatch for BindGroupDescriptor");
+
+    static_assert(offsetof(BindGroupDescriptor, nextInChain) == offsetof(WGPUBindGroupDescriptor, nextInChain),
+            "offsetof mismatch for BindGroupDescriptor::nextInChain");
+    static_assert(offsetof(BindGroupDescriptor, label) == offsetof(WGPUBindGroupDescriptor, label),
+            "offsetof mismatch for BindGroupDescriptor::label");
+    static_assert(offsetof(BindGroupDescriptor, layout) == offsetof(WGPUBindGroupDescriptor, layout),
+            "offsetof mismatch for BindGroupDescriptor::layout");
+    static_assert(offsetof(BindGroupDescriptor, bindingCount) == offsetof(WGPUBindGroupDescriptor, bindingCount),
+            "offsetof mismatch for BindGroupDescriptor::bindingCount");
+    static_assert(offsetof(BindGroupDescriptor, bindings) == offsetof(WGPUBindGroupDescriptor, bindings),
+            "offsetof mismatch for BindGroupDescriptor::bindings");
+    static_assert(offsetof(BindGroupDescriptor, entryCount) == offsetof(WGPUBindGroupDescriptor, entryCount),
+            "offsetof mismatch for BindGroupDescriptor::entryCount");
+    static_assert(offsetof(BindGroupDescriptor, entries) == offsetof(WGPUBindGroupDescriptor, entries),
+            "offsetof mismatch for BindGroupDescriptor::entries");
+
+
+    static_assert(sizeof(BindGroupLayoutDescriptor) == sizeof(WGPUBindGroupLayoutDescriptor), "sizeof mismatch for BindGroupLayoutDescriptor");
+    static_assert(alignof(BindGroupLayoutDescriptor) == alignof(WGPUBindGroupLayoutDescriptor), "alignof mismatch for BindGroupLayoutDescriptor");
+
+    static_assert(offsetof(BindGroupLayoutDescriptor, nextInChain) == offsetof(WGPUBindGroupLayoutDescriptor, nextInChain),
+            "offsetof mismatch for BindGroupLayoutDescriptor::nextInChain");
+    static_assert(offsetof(BindGroupLayoutDescriptor, label) == offsetof(WGPUBindGroupLayoutDescriptor, label),
+            "offsetof mismatch for BindGroupLayoutDescriptor::label");
+    static_assert(offsetof(BindGroupLayoutDescriptor, bindingCount) == offsetof(WGPUBindGroupLayoutDescriptor, bindingCount),
+            "offsetof mismatch for BindGroupLayoutDescriptor::bindingCount");
+    static_assert(offsetof(BindGroupLayoutDescriptor, bindings) == offsetof(WGPUBindGroupLayoutDescriptor, bindings),
+            "offsetof mismatch for BindGroupLayoutDescriptor::bindings");
+    static_assert(offsetof(BindGroupLayoutDescriptor, entryCount) == offsetof(WGPUBindGroupLayoutDescriptor, entryCount),
+            "offsetof mismatch for BindGroupLayoutDescriptor::entryCount");
+    static_assert(offsetof(BindGroupLayoutDescriptor, entries) == offsetof(WGPUBindGroupLayoutDescriptor, entries),
+            "offsetof mismatch for BindGroupLayoutDescriptor::entries");
+
+
+    static_assert(sizeof(ColorStateDescriptor) == sizeof(WGPUColorStateDescriptor), "sizeof mismatch for ColorStateDescriptor");
+    static_assert(alignof(ColorStateDescriptor) == alignof(WGPUColorStateDescriptor), "alignof mismatch for ColorStateDescriptor");
+
+    static_assert(offsetof(ColorStateDescriptor, nextInChain) == offsetof(WGPUColorStateDescriptor, nextInChain),
+            "offsetof mismatch for ColorStateDescriptor::nextInChain");
+    static_assert(offsetof(ColorStateDescriptor, format) == offsetof(WGPUColorStateDescriptor, format),
+            "offsetof mismatch for ColorStateDescriptor::format");
+    static_assert(offsetof(ColorStateDescriptor, alphaBlend) == offsetof(WGPUColorStateDescriptor, alphaBlend),
+            "offsetof mismatch for ColorStateDescriptor::alphaBlend");
+    static_assert(offsetof(ColorStateDescriptor, colorBlend) == offsetof(WGPUColorStateDescriptor, colorBlend),
+            "offsetof mismatch for ColorStateDescriptor::colorBlend");
+    static_assert(offsetof(ColorStateDescriptor, writeMask) == offsetof(WGPUColorStateDescriptor, writeMask),
+            "offsetof mismatch for ColorStateDescriptor::writeMask");
+
+
+    static_assert(sizeof(ComputePipelineDescriptor) == sizeof(WGPUComputePipelineDescriptor), "sizeof mismatch for ComputePipelineDescriptor");
+    static_assert(alignof(ComputePipelineDescriptor) == alignof(WGPUComputePipelineDescriptor), "alignof mismatch for ComputePipelineDescriptor");
+
+    static_assert(offsetof(ComputePipelineDescriptor, nextInChain) == offsetof(WGPUComputePipelineDescriptor, nextInChain),
+            "offsetof mismatch for ComputePipelineDescriptor::nextInChain");
+    static_assert(offsetof(ComputePipelineDescriptor, label) == offsetof(WGPUComputePipelineDescriptor, label),
+            "offsetof mismatch for ComputePipelineDescriptor::label");
+    static_assert(offsetof(ComputePipelineDescriptor, layout) == offsetof(WGPUComputePipelineDescriptor, layout),
+            "offsetof mismatch for ComputePipelineDescriptor::layout");
+    static_assert(offsetof(ComputePipelineDescriptor, computeStage) == offsetof(WGPUComputePipelineDescriptor, computeStage),
+            "offsetof mismatch for ComputePipelineDescriptor::computeStage");
+
+
+    static_assert(sizeof(DepthStencilStateDescriptor) == sizeof(WGPUDepthStencilStateDescriptor), "sizeof mismatch for DepthStencilStateDescriptor");
+    static_assert(alignof(DepthStencilStateDescriptor) == alignof(WGPUDepthStencilStateDescriptor), "alignof mismatch for DepthStencilStateDescriptor");
+
+    static_assert(offsetof(DepthStencilStateDescriptor, nextInChain) == offsetof(WGPUDepthStencilStateDescriptor, nextInChain),
+            "offsetof mismatch for DepthStencilStateDescriptor::nextInChain");
+    static_assert(offsetof(DepthStencilStateDescriptor, format) == offsetof(WGPUDepthStencilStateDescriptor, format),
+            "offsetof mismatch for DepthStencilStateDescriptor::format");
+    static_assert(offsetof(DepthStencilStateDescriptor, depthWriteEnabled) == offsetof(WGPUDepthStencilStateDescriptor, depthWriteEnabled),
+            "offsetof mismatch for DepthStencilStateDescriptor::depthWriteEnabled");
+    static_assert(offsetof(DepthStencilStateDescriptor, depthCompare) == offsetof(WGPUDepthStencilStateDescriptor, depthCompare),
+            "offsetof mismatch for DepthStencilStateDescriptor::depthCompare");
+    static_assert(offsetof(DepthStencilStateDescriptor, stencilFront) == offsetof(WGPUDepthStencilStateDescriptor, stencilFront),
+            "offsetof mismatch for DepthStencilStateDescriptor::stencilFront");
+    static_assert(offsetof(DepthStencilStateDescriptor, stencilBack) == offsetof(WGPUDepthStencilStateDescriptor, stencilBack),
+            "offsetof mismatch for DepthStencilStateDescriptor::stencilBack");
+    static_assert(offsetof(DepthStencilStateDescriptor, stencilReadMask) == offsetof(WGPUDepthStencilStateDescriptor, stencilReadMask),
+            "offsetof mismatch for DepthStencilStateDescriptor::stencilReadMask");
+    static_assert(offsetof(DepthStencilStateDescriptor, stencilWriteMask) == offsetof(WGPUDepthStencilStateDescriptor, stencilWriteMask),
+            "offsetof mismatch for DepthStencilStateDescriptor::stencilWriteMask");
+
+
+    static_assert(sizeof(RenderPassColorAttachmentDescriptor) == sizeof(WGPURenderPassColorAttachmentDescriptor), "sizeof mismatch for RenderPassColorAttachmentDescriptor");
+    static_assert(alignof(RenderPassColorAttachmentDescriptor) == alignof(WGPURenderPassColorAttachmentDescriptor), "alignof mismatch for RenderPassColorAttachmentDescriptor");
+
+    static_assert(offsetof(RenderPassColorAttachmentDescriptor, attachment) == offsetof(WGPURenderPassColorAttachmentDescriptor, attachment),
+            "offsetof mismatch for RenderPassColorAttachmentDescriptor::attachment");
+    static_assert(offsetof(RenderPassColorAttachmentDescriptor, resolveTarget) == offsetof(WGPURenderPassColorAttachmentDescriptor, resolveTarget),
+            "offsetof mismatch for RenderPassColorAttachmentDescriptor::resolveTarget");
+    static_assert(offsetof(RenderPassColorAttachmentDescriptor, loadOp) == offsetof(WGPURenderPassColorAttachmentDescriptor, loadOp),
+            "offsetof mismatch for RenderPassColorAttachmentDescriptor::loadOp");
+    static_assert(offsetof(RenderPassColorAttachmentDescriptor, storeOp) == offsetof(WGPURenderPassColorAttachmentDescriptor, storeOp),
+            "offsetof mismatch for RenderPassColorAttachmentDescriptor::storeOp");
+    static_assert(offsetof(RenderPassColorAttachmentDescriptor, clearColor) == offsetof(WGPURenderPassColorAttachmentDescriptor, clearColor),
+            "offsetof mismatch for RenderPassColorAttachmentDescriptor::clearColor");
+
+
+    static_assert(sizeof(RenderPipelineDescriptorDummyExtension) == sizeof(WGPURenderPipelineDescriptorDummyExtension), "sizeof mismatch for RenderPipelineDescriptorDummyExtension");
+    static_assert(alignof(RenderPipelineDescriptorDummyExtension) == alignof(WGPURenderPipelineDescriptorDummyExtension), "alignof mismatch for RenderPipelineDescriptorDummyExtension");
+
+    static_assert(offsetof(RenderPipelineDescriptorDummyExtension, dummyStage) == offsetof(WGPURenderPipelineDescriptorDummyExtension, dummyStage),
+            "offsetof mismatch for RenderPipelineDescriptorDummyExtension::dummyStage");
+
+
+    static_assert(sizeof(TextureCopyView) == sizeof(WGPUTextureCopyView), "sizeof mismatch for TextureCopyView");
+    static_assert(alignof(TextureCopyView) == alignof(WGPUTextureCopyView), "alignof mismatch for TextureCopyView");
+
+    static_assert(offsetof(TextureCopyView, nextInChain) == offsetof(WGPUTextureCopyView, nextInChain),
+            "offsetof mismatch for TextureCopyView::nextInChain");
+    static_assert(offsetof(TextureCopyView, texture) == offsetof(WGPUTextureCopyView, texture),
+            "offsetof mismatch for TextureCopyView::texture");
+    static_assert(offsetof(TextureCopyView, mipLevel) == offsetof(WGPUTextureCopyView, mipLevel),
+            "offsetof mismatch for TextureCopyView::mipLevel");
+    static_assert(offsetof(TextureCopyView, arrayLayer) == offsetof(WGPUTextureCopyView, arrayLayer),
+            "offsetof mismatch for TextureCopyView::arrayLayer");
+    static_assert(offsetof(TextureCopyView, origin) == offsetof(WGPUTextureCopyView, origin),
+            "offsetof mismatch for TextureCopyView::origin");
+
+
+    static_assert(sizeof(TextureDescriptor) == sizeof(WGPUTextureDescriptor), "sizeof mismatch for TextureDescriptor");
+    static_assert(alignof(TextureDescriptor) == alignof(WGPUTextureDescriptor), "alignof mismatch for TextureDescriptor");
+
+    static_assert(offsetof(TextureDescriptor, nextInChain) == offsetof(WGPUTextureDescriptor, nextInChain),
+            "offsetof mismatch for TextureDescriptor::nextInChain");
+    static_assert(offsetof(TextureDescriptor, label) == offsetof(WGPUTextureDescriptor, label),
+            "offsetof mismatch for TextureDescriptor::label");
+    static_assert(offsetof(TextureDescriptor, usage) == offsetof(WGPUTextureDescriptor, usage),
+            "offsetof mismatch for TextureDescriptor::usage");
+    static_assert(offsetof(TextureDescriptor, dimension) == offsetof(WGPUTextureDescriptor, dimension),
+            "offsetof mismatch for TextureDescriptor::dimension");
+    static_assert(offsetof(TextureDescriptor, size) == offsetof(WGPUTextureDescriptor, size),
+            "offsetof mismatch for TextureDescriptor::size");
+    static_assert(offsetof(TextureDescriptor, arrayLayerCount) == offsetof(WGPUTextureDescriptor, arrayLayerCount),
+            "offsetof mismatch for TextureDescriptor::arrayLayerCount");
+    static_assert(offsetof(TextureDescriptor, format) == offsetof(WGPUTextureDescriptor, format),
+            "offsetof mismatch for TextureDescriptor::format");
+    static_assert(offsetof(TextureDescriptor, mipLevelCount) == offsetof(WGPUTextureDescriptor, mipLevelCount),
+            "offsetof mismatch for TextureDescriptor::mipLevelCount");
+    static_assert(offsetof(TextureDescriptor, sampleCount) == offsetof(WGPUTextureDescriptor, sampleCount),
+            "offsetof mismatch for TextureDescriptor::sampleCount");
+
+
+    static_assert(sizeof(VertexBufferLayoutDescriptor) == sizeof(WGPUVertexBufferLayoutDescriptor), "sizeof mismatch for VertexBufferLayoutDescriptor");
+    static_assert(alignof(VertexBufferLayoutDescriptor) == alignof(WGPUVertexBufferLayoutDescriptor), "alignof mismatch for VertexBufferLayoutDescriptor");
+
+    static_assert(offsetof(VertexBufferLayoutDescriptor, arrayStride) == offsetof(WGPUVertexBufferLayoutDescriptor, arrayStride),
+            "offsetof mismatch for VertexBufferLayoutDescriptor::arrayStride");
+    static_assert(offsetof(VertexBufferLayoutDescriptor, stepMode) == offsetof(WGPUVertexBufferLayoutDescriptor, stepMode),
+            "offsetof mismatch for VertexBufferLayoutDescriptor::stepMode");
+    static_assert(offsetof(VertexBufferLayoutDescriptor, attributeCount) == offsetof(WGPUVertexBufferLayoutDescriptor, attributeCount),
+            "offsetof mismatch for VertexBufferLayoutDescriptor::attributeCount");
+    static_assert(offsetof(VertexBufferLayoutDescriptor, attributes) == offsetof(WGPUVertexBufferLayoutDescriptor, attributes),
+            "offsetof mismatch for VertexBufferLayoutDescriptor::attributes");
+
+
+    static_assert(sizeof(RenderPassDescriptor) == sizeof(WGPURenderPassDescriptor), "sizeof mismatch for RenderPassDescriptor");
+    static_assert(alignof(RenderPassDescriptor) == alignof(WGPURenderPassDescriptor), "alignof mismatch for RenderPassDescriptor");
+
+    static_assert(offsetof(RenderPassDescriptor, nextInChain) == offsetof(WGPURenderPassDescriptor, nextInChain),
+            "offsetof mismatch for RenderPassDescriptor::nextInChain");
+    static_assert(offsetof(RenderPassDescriptor, label) == offsetof(WGPURenderPassDescriptor, label),
+            "offsetof mismatch for RenderPassDescriptor::label");
+    static_assert(offsetof(RenderPassDescriptor, colorAttachmentCount) == offsetof(WGPURenderPassDescriptor, colorAttachmentCount),
+            "offsetof mismatch for RenderPassDescriptor::colorAttachmentCount");
+    static_assert(offsetof(RenderPassDescriptor, colorAttachments) == offsetof(WGPURenderPassDescriptor, colorAttachments),
+            "offsetof mismatch for RenderPassDescriptor::colorAttachments");
+    static_assert(offsetof(RenderPassDescriptor, depthStencilAttachment) == offsetof(WGPURenderPassDescriptor, depthStencilAttachment),
+            "offsetof mismatch for RenderPassDescriptor::depthStencilAttachment");
+
+
+    static_assert(sizeof(VertexStateDescriptor) == sizeof(WGPUVertexStateDescriptor), "sizeof mismatch for VertexStateDescriptor");
+    static_assert(alignof(VertexStateDescriptor) == alignof(WGPUVertexStateDescriptor), "alignof mismatch for VertexStateDescriptor");
+
+    static_assert(offsetof(VertexStateDescriptor, nextInChain) == offsetof(WGPUVertexStateDescriptor, nextInChain),
+            "offsetof mismatch for VertexStateDescriptor::nextInChain");
+    static_assert(offsetof(VertexStateDescriptor, indexFormat) == offsetof(WGPUVertexStateDescriptor, indexFormat),
+            "offsetof mismatch for VertexStateDescriptor::indexFormat");
+    static_assert(offsetof(VertexStateDescriptor, vertexBufferCount) == offsetof(WGPUVertexStateDescriptor, vertexBufferCount),
+            "offsetof mismatch for VertexStateDescriptor::vertexBufferCount");
+    static_assert(offsetof(VertexStateDescriptor, vertexBuffers) == offsetof(WGPUVertexStateDescriptor, vertexBuffers),
+            "offsetof mismatch for VertexStateDescriptor::vertexBuffers");
+
+
+    static_assert(sizeof(RenderPipelineDescriptor) == sizeof(WGPURenderPipelineDescriptor), "sizeof mismatch for RenderPipelineDescriptor");
+    static_assert(alignof(RenderPipelineDescriptor) == alignof(WGPURenderPipelineDescriptor), "alignof mismatch for RenderPipelineDescriptor");
+
+    static_assert(offsetof(RenderPipelineDescriptor, nextInChain) == offsetof(WGPURenderPipelineDescriptor, nextInChain),
+            "offsetof mismatch for RenderPipelineDescriptor::nextInChain");
+    static_assert(offsetof(RenderPipelineDescriptor, label) == offsetof(WGPURenderPipelineDescriptor, label),
+            "offsetof mismatch for RenderPipelineDescriptor::label");
+    static_assert(offsetof(RenderPipelineDescriptor, layout) == offsetof(WGPURenderPipelineDescriptor, layout),
+            "offsetof mismatch for RenderPipelineDescriptor::layout");
+    static_assert(offsetof(RenderPipelineDescriptor, vertexStage) == offsetof(WGPURenderPipelineDescriptor, vertexStage),
+            "offsetof mismatch for RenderPipelineDescriptor::vertexStage");
+    static_assert(offsetof(RenderPipelineDescriptor, fragmentStage) == offsetof(WGPURenderPipelineDescriptor, fragmentStage),
+            "offsetof mismatch for RenderPipelineDescriptor::fragmentStage");
+    static_assert(offsetof(RenderPipelineDescriptor, vertexState) == offsetof(WGPURenderPipelineDescriptor, vertexState),
+            "offsetof mismatch for RenderPipelineDescriptor::vertexState");
+    static_assert(offsetof(RenderPipelineDescriptor, primitiveTopology) == offsetof(WGPURenderPipelineDescriptor, primitiveTopology),
+            "offsetof mismatch for RenderPipelineDescriptor::primitiveTopology");
+    static_assert(offsetof(RenderPipelineDescriptor, rasterizationState) == offsetof(WGPURenderPipelineDescriptor, rasterizationState),
+            "offsetof mismatch for RenderPipelineDescriptor::rasterizationState");
+    static_assert(offsetof(RenderPipelineDescriptor, sampleCount) == offsetof(WGPURenderPipelineDescriptor, sampleCount),
+            "offsetof mismatch for RenderPipelineDescriptor::sampleCount");
+    static_assert(offsetof(RenderPipelineDescriptor, depthStencilState) == offsetof(WGPURenderPipelineDescriptor, depthStencilState),
+            "offsetof mismatch for RenderPipelineDescriptor::depthStencilState");
+    static_assert(offsetof(RenderPipelineDescriptor, colorStateCount) == offsetof(WGPURenderPipelineDescriptor, colorStateCount),
+            "offsetof mismatch for RenderPipelineDescriptor::colorStateCount");
+    static_assert(offsetof(RenderPipelineDescriptor, colorStates) == offsetof(WGPURenderPipelineDescriptor, colorStates),
+            "offsetof mismatch for RenderPipelineDescriptor::colorStates");
+    static_assert(offsetof(RenderPipelineDescriptor, sampleMask) == offsetof(WGPURenderPipelineDescriptor, sampleMask),
+            "offsetof mismatch for RenderPipelineDescriptor::sampleMask");
+    static_assert(offsetof(RenderPipelineDescriptor, alphaToCoverageEnabled) == offsetof(WGPURenderPipelineDescriptor, alphaToCoverageEnabled),
+            "offsetof mismatch for RenderPipelineDescriptor::alphaToCoverageEnabled");
+
+
+
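+    // The blocks below cover the opaque handle types: each C++ wrapper is asserted to have the
+    // same size and alignment as its C handle, and WGPUReference/WGPURelease forward to the C
+    // reference-counting entry points, with a null check so empty wrappers are safe to copy and
+    // destroy.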
+    static_assert(sizeof(BindGroup) == sizeof(WGPUBindGroup), "sizeof mismatch for BindGroup");
+    static_assert(alignof(BindGroup) == alignof(WGPUBindGroup), "alignof mismatch for BindGroup");
+
+
+
+    void BindGroup::WGPUReference(WGPUBindGroup handle) {
+        if (handle != nullptr) {
+            wgpuBindGroupReference(handle);
+        }
+    }
+    void BindGroup::WGPURelease(WGPUBindGroup handle) {
+        if (handle != nullptr) {
+            wgpuBindGroupRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(BindGroupLayout) == sizeof(WGPUBindGroupLayout), "sizeof mismatch for BindGroupLayout");
+    static_assert(alignof(BindGroupLayout) == alignof(WGPUBindGroupLayout), "alignof mismatch for BindGroupLayout");
+
+
+
+    void BindGroupLayout::WGPUReference(WGPUBindGroupLayout handle) {
+        if (handle != nullptr) {
+            wgpuBindGroupLayoutReference(handle);
+        }
+    }
+    void BindGroupLayout::WGPURelease(WGPUBindGroupLayout handle) {
+        if (handle != nullptr) {
+            wgpuBindGroupLayoutRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(Buffer) == sizeof(WGPUBuffer), "sizeof mismatch for Buffer");
+    static_assert(alignof(Buffer) == alignof(WGPUBuffer), "alignof mismatch for Buffer");
+
+
+
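+    // Buffer methods are thin forwards to the wgpuBuffer* C functions on the handle returned by
+    // Get(); the casts on the void/char pointer parameters mirror the C signatures and are
+    // no-ops here.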
+        void Buffer::Destroy() const {
+        wgpuBufferDestroy(Get());
+    }
+        void Buffer::MapReadAsync(BufferMapReadCallback callback, void * userdata) const {
+        wgpuBufferMapReadAsync(Get(), callback, reinterpret_cast<void * >(userdata));
+    }
+        void Buffer::MapWriteAsync(BufferMapWriteCallback callback, void * userdata) const {
+        wgpuBufferMapWriteAsync(Get(), callback, reinterpret_cast<void * >(userdata));
+    }
+        void Buffer::SetSubData(uint64_t start, uint64_t count, void const * data) const {
+        wgpuBufferSetSubData(Get(), start, count, reinterpret_cast<void const * >(data));
+    }
+        void Buffer::Unmap() const {
+        wgpuBufferUnmap(Get());
+    }
+    void Buffer::WGPUReference(WGPUBuffer handle) {
+        if (handle != nullptr) {
+            wgpuBufferReference(handle);
+        }
+    }
+    void Buffer::WGPURelease(WGPUBuffer handle) {
+        if (handle != nullptr) {
+            wgpuBufferRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(CommandBuffer) == sizeof(WGPUCommandBuffer), "sizeof mismatch for CommandBuffer");
+    static_assert(alignof(CommandBuffer) == alignof(WGPUCommandBuffer), "alignof mismatch for CommandBuffer");
+
+
+
+    void CommandBuffer::WGPUReference(WGPUCommandBuffer handle) {
+        if (handle != nullptr) {
+            wgpuCommandBufferReference(handle);
+        }
+    }
+    void CommandBuffer::WGPURelease(WGPUCommandBuffer handle) {
+        if (handle != nullptr) {
+            wgpuCommandBufferRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(CommandEncoder) == sizeof(WGPUCommandEncoder), "sizeof mismatch for CommandEncoder");
+    static_assert(alignof(CommandEncoder) == alignof(WGPUCommandEncoder), "alignof mismatch for CommandEncoder");
+
+
+
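+    // CommandEncoder methods reinterpret_cast descriptor pointers to their C counterparts (valid
+    // because of the layout asserts above) and wrap any returned handle with Acquire(), which
+    // adopts the reference returned by the C API instead of adding another one.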
+        ComputePassEncoder CommandEncoder::BeginComputePass(ComputePassDescriptor const * descriptor) const {
+        auto result = wgpuCommandEncoderBeginComputePass(Get(), reinterpret_cast<WGPUComputePassDescriptor const * >(descriptor));
+        return ComputePassEncoder::Acquire(result);
+    }
+        RenderPassEncoder CommandEncoder::BeginRenderPass(RenderPassDescriptor const * descriptor) const {
+        auto result = wgpuCommandEncoderBeginRenderPass(Get(), reinterpret_cast<WGPURenderPassDescriptor const * >(descriptor));
+        return RenderPassEncoder::Acquire(result);
+    }
+        void CommandEncoder::CopyBufferToBuffer(Buffer const& source, uint64_t sourceOffset, Buffer const& destination, uint64_t destinationOffset, uint64_t size) const {
+        wgpuCommandEncoderCopyBufferToBuffer(Get(), source.Get(), sourceOffset, destination.Get(), destinationOffset, size);
+    }
+        void CommandEncoder::CopyBufferToTexture(BufferCopyView const * source, TextureCopyView const * destination, Extent3D const * copySize) const {
+        wgpuCommandEncoderCopyBufferToTexture(Get(), reinterpret_cast<WGPUBufferCopyView const * >(source), reinterpret_cast<WGPUTextureCopyView const * >(destination), reinterpret_cast<WGPUExtent3D const * >(copySize));
+    }
+        void CommandEncoder::CopyTextureToBuffer(TextureCopyView const * source, BufferCopyView const * destination, Extent3D const * copySize) const {
+        wgpuCommandEncoderCopyTextureToBuffer(Get(), reinterpret_cast<WGPUTextureCopyView const * >(source), reinterpret_cast<WGPUBufferCopyView const * >(destination), reinterpret_cast<WGPUExtent3D const * >(copySize));
+    }
+        void CommandEncoder::CopyTextureToTexture(TextureCopyView const * source, TextureCopyView const * destination, Extent3D const * copySize) const {
+        wgpuCommandEncoderCopyTextureToTexture(Get(), reinterpret_cast<WGPUTextureCopyView const * >(source), reinterpret_cast<WGPUTextureCopyView const * >(destination), reinterpret_cast<WGPUExtent3D const * >(copySize));
+    }
+        CommandBuffer CommandEncoder::Finish(CommandBufferDescriptor const * descriptor) const {
+        auto result = wgpuCommandEncoderFinish(Get(), reinterpret_cast<WGPUCommandBufferDescriptor const * >(descriptor));
+        return CommandBuffer::Acquire(result);
+    }
+        void CommandEncoder::InsertDebugMarker(char const * groupLabel) const {
+        wgpuCommandEncoderInsertDebugMarker(Get(), reinterpret_cast<char const * >(groupLabel));
+    }
+        void CommandEncoder::PopDebugGroup() const {
+        wgpuCommandEncoderPopDebugGroup(Get());
+    }
+        void CommandEncoder::PushDebugGroup(char const * groupLabel) const {
+        wgpuCommandEncoderPushDebugGroup(Get(), reinterpret_cast<char const * >(groupLabel));
+    }
+    void CommandEncoder::WGPUReference(WGPUCommandEncoder handle) {
+        if (handle != nullptr) {
+            wgpuCommandEncoderReference(handle);
+        }
+    }
+    void CommandEncoder::WGPURelease(WGPUCommandEncoder handle) {
+        if (handle != nullptr) {
+            wgpuCommandEncoderRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(ComputePassEncoder) == sizeof(WGPUComputePassEncoder), "sizeof mismatch for ComputePassEncoder");
+    static_assert(alignof(ComputePassEncoder) == alignof(WGPUComputePassEncoder), "alignof mismatch for ComputePassEncoder");
+
+
+
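+    // ComputePassEncoder forwards Dispatch/DispatchIndirect, debug-marker, pipeline, and bind
+    // group calls (including dynamic offsets) to the wgpuComputePassEncoder* functions, and
+    // EndPass() closes the recording.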
+        void ComputePassEncoder::Dispatch(uint32_t x, uint32_t y, uint32_t z) const {
+        wgpuComputePassEncoderDispatch(Get(), x, y, z);
+    }
+        void ComputePassEncoder::DispatchIndirect(Buffer const& indirectBuffer, uint64_t indirectOffset) const {
+        wgpuComputePassEncoderDispatchIndirect(Get(), indirectBuffer.Get(), indirectOffset);
+    }
+        void ComputePassEncoder::EndPass() const {
+        wgpuComputePassEncoderEndPass(Get());
+    }
+        void ComputePassEncoder::InsertDebugMarker(char const * groupLabel) const {
+        wgpuComputePassEncoderInsertDebugMarker(Get(), reinterpret_cast<char const * >(groupLabel));
+    }
+        void ComputePassEncoder::PopDebugGroup() const {
+        wgpuComputePassEncoderPopDebugGroup(Get());
+    }
+        void ComputePassEncoder::PushDebugGroup(char const * groupLabel) const {
+        wgpuComputePassEncoderPushDebugGroup(Get(), reinterpret_cast<char const * >(groupLabel));
+    }
+        void ComputePassEncoder::SetBindGroup(uint32_t groupIndex, BindGroup const& group, uint32_t dynamicOffsetCount, uint32_t const * dynamicOffsets) const {
+        wgpuComputePassEncoderSetBindGroup(Get(), groupIndex, group.Get(), dynamicOffsetCount, reinterpret_cast<uint32_t const * >(dynamicOffsets));
+    }
+        void ComputePassEncoder::SetPipeline(ComputePipeline const& pipeline) const {
+        wgpuComputePassEncoderSetPipeline(Get(), pipeline.Get());
+    }
+    void ComputePassEncoder::WGPUReference(WGPUComputePassEncoder handle) {
+        if (handle != nullptr) {
+            wgpuComputePassEncoderReference(handle);
+        }
+    }
+    void ComputePassEncoder::WGPURelease(WGPUComputePassEncoder handle) {
+        if (handle != nullptr) {
+            wgpuComputePassEncoderRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(ComputePipeline) == sizeof(WGPUComputePipeline), "sizeof mismatch for ComputePipeline");
+    static_assert(alignof(ComputePipeline) == alignof(WGPUComputePipeline), "alignof mismatch for ComputePipeline");
+
+
+
+        BindGroupLayout ComputePipeline::GetBindGroupLayout(uint32_t groupIndex) const {
+        auto result = wgpuComputePipelineGetBindGroupLayout(Get(), groupIndex);
+        return BindGroupLayout::Acquire(result);
+    }
+    void ComputePipeline::WGPUReference(WGPUComputePipeline handle) {
+        if (handle != nullptr) {
+            wgpuComputePipelineReference(handle);
+        }
+    }
+    void ComputePipeline::WGPURelease(WGPUComputePipeline handle) {
+        if (handle != nullptr) {
+            wgpuComputePipelineRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(Device) == sizeof(WGPUDevice), "sizeof mismatch for Device");
+    static_assert(alignof(Device) == alignof(WGPUDevice), "alignof mismatch for Device");
+
+
+
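+    // Device::Create* factories forward to wgpuDeviceCreate* and adopt the returned handle;
+    // CreateBufferMapped additionally repackages the C result struct into
+    // CreateBufferMappedResult with an owning Buffer wrapper plus the mapped pointer and length.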
+        BindGroup Device::CreateBindGroup(BindGroupDescriptor const * descriptor) const {
+        auto result = wgpuDeviceCreateBindGroup(Get(), reinterpret_cast<WGPUBindGroupDescriptor const * >(descriptor));
+        return BindGroup::Acquire(result);
+    }
+        BindGroupLayout Device::CreateBindGroupLayout(BindGroupLayoutDescriptor const * descriptor) const {
+        auto result = wgpuDeviceCreateBindGroupLayout(Get(), reinterpret_cast<WGPUBindGroupLayoutDescriptor const * >(descriptor));
+        return BindGroupLayout::Acquire(result);
+    }
+        Buffer Device::CreateBuffer(BufferDescriptor const * descriptor) const {
+        auto result = wgpuDeviceCreateBuffer(Get(), reinterpret_cast<WGPUBufferDescriptor const * >(descriptor));
+        return Buffer::Acquire(result);
+    }
+        CreateBufferMappedResult Device::CreateBufferMapped(BufferDescriptor const * descriptor) const {
+        auto result = wgpuDeviceCreateBufferMapped(Get(), reinterpret_cast<WGPUBufferDescriptor const * >(descriptor));
+        return CreateBufferMappedResult {
+            Buffer::Acquire(result.buffer),
+            result.dataLength,
+            result.data
+        };
+    }
+        CommandEncoder Device::CreateCommandEncoder(CommandEncoderDescriptor const * descriptor) const {
+        auto result = wgpuDeviceCreateCommandEncoder(Get(), reinterpret_cast<WGPUCommandEncoderDescriptor const * >(descriptor));
+        return CommandEncoder::Acquire(result);
+    }
+        ComputePipeline Device::CreateComputePipeline(ComputePipelineDescriptor const * descriptor) const {
+        auto result = wgpuDeviceCreateComputePipeline(Get(), reinterpret_cast<WGPUComputePipelineDescriptor const * >(descriptor));
+        return ComputePipeline::Acquire(result);
+    }
+        PipelineLayout Device::CreatePipelineLayout(PipelineLayoutDescriptor const * descriptor) const {
+        auto result = wgpuDeviceCreatePipelineLayout(Get(), reinterpret_cast<WGPUPipelineLayoutDescriptor const * >(descriptor));
+        return PipelineLayout::Acquire(result);
+    }
+        Queue Device::CreateQueue() const {
+        auto result = wgpuDeviceCreateQueue(Get());
+        return Queue::Acquire(result);
+    }
+        RenderBundleEncoder Device::CreateRenderBundleEncoder(RenderBundleEncoderDescriptor const * descriptor) const {
+        auto result = wgpuDeviceCreateRenderBundleEncoder(Get(), reinterpret_cast<WGPURenderBundleEncoderDescriptor const * >(descriptor));
+        return RenderBundleEncoder::Acquire(result);
+    }
+        RenderPipeline Device::CreateRenderPipeline(RenderPipelineDescriptor const * descriptor) const {
+        auto result = wgpuDeviceCreateRenderPipeline(Get(), reinterpret_cast<WGPURenderPipelineDescriptor const * >(descriptor));
+        return RenderPipeline::Acquire(result);
+    }
+        Sampler Device::CreateSampler(SamplerDescriptor const * descriptor) const {
+        auto result = wgpuDeviceCreateSampler(Get(), reinterpret_cast<WGPUSamplerDescriptor const * >(descriptor));
+        return Sampler::Acquire(result);
+    }
+        ShaderModule Device::CreateShaderModule(ShaderModuleDescriptor const * descriptor) const {
+        auto result = wgpuDeviceCreateShaderModule(Get(), reinterpret_cast<WGPUShaderModuleDescriptor const * >(descriptor));
+        return ShaderModule::Acquire(result);
+    }
+        SwapChain Device::CreateSwapChain(Surface const& surface, SwapChainDescriptor const * descriptor) const {
+        auto result = wgpuDeviceCreateSwapChain(Get(), surface.Get(), reinterpret_cast<WGPUSwapChainDescriptor const * >(descriptor));
+        return SwapChain::Acquire(result);
+    }
+        Texture Device::CreateTexture(TextureDescriptor const * descriptor) const {
+        auto result = wgpuDeviceCreateTexture(Get(), reinterpret_cast<WGPUTextureDescriptor const * >(descriptor));
+        return Texture::Acquire(result);
+    }
+        Queue Device::GetDefaultQueue() const {
+        auto result = wgpuDeviceGetDefaultQueue(Get());
+        return Queue::Acquire(result);
+    }
+        void Device::InjectError(ErrorType type, char const * message) const {
+        wgpuDeviceInjectError(Get(), static_cast<WGPUErrorType>(type), reinterpret_cast<char const * >(message));
+    }
+        void Device::LoseForTesting() const {
+        wgpuDeviceLoseForTesting(Get());
+    }
+        bool Device::PopErrorScope(ErrorCallback callback, void * userdata) const {
+        auto result = wgpuDevicePopErrorScope(Get(), callback, reinterpret_cast<void * >(userdata));
+        return result;
+    }
+        void Device::PushErrorScope(ErrorFilter filter) const {
+        wgpuDevicePushErrorScope(Get(), static_cast<WGPUErrorFilter>(filter));
+    }
+        void Device::SetDeviceLostCallback(DeviceLostCallback callback, void * userdata) const {
+        wgpuDeviceSetDeviceLostCallback(Get(), callback, reinterpret_cast<void * >(userdata));
+    }
+        void Device::SetUncapturedErrorCallback(ErrorCallback callback, void * userdata) const {
+        wgpuDeviceSetUncapturedErrorCallback(Get(), callback, reinterpret_cast<void * >(userdata));
+    }
+        void Device::Tick() const {
+        wgpuDeviceTick(Get());
+    }
+    void Device::WGPUReference(WGPUDevice handle) {
+        if (handle != nullptr) {
+            wgpuDeviceReference(handle);
+        }
+    }
+    void Device::WGPURelease(WGPUDevice handle) {
+        if (handle != nullptr) {
+            wgpuDeviceRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(Fence) == sizeof(WGPUFence), "sizeof mismatch for Fence");
+    static_assert(alignof(Fence) == alignof(WGPUFence), "alignof mismatch for Fence");
+
+
+
+        uint64_t Fence::GetCompletedValue() const {
+        auto result = wgpuFenceGetCompletedValue(Get());
+        return result;
+    }
+        void Fence::OnCompletion(uint64_t value, FenceOnCompletionCallback callback, void * userdata) const {
+        wgpuFenceOnCompletion(Get(), value, callback, reinterpret_cast<void * >(userdata));
+    }
+    void Fence::WGPUReference(WGPUFence handle) {
+        if (handle != nullptr) {
+            wgpuFenceReference(handle);
+        }
+    }
+    void Fence::WGPURelease(WGPUFence handle) {
+        if (handle != nullptr) {
+            wgpuFenceRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(Instance) == sizeof(WGPUInstance), "sizeof mismatch for Instance");
+    static_assert(alignof(Instance) == alignof(WGPUInstance), "alignof mismatch for Instance");
+
+
+
+        Surface Instance::CreateSurface(SurfaceDescriptor const * descriptor) const {
+        auto result = wgpuInstanceCreateSurface(Get(), reinterpret_cast<WGPUSurfaceDescriptor const * >(descriptor));
+        return Surface::Acquire(result);
+    }
+    void Instance::WGPUReference(WGPUInstance handle) {
+        if (handle != nullptr) {
+            wgpuInstanceReference(handle);
+        }
+    }
+    void Instance::WGPURelease(WGPUInstance handle) {
+        if (handle != nullptr) {
+            wgpuInstanceRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(PipelineLayout) == sizeof(WGPUPipelineLayout), "sizeof mismatch for PipelineLayout");
+    static_assert(alignof(PipelineLayout) == alignof(WGPUPipelineLayout), "alignof mismatch for PipelineLayout");
+
+
+
+    void PipelineLayout::WGPUReference(WGPUPipelineLayout handle) {
+        if (handle != nullptr) {
+            wgpuPipelineLayoutReference(handle);
+        }
+    }
+    void PipelineLayout::WGPURelease(WGPUPipelineLayout handle) {
+        if (handle != nullptr) {
+            wgpuPipelineLayoutRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(Queue) == sizeof(WGPUQueue), "sizeof mismatch for Queue");
+    static_assert(alignof(Queue) == alignof(WGPUQueue), "alignof mismatch for Queue");
+
+
+
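+    // Queue::Submit passes the CommandBuffer array straight to the C API; the layout asserts
+    // guarantee that an array of wrappers is bit-identical to an array of WGPUCommandBuffer
+    // handles, so the reinterpret_cast is safe.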
+        Fence Queue::CreateFence(FenceDescriptor const * descriptor) const {
+        auto result = wgpuQueueCreateFence(Get(), reinterpret_cast<WGPUFenceDescriptor const * >(descriptor));
+        return Fence::Acquire(result);
+    }
+        void Queue::Signal(Fence const& fence, uint64_t signalValue) const {
+        wgpuQueueSignal(Get(), fence.Get(), signalValue);
+    }
+        void Queue::Submit(uint32_t commandCount, CommandBuffer const * commands) const {
+        wgpuQueueSubmit(Get(), commandCount, reinterpret_cast<WGPUCommandBuffer const * >(commands));
+    }
+    void Queue::WGPUReference(WGPUQueue handle) {
+        if (handle != nullptr) {
+            wgpuQueueReference(handle);
+        }
+    }
+    void Queue::WGPURelease(WGPUQueue handle) {
+        if (handle != nullptr) {
+            wgpuQueueRelease(handle);
+        }
+    }
+
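+    // Illustrative use of these wrappers (placeholder names, assuming the implementation accepts
+    // a null CommandBufferDescriptor):
+    //   wgpu::CommandBuffer cb = encoder.Finish(nullptr);
+    //   queue.Submit(1, &cb);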
+
+    static_assert(sizeof(RenderBundle) == sizeof(WGPURenderBundle), "sizeof mismatch for RenderBundle");
+    static_assert(alignof(RenderBundle) == alignof(WGPURenderBundle), "alignof mismatch for RenderBundle");
+
+
+
+    void RenderBundle::WGPUReference(WGPURenderBundle handle) {
+        if (handle != nullptr) {
+            wgpuRenderBundleReference(handle);
+        }
+    }
+    void RenderBundle::WGPURelease(WGPURenderBundle handle) {
+        if (handle != nullptr) {
+            wgpuRenderBundleRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(RenderBundleEncoder) == sizeof(WGPURenderBundleEncoder), "sizeof mismatch for RenderBundleEncoder");
+    static_assert(alignof(RenderBundleEncoder) == alignof(WGPURenderBundleEncoder), "alignof mismatch for RenderBundleEncoder");
+
+
+
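+    // RenderBundleEncoder records the same draw, bind group, index/vertex buffer, and debug
+    // commands as a render pass and bakes them into a reusable RenderBundle via Finish().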
+        void RenderBundleEncoder::Draw(uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) const {
+        wgpuRenderBundleEncoderDraw(Get(), vertexCount, instanceCount, firstVertex, firstInstance);
+    }
+        void RenderBundleEncoder::DrawIndexed(uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance) const {
+        wgpuRenderBundleEncoderDrawIndexed(Get(), indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
+    }
+        void RenderBundleEncoder::DrawIndexedIndirect(Buffer const& indirectBuffer, uint64_t indirectOffset) const {
+        wgpuRenderBundleEncoderDrawIndexedIndirect(Get(), indirectBuffer.Get(), indirectOffset);
+    }
+        void RenderBundleEncoder::DrawIndirect(Buffer const& indirectBuffer, uint64_t indirectOffset) const {
+        wgpuRenderBundleEncoderDrawIndirect(Get(), indirectBuffer.Get(), indirectOffset);
+    }
+        RenderBundle RenderBundleEncoder::Finish(RenderBundleDescriptor const * descriptor) const {
+        auto result = wgpuRenderBundleEncoderFinish(Get(), reinterpret_cast<WGPURenderBundleDescriptor const * >(descriptor));
+        return RenderBundle::Acquire(result);
+    }
+        void RenderBundleEncoder::InsertDebugMarker(char const * groupLabel) const {
+        wgpuRenderBundleEncoderInsertDebugMarker(Get(), reinterpret_cast<char const * >(groupLabel));
+    }
+        void RenderBundleEncoder::PopDebugGroup() const {
+        wgpuRenderBundleEncoderPopDebugGroup(Get());
+    }
+        void RenderBundleEncoder::PushDebugGroup(char const * groupLabel) const {
+        wgpuRenderBundleEncoderPushDebugGroup(Get(), reinterpret_cast<char const * >(groupLabel));
+    }
+        void RenderBundleEncoder::SetBindGroup(uint32_t groupIndex, BindGroup const& group, uint32_t dynamicOffsetCount, uint32_t const * dynamicOffsets) const {
+        wgpuRenderBundleEncoderSetBindGroup(Get(), groupIndex, group.Get(), dynamicOffsetCount, reinterpret_cast<uint32_t const * >(dynamicOffsets));
+    }
+        void RenderBundleEncoder::SetIndexBuffer(Buffer const& buffer, uint64_t offset, uint64_t size) const {
+        wgpuRenderBundleEncoderSetIndexBuffer(Get(), buffer.Get(), offset, size);
+    }
+        void RenderBundleEncoder::SetPipeline(RenderPipeline const& pipeline) const {
+        wgpuRenderBundleEncoderSetPipeline(Get(), pipeline.Get());
+    }
+        void RenderBundleEncoder::SetVertexBuffer(uint32_t slot, Buffer const& buffer, uint64_t offset, uint64_t size) const {
+        wgpuRenderBundleEncoderSetVertexBuffer(Get(), slot, buffer.Get(), offset, size);
+    }
+    void RenderBundleEncoder::WGPUReference(WGPURenderBundleEncoder handle) {
+        if (handle != nullptr) {
+            wgpuRenderBundleEncoderReference(handle);
+        }
+    }
+    void RenderBundleEncoder::WGPURelease(WGPURenderBundleEncoder handle) {
+        if (handle != nullptr) {
+            wgpuRenderBundleEncoderRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(RenderPassEncoder) == sizeof(WGPURenderPassEncoder), "sizeof mismatch for RenderPassEncoder");
+    static_assert(alignof(RenderPassEncoder) == alignof(WGPURenderPassEncoder), "alignof mismatch for RenderPassEncoder");
+
+
+
+        void RenderPassEncoder::Draw(uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) const {
+        wgpuRenderPassEncoderDraw(Get(), vertexCount, instanceCount, firstVertex, firstInstance);
+    }
+        void RenderPassEncoder::DrawIndexed(uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance) const {
+        wgpuRenderPassEncoderDrawIndexed(Get(), indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
+    }
+        void RenderPassEncoder::DrawIndexedIndirect(Buffer const& indirectBuffer, uint64_t indirectOffset) const {
+        wgpuRenderPassEncoderDrawIndexedIndirect(Get(), indirectBuffer.Get(), indirectOffset);
+    }
+        void RenderPassEncoder::DrawIndirect(Buffer const& indirectBuffer, uint64_t indirectOffset) const {
+        wgpuRenderPassEncoderDrawIndirect(Get(), indirectBuffer.Get(), indirectOffset);
+    }
+        void RenderPassEncoder::EndPass() const {
+        wgpuRenderPassEncoderEndPass(Get());
+    }
+        void RenderPassEncoder::ExecuteBundles(uint32_t bundlesCount, RenderBundle const * bundles) const {
+        wgpuRenderPassEncoderExecuteBundles(Get(), bundlesCount, reinterpret_cast<WGPURenderBundle const * >(bundles));
+    }
+        void RenderPassEncoder::InsertDebugMarker(char const * groupLabel) const {
+        wgpuRenderPassEncoderInsertDebugMarker(Get(), reinterpret_cast<char const * >(groupLabel));
+    }
+        void RenderPassEncoder::PopDebugGroup() const {
+        wgpuRenderPassEncoderPopDebugGroup(Get());
+    }
+        void RenderPassEncoder::PushDebugGroup(char const * groupLabel) const {
+        wgpuRenderPassEncoderPushDebugGroup(Get(), reinterpret_cast<char const * >(groupLabel));
+    }
+        void RenderPassEncoder::SetBindGroup(uint32_t groupIndex, BindGroup const& group, uint32_t dynamicOffsetCount, uint32_t const * dynamicOffsets) const {
+        wgpuRenderPassEncoderSetBindGroup(Get(), groupIndex, group.Get(), dynamicOffsetCount, reinterpret_cast<uint32_t const * >(dynamicOffsets));
+    }
+        void RenderPassEncoder::SetBlendColor(Color const * color) const {
+        wgpuRenderPassEncoderSetBlendColor(Get(), reinterpret_cast<WGPUColor const * >(color));
+    }
+        void RenderPassEncoder::SetIndexBuffer(Buffer const& buffer, uint64_t offset, uint64_t size) const {
+        wgpuRenderPassEncoderSetIndexBuffer(Get(), buffer.Get(), offset, size);
+    }
+        void RenderPassEncoder::SetPipeline(RenderPipeline const& pipeline) const {
+        wgpuRenderPassEncoderSetPipeline(Get(), pipeline.Get());
+    }
+        void RenderPassEncoder::SetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height) const {
+        wgpuRenderPassEncoderSetScissorRect(Get(), x, y, width, height);
+    }
+        void RenderPassEncoder::SetStencilReference(uint32_t reference) const {
+        wgpuRenderPassEncoderSetStencilReference(Get(), reference);
+    }
+        void RenderPassEncoder::SetVertexBuffer(uint32_t slot, Buffer const& buffer, uint64_t offset, uint64_t size) const {
+        wgpuRenderPassEncoderSetVertexBuffer(Get(), slot, buffer.Get(), offset, size);
+    }
+        void RenderPassEncoder::SetViewport(float x, float y, float width, float height, float minDepth, float maxDepth) const {
+        wgpuRenderPassEncoderSetViewport(Get(), x, y, width, height, minDepth, maxDepth);
+    }
+    void RenderPassEncoder::WGPUReference(WGPURenderPassEncoder handle) {
+        if (handle != nullptr) {
+            wgpuRenderPassEncoderReference(handle);
+        }
+    }
+    void RenderPassEncoder::WGPURelease(WGPURenderPassEncoder handle) {
+        if (handle != nullptr) {
+            wgpuRenderPassEncoderRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(RenderPipeline) == sizeof(WGPURenderPipeline), "sizeof mismatch for RenderPipeline");
+    static_assert(alignof(RenderPipeline) == alignof(WGPURenderPipeline), "alignof mismatch for RenderPipeline");
+
+
+
+        BindGroupLayout RenderPipeline::GetBindGroupLayout(uint32_t groupIndex) const {
+        auto result = wgpuRenderPipelineGetBindGroupLayout(Get(), groupIndex);
+        return BindGroupLayout::Acquire(result);
+    }
+    void RenderPipeline::WGPUReference(WGPURenderPipeline handle) {
+        if (handle != nullptr) {
+            wgpuRenderPipelineReference(handle);
+        }
+    }
+    void RenderPipeline::WGPURelease(WGPURenderPipeline handle) {
+        if (handle != nullptr) {
+            wgpuRenderPipelineRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(Sampler) == sizeof(WGPUSampler), "sizeof mismatch for Sampler");
+    static_assert(alignof(Sampler) == alignof(WGPUSampler), "alignof mismatch for Sampler");
+
+
+
+    void Sampler::WGPUReference(WGPUSampler handle) {
+        if (handle != nullptr) {
+            wgpuSamplerReference(handle);
+        }
+    }
+    void Sampler::WGPURelease(WGPUSampler handle) {
+        if (handle != nullptr) {
+            wgpuSamplerRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(ShaderModule) == sizeof(WGPUShaderModule), "sizeof mismatch for ShaderModule");
+    static_assert(alignof(ShaderModule) == alignof(WGPUShaderModule), "alignof mismatch for ShaderModule");
+
+
+
+    void ShaderModule::WGPUReference(WGPUShaderModule handle) {
+        if (handle != nullptr) {
+            wgpuShaderModuleReference(handle);
+        }
+    }
+    void ShaderModule::WGPURelease(WGPUShaderModule handle) {
+        if (handle != nullptr) {
+            wgpuShaderModuleRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(Surface) == sizeof(WGPUSurface), "sizeof mismatch for Surface");
+    static_assert(alignof(Surface) == alignof(WGPUSurface), "alignof mismatch for Surface");
+
+
+
+    void Surface::WGPUReference(WGPUSurface handle) {
+        if (handle != nullptr) {
+            wgpuSurfaceReference(handle);
+        }
+    }
+    void Surface::WGPURelease(WGPUSurface handle) {
+        if (handle != nullptr) {
+            wgpuSurfaceRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(SwapChain) == sizeof(WGPUSwapChain), "sizeof mismatch for SwapChain");
+    static_assert(alignof(SwapChain) == alignof(WGPUSwapChain), "alignof mismatch for SwapChain");
+
+
+
+        void SwapChain::Configure(TextureFormat format, TextureUsage allowedUsage, uint32_t width, uint32_t height) const {
+        wgpuSwapChainConfigure(Get(), static_cast<WGPUTextureFormat>(format), static_cast<WGPUTextureUsage>(allowedUsage), width, height);
+    }
+        TextureView SwapChain::GetCurrentTextureView() const {
+        auto result = wgpuSwapChainGetCurrentTextureView(Get());
+        return TextureView::Acquire(result);
+    }
+        void SwapChain::Present() const {
+        wgpuSwapChainPresent(Get());
+    }
+    void SwapChain::WGPUReference(WGPUSwapChain handle) {
+        if (handle != nullptr) {
+            wgpuSwapChainReference(handle);
+        }
+    }
+    void SwapChain::WGPURelease(WGPUSwapChain handle) {
+        if (handle != nullptr) {
+            wgpuSwapChainRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(Texture) == sizeof(WGPUTexture), "sizeof mismatch for Texture");
+    static_assert(alignof(Texture) == alignof(WGPUTexture), "alignof mismatch for Texture");
+
+
+
+        TextureView Texture::CreateView(TextureViewDescriptor const * descriptor) const {
+        auto result = wgpuTextureCreateView(Get(), reinterpret_cast<WGPUTextureViewDescriptor const * >(descriptor));
+        return TextureView::Acquire(result);
+    }
+        void Texture::Destroy() const {
+        wgpuTextureDestroy(Get());
+    }
+    void Texture::WGPUReference(WGPUTexture handle) {
+        if (handle != nullptr) {
+            wgpuTextureReference(handle);
+        }
+    }
+    void Texture::WGPURelease(WGPUTexture handle) {
+        if (handle != nullptr) {
+            wgpuTextureRelease(handle);
+        }
+    }
+
+
+    static_assert(sizeof(TextureView) == sizeof(WGPUTextureView), "sizeof mismatch for TextureView");
+    static_assert(alignof(TextureView) == alignof(WGPUTextureView), "alignof mismatch for TextureView");
+
+
+
+    void TextureView::WGPUReference(WGPUTextureView handle) {
+        if (handle != nullptr) {
+            wgpuTextureViewReference(handle);
+        }
+    }
+    void TextureView::WGPURelease(WGPUTextureView handle) {
+        if (handle != nullptr) {
+            wgpuTextureViewRelease(handle);
+        }
+    }
+
+
+    Instance CreateInstance(const InstanceDescriptor* descriptor) {
+        const WGPUInstanceDescriptor* cDescriptor =
+            reinterpret_cast<const WGPUInstanceDescriptor*>(descriptor);
+        return Instance::Acquire(wgpuCreateInstance(cDescriptor));
+    }
+
+    Proc GetProcAddress(Device const& device, const char* procName) {
+        return reinterpret_cast<Proc>(wgpuGetProcAddress(device.Get(), procName));
+    }
+
+}
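
The wrapper above follows one pattern throughout: every `wgpu::` class owns a `WGPU*` C handle, `Acquire()` adopts a handle without adding a reference, and the `WGPUReference`/`WGPURelease` hooks tie the reference count to object lifetime. A minimal sketch of how the render-pass portion of this API is driven (not taken from the backend code; it assumes a valid `device`, `queue`, `swapChain` and `pipeline` created elsewhere with the wrappers defined earlier in this file):

	wgpu::TextureView view = swapChain.GetCurrentTextureView();

	wgpu::RenderPassColorAttachmentDescriptor color = {};
	color.attachment = view;                    // render into the current backbuffer
	color.loadOp     = wgpu::LoadOp::Clear;
	color.storeOp    = wgpu::StoreOp::Store;

	wgpu::RenderPassDescriptor pass = {};
	pass.colorAttachmentCount = 1;
	pass.colorAttachments     = &color;

	wgpu::CommandEncoder    encoder = device.CreateCommandEncoder(nullptr);
	wgpu::RenderPassEncoder render  = encoder.BeginRenderPass(&pass);
	render.SetPipeline(pipeline);               // RenderPassEncoder::SetPipeline above
	render.Draw(3, 1, 0, 0);                    // RenderPassEncoder::Draw above
	render.EndPass();

	wgpu::CommandBuffer commands = encoder.Finish(nullptr);
	queue.Submit(1, &commands);
	swapChain.Present();                        // SwapChain::Present above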

+ 5 - 0
bindings/cs/bgfx.cs

@@ -1371,6 +1371,11 @@ public static partial class bgfx
 		/// </summary>
 		Vulkan,
 	
+		/// <summary>
+		/// WebGPU
+		/// </summary>
+		WebGPU,
+	
 		Count
 	}
 	

+ 1 - 0
bindings/d/types.d

@@ -435,6 +435,7 @@ enum bgfx_renderer_type_t
 	BGFX_RENDERER_TYPE_OPENGLES, /// OpenGL ES 2.0+
 	BGFX_RENDERER_TYPE_OPENGL, /// OpenGL 2.1+
 	BGFX_RENDERER_TYPE_VULKAN, /// Vulkan
+	BGFX_RENDERER_TYPE_WEBGPU, /// WebGPU
 
 	BGFX_RENDERER_TYPE_COUNT
 }

+ 5 - 1
examples/21-deferred/deferred.cpp

@@ -483,7 +483,11 @@ public:
 						gbufferAt[1].init(m_gbufferTex[1]);
 					}
 
-					m_gbufferTex[2] = bgfx::createTexture2D(uint16_t(m_width), uint16_t(m_height), false, 1, bgfx::TextureFormat::D24S8, BGFX_TEXTURE_RT | tsFlags);
+					bgfx::TextureFormat::Enum depthFormat = bgfx::getRendererType() == bgfx::RendererType::WebGPU
+						? bgfx::TextureFormat::D32F   // WebGPU cannot sample D24S8 depth textures yet
+						: bgfx::TextureFormat::D24S8;
+
+					m_gbufferTex[2] = bgfx::createTexture2D(uint16_t(m_width), uint16_t(m_height), false, 1, depthFormat, BGFX_TEXTURE_RT | tsFlags);
 					gbufferAt[2].init(m_gbufferTex[2]);
 
 					m_gbuffer = bgfx::createFrameBuffer(BX_COUNTOF(gbufferAt), gbufferAt, true);

+ 5 - 1
examples/31-rsm/reflectiveshadowmap.cpp

@@ -312,9 +312,13 @@ public:
 			;
 
 		// Make gbuffer and related textures
+		bgfx::TextureFormat::Enum depthFormat = bgfx::getRendererType() == bgfx::RendererType::WebGPU
+			? bgfx::TextureFormat::D32F
+			: bgfx::TextureFormat::D24;
+
 		m_gbufferTex[GBUFFER_RT_NORMAL] = bgfx::createTexture2D(bgfx::BackbufferRatio::Equal, false, 1, bgfx::TextureFormat::BGRA8, tsFlags);
 		m_gbufferTex[GBUFFER_RT_COLOR]  = bgfx::createTexture2D(bgfx::BackbufferRatio::Equal, false, 1, bgfx::TextureFormat::BGRA8, tsFlags);
-		m_gbufferTex[GBUFFER_RT_DEPTH]  = bgfx::createTexture2D(bgfx::BackbufferRatio::Equal, false, 1, bgfx::TextureFormat::D24,   tsFlags);
+		m_gbufferTex[GBUFFER_RT_DEPTH]  = bgfx::createTexture2D(bgfx::BackbufferRatio::Equal, false, 1, depthFormat,                tsFlags);
 		m_gbuffer = bgfx::createFrameBuffer(BX_COUNTOF(m_gbufferTex), m_gbufferTex, true);
 
 		// Make light buffer

+ 1 - 0
examples/common/bgfx_utils.cpp

@@ -114,6 +114,7 @@ static bgfx::ShaderHandle loadShader(bx::FileReaderI* _reader, const char* _name
 	case bgfx::RendererType::OpenGL:     shaderPath = "shaders/glsl/";  break;
 	case bgfx::RendererType::OpenGLES:   shaderPath = "shaders/essl/";  break;
 	case bgfx::RendererType::Vulkan:     shaderPath = "shaders/spirv/"; break;
+	case bgfx::RendererType::WebGPU:     shaderPath = "shaders/spirv/"; break;
 
 	case bgfx::RendererType::Count:
 		BX_CHECK(false, "You should not be here!");

+ 1 - 0
include/bgfx/bgfx.h

@@ -61,6 +61,7 @@ namespace bgfx
 			OpenGLES,     //!< OpenGL ES 2.0+
 			OpenGL,       //!< OpenGL 2.1+
 			Vulkan,       //!< Vulkan
+			WebGPU,       //!< WebGPU
 
 			Count
 		};
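
With the enum value exposed in the public header, the backend can be requested explicitly at init time through the regular API. A minimal sketch using standard bgfx usage (not specific to this patch); window/platform data setup is assumed to happen elsewhere:

	bgfx::Init init;
	init.type              = bgfx::RendererType::WebGPU; // or RendererType::Count to let bgfx choose
	init.resolution.width  = 1280;
	init.resolution.height = 720;
	init.resolution.reset  = BGFX_RESET_VSYNC;

	if (!bgfx::init(init) )
	{
		// WebGPU renderer not available (e.g. library built without the webgpu option).
	}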

+ 1 - 0
include/bgfx/c99/bgfx.h

@@ -90,6 +90,7 @@ typedef enum bgfx_renderer_type
     BGFX_RENDERER_TYPE_OPENGLES,              /** ( 7) OpenGL ES 2.0+                 */
     BGFX_RENDERER_TYPE_OPENGL,                /** ( 8) OpenGL 2.1+                    */
     BGFX_RENDERER_TYPE_VULKAN,                /** ( 9) Vulkan                         */
+    BGFX_RENDERER_TYPE_WEBGPU,                /** (10) WebGPU                         */
 
     BGFX_RENDERER_TYPE_COUNT
 

+ 2 - 0
include/bgfx/embedded_shader.h

@@ -53,6 +53,7 @@
 		)
 #define BGFX_PLATFORM_SUPPORTS_SPIRV (0 \
 		|| BX_PLATFORM_ANDROID          \
+		|| BX_PLATFORM_EMSCRIPTEN       \
 		|| BX_PLATFORM_LINUX            \
 		|| BX_PLATFORM_WINDOWS          \
 		|| BX_PLATFORM_OSX              \
@@ -113,6 +114,7 @@
 					BGFX_EMBEDDED_SHADER_ESSL (bgfx::RendererType::OpenGLES,   _name)              \
 					BGFX_EMBEDDED_SHADER_GLSL (bgfx::RendererType::OpenGL,     _name)              \
 					BGFX_EMBEDDED_SHADER_SPIRV(bgfx::RendererType::Vulkan,     _name)              \
+					BGFX_EMBEDDED_SHADER_SPIRV(bgfx::RendererType::WebGPU,     _name)              \
 					{ bgfx::RendererType::Noop,  (const uint8_t*)"VSH\x5\x0\x0\x0\x0\x0\x0", 10 }, \
 					{ bgfx::RendererType::Count, NULL, 0 }                                         \
 				}                                                                                  \
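
With the extra `BGFX_EMBEDDED_SHADER_SPIRV` row, an embedded-shader table now resolves to the SPIR-V blob when the WebGPU renderer is active, with no change on the user side. A short sketch, using hypothetical `vs_example`/`fs_example` headers produced by shaderc:

	#include <bgfx/embedded_shader.h>
	#include "vs_example.bin.h" // hypothetical shaderc output for all target profiles
	#include "fs_example.bin.h"

	static const bgfx::EmbeddedShader s_embeddedShaders[] =
	{
		BGFX_EMBEDDED_SHADER(vs_example),
		BGFX_EMBEDDED_SHADER(fs_example),
		BGFX_EMBEDDED_SHADER_END()
	};

	// Picks the SPIR-V variant when getRendererType() == RendererType::WebGPU.
	const bgfx::RendererType::Enum type = bgfx::getRendererType();
	bgfx::ProgramHandle program = bgfx::createProgram(
		  bgfx::createEmbeddedShader(s_embeddedShaders, type, "vs_example")
		, bgfx::createEmbeddedShader(s_embeddedShaders, type, "fs_example")
		, true // destroy shaders together with the program
		);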

+ 1 - 0
scripts/bgfx.idl

@@ -458,6 +458,7 @@ enum.RendererType { comment = "Renderer types:" }
 	.OpenGLES   --- OpenGL ES 2.0+
 	.OpenGL     --- OpenGL 2.1+
 	.Vulkan     --- Vulkan
+	.WebGPU     --- WebGPU
 	()
 
 --- Access mode enum.

+ 93 - 0
scripts/bgfx.lua

@@ -163,6 +163,43 @@ function bgfxProjectBase(_kind, _defines)
 		path.join(BGFX_DIR, "src/renderer_nvn.h"),
 	})
 
+	if _OPTIONS["webgpu"] then
+		defines {
+			"BGFX_CONFIG_RENDERER_WEBGPU=1",
+		}
+
+		configuration { "asmjs" }
+			defines {
+				"BGFX_CONFIG_RENDERER_OPENGL=0",
+				"BGFX_CONFIG_RENDERER_OPENGLES=0",
+			}
+
+		configuration { "not asmjs" }
+			--local generator = "out/Default"
+			local generator = "out/VS2019"
+
+			includedirs {
+				path.join(DAWN_DIR, "src"),
+				path.join(DAWN_DIR, "src/include"),
+				path.join(DAWN_DIR, "third_party/vulkan-headers/include"),
+				path.join(DAWN_DIR, generator, "gen/src"),
+				path.join(DAWN_DIR, generator, "gen/src/include"),
+			}
+
+			configuration { "vs*" }
+				defines {
+					"NTDDI_VERSION=NTDDI_WIN10_RS2",
+
+					-- We can't write `=_WIN32_WINNT_WIN10` here because some files do
+					-- `#if WINVER < 0x0600` without including windows.h first, so at that
+					-- point _WIN32_WINNT_WIN10 isn't yet known to be 0x0A00.
+					"_WIN32_WINNT=0x0A00",
+					"WINVER=0x0A00",
+				}
+
+		configuration {}
+	end
+
 	if _OPTIONS["with-amalgamated"] then
 		excludes {
 			path.join(BGFX_DIR, "src/bgfx.cpp"),
@@ -234,3 +271,59 @@ function bgfxProject(_name, _kind, _defines)
 
 		copyLib()
 end
+
+if _OPTIONS["webgpu"] then
+	function usesWebGPU()
+		configuration { "asmjs" }
+			linkoptions {
+				"-s USE_WEBGPU=1",
+			}
+
+		configuration { "not asmjs" }
+			--local generator = "out/Default"
+			local generator = "out/VS2019"
+
+			includedirs {
+				path.join(DAWN_DIR, "src"),
+				path.join(DAWN_DIR, "src/include"),
+				path.join(DAWN_DIR, generator, "gen/src"),
+				path.join(DAWN_DIR, generator, "gen/src/include"),
+			}
+
+			libdirs {
+				path.join(DAWN_DIR, generator),
+				path.join(DAWN_DIR, generator, "lib/Debug"),
+			}
+
+			files {
+				path.join(DAWN_DIR, generator, "gen/src/dawn/webgpu_cpp.cpp"),
+			}
+
+			links {
+				-- shared
+				"dawn_proc_shared",
+				"dawn_native_shared",
+				"shaderc_spvc_shared",
+				-- static
+				--"dawn_common",
+				--"dawn_proc",
+				--"dawn_native",
+				--"dawn_platform",
+				------"shaderc",
+				--"shaderc_spvc",
+				--"SPIRV-tools",
+				--"SPIRV-tools-opt",
+				--"spirv-cross-cored",
+				--"spirv-cross-hlsld",
+				--"spirv-cross-glsld",
+				--"spirv-cross-msld",
+				--"spirv-cross-reflectd",
+			}
+
+			removeflags {
+				"FatalWarnings",
+			}
+
+		configuration {}
+	end
+end

+ 15 - 0
scripts/genie.lua

@@ -3,6 +3,11 @@
 -- License: https://github.com/bkaradzic/bgfx#license-bsd-2-clause
 --
 
+newoption {
+	trigger = "webgpu",
+	description = "Enable webgpu experimental renderer.",
+}
+
 newoption {
 	trigger = "with-amalgamated",
 	description = "Enable amalgamated build.",
@@ -162,6 +167,12 @@ if not os.isdir(BX_DIR) or not os.isdir(BIMG_DIR) then
 	os.exit()
 end
 
+if _OPTIONS["webgpu"] then
+	DAWN_DIR   = os.getenv("DAWN_DIR")
+
+	_OPTIONS["with-windows"] = "10.0"
+end
+
 dofile (path.join(BX_DIR, "scripts/toolchain.lua"))
 if not toolchain(BGFX_BUILD_DIR, BGFX_THIRD_PARTY_DIR) then
 	return -- no action specified
@@ -214,6 +225,10 @@ function exampleProjectDefaults()
 		"bx",
 	}
 
+	if _OPTIONS["webgpu"] then
+		usesWebGPU()
+	end
+
 	if _OPTIONS["with-sdl"] then
 		defines { "ENTRY_CONFIG_USE_SDL=1" }
 		links   { "SDL2" }

+ 1 - 0
scripts/shaderc.lua

@@ -597,6 +597,7 @@ project "shaderc"
 		path.join(BIMG_DIR, "include"),
 		path.join(BGFX_DIR, "include"),
 
+		path.join(BGFX_DIR, "3rdparty/webgpu/include"),
 		path.join(BGFX_DIR, "3rdparty/dxsdk/include"),
 
 		FCPP_DIR,

+ 22 - 11
src/bgfx.cpp

@@ -679,6 +679,8 @@ namespace bgfx
 		ShaderHandle vsh = createEmbeddedShader(s_embeddedShaders, g_caps.rendererType, "vs_debugfont");
 		ShaderHandle fsh = createEmbeddedShader(s_embeddedShaders, g_caps.rendererType, "fs_debugfont");
 
+		BX_CHECK(isValid(vsh) && isValid(fsh), "Failed to create embedded blit shaders");
+
 		m_program = createProgram(vsh, fsh, true);
 
 		m_vb = s_ctx->createTransientVertexBuffer(numBatchVertices*m_layout.m_stride, &m_layout);
@@ -849,12 +851,14 @@ namespace bgfx
 				.end();
 
 			ShaderHandle vsh = createEmbeddedShader(s_embeddedShaders, g_caps.rendererType, "vs_clear");
+			BX_CHECK(isValid(vsh), "Failed to create clear quad embedded vertex shader \"vs_clear\"");
 
 			for (uint32_t ii = 0, num = g_caps.limits.maxFBAttachments; ii < num; ++ii)
 			{
 				char name[32];
 				bx::snprintf(name, BX_COUNTOF(name), "fs_clear%d", ii);
 				ShaderHandle fsh = createEmbeddedShader(s_embeddedShaders, g_caps.rendererType, name);
+				BX_CHECK(isValid(fsh), "Failed to create clear quad embedded fragment shader \"%s\"", name);
 
 				m_program[ii] = createProgram(vsh, fsh);
 				BX_CHECK(isValid(m_program[ii]), "Failed to create clear quad program.");
@@ -1707,6 +1711,11 @@ namespace bgfx
 		return s_ctx->m_uniformRef[_handle.idx].m_name.getPtr();
 	}
 
+	const char* getName(ShaderHandle _handle)
+	{
+		return s_ctx->m_shaderRef[_handle.idx].m_name.getPtr();
+	}
+
 	static const char* s_topologyName[] =
 	{
 		"Triangles",
@@ -2476,6 +2485,7 @@ namespace bgfx
 	BGFX_RENDERER_CONTEXT(nvn);
 	BGFX_RENDERER_CONTEXT(gl);
 	BGFX_RENDERER_CONTEXT(vk);
+	BGFX_RENDERER_CONTEXT(webgpu);
 
 #undef BGFX_RENDERER_CONTEXT
 
@@ -2489,20 +2499,21 @@ namespace bgfx
 
 	static RendererCreator s_rendererCreator[] =
 	{
-		{ noop::rendererCreate,  noop::rendererDestroy,  BGFX_RENDERER_NOOP_NAME,       true                              }, // Noop
-		{ d3d9::rendererCreate,  d3d9::rendererDestroy,  BGFX_RENDERER_DIRECT3D9_NAME,  !!BGFX_CONFIG_RENDERER_DIRECT3D9  }, // Direct3D9
-		{ d3d11::rendererCreate, d3d11::rendererDestroy, BGFX_RENDERER_DIRECT3D11_NAME, !!BGFX_CONFIG_RENDERER_DIRECT3D11 }, // Direct3D11
-		{ d3d12::rendererCreate, d3d12::rendererDestroy, BGFX_RENDERER_DIRECT3D12_NAME, !!BGFX_CONFIG_RENDERER_DIRECT3D12 }, // Direct3D12
-		{ gnm::rendererCreate,   gnm::rendererDestroy,   BGFX_RENDERER_GNM_NAME,        !!BGFX_CONFIG_RENDERER_GNM        }, // GNM
+		{ noop::rendererCreate,   noop::rendererDestroy,   BGFX_RENDERER_NOOP_NAME,       true                              }, // Noop
+		{ d3d9::rendererCreate,   d3d9::rendererDestroy,   BGFX_RENDERER_DIRECT3D9_NAME,  !!BGFX_CONFIG_RENDERER_DIRECT3D9  }, // Direct3D9
+		{ d3d11::rendererCreate,  d3d11::rendererDestroy,  BGFX_RENDERER_DIRECT3D11_NAME, !!BGFX_CONFIG_RENDERER_DIRECT3D11 }, // Direct3D11
+		{ d3d12::rendererCreate,  d3d12::rendererDestroy,  BGFX_RENDERER_DIRECT3D12_NAME, !!BGFX_CONFIG_RENDERER_DIRECT3D12 }, // Direct3D12
+		{ gnm::rendererCreate,    gnm::rendererDestroy,    BGFX_RENDERER_GNM_NAME,        !!BGFX_CONFIG_RENDERER_GNM        }, // GNM
 #if BX_PLATFORM_OSX || BX_PLATFORM_IOS
-		{ mtl::rendererCreate,   mtl::rendererDestroy,   BGFX_RENDERER_METAL_NAME,      !!BGFX_CONFIG_RENDERER_METAL      }, // Metal
+		{ mtl::rendererCreate,    mtl::rendererDestroy,    BGFX_RENDERER_METAL_NAME,      !!BGFX_CONFIG_RENDERER_METAL      }, // Metal
 #else
-		{ noop::rendererCreate,  noop::rendererDestroy,  BGFX_RENDERER_NOOP_NAME,       false                             }, // Noop
+		{ noop::rendererCreate,   noop::rendererDestroy,   BGFX_RENDERER_NOOP_NAME,       false                             }, // Noop
 #endif // BX_PLATFORM_OSX || BX_PLATFORM_IOS
-		{ nvn::rendererCreate,   nvn::rendererDestroy,   BGFX_RENDERER_NVN_NAME,        !!BGFX_CONFIG_RENDERER_NVN        }, // NVN
-		{ gl::rendererCreate,    gl::rendererDestroy,    BGFX_RENDERER_OPENGL_NAME,     !!BGFX_CONFIG_RENDERER_OPENGLES   }, // OpenGLES
-		{ gl::rendererCreate,    gl::rendererDestroy,    BGFX_RENDERER_OPENGL_NAME,     !!BGFX_CONFIG_RENDERER_OPENGL     }, // OpenGL
-		{ vk::rendererCreate,    vk::rendererDestroy,    BGFX_RENDERER_VULKAN_NAME,     !!BGFX_CONFIG_RENDERER_VULKAN     }, // Vulkan
+		{ nvn::rendererCreate,    nvn::rendererDestroy,    BGFX_RENDERER_NVN_NAME,        !!BGFX_CONFIG_RENDERER_NVN        }, // NVN
+		{ gl::rendererCreate,     gl::rendererDestroy,     BGFX_RENDERER_OPENGL_NAME,     !!BGFX_CONFIG_RENDERER_OPENGLES   }, // OpenGLES
+		{ gl::rendererCreate,     gl::rendererDestroy,     BGFX_RENDERER_OPENGL_NAME,     !!BGFX_CONFIG_RENDERER_OPENGL     }, // OpenGL
+		{ vk::rendererCreate,     vk::rendererDestroy,     BGFX_RENDERER_VULKAN_NAME,     !!BGFX_CONFIG_RENDERER_VULKAN     }, // Vulkan
+		{ webgpu::rendererCreate, webgpu::rendererDestroy, BGFX_RENDERER_WEBGPU_NAME,     !!BGFX_CONFIG_RENDERER_WEBGPU     }, // WebGPU
 	};
 	BX_STATIC_ASSERT(BX_COUNTOF(s_rendererCreator) == RendererType::Count);
 

+ 11 - 1
src/bgfx_p.h

@@ -225,6 +225,7 @@ namespace stl = std;
 #define BGFX_RENDERER_METAL_NAME      "Metal"
 #define BGFX_RENDERER_NVN_NAME        "NVN"
 #define BGFX_RENDERER_VULKAN_NAME     "Vulkan"
+#define BGFX_RENDERER_WEBGPU_NAME     "WebGPU"
 #define BGFX_RENDERER_NOOP_NAME       "Noop"
 
 #if BGFX_CONFIG_RENDERER_OPENGL
@@ -491,6 +492,7 @@ namespace bgfx
 	TextureFormat::Enum getViableTextureFormat(const bimg::ImageContainer& _imageContainer);
 	const char* getName(TextureFormat::Enum _fmt);
 	const char* getName(UniformHandle _handle);
+	const char* getName(ShaderHandle _handle);
 	const char* getName(Topology::Enum _topology);
 
 	template<typename Ty>
@@ -1326,7 +1328,9 @@ constexpr uint64_t kSortKeyComputeProgramMask  = uint64_t(BGFX_CONFIG_MAX_PROGRA
 
 #define BGFX_UNIFORM_FRAGMENTBIT UINT8_C(0x10)
 #define BGFX_UNIFORM_SAMPLERBIT  UINT8_C(0x20)
-#define BGFX_UNIFORM_MASK (BGFX_UNIFORM_FRAGMENTBIT|BGFX_UNIFORM_SAMPLERBIT)
+#define BGFX_UNIFORM_READONLYBIT UINT8_C(0x40)
+#define BGFX_UNIFORM_COMPAREBIT  UINT8_C(0x80)
+#define BGFX_UNIFORM_MASK (BGFX_UNIFORM_FRAGMENTBIT|BGFX_UNIFORM_SAMPLERBIT|BGFX_UNIFORM_READONLYBIT|BGFX_UNIFORM_COMPAREBIT)
 
 	class UniformBuffer
 	{
@@ -3872,6 +3876,12 @@ constexpr uint64_t kSortKeyComputeProgramMask  = uint64_t(BGFX_CONFIG_MAX_PROGRA
 				uint16_t regCount;
 				bx::read(&reader, regCount, &err);
 
+				if (!isShaderVerLess(magic, 8))
+				{
+					uint16_t texInfo;
+					bx::read(&reader, texInfo);
+				}
+
 				PredefinedUniform::Enum predefined = nameToPredefinedUniformEnum(name);
 				if (PredefinedUniform::Count == predefined && UniformType::End != UniformType::Enum(type))
 				{

+ 4 - 0
src/config.h

@@ -137,6 +137,10 @@
 #	ifndef BGFX_CONFIG_RENDERER_VULKAN
 #		define BGFX_CONFIG_RENDERER_VULKAN 0
 #	endif // BGFX_CONFIG_RENDERER_VULKAN
+
+#	ifndef BGFX_CONFIG_RENDERER_WEBGPU
+#		define BGFX_CONFIG_RENDERER_WEBGPU 0
+#	endif // BGFX_CONFIG_RENDERER_WEBGPU
 #endif // !defined...
 
 #if BGFX_CONFIG_RENDERER_OPENGL && BGFX_CONFIG_RENDERER_OPENGL < 21

+ 6 - 0
src/renderer_d3d11.cpp

@@ -3938,6 +3938,12 @@ namespace bgfx { namespace d3d11
 				uint16_t regCount = 0;
 				bx::read(&reader, regCount);
 
+				if (!isShaderVerLess(magic, 8) )
+				{
+					uint16_t texInfo = 0;
+					bx::read(&reader, texInfo);
+				}
+
 				const char* kind = "invalid";
 
 				PredefinedUniform::Enum predefined = nameToPredefinedUniformEnum(name);

+ 6 - 0
src/renderer_d3d12.cpp

@@ -4535,6 +4535,12 @@ namespace bgfx { namespace d3d12
 				uint16_t regCount = 0;
 				bx::read(&reader, regCount);
 
+				if (!isShaderVerLess(magic, 8) )
+				{
+					uint16_t texInfo = 0;
+					bx::read(&reader, texInfo);
+				}
+
 				const char* kind = "invalid";
 
 				PredefinedUniform::Enum predefined = nameToPredefinedUniformEnum(name);

+ 6 - 0
src/renderer_d3d9.cpp

@@ -2449,6 +2449,12 @@ namespace bgfx { namespace d3d9
 				uint16_t regCount = 0;
 				bx::read(&reader, regCount);
 
+				if (!isShaderVerLess(magic, 8) )
+				{
+					uint16_t texInfo = 0;
+					bx::read(&reader, texInfo);
+				}
+
 				const char* kind = "invalid";
 
 				PredefinedUniform::Enum predefined = nameToPredefinedUniformEnum(name);

+ 6 - 0
src/renderer_gl.cpp

@@ -5905,6 +5905,12 @@ namespace bgfx { namespace gl
 
 			uint16_t regCount;
 			bx::read(&reader, regCount);
+
+			if (!isShaderVerLess(magic, 8) )
+			{
+				uint16_t texInfo = 0;
+				bx::read(&reader, texInfo);
+			}
 		}
 
 		uint32_t shaderSize;

+ 6 - 0
src/renderer_mtl.mm

@@ -2459,6 +2459,12 @@ namespace bgfx { namespace mtl
 
 			uint16_t regCount;
 			bx::read(&reader, regCount);
+			
+			if (!isShaderVerLess(magic, 8) )
+			{
+				uint16_t texInfo = 0;
+				bx::read(&reader, texInfo);
+			}
 		}
 
 		if (isShaderType(magic, 'C'))

+ 5 - 0
src/renderer_vk.cpp

@@ -4816,6 +4816,11 @@ VK_DESTROY
 				uint16_t regCount;
 				bx::read(&reader, regCount);
 
+				if (!isShaderVerLess(magic, 8) )
+				{
+					uint16_t texInfo = 0;
+					bx::read(&reader, texInfo);
+				}
 				const char* kind = "invalid";
 
 				PredefinedUniform::Enum predefined = nameToPredefinedUniformEnum(name);

+ 4898 - 0
src/renderer_webgpu.cpp

@@ -0,0 +1,4898 @@
+/*
+ * Copyright 2011-2019 Branimir Karadzic. All rights reserved.
+ * License: https://github.com/bkaradzic/bgfx#license-bsd-2-clause
+ */
+
+#include "bgfx_p.h"
+
+//#define DAWN_ENABLE_BACKEND_D3D12
+#define DAWN_ENABLE_BACKEND_VULKAN
+
+#define BGFX_CONFIG_DEBUG_ANNOTATION 0
+
+#if BGFX_CONFIG_RENDERER_WEBGPU
+#include "renderer_webgpu.h"
+#include "renderer.h"
+#include "debug_renderdoc.h"
+
+#ifdef DAWN_ENABLE_BACKEND_VULKAN
+#include "renderer_vk.h"
+#endif
+
+#include <cfloat>
+#include <new>
+
+#if !BX_PLATFORM_EMSCRIPTEN
+#ifdef DAWN_ENABLE_BACKEND_D3D12
+#include <dawn_native/D3D12Backend.h>
+#endif
+#ifdef DAWN_ENABLE_BACKEND_VULKAN
+#include <dawn_native/VulkanBackend.h>
+#endif
+#include <dawn_native/DawnNative.h>
+#include <dawn/dawn_wsi.h>
+#include <dawn/dawn_proc.h>
+#else
+#include <emscripten/emscripten.h>
+#include <emscripten/html5.h>
+#endif
+
+#define UNIFORM_BUFFER_SIZE (8*1024*1024)
+
+
+namespace bgfx { namespace webgpu
+{
+	// TODO (hugoam) cleanup
+	template <class T>
+	T defaultDescriptor() { return T(); }
+
+	template <> wgpu::BlendDescriptor              defaultDescriptor() { return { wgpu::BlendOperation::Add, wgpu::BlendFactor::One, wgpu::BlendFactor::Zero }; }
+	template <> wgpu::ColorStateDescriptor         defaultDescriptor() { return { nullptr, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor<wgpu::BlendDescriptor>(), defaultDescriptor<wgpu::BlendDescriptor>(), wgpu::ColorWriteMask::All }; }
+	template <> wgpu::StencilStateFaceDescriptor   defaultDescriptor() { return { wgpu::CompareFunction::Always, wgpu::StencilOperation::Keep, wgpu::StencilOperation::Keep, wgpu::StencilOperation::Keep }; }
+	template <> wgpu::VertexStateDescriptor        defaultDescriptor() { return { nullptr, wgpu::IndexFormat::Uint32, 0, nullptr }; }
+	template <> wgpu::VertexBufferLayoutDescriptor defaultDescriptor() { return { 0, wgpu::InputStepMode::Vertex, 0, nullptr }; }
+	template <> wgpu::VertexAttributeDescriptor    defaultDescriptor() { return { wgpu::VertexFormat::Float, 0, 0 }; }
+	template <> wgpu::RasterizationStateDescriptor defaultDescriptor() { return { nullptr, wgpu::FrontFace::CCW, wgpu::CullMode::None, 0, 0.f, 0.f }; }
+	template <> wgpu::ProgrammableStageDescriptor  defaultDescriptor() { return { nullptr, {}, "main" }; }
+	template <> wgpu::DepthStencilStateDescriptor  defaultDescriptor() { return { nullptr, wgpu::TextureFormat::Depth24PlusStencil8, false, wgpu::CompareFunction::Always, defaultDescriptor<wgpu::StencilStateFaceDescriptor>(), defaultDescriptor<wgpu::StencilStateFaceDescriptor>(), 0xff, 0xff }; }
+	template <> wgpu::PipelineLayoutDescriptor     defaultDescriptor() { return { nullptr, "", 0, nullptr }; }
+	template <> wgpu::TextureViewDescriptor        defaultDescriptor() { return {}; }
+
+	template <> wgpu::RenderPassColorAttachmentDescriptor defaultDescriptor() { return { {}, {}, wgpu::LoadOp::Clear, wgpu::StoreOp::Store, { 0.0f, 0.0f, 0.0f, 0.0f } }; }
+	template <> wgpu::RenderPassDepthStencilAttachmentDescriptor defaultDescriptor() { return { {}, wgpu::LoadOp::Clear, wgpu::StoreOp::Store, 1.0f, wgpu::LoadOp::Clear, wgpu::StoreOp::Store, 0 }; }
+
+	RenderPassDescriptor::RenderPassDescriptor()
+	{
+		depthStencilAttachment = defaultDescriptor<wgpu::RenderPassDepthStencilAttachmentDescriptor>();
+
+		for(uint32_t i = 0; i < kMaxColorAttachments; ++i)
+		{
+			colorAttachments[i] = defaultDescriptor<wgpu::RenderPassColorAttachmentDescriptor>();
+		}
+
+		desc = defaultDescriptor<wgpu::RenderPassDescriptor>();
+		//desc.colorAttachmentCount = colorAttachmentCount;
+		desc.colorAttachments = colorAttachments;
+		desc.colorAttachmentCount = 1; // TODO (hugoam) set it properly everywhere
+	}
+
+	VertexStateDescriptor::VertexStateDescriptor()
+	{
+		for(uint32_t i = 0; i < kMaxVertexInputs; ++i)
+		{
+			vertexBuffers[i] = defaultDescriptor<wgpu::VertexBufferLayoutDescriptor>();
+		}
+
+		for (uint32_t i = 0; i < kMaxVertexAttributes; ++i)
+		{
+			attributes[i] = defaultDescriptor<wgpu::VertexAttributeDescriptor>();
+		}
+
+		vertexBuffers[0].attributes = &attributes[0];
+		//vertexBuffers[0].attributeCount = numAttributes;
+
+		desc = defaultDescriptor<wgpu::VertexStateDescriptor>();
+
+		desc.vertexBuffers = vertexBuffers;
+		//desc.vertexBufferCount = numVertexBuffers;
+	}
+
+	RenderPipelineDescriptor::RenderPipelineDescriptor()
+	{
+		//vertexStage = defaultDescriptor<wgpu::ProgrammableStageDescriptor>();
+		fragmentStage = defaultDescriptor<wgpu::ProgrammableStageDescriptor>();
+		rasterizationState = defaultDescriptor<wgpu::RasterizationStateDescriptor>();
+		depthStencilState = defaultDescriptor<wgpu::DepthStencilStateDescriptor>();
+
+		for(uint32_t i = 0; i < kMaxColorAttachments; ++i)
+		{
+			colorStates[i] = defaultDescriptor<wgpu::ColorStateDescriptor>();
+		}
+
+		desc = defaultDescriptor<wgpu::RenderPipelineDescriptor>();
+
+		desc.primitiveTopology = wgpu::PrimitiveTopology::TriangleList;
+		desc.sampleCount = 1;
+		desc.colorStateCount = 1;
+
+		//wgpu::VertexStateDescriptor inputState = inputState.descriptor();
+
+		desc.vertexStage = defaultDescriptor<wgpu::ProgrammableStageDescriptor>();
+		desc.fragmentStage = &fragmentStage;
+		//desc.vertexState = &inputState;
+		desc.rasterizationState = &rasterizationState;
+		desc.depthStencilState = nullptr;
+		desc.colorStates = colorStates;
+	}
+	// TODO (hugoam) cleanup (end)
+
+	static char s_viewName[BGFX_CONFIG_MAX_VIEWS][BGFX_CONFIG_MAX_VIEW_NAME];
+
+	inline void setViewType(ViewId _view, const bx::StringView _str)
+	{
+		if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION || BGFX_CONFIG_PROFILER) )
+		{
+			bx::memCopy(&s_viewName[_view][3], _str.getPtr(), _str.getLength() );
+		}
+	}
+
+	struct PrimInfo
+	{
+		wgpu::PrimitiveTopology m_type;
+		uint32_t m_min;
+		uint32_t m_div;
+		uint32_t m_sub;
+	};
+	
+	static const PrimInfo s_primInfo[] =
+	{
+		{ wgpu::PrimitiveTopology::TriangleList,  3, 3, 0 },
+		{ wgpu::PrimitiveTopology::TriangleStrip, 3, 1, 2 },
+		{ wgpu::PrimitiveTopology::LineList,      2, 2, 0 },
+		{ wgpu::PrimitiveTopology::LineStrip,     2, 1, 1 },
+		{ wgpu::PrimitiveTopology::PointList,     1, 1, 0 },
+	};
+	BX_STATIC_ASSERT(Topology::Count == BX_COUNTOF(s_primInfo) );
+	
+	static const wgpu::VertexFormat s_attribType[][4][2] =
+	{
+		{ // Uint8
+			{ wgpu::VertexFormat::UChar2, wgpu::VertexFormat::UChar2Norm },
+			{ wgpu::VertexFormat::UChar2, wgpu::VertexFormat::UChar2Norm },
+			{ wgpu::VertexFormat::UChar4, wgpu::VertexFormat::UChar4Norm },
+			{ wgpu::VertexFormat::UChar4, wgpu::VertexFormat::UChar4Norm },
+		},
+		{ // Uint10
+			{ wgpu::VertexFormat::UShort2, wgpu::VertexFormat::UShort2Norm },
+			{ wgpu::VertexFormat::UShort2, wgpu::VertexFormat::UShort2Norm },
+			{ wgpu::VertexFormat::UShort4, wgpu::VertexFormat::UShort4Norm },
+			{ wgpu::VertexFormat::UShort4, wgpu::VertexFormat::UShort4Norm },
+		},
+		{ // Int16
+			{ wgpu::VertexFormat::Short2, wgpu::VertexFormat::Short2Norm },
+			{ wgpu::VertexFormat::Short2, wgpu::VertexFormat::Short2Norm },
+			{ wgpu::VertexFormat::Short4, wgpu::VertexFormat::Short4Norm },
+			{ wgpu::VertexFormat::Short4, wgpu::VertexFormat::Short4Norm },
+		},
+		{ // Half
+			{ wgpu::VertexFormat::Half2, wgpu::VertexFormat::Half2 },
+			{ wgpu::VertexFormat::Half2, wgpu::VertexFormat::Half2 },
+			{ wgpu::VertexFormat::Half4, wgpu::VertexFormat::Half4 },
+			{ wgpu::VertexFormat::Half4, wgpu::VertexFormat::Half4 },
+		},
+		{ // Float
+			{ wgpu::VertexFormat::Float,  wgpu::VertexFormat::Float  },
+			{ wgpu::VertexFormat::Float2, wgpu::VertexFormat::Float2 },
+			{ wgpu::VertexFormat::Float3, wgpu::VertexFormat::Float3 },
+			{ wgpu::VertexFormat::Float4, wgpu::VertexFormat::Float4 },
+		},
+	};
+	BX_STATIC_ASSERT(AttribType::Count == BX_COUNTOF(s_attribType) );
+
+	static const wgpu::CullMode s_cullMode[] =
+	{
+		wgpu::CullMode::None,
+		wgpu::CullMode::Front,
+		wgpu::CullMode::Back,
+		wgpu::CullMode::None,
+	};
+
+	static const wgpu::BlendFactor s_blendFactor[][2] =
+	{
+		{ wgpu::BlendFactor(0),                  wgpu::BlendFactor(0)                  }, // ignored
+		{ wgpu::BlendFactor::Zero,               wgpu::BlendFactor::Zero               }, // ZERO
+		{ wgpu::BlendFactor::One,                wgpu::BlendFactor::One                }, // ONE
+		{ wgpu::BlendFactor::SrcColor,           wgpu::BlendFactor::SrcAlpha           }, // SRC_COLOR
+		{ wgpu::BlendFactor::OneMinusSrcColor,   wgpu::BlendFactor::OneMinusSrcAlpha   }, // INV_SRC_COLOR
+		{ wgpu::BlendFactor::SrcAlpha,           wgpu::BlendFactor::SrcAlpha           }, // SRC_ALPHA
+		{ wgpu::BlendFactor::OneMinusSrcAlpha,   wgpu::BlendFactor::OneMinusSrcAlpha   }, // INV_SRC_ALPHA
+		{ wgpu::BlendFactor::DstAlpha,           wgpu::BlendFactor::DstAlpha           }, // DST_ALPHA
+		{ wgpu::BlendFactor::OneMinusDstAlpha,   wgpu::BlendFactor::OneMinusDstAlpha   }, // INV_DST_ALPHA
+		{ wgpu::BlendFactor::DstColor,           wgpu::BlendFactor::DstAlpha           }, // DST_COLOR
+		{ wgpu::BlendFactor::OneMinusDstColor,   wgpu::BlendFactor::OneMinusDstAlpha   }, // INV_DST_COLOR
+		{ wgpu::BlendFactor::SrcAlphaSaturated,  wgpu::BlendFactor::One                }, // SRC_ALPHA_SAT
+		{ wgpu::BlendFactor::BlendColor,         wgpu::BlendFactor::BlendColor         }, // FACTOR
+		{ wgpu::BlendFactor::OneMinusBlendColor, wgpu::BlendFactor::OneMinusBlendColor }, // INV_FACTOR
+	};
+
+	static const wgpu::BlendOperation s_blendEquation[] =
+	{
+		wgpu::BlendOperation::Add,
+		wgpu::BlendOperation::Subtract,
+		wgpu::BlendOperation::ReverseSubtract,
+		wgpu::BlendOperation::Min,
+		wgpu::BlendOperation::Max,
+	};
+
+	static const wgpu::CompareFunction s_cmpFunc[] =
+	{
+		wgpu::CompareFunction::Always, // ignored
+		wgpu::CompareFunction::Less,
+		wgpu::CompareFunction::LessEqual,
+		wgpu::CompareFunction::Equal,
+		wgpu::CompareFunction::GreaterEqual,
+		wgpu::CompareFunction::Greater,
+		wgpu::CompareFunction::NotEqual,
+		wgpu::CompareFunction::Never,
+		wgpu::CompareFunction::Always,
+	};
+
+	static const wgpu::StencilOperation s_stencilOp[] =
+	{
+		wgpu::StencilOperation::Zero,
+		wgpu::StencilOperation::Keep,
+		wgpu::StencilOperation::Replace,
+		wgpu::StencilOperation::IncrementWrap,
+		wgpu::StencilOperation::IncrementClamp,
+		wgpu::StencilOperation::DecrementWrap,
+		wgpu::StencilOperation::DecrementClamp,
+		wgpu::StencilOperation::Invert,
+	};
+
+	static const wgpu::AddressMode s_textureAddress[] =
+	{
+		wgpu::AddressMode::Repeat,
+		wgpu::AddressMode::MirrorRepeat,
+		wgpu::AddressMode::ClampToEdge,
+		wgpu::AddressMode(0), // Border ? ClampToZero ?
+	};
+
+	static const wgpu::FilterMode s_textureFilterMinMag[] =
+	{
+		wgpu::FilterMode::Linear,
+		wgpu::FilterMode::Nearest,
+		wgpu::FilterMode::Linear,
+	};
+
+	static const wgpu::FilterMode s_textureFilterMip[] =
+	{
+		wgpu::FilterMode::Linear,
+		wgpu::FilterMode::Nearest,
+	};
+
+	struct TextureFormatInfo
+	{
+		wgpu::TextureFormat m_fmt;
+		wgpu::TextureFormat m_fmtSrgb;
+	};
+
+	static TextureFormatInfo s_textureFormat[] =
+	{
+		{ wgpu::TextureFormat::BC1RGBAUnorm,            wgpu::TextureFormat::BC1RGBAUnormSrgb },  // BC1
+		{ wgpu::TextureFormat::BC2RGBAUnorm,            wgpu::TextureFormat::BC2RGBAUnormSrgb },  // BC2
+		{ wgpu::TextureFormat::BC3RGBAUnorm,            wgpu::TextureFormat::BC3RGBAUnormSrgb },  // BC3
+		{ wgpu::TextureFormat::BC4RUnorm,               wgpu::TextureFormat::Undefined                },  // BC4  //  BC4RSnorm ??
+		{ wgpu::TextureFormat::BC5RGUnorm,              wgpu::TextureFormat::Undefined                },  // BC5  //  BC5RGSnorm ??
+		{ wgpu::TextureFormat::BC6HRGBUfloat,           wgpu::TextureFormat::Undefined                },  // BC6H //  BC6HRGBSfloat ??
+		{ wgpu::TextureFormat::BC7RGBAUnorm,            wgpu::TextureFormat::BC7RGBAUnormSrgb },  // BC7
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ETC1
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ETC2
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ETC2A
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ETC2A1
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // PTC12
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // PTC14
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // PTC12A
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // PTC14A
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // PTC22
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // PTC24
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ATC
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ATCE
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ATCI
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ASTC4x4
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ASTC5x5
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ASTC6x6
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ASTC8x5
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ASTC8x6
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ASTC10x5
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // Unknown
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // R1
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // A8
+		{ wgpu::TextureFormat::R8Unorm,                 wgpu::TextureFormat::Undefined                },  // R8
+		{ wgpu::TextureFormat::R8Sint,                  wgpu::TextureFormat::Undefined                },  // R8I
+		{ wgpu::TextureFormat::R8Uint,                  wgpu::TextureFormat::Undefined                },  // R8U
+		{ wgpu::TextureFormat::R8Snorm,                 wgpu::TextureFormat::Undefined                },  // R8S
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // R16
+		{ wgpu::TextureFormat::R16Sint,                 wgpu::TextureFormat::Undefined                },  // R16I
+		{ wgpu::TextureFormat::R16Uint,                 wgpu::TextureFormat::Undefined                },  // R16U
+		{ wgpu::TextureFormat::R16Float,                wgpu::TextureFormat::Undefined                },  // R16F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // R16S
+		{ wgpu::TextureFormat::R32Sint,                 wgpu::TextureFormat::Undefined                },  // R32I
+		{ wgpu::TextureFormat::R32Uint,                 wgpu::TextureFormat::Undefined                },  // R32U
+		{ wgpu::TextureFormat::R32Float,                wgpu::TextureFormat::Undefined                },  // R32F
+		{ wgpu::TextureFormat::RG8Unorm,                wgpu::TextureFormat::Undefined                },  // RG8
+		{ wgpu::TextureFormat::RG8Sint,                 wgpu::TextureFormat::Undefined                },  // RG8I
+		{ wgpu::TextureFormat::RG8Uint,                 wgpu::TextureFormat::Undefined                },  // RG8U
+		{ wgpu::TextureFormat::RG8Snorm,                wgpu::TextureFormat::Undefined                },  // RG8S
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RG16
+		{ wgpu::TextureFormat::RG16Sint,                wgpu::TextureFormat::Undefined                },  // RG16I
+		{ wgpu::TextureFormat::RG16Uint,                wgpu::TextureFormat::Undefined                },  // RG16U
+		{ wgpu::TextureFormat::RG16Float,               wgpu::TextureFormat::Undefined                },  // RG16F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RG16S
+		{ wgpu::TextureFormat::RG32Sint,                wgpu::TextureFormat::Undefined                },  // RG32I
+		{ wgpu::TextureFormat::RG32Uint,                wgpu::TextureFormat::Undefined                },  // RG32U
+		{ wgpu::TextureFormat::RG32Float,               wgpu::TextureFormat::Undefined                },  // RG32F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGB8
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGB8I
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGB8U
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGB8S
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGB9E5F
+		{ wgpu::TextureFormat::BGRA8Unorm,              wgpu::TextureFormat::BGRA8UnormSrgb   },  // BGRA8
+		{ wgpu::TextureFormat::RGBA8Unorm,              wgpu::TextureFormat::RGBA8UnormSrgb   },  // RGBA8
+		{ wgpu::TextureFormat::RGBA8Sint,               wgpu::TextureFormat::Undefined                },  // RGBA8I
+		{ wgpu::TextureFormat::RGBA8Uint,               wgpu::TextureFormat::Undefined                },  // RGBA8U
+		{ wgpu::TextureFormat::RGBA8Snorm,              wgpu::TextureFormat::Undefined                },  // RGBA8S
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGBA16
+		{ wgpu::TextureFormat::RGBA16Sint,              wgpu::TextureFormat::Undefined                },  // RGBA16I
+		{ wgpu::TextureFormat::RGBA16Uint,              wgpu::TextureFormat::Undefined                },  // RGBA16U
+		{ wgpu::TextureFormat::RGBA16Float,             wgpu::TextureFormat::Undefined                },  // RGBA16F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGBA16S
+		{ wgpu::TextureFormat::RGBA32Sint,              wgpu::TextureFormat::Undefined                },  // RGBA32I
+		{ wgpu::TextureFormat::RGBA32Uint,              wgpu::TextureFormat::Undefined                },  // RGBA32U
+		{ wgpu::TextureFormat::RGBA32Float,             wgpu::TextureFormat::Undefined                },  // RGBA32F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // R5G6B5
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGBA4
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGB5A1
+		{ wgpu::TextureFormat::RGB10A2Unorm,            wgpu::TextureFormat::Undefined                },  // RGB10A2
+		{ wgpu::TextureFormat::RG11B10Float,            wgpu::TextureFormat::Undefined                },  // RG11B10F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // UnknownDepth
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // D16
+		{ wgpu::TextureFormat::Depth24Plus,             wgpu::TextureFormat::Undefined                },  // D24
+		{ wgpu::TextureFormat::Depth24PlusStencil8,     wgpu::TextureFormat::Undefined                },  // D24S8
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // D32
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // D16F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // D24F
+		{ wgpu::TextureFormat::Depth32Float,            wgpu::TextureFormat::Undefined                },  // D32F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // D0S8
+	};
+	BX_STATIC_ASSERT(TextureFormat::Count == BX_COUNTOF(s_textureFormat));
+
+	int32_t s_msaa[] =
+	{
+		 1,
+		 2,
+		 4,
+		 8,
+		16,
+	};
+
+	struct RendererContextWgpu;
+	static RendererContextWgpu* s_renderWgpu;
+
+	static bool s_ignoreError = false;
+
+#if !BX_PLATFORM_EMSCRIPTEN
+	DawnSwapChainImplementation(*createSwapChain)(wgpu::Device device, void* nwh);
+
+#ifdef DAWN_ENABLE_BACKEND_D3D12
+	DawnSwapChainImplementation CreateSwapChainD3D12(wgpu::Device device, void* nwh)
+	{
+		HWND win32Window = (HWND)nwh;
+		return dawn_native::d3d12::CreateNativeSwapChainImpl(device.Get(), win32Window);
+	}
+#endif
+
+#ifdef DAWN_ENABLE_BACKEND_VULKAN
+	DawnSwapChainImplementation CreateSwapChainVulkan(wgpu::Device device, void* nwh)
+	{
+		VkInstance instance = dawn_native::vulkan::GetInstance(device.Get());
+
+		PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)dawn_native::vulkan::GetInstanceProcAddr(device.Get(), "vkCreateWin32SurfaceKHR");
+
+		VkSurfaceKHR surface;
+#if BX_PLATFORM_WINDOWS
+		// Copied from renderer_vk.cpp -> needs refactor
+		{
+			VkWin32SurfaceCreateInfoKHR sci;
+			sci.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
+			sci.pNext = NULL;
+			sci.flags = 0;
+			sci.hinstance = (HINSTANCE)GetModuleHandle(NULL);
+			sci.hwnd = (HWND)nwh;
+			VkResult result = vkCreateWin32SurfaceKHR(instance, &sci, NULL, &surface);
+		}
+#endif
+		return dawn_native::vulkan::CreateNativeSwapChainImpl(device.Get(), surface);
+	}
+#endif
+
+#endif
+
+	struct RendererContextWgpu : public RendererContextI
+	{
+		RendererContextWgpu()
+			: m_frameIndex(0)
+			, m_numWindows(0)
+			, m_rtMsaa(false)
+			, m_capture(NULL)
+			, m_captureSize(0)
+		{
+			bx::memSet(&m_windows, 0xff, sizeof(m_windows) );
+		}
+
+		~RendererContextWgpu()
+		{
+		}
+
+		bool init(const Init& _init)
+		{
+			BX_UNUSED(_init);
+			BX_TRACE("Init.");
+
+			if (_init.debug
+			||  _init.profile)
+			{
+				m_renderDocDll = loadRenderDoc();
+			}
+
+			setGraphicsDebuggerPresent(NULL != m_renderDocDll);
+
+			m_fbh.idx = kInvalidHandle;
+			bx::memSet(m_uniforms, 0, sizeof(m_uniforms) );
+			bx::memSet(&m_resolution, 0, sizeof(m_resolution) );
+
+#if !BX_PLATFORM_EMSCRIPTEN
+			// Default to D3D12, Metal, Vulkan, OpenGL in that order, as D3D12 and Metal are preferred on
+			// their respective platforms, and Vulkan is preferred over OpenGL.
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+			static dawn_native::BackendType backendType = dawn_native::BackendType::D3D12;
+#elif defined(DAWN_ENABLE_BACKEND_METAL)
+			static dawn_native::BackendType backendType = dawn_native::BackendType::Metal;
+#elif defined(DAWN_ENABLE_BACKEND_OPENGL)
+			static dawn_native::BackendType backendType = dawn_native::BackendType::OpenGL;
+#elif defined(DAWN_ENABLE_BACKEND_VULKAN)
+			static dawn_native::BackendType backendType = dawn_native::BackendType::Vulkan;
+#else
+#error "No Dawn backend enabled. Define one of the DAWN_ENABLE_BACKEND_* macros."
+#endif
+
+			if (BX_ENABLED(BGFX_CONFIG_DEBUG))
+			{
+				m_instance.EnableBackendValidation(true);
+			}
+
+			m_instance.DiscoverDefaultAdapters();
+
+			dawn_native::Adapter backendAdapter;
+			std::vector<dawn_native::Adapter> adapters = m_instance.GetAdapters();
+			for (dawn_native::Adapter& adapter : adapters)
+			{
+				if (adapter.GetBackendType() == backendType)
+				{
+					backendAdapter = adapter;
+					break;
+				}
+			}
+
+			//BX_ASSERT(adapterIt != adapters.end());
+
+			WGPUDevice backendDevice = backendAdapter.CreateDevice();
+			DawnProcTable backendProcs = dawn_native::GetProcs();
+
+			using CreateSwapChain = DawnSwapChainImplementation (*)(wgpu::Device device, void* nwh);
+
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+			createSwapChain = CreateSwapChainD3D12;
+#elif defined(DAWN_ENABLE_BACKEND_METAL)
+			createSwapChain = CreateSwapChainMetal;
+#elif defined(DAWN_ENABLE_BACKEND_NULL)
+			createSwapChain = CreateSwapChainNull;
+#elif defined(DAWN_ENABLE_BACKEND_OPENGL)
+			createSwapChain = CreateSwapChainOpenGL;
+#elif defined(DAWN_ENABLE_BACKEND_VULKAN)
+			createSwapChain = CreateSwapChainVulkan;
+#endif
+
+			// Choose whether to use the backend procs and devices directly, or set up the wire.
+			WGPUDevice cDevice = backendDevice;
+			DawnProcTable procs = backendProcs;
+			dawnProcSetProcs(&procs);
+
+			m_device = wgpu::Device::Acquire(cDevice);
+#else
+			m_device = wgpu::Device(emscripten_webgpu_get_device());
+#endif
+
+			auto PrintDeviceError = [](WGPUErrorType errorType, const char* message, void*) {
+				BX_UNUSED(errorType);
+
+				if (s_ignoreError)
+				{
+					BX_TRACE("Device error: %s", message);
+				}
+				else
+				{
+					BX_CHECK(false, "Device error: %s", message);
+				}
+				s_ignoreError = false;
+			};
+
+			m_device.SetUncapturedErrorCallback(PrintDeviceError, nullptr);
+
+			if (!m_device)
+			{
+				BX_WARN(false, "Unable to create WebGPU device.");
+				return false;
+			}
+
+			bool success = m_mainFrameBuffer.create(
+				  0
+				, g_platformData.nwh
+				, _init.resolution.width
+				, _init.resolution.height
+				, TextureFormat::Unknown
+				, TextureFormat::UnknownDepth
+				);
+			m_numWindows = 1;
+
+			if (!success)
+			{
+				return false;
+			}
+
+			m_queue = m_device.GetDefaultQueue();
+
+			m_cmd.init(m_queue);
+			//BGFX_FATAL(NULL != m_cmd.m_commandQueue, Fatal::UnableToInitialize, "Unable to create Metal device.");
+
+			for (uint8_t ii = 0; ii < WEBGPU_MAX_FRAMES_IN_FLIGHT; ++ii)
+			{
+				BX_TRACE("Create scratch buffer %d", ii);
+				m_scratchBuffers[ii].create(BGFX_CONFIG_MAX_DRAW_CALLS * 128);
+				m_bindStateCache[ii].create(); // (1024);
+			}
+
+			for (uint8_t ii = 0; ii < WEBGPU_NUM_UNIFORM_BUFFERS; ++ii)
+			{
+				bool mapped = true; // ii == WEBGPU_NUM_UNIFORM_BUFFERS - 1;
+				m_uniformBuffers[ii].create(BGFX_CONFIG_MAX_DRAW_CALLS * 128, mapped);
+			}
+
+			g_caps.supported |= (0
+				| BGFX_CAPS_ALPHA_TO_COVERAGE
+				| BGFX_CAPS_BLEND_INDEPENDENT
+				| BGFX_CAPS_FRAGMENT_DEPTH
+				| BGFX_CAPS_INDEX32
+				| BGFX_CAPS_INSTANCING
+			//	| BGFX_CAPS_OCCLUSION_QUERY
+				| BGFX_CAPS_SWAP_CHAIN
+				| BGFX_CAPS_TEXTURE_2D_ARRAY
+			//	| BGFX_CAPS_TEXTURE_3D
+				| BGFX_CAPS_TEXTURE_BLIT
+				| BGFX_CAPS_TEXTURE_COMPARE_ALL
+				| BGFX_CAPS_TEXTURE_COMPARE_LEQUAL
+				| BGFX_CAPS_TEXTURE_READ_BACK
+				| BGFX_CAPS_VERTEX_ATTRIB_HALF
+				| BGFX_CAPS_VERTEX_ATTRIB_UINT10
+				| BGFX_CAPS_COMPUTE
+				);
+
+			g_caps.limits.maxTextureSize   = 8192;
+			g_caps.limits.maxFBAttachments = 4;
+			g_caps.supported |= BGFX_CAPS_TEXTURE_CUBE_ARRAY;
+			g_caps.supported |= BGFX_CAPS_DRAW_INDIRECT;
+
+			g_caps.limits.maxTextureLayers = 2048;
+			g_caps.limits.maxVertexStreams = BGFX_CONFIG_MAX_VERTEX_STREAMS;
+			// The maximum number of entries in the buffer argument table, per graphics or compute function, is 31.
+			// One entry is reserved for uniforms, hence the limit is decremented by 1.
+			g_caps.limits.maxComputeBindings = bx::uint32_min(30, BGFX_MAX_COMPUTE_BINDINGS);
+
+			for (uint32_t ii = 0; ii < TextureFormat::Count; ++ii)
+			{
+				uint16_t support = 0;
+
+				support |= wgpu::TextureFormat::Undefined != s_textureFormat[ii].m_fmt
+					? BGFX_CAPS_FORMAT_TEXTURE_2D
+					| BGFX_CAPS_FORMAT_TEXTURE_3D
+					| BGFX_CAPS_FORMAT_TEXTURE_CUBE
+					| BGFX_CAPS_FORMAT_TEXTURE_VERTEX
+					: BGFX_CAPS_FORMAT_TEXTURE_NONE
+					;
+
+				support |= wgpu::TextureFormat::Undefined != s_textureFormat[ii].m_fmtSrgb
+					? BGFX_CAPS_FORMAT_TEXTURE_2D_SRGB
+					| BGFX_CAPS_FORMAT_TEXTURE_3D_SRGB
+					| BGFX_CAPS_FORMAT_TEXTURE_CUBE_SRGB
+					| BGFX_CAPS_FORMAT_TEXTURE_VERTEX
+					: BGFX_CAPS_FORMAT_TEXTURE_NONE
+					;
+
+				if (!bimg::isCompressed(bimg::TextureFormat::Enum(ii) ) )
+				{
+					support |= 0
+						| BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER
+					//	| BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER_MSAA
+						;
+				}
+
+				g_caps.formats[ii] = support;
+			}
+
+			g_caps.formats[TextureFormat::A8     ] &= ~(BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER | BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER_MSAA);
+			g_caps.formats[TextureFormat::RG32I  ] &= ~(BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER_MSAA);
+			g_caps.formats[TextureFormat::RG32U  ] &= ~(BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER_MSAA);
+			g_caps.formats[TextureFormat::RGBA32I] &= ~(BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER_MSAA);
+			g_caps.formats[TextureFormat::RGBA32U] &= ~(BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER_MSAA);
+
+			g_caps.formats[TextureFormat::ETC2  ] =
+			g_caps.formats[TextureFormat::ETC2A ] =
+			g_caps.formats[TextureFormat::ETC2A1] =
+			g_caps.formats[TextureFormat::PTC12 ] =
+			g_caps.formats[TextureFormat::PTC14 ] =
+			g_caps.formats[TextureFormat::PTC12A] =
+			g_caps.formats[TextureFormat::PTC14A] =
+			g_caps.formats[TextureFormat::R5G6B5] =
+			g_caps.formats[TextureFormat::RGBA4 ] =
+			g_caps.formats[TextureFormat::RGB5A1] = BGFX_CAPS_FORMAT_TEXTURE_NONE;
+
+			g_caps.formats[TextureFormat::RGB9E5F] &= ~(BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER | BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER_MSAA);
+
+			// disable compressed formats
+			for (uint32_t ii = 0; ii < TextureFormat::Unknown; ++ii)
+			{
+				s_textureFormat[ii].m_fmt = wgpu::TextureFormat::Undefined;
+			}
+
+			for (uint32_t ii = 0; ii < TextureFormat::Count; ++ii)
+			{
+				if (BGFX_CAPS_FORMAT_TEXTURE_NONE == g_caps.formats[ii])
+				{
+					s_textureFormat[ii].m_fmt = wgpu::TextureFormat::Undefined;
+					s_textureFormat[ii].m_fmtSrgb = wgpu::TextureFormat::Undefined;
+				}
+			}
+
+			for (uint32_t ii = 1, last = 0; ii < BX_COUNTOF(s_msaa); ++ii)
+			{
+				// TODO (hugoam)
+				//const int32_t sampleCount = 1; //1<<ii;
+				//if (m_device.supportsTextureSampleCount(sampleCount) )
+				//{
+				//	s_msaa[ii] = sampleCount;
+				//	last = ii;
+				//}
+				//else
+				{
+					s_msaa[ii] = s_msaa[last];
+				}
+			}
+
+			// Init reserved part of view name.
+			for (uint32_t ii = 0; ii < BGFX_CONFIG_MAX_VIEWS; ++ii)
+			{
+				bx::snprintf(s_viewName[ii], BGFX_CONFIG_MAX_VIEW_NAME_RESERVED+1, "%3d   ", ii);
+			}
+
+			m_gpuTimer.init();
+
+			g_internalData.context = &m_device;
+
+			return true;
+		}
+
+		void shutdown()
+		{
+			m_gpuTimer.shutdown();
+
+			m_pipelineStateCache.invalidate();
+
+			for (uint32_t ii = 0; ii < BX_COUNTOF(m_shaders); ++ii)
+			{
+				m_shaders[ii].destroy();
+			}
+
+			for (uint32_t ii = 0; ii < BX_COUNTOF(m_textures); ++ii)
+			{
+				m_textures[ii].destroy();
+			}
+
+			captureFinish();
+
+			m_mainFrameBuffer.destroy();
+
+			for (uint32_t ii = 0; ii < BX_COUNTOF(m_scratchBuffers); ++ii)
+			{
+				m_scratchBuffers[ii].destroy();
+			}
+
+			m_cmd.shutdown();
+		}
+
+		RendererType::Enum getRendererType() const override
+		{
+			return RendererType::WebGPU;
+		}
+
+		const char* getRendererName() const override
+		{
+			return BGFX_RENDERER_WEBGPU_NAME;
+		}
+
+		void createIndexBuffer(IndexBufferHandle _handle, const Memory* _mem, uint16_t _flags) override
+		{
+			m_indexBuffers[_handle.idx].create(_mem->size, _mem->data, _flags);
+		}
+
+		void destroyIndexBuffer(IndexBufferHandle _handle) override
+		{
+			m_indexBuffers[_handle.idx].destroy();
+		}
+
+		void createVertexLayout(VertexLayoutHandle _handle, const VertexLayout& _decl) override
+		{
+			VertexLayout& decl = m_vertexDecls[_handle.idx];
+			bx::memCopy(&decl, &_decl, sizeof(VertexLayout) );
+			dump(decl);
+		}
+
+		void destroyVertexLayout(VertexLayoutHandle /*_handle*/) override
+		{
+		}
+
+		void createVertexBuffer(VertexBufferHandle _handle, const Memory* _mem, VertexLayoutHandle _declHandle, uint16_t _flags) override
+		{
+			m_vertexBuffers[_handle.idx].create(_mem->size, _mem->data, _declHandle, _flags);
+		}
+
+		void destroyVertexBuffer(VertexBufferHandle _handle) override
+		{
+			m_vertexBuffers[_handle.idx].destroy();
+		}
+
+		void createDynamicIndexBuffer(IndexBufferHandle _handle, uint32_t _size, uint16_t _flags) override
+		{
+			m_indexBuffers[_handle.idx].create(_size, NULL, _flags);
+		}
+
+		void updateDynamicIndexBuffer(IndexBufferHandle _handle, uint32_t _offset, uint32_t _size, const Memory* _mem) override
+		{
+			m_indexBuffers[_handle.idx].update(_offset, bx::uint32_min(_size, _mem->size), _mem->data);
+		}
+
+		void destroyDynamicIndexBuffer(IndexBufferHandle _handle) override
+		{
+			m_indexBuffers[_handle.idx].destroy();
+		}
+
+		void createDynamicVertexBuffer(VertexBufferHandle _handle, uint32_t _size, uint16_t _flags) override
+		{
+			VertexLayoutHandle decl = BGFX_INVALID_HANDLE;
+			m_vertexBuffers[_handle.idx].create(_size, NULL, decl, _flags);
+		}
+
+		void updateDynamicVertexBuffer(VertexBufferHandle _handle, uint32_t _offset, uint32_t _size, const Memory* _mem) override
+		{
+			m_vertexBuffers[_handle.idx].update(_offset, bx::uint32_min(_size, _mem->size), _mem->data);
+		}
+
+		void destroyDynamicVertexBuffer(VertexBufferHandle _handle) override
+		{
+			m_vertexBuffers[_handle.idx].destroy();
+		}
+
+		void createShader(ShaderHandle _handle, const Memory* _mem) override
+		{
+			m_shaders[_handle.idx].create(_handle, _mem);
+		}
+
+		void destroyShader(ShaderHandle _handle) override
+		{
+			m_shaders[_handle.idx].destroy();
+		}
+
+		void createProgram(ProgramHandle _handle, ShaderHandle _vsh, ShaderHandle _fsh) override
+		{
+			m_program[_handle.idx].create(&m_shaders[_vsh.idx], isValid(_fsh) ? &m_shaders[_fsh.idx] : NULL);
+		}
+
+		void destroyProgram(ProgramHandle _handle) override
+		{
+			m_program[_handle.idx].destroy();
+		}
+
+		void* createTexture(TextureHandle _handle, const Memory* _mem, uint64_t _flags, uint8_t _skip) override
+		{
+			m_textures[_handle.idx].create(_handle, _mem, _flags, _skip);
+			return NULL;
+		}
+
+		void updateTextureBegin(TextureHandle /*_handle*/, uint8_t /*_side*/, uint8_t /*_mip*/) override
+		{
+		}
+
+		void updateTexture(TextureHandle _handle, uint8_t _side, uint8_t _mip, const Rect& _rect, uint16_t _z, uint16_t _depth, uint16_t _pitch, const Memory* _mem) override
+		{
+			m_textures[_handle.idx].update(_side, _mip, _rect, _z, _depth, _pitch, _mem);
+		}
+
+		void updateTextureEnd() override
+		{
+		}
+
+		void readback(ReadbackWgpu& readback,  const TextureWgpu& texture, void* _data)
+		{
+			m_cmd.kick(false, true);
+			m_cmd.begin();
+
+			if (readback.m_mapped)
+				return;
+
+			BX_CHECK(readback.m_mip < texture.m_numMips, "Invalid mip: %d, num mips: %d", readback.m_mip, texture.m_numMips);
+
+			uint32_t srcWidth  = bx::uint32_max(1, texture.m_width  >> readback.m_mip);
+			uint32_t srcHeight = bx::uint32_max(1, texture.m_height >> readback.m_mip);
+
+			const uint32_t bpp = bimg::getBitsPerPixel(bimg::TextureFormat::Enum(texture.m_textureFormat));
+			const uint32_t pitch = srcWidth * bpp / 8;
+
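+			// Align the destination row pitch to kMinBufferOffsetAlignment to satisfy WebGPU's
+			// buffer copy alignment requirements before allocating the readback buffer.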
+			const uint32_t dstpitch = bx::strideAlign(pitch, kMinBufferOffsetAlignment);
+
+			// TODO move inside ReadbackWgpu::create
+			if (!readback.m_buffer)
+			{
+				wgpu::BufferDescriptor desc;
+				desc.size = dstpitch * srcHeight;
+				desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
+
+				readback.m_buffer = m_device.CreateBuffer(&desc);
+			}
+
+			wgpu::TextureCopyView textureCopyView;
+			textureCopyView.texture = texture.m_ptr;
+			textureCopyView.origin = { 0, 0, 0 };
+
+			wgpu::BufferCopyView bufferCopyView;
+			bufferCopyView.buffer = readback.m_buffer;
+			bufferCopyView.bytesPerRow = dstpitch;
+			bufferCopyView.rowsPerImage = srcHeight;
+
+			wgpu::Extent3D extent3D = { srcWidth, srcHeight, 1 };
+			m_cmd.m_encoder.CopyTextureToBuffer(&textureCopyView, &bufferCopyView, &extent3D);
+
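+			// The readback is asynchronous: the copy is submitted, the staging buffer is mapped,
+			// and the pixel data is copied out in the completion callback below.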
+			auto finish = [](WGPUBufferMapAsyncStatus status, void const* data, uint64_t dataLength, void* userdata)
+			{
+				if(status == WGPUBufferMapAsyncStatus_Success)
+					static_cast<ReadbackWgpu*>(userdata)->readback(data, dataLength);
+			};
+
+			m_cmd.finish();
+
+			m_cmd.kick(true);
+
+			readback.m_mapped = true;
+			readback.m_data = _data;
+			readback.m_size = pitch * srcHeight;
+
+			readback.m_buffer.MapReadAsync(finish, &readback);
+		}
+
+		void readTexture(TextureHandle _handle, void* _data, uint8_t _mip) override
+		{
+			TextureWgpu& texture = m_textures[_handle.idx];
+
+			readback(texture.m_readback, texture, _data);
+		}
+
+		void resizeTexture(TextureHandle _handle, uint16_t _width, uint16_t _height, uint8_t _numMips, uint16_t _numLayers) override
+		{
+			TextureWgpu& texture = m_textures[_handle.idx];
+
+			uint32_t size = sizeof(uint32_t) + sizeof(TextureCreate);
+			const Memory* mem = alloc(size);
+
+			bx::StaticMemoryBlockWriter writer(mem->data, mem->size);
+			uint32_t magic = BGFX_CHUNK_MAGIC_TEX;
+			bx::write(&writer, magic);
+
+			TextureCreate tc;
+			tc.m_width     = _width;
+			tc.m_height    = _height;
+			tc.m_depth     = 0;
+			tc.m_numLayers = _numLayers;
+			tc.m_numMips   = _numMips;
+			tc.m_format    = TextureFormat::Enum(texture.m_requestedFormat);
+			tc.m_cubeMap   = false;
+			tc.m_mem       = NULL;
+			bx::write(&writer, tc);
+
+			texture.destroy();
+			texture.create(_handle, mem, texture.m_flags, 0);
+
+			release(mem);
+		}
+
+		void overrideInternal(TextureHandle _handle, uintptr_t _ptr) override
+		{
+			BX_UNUSED(_handle, _ptr);
+		}
+
+		uintptr_t getInternal(TextureHandle _handle) override
+		{
+			BX_UNUSED(_handle);
+			return 0;
+		}
+
+		void destroyTexture(TextureHandle _handle) override
+		{
+			m_textures[_handle.idx].destroy();
+		}
+
+		void createFrameBuffer(FrameBufferHandle _handle, uint8_t _num, const Attachment* _attachment) override
+		{
+			m_frameBuffers[_handle.idx].create(_num, _attachment);
+		}
+
+		void createFrameBuffer(FrameBufferHandle _handle, void* _nwh, uint32_t _width, uint32_t _height, TextureFormat::Enum _format, TextureFormat::Enum _depthFormat) override
+		{
+			for (uint32_t ii = 0, num = m_numWindows; ii < num; ++ii)
+			{
+				FrameBufferHandle handle = m_windows[ii];
+				if (isValid(handle)
+				&&  m_frameBuffers[handle.idx].m_nwh == _nwh)
+				{
+					destroyFrameBuffer(handle);
+				}
+			}
+
+			uint16_t denseIdx   = m_numWindows++;
+			m_windows[denseIdx] = _handle;
+
+			FrameBufferWgpu& fb = m_frameBuffers[_handle.idx];
+			fb.create(denseIdx, _nwh, _width, _height, _format, _depthFormat);
+			fb.m_swapChain->resize(m_frameBuffers[_handle.idx], _width, _height, 0);
+		}
+
+		void destroyFrameBuffer(FrameBufferHandle _handle) override
+		{
+			uint16_t denseIdx = m_frameBuffers[_handle.idx].destroy();
+
+			if (UINT16_MAX != denseIdx)
+			{
+				--m_numWindows;
+
+				if (m_numWindows > 1)
+				{
+					FrameBufferHandle handle = m_windows[m_numWindows];
+					m_windows[m_numWindows]  = {kInvalidHandle};
+
+					if (m_numWindows != denseIdx)
+					{
+						m_windows[denseIdx] = handle;
+						m_frameBuffers[handle.idx].m_denseIdx = denseIdx;
+					}
+				}
+			}
+		}
+
+		void createUniform(UniformHandle _handle, UniformType::Enum _type, uint16_t _num, const char* _name) override
+		{
+			if (NULL != m_uniforms[_handle.idx])
+			{
+				BX_FREE(g_allocator, m_uniforms[_handle.idx]);
+			}
+
+			uint32_t size = bx::alignUp(g_uniformTypeSize[_type]*_num, 16);
+			void* data = BX_ALLOC(g_allocator, size);
+			bx::memSet(data, 0, size);
+			m_uniforms[_handle.idx] = data;
+			m_uniformReg.add(_handle, _name);
+		}
+
+		void destroyUniform(UniformHandle _handle) override
+		{
+			BX_FREE(g_allocator, m_uniforms[_handle.idx]);
+			m_uniforms[_handle.idx] = NULL;
+			m_uniformReg.remove(_handle);
+		}
+
+		void requestScreenShotPre(const char* _filePath)
+		{
+			BX_UNUSED(_filePath);
+			//m_saveScreenshot = true;
+		}
+
+		void requestScreenShot(FrameBufferHandle _handle, const char* _filePath) override
+		{
+			BX_UNUSED(_handle); BX_UNUSED(_filePath);
+		}
+
+		void updateViewName(ViewId _id, const char* _name) override
+		{
+			bx::strCopy(
+				  &s_viewName[_id][BGFX_CONFIG_MAX_VIEW_NAME_RESERVED]
+				, BX_COUNTOF(s_viewName[0])-BGFX_CONFIG_MAX_VIEW_NAME_RESERVED
+				, _name
+				);
+		}
+
+		void updateUniform(uint16_t _loc, const void* _data, uint32_t _size) override
+		{
+			bx::memCopy(m_uniforms[_loc], _data, _size);
+		}
+
+		void invalidateOcclusionQuery(OcclusionQueryHandle _handle) override
+		{
+			BX_UNUSED(_handle);
+		}
+
+		void setMarker(const char* _marker, uint16_t _len) override
+		{
+			BX_UNUSED(_len);
+
+			if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION) )
+			{
+				m_renderEncoder.InsertDebugMarker(_marker);
+			}
+		}
+
+		virtual void setName(Handle _handle, const char* _name, uint16_t _len) override
+		{
+			BX_UNUSED(_len);
+
+			switch (_handle.type)
+			{
+			case Handle::IndexBuffer:
+				m_indexBuffers[_handle.idx].m_label.clear();
+				m_indexBuffers[_handle.idx].m_label.append(_name);
+				break;
+
+			case Handle::Shader:
+				m_shaders[_handle.idx].m_label.clear();
+				m_shaders[_handle.idx].m_label.append(_name);
+				break;
+
+			case Handle::Texture:
+				m_textures[_handle.idx].m_label.clear();
+				m_textures[_handle.idx].m_label.append(_name);
+				break;
+
+			case Handle::VertexBuffer:
+				m_vertexBuffers[_handle.idx].m_label.clear();
+				m_vertexBuffers[_handle.idx].m_label.append(_name);
+				break;
+
+			default:
+				BX_CHECK(false, "Invalid handle type?! %d", _handle.type);
+				break;
+			}
+		}
+
+		void submitBlit(BlitState& _bs, uint16_t _view);
+
+		void submit(Frame* _render, ClearQuad& _clearQuad, TextVideoMemBlitter& _textVideoMemBlitter) override;
+
+		void blitSetup(TextVideoMemBlitter& _blitter) override
+		{
+			BX_UNUSED(_blitter);
+		}
+
+		void blitRender(TextVideoMemBlitter& _blitter, uint32_t _numIndices) override
+		{
+			const uint32_t numVertices = _numIndices*4/6;
+			if (0 < numVertices)
+			{
+				m_indexBuffers [_blitter.m_ib->handle.idx].update(
+					  0
+					, bx::strideAlign(_numIndices*2, 4)
+					, _blitter.m_ib->data
+					, true
+					);
+				m_vertexBuffers[_blitter.m_vb->handle.idx].update(
+					  0
+					, numVertices*_blitter.m_layout.m_stride
+					, _blitter.m_vb->data
+					, true
+					);
+
+				endEncoding();
+
+				uint32_t width  = m_resolution.width;
+				uint32_t height = m_resolution.height;
+
+				FrameBufferHandle fbh = BGFX_INVALID_HANDLE;
+
+				uint64_t state = 0
+				| BGFX_STATE_WRITE_RGB
+				| BGFX_STATE_WRITE_A
+				| BGFX_STATE_DEPTH_TEST_ALWAYS
+				;
+
+				PipelineStateWgpu* pso = getPipelineState(
+														 state
+														 , 0
+														 , 0
+														 , fbh
+														 , _blitter.m_vb->layoutHandle
+														 , false
+														 , _blitter.m_program
+														 , 0
+														 );
+
+				RenderPassDescriptor renderPassDescriptor;
+				wgpu::RenderPassColorAttachmentDescriptor& color = renderPassDescriptor.colorAttachments[0];
+
+				setFrameBuffer(renderPassDescriptor, fbh);
+
+				color.loadOp = wgpu::LoadOp::Load;
+				color.storeOp = wgpu::StoreOp::Store;
+				//	NULL != renderPassDescriptor.colorAttachments[0].resolveTexture
+				//	? wgpu::StoreOp::MultisampleResolve
+				//	: wgpu::StoreOp::Store
+				//;
+
+				wgpu::RenderPassEncoder rce = m_cmd.m_encoder.BeginRenderPass(&renderPassDescriptor.desc);
+				m_renderEncoder = rce;
+
+				rce.SetViewport(0.0f, 0.0f, (float)width, (float)height, 0.0f, 1.0f);
+
+				rce.SetPipeline(pso->m_rps);
+
+				ProgramWgpu& program = m_program[_blitter.m_program.idx];
+
+				ScratchBufferWgpu& scratchBuffer = m_scratchBuffers[0];
+				BindStateCacheWgpu& bindStates = m_bindStateCache[0];
+
+				float proj[16];
+				bx::mtxOrtho(proj, 0.0f, (float)width, (float)height, 0.0f, 0.0f, 1000.0f, 0.0f, false);
+
+				PredefinedUniform& predefined = program.m_predefined[0];
+				uint8_t flags = predefined.m_type;
+				setShaderUniform(flags, predefined.m_loc, proj, 4);
+
+				BX_CHECK(program.m_vsh->m_size > 0, "Blit vertex shader is expected to have uniform data");
+				const uint32_t voffset = scratchBuffer.write(m_vsScratch, program.m_vsh->m_gpuSize);
+
+				const uint32_t fsize = (NULL != program.m_fsh ? program.m_fsh->m_gpuSize : 0);
+				BX_CHECK(fsize == 0, "Blit fragment shader is not expected to have uniform data");
+
+				TextureWgpu& texture = m_textures[_blitter.m_texture.idx];
+
+				BindingsWgpu b;
+
+				BindStateWgpu& bindState = allocBindState(program, bindStates, b, scratchBuffer);
+
+				wgpu::BindGroupEntry& textureEntry = b.m_entries[b.numEntries++];
+				textureEntry.binding = program.m_textures[0].binding;
+				textureEntry.textureView = texture.m_ptr.CreateView();
+
+				wgpu::BindGroupEntry& samplerEntry = b.m_entries[b.numEntries++];
+				samplerEntry.binding = program.m_samplers[0].binding;
+				samplerEntry.sampler = 0 == (BGFX_SAMPLER_INTERNAL_DEFAULT & state)
+					? getSamplerState(state)
+					: texture.m_sampler;
+
+				bindGroups(program, bindState, b);
+
+				uint32_t numOffset = 1;
+				uint32_t offsets[1] = { voffset };
+
+				bindProgram(rce, program, bindState, numOffset, offsets);
+
+				VertexBufferWgpu& vb = m_vertexBuffers[_blitter.m_vb->handle.idx];
+				rce.SetVertexBuffer(0, vb.m_ptr);
+
+				rce.SetIndexBuffer(m_indexBuffers[_blitter.m_ib->handle.idx].m_ptr);
+				rce.DrawIndexed(_numIndices, 1, 0, 0, 0);
+			}
+		}
+
+		bool isDeviceRemoved() override
+		{
+			return false;
+		}
+
+		void flip() override
+		{
+			for (uint32_t ii = 0, num = m_numWindows; ii < num; ++ii)
+			{
+				FrameBufferWgpu& frameBuffer = ii == 0 ? m_mainFrameBuffer : m_frameBuffers[m_windows[ii].idx];
+				if (NULL != frameBuffer.m_swapChain)
+				//&& frameBuffer.m_swapChain->m_drawable)
+				{
+					SwapChainWgpu& swapChain = *frameBuffer.m_swapChain;
+					swapChain.flip();
+				}
+			}
+
+			m_cmd.m_encoder = nullptr;
+		}
+
+		void updateResolution(const Resolution& _resolution)
+		{
+			m_resolution = _resolution;
+			return; // TODO (hugoam)
+
+			m_mainFrameBuffer.m_swapChain->m_maxAnisotropy = !!(_resolution.reset & BGFX_RESET_MAXANISOTROPY)
+				? 16
+				: 1
+				;
+
+			const uint32_t maskFlags = ~(0
+				| BGFX_RESET_MAXANISOTROPY
+				| BGFX_RESET_DEPTH_CLAMP
+				| BGFX_RESET_SUSPEND
+				);
+
+			if (m_resolution.width            !=  _resolution.width
+			||  m_resolution.height           !=  _resolution.height
+			|| (m_resolution.reset&maskFlags) != (_resolution.reset&maskFlags) )
+			{
+				wgpu::TextureFormat prevMetalLayerPixelFormat; // = m_mainFrameBuffer.m_swapChain->m_metalLayer.pixelFormat;
+				BX_UNUSED(prevMetalLayerPixelFormat);
+
+				m_resolution = _resolution;
+				m_resolution.reset &= ~BGFX_RESET_INTERNAL_FORCE;
+
+				m_mainFrameBuffer.m_swapChain->resize(m_mainFrameBuffer, _resolution.width, _resolution.height, _resolution.reset);
+
+				for (uint32_t ii = 0; ii < BX_COUNTOF(m_frameBuffers); ++ii)
+				{
+					m_frameBuffers[ii].postReset();
+				}
+
+				updateCapture();
+
+				m_textVideoMem.resize(false, _resolution.width, _resolution.height);
+				m_textVideoMem.clear();
+
+				//if (prevMetalLayerPixelFormat != m_mainFrameBuffer.m_swapChain->m_metalLayer.pixelFormat)
+				{
+					//MTL_RELEASE(m_screenshotBlitRenderPipelineState)
+					//reset(m_renderPipelineDescriptor);
+
+					//m_renderPipelineDescriptor.colorAttachments[0].pixelFormat = m_mainFrameBuffer.m_swapChain->m_metalLayer.pixelFormat;
+					//m_renderPipelineDescriptor.vertexFunction   = m_screenshotBlitProgram.m_vsh->m_function;
+					//m_renderPipelineDescriptor.fragmentFunction = m_screenshotBlitProgram.m_fsh->m_function;
+					//m_screenshotBlitRenderPipelineState = m_device.newRenderPipelineStateWithDescriptor(m_renderPipelineDescriptor);
+				}
+			}
+		}
+
+		void invalidateCompute()
+		{
+			if (m_computeEncoder)
+			{
+				m_computeEncoder.EndPass();
+				m_computeEncoder = nullptr;
+			}
+		}
+
+		void updateCapture()
+		{
+		}
+
+		void capture()
+		{
+		}
+
+		void captureFinish()
+		{
+		}
+
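+		// Allocates a bind state from the per-frame cache and pre-fills its uniform buffer
+		// entries (vertex at binding 0, fragment at binding 48) backed by the scratch buffer.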
+		BindStateWgpu& allocBindState(const ProgramWgpu& program, BindStateCacheWgpu& bindStates, BindingsWgpu& bindings, ScratchBufferWgpu& scratchBuffer)
+		{
+			BindStateWgpu& bindState = bindStates.m_bindStates[bindStates.m_currentBindState];
+			bindStates.m_currentBindState++;
+
+			bindState.numOffset = program.m_numUniforms;
+
+			// first two bindings are always uniform buffer (vertex/fragment)
+			bindings.m_entries[0].binding = 0;
+			bindings.m_entries[0].offset = 0;
+			bindings.m_entries[0].size = program.m_vsh->m_gpuSize;
+			bindings.m_entries[0].buffer = scratchBuffer.m_buffer;
+			bindings.numEntries++;
+
+			if (NULL != program.m_fsh
+			&& 0 < program.m_fsh->m_gpuSize)
+			{
+				bindings.m_entries[1].binding = 48;
+				bindings.m_entries[1].offset = 0;
+				bindings.m_entries[1].size = program.m_fsh->m_gpuSize;
+				bindings.m_entries[1].buffer = scratchBuffer.m_buffer;
+				bindings.numEntries++;
+			}
+
+			return bindState;
+		}
+
+		void bindGroups(const ProgramWgpu& program, BindStateWgpu& bindState, BindingsWgpu& bindings)
+		{
+			wgpu::BindGroupDescriptor bindGroupDesc;
+			bindGroupDesc.layout = program.m_bindGroupLayout;
+			bindGroupDesc.entryCount = bindings.numEntries;
+			bindGroupDesc.entries = bindings.m_entries;
+
+			bindState.m_bindGroup = m_device.CreateBindGroup(&bindGroupDesc);
+		}
+
+		template <class Encoder>
+		void bindProgram(Encoder& encoder, const ProgramWgpu& program, BindStateWgpu& bindState, uint32_t numOffset, uint32_t* offsets)
+		{
+			BX_CHECK(bindState.numOffset == numOffset, "Bind state offset count (%d) does not match the number of offsets provided (%d)", bindState.numOffset, numOffset);
+			encoder.SetBindGroup(0, bindState.m_bindGroup, numOffset, offsets);
+		}
+		
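+		// Builds a complete bind state from the current render bindings: images get a mip-level
+		// view, textures a view/sampler pair, and index/vertex buffers a buffer entry.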
+		BindStateWgpu& allocAndFillBindState(const ProgramWgpu& program, BindStateCacheWgpu& bindStates, ScratchBufferWgpu& scratchBuffer, const RenderBind& renderBind)
+		{
+			BindingsWgpu b;
+
+			BindStateWgpu& bindState = allocBindState(program, bindStates, b, scratchBuffer);
+
+			for (uint8_t stage = 0; stage < BGFX_CONFIG_MAX_TEXTURE_SAMPLERS; ++stage)
+			{
+				const Binding& bind = renderBind.m_bind[stage];
+				const BindInfo& bindInfo = program.m_bindInfo[stage];
+
+				bool isUsed = isValid(program.m_bindInfo[stage].m_uniform);
+
+				BX_CHECK(!isUsed || kInvalidHandle != bind.m_idx, "All expected bindings must be bound with WebGPU");
+
+				if (kInvalidHandle != bind.m_idx)
+				{
+					switch (bind.m_type)
+					{
+					case Binding::Image:
+					{
+						TextureWgpu& texture = m_textures[bind.m_idx];
+						wgpu::BindGroupEntry& entry = b.m_entries[b.numEntries++];
+						entry.binding = bindInfo.m_binding;
+						entry.textureView = texture.getTextureMipLevel(bind.m_mip);
+					}
+					break;
+
+					case Binding::Texture:
+					{
+						// bgfx allows setting a texture on a stage that the program does not even use
+						if (isUsed)
+						{
+							TextureWgpu& texture = m_textures[bind.m_idx];
+							uint32_t flags = bind.m_samplerFlags;
+
+							wgpu::TextureViewDescriptor viewDesc = defaultDescriptor<wgpu::TextureViewDescriptor>();
+							viewDesc.dimension = program.m_textures[bindInfo.m_index].viewDimension;
+
+							wgpu::BindGroupEntry& textureEntry = b.m_entries[b.numEntries++];
+							textureEntry.binding = bindInfo.m_binding;
+							//textureEntry.textureView = texture.m_ptr.CreateView();
+							textureEntry.textureView = texture.m_ptr.CreateView(&viewDesc);
+
+							wgpu::BindGroupEntry& samplerEntry = b.m_entries[b.numEntries++];
+							samplerEntry.binding = bindInfo.m_binding + 16;
+							samplerEntry.sampler = 0 == (BGFX_SAMPLER_INTERNAL_DEFAULT & flags)
+								? getSamplerState(flags)
+								: texture.m_sampler;
+						}
+					}
+					break;
+
+					case Binding::IndexBuffer:
+					case Binding::VertexBuffer:
+					{
+						const BufferWgpu& buffer = Binding::IndexBuffer == bind.m_type
+							? m_indexBuffers[bind.m_idx]
+							: m_vertexBuffers[bind.m_idx]
+							;
+
+						wgpu::BindGroupEntry& entry = b.m_entries[b.numEntries++];
+						entry.binding = bindInfo.m_binding;
+						entry.offset = 0;
+						entry.size = buffer.m_size;
+						entry.buffer = buffer.m_ptr;
+					}
+					break;
+					}
+				}
+			}
+
+			bindGroups(program, bindState, b);
+
+			return bindState;
+		}
+
+		void setShaderUniform(uint8_t _flags, uint32_t _regIndex, const void* _val, uint32_t _numRegs)
+		{
+			if(_flags&BGFX_UNIFORM_FRAGMENTBIT)
+			{
+				bx::memCopy(&m_fsScratch[_regIndex], _val, _numRegs * 16);
+			}
+			else
+			{
+				bx::memCopy(&m_vsScratch[_regIndex], _val, _numRegs * 16);
+			}
+		}
+
+		void setShaderUniform4f(uint8_t _flags, uint32_t _loc, const void* _val, uint32_t _numRegs)
+		{
+			setShaderUniform(_flags, _loc, _val, _numRegs);
+		}
+
+		void setShaderUniform4x4f(uint8_t _flags, uint32_t _loc, const void* _val, uint32_t _numRegs)
+		{
+			setShaderUniform(_flags, _loc, _val, _numRegs);
+		}
+
+		void commitShaderConstants(ScratchBufferWgpu& _scratchBuffer, const ProgramWgpu& _program, uint32_t _vertexOffset, uint32_t _fragmentOffset)
+		{
+			const uint32_t size = _program.m_vsh->m_size;
+			if (0 != size)
+				_scratchBuffer.write(m_vsScratch, size);
+
+			if(NULL != _program.m_fsh)
+			{
+				const uint32_t size = _program.m_fsh->m_size;
+				if(0 != size)
+					_scratchBuffer.write(m_fsScratch, size);
+			}
+		}
+
+		void commit(UniformBuffer& _uniformBuffer)
+		{
+			_uniformBuffer.reset();
+
+			for (;;)
+			{
+				uint32_t opcode = _uniformBuffer.read();
+
+				if (UniformType::End == opcode)
+				{
+					break;
+				}
+
+				UniformType::Enum type;
+				uint16_t loc;
+				uint16_t num;
+				uint16_t copy;
+				UniformBuffer::decodeOpcode(opcode, type, loc, num, copy);
+
+				const char* data;
+				if (copy)
+				{
+					data = _uniformBuffer.read(g_uniformTypeSize[type]*num);
+				}
+				else
+				{
+					UniformHandle handle;
+					bx::memCopy(&handle, _uniformBuffer.read(sizeof(UniformHandle) ), sizeof(UniformHandle) );
+					data = (const char*)m_uniforms[handle.idx];
+				}
+
+				switch ( (uint32_t)type)
+				{
+				case UniformType::Mat3:
+				case UniformType::Mat3|BGFX_UNIFORM_FRAGMENTBIT:
+					{
+						float* value = (float*)data;
+						for (uint32_t ii = 0, count = num/3; ii < count; ++ii,  loc += 3*16, value += 9)
+						{
+							Matrix4 mtx;
+							mtx.un.val[ 0] = value[0];
+							mtx.un.val[ 1] = value[1];
+							mtx.un.val[ 2] = value[2];
+							mtx.un.val[ 3] = 0.0f;
+							mtx.un.val[ 4] = value[3];
+							mtx.un.val[ 5] = value[4];
+							mtx.un.val[ 6] = value[5];
+							mtx.un.val[ 7] = 0.0f;
+							mtx.un.val[ 8] = value[6];
+							mtx.un.val[ 9] = value[7];
+							mtx.un.val[10] = value[8];
+							mtx.un.val[11] = 0.0f;
+							setShaderUniform(uint8_t(type), loc, &mtx.un.val[0], 3);
+						}
+					}
+					break;
+
+				case UniformType::Sampler:
+				case UniformType::Sampler | BGFX_UNIFORM_FRAGMENTBIT:
+				case UniformType::Vec4:
+				case UniformType::Vec4 | BGFX_UNIFORM_FRAGMENTBIT:
+				case UniformType::Mat4:
+				case UniformType::Mat4 | BGFX_UNIFORM_FRAGMENTBIT:
+					{
+						setShaderUniform(uint8_t(type), loc, data, num);
+					}
+					break;
+				case UniformType::End:
+					break;
+
+				default:
+					BX_TRACE("%4d: INVALID 0x%08x, t %d, l %d, n %d, c %d", _uniformBuffer.getPos(), opcode, type, loc, num, copy);
+					break;
+				}
+			}
+		}
+
+		void clearQuad(ClearQuad& _clearQuad, const Rect& /*_rect*/, const Clear& _clear, const float _palette[][4])
+		{
+			uint32_t width;
+			uint32_t height;
+
+			if (isValid(m_fbh) )
+			{
+				const FrameBufferWgpu& fb = m_frameBuffers[m_fbh.idx];
+				width  = fb.m_width;
+				height = fb.m_height;
+			}
+			else
+			{
+				width  = m_resolution.width;
+				height = m_resolution.height;
+			}
+
+			uint64_t state = 0;
+			state |= _clear.m_flags & BGFX_CLEAR_COLOR ? BGFX_STATE_WRITE_RGB|BGFX_STATE_WRITE_A         : 0;
+			state |= _clear.m_flags & BGFX_CLEAR_DEPTH ? BGFX_STATE_DEPTH_TEST_ALWAYS|BGFX_STATE_WRITE_Z : 0;
+			state |= BGFX_STATE_PT_TRISTRIP;
+
+			uint64_t stencil = 0;
+			stencil |= _clear.m_flags & BGFX_CLEAR_STENCIL ? 0
+				| BGFX_STENCIL_TEST_ALWAYS
+				| BGFX_STENCIL_FUNC_REF(_clear.m_stencil)
+				| BGFX_STENCIL_FUNC_RMASK(0xff)
+				| BGFX_STENCIL_OP_FAIL_S_REPLACE
+				| BGFX_STENCIL_OP_FAIL_Z_REPLACE
+				| BGFX_STENCIL_OP_PASS_Z_REPLACE
+				: 0
+				;
+
+			uint32_t numMrt = 1;
+			FrameBufferHandle fbh = m_fbh;
+			if (isValid(fbh) && m_frameBuffers[fbh.idx].m_swapChain == NULL)
+			{
+				const FrameBufferWgpu& fb = m_frameBuffers[fbh.idx];
+				numMrt = bx::uint32_max(1, fb.m_num);
+			}
+
+			wgpu::RenderPassEncoder rce = m_renderEncoder;
+			ProgramHandle programHandle = _clearQuad.m_program[numMrt-1];
+
+			const VertexLayout* decl = &_clearQuad.m_layout;
+			const PipelineStateWgpu* pso = getPipelineState(
+				  state
+				, stencil
+				, 0
+				, fbh
+				, 1
+				, &decl
+				, false
+				, programHandle
+				, 0
+				);
+			rce.SetPipeline(pso->m_rps);
+
+			float mrtClearColor[BGFX_CONFIG_MAX_FRAME_BUFFER_ATTACHMENTS][4];
+			float mrtClearDepth[4] = { _clear.m_depth };
+
+			if (BGFX_CLEAR_COLOR_USE_PALETTE & _clear.m_flags)
+			{
+				for (uint32_t ii = 0; ii < numMrt; ++ii)
+				{
+					uint8_t index = (uint8_t)bx::uint32_min(BGFX_CONFIG_MAX_COLOR_PALETTE-1, _clear.m_index[ii]);
+					bx::memCopy(mrtClearColor[ii], _palette[index], 16);
+				}
+			}
+			else
+			{
+				float rgba[4] =
+				{
+					_clear.m_index[0]*1.0f/255.0f,
+					_clear.m_index[1]*1.0f/255.0f,
+					_clear.m_index[2]*1.0f/255.0f,
+					_clear.m_index[3]*1.0f/255.0f,
+				};
+
+				for (uint32_t ii = 0; ii < numMrt; ++ii)
+				{
+					bx::memCopy( mrtClearColor[ii]
+								, rgba
+								, 16
+								);
+				}
+			}
+
+			ProgramWgpu& program = m_program[programHandle.idx];
+
+			ScratchBufferWgpu& scratchBuffer = m_scratchBuffers[0];
+			BindStateCacheWgpu& bindStates = m_bindStateCache[0];
+
+			BindingsWgpu b;
+			BindStateWgpu& bindState = allocBindState(program, bindStates, b, scratchBuffer);
+
+			const uint32_t voffset = scratchBuffer.write(mrtClearDepth, sizeof(mrtClearDepth), program.m_vsh->m_gpuSize);
+			const uint32_t foffset = scratchBuffer.write(mrtClearColor, sizeof(mrtClearColor), program.m_fsh->m_gpuSize);
+
+			uint32_t numOffset = 2;
+			uint32_t offsets[2] = { voffset, foffset };
+
+			bindGroups(program, bindState, b);
+
+			const VertexBufferWgpu& vb = m_vertexBuffers[_clearQuad.m_vb.idx];
+
+			bindProgram(rce, program, bindState, numOffset, offsets);
+
+			rce.SetVertexBuffer(0, vb.m_ptr);
+			rce.Draw(4, 1, 0, 0);
+		}
+
+		wgpu::TextureViewDescriptor attachmentView(const Attachment& _at, const TextureWgpu& _texture)
+		{
+			bool _resolve = bool(_texture.m_ptrMsaa);
+			BX_UNUSED(_resolve);
+
+			wgpu::TextureViewDescriptor desc;
+			if (1 < _texture.m_numSides)
+			{
+				desc.baseArrayLayer = _at.layer;
+			}
+			desc.baseMipLevel = _at.mip;
+			desc.arrayLayerCount = 1;
+			desc.mipLevelCount = 1;
+
+			if (_texture.m_type == TextureWgpu::Texture3D)
+			{
+				desc.dimension = wgpu::TextureViewDimension::e3D;
+			}
+
+			return desc;
+		}
+
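+		// Fills the render pass color and depth-stencil attachments either from the swap chain
+		// back buffer(s) or from the textures attached to the frame buffer, wiring up MSAA
+		// resolve targets when present.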
+		void setFrameBuffer(RenderPassDescriptor& _renderPassDescriptor, FrameBufferHandle _fbh, bool _msaa = true)
+		{
+			if (!isValid(_fbh)
+			||  m_frameBuffers[_fbh.idx].m_swapChain)
+			{
+				SwapChainWgpu* swapChain = !isValid(_fbh)
+					? m_mainFrameBuffer.m_swapChain
+					: m_frameBuffers[_fbh.idx].m_swapChain
+					;
+
+				_renderPassDescriptor.colorAttachments[0] = defaultDescriptor<wgpu::RenderPassColorAttachmentDescriptor>();
+				_renderPassDescriptor.desc.colorAttachmentCount = 1;
+
+				// Force a single array layer for attachments
+				wgpu::TextureViewDescriptor desc;
+				desc.arrayLayerCount = 1;
+
+				if (swapChain->m_backBufferColorMsaa)
+				{
+					_renderPassDescriptor.colorAttachments[0].attachment    = swapChain->m_backBufferColorMsaa.CreateView(&desc);
+					_renderPassDescriptor.colorAttachments[0].resolveTarget = swapChain->current();
+				}
+				else
+				{
+					_renderPassDescriptor.colorAttachments[0].attachment = swapChain->current();
+				}
+
+				_renderPassDescriptor.depthStencilAttachment = defaultDescriptor<wgpu::RenderPassDepthStencilAttachmentDescriptor>();
+				_renderPassDescriptor.depthStencilAttachment.attachment = swapChain->m_backBufferDepth.CreateView();
+				_renderPassDescriptor.desc.depthStencilAttachment = &_renderPassDescriptor.depthStencilAttachment;
+			}
+			else
+			{
+				FrameBufferWgpu& frameBuffer = m_frameBuffers[_fbh.idx];
+
+				_renderPassDescriptor.desc.colorAttachmentCount = frameBuffer.m_num;
+
+				for (uint32_t ii = 0; ii < frameBuffer.m_num; ++ii)
+				{
+					const TextureWgpu& texture = m_textures[frameBuffer.m_colorHandle[ii].idx];
+
+					const wgpu::TextureViewDescriptor desc = attachmentView(frameBuffer.m_colorAttachment[ii], texture);
+
+					_renderPassDescriptor.colorAttachments[ii] = defaultDescriptor<wgpu::RenderPassColorAttachmentDescriptor>();
+					_renderPassDescriptor.colorAttachments[ii].attachment = texture.m_ptrMsaa
+						? texture.m_ptrMsaa.CreateView(&desc)
+						: texture.m_ptr.CreateView(&desc)
+						;
+					_renderPassDescriptor.colorAttachments[ii].resolveTarget = texture.m_ptrMsaa
+						? texture.m_ptr.CreateView(&desc)
+						: wgpu::TextureView()
+						;
+				}
+
+				if (isValid(frameBuffer.m_depthHandle) )
+				{
+					const TextureWgpu& texture = m_textures[frameBuffer.m_depthHandle.idx];
+					const wgpu::TextureViewDescriptor desc = attachmentView(frameBuffer.m_depthAttachment, texture);
+
+					_renderPassDescriptor.depthStencilAttachment = defaultDescriptor<wgpu::RenderPassDepthStencilAttachmentDescriptor>();
+					_renderPassDescriptor.depthStencilAttachment.attachment = texture.m_ptrMsaa
+						? texture.m_ptrMsaa.CreateView(&desc)
+						: texture.m_ptr.CreateView(&desc)
+						;
+
+					_renderPassDescriptor.desc.depthStencilAttachment = &_renderPassDescriptor.depthStencilAttachment;
+				}
+			}
+
+			m_fbh    = _fbh;
+			m_rtMsaa = _msaa;
+		}
+
+		void setDepthStencilState(wgpu::DepthStencilStateDescriptor& desc, uint64_t _state, uint64_t _stencil = 0)
+		{
+			const uint32_t fstencil = unpackStencil(0, _stencil);
+			const uint32_t func = (_state&BGFX_STATE_DEPTH_TEST_MASK) >> BGFX_STATE_DEPTH_TEST_SHIFT;
+
+			desc.depthWriteEnabled = !!(BGFX_STATE_WRITE_Z & _state);
+			desc.depthCompare = s_cmpFunc[func];
+
+			uint32_t bstencil = unpackStencil(1, _stencil);
+			const uint32_t frontAndBack = bstencil != BGFX_STENCIL_NONE && bstencil != fstencil;
+			bstencil = frontAndBack ? bstencil : fstencil;
+
+			desc.stencilFront = defaultDescriptor<wgpu::StencilStateFaceDescriptor>();
+			desc.stencilBack = defaultDescriptor<wgpu::StencilStateFaceDescriptor>();
+
+			if (0 != _stencil)
+			{
+				// TODO (hugoam)
+				const uint32_t readMask  = (fstencil&BGFX_STENCIL_FUNC_RMASK_MASK)>>BGFX_STENCIL_FUNC_RMASK_SHIFT;
+				const uint32_t writeMask = 0xff;
+
+				desc.stencilReadMask  = readMask;
+				desc.stencilWriteMask = writeMask;
+
+				desc.stencilFront.failOp      = s_stencilOp[(fstencil&BGFX_STENCIL_OP_FAIL_S_MASK)>>BGFX_STENCIL_OP_FAIL_S_SHIFT];
+				desc.stencilFront.depthFailOp = s_stencilOp[(fstencil&BGFX_STENCIL_OP_FAIL_Z_MASK)>>BGFX_STENCIL_OP_FAIL_Z_SHIFT];
+				desc.stencilFront.passOp      = s_stencilOp[(fstencil&BGFX_STENCIL_OP_PASS_Z_MASK)>>BGFX_STENCIL_OP_PASS_Z_SHIFT];
+				desc.stencilFront.compare     = s_cmpFunc[(fstencil&BGFX_STENCIL_TEST_MASK)>>BGFX_STENCIL_TEST_SHIFT];
+
+				desc.stencilBack.failOp      = s_stencilOp[(bstencil&BGFX_STENCIL_OP_FAIL_S_MASK)>>BGFX_STENCIL_OP_FAIL_S_SHIFT];
+				desc.stencilBack.depthFailOp = s_stencilOp[(bstencil&BGFX_STENCIL_OP_FAIL_Z_MASK)>>BGFX_STENCIL_OP_FAIL_Z_SHIFT];
+				desc.stencilBack.passOp      = s_stencilOp[(bstencil&BGFX_STENCIL_OP_PASS_Z_MASK)>>BGFX_STENCIL_OP_PASS_Z_SHIFT];
+				desc.stencilBack.compare     = s_cmpFunc[(bstencil&BGFX_STENCIL_TEST_MASK)>>BGFX_STENCIL_TEST_SHIFT];
+			}
+		}
+
+		RenderPassStateWgpu* getRenderPassState(bgfx::FrameBufferHandle fbh, bool clear, Clear clr)
+		{
+			bx::HashMurmur2A murmur;
+			murmur.begin();
+			murmur.add(fbh.idx);
+			murmur.add(clear);
+			murmur.add(&clr, sizeof(clr));
+			uint32_t hash = murmur.end();
+
+			RenderPassStateWgpu* rps = m_renderPassStateCache.find(hash);
+
+			if (NULL == rps)
+			{
+				rps = BX_NEW(g_allocator, RenderPassStateWgpu);
+				m_renderPassStateCache.add(hash, rps);
+			}
+
+			return rps;
+		}
+
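+		// Render pipelines are cached: the relevant render state, stencil state, frame buffer
+		// formats, shader hashes and vertex layouts are hashed and looked up before a new
+		// wgpu::RenderPipeline is created.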
+		PipelineStateWgpu* getPipelineState(
+			  uint64_t _state
+			, uint64_t _stencil
+			, uint32_t _rgba
+			, FrameBufferHandle _fbh
+			, uint8_t _numStreams
+			, const VertexLayout** _vertexDecls
+			, bool _index32
+			, ProgramHandle _program
+			, uint8_t _numInstanceData
+			)
+		{
+			_state &= 0
+				| BGFX_STATE_WRITE_RGB
+				| BGFX_STATE_WRITE_A
+				| BGFX_STATE_WRITE_Z
+				| BGFX_STATE_DEPTH_TEST_MASK
+				| BGFX_STATE_BLEND_MASK
+				| BGFX_STATE_BLEND_EQUATION_MASK
+				| BGFX_STATE_BLEND_INDEPENDENT
+				| BGFX_STATE_BLEND_ALPHA_TO_COVERAGE
+				| BGFX_STATE_CULL_MASK
+				| BGFX_STATE_MSAA
+				| BGFX_STATE_LINEAA
+				| BGFX_STATE_CONSERVATIVE_RASTER
+				| BGFX_STATE_PT_MASK
+				;
+
+			const bool independentBlendEnable = !!(BGFX_STATE_BLEND_INDEPENDENT & _state);
+			const ProgramWgpu& program = m_program[_program.idx];
+
+			bx::HashMurmur2A murmur;
+			murmur.begin();
+			murmur.add(_state);
+			murmur.add(_stencil);
+			murmur.add(independentBlendEnable ? _rgba : 0);
+			murmur.add(_numInstanceData);
+
+			FrameBufferWgpu& frameBuffer = !isValid(_fbh) ? m_mainFrameBuffer : m_frameBuffers[_fbh.idx];
+			murmur.add(frameBuffer.m_pixelFormatHash);
+
+			murmur.add(program.m_vsh->m_hash);
+			if (NULL != program.m_fsh)
+			{
+				murmur.add(program.m_fsh->m_hash);
+			}
+
+			for (uint8_t ii = 0; ii < _numStreams; ++ii)
+			{
+				murmur.add(_vertexDecls[ii]->m_hash);
+			}
+
+			uint32_t hash = murmur.end();
+
+			PipelineStateWgpu* pso = m_pipelineStateCache.find(hash);
+
+			if (NULL == pso)
+			{
+				pso = BX_NEW(g_allocator, PipelineStateWgpu);
+
+				//pd.alphaToCoverageEnabled = !!(BGFX_STATE_BLEND_ALPHA_TO_COVERAGE & _state);
+
+				RenderPipelineDescriptor& pd = pso->m_rpd;
+
+				uint32_t frameBufferAttachment = 1;
+				uint32_t sampleCount = 1;
+
+				if (!isValid(_fbh)
+				||  s_renderWgpu->m_frameBuffers[_fbh.idx].m_swapChain)
+				{
+					SwapChainWgpu& swapChain = !isValid(_fbh)
+						? *s_renderWgpu->m_mainFrameBuffer.m_swapChain
+						: *s_renderWgpu->m_frameBuffers[_fbh.idx].m_swapChain
+						;
+					sampleCount = swapChain.m_backBufferColorMsaa
+						? swapChain.m_sampleCount
+						: 1
+						;
+					pd.colorStates[0].format = swapChain.m_colorFormat;
+					pd.depthStencilState.format = swapChain.m_depthFormat;
+					pd.desc.depthStencilState = &pd.depthStencilState;
+				}
+				else
+				{
+					frameBufferAttachment = frameBuffer.m_num;
+
+					for (uint32_t ii = 0; ii < frameBuffer.m_num; ++ii)
+					{
+						const TextureWgpu& texture = m_textures[frameBuffer.m_colorHandle[ii].idx];
+						sampleCount = texture.m_ptrMsaa
+							? texture.m_sampleCount
+							: 1
+							;
+						pd.colorStates[ii].format = s_textureFormat[texture.m_textureFormat].m_fmt;
+					}
+
+					pd.desc.colorStateCount = frameBuffer.m_num;
+
+					if (isValid(frameBuffer.m_depthHandle) )
+					{
+						const TextureWgpu& texture = m_textures[frameBuffer.m_depthHandle.idx];
+						pd.depthStencilState.format = s_textureFormat[texture.m_textureFormat].m_fmt;
+						pd.desc.depthStencilState = &pd.depthStencilState;
+					}
+				}
+
+				const uint32_t blend    = uint32_t( (_state&BGFX_STATE_BLEND_MASK         )>>BGFX_STATE_BLEND_SHIFT);
+				const uint32_t equation = uint32_t( (_state&BGFX_STATE_BLEND_EQUATION_MASK)>>BGFX_STATE_BLEND_EQUATION_SHIFT);
+
+				const uint32_t srcRGB = (blend    )&0xf;
+				const uint32_t dstRGB = (blend>> 4)&0xf;
+				const uint32_t srcA   = (blend>> 8)&0xf;
+				const uint32_t dstA   = (blend>>12)&0xf;
+
+				const uint32_t equRGB = (equation   )&0x7;
+				const uint32_t equA   = (equation>>3)&0x7;
+
+				wgpu::ColorWriteMask writeMask = wgpu::ColorWriteMask::None;
+				writeMask |= (_state&BGFX_STATE_WRITE_R) ? wgpu::ColorWriteMask::Red   : wgpu::ColorWriteMask::None;
+				writeMask |= (_state&BGFX_STATE_WRITE_G) ? wgpu::ColorWriteMask::Green : wgpu::ColorWriteMask::None;
+				writeMask |= (_state&BGFX_STATE_WRITE_B) ? wgpu::ColorWriteMask::Blue  : wgpu::ColorWriteMask::None;
+				writeMask |= (_state&BGFX_STATE_WRITE_A) ? wgpu::ColorWriteMask::Alpha : wgpu::ColorWriteMask::None;
+
+				for (uint32_t ii = 0; ii < (independentBlendEnable ? 1 : frameBufferAttachment); ++ii)
+				{
+					wgpu::ColorStateDescriptor& drt = pd.colorStates[ii]; // = pd.colorAttachments[ii];
+
+					if(!(BGFX_STATE_BLEND_MASK & _state))
+					{
+						drt.colorBlend = defaultDescriptor<wgpu::BlendDescriptor>();
+						drt.alphaBlend = defaultDescriptor<wgpu::BlendDescriptor>();
+					}
+					else
+					{
+						drt.colorBlend.srcFactor = s_blendFactor[srcRGB][0];
+						drt.colorBlend.dstFactor = s_blendFactor[dstRGB][0];
+						drt.colorBlend.operation = s_blendEquation[equRGB];
+
+						drt.alphaBlend.srcFactor = s_blendFactor[srcA][1];
+						drt.alphaBlend.dstFactor = s_blendFactor[dstA][1];
+						drt.alphaBlend.operation = s_blendEquation[equA];
+					}
+
+					drt.writeMask = writeMask;
+				}
+
+				if (independentBlendEnable)
+				{
+					for (uint32_t ii = 1, rgba = _rgba; ii < frameBufferAttachment; ++ii, rgba >>= 11)
+					{
+						wgpu::ColorStateDescriptor drt = pd.colorStates[ii]; // = pd.colorAttachments[ii];
+
+						//drt.blendingEnabled = 0 != (rgba&0x7ff);
+
+						const uint32_t src           = (rgba   )&0xf;
+						const uint32_t dst           = (rgba>>4)&0xf;
+						const uint32_t equationIndex = (rgba>>8)&0x7;
+						
+						drt.colorBlend.srcFactor  = s_blendFactor[src][0];
+						drt.colorBlend.dstFactor  = s_blendFactor[dst][0];
+						drt.colorBlend.operation  = s_blendEquation[equationIndex];
+
+						drt.alphaBlend.srcFactor  = s_blendFactor[src][1];
+						drt.alphaBlend.dstFactor  = s_blendFactor[dst][1];
+						drt.alphaBlend.operation  = s_blendEquation[equationIndex];
+
+						drt.writeMask = writeMask;
+					}
+				}
+
+				pd.desc.vertexStage.module = program.m_vsh->m_module;
+				pd.fragmentStage.module = program.m_fsh != NULL ? program.m_fsh->m_module : wgpu::ShaderModule();
+
+				setDepthStencilState(pd.depthStencilState, _state, _stencil);
+
+				const uint64_t cull = _state & BGFX_STATE_CULL_MASK;
+				const uint8_t cullIndex = uint8_t(cull >> BGFX_STATE_CULL_SHIFT);
+				pd.rasterizationState.cullMode = s_cullMode[cullIndex];
+
+				pd.rasterizationState.frontFace = (_state & BGFX_STATE_FRONT_CCW) ? wgpu::FrontFace::CCW : wgpu::FrontFace::CW;
+
+				// pd.desc = m_renderPipelineDescriptor;
+				pd.desc.sampleCount = sampleCount;
+
+				wgpu::PipelineLayoutDescriptor layout = defaultDescriptor<wgpu::PipelineLayoutDescriptor>();
+				layout.bindGroupLayouts = &program.m_bindGroupLayout;
+				layout.bindGroupLayoutCount = 1;
+
+				pd.desc.layout = m_device.CreatePipelineLayout(&layout);
+				// TODO (hugoam) this should be cached too ?
+
+				//uint32_t ref = (_state&BGFX_STATE_ALPHA_REF_MASK) >> BGFX_STATE_ALPHA_REF_SHIFT;
+				//viewState.m_alphaRef = ref / 255.0f;
+
+				const uint64_t primType = _state & BGFX_STATE_PT_MASK;
+				uint8_t primIndex = uint8_t(primType >> BGFX_STATE_PT_SHIFT);
+
+				PrimInfo prim = s_primInfo[primIndex];
+				pd.desc.primitiveTopology = prim.m_type;
+
+				VertexStateDescriptor input;
+				input.desc.vertexBufferCount = 0;
+
+				wgpu::VertexBufferLayoutDescriptor* inputBinding = input.vertexBuffers;
+				wgpu::VertexAttributeDescriptor* inputAttrib = input.attributes;
+
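+				// Translates a bgfx vertex layout into a wgpu vertex buffer layout, remapping each
+				// attribute to the shader location expected by the vertex shader.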
+				auto fillVertexDecl = [&](const ShaderWgpu* _vsh, const VertexLayout& _decl)
+				{
+					input.desc.vertexBufferCount += 1;
+
+					inputBinding->arrayStride = _decl.m_stride;
+					inputBinding->stepMode = wgpu::InputStepMode::Vertex;
+					inputBinding->attributes = inputAttrib;
+
+					uint32_t numAttribs = 0;
+
+					for(uint32_t attr = 0; attr < Attrib::Count; ++attr)
+					{
+						if(UINT16_MAX != _decl.m_attributes[attr])
+						{
+							if(UINT8_MAX == _vsh->m_attrRemap[attr])
+								continue;
+
+							inputAttrib->shaderLocation = _vsh->m_attrRemap[attr];
+
+							if(0 == _decl.m_attributes[attr])
+							{
+								inputAttrib->format = wgpu::VertexFormat::Float3;
+								inputAttrib->offset = 0;
+							}
+							else
+							{
+								uint8_t num;
+								AttribType::Enum type;
+								bool normalized;
+								bool asInt;
+								_decl.decode(Attrib::Enum(attr), num, type, normalized, asInt);
+								inputAttrib->format = s_attribType[type][num-1][normalized];
+								inputAttrib->offset = _decl.m_offset[attr];
+							}
+
+							++inputAttrib;
+							++numAttribs;
+						}
+					}
+
+					inputBinding->attributeCount = numAttribs;
+					inputBinding++;
+
+					return numAttribs;
+				};
+
+				//bool attrSet[Attrib::Count] = {};
+
+				uint16_t unsettedAttr[Attrib::Count];
+				bx::memCopy(unsettedAttr, program.m_vsh->m_attrMask, sizeof(uint16_t) * Attrib::Count);
+
+				uint8_t stream = 0;
+				for (; stream < _numStreams; ++stream)
+				{
+					VertexLayout layout;
+					bx::memCopy(&layout, _vertexDecls[stream], sizeof(VertexLayout));
+					const uint16_t* attrMask = program.m_vsh->m_attrMask;
+
+					for (uint32_t ii = 0; ii < Attrib::Count; ++ii)
+					{
+						Attrib::Enum iiattr = Attrib::Enum(ii);
+						uint16_t mask = attrMask[ii];
+						uint16_t attr = (layout.m_attributes[ii] & mask);
+						if (attr == 0)
+						{
+							layout.m_attributes[ii] = UINT16_MAX;
+						}
+						if (unsettedAttr[ii] && attr != UINT16_MAX)
+						{
+							unsettedAttr[ii] = 0;
+						}
+					}
+
+					fillVertexDecl(program.m_vsh, layout);
+				}
+
+				for (uint32_t ii = 0; ii < Attrib::Count; ++ii)
+				{
+					Attrib::Enum iiattr = Attrib::Enum(ii);
+					if (0 < unsettedAttr[ii])
+					{
+					  //uint32_t numAttribs = input.vertexBuffers[stream].attributeCount;
+					  //uint32_t numAttribs = inputBinding->attributeCount;
+					  //wgpu::VertexBufferLayoutDescriptor* inputAttrib = const_cast<VkVertexInputAttributeDescription*>(_vertexInputState.pVertexAttributeDescriptions + numAttribs);
+						inputAttrib->shaderLocation = program.m_vsh->m_attrRemap[ii];
+					  //inputAttrib->binding = 0;
+						inputAttrib->format = wgpu::VertexFormat::Float3; // VK_FORMAT_R32G32B32_SFLOAT;
+						inputAttrib->offset = 0;
+						input.vertexBuffers[stream-1].attributeCount++;
+						++inputAttrib;
+					}
+				}
+
+				// TODO (hugoam) WebGPU will crash whenever we do not supply the correct number of attributes
+				// (which depends on the stride passed to bgfx::allocInstanceDataBuffer), so we need to know the
+				// number of live instance attributes in the shader and, if they are not all supplied, either:
+				//   - fail the pipeline state creation, or
+				//   - bind dummy attributes
+				if (0 < _numInstanceData)
+				{
+					uint32_t numBindings = input.desc.vertexBufferCount; // == stream+1 // .vertexBindingDescriptionCount;
+					uint32_t firstAttrib = input.vertexBuffers[stream-1].attributeCount;
+					uint32_t numAttribs = firstAttrib;
+
+					inputBinding->arrayStride = _numInstanceData * 16;
+					inputBinding->stepMode = wgpu::InputStepMode::Instance;
+
+					for (uint32_t inst = 0; inst < _numInstanceData; ++inst)
+					{
+						inputAttrib->shaderLocation = numAttribs;
+						inputAttrib->format = wgpu::VertexFormat::Float4;
+						inputAttrib->offset = inst * 16;
+
+						++numAttribs;
+						++inputAttrib;
+					}
+
+					input.desc.vertexBufferCount = numBindings + 1;
+					input.vertexBuffers[stream].attributeCount = numAttribs - firstAttrib;
+					input.vertexBuffers[stream].attributes = &input.attributes[firstAttrib];
+				}
+
+				input.desc.indexFormat = _index32 ? wgpu::IndexFormat::Uint32 : wgpu::IndexFormat::Uint16;
+
+				pd.desc.vertexState = &input.desc;
+
+				BX_TRACE("Creating WebGPU render pipeline state for program %s", program.m_vsh->name());
+				pso->m_rps = m_device.CreateRenderPipeline(&pd.desc);
+
+				m_pipelineStateCache.add(hash, pso);
+			}
+
+			return pso;
+		}
+
+		PipelineStateWgpu* getPipelineState(
+			  uint64_t _state
+			, uint64_t _stencil
+			, uint32_t _rgba
+			, FrameBufferHandle _fbh
+			, VertexLayoutHandle _declHandle
+			, bool _index32
+			, ProgramHandle _program
+			, uint8_t _numInstanceData
+			)
+		{
+			const VertexLayout* decl = &m_vertexDecls[_declHandle.idx];
+			return getPipelineState(
+				  _state
+				, _stencil
+				, _rgba
+				, _fbh
+				, 1
+				, &decl
+				, _index32
+				, _program
+				, _numInstanceData
+				);
+		}
+
+		PipelineStateWgpu* getComputePipelineState(ProgramHandle _program)
+		{
+			ProgramWgpu& program = m_program[_program.idx];
+
+			if (NULL == program.m_computePS)
+			{
+				PipelineStateWgpu* pso = BX_NEW(g_allocator, PipelineStateWgpu);
+				program.m_computePS = pso;
+
+				wgpu::PipelineLayoutDescriptor layout = defaultDescriptor<wgpu::PipelineLayoutDescriptor>();
+				layout.bindGroupLayouts = &program.m_bindGroupLayout;
+				layout.bindGroupLayoutCount = 1;
+
+				pso->m_layout = m_device.CreatePipelineLayout(&layout);
+
+				wgpu::ComputePipelineDescriptor desc;
+				desc.layout = pso->m_layout;
+				desc.computeStage = { nullptr, program.m_vsh->m_module, "main" };
+
+				pso->m_cps = m_device.CreateComputePipeline(&desc);
+			}
+
+			return program.m_computePS;
+		}
+
+
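+		// Sampler states are cached by their BGFX_SAMPLER_* bits and translated into a
+		// wgpu::SamplerDescriptor on first use.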
+		wgpu::Sampler getSamplerState(uint32_t _flags)
+		{
+			_flags &= BGFX_SAMPLER_BITS_MASK;
+			SamplerStateWgpu* sampler = m_samplerStateCache.find(_flags);
+
+			if (NULL == sampler)
+			{
+				sampler = BX_NEW(g_allocator, SamplerStateWgpu);
+
+				wgpu::SamplerDescriptor desc;
+				desc.addressModeU = s_textureAddress[(_flags&BGFX_SAMPLER_U_MASK)>>BGFX_SAMPLER_U_SHIFT];
+				desc.addressModeV = s_textureAddress[(_flags&BGFX_SAMPLER_V_MASK)>>BGFX_SAMPLER_V_SHIFT];
+				desc.addressModeW = s_textureAddress[(_flags&BGFX_SAMPLER_W_MASK)>>BGFX_SAMPLER_W_SHIFT];
+				desc.minFilter    = s_textureFilterMinMag[(_flags&BGFX_SAMPLER_MIN_MASK)>>BGFX_SAMPLER_MIN_SHIFT];
+				desc.magFilter    = s_textureFilterMinMag[(_flags&BGFX_SAMPLER_MAG_MASK)>>BGFX_SAMPLER_MAG_SHIFT];
+				desc.mipmapFilter = s_textureFilterMip[(_flags&BGFX_SAMPLER_MIP_MASK)>>BGFX_SAMPLER_MIP_SHIFT];
+				desc.lodMinClamp  = 0;
+				desc.lodMaxClamp  = FLT_MAX;
+
+				const uint32_t cmpFunc = (_flags&BGFX_SAMPLER_COMPARE_MASK)>>BGFX_SAMPLER_COMPARE_SHIFT;
+				desc.compare = 0 == cmpFunc
+					? wgpu::CompareFunction::Undefined
+					: s_cmpFunc[cmpFunc]
+					;
+
+				sampler->m_sampler = s_renderWgpu->m_device.CreateSampler(&desc);
+				m_samplerStateCache.add(_flags, sampler);
+			}
+
+			return sampler->m_sampler;
+		}
+
+		wgpu::CommandEncoder& getBlitCommandEncoder()
+		{
+			if (!m_cmd.m_encoder)
+				m_cmd.begin();
+
+			if (m_renderEncoder || m_computeEncoder)
+				endEncoding();
+
+			return m_cmd.m_encoder;
+		}
+
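+		// Begins a render pass for the given frame buffer, setting up the color and depth-stencil
+		// attachments and their load/store operations according to the requested clear flags.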
+		wgpu::RenderPassEncoder renderPass(bgfx::Frame* _render, bgfx::FrameBufferHandle fbh, bool clear, Clear clr, const char* name = NULL)
+		{
+			RenderPassStateWgpu* rps = s_renderWgpu->getRenderPassState(fbh, clear, clr);
+
+			RenderPassDescriptor& renderPassDescriptor = rps->m_rpd;
+			renderPassDescriptor.desc.label = name;
+
+			setFrameBuffer(renderPassDescriptor, fbh);
+
+			if(clear)
+			{
+				for(uint32_t ii = 0; ii < g_caps.limits.maxFBAttachments; ++ii)
+				{
+					wgpu::RenderPassColorAttachmentDescriptor& color = renderPassDescriptor.colorAttachments[ii];
+
+					if(0 != (BGFX_CLEAR_COLOR & clr.m_flags))
+					{
+						if(0 != (BGFX_CLEAR_COLOR_USE_PALETTE & clr.m_flags))
+						{
+							uint8_t index = (uint8_t)bx::uint32_min(BGFX_CONFIG_MAX_COLOR_PALETTE - 1, clr.m_index[ii]);
+							const float* rgba = _render->m_colorPalette[index];
+							const float rr = rgba[0];
+							const float gg = rgba[1];
+							const float bb = rgba[2];
+							const float aa = rgba[3];
+							color.clearColor = { rr, gg, bb, aa };
+						}
+						else
+						{
+							float rr = clr.m_index[0] * 1.0f / 255.0f;
+							float gg = clr.m_index[1] * 1.0f / 255.0f;
+							float bb = clr.m_index[2] * 1.0f / 255.0f;
+							float aa = clr.m_index[3] * 1.0f / 255.0f;
+							color.clearColor = { rr, gg, bb, aa };
+						}
+
+						color.loadOp = wgpu::LoadOp::Clear;
+					}
+					else
+					{
+						color.loadOp = wgpu::LoadOp::Load;
+					}
+
+					//desc.storeOp = desc.attachment.sampleCount > 1 ? wgpu::StoreOp::MultisampleResolve : wgpu::StoreOp::Store;
+					color.storeOp = wgpu::StoreOp::Store;
+				}
+
+				wgpu::RenderPassDepthStencilAttachmentDescriptor& depthStencil = renderPassDescriptor.depthStencilAttachment;
+
+				if(depthStencil.attachment)
+				{
+					depthStencil.clearDepth = clr.m_depth;
+					depthStencil.depthLoadOp = 0 != (BGFX_CLEAR_DEPTH & clr.m_flags)
+						? wgpu::LoadOp::Clear
+						: wgpu::LoadOp::Load
+						;
+					depthStencil.depthStoreOp = m_mainFrameBuffer.m_swapChain->m_backBufferColorMsaa
+						? wgpu::StoreOp(0) //wgpu::StoreOp::DontCare
+						: wgpu::StoreOp::Store
+						;
+
+					depthStencil.clearStencil = clr.m_stencil;
+					depthStencil.stencilLoadOp = 0 != (BGFX_CLEAR_STENCIL & clr.m_flags)
+						? wgpu::LoadOp::Clear
+						: wgpu::LoadOp::Load
+						;
+					depthStencil.stencilStoreOp = m_mainFrameBuffer.m_swapChain->m_backBufferColorMsaa
+						? wgpu::StoreOp(0) //wgpu::StoreOp::DontCare
+						: wgpu::StoreOp::Store
+						;
+				}
+			}
+			else
+			{
+				for(uint32_t ii = 0; ii < g_caps.limits.maxFBAttachments; ++ii)
+				{
+					wgpu::RenderPassColorAttachmentDescriptor& color = renderPassDescriptor.colorAttachments[ii];
+					if(color.attachment)
+					{
+						color.loadOp = wgpu::LoadOp::Load;
+					}
+				}
+
+				wgpu::RenderPassDepthStencilAttachmentDescriptor& depthStencil = renderPassDescriptor.depthStencilAttachment;
+
+				if(depthStencil.attachment)
+				{
+					depthStencil.depthLoadOp = wgpu::LoadOp::Load;
+					depthStencil.depthStoreOp = wgpu::StoreOp::Store;
+
+					depthStencil.stencilLoadOp = wgpu::LoadOp::Load;
+					depthStencil.stencilStoreOp = wgpu::StoreOp::Store;
+				}
+			}
+
+			wgpu::RenderPassEncoder rce = m_cmd.m_encoder.BeginRenderPass(&renderPassDescriptor.desc);
+			m_renderEncoder = rce;
+			return rce;
+		}
+
+		void endEncoding()
+		{
+			if (m_renderEncoder)
+			{
+				m_renderEncoder.EndPass();
+				m_renderEncoder = nullptr;
+			}
+
+			if (m_computeEncoder)
+			{
+				m_computeEncoder.EndPass();
+				m_computeEncoder = nullptr;
+			}
+		}
+
+		void* m_renderDocDll;
+
+#if !BX_PLATFORM_EMSCRIPTEN
+		dawn_native::Instance m_instance;
+#endif
+		wgpu::Device       m_device;
+		wgpu::Queue        m_queue;
+		TimerQueryWgpu     m_gpuTimer;
+		CommandQueueWgpu   m_cmd;
+
+		StagingBufferWgpu	m_uniformBuffers[WEBGPU_NUM_UNIFORM_BUFFERS];
+		ScratchBufferWgpu   m_scratchBuffers[WEBGPU_MAX_FRAMES_IN_FLIGHT];
+
+		BindStateCacheWgpu  m_bindStateCache[WEBGPU_MAX_FRAMES_IN_FLIGHT];
+
+		uint8_t m_frameIndex;
+
+		uint16_t          m_numWindows;
+		FrameBufferHandle m_windows[BGFX_CONFIG_MAX_FRAME_BUFFERS];
+
+		IndexBufferWgpu  m_indexBuffers[BGFX_CONFIG_MAX_INDEX_BUFFERS];
+		VertexBufferWgpu m_vertexBuffers[BGFX_CONFIG_MAX_VERTEX_BUFFERS];
+		ShaderWgpu       m_shaders[BGFX_CONFIG_MAX_SHADERS];
+		ProgramWgpu      m_program[BGFX_CONFIG_MAX_PROGRAMS];
+		TextureWgpu      m_textures[BGFX_CONFIG_MAX_TEXTURES];
+		ReadbackWgpu     m_readbacks[BGFX_CONFIG_MAX_TEXTURES];
+		FrameBufferWgpu  m_mainFrameBuffer;
+		FrameBufferWgpu  m_frameBuffers[BGFX_CONFIG_MAX_FRAME_BUFFERS];
+		VertexLayout     m_vertexDecls[BGFX_CONFIG_MAX_VERTEX_LAYOUTS];
+		UniformRegistry  m_uniformReg;
+		void*            m_uniforms[BGFX_CONFIG_MAX_UNIFORMS];
+
+		//StateCacheT<BindStateWgpu*>   m_bindStateCache;
+		StateCacheT<RenderPassStateWgpu*> m_renderPassStateCache;
+		StateCacheT<PipelineStateWgpu*> m_pipelineStateCache;
+		StateCacheT<SamplerStateWgpu*>  m_samplerStateCache;
+
+		TextVideoMem m_textVideoMem;
+
+		uint8_t m_fsScratch[64 << 10];
+		uint8_t m_vsScratch[64 << 10];
+
+		FrameBufferHandle m_fbh;
+		bool m_rtMsaa;
+
+		Resolution m_resolution;
+		void* m_capture;
+		uint32_t m_captureSize;
+
+		wgpu::RenderPassEncoder     m_renderEncoder;
+		wgpu::ComputePassEncoder    m_computeEncoder;
+	};
+
+	RendererContextI* rendererCreate(const Init& _init)
+	{
+		s_renderWgpu = BX_NEW(g_allocator, RendererContextWgpu);
+		if (!s_renderWgpu->init(_init) )
+		{
+			BX_DELETE(g_allocator, s_renderWgpu);
+			s_renderWgpu = NULL;
+		}
+		return s_renderWgpu;
+	}
+
+	void rendererDestroy()
+	{
+		s_renderWgpu->shutdown();
+		BX_DELETE(g_allocator, s_renderWgpu);
+		s_renderWgpu = NULL;
+	}
+
+	void writeString(bx::WriterI* _writer, const char* _str)
+	{
+		bx::write(_writer, _str, (int32_t)bx::strLen(_str) );
+	}
+
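+	// Parses the bgfx shader binary header and its uniform declarations, building the bind group
+	// layout entries for uniform buffers, storage buffers/images and texture/sampler pairs.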
+	void ShaderWgpu::create(ShaderHandle _handle, const Memory* _mem)
+	{
+		m_handle = _handle;
+
+		BX_TRACE("Creating shader %s", getName(_handle));
+
+		bx::MemoryReader reader(_mem->data, _mem->size);
+
+		uint32_t magic;
+		bx::read(&reader, magic);
+
+		wgpu::ShaderStage shaderStage;
+
+		if (isShaderType(magic, 'C'))
+		{
+			shaderStage = wgpu::ShaderStage::Compute;
+		}
+		else if (isShaderType(magic, 'F'))
+		{
+			shaderStage = wgpu::ShaderStage::Fragment;
+		}
+		else if (isShaderType(magic, 'G'))
+		{
+			//shaderStage = wgpu::ShaderStage::Geometry;
+		}
+		else if (isShaderType(magic, 'V'))
+		{
+			shaderStage = wgpu::ShaderStage::Vertex;
+		}
+
+		m_stage = shaderStage;
+
+		uint32_t hashIn;
+		bx::read(&reader, hashIn);
+
+		uint32_t hashOut;
+
+		if (isShaderVerLess(magic, 6) )
+		{
+			hashOut = hashIn;
+		}
+		else
+		{
+			bx::read(&reader, hashOut);
+		}
+
+		uint16_t count;
+		bx::read(&reader, count);
+
+		m_numPredefined = 0;
+		m_numUniforms = count;
+
+		BX_TRACE("%s Shader consts %d"
+			, getShaderTypeName(magic)
+			, count
+			);
+
+		const bool fragment = isShaderType(magic, 'F');
+		uint8_t fragmentBit = fragment ? BGFX_UNIFORM_FRAGMENTBIT : 0;
+
+		BX_CHECK(!isShaderVerLess(magic, 7), "WebGPU backend supports only shader binary version >= 7");
+
+		if (0 < count)
+		{
+			for (uint32_t ii = 0; ii < count; ++ii)
+			{
+				uint8_t nameSize = 0;
+				bx::read(&reader, nameSize);
+
+				char name[256];
+				bx::read(&reader, &name, nameSize);
+				name[nameSize] = '\0';
+
+				uint8_t type = 0;
+				bx::read(&reader, type);
+
+				uint8_t num;
+				bx::read(&reader, num);
+
+				uint16_t regIndex;
+				bx::read(&reader, regIndex);
+
+				uint16_t regCount;
+				bx::read(&reader, regCount);
+
+				uint8_t texComponent;
+				bx::read(&reader, texComponent);
+
+				uint8_t texDimension;
+				bx::read(&reader, texDimension);
+
+				const char* kind = "invalid";
+
+				PredefinedUniform::Enum predefined = nameToPredefinedUniformEnum(name);
+				if (PredefinedUniform::Count != predefined)
+				{
+					kind = "predefined";
+					m_predefined[m_numPredefined].m_loc   = regIndex;
+					m_predefined[m_numPredefined].m_count = regCount;
+					m_predefined[m_numPredefined].m_type  = uint8_t(predefined|fragmentBit);
+					m_numPredefined++;
+				}
+				else if (UniformType::End == (~BGFX_UNIFORM_MASK & type))
+				{
+					// regCount is used for descriptor type
+					const bool buffer = regCount == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+					const bool readonly = (type & BGFX_UNIFORM_READONLYBIT) != 0;
+
+					const uint8_t stage = regIndex - (buffer ? 16 : 32) - (fragment ? 48 : 0);
+
+					m_bindInfo[stage].m_index = m_numBuffers;
+					m_bindInfo[stage].m_binding = regIndex;
+					m_bindInfo[stage].m_uniform = { 0 };
+
+					m_buffers[m_numBuffers] = wgpu::BindGroupLayoutBinding();
+					m_buffers[m_numBuffers].binding = regIndex;
+					m_buffers[m_numBuffers].visibility = shaderStage;
+
+					if (buffer)
+					{
+						m_buffers[m_numBuffers].type = readonly
+							? wgpu::BindingType::ReadonlyStorageBuffer
+							: wgpu::BindingType::StorageBuffer;
+					}
+					else
+					{
+						m_buffers[m_numBuffers].type = readonly
+							? wgpu::BindingType::ReadonlyStorageTexture
+							: wgpu::BindingType::WriteonlyStorageTexture;
+					}
+
+					m_numBuffers++;
+
+					kind = "storage";
+				}
+				else if (UniformType::Sampler == (~BGFX_UNIFORM_MASK & type))
+				{
+					const UniformRegInfo* info = s_renderWgpu->m_uniformReg.find(name);
+					BX_CHECK(NULL != info, "User defined uniform '%s' is not found, it won't be set.", name);
+
+					const uint8_t stage = regIndex - 16 - (fragment ? 48 : 0);
+
+					m_bindInfo[stage].m_index = m_numSamplers;
+					m_bindInfo[stage].m_binding = regIndex;
+					m_bindInfo[stage].m_uniform = info->m_handle;
+
+					m_textures[m_numSamplers] = wgpu::BindGroupLayoutBinding();
+					m_textures[m_numSamplers].binding = regIndex;
+					m_textures[m_numSamplers].visibility = shaderStage;
+					m_textures[m_numSamplers].type = wgpu::BindingType::SampledTexture;
+					m_textures[m_numSamplers].viewDimension = wgpu::TextureViewDimension(texDimension);
+					m_textures[m_numSamplers].textureComponentType = wgpu::TextureComponentType(texComponent);
+
+					const bool comparisonSampler = (type & BGFX_UNIFORM_COMPAREBIT) != 0;
+
+					m_samplers[m_numSamplers] = wgpu::BindGroupLayoutBinding();
+					m_samplers[m_numSamplers].binding = regIndex + 16;
+					m_samplers[m_numSamplers].visibility = shaderStage;
+					m_samplers[m_numSamplers].type = comparisonSampler
+						? wgpu::BindingType::ComparisonSampler
+						: wgpu::BindingType::Sampler;
+
+					m_numSamplers++;
+
+					kind = "sampler";
+				}
+				else
+				{
+					const UniformRegInfo* info = s_renderWgpu->m_uniformReg.find(name);
+					BX_CHECK(NULL != info, "User defined uniform '%s' is not found, it won't be set.", name);
+
+					if(NULL == m_constantBuffer)
+					{
+						m_constantBuffer = UniformBuffer::create(1024);
+					}
+
+					kind = "user";
+					m_constantBuffer->writeUniformHandle((UniformType::Enum)(type | fragmentBit), regIndex, info->m_handle, regCount);
+				}
+
+				BX_TRACE("\t%s: %s (%s), r.index %3d, r.count %2d"
+					, kind
+					, name
+					, getUniformTypeName(UniformType::Enum(type&~BGFX_UNIFORM_MASK) )
+					, regIndex
+					, regCount
+					);
+				BX_UNUSED(kind);
+			}
+
+			if (NULL != m_constantBuffer)
+			{
+				m_constantBuffer->finish();
+			}
+		}
+
+		uint32_t shaderSize;
+		bx::read(&reader, shaderSize);
+
+		BX_TRACE("Shader body is at %lld size %u remaining %lld", reader.getPos(), shaderSize, reader.remaining());
+
+		const uint32_t* code = (const uint32_t*)reader.getDataPtr();
+		bx::skip(&reader, shaderSize+1);
+
+		m_code = (uint32_t*)BX_ALLOC(g_allocator, shaderSize);
+		m_codeSize = shaderSize;
+
+		bx::memCopy(m_code, code, shaderSize);
+		// TODO (hugoam) delete this
+
+		BX_TRACE("First word %08" PRIx32, code[0]);
+
+		uint8_t numAttrs = 0;
+		bx::read(&reader, numAttrs);
+
+		m_numAttrs = numAttrs;
+
+		bx::memSet(m_attrMask, 0, sizeof(m_attrMask));
+		bx::memSet(m_attrRemap, UINT8_MAX, sizeof(m_attrRemap));
+
+		for(uint8_t ii = 0; ii < numAttrs; ++ii)
+		{
+			uint16_t id;
+			bx::read(&reader, id);
+
+			auto toString = [](Attrib::Enum attr)
+			{
+				if (attr == Attrib::Position) return "Position";
+				else if (attr == Attrib::Normal) return "Normal";
+				else if (attr == Attrib::Tangent) return "Tangent";
+				else if (attr == Attrib::Bitangent) return "Bitangent";
+				else if (attr == Attrib::Color0) return "Color0";
+				else if (attr == Attrib::Color1) return "Color1";
+				else if (attr == Attrib::Color2) return "Color2";
+				else if (attr == Attrib::Color3) return "Color3";
+				else if (attr == Attrib::Indices) return "Indices";
+				else if (attr == Attrib::Weight) return "Weight";
+				else if (attr == Attrib::TexCoord0) return "TexCoord0";
+				else if (attr == Attrib::TexCoord1) return "TexCoord1";
+				else if (attr == Attrib::TexCoord2) return "TexCoord2";
+				else if (attr == Attrib::TexCoord3) return "TexCoord3";
+				else if (attr == Attrib::TexCoord4) return "TexCoord4";
+				else if (attr == Attrib::TexCoord5) return "TexCoord5";
+				else if (attr == Attrib::TexCoord6) return "TexCoord6";
+				else if (attr == Attrib::TexCoord7) return "TexCoord7";
+				return "Invalid";
+			};
+
+			Attrib::Enum attr = idToAttrib(id);
+
+			if(Attrib::Count != attr)
+			{
+				m_attrMask[attr] = UINT16_MAX;
+				m_attrRemap[attr] = ii;
+				BX_TRACE("\tattrib: %s (%i) at index %i", toString(attr), attr, ii);
+			}
+		}
+
+		wgpu::ShaderModuleDescriptor desc;
+		desc.label = getName(_handle);
+		desc.code = m_code;
+		desc.codeSize = shaderSize/4;
+
+		m_module = s_renderWgpu->m_device.CreateShaderModule(&desc);
+		
+		BGFX_FATAL(m_module
+			, bgfx::Fatal::InvalidShader
+			, "Failed to create %s shader."
+			, getShaderTypeName(magic)
+			);
+
+		bx::HashMurmur2A murmur;
+		murmur.begin();
+		murmur.add(hashIn);
+		murmur.add(hashOut);
+		murmur.add(code, shaderSize);
+		murmur.add(numAttrs);
+		murmur.add(m_attrMask, numAttrs);
+		m_hash = murmur.end();
+
+		auto roundUp = [](auto value, auto multiple)
+		{
+			return ((value + multiple - 1) / multiple) * multiple;
+		};
+
+		bx::read(&reader, m_size);
+
+		const uint32_t align = kMinBufferOffsetAlignment;
+		m_gpuSize = bx::strideAlign(m_size, align);
+
+		BX_TRACE("shader size %d (used=%d) (prev=%d)", (int)m_size, (int)m_gpuSize, (int)bx::strideAlign(roundUp(m_size, 4), align));
+	}
+
+	void ProgramWgpu::create(const ShaderWgpu* _vsh, const ShaderWgpu* _fsh)
+	{
+		BX_CHECK(_vsh->m_module, "Vertex shader doesn't exist.");
+		m_vsh = _vsh;
+		m_fsh = _fsh;
+		m_gpuSize = _vsh->m_gpuSize + (_fsh ? _fsh->m_gpuSize : 0);
+
+		bx::memCopy(&m_predefined[0], _vsh->m_predefined, _vsh->m_numPredefined * sizeof(PredefinedUniform));
+		m_numPredefined = _vsh->m_numPredefined;
+
+		if (NULL != _fsh)
+		{
+			bx::memCopy(&m_predefined[m_numPredefined], _fsh->m_predefined, _fsh->m_numPredefined * sizeof(PredefinedUniform));
+			m_numPredefined += _fsh->m_numPredefined;
+		}
+
+		wgpu::BindGroupLayoutEntry bindings[2 + BGFX_CONFIG_MAX_TEXTURE_SAMPLERS * 3];
+
+		m_numUniforms = 0 + (_vsh->m_size > 0 ? 1 : 0) + (NULL != _fsh && _fsh->m_size > 0 ? 1 : 0);
+
+		uint8_t numBindings = 0;
+
+		// bind uniform buffer at slot 0
+		bindings[numBindings].binding = 0;
+		bindings[numBindings].visibility = _vsh->m_stage;
+		bindings[numBindings].type = wgpu::BindingType::UniformBuffer;
+		bindings[numBindings].hasDynamicOffset = true;
+		numBindings++;
+
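+		// When the fragment shader has its own uniform data, it gets a second dynamically-offset
+		// uniform buffer bound at slot 48.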
+		if (m_numUniforms > 1)
+		{
+			bindings[numBindings].binding = 48;
+			bindings[numBindings].visibility = wgpu::ShaderStage::Fragment;
+			bindings[numBindings].type = wgpu::BindingType::UniformBuffer;
+			bindings[numBindings].hasDynamicOffset = true;
+			numBindings++;
+		}
+
+		uint8_t numSamplers = 0;
+
+		for (uint32_t ii = 0; ii < _vsh->m_numSamplers; ++ii)
+		{
+			m_textures[ii] = _vsh->m_textures[ii];
+			m_samplers[ii] = _vsh->m_samplers[ii];
+			bindings[numBindings++] = _vsh->m_textures[ii];
+			bindings[numBindings++] = _vsh->m_samplers[ii];
+		}
+
+		numSamplers += _vsh->m_numSamplers;
+
+		if (NULL != _fsh)
+		{
+			for (uint32_t ii = 0; ii < _fsh->m_numSamplers; ++ii)
+			{
+				m_textures[numSamplers + ii] = _fsh->m_textures[ii];
+				m_samplers[numSamplers + ii] = _fsh->m_samplers[ii];
+				bindings[numBindings++] = _fsh->m_textures[ii];
+				bindings[numBindings++] = _fsh->m_samplers[ii];
+			}
+
+			numSamplers += _fsh->m_numSamplers;
+		}
+
+		for (uint8_t stage = 0; stage < BGFX_CONFIG_MAX_TEXTURE_SAMPLERS; ++stage)
+		{
+			if (isValid(m_vsh->m_bindInfo[stage].m_uniform))
+			{
+				m_bindInfo[stage] = m_vsh->m_bindInfo[stage];
+			}
+			else if (NULL != m_fsh && isValid(m_fsh->m_bindInfo[stage].m_uniform))
+			{
+				m_bindInfo[stage] = m_fsh->m_bindInfo[stage];
+				m_bindInfo[stage].m_index += _vsh->m_numSamplers;
+			}
+		}
+
+		m_numSamplers = numSamplers;
+
+		for (uint32_t ii = 0; ii < _vsh->m_numBuffers; ++ii)
+		{
+			m_buffers[ii] = _vsh->m_buffers[ii];
+			bindings[numBindings++] = _vsh->m_buffers[ii];
+		}
+
+		m_numBuffers = _vsh->m_numBuffers;
+
+		BX_CHECK(m_numUniforms + m_numSamplers * 2 + m_numBuffers == numBindings, "");
+
+		wgpu::BindGroupLayoutDescriptor bindGroupDesc;
+		bindGroupDesc.entryCount = numBindings;
+		bindGroupDesc.entries = bindings;
+		m_bindGroupLayout = s_renderWgpu->m_device.CreateBindGroupLayout(&bindGroupDesc);
+
+		bx::HashMurmur2A murmur;
+		murmur.begin();
+		murmur.add(m_numUniforms);
+		murmur.add(m_textures, sizeof(wgpu::BindGroupLayoutEntry) * numSamplers);
+		murmur.add(m_samplers, sizeof(wgpu::BindGroupLayoutEntry) * numSamplers);
+		murmur.add(m_buffers,  sizeof(wgpu::BindGroupLayoutEntry) * m_numBuffers);
+		m_bindGroupLayoutHash = murmur.end();
+	}
+
+	void ProgramWgpu::destroy()
+	{
+		m_vsh = NULL;
+		m_fsh = NULL;
+		if ( NULL != m_computePS )
+		{
+			BX_DELETE(g_allocator, m_computePS);
+			m_computePS = NULL;
+		}
+	}
+
+	void BufferWgpu::create(uint32_t _size, void* _data, uint16_t _flags, uint16_t _stride, bool _vertex)
+	{
+		BX_UNUSED(_stride);
+
+		m_size = _size;
+		m_flags = _flags;
+		m_vertex = _vertex;
+
+		const uint32_t paddedSize = bx::strideAlign(_size, 4);
+
+		bool storage = m_flags & BGFX_BUFFER_COMPUTE_READ_WRITE;
+		bool indirect = m_flags & BGFX_BUFFER_DRAW_INDIRECT;
+
+		wgpu::BufferDescriptor desc;
+		desc.size = paddedSize;
+		desc.usage = _vertex ? wgpu::BufferUsage::Vertex : wgpu::BufferUsage::Index;
+		desc.usage |= (storage || indirect) ? wgpu::BufferUsage::Storage : wgpu::BufferUsage::None;
+		desc.usage |= indirect ? wgpu::BufferUsage::Indirect : wgpu::BufferUsage::None;
+		desc.usage |= NULL == _data ? wgpu::BufferUsage::CopyDst : wgpu::BufferUsage::None;
+
+		if(NULL != _data)
+		{
+			wgpu::CreateBufferMappedResult mapped = s_renderWgpu->m_device.CreateBufferMapped(&desc);
+			m_ptr = mapped.buffer;
+			bx::memCopy(mapped.data, _data, _size);
+			mapped.buffer.Unmap();
+		}
+		else
+		{
+			m_ptr = s_renderWgpu->m_device.CreateBuffer(&desc);
+		}
+	}
+
+	void BufferWgpu::update(uint32_t _offset, uint32_t _size, void* _data, bool _discard)
+	{
+		wgpu::CommandEncoder& bce = s_renderWgpu->getBlitCommandEncoder();
+
+		if (!m_vertex && !_discard)
+		{
+			if ( m_dynamic == NULL )
+			{
+				m_dynamic = (uint8_t*)BX_ALLOC(g_allocator, m_size);
+			}
+
+			bx::memCopy(m_dynamic + _offset, _data, _size);
+			// Upload only the dirty region, expanded to 4-byte boundaries as required for buffer copies.
+			uint32_t start = _offset & ~3u;
+			uint32_t end = bx::strideAlign(_offset + _size, 4);
+
+			wgpu::BufferDescriptor desc;
+			desc.size = end - start;
+			desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
+
+			wgpu::CreateBufferMappedResult mapped = s_renderWgpu->m_device.CreateBufferMapped(&desc);
+			wgpu::Buffer staging = mapped.buffer;
+			bx::memCopy(mapped.data, m_dynamic + start, end - start);
+			mapped.buffer.Unmap();
+
+			bce.CopyBufferToBuffer(staging, 0, m_ptr, start, end - start);
+			s_renderWgpu->m_cmd.release(staging);
+		}
+		else
+		{
+			wgpu::BufferDescriptor desc;
+			desc.size = _size;
+			desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
+
+			wgpu::CreateBufferMappedResult mapped = s_renderWgpu->m_device.CreateBufferMapped(&desc);
+			wgpu::Buffer staging = mapped.buffer;
+			bx::memCopy(mapped.data, _data, _size);
+			mapped.buffer.Unmap();
+
+			bce.CopyBufferToBuffer(staging, 0, m_ptr, _offset, _size);
+			s_renderWgpu->m_cmd.release(staging);
+		}
+	}
+
+	void VertexBufferWgpu::create(uint32_t _size, void* _data, VertexLayoutHandle _layoutHandle, uint16_t _flags)
+	{
+		m_layoutHandle = _layoutHandle;
+		uint16_t stride = isValid(_layoutHandle)
+			? s_renderWgpu->m_vertexDecls[_layoutHandle.idx].m_stride
+			: 0
+			;
+
+		BufferWgpu::create(_size, _data, _flags, stride, true);
+	}
+
+	void TextureWgpu::create(TextureHandle _handle, const Memory* _mem, uint64_t _flags, uint8_t _skip)
+	{
+		m_handle = _handle;
+
+		m_sampler = s_renderWgpu->getSamplerState(uint32_t(_flags) );
+
+		bimg::ImageContainer imageContainer;
+
+		if (bimg::imageParse(imageContainer, _mem->data, _mem->size) )
+		{
+			const bimg::ImageBlockInfo& blockInfo = getBlockInfo(bimg::TextureFormat::Enum(imageContainer.m_format) );
+			const uint8_t startLod = bx::min<uint8_t>(_skip, imageContainer.m_numMips-1);
+
+			bimg::TextureInfo ti;
+			bimg::imageGetSize(
+				  &ti
+				, uint16_t(imageContainer.m_width >>startLod)
+				, uint16_t(imageContainer.m_height>>startLod)
+				, uint16_t(imageContainer.m_depth >>startLod)
+				, imageContainer.m_cubeMap
+				, 1 < imageContainer.m_numMips
+				, imageContainer.m_numLayers
+				, imageContainer.m_format
+				);
+			ti.numMips = bx::min<uint8_t>(imageContainer.m_numMips-startLod, ti.numMips);
+
+			m_flags     = _flags;
+			m_width     = ti.width;
+			m_height    = ti.height;
+			m_depth     = ti.depth;
+			m_numLayers = ti.numLayers;
+			m_numMips   = ti.numMips;
+			m_numSides  = ti.numLayers * (imageContainer.m_cubeMap ? 6 : 1);
+			m_requestedFormat  = TextureFormat::Enum(imageContainer.m_format);
+			m_textureFormat    = getViableTextureFormat(imageContainer);
+
+			if (m_requestedFormat == bgfx::TextureFormat::D16)
+			{
+				m_textureFormat = bgfx::TextureFormat::D32F;
+			}
+
+			const bool compressed = bimg::isCompressed(bimg::TextureFormat::Enum(imageContainer.m_format));
+
+			if (compressed)
+			{
+				m_textureFormat = bgfx::TextureFormat::BGRA8;
+			}
+
+			const bool convert = m_textureFormat != m_requestedFormat;
+			const uint8_t bpp  = bimg::getBitsPerPixel(bimg::TextureFormat::Enum(m_textureFormat) );
+
+			wgpu::TextureDescriptor desc = defaultDescriptor<wgpu::TextureDescriptor>();
+			//desc.label = getName(_handle);
+
+			if (1 < ti.numLayers)
+			{
+				if (imageContainer.m_cubeMap)
+				{
+					m_type = TextureCube;
+					desc.dimension = wgpu::TextureDimension::e2D;
+				}
+				else
+				{
+					m_type = Texture2D;
+					desc.dimension = wgpu::TextureDimension::e2D;
+				}
+			}
+			else if (imageContainer.m_cubeMap)
+			{
+				m_type = TextureCube;
+				desc.dimension = wgpu::TextureDimension::e2D;
+			}
+			else if (1 < imageContainer.m_depth)
+			{
+				m_type = Texture3D;
+				desc.dimension = wgpu::TextureDimension::e3D;
+			}
+			else
+			{
+				m_type = Texture2D;
+				desc.dimension = wgpu::TextureDimension::e2D;
+			}
+
+			const uint16_t numSides = ti.numLayers * (imageContainer.m_cubeMap ? 6 : 1);
+			const uint32_t numSrd = numSides * ti.numMips;
+
+			const bool writeOnly    = 0 != (_flags&BGFX_TEXTURE_RT_WRITE_ONLY);
+			const bool computeWrite = 0 != (_flags&BGFX_TEXTURE_COMPUTE_WRITE);
+			const bool renderTarget = 0 != (_flags&BGFX_TEXTURE_RT_MASK);
+			const bool srgb         = 0 != (_flags&BGFX_TEXTURE_SRGB);
+
+			BX_TRACE("Texture %3d: %s (requested: %s), layers %d, %dx%d%s RT[%c], WO[%c], CW[%c], sRGB[%c]"
+				, this - s_renderWgpu->m_textures
+				, getName( (TextureFormat::Enum)m_textureFormat)
+				, getName( (TextureFormat::Enum)m_requestedFormat)
+				, ti.numLayers
+				, ti.width
+				, ti.height
+				, imageContainer.m_cubeMap ? "x6" : ""
+				, renderTarget ? 'x' : ' '
+				, writeOnly    ? 'x' : ' '
+				, computeWrite ? 'x' : ' '
+				, srgb         ? 'x' : ' '
+				);
+
+			const uint32_t msaaQuality = bx::uint32_satsub( (_flags&BGFX_TEXTURE_RT_MSAA_MASK)>>BGFX_TEXTURE_RT_MSAA_SHIFT, 1);
+			const int32_t  sampleCount = s_msaa[msaaQuality];
+
+			
+			wgpu::TextureFormat format = wgpu::TextureFormat::Undefined;
+			if (srgb)
+			{
+				format = s_textureFormat[m_textureFormat].m_fmtSrgb;
+				BX_WARN(format != wgpu::TextureFormat::Undefined
+					, "sRGB not supported for texture format %d"
+					, m_textureFormat
+					);
+			}
+
+			if (format == wgpu::TextureFormat::Undefined)
+			{
+				// not swizzled and not sRGB, or sRGB unsupported
+				format = s_textureFormat[m_textureFormat].m_fmt;
+			}
+
+			desc.format = format;
+			desc.size.width  = m_width;
+			desc.size.height = m_height;
+			desc.size.depth  = bx::uint32_max(1,imageContainer.m_depth);
+			desc.mipLevelCount    = m_numMips;
+			desc.sampleCount      = 1;
+			desc.arrayLayerCount  = m_numSides;
+
+			desc.usage = wgpu::TextureUsage::Sampled;
+			desc.usage |= wgpu::TextureUsage::CopyDst;
+			desc.usage |= wgpu::TextureUsage::CopySrc;
+
+			if (computeWrite)
+			{
+				desc.usage |= wgpu::TextureUsage::Storage;
+			}
+
+			if (renderTarget)
+			{
+				desc.usage |= wgpu::TextureUsage::OutputAttachment;
+			}
+
+			m_ptr = s_renderWgpu->m_device.CreateTexture(&desc);
+
+			if (sampleCount > 1)
+			{
+				desc.sampleCount = sampleCount;
+
+				m_ptrMsaa = s_renderWgpu->m_device.CreateTexture(&desc);
+			}
+
+			// decode images
+			struct ImageInfo
+			{
+				uint8_t* data;
+				uint32_t width;
+				uint32_t height;
+				uint32_t depth;
+				uint32_t pitch;
+				uint32_t slice;
+				uint32_t size;
+				uint8_t mipLevel;
+				uint8_t layer;
+			};
+
+			ImageInfo* imageInfos = (ImageInfo*)BX_ALLOC(g_allocator, sizeof(ImageInfo) * numSrd);
+			bx::memSet(imageInfos, 0, sizeof(ImageInfo) * numSrd);
+			uint32_t alignment = 1; // tightly aligned buffer
+
+			uint32_t kk = 0;
+
+			for (uint8_t side = 0; side < numSides; ++side)
+			{
+				for (uint8_t lod = 0; lod < ti.numMips; ++lod)
+				{
+					bimg::ImageMip mip;
+					if (bimg::imageGetRawData(imageContainer, side, lod + startLod, _mem->data, _mem->size, mip))
+					{
+						if (convert)
+						{
+							const uint32_t pitch = bx::strideAlign(bx::max<uint32_t>(mip.m_width, 4) * bpp / 8, alignment);
+							const uint32_t slice = bx::strideAlign(bx::max<uint32_t>(mip.m_height, 4) * pitch, alignment);
+							const uint32_t size = slice * mip.m_depth;
+
+							uint8_t* temp = (uint8_t*)BX_ALLOC(g_allocator, size);
+							bimg::imageDecodeToBgra8(
+								  g_allocator
+								, temp
+								, mip.m_data
+								, mip.m_width
+								, mip.m_height
+								, pitch
+								, mip.m_format
+								);
+
+							imageInfos[kk].data = temp;
+							imageInfos[kk].width = mip.m_width;
+							imageInfos[kk].height = mip.m_height;
+							imageInfos[kk].depth = mip.m_depth;
+							imageInfos[kk].pitch = pitch;
+							imageInfos[kk].slice = slice;
+							imageInfos[kk].size = size;
+							imageInfos[kk].mipLevel = lod;
+							imageInfos[kk].layer = side;
+						}
+						else if (compressed)
+						{
+							const uint32_t pitch = bx::strideAlign((mip.m_width / blockInfo.blockWidth) * mip.m_blockSize, alignment);
+							const uint32_t slice = bx::strideAlign((mip.m_height / blockInfo.blockHeight) * pitch, alignment);
+							const uint32_t size = slice * mip.m_depth;
+
+							uint8_t* temp = (uint8_t*)BX_ALLOC(g_allocator, size);
+							bimg::imageCopy(
+								  temp
+								, mip.m_height / blockInfo.blockHeight
+								, (mip.m_width / blockInfo.blockWidth) * mip.m_blockSize
+								, mip.m_depth
+								, mip.m_data
+								, pitch
+								);
+
+							imageInfos[kk].data = temp;
+							imageInfos[kk].width = mip.m_width;
+							imageInfos[kk].height = mip.m_height;
+							imageInfos[kk].depth = mip.m_depth;
+							imageInfos[kk].pitch = pitch;
+							imageInfos[kk].slice = slice;
+							imageInfos[kk].size = size;
+							imageInfos[kk].mipLevel = lod;
+							imageInfos[kk].layer = side;
+						}
+						else
+						{
+							const uint32_t pitch = bx::strideAlign(mip.m_width * mip.m_bpp / 8, alignment);
+							const uint32_t slice = bx::strideAlign(mip.m_height * pitch, alignment);
+							const uint32_t size = slice * mip.m_depth;
+
+							uint8_t* temp = (uint8_t*)BX_ALLOC(g_allocator, size);
+							bimg::imageCopy(temp
+								, mip.m_height
+								, mip.m_width * mip.m_bpp / 8
+								, mip.m_depth
+								, mip.m_data
+								, pitch
+							);
+
+							imageInfos[kk].data = temp;
+							imageInfos[kk].width = mip.m_width;
+							imageInfos[kk].height = mip.m_height;
+							imageInfos[kk].depth = mip.m_depth;
+							imageInfos[kk].pitch = pitch;
+							imageInfos[kk].slice = slice;
+							imageInfos[kk].size = size;
+							imageInfos[kk].mipLevel = lod;
+							imageInfos[kk].layer = side;
+						}
+					}
+					++kk;
+				}
+			}
+
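+			// Rows are re-packed with a pitch aligned to kMinBufferOffsetAlignment so that the copy
+			// offsets and bytesPerRow used below satisfy the alignment rules for buffer-to-texture copies.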
+			uint32_t totalMemSize = 0;
+			for (uint32_t ii = 0; ii < numSrd; ++ii)
+			{
+				const uint32_t dstpitch = bx::strideAlign(imageInfos[ii].pitch, kMinBufferOffsetAlignment);
+				totalMemSize += dstpitch * imageInfos[ii].height;
+				//totalMemSize += imageInfos[ii].size;
+			}
+
+			wgpu::Buffer stagingBuffer;
+			if (totalMemSize > 0)
+			{
+				wgpu::BufferDescriptor stagingBufferDesc;
+				stagingBufferDesc.size = totalMemSize;
+				stagingBufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
+
+				wgpu::CreateBufferMappedResult mapped = s_renderWgpu->m_device.CreateBufferMapped(&stagingBufferDesc);
+				stagingBuffer = mapped.buffer;
+
+				uint64_t offset = 0;
+
+				for (uint32_t ii = 0; ii < numSrd; ++ii)
+				{
+					const uint32_t dstpitch = bx::strideAlign(imageInfos[ii].pitch, kMinBufferOffsetAlignment);
+
+					const uint8_t* src = (uint8_t*)imageInfos[ii].data;
+					uint8_t* dst = (uint8_t*)mapped.data;
+
+					for (uint32_t yy = 0; yy < imageInfos[ii].height; ++yy, src += imageInfos[ii].pitch, offset += dstpitch)
+					{
+						bx::memCopy(dst + offset, src, imageInfos[ii].pitch);
+					}
+
+					//bx::memCopy(dst + offset, imageInfos[ii].data, imageInfos[ii].size);
+					//offset += imageInfos[ii].size;
+				}
+
+				mapped.buffer.Unmap();
+			}
+			
+			wgpu::BufferCopyView* bufferCopyView = (wgpu::BufferCopyView*)BX_ALLOC(g_allocator, sizeof(wgpu::BufferCopyView) * numSrd);
+			wgpu::TextureCopyView* textureCopyView = (wgpu::TextureCopyView*)BX_ALLOC(g_allocator, sizeof(wgpu::TextureCopyView) * numSrd);
+			wgpu::Extent3D* textureCopySize = (wgpu::Extent3D*)BX_ALLOC(g_allocator, sizeof(wgpu::Extent3D) * numSrd);
+
+			uint64_t offset = 0;
+
+			for (uint32_t ii = 0; ii < numSrd; ++ii)
+			{
+				const uint32_t dstpitch = bx::strideAlign(imageInfos[ii].pitch, kMinBufferOffsetAlignment);
+
+				uint32_t idealWidth  = bx::max<uint32_t>(1, m_width  >> imageInfos[ii].mipLevel);
+				uint32_t idealHeight = bx::max<uint32_t>(1, m_height >> imageInfos[ii].mipLevel);
+				new (&bufferCopyView[ii]) wgpu::BufferCopyView();
+				new (&textureCopyView[ii]) wgpu::TextureCopyView();
+				new (&textureCopySize[ii]) wgpu::Extent3D();
+				bufferCopyView[ii].buffer      = stagingBuffer;
+				bufferCopyView[ii].offset      = offset;
+				bufferCopyView[ii].bytesPerRow = dstpitch; // rows were re-packed to this aligned pitch when filling the staging buffer
+				bufferCopyView[ii].rowsPerImage = 0; // 0 = rows are tightly packed within each image slice
+				textureCopyView[ii].texture        = m_ptr;
+			  //textureCopyView[ii].imageSubresource.aspectMask     = m_vkTextureAspect;
+				textureCopyView[ii].mipLevel       = imageInfos[ii].mipLevel;
+				textureCopyView[ii].arrayLayer     = imageInfos[ii].layer;
+			  //textureCopyView[ii].layerCount     = 1;
+				textureCopyView[ii].origin = { 0, 0, 0 };
+				textureCopySize[ii] = { idealWidth, idealHeight, imageInfos[ii].depth };
+
+				offset += dstpitch * imageInfos[ii].height;
+				//offset += imageInfos[ii].size;
+			}
+
+
+			if (stagingBuffer)
+			{
+				wgpu::CommandEncoder encoder = s_renderWgpu->getBlitCommandEncoder();
+				//wgpu::CommandEncoder encoder = s_renderWgpu->m_cmd.m_encoder;
+				for (uint32_t ii = 0; ii < numSrd; ++ii)
+				{
+					encoder.CopyBufferToTexture(&bufferCopyView[ii], &textureCopyView[ii], &textureCopySize[ii]);
+				}
+			}
+			else
+			{
+				//VkCommandBuffer commandBuffer = s_renderVK->beginNewCommand();
+				//setImageMemoryBarrier(
+				//	commandBuffer
+				//	, (m_flags & BGFX_TEXTURE_COMPUTE_WRITE
+				//		? VK_IMAGE_LAYOUT_GENERAL
+				//		: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+				//		)
+				//);
+				//s_renderVK->submitCommandAndWait(commandBuffer);
+			}
+
+			//vkFreeMemory(device, stagingDeviceMem, allocatorCb);
+			//vkDestroy(stagingBuffer);
+
+			BX_FREE(g_allocator, bufferCopyView);
+			BX_FREE(g_allocator, textureCopyView);
+			BX_FREE(g_allocator, textureCopySize);
+			for (uint32_t ii = 0; ii < numSrd; ++ii)
+			{
+				BX_FREE(g_allocator, imageInfos[ii].data);
+			}
+			BX_FREE(g_allocator, imageInfos);
+		}
+	}
+
+	void TextureWgpu::update(uint8_t _side, uint8_t _mip, const Rect& _rect, uint16_t _z, uint16_t _depth, uint16_t _pitch, const Memory* _mem)
+	{
+		BX_UNUSED(_side); BX_UNUSED(_mip); BX_UNUSED(_depth); BX_UNUSED(_z);
+
+		const uint32_t bpp       = bimg::getBitsPerPixel(bimg::TextureFormat::Enum(m_textureFormat) );
+		const uint32_t rectpitch = _rect.m_width*bpp/8;
+		const uint32_t srcpitch  = UINT16_MAX == _pitch ? rectpitch : _pitch;
+		const uint32_t slice     = ( (m_type == Texture3D) ? 0 : _side + _z * (m_type == TextureCube ? 6 : 1) );
+		const uint16_t zz        = (m_type == Texture3D) ? _z : 0 ;
+
+		const bool convert = m_textureFormat != m_requestedFormat;
+
+		uint8_t* data = _mem->data;
+		uint8_t* temp = NULL;
+
+		if (convert)
+		{
+			temp = (uint8_t*)BX_ALLOC(g_allocator, rectpitch*_rect.m_height);
+			bimg::imageDecodeToBgra8(
+				  g_allocator
+				, temp
+				, data
+				, _rect.m_width
+				, _rect.m_height
+				, srcpitch
+				, bimg::TextureFormat::Enum(m_requestedFormat)
+				);
+			data = temp;
+		}
+
+		const uint32_t dstpitch = bx::strideAlign(rectpitch, kMinBufferOffsetAlignment);
+
+		wgpu::BufferDescriptor desc;
+		desc.size = dstpitch * _rect.m_height;
+		desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
+
+		wgpu::CreateBufferMappedResult mapped = s_renderWgpu->m_device.CreateBufferMapped(&desc);
+		wgpu::Buffer staging = mapped.buffer;
+
+		const uint8_t* src = (uint8_t*)data;
+		uint8_t* dst = (uint8_t*)mapped.data;
+		uint64_t offset = 0;
+
+		for (uint32_t yy = 0; yy < _rect.m_height; ++yy, src += srcpitch, offset += dstpitch)
+		{
+			const uint32_t size = bx::strideAlign(rectpitch, 4);
+			bx::memCopy(dst + offset, src, size);
+		}
+
+		mapped.buffer.Unmap();
+
+		wgpu::BufferCopyView srcView;
+		srcView.buffer = staging;
+		srcView.offset = 0;
+		srcView.bytesPerRow = dstpitch;
+		srcView.rowsPerImage = 0;
+
+		wgpu::TextureCopyView destView;
+		destView.texture = m_ptr;
+		destView.mipLevel = _mip;
+		destView.arrayLayer = _side;
+		destView.origin = { _rect.m_x, _rect.m_y, zz };
+		//destView.origin = { _rect.m_x, _rect.m_y, _z };
+
+
+		wgpu::Extent3D destExtent = { _rect.m_width, _rect.m_height, _depth };
+
+		//region.imageSubresource.aspectMask = m_vkTextureAspect;
+
+		wgpu::CommandEncoder encoder = s_renderWgpu->getBlitCommandEncoder();
+		//wgpu::CommandEncoder encoder = s_renderWgpu->m_cmd.m_encoder;
+		encoder.CopyBufferToTexture(&srcView, &destView, &destExtent);
+
+		//wgpu::CommandBuffer copy = encoder.Finish();
+		//wgpu::Queue queue = s_renderWgpu->m_queue;
+		//queue.Submit(1, &copy);
+
+		//staging.Destroy();
+
+		if (NULL != temp)
+		{
+			BX_FREE(g_allocator, temp);
+		}
+	}
+
+	void BindStateWgpu::clear()
+	{
+		m_bindGroup = nullptr;
+	}
+
+	void StagingBufferWgpu::create(uint32_t _size, bool _mapped)
+	{
+		BX_UNUSED(_size); // the staging buffer is currently always sized for the full uniform range
+
+		wgpu::BufferDescriptor desc;
+		desc.size = BGFX_CONFIG_MAX_DRAW_CALLS * 128;  // UNIFORM_BUFFER_SIZE
+		desc.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+
+		if (_mapped)
+		{
+			wgpu::CreateBufferMappedResult mapped = s_renderWgpu->m_device.CreateBufferMapped(&desc);
+			m_buffer = mapped.buffer;
+			m_data = mapped.data;
+			m_size = mapped.dataLength;
+		}
+		else
+		{
+			m_buffer = s_renderWgpu->m_device.CreateBuffer(&desc);
+			map();
+		}
+	}
+
+	void StagingBufferWgpu::map()
+	{
+		auto ready = [](WGPUBufferMapAsyncStatus status, void* data, uint64_t dataLength, void* userdata)
+		{
+			if (status == WGPUBufferMapAsyncStatus_Success)
+				static_cast<StagingBufferWgpu*>(userdata)->mapped(data, dataLength);
+		};
+
+		m_buffer.MapWriteAsync(ready, this);
+	}
+
+	void StagingBufferWgpu::unmap()
+	{
+		m_data = nullptr;
+		m_size = 0;
+		m_buffer.Unmap();
+	}
+
+	void StagingBufferWgpu::destroy()
+	{
+		m_buffer = nullptr;
+	}
+
+	void StagingBufferWgpu::mapped(void* _data, uint64_t _size)
+	{
+		m_data = _data;
+		m_size = _size;
+	}
+
+	void ScratchBufferWgpu::create(uint32_t _size)
+	{
+		m_offset = 0;
+		m_size = _size;
+
+		wgpu::BufferDescriptor desc;
+		desc.size = BGFX_CONFIG_MAX_DRAW_CALLS * 128;  // UNIFORM_BUFFER_SIZE
+		desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform;
+
+		m_buffer = s_renderWgpu->m_device.CreateBuffer(&desc);
+	}
+
+	void ScratchBufferWgpu::destroy()
+	{
+	}
+
+	void ScratchBufferWgpu::begin()
+	{
+		for (uint8_t ii = 0; ii < WEBGPU_NUM_UNIFORM_BUFFERS; ++ii)
+		{
+			if (nullptr != s_renderWgpu->m_uniformBuffers[ii].m_data)
+			{
+				m_staging = &s_renderWgpu->m_uniformBuffers[ii];
+				break;
+			}
+		}
+
+		BX_CHECK(nullptr != m_staging, "No available mapped uniform buffer");
+	}
+
+	uint32_t ScratchBufferWgpu::write(void* data, uint64_t _size, uint64_t _offset)
+	{
+		uint32_t offset = m_offset;
+		bx::memCopy((void*)((uint8_t*)m_staging->m_data + offset), data, _size);
+		m_offset += _offset;
+		return offset;
+	}
+
+	uint32_t ScratchBufferWgpu::write(void* data, uint64_t _size)
+	{
+		uint32_t offset = m_offset;
+		bx::memCopy((void*)((uint8_t*)m_staging->m_data + offset), data, _size);
+		m_offset += _size;
+		return offset;
+	}
+
+	void ScratchBufferWgpu::submit()
+	{
+		m_staging->unmap();
+
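+		// Flush the staging buffer into the GPU-side uniform buffer in a single copy; release() later
+		// re-maps the staging buffer so it can be reused.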
+		wgpu::CommandEncoder& bce = s_renderWgpu->getBlitCommandEncoder();
+		bce.CopyBufferToBuffer(m_staging->m_buffer, 0, m_buffer, 0, m_size);
+	}
+
+	void ScratchBufferWgpu::release()
+	{
+		m_staging->map();
+		m_staging = nullptr;
+		m_offset = 0;
+	}
+
+	void BindStateCacheWgpu::create() //(uint32_t _maxBindGroups)
+	{
+		//m_maxBindStates = 1024; // _maxBindStates;
+		m_currentBindState = 0;
+	}
+
+	void BindStateCacheWgpu::destroy()
+	{
+		reset();
+	}
+
+	void BindStateCacheWgpu::reset()
+	{
+		for (size_t i = 0; i < m_currentBindState; ++i)
+		{
+			m_bindStates[i] = {};
+		}
+
+		m_currentBindState = 0;
+	}
+
+	wgpu::TextureView TextureWgpu::getTextureMipLevel(int _mip)
+	{
+		if (_mip >= 0
+		&&  _mip <  m_numMips
+		&&  m_ptr)
+		{
+			if (!m_ptrMips[_mip])
+			{
+				wgpu::TextureViewDescriptor desc;
+				desc.baseMipLevel = _mip;
+				desc.mipLevelCount = 1;
+
+				desc.format = s_textureFormat[m_textureFormat].m_fmt;
+
+				if (TextureCube == m_type)
+				{
+					//desc.dimension = MTLTextureType2DArray;
+					desc.baseArrayLayer = 0;
+					desc.arrayLayerCount = m_numLayers * 6;
+				}
+				else
+				{
+					desc.baseArrayLayer = 0;
+					desc.arrayLayerCount = m_numLayers;
+				}
+
+				m_ptrMips[_mip] = m_ptr.CreateView(&desc);
+			}
+
+			return m_ptrMips[_mip];
+		}
+
+		return wgpu::TextureView();
+	}
+
+	void SwapChainWgpu::init(wgpu::Device _device, void* _nwh, uint32_t _width, uint32_t _height)
+	{
+		BX_UNUSED(_nwh);
+
+		wgpu::SwapChainDescriptor desc;
+		desc.usage = wgpu::TextureUsage::OutputAttachment;
+		desc.width = _width;
+		desc.height = _height;
+
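+		// Native targets plug a platform-specific swap chain implementation into the descriptor,
+		// while Emscripten targets create a surface from the HTML canvas instead.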
+#if !BX_PLATFORM_EMSCRIPTEN
+		m_impl = createSwapChain(_device, _nwh);
+
+		desc.presentMode = wgpu::PresentMode::Immediate;
+		desc.format = wgpu::TextureFormat::RGBA8Unorm;
+		desc.implementation = reinterpret_cast<uint64_t>(&m_impl);
+		m_swapChain = _device.CreateSwapChain(nullptr, &desc);
+#else
+		wgpu::SurfaceDescriptorFromHTMLCanvasId canvasDesc{};
+		canvasDesc.id = "canvas";
+
+		wgpu::SurfaceDescriptor surfDesc{};
+		surfDesc.nextInChain = &canvasDesc;
+		wgpu::Surface surface = wgpu::Instance().CreateSurface(&surfDesc);
+
+		desc.presentMode = wgpu::PresentMode::Immediate;
+		desc.format = wgpu::TextureFormat::BGRA8Unorm;
+		m_swapChain = _device.CreateSwapChain(surface, &desc);
+#endif
+
+		m_colorFormat = desc.format;
+		m_depthFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+	}
+
+	void SwapChainWgpu::resize(FrameBufferWgpu& _frameBuffer, uint32_t _width, uint32_t _height, uint32_t _flags)
+	{
+		BX_TRACE("SwapChainWgpu::resize");
+		
+		const int32_t sampleCount = s_msaa[(_flags&BGFX_RESET_MSAA_MASK)>>BGFX_RESET_MSAA_SHIFT];
+
+		wgpu::TextureFormat format = (_flags & BGFX_RESET_SRGB_BACKBUFFER)
+#ifdef DAWN_ENABLE_BACKEND_VULKAN
+			? wgpu::TextureFormat::BGRA8UnormSrgb
+			: wgpu::TextureFormat::BGRA8Unorm
+#else
+			? wgpu::TextureFormat::RGBA8UnormSrgb
+			: wgpu::TextureFormat::RGBA8Unorm
+#endif
+			;
+
+#if !BX_PLATFORM_EMSCRIPTEN
+		m_swapChain.Configure(format, wgpu::TextureUsage::OutputAttachment, _width, _height);
+#endif
+
+		m_colorFormat = format;
+		m_depthFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+
+		bx::HashMurmur2A murmur;
+		murmur.begin();
+		murmur.add(1);
+		murmur.add((uint32_t)m_colorFormat);
+		murmur.add((uint32_t)m_depthFormat);
+		murmur.add((uint32_t)sampleCount);
+		_frameBuffer.m_pixelFormatHash = murmur.end();
+
+		wgpu::TextureDescriptor desc;
+
+		desc.dimension = wgpu::TextureDimension::e2D;
+
+		desc.size.width  = _width;
+		desc.size.height = _height;
+		desc.size.depth  = 1;
+		desc.mipLevelCount = 1;
+		desc.sampleCount = sampleCount;
+		desc.arrayLayerCount = 1;
+		desc.usage = wgpu::TextureUsage::OutputAttachment;
+
+		if (m_backBufferDepth)
+		{
+			m_backBufferDepth.Destroy();
+		}
+
+		desc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+
+		m_backBufferDepth = s_renderWgpu->m_device.CreateTexture(&desc);
+
+		if (sampleCount > 1)
+		{
+			if (m_backBufferColorMsaa)
+			{
+				m_backBufferColorMsaa.Destroy();
+			}
+
+			desc.format = m_colorFormat;
+			desc.sampleCount = sampleCount;
+
+			m_backBufferColorMsaa = s_renderWgpu->m_device.CreateTexture(&desc);
+		}
+	}
+
+	void SwapChainWgpu::flip()
+	{
+		m_drawable = m_swapChain.GetCurrentTextureView();
+	}
+
+	wgpu::TextureView SwapChainWgpu::current()
+	{
+		if (!m_drawable)
+			m_drawable = m_swapChain.GetCurrentTextureView();
+		return m_drawable;
+	}
+
+	void FrameBufferWgpu::create(uint8_t _num, const Attachment* _attachment)
+	{
+		m_swapChain = NULL;
+		m_denseIdx  = UINT16_MAX;
+		m_num       = 0;
+		m_width     = 0;
+		m_height    = 0;
+
+		for (uint32_t ii = 0; ii < _num; ++ii)
+		{
+			const Attachment& at = _attachment[ii];
+			TextureHandle handle = at.handle;
+
+			if (isValid(handle) )
+			{
+				const TextureWgpu& texture = s_renderWgpu->m_textures[handle.idx];
+
+				if (0 == m_width)
+				{
+					m_width = texture.m_width;
+					m_height = texture.m_height;
+				}
+
+				if (bimg::isDepth(bimg::TextureFormat::Enum(texture.m_textureFormat) ) )
+				{
+					m_depthHandle = handle;
+					m_depthAttachment = at;
+				}
+				else
+				{
+					m_colorHandle[m_num] = handle;
+					m_colorAttachment[m_num] = at;
+					m_num++;
+				}
+			}
+		}
+
+		bx::HashMurmur2A murmur;
+		murmur.begin();
+		murmur.add(m_num);
+
+		for (uint32_t ii = 0; ii < m_num; ++ii)
+		{
+			const TextureWgpu& texture = s_renderWgpu->m_textures[m_colorHandle[ii].idx];
+			murmur.add(uint32_t(s_textureFormat[texture.m_textureFormat].m_fmt) );
+		}
+
+		if (!isValid(m_depthHandle) )
+		{
+			murmur.add(uint32_t(wgpu::TextureFormat::Undefined) );
+		}
+		else
+		{
+			const TextureWgpu& depthTexture = s_renderWgpu->m_textures[m_depthHandle.idx];
+			murmur.add(uint32_t(s_textureFormat[depthTexture.m_textureFormat].m_fmt) );
+		}
+
+		murmur.add(1); // SampleCount
+
+		m_pixelFormatHash = murmur.end();
+	}
+
+	bool FrameBufferWgpu::create(uint16_t _denseIdx, void* _nwh, uint32_t _width, uint32_t _height, TextureFormat::Enum _format, TextureFormat::Enum _depthFormat)
+	{
+		BX_UNUSED(_format, _depthFormat);
+		m_swapChain = BX_NEW(g_allocator, SwapChainWgpu);
+		m_num       = 0;
+		m_width     = _width;
+		m_height    = _height;
+		m_nwh       = _nwh;
+		m_denseIdx  = _denseIdx;
+
+		m_swapChain->init(s_renderWgpu->m_device, _nwh, _width, _height);
+		m_swapChain->resize(*this, _width, _height, 0);
+
+		return m_swapChain->m_swapChain != nullptr;
+	}
+
+	void FrameBufferWgpu::postReset()
+	{
+	}
+
+	uint16_t FrameBufferWgpu::destroy()
+	{
+		if (NULL != m_swapChain)
+		{
+			BX_DELETE(g_allocator, m_swapChain);
+			m_swapChain = NULL;
+		}
+
+		m_num = 0;
+		m_nwh = NULL;
+		m_depthHandle.idx = kInvalidHandle;
+
+		uint16_t denseIdx = m_denseIdx;
+		m_denseIdx = UINT16_MAX;
+
+		return denseIdx;
+	}
+
+	void CommandQueueWgpu::init(wgpu::Queue _queue)
+	{
+		m_queue = _queue;
+#if BGFX_CONFIG_MULTITHREADED
+		//m_framesSemaphore.post(WEBGPU_MAX_FRAMES_IN_FLIGHT);
+#endif
+	}
+
+	void CommandQueueWgpu::shutdown()
+	{
+		finish(true);
+	}
+
+	void CommandQueueWgpu::begin()
+	{
+		m_encoder = s_renderWgpu->m_device.CreateCommandEncoder();
+	}
+
+	inline void commandBufferFinishedCallback(void* _data)
+	{
+#if BGFX_CONFIG_MULTITHREADED
+		CommandQueueWgpu* queue = (CommandQueueWgpu*)_data;
+		if (queue)
+		{
+			//queue->m_framesSemaphore.post();
+		}
+#else
+		BX_UNUSED(_data);
+#endif
+	}
+
+	void CommandQueueWgpu::kick(bool _endFrame, bool _waitForFinish)
+	{
+		if (m_encoder)
+		{
+			if (_endFrame)
+			{
+				m_releaseWriteIndex = (m_releaseWriteIndex + 1) % WEBGPU_MAX_FRAMES_IN_FLIGHT;
+				//m_encoder.addCompletedHandler(commandBufferFinishedCallback, this);
+			}
+
+			wgpu::CommandBuffer commands = m_encoder.Finish();
+			m_queue.Submit(1, &commands);
+
+			if (_waitForFinish)
+			{
+#if BGFX_CONFIG_MULTITHREADED
+				//m_framesSemaphore.post();
+#endif
+			}
+
+			m_encoder = nullptr;
+		}
+	}
+
+	void CommandQueueWgpu::finish(bool _finishAll)
+	{
+		if (_finishAll)
+		{
+			uint32_t count = m_encoder
+				? 2
+				: 3
+				;
+
+			for (uint32_t ii = 0; ii < count; ++ii)
+			{
+				consume();
+			}
+
+#if BGFX_CONFIG_MULTITHREADED
+			//m_framesSemaphore.post(count);
+#endif
+		}
+		else
+		{
+			consume();
+		}
+	}
+
+	void CommandQueueWgpu::release(wgpu::Buffer _buffer)
+	{
+		m_release[m_releaseWriteIndex].push_back(_buffer);
+	}
+
+	void CommandQueueWgpu::consume()
+	{
+#if BGFX_CONFIG_MULTITHREADED
+		//m_framesSemaphore.wait();
+#endif
+
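+		// The release lists form a ring with WEBGPU_MAX_FRAMES_IN_FLIGHT slots: buffers queued while a
+		// frame was being recorded are destroyed when that slot is consumed again.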
+		m_releaseReadIndex = (m_releaseReadIndex + 1) % WEBGPU_MAX_FRAMES_IN_FLIGHT;
+
+		for (wgpu::Buffer& buffer : m_release[m_releaseReadIndex])
+		{
+			buffer.Destroy();
+		}
+		
+		m_release[m_releaseReadIndex].clear();
+	}
+
+	void TimerQueryWgpu::init()
+	{
+		m_frequency = bx::getHPFrequency();
+	}
+
+	void TimerQueryWgpu::shutdown()
+	{
+	}
+
+	uint32_t TimerQueryWgpu::begin(uint32_t _resultIdx)
+	{
+		BX_UNUSED(_resultIdx);
+		return 0;
+	}
+
+	void TimerQueryWgpu::end(uint32_t _idx)
+	{
+		BX_UNUSED(_idx);
+	}
+
+#if 0
+	static void setTimestamp(void* _data)
+	{
+		*( (int64_t*)_data) = bx::getHPCounter();
+	}
+#endif
+
+	void TimerQueryWgpu::addHandlers(wgpu::CommandBuffer& _commandBuffer)
+	{
+		BX_UNUSED(_commandBuffer);
+
+		while (0 == m_control.reserve(1) )
+		{
+			m_control.consume(1);
+		}
+
+		//uint32_t offset = m_control.m_current;
+		//_commandBuffer.addScheduledHandler(setTimestamp, &m_result[offset].m_begin);
+		//_commandBuffer.addCompletedHandler(setTimestamp, &m_result[offset].m_end);
+		m_control.commit(1);
+	}
+
+	bool TimerQueryWgpu::get()
+	{
+		if (0 != m_control.available() )
+		{
+			uint32_t offset = m_control.m_read;
+			m_begin = m_result[offset].m_begin;
+			m_end   = m_result[offset].m_end;
+			m_elapsed = m_end - m_begin;
+
+			m_control.consume(1);
+
+			return true;
+		}
+
+		return false;
+	}
+
+	void RendererContextWgpu::submitBlit(BlitState& _bs, uint16_t _view)
+	{
+		if (!_bs.hasItem(_view) )
+		{
+			return;
+		}
+
+		endEncoding();
+
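+		// Texture copies can't be recorded inside a render or compute pass, so any open pass was ended
+		// above and the blits are recorded on the shared blit command encoder.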
+		wgpu::CommandEncoder& bce = getBlitCommandEncoder();
+
+		while (_bs.hasItem(_view) )
+		{
+			const BlitItem& blit = _bs.advance();
+
+			const TextureWgpu& src = m_textures[blit.m_src.idx];
+			const TextureWgpu& dst = m_textures[blit.m_dst.idx];
+
+			uint32_t srcWidth  = bx::uint32_min(src.m_width,  blit.m_srcX + blit.m_width)  - blit.m_srcX;
+			uint32_t srcHeight = bx::uint32_min(src.m_height, blit.m_srcY + blit.m_height) - blit.m_srcY;
+			uint32_t srcDepth  = bx::uint32_min(src.m_depth,  blit.m_srcZ + blit.m_depth)  - blit.m_srcZ;
+			uint32_t dstWidth  = bx::uint32_min(dst.m_width,  blit.m_dstX + blit.m_width)  - blit.m_dstX;
+			uint32_t dstHeight = bx::uint32_min(dst.m_height, blit.m_dstY + blit.m_height) - blit.m_dstY;
+			uint32_t dstDepth  = bx::uint32_min(dst.m_depth,  blit.m_dstZ + blit.m_depth)  - blit.m_dstZ;
+			uint32_t width     = bx::uint32_min(srcWidth,  dstWidth);
+			uint32_t height    = bx::uint32_min(srcHeight, dstHeight);
+			uint32_t depth     = bx::uint32_min(srcDepth,  dstDepth);
+			bool     readBack  = !!(dst.m_flags & BGFX_TEXTURE_READ_BACK);
+
+			wgpu::TextureCopyView srcView;
+			srcView.texture = src.m_ptr;
+			srcView.origin = { blit.m_srcX, blit.m_srcY, 0 };
+			srcView.mipLevel = blit.m_srcMip;
+			srcView.arrayLayer = blit.m_srcZ;
+
+			wgpu::TextureCopyView dstView;
+			dstView.texture = dst.m_ptr;
+			dstView.origin = { blit.m_dstX, blit.m_dstY, 0 };
+			dstView.mipLevel = blit.m_dstMip;
+			dstView.arrayLayer = blit.m_dstZ;
+
+			if (depth == 0)
+			{
+				wgpu::Extent3D copyExtent = { width, height, 1 };
+				bce.CopyTextureToTexture(&srcView, &dstView, &copyExtent);
+			}
+			else
+			{
+				wgpu::Extent3D copyExtent = { width, height, depth };
+				bce.CopyTextureToTexture(&srcView, &dstView, &copyExtent);
+			}
+
+			if (readBack)
+			{
+				//bce..synchronizeTexture(dst.m_ptr, 0, blit.m_dstMip);
+			}
+		}
+
+		//if (bce)
+		//{
+		//	bce.endEncoding();
+		//	bce = 0;
+		//}
+	}
+
+	void RendererContextWgpu::submit(Frame* _render, ClearQuad& _clearQuad, TextVideoMemBlitter& _textVideoMemBlitter)
+	{
+		if(_render->m_capture)
+		{
+			renderDocTriggerCapture();
+		}
+
+		m_cmd.finish(false);
+
+		if (!m_cmd.m_encoder)
+		{
+			m_cmd.begin();
+		}
+
+		BGFX_WEBGPU_PROFILER_BEGIN_LITERAL("rendererSubmit", kColorFrame);
+
+		int64_t timeBegin = bx::getHPCounter();
+		int64_t captureElapsed = 0;
+
+		//m_gpuTimer.addHandlers(m_encoder);
+
+		updateResolution(_render->m_resolution);
+
+		m_frameIndex = 0; // (m_frameIndex + 1) % WEBGPU_MAX_FRAMES_IN_FLIGHT;
+
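+		// Each in-flight frame owns a scratch uniform buffer and a bind-group state cache; rewind
+		// both before encoding this frame.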
+		ScratchBufferWgpu& scratchBuffer = m_scratchBuffers[m_frameIndex];
+		scratchBuffer.begin();
+
+		BindStateCacheWgpu& bindStates = m_bindStateCache[m_frameIndex];
+		bindStates.reset();
+
+		if (0 < _render->m_iboffset)
+		{
+			BGFX_PROFILER_SCOPE("bgfx/Update transient index buffer", kColorResource);
+			TransientIndexBuffer* ib = _render->m_transientIb;
+			m_indexBuffers[ib->handle.idx].update(0, bx::strideAlign(_render->m_iboffset,4), ib->data, true);
+		}
+
+		if (0 < _render->m_vboffset)
+		{
+			BGFX_PROFILER_SCOPE("bgfx/Update transient vertex buffer", kColorResource);
+			TransientVertexBuffer* vb = _render->m_transientVb;
+			m_vertexBuffers[vb->handle.idx].update(0, bx::strideAlign(_render->m_vboffset,4), vb->data, true);
+		}
+
+		_render->sort();
+
+		RenderDraw currentState;
+		currentState.clear();
+		currentState.m_stateFlags = BGFX_STATE_NONE;
+		currentState.m_stencil    = packStencil(BGFX_STENCIL_NONE, BGFX_STENCIL_NONE);
+
+		RenderBind currentBind;
+		currentBind.clear();
+
+		static ViewState viewState;
+		viewState.reset(_render);
+		uint32_t blendFactor = 0;
+
+		//bool wireframe = !!(_render->m_debug&BGFX_DEBUG_WIREFRAME);
+
+		ProgramHandle currentProgram = BGFX_INVALID_HANDLE;
+		uint32_t currentBindHash = 0;
+		uint32_t currentBindLayoutHash = 0;
+		BindStateWgpu* previousBindState = nullptr;
+		SortKey key;
+		uint16_t view = UINT16_MAX;
+		FrameBufferHandle fbh = { BGFX_CONFIG_MAX_FRAME_BUFFERS };
+
+		BlitState bs(_render);
+
+		const uint64_t primType = 0;
+		uint8_t primIndex = uint8_t(primType >> BGFX_STATE_PT_SHIFT);
+		PrimInfo prim = s_primInfo[primIndex];
+		const uint32_t maxComputeBindings = g_caps.limits.maxComputeBindings;
+
+		// TODO store this
+		static wgpu::RenderPassEncoder rce;
+		
+		PipelineStateWgpu* currentPso = NULL;
+
+		bool wasCompute     = false;
+		bool viewHasScissor = false;
+		Rect viewScissorRect;
+		viewScissorRect.clear();
+
+		uint32_t statsNumPrimsSubmitted[BX_COUNTOF(s_primInfo)] = {};
+		uint32_t statsNumPrimsRendered[BX_COUNTOF(s_primInfo)]  = {};
+		uint32_t statsNumInstances[BX_COUNTOF(s_primInfo)]      = {};
+		uint32_t statsNumDrawIndirect[BX_COUNTOF(s_primInfo)]   = {};
+		uint32_t statsNumIndices = 0;
+		uint32_t statsKeyType[2] = {};
+
+		Profiler<TimerQueryWgpu> profiler(
+			  _render
+			, m_gpuTimer
+			, s_viewName
+			);
+
+		if (0 == (_render->m_debug & BGFX_DEBUG_IFH))
+		{
+			viewState.m_rect = _render->m_view[0].m_rect;
+			int32_t numItems = _render->m_numRenderItems;
+
+			for (int32_t item = 0; item < numItems;)
+			{
+				const uint64_t encodedKey = _render->m_sortKeys[item];
+				const bool isCompute = key.decode(encodedKey, _render->m_viewRemap);
+				statsKeyType[isCompute]++;
+
+				const bool viewChanged = 0
+					|| key.m_view != view
+					|| item == numItems
+					;
+
+				const uint32_t itemIdx = _render->m_sortValues[item];
+				const RenderItem& renderItem = _render->m_renderItem[itemIdx];
+				const RenderBind& renderBind = _render->m_renderItemBind[itemIdx];
+				++item;
+
+				if (viewChanged
+					|| (!isCompute && wasCompute))
+				{
+					view = key.m_view;
+					currentProgram = BGFX_INVALID_HANDLE;
+
+					if (item > 1)
+					{
+						profiler.end();
+					}
+
+					BGFX_WEBGPU_PROFILER_END();
+					setViewType(view, "  ");
+					BGFX_WEBGPU_PROFILER_BEGIN(view, kColorView);
+
+					profiler.begin(view);
+
+					viewState.m_rect = _render->m_view[view].m_rect;
+
+					submitBlit(bs, view);
+
+					if (!isCompute)
+					{
+						const Rect& scissorRect = _render->m_view[view].m_scissor;
+						viewHasScissor = !scissorRect.isZero();
+						viewScissorRect = viewHasScissor ? scissorRect : viewState.m_rect;
+						Clear& clr = _render->m_view[view].m_clear;
+
+						Rect viewRect = viewState.m_rect;
+						bool clearWithRenderPass = false;
+
+						if (!m_renderEncoder
+							|| fbh.idx != _render->m_view[view].m_fbh.idx)
+						{
+							endEncoding();
+
+							fbh = _render->m_view[view].m_fbh;
+
+							uint32_t width = m_resolution.width;
+							uint32_t height = m_resolution.height;
+
+							if (isValid(fbh))
+							{
+								FrameBufferWgpu& frameBuffer = m_frameBuffers[fbh.idx];
+								width = frameBuffer.m_width;
+								height = frameBuffer.m_height;
+							}
+
+							clearWithRenderPass = true
+								&& 0 == viewRect.m_x
+								&& 0 == viewRect.m_y
+								&& width == viewRect.m_width
+								&& height == viewRect.m_height
+								;
+
+							rce = renderPass(_render, fbh, clearWithRenderPass, clr, s_viewName[view]);
+						}
+						else if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+						{
+							rce.PopDebugGroup();
+						}
+
+						if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+						{
+							rce.PushDebugGroup(s_viewName[view]);
+						}
+
+						//rce.setTriangleFillMode(wireframe ? MTLTriangleFillModeLines : MTLTriangleFillModeFill);
+
+						// TODO (webgpu) check other renderers
+						const Rect& rect = viewState.m_rect;
+						rce.SetViewport(rect.m_x, rect.m_y, rect.m_width, rect.m_height, 0.0f, 1.0f);
+
+						 // can't disable: set to view rect
+						rce.SetScissorRect(rect.m_x, rect.m_y, rect.m_width, rect.m_height);
+
+
+						if (BGFX_CLEAR_NONE != (clr.m_flags & BGFX_CLEAR_MASK)
+							&& !clearWithRenderPass)
+						{
+							clearQuad(_clearQuad, viewState.m_rect, clr, _render->m_colorPalette);
+						}
+					}
+				}
+
+				if (isCompute)
+				{
+					if (!wasCompute)
+					{
+						wasCompute = true;
+
+						endEncoding();
+						rce = nullptr;
+
+						setViewType(view, "C");
+						BGFX_WEBGPU_PROFILER_END();
+						BGFX_WEBGPU_PROFILER_BEGIN(view, kColorCompute);
+
+						m_computeEncoder = m_cmd.m_encoder.BeginComputePass();
+					}
+					else if (viewChanged)
+					{
+						if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+						{
+							m_computeEncoder.PopDebugGroup();
+						}
+
+						endEncoding();
+						m_computeEncoder = m_cmd.m_encoder.BeginComputePass();
+					}
+
+					if (viewChanged)
+					{
+						if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+						{
+							s_viewName[view][3] = 'C';
+							m_computeEncoder.PushDebugGroup(s_viewName[view]);
+							s_viewName[view][3] = ' ';
+						}
+					}
+
+					const RenderCompute& compute = renderItem.compute;
+
+					bool programChanged = false;
+					bool constantsChanged = compute.m_uniformBegin < compute.m_uniformEnd;
+					rendererUpdateUniforms(this, _render->m_uniformBuffer[compute.m_uniformIdx], compute.m_uniformBegin, compute.m_uniformEnd);
+
+					if (key.m_program.idx != currentProgram.idx)
+					{
+						currentProgram = key.m_program;
+
+						currentPso = getComputePipelineState(currentProgram);
+
+						if (NULL == currentPso)
+						{
+							currentProgram = BGFX_INVALID_HANDLE;
+							continue;
+						}
+
+						m_computeEncoder.SetPipeline(currentPso->m_cps);
+						programChanged =
+							constantsChanged = true;
+					}
+
+					if (!isValid(currentProgram)
+					||  NULL == currentPso)
+					{
+						BX_WARN(false, "Invalid program / No PSO");
+					}
+
+					const ProgramWgpu& program = m_program[currentProgram.idx];
+
+					if (constantsChanged)
+					{
+						UniformBuffer* vcb = program.m_vsh->m_constantBuffer;
+						if (NULL != vcb)
+						{
+							commit(*vcb);
+						}
+					}
+
+					viewState.setPredefined<4>(this, view, program, _render, compute);
+
+					uint32_t numOffset = 1;
+					uint32_t offsets[2] = { scratchBuffer.m_offset, 0 };
+					if (program.m_vsh->m_size > 0)
+					{
+						scratchBuffer.write(m_vsScratch, program.m_vsh->m_gpuSize);
+					}
+
+					BindStateWgpu& bindState = allocAndFillBindState(program, bindStates, scratchBuffer, renderBind);
+
+					bindProgram(m_computeEncoder, program, bindState, numOffset, offsets);
+
+					if (isValid(compute.m_indirectBuffer))
+					{
+						const VertexBufferWgpu& vb = m_vertexBuffers[compute.m_indirectBuffer.idx];
+						
+						uint32_t numDrawIndirect = UINT16_MAX == compute.m_numIndirect
+						? vb.m_size/BGFX_CONFIG_DRAW_INDIRECT_STRIDE
+						: compute.m_numIndirect
+						;
+						
+						uint32_t args = compute.m_startIndirect * BGFX_CONFIG_DRAW_INDIRECT_STRIDE;
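+						// One indirect dispatch per record; records are BGFX_CONFIG_DRAW_INDIRECT_STRIDE bytes apart.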
+						for (uint32_t ii = 0; ii < numDrawIndirect; ++ii)
+						{
+							m_computeEncoder.DispatchIndirect(
+								  vb.m_ptr
+								, args
+								);
+							args += BGFX_CONFIG_DRAW_INDIRECT_STRIDE;
+						}
+					}
+					else
+					{
+						m_computeEncoder.Dispatch(compute.m_numX, compute.m_numY, compute.m_numZ);
+					}
+
+					continue;
+				}
+
+
+				bool resetState = viewChanged || wasCompute;
+
+				if (wasCompute)
+				{
+					wasCompute = false;
+					currentProgram = BGFX_INVALID_HANDLE;
+
+					setViewType(view, " ");
+					BGFX_WEBGPU_PROFILER_END();
+					BGFX_WEBGPU_PROFILER_BEGIN(view, kColorDraw);
+				}
+
+				const RenderDraw& draw = renderItem.draw;
+
+				// TODO (hugoam)
+				//const bool depthWrite = !!(BGFX_STATE_WRITE_Z & draw.m_stateFlags);
+				const uint64_t newFlags = draw.m_stateFlags;
+				uint64_t changedFlags = currentState.m_stateFlags ^ draw.m_stateFlags;
+				currentState.m_stateFlags = newFlags;
+
+				const uint64_t newStencil = draw.m_stencil;
+				uint64_t changedStencil = (currentState.m_stencil ^ draw.m_stencil) & BGFX_STENCIL_FUNC_REF_MASK;
+				currentState.m_stencil = newStencil;
+
+				if (resetState)
+				{
+					wasCompute = false;
+
+					currentState.clear();
+					currentState.m_scissor = !draw.m_scissor;
+					changedFlags = BGFX_STATE_MASK;
+					changedStencil = packStencil(BGFX_STENCIL_MASK, BGFX_STENCIL_MASK);
+					currentState.m_stateFlags = newFlags;
+					currentState.m_stencil = newStencil;
+
+					currentBind.clear();
+
+					currentProgram = BGFX_INVALID_HANDLE;
+					const uint64_t pt = newFlags & BGFX_STATE_PT_MASK;
+					primIndex = uint8_t(pt >> BGFX_STATE_PT_SHIFT);
+				}
+
+				if (prim.m_type != s_primInfo[primIndex].m_type)
+				{
+					prim = s_primInfo[primIndex];
+				}
+
+				uint16_t scissor = draw.m_scissor;
+				if (currentState.m_scissor != scissor)
+				{
+					currentState.m_scissor = scissor;
+
+					if (UINT16_MAX == scissor)
+					{
+						if (viewHasScissor)
+						{
+							const auto& r = viewScissorRect;
+							rce.SetScissorRect(r.m_x, r.m_y, r.m_width, r.m_height);
+						}
+						else
+						{   // can't disable: set to view rect
+							const auto& r = viewState.m_rect;
+							rce.SetScissorRect(r.m_x, r.m_y, r.m_width, r.m_height);
+						}
+					}
+					else
+					{
+						Rect scissorRect;
+						scissorRect.setIntersect(viewScissorRect, _render->m_frameCache.m_rectCache.m_cache[scissor]);
+
+						const auto& r = scissorRect;
+						if (r.m_width == 0 || r.m_height == 0)
+						{
+							continue;
+						}
+						rce.SetScissorRect(r.m_x, r.m_y, r.m_width, r.m_height);
+					}
+
+				}
+
+				if (0 != changedStencil)
+				{
+					const uint32_t fstencil = unpackStencil(0, draw.m_stencil);
+					const uint32_t ref = (fstencil & BGFX_STENCIL_FUNC_REF_MASK) >> BGFX_STENCIL_FUNC_REF_SHIFT;
+					rce.SetStencilReference(ref);
+				}
+
+				if ((0 | BGFX_STATE_PT_MASK) & changedFlags)
+				{
+					const uint64_t pt = newFlags & BGFX_STATE_PT_MASK;
+					primIndex = uint8_t(pt >> BGFX_STATE_PT_SHIFT);
+					if (prim.m_type != s_primInfo[primIndex].m_type)
+					{
+						prim = s_primInfo[primIndex];
+					}
+				}
+
+				if (blendFactor != draw.m_rgba
+					&& !(newFlags & BGFX_STATE_BLEND_INDEPENDENT))
+				{
+					const uint32_t rgba = draw.m_rgba;
+					float rr = ((rgba >> 24)) / 255.0f;
+					float gg = ((rgba >> 16) & 0xff) / 255.0f;
+					float bb = ((rgba >> 8) & 0xff) / 255.0f;
+					float aa = ((rgba) & 0xff) / 255.0f;
+					wgpu::Color color = { rr, gg, bb, aa };
+					rce.SetBlendColor(&color);
+
+					blendFactor = draw.m_rgba;
+				}
+
+				bool programChanged = false;
+				bool constantsChanged = draw.m_uniformBegin < draw.m_uniformEnd;
+				rendererUpdateUniforms(this, _render->m_uniformBuffer[draw.m_uniformIdx], draw.m_uniformBegin, draw.m_uniformEnd);
+
+				bool vertexStreamChanged = hasVertexStreamChanged(currentState, draw);
+
+				if (key.m_program.idx != currentProgram.idx
+					|| vertexStreamChanged
+					|| (0
+						| BGFX_STATE_BLEND_MASK
+						| BGFX_STATE_BLEND_EQUATION_MASK
+						| BGFX_STATE_WRITE_RGB
+						| BGFX_STATE_WRITE_A
+						| BGFX_STATE_BLEND_INDEPENDENT
+						| BGFX_STATE_MSAA
+						| BGFX_STATE_BLEND_ALPHA_TO_COVERAGE
+						) & changedFlags
+					|| ((blendFactor != draw.m_rgba) && !!(newFlags & BGFX_STATE_BLEND_INDEPENDENT)))
+				{
+					currentProgram = key.m_program;
+
+					currentState.m_streamMask = draw.m_streamMask;
+					currentState.m_instanceDataBuffer.idx = draw.m_instanceDataBuffer.idx;
+					currentState.m_instanceDataOffset = draw.m_instanceDataOffset;
+					currentState.m_instanceDataStride = draw.m_instanceDataStride;
+
+					const VertexLayout* decls[BGFX_CONFIG_MAX_VERTEX_STREAMS];
+
+					uint32_t numVertices = draw.m_numVertices;
+					uint8_t  numStreams = 0;
+					for (uint32_t idx = 0, streamMask = draw.m_streamMask
+						; 0 != streamMask
+						; streamMask >>= 1, idx += 1, ++numStreams
+						)
+					{
+						const uint32_t ntz = bx::uint32_cnttz(streamMask);
+						streamMask >>= ntz;
+						idx += ntz;
+
+						currentState.m_stream[idx].m_layoutHandle = draw.m_stream[idx].m_layoutHandle;
+						currentState.m_stream[idx].m_handle = draw.m_stream[idx].m_handle;
+						currentState.m_stream[idx].m_startVertex = draw.m_stream[idx].m_startVertex;
+
+						const uint16_t handle = draw.m_stream[idx].m_handle.idx;
+						const VertexBufferWgpu& vb = m_vertexBuffers[handle];
+						const uint16_t decl = isValid(draw.m_stream[idx].m_layoutHandle)
+							? draw.m_stream[idx].m_layoutHandle.idx
+							: vb.m_layoutHandle.idx;
+						const VertexLayout& vertexDecl = m_vertexDecls[decl];
+						const uint32_t stride = vertexDecl.m_stride;
+
+						decls[numStreams] = &vertexDecl;
+
+						numVertices = bx::uint32_min(UINT32_MAX == draw.m_numVertices
+							? vb.m_size / stride
+							: draw.m_numVertices
+							, numVertices
+						);
+						const uint32_t offset = draw.m_stream[idx].m_startVertex * stride;
+
+						rce.SetVertexBuffer(idx, vb.m_ptr, offset);
+					}
+
+					bool index32 = false;
+
+					if (isValid(draw.m_indexBuffer))
+					{
+						const IndexBufferWgpu& ib = m_indexBuffers[draw.m_indexBuffer.idx];
+						index32 = 0 != (ib.m_flags & BGFX_BUFFER_INDEX32);
+					}
+
+					currentState.m_numVertices = numVertices;
+
+					if (!isValid(currentProgram))
+					{
+						continue;
+					}
+					else
+					{
+						currentPso = NULL;
+
+						if (0 < numStreams)
+						{
+							currentPso = getPipelineState(
+								newFlags
+								, newStencil
+								, draw.m_rgba
+								, fbh
+								, numStreams
+								, decls
+								, index32
+								, currentProgram
+								, uint8_t(draw.m_instanceDataStride / 16)
+							);
+						}
+
+						if (NULL == currentPso)
+						{
+							currentProgram = BGFX_INVALID_HANDLE;
+							continue;
+						}
+
+						rce.SetPipeline(currentPso->m_rps);
+					}
+
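+					// Instance data is bound as an extra vertex stream right after the regular streams.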
+					if (isValid(draw.m_instanceDataBuffer))
+					{
+						const VertexBufferWgpu& inst = m_vertexBuffers[draw.m_instanceDataBuffer.idx];
+						rce.SetVertexBuffer(numStreams/*+1*/, inst.m_ptr, draw.m_instanceDataOffset);
+					}
+
+					programChanged =
+						constantsChanged = true;
+				}
+
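+				// Commit uniforms: predefined and user uniforms are written into the per-frame
+				// scratch buffer and passed to bindProgram() as per-draw offsets, while
+				// textures/samplers go through a bind state cached on the binding hash.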
+				if (isValid(currentProgram))
+				{
+					const ProgramWgpu& program = m_program[currentProgram.idx];
+
+					if (constantsChanged)
+					{
+						UniformBuffer* vcb = program.m_vsh->m_constantBuffer;
+						if (NULL != vcb)
+						{
+							commit(*vcb);
+						}
+					}
+
+					if (constantsChanged)
+					{
+						UniformBuffer* fcb = program.m_fsh->m_constantBuffer;
+						if (NULL != fcb)
+						{
+							commit(*fcb);
+						}
+					}
+
+					viewState.setPredefined<4>(this, view, program, _render, draw);
+
+					bool hasPredefined = 0 < program.m_numPredefined;
+
+					uint32_t numOffset = 0;
+					uint32_t offsets[2] = { 0, 0 };
+					if (constantsChanged
+						|| hasPredefined)
+					{
+						//viewState.setPredefined<4>(this, view, program, _render, draw, programChanged || viewChanged);
+						//commitShaderConstants(scratchBuffer, program, voffset, foffset);
+
+						const uint32_t vsize = program.m_vsh->m_gpuSize;
+						const uint32_t fsize = (NULL != program.m_fsh ? program.m_fsh->m_gpuSize : 0);
+
+						if (program.m_vsh->m_size > 0)
+						{
+							offsets[numOffset++] = scratchBuffer.write(m_vsScratch, vsize);
+						}
+						if (fsize > 0)
+						{
+							offsets[numOffset++] = scratchBuffer.write(m_fsScratch, fsize);
+						}
+					}
+
+					uint32_t bindHash = bx::hash<bx::HashMurmur2A>(renderBind.m_bind, sizeof(renderBind.m_bind));
+					if (currentBindHash != bindHash
+					||  currentBindLayoutHash != program.m_bindGroupLayoutHash)
+					{
+						currentBindHash = bindHash;
+						currentBindLayoutHash = program.m_bindGroupLayoutHash;
+						previousBindState = &bindStates.m_bindStates[bindStates.m_currentBindState];
+
+						allocAndFillBindState(program, bindStates, scratchBuffer, renderBind);
+					}
+
+					BindStateWgpu& bindState = bindStates.m_bindStates[bindStates.m_currentBindState-1];
+
+					bindProgram(rce, program, bindState, numOffset, offsets);
+				}
+
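+				// Issue the draw: indirect draws loop over DrawIndirect/DrawIndexedIndirect,
+				// otherwise a single DrawIndexed/Draw is recorded; primitive stats are derived
+				// from the index/vertex counts using s_primInfo.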
+				if (0 != currentState.m_streamMask)
+				{
+					uint32_t numVertices = draw.m_numVertices;
+					if (UINT32_MAX == numVertices)
+					{
+						const VertexBufferWgpu& vb = m_vertexBuffers[currentState.m_stream[0].m_handle.idx];
+						uint16_t decl = !isValid(vb.m_layoutHandle) ? draw.m_stream[0].m_layoutHandle.idx : vb.m_layoutHandle.idx;
+						const VertexLayout& vertexDecl = m_vertexDecls[decl];
+						numVertices = vb.m_size/vertexDecl.m_stride;
+					}
+
+					uint32_t numIndices        = 0;
+					uint32_t numPrimsSubmitted = 0;
+					uint32_t numInstances      = 0;
+					uint32_t numPrimsRendered  = 0;
+					uint32_t numDrawIndirect   = 0;
+
+					if (isValid(draw.m_indirectBuffer) )
+					{
+						const VertexBufferWgpu& vb = m_vertexBuffers[draw.m_indirectBuffer.idx];
+
+						if (isValid(draw.m_indexBuffer) )
+						{
+							const IndexBufferWgpu& ib = m_indexBuffers[draw.m_indexBuffer.idx];
+
+							numDrawIndirect = UINT16_MAX == draw.m_numIndirect
+							? vb.m_size/BGFX_CONFIG_DRAW_INDIRECT_STRIDE
+							: draw.m_numIndirect
+							;
+
+							for (uint32_t ii = 0; ii < numDrawIndirect; ++ii)
+							{
+								rce.SetIndexBuffer(ib.m_ptr, 0);
+								rce.DrawIndexedIndirect(vb.m_ptr, (draw.m_startIndirect + ii)* BGFX_CONFIG_DRAW_INDIRECT_STRIDE);
+							}
+						}
+						else
+						{
+							numDrawIndirect = UINT16_MAX == draw.m_numIndirect
+							? vb.m_size/BGFX_CONFIG_DRAW_INDIRECT_STRIDE
+							: draw.m_numIndirect
+							;
+							for (uint32_t ii = 0; ii < numDrawIndirect; ++ii)
+							{
+								rce.DrawIndirect(vb.m_ptr, (draw.m_startIndirect + ii)* BGFX_CONFIG_DRAW_INDIRECT_STRIDE);
+							}
+						}
+					}
+					else
+					{
+						if (isValid(draw.m_indexBuffer) )
+						{
+							const IndexBufferWgpu& ib = m_indexBuffers[draw.m_indexBuffer.idx];
+
+							const uint32_t indexSize = 0 == (ib.m_flags & BGFX_BUFFER_INDEX32) ? 2 : 4;
+
+							if (UINT32_MAX == draw.m_numIndices)
+							{
+								numIndices        = ib.m_size/indexSize;
+								numPrimsSubmitted = numIndices/prim.m_div - prim.m_sub;
+								numInstances      = draw.m_numInstances;
+								numPrimsRendered  = numPrimsSubmitted*draw.m_numInstances;
+
+								rce.SetIndexBuffer(ib.m_ptr, 0);
+								rce.DrawIndexed(numIndices, draw.m_numInstances, 0, 0, 0);
+							}
+							else if (prim.m_min <= draw.m_numIndices)
+							{
+								numIndices        = draw.m_numIndices;
+								numPrimsSubmitted = numIndices/prim.m_div - prim.m_sub;
+								numInstances      = draw.m_numInstances;
+								numPrimsRendered  = numPrimsSubmitted*draw.m_numInstances;
+
+								rce.SetIndexBuffer(ib.m_ptr, 0);
+								rce.DrawIndexed(numIndices, numInstances, draw.m_startIndex, 0, 0);
+							}
+						}
+						else
+						{
+							numPrimsSubmitted = numVertices/prim.m_div - prim.m_sub;
+							numInstances      = draw.m_numInstances;
+							numPrimsRendered  = numPrimsSubmitted*draw.m_numInstances;
+
+							rce.Draw(numVertices, draw.m_numInstances, 0, 0);
+						}
+					}
+
+					statsNumPrimsSubmitted[primIndex] += numPrimsSubmitted;
+					statsNumPrimsRendered[primIndex]  += numPrimsRendered;
+					statsNumInstances[primIndex]      += numInstances;
+					statsNumDrawIndirect[primIndex]   += numDrawIndirect;
+					statsNumIndices                   += numIndices;
+				}
+			}
+
+			if (wasCompute)
+			{
+				invalidateCompute();
+
+				setViewType(view, "C");
+				BGFX_WEBGPU_PROFILER_END();
+				BGFX_WEBGPU_PROFILER_BEGIN(view, kColorCompute);
+			}
+
+			submitBlit(bs, BGFX_CONFIG_MAX_VIEWS);
+
+			if (0 < _render->m_numRenderItems)
+			{
+				captureElapsed = -bx::getHPCounter();
+				capture();
+				rce = m_renderEncoder;
+				captureElapsed += bx::getHPCounter();
+
+				profiler.end();
+			}
+		}
+
+		if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION) )
+		{
+			if (0 < _render->m_numRenderItems)
+			{
+				rce.PopDebugGroup();
+			}
+		}
+
+		BGFX_WEBGPU_PROFILER_END();
+
+		int64_t timeEnd = bx::getHPCounter();
+		int64_t frameTime = timeEnd - timeBegin;
+
+		static int64_t min = frameTime;
+		static int64_t max = frameTime;
+		min = bx::min<int64_t>(min, frameTime);
+		max = bx::max<int64_t>(max, frameTime);
+
+		static uint32_t maxGpuLatency = 0;
+		static double   maxGpuElapsed = 0.0;
+		double elapsedGpuMs = 0.0;
+
+		do
+		{
+			double toGpuMs = 1000.0 / double(m_gpuTimer.m_frequency);
+			elapsedGpuMs   = m_gpuTimer.m_elapsed * toGpuMs;
+			maxGpuElapsed  = elapsedGpuMs > maxGpuElapsed ? elapsedGpuMs : maxGpuElapsed;
+		}
+		while (m_gpuTimer.get() );
+
+		maxGpuLatency = bx::uint32_imax(maxGpuLatency, m_gpuTimer.m_control.available()-1);
+
+		const int64_t timerFreq = bx::getHPFrequency();
+
+		Stats& perfStats = _render->m_perfStats;
+		perfStats.cpuTimeBegin  = timeBegin;
+		perfStats.cpuTimeEnd    = timeEnd;
+		perfStats.cpuTimerFreq  = timerFreq;
+		perfStats.gpuTimeBegin  = m_gpuTimer.m_begin;
+		perfStats.gpuTimeEnd    = m_gpuTimer.m_end;
+		perfStats.gpuTimerFreq  = m_gpuTimer.m_frequency;
+		perfStats.numDraw       = statsKeyType[0];
+		perfStats.numCompute    = statsKeyType[1];
+		perfStats.numBlit       = _render->m_numBlitItems;
+		perfStats.maxGpuLatency = maxGpuLatency;
+		bx::memCopy(perfStats.numPrims, statsNumPrimsRendered, sizeof(perfStats.numPrims) );
+		perfStats.gpuMemoryMax  = -INT64_MAX;
+		perfStats.gpuMemoryUsed = -INT64_MAX;
+
+		//rce.setTriangleFillMode(MTLTriangleFillModeFill);
+		if (_render->m_debug & (BGFX_DEBUG_IFH|BGFX_DEBUG_STATS) )
+		{
+			rce = renderPass(_render, BGFX_INVALID_HANDLE, false, Clear());
+
+			if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+			{
+				rce.PushDebugGroup("debugstats");
+			}
+
+			TextVideoMem& tvm = m_textVideoMem;
+
+			static int64_t next = timeEnd;
+
+			if (timeEnd >= next)
+			{
+				next = timeEnd + timerFreq;
+
+				double freq = double(timerFreq);
+				double toMs = 1000.0/freq;
+
+				tvm.clear();
+				uint16_t pos = 0;
+				tvm.printf(0, pos++, BGFX_CONFIG_DEBUG ? 0x8c : 0x8f
+					, " %s / " BX_COMPILER_NAME " / " BX_CPU_NAME " / " BX_ARCH_NAME " / " BX_PLATFORM_NAME " "
+					, getRendererName()
+					);
+
+				pos = 10;
+				tvm.printf(10, pos++, 0x8b, "        Frame: %7.3f, % 7.3f \x1f, % 7.3f \x1e [ms] / % 6.2f FPS "
+					, double(frameTime)*toMs
+					, double(min)*toMs
+					, double(max)*toMs
+					, freq/frameTime
+					);
+
+				const uint32_t msaa = (m_resolution.reset&BGFX_RESET_MSAA_MASK)>>BGFX_RESET_MSAA_SHIFT;
+				tvm.printf(10, pos++, 0x8b, "  Reset flags: [%c] vsync, [%c] MSAAx%d, [%c] MaxAnisotropy "
+					, !!(m_resolution.reset&BGFX_RESET_VSYNC) ? '\xfe' : ' '
+					, 0 != msaa ? '\xfe' : ' '
+					, 1<<msaa
+					, !!(m_resolution.reset&BGFX_RESET_MAXANISOTROPY) ? '\xfe' : ' '
+					);
+
+				double elapsedCpuMs = double(frameTime)*toMs;
+				tvm.printf(10, pos++, 0x8b, "    Submitted: %4d (draw %4d, compute %4d) / CPU %3.4f [ms] %c GPU %3.4f [ms] (latency %d)"
+					, _render->m_numRenderItems
+					, statsKeyType[0]
+					, statsKeyType[1]
+					, elapsedCpuMs
+					, elapsedCpuMs > maxGpuElapsed ? '>' : '<'
+					, maxGpuElapsed
+					, maxGpuLatency
+					);
+				maxGpuLatency = 0;
+				maxGpuElapsed = 0.0;
+
+				for (uint32_t ii = 0; ii < Topology::Count; ++ii)
+				{
+					tvm.printf(10, pos++, 0x8b, "   %10s: %7d (#inst: %5d), submitted: %7d"
+						, getName(Topology::Enum(ii) )
+						, statsNumPrimsRendered[ii]
+						, statsNumInstances[ii]
+						, statsNumPrimsSubmitted[ii]
+						);
+				}
+
+				tvm.printf(10, pos++, 0x8b, "      Indices: %7d ", statsNumIndices);
+//				tvm.printf(10, pos++, 0x8b, " Uniform size: %7d, Max: %7d ", _render->m_uniformEnd, _render->m_uniformMax);
+				tvm.printf(10, pos++, 0x8b, "     DVB size: %7d ", _render->m_vboffset);
+				tvm.printf(10, pos++, 0x8b, "     DIB size: %7d ", _render->m_iboffset);
+
+				pos++;
+				double captureMs = double(captureElapsed)*toMs;
+				tvm.printf(10, pos++, 0x8b, "     Capture: %3.4f [ms]", captureMs);
+
+				uint8_t attr[2] = { 0x8c, 0x8a };
+				uint8_t attrIndex = _render->m_waitSubmit < _render->m_waitRender;
+
+				tvm.printf(10, pos++, attr[attrIndex    &1], " Submit wait: %3.4f [ms]", _render->m_waitSubmit*toMs);
+				tvm.printf(10, pos++, attr[(attrIndex+1)&1], " Render wait: %3.4f [ms]", _render->m_waitRender*toMs);
+
+				min = frameTime;
+				max = frameTime;
+			}
+
+			blit(this, _textVideoMemBlitter, tvm);
+			rce = m_renderEncoder;
+
+			if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+			{
+				rce.PopDebugGroup();
+			}
+		}
+		else if (_render->m_debug & BGFX_DEBUG_TEXT)
+		{
+			if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+			{
+				rce.PushDebugGroup("debugtext");
+			}
+
+			blit(this, _textVideoMemBlitter, _render->m_textVideoMem);
+			rce = m_renderEncoder;
+
+			if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+			{
+				rce.PopDebugGroup();
+			}
+		}
+
+		scratchBuffer.submit();
+
+		endEncoding();
+
+		m_cmd.kick(true);
+
+		scratchBuffer.release();
+
+#if !BX_PLATFORM_EMSCRIPTEN
+		for (uint32_t ii = 0, num = m_numWindows; ii < num; ++ii)
+		{
+			FrameBufferWgpu& frameBuffer = ii == 0 ? m_mainFrameBuffer : m_frameBuffers[m_windows[ii].idx];
+			if (NULL != frameBuffer.m_swapChain
+			&& frameBuffer.m_swapChain->m_drawable)
+			{
+				SwapChainWgpu& swapChain = *frameBuffer.m_swapChain;
+				swapChain.m_swapChain.Present();
+			}
+		}
+#endif
+	}
+
+} /* namespace webgpu */ } // namespace bgfx
+
+#else
+
+namespace bgfx { namespace webgpu
+	{
+		RendererContextI* rendererCreate(const Init& _init)
+		{
+			BX_UNUSED(_init);
+			return NULL;
+		}
+
+		void rendererDestroy()
+		{
+		}
+	} /* namespace webgpu */ } // namespace bgfx
+
+#endif // BGFX_CONFIG_RENDERER_WEBGPU

+ 4775 - 0
src/renderer_webgpu.cpp~RF2c45ef4.TMP

@@ -0,0 +1,4775 @@
+/*
+ * Copyright 2011-2019 Branimir Karadzic. All rights reserved.
+ * License: https://github.com/bkaradzic/bgfx#license-bsd-2-clause
+ */
+
+#include "bgfx_p.h"
+
+//#define DAWN_ENABLE_BACKEND_D3D12
+#define DAWN_ENABLE_BACKEND_VULKAN
+
+#if BGFX_CONFIG_RENDERER_WEBGPU
+#include "renderer_webgpu.h"
+#include "renderer.h"
+#include "debug_renderdoc.h"
+
+#ifdef DAWN_ENABLE_BACKEND_VULKAN
+#include "renderer_vk.h"
+#endif
+
+#include <cfloat>
+#include <new>
+
+#if !BX_PLATFORM_EMSCRIPTEN
+#ifdef DAWN_ENABLE_BACKEND_D3D12
+#include <dawn_native/D3D12Backend.h>
+#endif
+#ifdef DAWN_ENABLE_BACKEND_VULKAN
+#include <dawn_native/VulkanBackend.h>
+#endif
+#include <dawn_native/DawnNative.h>
+#include <dawn/dawn_wsi.h>
+#include <dawn/dawn_proc.h>
+#else
+#include <emscripten/emscripten.h>
+#include <emscripten/html5.h>
+#endif
+
+#define UNIFORM_BUFFER_SIZE (8*1024*1024)
+
+#define INDIRECT 1
+
+#define VARIABLE_BIND_GROUPS 0
+
+namespace bgfx { namespace webgpu
+{
+	// TODO (hugoam) cleanup
+	template <class T>
+	T defaultDescriptor() { return T(); }
+
+	template <> wgpu::BlendDescriptor              defaultDescriptor() { return { wgpu::BlendOperation::Add, wgpu::BlendFactor::One, wgpu::BlendFactor::Zero }; }
+	template <> wgpu::ColorStateDescriptor         defaultDescriptor() { return { nullptr, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor<wgpu::BlendDescriptor>(), defaultDescriptor<wgpu::BlendDescriptor>(), wgpu::ColorWriteMask::All }; }
+	template <> wgpu::StencilStateFaceDescriptor   defaultDescriptor() { return { wgpu::CompareFunction::Always, wgpu::StencilOperation::Keep, wgpu::StencilOperation::Keep, wgpu::StencilOperation::Keep }; }
+	template <> wgpu::VertexStateDescriptor        defaultDescriptor() { return { nullptr, wgpu::IndexFormat::Uint32, 0, nullptr }; }
+	template <> wgpu::VertexBufferLayoutDescriptor defaultDescriptor() { return { 0, wgpu::InputStepMode::Vertex, 0, nullptr }; }
+	template <> wgpu::VertexAttributeDescriptor    defaultDescriptor() { return { wgpu::VertexFormat::Float, 0, 0 }; }
+	template <> wgpu::RasterizationStateDescriptor defaultDescriptor() { return { nullptr, wgpu::FrontFace::CCW, wgpu::CullMode::None, 0, 0.f, 0.f }; }
+	template <> wgpu::ProgrammableStageDescriptor  defaultDescriptor() { return { nullptr, {}, "main" }; }
+	template <> wgpu::DepthStencilStateDescriptor  defaultDescriptor() { return { nullptr, wgpu::TextureFormat::Depth24PlusStencil8, false, wgpu::CompareFunction::Always, defaultDescriptor<wgpu::StencilStateFaceDescriptor>(), defaultDescriptor<wgpu::StencilStateFaceDescriptor>(), 0xff, 0xff }; }
+	template <> wgpu::PipelineLayoutDescriptor     defaultDescriptor() { return { nullptr, "", 0, nullptr }; }
+	template <> wgpu::TextureViewDescriptor        defaultDescriptor() { return {}; }
+
+	template <> wgpu::RenderPassColorAttachmentDescriptor defaultDescriptor() { return { {}, {}, wgpu::LoadOp::Clear, wgpu::StoreOp::Store, { 0.0f, 0.0f, 0.0f, 0.0f } }; }
+	template <> wgpu::RenderPassDepthStencilAttachmentDescriptor defaultDescriptor() { return { {}, wgpu::LoadOp::Clear, wgpu::StoreOp::Store, 1.0f, wgpu::LoadOp::Clear, wgpu::StoreOp::Store, 0 }; }
+
+	RenderPassDescriptor::RenderPassDescriptor()
+	{
+		depthStencilAttachment = defaultDescriptor<wgpu::RenderPassDepthStencilAttachmentDescriptor>();
+
+		for(uint32_t i = 0; i < kMaxColorAttachments; ++i)
+		{
+			colorAttachments[i] = defaultDescriptor<wgpu::RenderPassColorAttachmentDescriptor>();
+		}
+
+		desc = defaultDescriptor<wgpu::RenderPassDescriptor>();
+		//desc.colorAttachmentCount = colorAttachmentCount;
+		desc.colorAttachments = colorAttachments;
+		desc.colorAttachmentCount = 1; // TODO (hugoam) set it properly everywhere
+	}
+
+	VertexStateDescriptor::VertexStateDescriptor()
+	{
+		for(uint32_t i = 0; i < kMaxVertexInputs; ++i)
+		{
+			vertexBuffers[i] = defaultDescriptor<wgpu::VertexBufferLayoutDescriptor>();
+		}
+
+		for (uint32_t i = 0; i < kMaxVertexAttributes; ++i)
+		{
+			attributes[i] = defaultDescriptor<wgpu::VertexAttributeDescriptor>();
+		}
+
+		vertexBuffers[0].attributes = &attributes[0];
+		//vertexBuffers[0].attributeCount = numAttributes;
+
+		desc = defaultDescriptor<wgpu::VertexStateDescriptor>();
+
+		desc.vertexBuffers = vertexBuffers;
+		//desc.vertexBufferCount = numVertexBuffers;
+	}
+
+	RenderPipelineDescriptor::RenderPipelineDescriptor()
+	{
+		//vertexStage = defaultDescriptor<wgpu::ProgrammableStageDescriptor>();
+		fragmentStage = defaultDescriptor<wgpu::ProgrammableStageDescriptor>();
+		rasterizationState = defaultDescriptor<wgpu::RasterizationStateDescriptor>();
+		depthStencilState = defaultDescriptor<wgpu::DepthStencilStateDescriptor>();
+
+		for(uint32_t i = 0; i < kMaxColorAttachments; ++i)
+		{
+			colorStates[i] = defaultDescriptor<wgpu::ColorStateDescriptor>();
+		}
+
+		desc = defaultDescriptor<wgpu::RenderPipelineDescriptor>();
+
+		desc.primitiveTopology = wgpu::PrimitiveTopology::TriangleList;
+		desc.sampleCount = 1;
+		desc.colorStateCount = 1;
+
+		//wgpu::VertexStateDescriptor inputState = inputState.descriptor();
+
+		desc.vertexStage = defaultDescriptor<wgpu::ProgrammableStageDescriptor>();
+		desc.fragmentStage = &fragmentStage;
+		//desc.vertexState = &inputState;
+		desc.rasterizationState = &rasterizationState;
+		desc.depthStencilState = nullptr;
+		desc.colorStates = colorStates;
+	}
+	// TODO (hugoam) cleanup (end)
+
+	static char s_viewName[BGFX_CONFIG_MAX_VIEWS][BGFX_CONFIG_MAX_VIEW_NAME];
+
+	inline void setViewType(ViewId _view, const bx::StringView _str)
+	{
+		if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION || BGFX_CONFIG_PROFILER) )
+		{
+			bx::memCopy(&s_viewName[_view][3], _str.getPtr(), _str.getLength() );
+		}
+	}
+
+	struct PrimInfo
+	{
+		wgpu::PrimitiveTopology m_type;
+		uint32_t m_min;
+		uint32_t m_div;
+		uint32_t m_sub;
+	};
+	
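+	// Maps bgfx Topology to wgpu::PrimitiveTopology; m_min/m_div/m_sub are used to
+	// derive primitive counts from index/vertex counts (prims = count/div - sub).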
+	static const PrimInfo s_primInfo[] =
+	{
+		{ wgpu::PrimitiveTopology::TriangleList,  3, 3, 0 },
+		{ wgpu::PrimitiveTopology::TriangleStrip, 3, 1, 2 },
+		{ wgpu::PrimitiveTopology::LineList,      2, 2, 0 },
+		{ wgpu::PrimitiveTopology::LineStrip,     2, 1, 1 },
+		{ wgpu::PrimitiveTopology::PointList,     1, 1, 0 },
+	};
+	BX_STATIC_ASSERT(Topology::Count == BX_COUNTOF(s_primInfo) );
+	
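+	// Vertex attribute formats indexed by [AttribType][component count - 1][normalized].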
+	static const wgpu::VertexFormat s_attribType[][4][2] =
+	{
+		{ // Uint8
+			{ wgpu::VertexFormat::UChar2, wgpu::VertexFormat::UChar2Norm },
+			{ wgpu::VertexFormat::UChar2, wgpu::VertexFormat::UChar2Norm },
+			{ wgpu::VertexFormat::UChar4, wgpu::VertexFormat::UChar4Norm },
+			{ wgpu::VertexFormat::UChar4, wgpu::VertexFormat::UChar4Norm },
+		},
+		{ // Uint10
+			{ wgpu::VertexFormat::UShort2, wgpu::VertexFormat::UShort2Norm },
+			{ wgpu::VertexFormat::UShort2, wgpu::VertexFormat::UShort2Norm },
+			{ wgpu::VertexFormat::UShort4, wgpu::VertexFormat::UShort4Norm },
+			{ wgpu::VertexFormat::UShort4, wgpu::VertexFormat::UShort4Norm },
+		},
+		{ // Int16
+			{ wgpu::VertexFormat::Short2, wgpu::VertexFormat::Short2Norm },
+			{ wgpu::VertexFormat::Short2, wgpu::VertexFormat::Short2Norm },
+			{ wgpu::VertexFormat::Short4, wgpu::VertexFormat::Short4Norm },
+			{ wgpu::VertexFormat::Short4, wgpu::VertexFormat::Short4Norm },
+		},
+		{ // Half
+			{ wgpu::VertexFormat::Half2, wgpu::VertexFormat::Half2 },
+			{ wgpu::VertexFormat::Half2, wgpu::VertexFormat::Half2 },
+			{ wgpu::VertexFormat::Half4, wgpu::VertexFormat::Half4 },
+			{ wgpu::VertexFormat::Half4, wgpu::VertexFormat::Half4 },
+		},
+		{ // Float
+			{ wgpu::VertexFormat::Float,  wgpu::VertexFormat::Float  },
+			{ wgpu::VertexFormat::Float2, wgpu::VertexFormat::Float2 },
+			{ wgpu::VertexFormat::Float3, wgpu::VertexFormat::Float3 },
+			{ wgpu::VertexFormat::Float4, wgpu::VertexFormat::Float4 },
+		},
+	};
+	BX_STATIC_ASSERT(AttribType::Count == BX_COUNTOF(s_attribType) );
+
+	static const wgpu::CullMode s_cullMode[] =
+	{
+		wgpu::CullMode::None,
+		wgpu::CullMode::Front,
+		wgpu::CullMode::Back,
+		wgpu::CullMode::None,
+	};
+
+	static const wgpu::BlendFactor s_blendFactor[][2] =
+	{
+		{ wgpu::BlendFactor(0),                  wgpu::BlendFactor(0)                  }, // ignored
+		{ wgpu::BlendFactor::Zero,               wgpu::BlendFactor::Zero               }, // ZERO
+		{ wgpu::BlendFactor::One,                wgpu::BlendFactor::One                }, // ONE
+		{ wgpu::BlendFactor::SrcColor,           wgpu::BlendFactor::SrcAlpha           }, // SRC_COLOR
+		{ wgpu::BlendFactor::OneMinusSrcColor,   wgpu::BlendFactor::OneMinusSrcAlpha   }, // INV_SRC_COLOR
+		{ wgpu::BlendFactor::SrcAlpha,           wgpu::BlendFactor::SrcAlpha           }, // SRC_ALPHA
+		{ wgpu::BlendFactor::OneMinusSrcAlpha,   wgpu::BlendFactor::OneMinusSrcAlpha   }, // INV_SRC_ALPHA
+		{ wgpu::BlendFactor::DstAlpha,           wgpu::BlendFactor::DstAlpha           }, // DST_ALPHA
+		{ wgpu::BlendFactor::OneMinusDstAlpha,   wgpu::BlendFactor::OneMinusDstAlpha   }, // INV_DST_ALPHA
+		{ wgpu::BlendFactor::DstColor,           wgpu::BlendFactor::DstAlpha           }, // DST_COLOR
+		{ wgpu::BlendFactor::OneMinusDstColor,   wgpu::BlendFactor::OneMinusDstAlpha   }, // INV_DST_COLOR
+		{ wgpu::BlendFactor::SrcAlphaSaturated,  wgpu::BlendFactor::One                }, // SRC_ALPHA_SAT
+		{ wgpu::BlendFactor::BlendColor,         wgpu::BlendFactor::BlendColor         }, // FACTOR
+		{ wgpu::BlendFactor::OneMinusBlendColor, wgpu::BlendFactor::OneMinusBlendColor }, // INV_FACTOR
+	};
+
+	static const wgpu::BlendOperation s_blendEquation[] =
+	{
+		wgpu::BlendOperation::Add,
+		wgpu::BlendOperation::Subtract,
+		wgpu::BlendOperation::ReverseSubtract,
+		wgpu::BlendOperation::Min,
+		wgpu::BlendOperation::Max,
+	};
+
+	static const wgpu::CompareFunction s_cmpFunc[] =
+	{
+		wgpu::CompareFunction::Always, // ignored
+		wgpu::CompareFunction::Less,
+		wgpu::CompareFunction::LessEqual,
+		wgpu::CompareFunction::Equal,
+		wgpu::CompareFunction::GreaterEqual,
+		wgpu::CompareFunction::Greater,
+		wgpu::CompareFunction::NotEqual,
+		wgpu::CompareFunction::Never,
+		wgpu::CompareFunction::Always,
+	};
+
+	static const wgpu::StencilOperation s_stencilOp[] =
+	{
+		wgpu::StencilOperation::Zero,
+		wgpu::StencilOperation::Keep,
+		wgpu::StencilOperation::Replace,
+		wgpu::StencilOperation::IncrementWrap,
+		wgpu::StencilOperation::IncrementClamp,
+		wgpu::StencilOperation::DecrementWrap,
+		wgpu::StencilOperation::DecrementClamp,
+		wgpu::StencilOperation::Invert,
+	};
+
+	static const wgpu::AddressMode s_textureAddress[] =
+	{
+		wgpu::AddressMode::Repeat,
+		wgpu::AddressMode::MirrorRepeat,
+		wgpu::AddressMode::ClampToEdge,
+		wgpu::AddressMode(0), // Border ? ClampToZero ?
+	};
+
+	static const wgpu::FilterMode s_textureFilterMinMag[] =
+	{
+		wgpu::FilterMode::Linear,
+		wgpu::FilterMode::Nearest,
+		wgpu::FilterMode::Linear,
+	};
+
+	static const wgpu::FilterMode s_textureFilterMip[] =
+	{
+		wgpu::FilterMode::Linear,
+		wgpu::FilterMode::Nearest,
+	};
+
+	struct TextureFormatInfo
+	{
+		wgpu::TextureFormat m_fmt;
+		wgpu::TextureFormat m_fmtSrgb;
+	};
+
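+	// Texture format table indexed by bgfx::TextureFormat. wgpu::TextureFormat::Undefined
+	// marks formats the backend does not expose; init() builds g_caps.formats from it.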
+	static TextureFormatInfo s_textureFormat[] =
+	{
+		{ wgpu::TextureFormat::BC1RGBAUnorm,            wgpu::TextureFormat::BC1RGBAUnormSrgb },  // BC1
+		{ wgpu::TextureFormat::BC2RGBAUnorm,            wgpu::TextureFormat::BC2RGBAUnormSrgb },  // BC2
+		{ wgpu::TextureFormat::BC3RGBAUnorm,            wgpu::TextureFormat::BC3RGBAUnormSrgb },  // BC3
+		{ wgpu::TextureFormat::BC4RUnorm,               wgpu::TextureFormat::Undefined                },  // BC4  //  BC4RSnorm ??
+		{ wgpu::TextureFormat::BC5RGUnorm,              wgpu::TextureFormat::Undefined                },  // BC5  //  BC5RGSnorm ??
+		{ wgpu::TextureFormat::BC6HRGBUfloat,           wgpu::TextureFormat::Undefined                },  // BC6H //  BC6HRGBSfloat ??
+		{ wgpu::TextureFormat::BC7RGBAUnorm,            wgpu::TextureFormat::BC7RGBAUnormSrgb },  // BC7
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ETC1
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ETC2
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ETC2A
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ETC2A1
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // PTC12
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // PTC14
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // PTC12A
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // PTC14A
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // PTC22
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // PTC24
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ATC
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ATCE
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ATCI
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ASTC4x4
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ASTC5x5
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ASTC6x6
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ASTC8x5
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ASTC8x6
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // ASTC10x5
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // Unknown
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // R1
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // A8
+		{ wgpu::TextureFormat::R8Unorm,                 wgpu::TextureFormat::Undefined                },  // R8
+		{ wgpu::TextureFormat::R8Sint,                  wgpu::TextureFormat::Undefined                },  // R8I
+		{ wgpu::TextureFormat::R8Uint,                  wgpu::TextureFormat::Undefined                },  // R8U
+		{ wgpu::TextureFormat::R8Snorm,                 wgpu::TextureFormat::Undefined                },  // R8S
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // R16
+		{ wgpu::TextureFormat::R16Sint,                 wgpu::TextureFormat::Undefined                },  // R16I
+		{ wgpu::TextureFormat::R16Uint,                 wgpu::TextureFormat::Undefined                },  // R16U
+		{ wgpu::TextureFormat::R16Float,                wgpu::TextureFormat::Undefined                },  // R16F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // R16S
+		{ wgpu::TextureFormat::R32Sint,                 wgpu::TextureFormat::Undefined                },  // R32I
+		{ wgpu::TextureFormat::R32Uint,                 wgpu::TextureFormat::Undefined                },  // R32U
+		{ wgpu::TextureFormat::R32Float,                wgpu::TextureFormat::Undefined                },  // R32F
+		{ wgpu::TextureFormat::RG8Unorm,                wgpu::TextureFormat::Undefined                },  // RG8
+		{ wgpu::TextureFormat::RG8Sint,                 wgpu::TextureFormat::Undefined                },  // RG8I
+		{ wgpu::TextureFormat::RG8Uint,                 wgpu::TextureFormat::Undefined                },  // RG8U
+		{ wgpu::TextureFormat::RG8Snorm,                wgpu::TextureFormat::Undefined                },  // RG8S
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RG16
+		{ wgpu::TextureFormat::RG16Sint,                wgpu::TextureFormat::Undefined                },  // RG16I
+		{ wgpu::TextureFormat::RG16Uint,                wgpu::TextureFormat::Undefined                },  // RG16U
+		{ wgpu::TextureFormat::RG16Float,               wgpu::TextureFormat::Undefined                },  // RG16F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RG16S
+		{ wgpu::TextureFormat::RG32Sint,                wgpu::TextureFormat::Undefined                },  // RG32I
+		{ wgpu::TextureFormat::RG32Uint,                wgpu::TextureFormat::Undefined                },  // RG32U
+		{ wgpu::TextureFormat::RG32Float,               wgpu::TextureFormat::Undefined                },  // RG32F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGB8
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGB8I
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGB8U
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGB8S
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGB9E5F
+		{ wgpu::TextureFormat::BGRA8Unorm,              wgpu::TextureFormat::BGRA8UnormSrgb   },  // BGRA8
+		{ wgpu::TextureFormat::RGBA8Unorm,              wgpu::TextureFormat::RGBA8UnormSrgb   },  // RGBA8
+		{ wgpu::TextureFormat::RGBA8Sint,               wgpu::TextureFormat::Undefined                },  // RGBA8I
+		{ wgpu::TextureFormat::RGBA8Uint,               wgpu::TextureFormat::Undefined                },  // RGBA8U
+		{ wgpu::TextureFormat::RGBA8Snorm,              wgpu::TextureFormat::Undefined                },  // RGBA8S
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGBA16
+		{ wgpu::TextureFormat::RGBA16Sint,              wgpu::TextureFormat::Undefined                },  // RGBA16I
+		{ wgpu::TextureFormat::RGBA16Uint,              wgpu::TextureFormat::Undefined                },  // RGBA16U
+		{ wgpu::TextureFormat::RGBA16Float,             wgpu::TextureFormat::Undefined                },  // RGBA16F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGBA16S
+		{ wgpu::TextureFormat::RGBA32Sint,              wgpu::TextureFormat::Undefined                },  // RGBA32I
+		{ wgpu::TextureFormat::RGBA32Uint,              wgpu::TextureFormat::Undefined                },  // RGBA32U
+		{ wgpu::TextureFormat::RGBA32Float,             wgpu::TextureFormat::Undefined                },  // RGBA32F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // R5G6B5
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGBA4
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // RGB5A1
+		{ wgpu::TextureFormat::RGB10A2Unorm,            wgpu::TextureFormat::Undefined                },  // RGB10A2
+		{ wgpu::TextureFormat::RG11B10Float,            wgpu::TextureFormat::Undefined                },  // RG11B10F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // UnknownDepth
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // D16
+		{ wgpu::TextureFormat::Depth24Plus,             wgpu::TextureFormat::Undefined                },  // D24
+		{ wgpu::TextureFormat::Depth24PlusStencil8,     wgpu::TextureFormat::Undefined                },  // D24S8
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // D32
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // D16F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // D24F
+		{ wgpu::TextureFormat::Depth32Float,            wgpu::TextureFormat::Undefined                },  // D32F
+		{ wgpu::TextureFormat::Undefined,                       wgpu::TextureFormat::Undefined                },  // D0S8
+	};
+	BX_STATIC_ASSERT(TextureFormat::Count == BX_COUNTOF(s_textureFormat));
+
+	int32_t s_msaa[] =
+	{
+		 1,
+		 2,
+		 4,
+		 8,
+		16,
+	};
+
+	struct RendererContextWgpu;
+	static RendererContextWgpu* s_renderWgpu;
+
+	static bool s_ignoreError = false;
+
+#if !BX_PLATFORM_EMSCRIPTEN
+	DawnSwapChainImplementation(*createSwapChain)(wgpu::Device device, void* nwh);
+
+#ifdef DAWN_ENABLE_BACKEND_D3D12
+	DawnSwapChainImplementation CreateSwapChainD3D12(wgpu::Device device, void* nwh)
+	{
+		HWND win32Window = (HWND)nwh;
+		return dawn_native::d3d12::CreateNativeSwapChainImpl(device.Get(), win32Window);
+	}
+#endif
+
+#ifdef DAWN_ENABLE_BACKEND_VULKAN
+	DawnSwapChainImplementation CreateSwapChainVulkan(wgpu::Device device, void* nwh)
+	{
+		VkInstance instance = dawn_native::vulkan::GetInstance(device.Get());
+
+		PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)dawn_native::vulkan::GetInstanceProcAddr(device.Get(), "vkCreateWin32SurfaceKHR");
+
+		VkSurfaceKHR surface = VK_NULL_HANDLE;
+#if BX_PLATFORM_WINDOWS
+		// Copied from renderer_vk.cpp -> needs refactor
+		{
+			VkWin32SurfaceCreateInfoKHR sci;
+			sci.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
+			sci.pNext = NULL;
+			sci.flags = 0;
+			sci.hinstance = (HINSTANCE)GetModuleHandle(NULL);
+			sci.hwnd = (HWND)nwh;
+			VkResult result = vkCreateWin32SurfaceKHR(instance, &sci, NULL, &surface);
+			BX_CHECK(VK_SUCCESS == result, "vkCreateWin32SurfaceKHR failed.");
+			BX_UNUSED(result);
+		}
+#endif
+		return dawn_native::vulkan::CreateNativeSwapChainImpl(device.Get(), surface);
+	}
+#endif
+
+#endif
+
+	struct RendererContextWgpu : public RendererContextI
+	{
+		RendererContextWgpu()
+			: m_frameIndex(0)
+			, m_numWindows(0)
+			, m_rtMsaa(false)
+			, m_capture(NULL)
+			, m_captureSize(0)
+		{
+			bx::memSet(&m_windows, 0xff, sizeof(m_windows) );
+		}
+
+		~RendererContextWgpu()
+		{
+		}
+
+		bool init(const Init& _init)
+		{
+			BX_UNUSED(_init);
+			BX_TRACE("Init.");
+
+			if (_init.debug
+			||  _init.profile)
+			{
+				m_renderDocDll = loadRenderDoc();
+			}
+
+			setGraphicsDebuggerPresent(NULL != m_renderDocDll);
+
+			m_fbh.idx = kInvalidHandle;
+			bx::memSet(m_uniforms, 0, sizeof(m_uniforms) );
+			bx::memSet(&m_resolution, 0, sizeof(m_resolution) );
+
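+			// Native builds create the device through Dawn, picking the first adapter that
+			// matches the compiled-in backend; Emscripten builds get the device from the
+			// browser via emscripten_webgpu_get_device().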
+#if !BX_PLATFORM_EMSCRIPTEN
+			// Default to D3D12, Metal, Vulkan, OpenGL in that order, as D3D12 and Metal are
+			// preferred on their respective platforms, and Vulkan is preferred over OpenGL.
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+			static dawn_native::BackendType backendType = dawn_native::BackendType::D3D12;
+#elif defined(DAWN_ENABLE_BACKEND_METAL)
+			static dawn_native::BackendType backendType = dawn_native::BackendType::Metal;
+#elif defined(DAWN_ENABLE_BACKEND_OPENGL)
+			static dawn_native::BackendType backendType = dawn_native::BackendType::OpenGL;
+#elif defined(DAWN_ENABLE_BACKEND_VULKAN)
+			static dawn_native::BackendType backendType = dawn_native::BackendType::Vulkan;
+#else
+#error "No Dawn backend enabled. Define one of the DAWN_ENABLE_BACKEND_* macros."
+#endif
+
+			m_instance.EnableBackendValidation(true);
+			m_instance.DiscoverDefaultAdapters();
+
+			dawn_native::Adapter backendAdapter;
+			std::vector<dawn_native::Adapter> adapters = m_instance.GetAdapters();
+			for (dawn_native::Adapter& adapter : adapters)
+			{
+				if (adapter.GetBackendType() == backendType)
+				{
+					backendAdapter = adapter;
+					break;
+				}
+			}
+
+			//BX_ASSERT(adapterIt != adapters.end());
+
+			WGPUDevice backendDevice = backendAdapter.CreateDevice();
+			DawnProcTable backendProcs = dawn_native::GetProcs();
+
+			using CreateSwapChain = DawnSwapChainImplementation (*)(wgpu::Device device, void* nwh);
+
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+			createSwapChain = CreateSwapChainD3D12;
+#elif defined(DAWN_ENABLE_BACKEND_METAL)
+			createSwapChain = CreateSwapChainMetal;
+#elif defined(DAWN_ENABLE_BACKEND_NULL)
+			createSwapChain = CreateSwapChainNull;
+#elif defined(DAWN_ENABLE_BACKEND_OPENGL)
+			createSwapChain = CreateSwapChainOpenGL;
+#elif defined(DAWN_ENABLE_BACKEND_VULKAN)
+			createSwapChain = CreateSwapChainVulkan;
+#endif
+
+			// Choose whether to use the backend procs and devices directly, or set up the wire.
+			WGPUDevice cDevice = backendDevice;
+			DawnProcTable procs = backendProcs;
+			dawnProcSetProcs(&procs);
+
+			m_device = wgpu::Device::Acquire(cDevice);
+#else
+			m_device = wgpu::Device(emscripten_webgpu_get_device());
+#endif
+
+			auto PrintDeviceError = [](WGPUErrorType errorType, const char* message, void*) {
+				BX_UNUSED(errorType);
+
+				if (s_ignoreError)
+				{
+					BX_TRACE("Device error: %s", message);
+				}
+				else
+				{
+					BX_CHECK(false, "Device error: %s", message);
+				}
+				s_ignoreError = false;
+			};
+
+			m_device.SetUncapturedErrorCallback(PrintDeviceError, nullptr);
+
+			if (!m_device)
+			{
+				BX_WARN(false, "Unable to create WebGPU device.");
+				return false;
+			}
+
+			bool success = m_mainFrameBuffer.create(
+				  0
+				, g_platformData.nwh
+				, _init.resolution.width
+				, _init.resolution.height
+				, TextureFormat::Unknown
+				, TextureFormat::UnknownDepth
+				);
+			m_numWindows = 1;
+
+			if (!success)
+			{
+				return false;
+			}
+
+			m_queue = m_device.GetDefaultQueue();
+
+			m_cmd.init(m_queue);
+			//BGFX_FATAL(NULL != m_cmd.m_commandQueue, Fatal::UnableToInitialize, "Unable to create Metal device.");
+
+			for (uint8_t ii = 0; ii < WEBGPU_MAX_FRAMES_IN_FLIGHT; ++ii)
+			{
+				BX_TRACE("Create scratch buffer %d", ii);
+				m_scratchBuffers[ii].create(BGFX_CONFIG_MAX_DRAW_CALLS * 128);
+				m_bindStateCache[ii].create(); // (1024);
+			}
+
+			g_caps.supported |= (0
+				| BGFX_CAPS_ALPHA_TO_COVERAGE
+				| BGFX_CAPS_BLEND_INDEPENDENT
+				| BGFX_CAPS_FRAGMENT_DEPTH
+				| BGFX_CAPS_INDEX32
+				| BGFX_CAPS_INSTANCING
+			//	| BGFX_CAPS_OCCLUSION_QUERY
+				| BGFX_CAPS_SWAP_CHAIN
+				| BGFX_CAPS_TEXTURE_2D_ARRAY
+			//	| BGFX_CAPS_TEXTURE_3D
+				| BGFX_CAPS_TEXTURE_BLIT
+				| BGFX_CAPS_TEXTURE_COMPARE_ALL
+				| BGFX_CAPS_TEXTURE_COMPARE_LEQUAL
+				| BGFX_CAPS_TEXTURE_READ_BACK
+				| BGFX_CAPS_VERTEX_ATTRIB_HALF
+				| BGFX_CAPS_VERTEX_ATTRIB_UINT10
+				| BGFX_CAPS_COMPUTE
+				);
+
+			g_caps.limits.maxTextureSize   = 16384;
+			g_caps.limits.maxFBAttachments = 4;
+			g_caps.supported |= BGFX_CAPS_TEXTURE_CUBE_ARRAY;
+
+			if (BX_ENABLED(INDIRECT) )
+			{
+				g_caps.supported |= BGFX_CAPS_DRAW_INDIRECT;
+			}
+
+			g_caps.limits.maxTextureLayers = 2048;
+			g_caps.limits.maxVertexStreams = BGFX_CONFIG_MAX_VERTEX_STREAMS;
+			// The maximum number of entries in the buffer argument table, per graphics or
+			// compute function, is 31. It is decremented by 1 because 1 entry is used for uniforms.
+			g_caps.limits.maxComputeBindings = bx::uint32_min(30, BGFX_MAX_COMPUTE_BINDINGS);
+
+			for (uint32_t ii = 0; ii < TextureFormat::Count; ++ii)
+			{
+				uint16_t support = 0;
+
+				support |= wgpu::TextureFormat::Undefined != s_textureFormat[ii].m_fmt
+					? BGFX_CAPS_FORMAT_TEXTURE_2D
+					| BGFX_CAPS_FORMAT_TEXTURE_3D
+					| BGFX_CAPS_FORMAT_TEXTURE_CUBE
+					| BGFX_CAPS_FORMAT_TEXTURE_VERTEX
+					: BGFX_CAPS_FORMAT_TEXTURE_NONE
+					;
+
+				support |= wgpu::TextureFormat::Undefined != s_textureFormat[ii].m_fmtSrgb
+					? BGFX_CAPS_FORMAT_TEXTURE_2D_SRGB
+					| BGFX_CAPS_FORMAT_TEXTURE_3D_SRGB
+					| BGFX_CAPS_FORMAT_TEXTURE_CUBE_SRGB
+					| BGFX_CAPS_FORMAT_TEXTURE_VERTEX
+					: BGFX_CAPS_FORMAT_TEXTURE_NONE
+					;
+
+				if (!bimg::isCompressed(bimg::TextureFormat::Enum(ii) ) )
+				{
+					support |= 0
+						| BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER
+					//	| BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER_MSAA
+						;
+				}
+
+				g_caps.formats[ii] = support;
+			}
+
+			g_caps.formats[TextureFormat::A8     ] &= ~(BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER | BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER_MSAA);
+			g_caps.formats[TextureFormat::RG32I  ] &= ~(BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER_MSAA);
+			g_caps.formats[TextureFormat::RG32U  ] &= ~(BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER_MSAA);
+			g_caps.formats[TextureFormat::RGBA32I] &= ~(BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER_MSAA);
+			g_caps.formats[TextureFormat::RGBA32U] &= ~(BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER_MSAA);
+
+			g_caps.formats[TextureFormat::ETC2  ] =
+			g_caps.formats[TextureFormat::ETC2A ] =
+			g_caps.formats[TextureFormat::ETC2A1] =
+			g_caps.formats[TextureFormat::PTC12 ] =
+			g_caps.formats[TextureFormat::PTC14 ] =
+			g_caps.formats[TextureFormat::PTC12A] =
+			g_caps.formats[TextureFormat::PTC14A] =
+			g_caps.formats[TextureFormat::R5G6B5] =
+			g_caps.formats[TextureFormat::RGBA4 ] =
+			g_caps.formats[TextureFormat::RGB5A1] = BGFX_CAPS_FORMAT_TEXTURE_NONE;
+
+			g_caps.formats[TextureFormat::RGB9E5F] &= ~(BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER | BGFX_CAPS_FORMAT_TEXTURE_FRAMEBUFFER_MSAA);
+
+			// disable compressed formats
+			for (uint32_t ii = 0; ii < TextureFormat::Unknown; ++ii)
+			{
+				s_textureFormat[ii].m_fmt = wgpu::TextureFormat::Undefined;
+			}
+
+			for (uint32_t ii = 0; ii < TextureFormat::Count; ++ii)
+			{
+				if (BGFX_CAPS_FORMAT_TEXTURE_NONE == g_caps.formats[ii])
+				{
+					s_textureFormat[ii].m_fmt = wgpu::TextureFormat::Undefined;
+					s_textureFormat[ii].m_fmtSrgb = wgpu::TextureFormat::Undefined;
+				}
+			}
+
+			for (uint32_t ii = 1, last = 0; ii < BX_COUNTOF(s_msaa); ++ii)
+			{
+				// TODO (hugoam)
+				//const int32_t sampleCount = 1; //1<<ii;
+				//if (m_device.supportsTextureSampleCount(sampleCount) )
+				//{
+				//	s_msaa[ii] = sampleCount;
+				//	last = ii;
+				//}
+				//else
+				{
+					s_msaa[ii] = s_msaa[last];
+				}
+			}
+
+			// Init reserved part of view name.
+			for (uint32_t ii = 0; ii < BGFX_CONFIG_MAX_VIEWS; ++ii)
+			{
+				bx::snprintf(s_viewName[ii], BGFX_CONFIG_MAX_VIEW_NAME_RESERVED+1, "%3d   ", ii);
+			}
+
+			m_gpuTimer.init();
+
+			g_internalData.context = &m_device;
+
+			return true;
+		}
+
+		void shutdown()
+		{
+			m_gpuTimer.shutdown();
+
+			m_pipelineStateCache.invalidate();
+
+			for (uint32_t ii = 0; ii < BX_COUNTOF(m_shaders); ++ii)
+			{
+				m_shaders[ii].destroy();
+			}
+
+			for (uint32_t ii = 0; ii < BX_COUNTOF(m_textures); ++ii)
+			{
+				m_textures[ii].destroy();
+			}
+
+			captureFinish();
+
+			m_mainFrameBuffer.destroy();
+
+			for (uint32_t ii = 0; ii < BX_COUNTOF(m_scratchBuffers); ++ii)
+			{
+				m_scratchBuffers[ii].destroy();
+			}
+
+			m_cmd.shutdown();
+		}
+
+		RendererType::Enum getRendererType() const override
+		{
+			return RendererType::WebGPU;
+		}
+
+		const char* getRendererName() const override
+		{
+			return BGFX_RENDERER_WEBGPU_NAME;
+		}
+
+		void createIndexBuffer(IndexBufferHandle _handle, const Memory* _mem, uint16_t _flags) override
+		{
+			m_indexBuffers[_handle.idx].create(_mem->size, _mem->data, _flags);
+		}
+
+		void destroyIndexBuffer(IndexBufferHandle _handle) override
+		{
+			m_indexBuffers[_handle.idx].destroy();
+		}
+
+		void createVertexLayout(VertexLayoutHandle _handle, const VertexLayout& _decl) override
+		{
+			VertexLayout& decl = m_vertexDecls[_handle.idx];
+			bx::memCopy(&decl, &_decl, sizeof(VertexLayout) );
+			dump(decl);
+		}
+
+		void destroyVertexLayout(VertexLayoutHandle /*_handle*/) override
+		{
+		}
+
+		void createVertexBuffer(VertexBufferHandle _handle, const Memory* _mem, VertexLayoutHandle _declHandle, uint16_t _flags) override
+		{
+			m_vertexBuffers[_handle.idx].create(_mem->size, _mem->data, _declHandle, _flags);
+		}
+
+		void destroyVertexBuffer(VertexBufferHandle _handle) override
+		{
+			m_vertexBuffers[_handle.idx].destroy();
+		}
+
+		void createDynamicIndexBuffer(IndexBufferHandle _handle, uint32_t _size, uint16_t _flags) override
+		{
+			m_indexBuffers[_handle.idx].create(_size, NULL, _flags);
+		}
+
+		void updateDynamicIndexBuffer(IndexBufferHandle _handle, uint32_t _offset, uint32_t _size, const Memory* _mem) override
+		{
+			m_indexBuffers[_handle.idx].update(_offset, bx::uint32_min(_size, _mem->size), _mem->data);
+		}
+
+		void destroyDynamicIndexBuffer(IndexBufferHandle _handle) override
+		{
+			m_indexBuffers[_handle.idx].destroy();
+		}
+
+		void createDynamicVertexBuffer(VertexBufferHandle _handle, uint32_t _size, uint16_t _flags) override
+		{
+			VertexLayoutHandle decl = BGFX_INVALID_HANDLE;
+			m_vertexBuffers[_handle.idx].create(_size, NULL, decl, _flags);
+		}
+
+		void updateDynamicVertexBuffer(VertexBufferHandle _handle, uint32_t _offset, uint32_t _size, const Memory* _mem) override
+		{
+			m_vertexBuffers[_handle.idx].update(_offset, bx::uint32_min(_size, _mem->size), _mem->data);
+		}
+
+		void destroyDynamicVertexBuffer(VertexBufferHandle _handle) override
+		{
+			m_vertexBuffers[_handle.idx].destroy();
+		}
+
+		void createShader(ShaderHandle _handle, const Memory* _mem) override
+		{
+			m_shaders[_handle.idx].create(_handle, _mem);
+		}
+
+		void destroyShader(ShaderHandle _handle) override
+		{
+			m_shaders[_handle.idx].destroy();
+		}
+
+		void createProgram(ProgramHandle _handle, ShaderHandle _vsh, ShaderHandle _fsh) override
+		{
+			m_program[_handle.idx].create(&m_shaders[_vsh.idx], isValid(_fsh) ? &m_shaders[_fsh.idx] : NULL);
+		}
+
+		void destroyProgram(ProgramHandle _handle) override
+		{
+			m_program[_handle.idx].destroy();
+		}
+
+		void* createTexture(TextureHandle _handle, const Memory* _mem, uint64_t _flags, uint8_t _skip) override
+		{
+			m_textures[_handle.idx].create(_handle, _mem, _flags, _skip);
+			return NULL;
+		}
+
+		void updateTextureBegin(TextureHandle /*_handle*/, uint8_t /*_side*/, uint8_t /*_mip*/) override
+		{
+		}
+
+		void updateTexture(TextureHandle _handle, uint8_t _side, uint8_t _mip, const Rect& _rect, uint16_t _z, uint16_t _depth, uint16_t _pitch, const Memory* _mem) override
+		{
+			m_textures[_handle.idx].update(_side, _mip, _rect, _z, _depth, _pitch, _mem);
+		}
+
+		void updateTextureEnd() override
+		{
+		}
+
+		void readback(ReadbackWgpu& readback,  const TextureWgpu& texture, void* _data)
+		{
+			m_cmd.kick(false, true);
+			m_cmd.begin();
+
+			if (readback.m_mapped)
+			{
+				return;
+			}
+
+			BX_CHECK(readback.m_mip < texture.m_numMips, "Invalid mip: %d, num mips: %d", readback.m_mip, texture.m_numMips);
+
+			uint32_t srcWidth  = bx::uint32_max(1, texture.m_width  >> readback.m_mip);
+			uint32_t srcHeight = bx::uint32_max(1, texture.m_height >> readback.m_mip);
+
+			const uint32_t bpp = bimg::getBitsPerPixel(bimg::TextureFormat::Enum(texture.m_textureFormat));
+			const uint32_t pitch = srcWidth * bpp / 8;
+
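+			// WebGPU requires bytesPerRow of texture<->buffer copies to be a multiple of
+			// 256 bytes, so the readback buffer uses a padded row pitch.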
+			const uint32_t dstpitch = bx::strideAlign(pitch, 256);
+
+			// TODO move inside ReadbackWgpu::create
+			if (!readback.m_buffer)
+			{
+				wgpu::BufferDescriptor desc;
+				desc.size = dstpitch * srcHeight;
+				desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
+
+				readback.m_buffer = m_device.CreateBuffer(&desc);
+			}
+
+			wgpu::TextureCopyView textureCopyView;
+			textureCopyView.texture = texture.m_ptr;
+			textureCopyView.origin = { 0, 0, 0 };
+
+			wgpu::BufferCopyView bufferCopyView;
+			bufferCopyView.buffer = readback.m_buffer;
+			bufferCopyView.bytesPerRow = dstpitch;
+			bufferCopyView.rowsPerImage = srcHeight;
+
+			wgpu::Extent3D extent3D = { srcWidth, srcHeight, 1 };
+			m_cmd.m_encoder.CopyTextureToBuffer(&textureCopyView, &bufferCopyView, &extent3D);
+
+			auto finish = [](WGPUBufferMapAsyncStatus status, void const* data, uint64_t dataLength, void* userdata)
+			{
+				if (status == WGPUBufferMapAsyncStatus_Success)
+				{
+					static_cast<ReadbackWgpu*>(userdata)->readback(data, dataLength);
+				}
+			};
+
+			m_cmd.finish();
+
+			m_cmd.kick(true);
+
+			readback.m_mapped = true;
+			readback.m_data = _data;
+			readback.m_size = pitch * srcHeight;
+
+			readback.m_buffer.MapReadAsync(finish, &readback);
+		}
+
+		void readTexture(TextureHandle _handle, void* _data, uint8_t _mip) override
+		{
+			TextureWgpu& texture = m_textures[_handle.idx];
+
+			readback(texture.m_readback, texture, _data);
+		}
+
+		void resizeTexture(TextureHandle _handle, uint16_t _width, uint16_t _height, uint8_t _numMips, uint16_t _numLayers) override
+		{
+			TextureWgpu& texture = m_textures[_handle.idx];
+
+			uint32_t size = sizeof(uint32_t) + sizeof(TextureCreate);
+			const Memory* mem = alloc(size);
+
+			bx::StaticMemoryBlockWriter writer(mem->data, mem->size);
+			uint32_t magic = BGFX_CHUNK_MAGIC_TEX;
+			bx::write(&writer, magic);
+
+			TextureCreate tc;
+			tc.m_width     = _width;
+			tc.m_height    = _height;
+			tc.m_depth     = 0;
+			tc.m_numLayers = _numLayers;
+			tc.m_numMips   = _numMips;
+			tc.m_format    = TextureFormat::Enum(texture.m_requestedFormat);
+			tc.m_cubeMap   = false;
+			tc.m_mem       = NULL;
+			bx::write(&writer, tc);
+
+			texture.destroy();
+			texture.create(_handle, mem, texture.m_flags, 0);
+
+			release(mem);
+		}
+
+		void overrideInternal(TextureHandle _handle, uintptr_t _ptr) override
+		{
+			BX_UNUSED(_handle, _ptr);
+		}
+
+		uintptr_t getInternal(TextureHandle _handle) override
+		{
+			BX_UNUSED(_handle);
+			return 0;
+		}
+
+		void destroyTexture(TextureHandle _handle) override
+		{
+			m_textures[_handle.idx].destroy();
+		}
+
+		void createFrameBuffer(FrameBufferHandle _handle, uint8_t _num, const Attachment* _attachment) override
+		{
+			m_frameBuffers[_handle.idx].create(_num, _attachment);
+		}
+
+		void createFrameBuffer(FrameBufferHandle _handle, void* _nwh, uint32_t _width, uint32_t _height, TextureFormat::Enum _format, TextureFormat::Enum _depthFormat) override
+		{
+			for (uint32_t ii = 0, num = m_numWindows; ii < num; ++ii)
+			{
+				FrameBufferHandle handle = m_windows[ii];
+				if (isValid(handle)
+				&&  m_frameBuffers[handle.idx].m_nwh == _nwh)
+				{
+					destroyFrameBuffer(handle);
+				}
+			}
+
+			uint16_t denseIdx   = m_numWindows++;
+			m_windows[denseIdx] = _handle;
+
+			FrameBufferWgpu& fb = m_frameBuffers[_handle.idx];
+			fb.create(denseIdx, _nwh, _width, _height, _format, _depthFormat);
+			fb.m_swapChain->resize(m_frameBuffers[_handle.idx], _width, _height, 0);
+		}
+
+		void destroyFrameBuffer(FrameBufferHandle _handle) override
+		{
+			uint16_t denseIdx = m_frameBuffers[_handle.idx].destroy();
+
+			if (UINT16_MAX != denseIdx)
+			{
+				--m_numWindows;
+
+				if (m_numWindows > 1)
+				{
+					FrameBufferHandle handle = m_windows[m_numWindows];
+					m_windows[m_numWindows]  = {kInvalidHandle};
+
+					if (m_numWindows != denseIdx)
+					{
+						m_windows[denseIdx] = handle;
+						m_frameBuffers[handle.idx].m_denseIdx = denseIdx;
+					}
+				}
+			}
+		}
+
+		void createUniform(UniformHandle _handle, UniformType::Enum _type, uint16_t _num, const char* _name) override
+		{
+			if (NULL != m_uniforms[_handle.idx])
+			{
+				BX_FREE(g_allocator, m_uniforms[_handle.idx]);
+			}
+
+			uint32_t size = bx::alignUp(g_uniformTypeSize[_type]*_num, 16);
+			void* data = BX_ALLOC(g_allocator, size);
+			bx::memSet(data, 0, size);
+			m_uniforms[_handle.idx] = data;
+			m_uniformReg.add(_handle, _name);
+		}
+
+		void destroyUniform(UniformHandle _handle) override
+		{
+			BX_FREE(g_allocator, m_uniforms[_handle.idx]);
+			m_uniforms[_handle.idx] = NULL;
+			m_uniformReg.remove(_handle);
+		}
+
+		void requestScreenShotPre(const char* _filePath)
+		{
+			BX_UNUSED(_filePath);
+			//m_saveScreenshot = true;
+		}
+
+		void requestScreenShot(FrameBufferHandle _handle, const char* _filePath) override
+		{
+			BX_UNUSED(_handle); BX_UNUSED(_filePath);
+		}
+
+		void updateViewName(ViewId _id, const char* _name) override
+		{
+			bx::strCopy(
+				  &s_viewName[_id][BGFX_CONFIG_MAX_VIEW_NAME_RESERVED]
+				, BX_COUNTOF(s_viewName[0])-BGFX_CONFIG_MAX_VIEW_NAME_RESERVED
+				, _name
+				);
+		}
+
+		void updateUniform(uint16_t _loc, const void* _data, uint32_t _size) override
+		{
+			bx::memCopy(m_uniforms[_loc], _data, _size);
+		}
+
+		void invalidateOcclusionQuery(OcclusionQueryHandle _handle) override
+		{
+			BX_UNUSED(_handle);
+		}
+
+		void setMarker(const char* _marker, uint16_t _len) override
+		{
+			BX_UNUSED(_len);
+
+			if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION) )
+			{
+				m_renderEncoder.InsertDebugMarker(_marker);
+			}
+		}
+
+		void submitBlit(BlitState& _bs, uint16_t _view);
+
+		void submit(Frame* _render, ClearQuad& _clearQuad, TextVideoMemBlitter& _textVideoMemBlitter) override;
+
+		void blitSetup(TextVideoMemBlitter& _blitter) override
+		{
+			BX_UNUSED(_blitter);
+		}
+
+		void blitRender(TextVideoMemBlitter& _blitter, uint32_t _numIndices) override
+		{
+			const uint32_t numVertices = _numIndices*4/6;
+			if (0 < numVertices)
+			{
+				m_indexBuffers [_blitter.m_ib->handle.idx].update(
+					  0
+					, bx::strideAlign(_numIndices*2, 4)
+					, _blitter.m_ib->data
+					, true
+					);
+				m_vertexBuffers[_blitter.m_vb->handle.idx].update(
+					  0
+					, numVertices*_blitter.m_layout.m_stride
+					, _blitter.m_vb->data
+					, true
+					);
+
+				endEncoding();
+
+				uint32_t width  = m_resolution.width;
+				uint32_t height = m_resolution.height;
+
+				FrameBufferHandle fbh = BGFX_INVALID_HANDLE;
+
+				uint64_t state = 0
+				| BGFX_STATE_WRITE_RGB
+				| BGFX_STATE_WRITE_A
+				| BGFX_STATE_DEPTH_TEST_ALWAYS
+				;
+
+				PipelineStateWgpu* pso = getPipelineState(
+														 state
+														 , 0
+														 , 0
+														 , fbh
+														 , _blitter.m_vb->layoutHandle
+														 , false
+														 , _blitter.m_program
+														 , 0
+														 );
+
+				RenderPassDescriptor renderPassDescriptor;
+				wgpu::RenderPassColorAttachmentDescriptor& color = renderPassDescriptor.colorAttachments[0];
+
+				setFrameBuffer(renderPassDescriptor, fbh);
+
+				color.loadOp = wgpu::LoadOp::Load;
+				color.storeOp = wgpu::StoreOp::Store;
+				//	NULL != renderPassDescriptor.colorAttachments[0].resolveTexture
+				//	? wgpu::StoreOp::MultisampleResolve
+				//	: wgpu::StoreOp::Store
+				//;
+
+				wgpu::RenderPassEncoder rce = m_cmd.m_encoder.BeginRenderPass(&renderPassDescriptor.desc);
+				m_renderEncoder = rce;
+
+				rce.SetViewport(0.0f, 0.0f, (float)width, (float)height, 0.0f, 1.0f);
+
+				rce.SetPipeline(pso->m_rps);
+
+				ProgramWgpu& program = m_program[_blitter.m_program.idx];
+
+				ScratchBufferWgpu& scratchBuffer = m_scratchBuffers[0];
+				BindStateCacheWgpu& bindStates = m_bindStateCache[0];
+
+				const uint32_t voffset = scratchBuffer.m_offset;
+				scratchBuffer.m_offset += program.m_gpuSize;
+
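+				// The debug-text blitter only needs the predefined orthographic projection
+				// covering the backbuffer; it is written straight into the scratch buffer.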
+				float proj[16];
+				bx::mtxOrtho(proj, 0.0f, (float)width, (float)height, 0.0f, 0.0f, 1000.0f, 0.0f, false);
+
+				PredefinedUniform& predefined = program.m_predefined[0];
+				uint8_t flags = predefined.m_type;
+				setShaderUniform(flags, predefined.m_loc, proj, 4);
+
+				BX_CHECK(program.m_vsh->m_size > 0, "Blit vertex shader is expected to have uniform data.");
+				scratchBuffer.m_buffer.SetSubData(voffset, program.m_vsh->m_gpuSize, m_vsScratch);
+
+				const uint32_t fsize = (NULL != program.m_fsh ? program.m_fsh->m_gpuSize : 0);
+				BX_CHECK(fsize == 0, "Blit fragment shader is not expected to have uniform data.");
+
+				TextureWgpu& texture = m_textures[_blitter.m_texture.idx];
+
+				BindingsWgpu b;
+
+				BindStateWgpu& bindState = allocBindState(program, bindStates, b, scratchBuffer);
+
+				wgpu::BindGroupEntry& textureEntry = b.m_entries[b.numEntries++];
+				textureEntry.binding = program.m_textures[0].binding;
+				textureEntry.textureView = texture.m_ptr.CreateView();
+
+				wgpu::BindGroupEntry& samplerEntry = b.m_entries[b.numEntries++];
+				samplerEntry.binding = program.m_samplers[0].binding;
+				samplerEntry.sampler = 0 == (BGFX_SAMPLER_INTERNAL_DEFAULT & state)
+					? getSamplerState(state)
+					: texture.m_sampler;
+
+				bindGroups(program, bindState, b);
+
+				uint32_t numOffset = 1;
+				uint32_t offsets[1] = { voffset };
+
+				bindProgram(rce, program, bindState, numOffset, offsets);
+
+				VertexBufferWgpu& vb = m_vertexBuffers[_blitter.m_vb->handle.idx];
+				rce.SetVertexBuffer(0, vb.m_ptr);
+
+				rce.SetIndexBuffer(m_indexBuffers[_blitter.m_ib->handle.idx].m_ptr);
+				rce.DrawIndexed(_numIndices, 1, 0, 0, 0);
+			}
+		}
+
+		bool isDeviceRemoved() override
+		{
+			return false;
+		}
+
+		void flip() override
+		{
+			for (uint32_t ii = 0, num = m_numWindows; ii < num; ++ii)
+			{
+				FrameBufferWgpu& frameBuffer = ii == 0 ? m_mainFrameBuffer : m_frameBuffers[m_windows[ii].idx];
+				if (NULL != frameBuffer.m_swapChain)
+				//&& frameBuffer.m_swapChain->m_drawable)
+				{
+					SwapChainWgpu& swapChain = *frameBuffer.m_swapChain;
+					swapChain.flip();
+				}
+			}
+
+			m_cmd.m_encoder = nullptr;
+		}
+
+		void updateResolution(const Resolution& _resolution)
+		{
+			m_resolution = _resolution;
+			return; // TODO (hugoam)
+
+			m_mainFrameBuffer.m_swapChain->m_maxAnisotropy = !!(_resolution.reset & BGFX_RESET_MAXANISOTROPY)
+				? 16
+				: 1
+				;
+
+			const uint32_t maskFlags = ~(0
+				| BGFX_RESET_MAXANISOTROPY
+				| BGFX_RESET_DEPTH_CLAMP
+				| BGFX_RESET_SUSPEND
+				);
+
+			if (m_resolution.width            !=  _resolution.width
+			||  m_resolution.height           !=  _resolution.height
+			|| (m_resolution.reset&maskFlags) != (_resolution.reset&maskFlags) )
+			{
+				wgpu::TextureFormat prevMetalLayerPixelFormat; // = m_mainFrameBuffer.m_swapChain->m_metalLayer.pixelFormat;
+				BX_UNUSED(prevMetalLayerPixelFormat);
+
+				m_resolution = _resolution;
+				m_resolution.reset &= ~BGFX_RESET_INTERNAL_FORCE;
+
+				m_mainFrameBuffer.m_swapChain->resize(m_mainFrameBuffer, _resolution.width, _resolution.height, _resolution.reset);
+
+				for (uint32_t ii = 0; ii < BX_COUNTOF(m_frameBuffers); ++ii)
+				{
+					m_frameBuffers[ii].postReset();
+				}
+
+				updateCapture();
+
+				m_textVideoMem.resize(false, _resolution.width, _resolution.height);
+				m_textVideoMem.clear();
+
+				//if (prevMetalLayerPixelFormat != m_mainFrameBuffer.m_swapChain->m_metalLayer.pixelFormat)
+				{
+					//MTL_RELEASE(m_screenshotBlitRenderPipelineState)
+					//reset(m_renderPipelineDescriptor);
+
+					//m_renderPipelineDescriptor.colorAttachments[0].pixelFormat = m_mainFrameBuffer.m_swapChain->m_metalLayer.pixelFormat;
+					//m_renderPipelineDescriptor.vertexFunction   = m_screenshotBlitProgram.m_vsh->m_function;
+					//m_renderPipelineDescriptor.fragmentFunction = m_screenshotBlitProgram.m_fsh->m_function;
+					//m_screenshotBlitRenderPipelineState = m_device.newRenderPipelineStateWithDescriptor(m_renderPipelineDescriptor);
+				}
+			}
+		}
+
+		void invalidateCompute()
+		{
+			if (m_computeEncoder)
+			{
+				m_computeEncoder.EndPass();
+				m_computeEncoder = NULL;
+			}
+		}
+
+		void updateCapture()
+		{
+		}
+
+		void capture()
+		{
+		}
+
+		void captureFinish()
+		{
+		}
+
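+		// Bind group layout convention (as set up in ShaderWgpu::create and ProgramWgpu::create):
+		// binding 0 is the vertex uniform buffer and binding 48 the fragment uniform buffer,
+		// both bound with dynamic offsets into the per-frame scratch buffer; a sampled
+		// texture sits at its register index and its sampler at register index + 16.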
+		BindStateWgpu& allocBindState(const ProgramWgpu& program, BindStateCacheWgpu& bindStates, BindingsWgpu& bindings, ScratchBufferWgpu& scratchBuffer)
+		{
+			BindStateWgpu& bindState = bindStates.m_bindStates[bindStates.m_currentBindState];
+			bindStates.m_currentBindState++;
+
+			bindState.numOffset = program.m_numUniforms;
+
+			// the first two bindings are always the uniform buffers (vertex/fragment)
+			bindings.m_entries[0].binding = 0;
+			bindings.m_entries[0].offset = 0;
+			bindings.m_entries[0].size = program.m_vsh->m_gpuSize;
+			bindings.m_entries[0].buffer = scratchBuffer.m_buffer;
+			bindings.numEntries++;
+
+			if (NULL != program.m_fsh
+			&& 0 < program.m_fsh->m_gpuSize)
+			{
+				bindings.m_entries[1].binding = 48;
+				bindings.m_entries[1].offset = 0;
+				bindings.m_entries[1].size = program.m_fsh->m_gpuSize;
+				bindings.m_entries[1].buffer = scratchBuffer.m_buffer;
+				bindings.numEntries++;
+			}
+
+			return bindState;
+		}
+
+		void bindGroups(const ProgramWgpu& program, BindStateWgpu& bindState, BindingsWgpu& bindings)
+		{
+			wgpu::BindGroupDescriptor bindGroupDesc;
+			bindGroupDesc.layout = program.m_bindGroupLayout;
+			bindGroupDesc.entryCount = bindings.numEntries;
+			bindGroupDesc.entries = bindings.m_entries;
+
+			bindState.m_bindGroup = m_device.CreateBindGroup(&bindGroupDesc);
+		}
+
+		template <class Encoder>
+		void bindProgram(Encoder& encoder, const ProgramWgpu& program, BindStateWgpu& bindState, uint32_t numOffset, uint32_t* offsets)
+		{
+			BX_CHECK(bindState.numOffset == numOffset, "Number of dynamic offsets does not match the bind state.");
+			encoder.SetBindGroup(0, bindState.m_bindGroup, numOffset, offsets);
+		}
+		
+		void allocBindState(const ProgramWgpu& program, BindStateCacheWgpu& bindStates, ScratchBufferWgpu& scratchBuffer, const RenderBind& renderBind)
+		{
+			BindingsWgpu b;
+
+			BindStateWgpu& bindState = allocBindState(program, bindStates, b, scratchBuffer);
+
+			for (uint8_t stage = 0; stage < BGFX_CONFIG_MAX_TEXTURE_SAMPLERS; ++stage)
+			{
+				const Binding& bind = renderBind.m_bind[stage];
+				const BindInfo& bindInfo = program.m_bindInfo[stage];
+
+				bool isUsed = isValid(program.m_bindInfo[stage].m_uniform);
+
+				BX_CHECK(!isUsed || kInvalidHandle != bind.m_idx, "All expected bindings must be bound with WebGPU");
+
+				if (kInvalidHandle != bind.m_idx)
+				{
+					switch (bind.m_type)
+					{
+					case Binding::Image:
+					{
+						TextureWgpu& texture = m_textures[bind.m_idx];
+						wgpu::BindGroupEntry& entry = b.m_entries[b.numEntries++];
+						entry.binding = bindInfo.m_binding;
+						entry.textureView = texture.getTextureMipLevel(bind.m_mip);
+					}
+					break;
+
+					case Binding::Texture:
+					{
+						// apparently bgfx allows setting a texture on a stage that the program does not even use
+						if (isUsed)
+						{
+							TextureWgpu& texture = m_textures[bind.m_idx];
+							uint32_t flags = bind.m_samplerFlags;
+
+							wgpu::TextureViewDescriptor viewDesc = defaultDescriptor<wgpu::TextureViewDescriptor>();
+							viewDesc.dimension = program.m_textures[bindInfo.m_index].viewDimension;
+
+							wgpu::BindGroupEntry& textureEntry = b.m_entries[b.numEntries++];
+							textureEntry.binding = bindInfo.m_binding;
+							textureEntry.textureView = texture.m_ptr.CreateView();
+							//textureEntry.textureView = texture.m_ptr.CreateView(&viewDesc);
+
+							wgpu::BindGroupEntry& samplerEntry = b.m_entries[b.numEntries++];
+							samplerEntry.binding = bindInfo.m_binding + 16;
+							samplerEntry.sampler = 0 == (BGFX_SAMPLER_INTERNAL_DEFAULT & flags)
+								? getSamplerState(flags)
+								: texture.m_sampler;
+						}
+					}
+					break;
+
+					case Binding::IndexBuffer:
+					case Binding::VertexBuffer:
+					{
+						const BufferWgpu& buffer = Binding::IndexBuffer == bind.m_type
+							? m_indexBuffers[bind.m_idx]
+							: m_vertexBuffers[bind.m_idx]
+							;
+
+						wgpu::BindGroupEntry& entry = b.m_entries[b.numEntries++];
+						entry.binding = bindInfo.m_binding;
+						entry.offset = 0;
+						entry.size = buffer.m_size;
+						entry.buffer = buffer.m_ptr;
+					}
+					break;
+					}
+				}
+			}
+
+			bindGroups(program, bindState, b);
+		}
+
+		void setShaderUniform(uint8_t _flags, uint32_t _regIndex, const void* _val, uint32_t _numRegs)
+		{
+			if(_flags&BGFX_UNIFORM_FRAGMENTBIT)
+			{
+				bx::memCopy(&m_fsScratch[_regIndex], _val, _numRegs * 16);
+			}
+			else
+			{
+				bx::memCopy(&m_vsScratch[_regIndex], _val, _numRegs * 16);
+			}
+		}
+
+		void setShaderUniform4f(uint8_t _flags, uint32_t _loc, const void* _val, uint32_t _numRegs)
+		{
+			setShaderUniform(_flags, _loc, _val, _numRegs);
+		}
+
+		void setShaderUniform4x4f(uint8_t _flags, uint32_t _loc, const void* _val, uint32_t _numRegs)
+		{
+			setShaderUniform(_flags, _loc, _val, _numRegs);
+		}
+
+		void commitShaderConstants(ScratchBufferWgpu& _scratchBuffer, const ProgramWgpu& _program, uint32_t _vertexOffset, uint32_t _fragmentOffset)
+		{
+			const uint32_t size = _program.m_vsh->m_size;
+			if (0 != size)
+				_scratchBuffer.m_buffer.SetSubData(_vertexOffset, size, m_vsScratch);
+
+			if(NULL != _program.m_fsh)
+			{
+				const uint32_t size = _program.m_fsh->m_size;
+				if(0 != size)
+					_scratchBuffer.m_buffer.SetSubData(_fragmentOffset, size, m_fsScratch);
+			}
+		}
+
+		void commit(UniformBuffer& _uniformBuffer)
+		{
+			_uniformBuffer.reset();
+
+			for (;;)
+			{
+				uint32_t opcode = _uniformBuffer.read();
+
+				if (UniformType::End == opcode)
+				{
+					break;
+				}
+
+				UniformType::Enum type;
+				uint16_t loc;
+				uint16_t num;
+				uint16_t copy;
+				UniformBuffer::decodeOpcode(opcode, type, loc, num, copy);
+
+				const char* data;
+				if (copy)
+				{
+					data = _uniformBuffer.read(g_uniformTypeSize[type]*num);
+				}
+				else
+				{
+					UniformHandle handle;
+					bx::memCopy(&handle, _uniformBuffer.read(sizeof(UniformHandle) ), sizeof(UniformHandle) );
+					data = (const char*)m_uniforms[handle.idx];
+				}
+
+				switch ( (uint32_t)type)
+				{
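+				// Mat3 uniforms are widened to three float4 rows (fourth component zero-padded)
+				// to match the 16-byte registers copied by setShaderUniform (_numRegs * 16 bytes).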
+				case UniformType::Mat3:
+				case UniformType::Mat3|BGFX_UNIFORM_FRAGMENTBIT:
+					{
+						float* value = (float*)data;
+						for (uint32_t ii = 0, count = num/3; ii < count; ++ii,  loc += 3*16, value += 9)
+						{
+							Matrix4 mtx;
+							mtx.un.val[ 0] = value[0];
+							mtx.un.val[ 1] = value[1];
+							mtx.un.val[ 2] = value[2];
+							mtx.un.val[ 3] = 0.0f;
+							mtx.un.val[ 4] = value[3];
+							mtx.un.val[ 5] = value[4];
+							mtx.un.val[ 6] = value[5];
+							mtx.un.val[ 7] = 0.0f;
+							mtx.un.val[ 8] = value[6];
+							mtx.un.val[ 9] = value[7];
+							mtx.un.val[10] = value[8];
+							mtx.un.val[11] = 0.0f;
+							setShaderUniform(uint8_t(type), loc, &mtx.un.val[0], 3);
+						}
+					}
+					break;
+
+				case UniformType::Sampler:
+				case UniformType::Sampler | BGFX_UNIFORM_FRAGMENTBIT:
+				case UniformType::Vec4:
+				case UniformType::Vec4 | BGFX_UNIFORM_FRAGMENTBIT:
+				case UniformType::Mat4:
+				case UniformType::Mat4 | BGFX_UNIFORM_FRAGMENTBIT:
+					{
+						setShaderUniform(uint8_t(type), loc, data, num);
+					}
+					break;
+				case UniformType::End:
+					break;
+
+				default:
+					BX_TRACE("%4d: INVALID 0x%08x, t %d, l %d, n %d, c %d", _uniformBuffer.getPos(), opcode, type, loc, num, copy);
+					break;
+				}
+			}
+		}
+
+		void clearQuad(ClearQuad& _clearQuad, const Rect& /*_rect*/, const Clear& _clear, const float _palette[][4])
+		{
+			uint32_t width;
+			uint32_t height;
+
+			if (isValid(m_fbh) )
+			{
+				const FrameBufferWgpu& fb = m_frameBuffers[m_fbh.idx];
+				width  = fb.m_width;
+				height = fb.m_height;
+			}
+			else
+			{
+				width  = m_resolution.width;
+				height = m_resolution.height;
+			}
+
+			uint64_t state = 0;
+			state |= _clear.m_flags & BGFX_CLEAR_COLOR ? BGFX_STATE_WRITE_RGB|BGFX_STATE_WRITE_A         : 0;
+			state |= _clear.m_flags & BGFX_CLEAR_DEPTH ? BGFX_STATE_DEPTH_TEST_ALWAYS|BGFX_STATE_WRITE_Z : 0;
+			state |= BGFX_STATE_PT_TRISTRIP;
+
+			uint64_t stencil = 0;
+			stencil |= _clear.m_flags & BGFX_CLEAR_STENCIL ? 0
+				| BGFX_STENCIL_TEST_ALWAYS
+				| BGFX_STENCIL_FUNC_REF(_clear.m_stencil)
+				| BGFX_STENCIL_FUNC_RMASK(0xff)
+				| BGFX_STENCIL_OP_FAIL_S_REPLACE
+				| BGFX_STENCIL_OP_FAIL_Z_REPLACE
+				| BGFX_STENCIL_OP_PASS_Z_REPLACE
+				: 0
+				;
+
+			uint32_t numMrt = 1;
+			FrameBufferHandle fbh = m_fbh;
+			if (isValid(fbh) && m_frameBuffers[fbh.idx].m_swapChain == NULL)
+			{
+				const FrameBufferWgpu& fb = m_frameBuffers[fbh.idx];
+				numMrt = bx::uint32_max(1, fb.m_num);
+			}
+
+			wgpu::RenderPassEncoder rce = m_renderEncoder;
+			ProgramHandle programHandle = _clearQuad.m_program[numMrt-1];
+
+			const VertexLayout* decl = &_clearQuad.m_layout;
+			const PipelineStateWgpu* pso = getPipelineState(
+				  state
+				, stencil
+				, 0
+				, fbh
+				, 1
+				, &decl
+				, false
+				, programHandle
+				, 0
+				);
+			rce.SetPipeline(pso->m_rps);
+
+			float mrtClearColor[BGFX_CONFIG_MAX_FRAME_BUFFER_ATTACHMENTS][4];
+			float mrtClearDepth[4] = { _clear.m_depth };
+
+			if (BGFX_CLEAR_COLOR_USE_PALETTE & _clear.m_flags)
+			{
+				for (uint32_t ii = 0; ii < numMrt; ++ii)
+				{
+					uint8_t index = (uint8_t)bx::uint32_min(BGFX_CONFIG_MAX_COLOR_PALETTE-1, _clear.m_index[ii]);
+					bx::memCopy(mrtClearColor[ii], _palette[index], 16);
+				}
+			}
+			else
+			{
+				float rgba[4] =
+				{
+					_clear.m_index[0]*1.0f/255.0f,
+					_clear.m_index[1]*1.0f/255.0f,
+					_clear.m_index[2]*1.0f/255.0f,
+					_clear.m_index[3]*1.0f/255.0f,
+				};
+
+				for (uint32_t ii = 0; ii < numMrt; ++ii)
+				{
+					bx::memCopy( mrtClearColor[ii]
+								, rgba
+								, 16
+								);
+				}
+			}
+
+			ProgramWgpu& program = m_program[programHandle.idx];
+
+			ScratchBufferWgpu& scratchBuffer = m_scratchBuffers[0];
+			BindStateCacheWgpu& bindStates = m_bindStateCache[0];
+
+			BindingsWgpu b;
+			BindStateWgpu& bindState = allocBindState(program, bindStates, b, scratchBuffer);
+
+			const uint32_t voffset = scratchBuffer.m_offset;
+			const uint32_t foffset = scratchBuffer.m_offset + program.m_vsh->m_gpuSize;
+			scratchBuffer.m_offset += program.m_gpuSize;
+
+			scratchBuffer.m_buffer.SetSubData(voffset, bx::uint32_min(program.m_vsh->m_gpuSize, sizeof(mrtClearDepth)), (uint8_t*)mrtClearDepth);
+			scratchBuffer.m_buffer.SetSubData(foffset, bx::uint32_min(program.m_fsh->m_gpuSize, sizeof(mrtClearColor)), (uint8_t*)mrtClearColor);
+
+			uint32_t numOffset = 2;
+			uint32_t offsets[2] = { voffset, foffset };
+
+			bindGroups(program, bindState, b);
+
+			const VertexBufferWgpu& vb = m_vertexBuffers[_clearQuad.m_vb.idx];
+
+			bindProgram(rce, program, bindState, numOffset, offsets);
+
+			rce.SetVertexBuffer(0, vb.m_ptr);
+			rce.Draw(4, 1, 0, 0);
+		}
+
+		wgpu::TextureViewDescriptor attachmentView(const Attachment& _at, const TextureWgpu& _texture)
+		{
+			bool _resolve = bool(_texture.m_ptrMsaa);
+			BX_UNUSED(_resolve);
+
+			wgpu::TextureViewDescriptor desc;
+			if (1 < _texture.m_numSides)
+			{
+				desc.baseArrayLayer = _at.layer;
+			}
+			desc.baseMipLevel = _at.mip;
+			desc.arrayLayerCount = 1;
+			desc.mipLevelCount = 1;
+
+			if (_texture.m_type == TextureWgpu::Texture3D)
+			{
+				desc.dimension = wgpu::TextureViewDimension::e3D;
+			}
+
+			return desc;
+		}
+
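+		// setFrameBuffer covers two cases: targets backed by a swap chain (the main frame
+		// buffer or a window frame buffer) attach the back buffer color, resolving into the
+		// current swap chain texture when an MSAA back buffer exists, plus the back buffer
+		// depth; texture frame buffers attach each color target and the optional depth
+		// target through attachmentView().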
+		void setFrameBuffer(RenderPassDescriptor& _renderPassDescriptor, FrameBufferHandle _fbh, bool _msaa = true)
+		{
+			if (!isValid(_fbh)
+			||  m_frameBuffers[_fbh.idx].m_swapChain)
+			{
+				SwapChainWgpu* swapChain = !isValid(_fbh)
+					? m_mainFrameBuffer.m_swapChain
+					: m_frameBuffers[_fbh.idx].m_swapChain
+					;
+
+				_renderPassDescriptor.colorAttachments[0] = defaultDescriptor<wgpu::RenderPassColorAttachmentDescriptor>();
+				_renderPassDescriptor.desc.colorAttachmentCount = 1;
+
+				// Force a single array layer for attachments
+				wgpu::TextureViewDescriptor desc;
+				desc.arrayLayerCount = 1;
+
+				if (swapChain->m_backBufferColorMsaa)
+				{
+					_renderPassDescriptor.colorAttachments[0].attachment    = swapChain->m_backBufferColorMsaa.CreateView(&desc);
+					_renderPassDescriptor.colorAttachments[0].resolveTarget = swapChain->current();
+				}
+				else
+				{
+					_renderPassDescriptor.colorAttachments[0].attachment = swapChain->current();
+				}
+
+				_renderPassDescriptor.depthStencilAttachment = defaultDescriptor<wgpu::RenderPassDepthStencilAttachmentDescriptor>();
+				_renderPassDescriptor.depthStencilAttachment.attachment = swapChain->m_backBufferDepth.CreateView();
+				_renderPassDescriptor.desc.depthStencilAttachment = &_renderPassDescriptor.depthStencilAttachment;
+			}
+			else
+			{
+				FrameBufferWgpu& frameBuffer = m_frameBuffers[_fbh.idx];
+
+				_renderPassDescriptor.desc.colorAttachmentCount = frameBuffer.m_num;
+
+				for (uint32_t ii = 0; ii < frameBuffer.m_num; ++ii)
+				{
+					const TextureWgpu& texture = m_textures[frameBuffer.m_colorHandle[ii].idx];
+
+					const wgpu::TextureViewDescriptor desc = attachmentView(frameBuffer.m_colorAttachment[ii], texture);
+
+					_renderPassDescriptor.colorAttachments[ii] = defaultDescriptor<wgpu::RenderPassColorAttachmentDescriptor>();
+					_renderPassDescriptor.colorAttachments[ii].attachment = texture.m_ptrMsaa
+						? texture.m_ptrMsaa.CreateView(&desc)
+						: texture.m_ptr.CreateView(&desc)
+						;
+					_renderPassDescriptor.colorAttachments[ii].resolveTarget = texture.m_ptrMsaa
+						? texture.m_ptr.CreateView(&desc)
+						: wgpu::TextureView()
+						;
+				}
+
+				if (isValid(frameBuffer.m_depthHandle) )
+				{
+					const TextureWgpu& texture = m_textures[frameBuffer.m_depthHandle.idx];
+					const wgpu::TextureViewDescriptor desc = attachmentView(frameBuffer.m_depthAttachment, texture);
+
+					_renderPassDescriptor.depthStencilAttachment = defaultDescriptor<wgpu::RenderPassDepthStencilAttachmentDescriptor>();
+					_renderPassDescriptor.depthStencilAttachment.attachment = texture.m_ptrMsaa
+						? texture.m_ptrMsaa.CreateView(&desc)
+						: texture.m_ptr.CreateView(&desc)
+						;
+
+					_renderPassDescriptor.desc.depthStencilAttachment = &_renderPassDescriptor.depthStencilAttachment;
+				}
+			}
+
+			m_fbh    = _fbh;
+			m_rtMsaa = _msaa;
+		}
+
+		void setDepthStencilState(wgpu::DepthStencilStateDescriptor& desc, uint64_t _state, uint64_t _stencil = 0)
+		{
+			const uint32_t fstencil = unpackStencil(0, _stencil);
+			const uint32_t func = (_state&BGFX_STATE_DEPTH_TEST_MASK) >> BGFX_STATE_DEPTH_TEST_SHIFT;
+
+			desc.depthWriteEnabled = !!(BGFX_STATE_WRITE_Z & _state);
+			desc.depthCompare = s_cmpFunc[func];
+
+			uint32_t bstencil = unpackStencil(1, _stencil);
+			const uint32_t frontAndBack = bstencil != BGFX_STENCIL_NONE && bstencil != fstencil;
+			bstencil = frontAndBack ? bstencil : fstencil;
+
+			desc.stencilFront = defaultDescriptor<wgpu::StencilStateFaceDescriptor>();
+			desc.stencilBack = defaultDescriptor<wgpu::StencilStateFaceDescriptor>();
+
+			if (0 != _stencil)
+			{
+				// TODO (hugoam)
+				const uint32_t readMask  = (fstencil&BGFX_STENCIL_FUNC_RMASK_MASK)>>BGFX_STENCIL_FUNC_RMASK_SHIFT;
+				const uint32_t writeMask = 0xff;
+
+				desc.stencilReadMask  = readMask;
+				desc.stencilWriteMask = writeMask;
+
+				desc.stencilFront.failOp      = s_stencilOp[(fstencil&BGFX_STENCIL_OP_FAIL_S_MASK)>>BGFX_STENCIL_OP_FAIL_S_SHIFT];
+				desc.stencilFront.depthFailOp = s_stencilOp[(fstencil&BGFX_STENCIL_OP_FAIL_Z_MASK)>>BGFX_STENCIL_OP_FAIL_Z_SHIFT];
+				desc.stencilFront.passOp      = s_stencilOp[(fstencil&BGFX_STENCIL_OP_PASS_Z_MASK)>>BGFX_STENCIL_OP_PASS_Z_SHIFT];
+				desc.stencilFront.compare     = s_cmpFunc[(fstencil&BGFX_STENCIL_TEST_MASK)>>BGFX_STENCIL_TEST_SHIFT];
+
+				desc.stencilBack.failOp      = s_stencilOp[(bstencil&BGFX_STENCIL_OP_FAIL_S_MASK)>>BGFX_STENCIL_OP_FAIL_S_SHIFT];
+				desc.stencilBack.depthFailOp = s_stencilOp[(bstencil&BGFX_STENCIL_OP_FAIL_Z_MASK)>>BGFX_STENCIL_OP_FAIL_Z_SHIFT];
+				desc.stencilBack.passOp      = s_stencilOp[(bstencil&BGFX_STENCIL_OP_PASS_Z_MASK)>>BGFX_STENCIL_OP_PASS_Z_SHIFT];
+				desc.stencilBack.compare     = s_cmpFunc[(bstencil&BGFX_STENCIL_TEST_MASK)>>BGFX_STENCIL_TEST_SHIFT];
+			}
+		}
+
+		RenderPassStateWgpu* getRenderPassState(bgfx::FrameBufferHandle fbh, bool clear, Clear clr)
+		{
+			bx::HashMurmur2A murmur;
+			murmur.begin();
+			murmur.add(fbh.idx);
+			murmur.add(clear);
+			murmur.add(&clr, sizeof(clr));
+			uint32_t hash = murmur.end();
+
+			RenderPassStateWgpu* rps = m_renderPassStateCache.find(hash);
+
+			if (NULL == rps)
+			{
+				rps = BX_NEW(g_allocator, RenderPassStateWgpu);
+				m_renderPassStateCache.add(hash, rps);
+			}
+
+			return rps;
+		}
+
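+		// Render pipelines are cached: the lookup key hashes the masked render state, the
+		// stencil state, the per-target blend factors (when independent blend is enabled),
+		// the instance data count, the frame buffer's pixel format hash, the vertex/fragment
+		// shader hashes and the hash of every vertex layout in use.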
+		PipelineStateWgpu* getPipelineState(
+			  uint64_t _state
+			, uint64_t _stencil
+			, uint32_t _rgba
+			, FrameBufferHandle _fbh
+			, uint8_t _numStreams
+			, const VertexLayout** _vertexDecls
+			, bool _index32
+			, ProgramHandle _program
+			, uint8_t _numInstanceData
+			)
+		{
+			_state &= 0
+				| BGFX_STATE_WRITE_RGB
+				| BGFX_STATE_WRITE_A
+				| BGFX_STATE_WRITE_Z
+				| BGFX_STATE_DEPTH_TEST_MASK
+				| BGFX_STATE_BLEND_MASK
+				| BGFX_STATE_BLEND_EQUATION_MASK
+				| BGFX_STATE_BLEND_INDEPENDENT
+				| BGFX_STATE_BLEND_ALPHA_TO_COVERAGE
+				| BGFX_STATE_CULL_MASK
+				| BGFX_STATE_MSAA
+				| BGFX_STATE_LINEAA
+				| BGFX_STATE_CONSERVATIVE_RASTER
+				| BGFX_STATE_PT_MASK
+				;
+
+			const bool independentBlendEnable = !!(BGFX_STATE_BLEND_INDEPENDENT & _state);
+			const ProgramWgpu& program = m_program[_program.idx];
+
+			bx::HashMurmur2A murmur;
+			murmur.begin();
+			murmur.add(_state);
+			murmur.add(_stencil);
+			murmur.add(independentBlendEnable ? _rgba : 0);
+			murmur.add(_numInstanceData);
+
+			FrameBufferWgpu& frameBuffer = !isValid(_fbh) ? m_mainFrameBuffer : m_frameBuffers[_fbh.idx];
+			murmur.add(frameBuffer.m_pixelFormatHash);
+
+			murmur.add(program.m_vsh->m_hash);
+			if (NULL != program.m_fsh)
+			{
+				murmur.add(program.m_fsh->m_hash);
+			}
+
+			for (uint8_t ii = 0; ii < _numStreams; ++ii)
+			{
+				murmur.add(_vertexDecls[ii]->m_hash);
+			}
+
+			uint32_t hash = murmur.end();
+
+			PipelineStateWgpu* pso = m_pipelineStateCache.find(hash);
+
+			if (NULL == pso)
+			{
+				pso = BX_NEW(g_allocator, PipelineStateWgpu);
+
+				//pd.alphaToCoverageEnabled = !!(BGFX_STATE_BLEND_ALPHA_TO_COVERAGE & _state);
+
+				RenderPipelineDescriptor& pd = pso->m_rpd;
+
+				uint32_t frameBufferAttachment = 1;
+				uint32_t sampleCount = 1;
+
+				if (!isValid(_fbh)
+				||  s_renderWgpu->m_frameBuffers[_fbh.idx].m_swapChain)
+				{
+					SwapChainWgpu& swapChain = !isValid(_fbh)
+						? *s_renderWgpu->m_mainFrameBuffer.m_swapChain
+						: *s_renderWgpu->m_frameBuffers[_fbh.idx].m_swapChain
+						;
+					sampleCount = swapChain.m_backBufferColorMsaa
+						? swapChain.m_sampleCount
+						: 1
+						;
+					pd.colorStates[0].format = swapChain.m_colorFormat;
+					pd.depthStencilState.format = swapChain.m_depthFormat;
+					pd.desc.depthStencilState = &pd.depthStencilState;
+				}
+				else
+				{
+					frameBufferAttachment = frameBuffer.m_num;
+
+					for (uint32_t ii = 0; ii < frameBuffer.m_num; ++ii)
+					{
+						const TextureWgpu& texture = m_textures[frameBuffer.m_colorHandle[ii].idx];
+						sampleCount = texture.m_ptrMsaa
+							? texture.m_sampleCount
+							: 1
+							;
+						pd.colorStates[ii].format = s_textureFormat[texture.m_textureFormat].m_fmt;
+					}
+
+					pd.desc.colorStateCount = frameBuffer.m_num;
+
+					if (isValid(frameBuffer.m_depthHandle) )
+					{
+						const TextureWgpu& texture = m_textures[frameBuffer.m_depthHandle.idx];
+						pd.depthStencilState.format = s_textureFormat[texture.m_textureFormat].m_fmt;
+						pd.desc.depthStencilState = &pd.depthStencilState;
+					}
+				}
+
+				const uint32_t blend    = uint32_t( (_state&BGFX_STATE_BLEND_MASK         )>>BGFX_STATE_BLEND_SHIFT);
+				const uint32_t equation = uint32_t( (_state&BGFX_STATE_BLEND_EQUATION_MASK)>>BGFX_STATE_BLEND_EQUATION_SHIFT);
+
+				const uint32_t srcRGB = (blend    )&0xf;
+				const uint32_t dstRGB = (blend>> 4)&0xf;
+				const uint32_t srcA   = (blend>> 8)&0xf;
+				const uint32_t dstA   = (blend>>12)&0xf;
+
+				const uint32_t equRGB = (equation   )&0x7;
+				const uint32_t equA   = (equation>>3)&0x7;
+
+				wgpu::ColorWriteMask writeMask = wgpu::ColorWriteMask::None;
+				writeMask |= (_state&BGFX_STATE_WRITE_R) ? wgpu::ColorWriteMask::Red   : wgpu::ColorWriteMask::None;
+				writeMask |= (_state&BGFX_STATE_WRITE_G) ? wgpu::ColorWriteMask::Green : wgpu::ColorWriteMask::None;
+				writeMask |= (_state&BGFX_STATE_WRITE_B) ? wgpu::ColorWriteMask::Blue  : wgpu::ColorWriteMask::None;
+				writeMask |= (_state&BGFX_STATE_WRITE_A) ? wgpu::ColorWriteMask::Alpha : wgpu::ColorWriteMask::None;
+
+				for (uint32_t ii = 0; ii < (independentBlendEnable ? 1 : frameBufferAttachment); ++ii)
+				{
+					wgpu::ColorStateDescriptor& drt = pd.colorStates[ii]; // = pd.colorAttachments[ii];
+
+					if(!(BGFX_STATE_BLEND_MASK & _state))
+					{
+						drt.colorBlend = defaultDescriptor<wgpu::BlendDescriptor>();
+						drt.alphaBlend = defaultDescriptor<wgpu::BlendDescriptor>();
+					}
+					else
+					{
+						drt.colorBlend.srcFactor = s_blendFactor[srcRGB][0];
+						drt.colorBlend.dstFactor = s_blendFactor[dstRGB][0];
+						drt.colorBlend.operation = s_blendEquation[equRGB];
+
+						drt.alphaBlend.srcFactor = s_blendFactor[srcA][1];
+						drt.alphaBlend.dstFactor = s_blendFactor[dstA][1];
+						drt.alphaBlend.operation = s_blendEquation[equA];
+					}
+
+					drt.writeMask = writeMask;
+				}
+
+				if (independentBlendEnable)
+				{
+					for (uint32_t ii = 1, rgba = _rgba; ii < frameBufferAttachment; ++ii, rgba >>= 11)
+					{
+						wgpu::ColorStateDescriptor& drt = pd.colorStates[ii]; // = pd.colorAttachments[ii];
+
+						//drt.blendingEnabled = 0 != (rgba&0x7ff);
+
+						const uint32_t src           = (rgba   )&0xf;
+						const uint32_t dst           = (rgba>>4)&0xf;
+						const uint32_t equationIndex = (rgba>>8)&0x7;
+						
+						drt.colorBlend.srcFactor  = s_blendFactor[src][0];
+						drt.colorBlend.dstFactor  = s_blendFactor[dst][0];
+						drt.colorBlend.operation  = s_blendEquation[equationIndex];
+
+						drt.alphaBlend.srcFactor  = s_blendFactor[src][1];
+						drt.alphaBlend.dstFactor  = s_blendFactor[dst][1];
+						drt.alphaBlend.operation  = s_blendEquation[equationIndex];
+
+						drt.writeMask = writeMask;
+					}
+				}
+
+				pd.desc.vertexStage.module = program.m_vsh->m_module;
+				pd.fragmentStage.module = program.m_fsh != NULL ? program.m_fsh->m_module : wgpu::ShaderModule();
+
+				setDepthStencilState(pd.depthStencilState, _state, _stencil);
+
+				const uint64_t cull = _state & BGFX_STATE_CULL_MASK;
+				const uint8_t cullIndex = uint8_t(cull >> BGFX_STATE_CULL_SHIFT);
+				pd.rasterizationState.cullMode = s_cullMode[cullIndex];
+
+				pd.rasterizationState.frontFace = (_state & BGFX_STATE_FRONT_CCW) ? wgpu::FrontFace::CCW : wgpu::FrontFace::CW;
+
+				// pd.desc = m_renderPipelineDescriptor;
+				pd.desc.sampleCount = sampleCount;
+
+				wgpu::PipelineLayoutDescriptor layout = defaultDescriptor<wgpu::PipelineLayoutDescriptor>();
+				layout.bindGroupLayouts = &program.m_bindGroupLayout;
+				layout.bindGroupLayoutCount = 1;
+
+				pd.desc.layout = m_device.CreatePipelineLayout(&layout);
+				// TODO (hugoam) this should be cached too?
+
+				//uint32_t ref = (_state&BGFX_STATE_ALPHA_REF_MASK) >> BGFX_STATE_ALPHA_REF_SHIFT;
+				//viewState.m_alphaRef = ref / 255.0f;
+
+				const uint64_t primType = _state & BGFX_STATE_PT_MASK;
+				uint8_t primIndex = uint8_t(primType >> BGFX_STATE_PT_SHIFT);
+
+				PrimInfo prim = s_primInfo[primIndex];
+				pd.desc.primitiveTopology = prim.m_type;
+
+				VertexStateDescriptor input;
+				input.desc.vertexBufferCount = 0;
+
+				wgpu::VertexBufferLayoutDescriptor* inputBinding = input.vertexBuffers;
+				wgpu::VertexAttributeDescriptor* inputAttrib = input.attributes;
+
+				auto fillVertexDecl = [&](const ShaderWgpu* _vsh, const VertexLayout& _decl)
+				{
+					input.desc.vertexBufferCount += 1;
+
+					inputBinding->arrayStride = _decl.m_stride;
+					inputBinding->stepMode = wgpu::InputStepMode::Vertex;
+					inputBinding->attributes = inputAttrib;
+
+					uint32_t numAttribs = 0;
+
+					for(uint32_t attr = 0; attr < Attrib::Count; ++attr)
+					{
+						if(UINT16_MAX != _decl.m_attributes[attr])
+						{
+							if(UINT8_MAX == _vsh->m_attrRemap[attr])
+								continue;
+
+							inputAttrib->shaderLocation = _vsh->m_attrRemap[attr];
+
+							if(0 == _decl.m_attributes[attr])
+							{
+								inputAttrib->format = wgpu::VertexFormat::Float3;
+								inputAttrib->offset = 0;
+							}
+							else
+							{
+								uint8_t num;
+								AttribType::Enum type;
+								bool normalized;
+								bool asInt;
+								_decl.decode(Attrib::Enum(attr), num, type, normalized, asInt);
+								inputAttrib->format = s_attribType[type][num-1][normalized];
+								inputAttrib->offset = _decl.m_offset[attr];
+							}
+
+							++inputAttrib;
+							++numAttribs;
+						}
+					}
+
+					inputBinding->attributeCount = numAttribs;
+					inputBinding++;
+
+					return numAttribs;
+				};
+
+				//bool attrSet[Attrib::Count] = {};
+
+				uint16_t unsettedAttr[Attrib::Count];
+				bx::memCopy(unsettedAttr, program.m_vsh->m_attrMask, sizeof(uint16_t) * Attrib::Count);
+
+				uint8_t stream = 0;
+				for (; stream < _numStreams; ++stream)
+				{
+					VertexLayout layout;
+					bx::memCopy(&layout, _vertexDecls[stream], sizeof(VertexLayout));
+					const uint16_t* attrMask = program.m_vsh->m_attrMask;
+
+					for (uint32_t ii = 0; ii < Attrib::Count; ++ii)
+					{
+						Attrib::Enum iiattr = Attrib::Enum(ii);
+						uint16_t mask = attrMask[ii];
+						uint16_t attr = (layout.m_attributes[ii] & mask);
+						if (attr == 0)
+						{
+							layout.m_attributes[ii] = UINT16_MAX;
+						}
+						if (unsettedAttr[ii] && attr != UINT16_MAX)
+						{
+							unsettedAttr[ii] = 0;
+						}
+					}
+
+					fillVertexDecl(program.m_vsh, layout);
+				}
+
+				for (uint32_t ii = 0; ii < Attrib::Count; ++ii)
+				{
+					Attrib::Enum iiattr = Attrib::Enum(ii);
+					if (0 < unsettedAttr[ii])
+					{
+					  //uint32_t numAttribs = input.vertexBuffers[stream].attributeCount;
+					  //uint32_t numAttribs = inputBinding->attributeCount;
+					  //wgpu::VertexBufferLayoutDescriptor* inputAttrib = const_cast<VkVertexInputAttributeDescription*>(_vertexInputState.pVertexAttributeDescriptions + numAttribs);
+						inputAttrib->shaderLocation = program.m_vsh->m_attrRemap[ii];
+					  //inputAttrib->binding = 0;
+						inputAttrib->format = wgpu::VertexFormat::Float3; // VK_FORMAT_R32G32B32_SFLOAT;
+						inputAttrib->offset = 0;
+						input.vertexBuffers[stream-1].attributeCount++;
+						++inputAttrib;
+					}
+				}
+
+				// TODO (hugoam) WebGPU will crash whenever we don't supply the correct number of attributes
+				// (which depends on the stride passed to bgfx::allocInstanceDataBuffer), so we need to know the
+				// number of live instance attributes in the shader and, if they aren't all supplied, either:
+				//   - fail the pipeline state creation, or
+				//   - bind dummy attributes
+				if (0 < _numInstanceData)
+				{
+					uint32_t numBindings = input.desc.vertexBufferCount; // == stream+1 // .vertexBindingDescriptionCount;
+					uint32_t firstAttrib = input.vertexBuffers[stream-1].attributeCount;
+					uint32_t numAttribs = firstAttrib;
+
+					inputBinding->arrayStride = _numInstanceData * 16;
+					inputBinding->stepMode = wgpu::InputStepMode::Instance;
+
+					for (uint32_t inst = 0; inst < _numInstanceData; ++inst)
+					{
+						inputAttrib->shaderLocation = numAttribs;
+						inputAttrib->format = wgpu::VertexFormat::Float4;
+						inputAttrib->offset = inst * 16;
+
+						++numAttribs;
+						++inputAttrib;
+					}
+
+					input.desc.vertexBufferCount = numBindings + 1;
+					input.vertexBuffers[stream].attributeCount = numAttribs - firstAttrib;
+					input.vertexBuffers[stream].attributes = &input.attributes[firstAttrib];
+				}
+
+				input.desc.indexFormat = _index32 ? wgpu::IndexFormat::Uint32 : wgpu::IndexFormat::Uint16;
+
+				pd.desc.vertexState = &input.desc;
+
+				BX_TRACE("Creating WebGPU render pipeline state for program %s", program.m_vsh->name());
+				pso->m_rps = m_device.CreateRenderPipeline(&pd.desc);
+
+				m_pipelineStateCache.add(hash, pso);
+			}
+
+			return pso;
+		}
+
+		PipelineStateWgpu* getPipelineState(
+			  uint64_t _state
+			, uint64_t _stencil
+			, uint32_t _rgba
+			, FrameBufferHandle _fbh
+			, VertexLayoutHandle _declHandle
+			, bool _index32
+			, ProgramHandle _program
+			, uint8_t _numInstanceData
+			)
+		{
+			const VertexLayout* decl = &m_vertexDecls[_declHandle.idx];
+			return getPipelineState(
+				  _state
+				, _stencil
+				, _rgba
+				, _fbh
+				, 1
+				, &decl
+				, _index32
+				, _program
+				, _numInstanceData
+				);
+		}
+
+		PipelineStateWgpu* getComputePipelineState(ProgramHandle _program)
+		{
+			ProgramWgpu& program = m_program[_program.idx];
+
+			if (NULL == program.m_computePS)
+			{
+				PipelineStateWgpu* pso = BX_NEW(g_allocator, PipelineStateWgpu);
+				program.m_computePS = pso;
+
+				wgpu::PipelineLayoutDescriptor layout = defaultDescriptor<wgpu::PipelineLayoutDescriptor>();
+				layout.bindGroupLayouts = &program.m_bindGroupLayout;
+				layout.bindGroupLayoutCount = 1;
+
+				pso->m_layout = m_device.CreatePipelineLayout(&layout);
+
+				wgpu::ComputePipelineDescriptor desc;
+				desc.layout = pso->m_layout;
+				desc.computeStage = { nullptr, program.m_vsh->m_module, "main" };
+
+				pso->m_cps = m_device.CreateComputePipeline(&desc);
+			}
+
+			return program.m_computePS;
+		}
+
+
+		wgpu::Sampler getSamplerState(uint32_t _flags)
+		{
+			_flags &= BGFX_SAMPLER_BITS_MASK;
+			SamplerStateWgpu* sampler = m_samplerStateCache.find(_flags);
+
+			if (NULL == sampler)
+			{
+				sampler = BX_NEW(g_allocator, SamplerStateWgpu);
+
+				wgpu::SamplerDescriptor desc;
+				desc.addressModeU = s_textureAddress[(_flags&BGFX_SAMPLER_U_MASK)>>BGFX_SAMPLER_U_SHIFT];
+				desc.addressModeV = s_textureAddress[(_flags&BGFX_SAMPLER_V_MASK)>>BGFX_SAMPLER_V_SHIFT];
+				desc.addressModeW = s_textureAddress[(_flags&BGFX_SAMPLER_W_MASK)>>BGFX_SAMPLER_W_SHIFT];
+				desc.minFilter    = s_textureFilterMinMag[(_flags&BGFX_SAMPLER_MIN_MASK)>>BGFX_SAMPLER_MIN_SHIFT];
+				desc.magFilter    = s_textureFilterMinMag[(_flags&BGFX_SAMPLER_MAG_MASK)>>BGFX_SAMPLER_MAG_SHIFT];
+				desc.mipmapFilter = s_textureFilterMip[(_flags&BGFX_SAMPLER_MIP_MASK)>>BGFX_SAMPLER_MIP_SHIFT];
+				desc.lodMinClamp  = 0;
+				desc.lodMaxClamp  = FLT_MAX;
+
+				const uint32_t cmpFunc = (_flags&BGFX_SAMPLER_COMPARE_MASK)>>BGFX_SAMPLER_COMPARE_SHIFT;
+				desc.compare = 0 == cmpFunc
+					? wgpu::CompareFunction::Undefined
+					: s_cmpFunc[cmpFunc]
+					;
+
+				sampler->m_sampler = s_renderWgpu->m_device.CreateSampler(&desc);
+				m_samplerStateCache.add(_flags, sampler);
+			}
+
+			return sampler->m_sampler;
+		}
+
+		wgpu::CommandEncoder& getBlitCommandEncoder()
+		{
+			if (!m_cmd.m_encoder)
+				m_cmd.begin();
+
+			if (m_renderEncoder || m_computeEncoder)
+				endEncoding();
+
+			return m_cmd.m_encoder;
+		}
+
+		wgpu::RenderPassEncoder renderPass(bgfx::Frame* _render, bgfx::FrameBufferHandle fbh, bool clear, Clear clr, const char* name = NULL)
+		{
+			RenderPassStateWgpu* rps = s_renderWgpu->getRenderPassState(fbh, clear, clr);
+
+			RenderPassDescriptor& renderPassDescriptor = rps->m_rpd;
+			renderPassDescriptor.desc.label = name;
+
+			setFrameBuffer(renderPassDescriptor, fbh);
+
+			if(clear)
+			{
+				for(uint32_t ii = 0; ii < g_caps.limits.maxFBAttachments; ++ii)
+				{
+					wgpu::RenderPassColorAttachmentDescriptor& color = renderPassDescriptor.colorAttachments[ii];
+
+					if(0 != (BGFX_CLEAR_COLOR & clr.m_flags))
+					{
+						if(0 != (BGFX_CLEAR_COLOR_USE_PALETTE & clr.m_flags))
+						{
+							uint8_t index = (uint8_t)bx::uint32_min(BGFX_CONFIG_MAX_COLOR_PALETTE - 1, clr.m_index[ii]);
+							const float* rgba = _render->m_colorPalette[index];
+							const float rr = rgba[0];
+							const float gg = rgba[1];
+							const float bb = rgba[2];
+							const float aa = rgba[3];
+							color.clearColor = { rr, gg, bb, aa };
+						}
+						else
+						{
+							float rr = clr.m_index[0] * 1.0f / 255.0f;
+							float gg = clr.m_index[1] * 1.0f / 255.0f;
+							float bb = clr.m_index[2] * 1.0f / 255.0f;
+							float aa = clr.m_index[3] * 1.0f / 255.0f;
+							color.clearColor = { rr, gg, bb, aa };
+						}
+
+						color.loadOp = wgpu::LoadOp::Clear;
+					}
+					else
+					{
+						color.loadOp = wgpu::LoadOp::Load;
+					}
+
+					//desc.storeOp = desc.attachment.sampleCount > 1 ? wgpu::StoreOp::MultisampleResolve : wgpu::StoreOp::Store;
+					color.storeOp = wgpu::StoreOp::Store;
+				}
+
+				wgpu::RenderPassDepthStencilAttachmentDescriptor& depthStencil = renderPassDescriptor.depthStencilAttachment;
+
+				if(depthStencil.attachment)
+				{
+					depthStencil.clearDepth = clr.m_depth;
+					depthStencil.depthLoadOp = 0 != (BGFX_CLEAR_DEPTH & clr.m_flags)
+						? wgpu::LoadOp::Clear
+						: wgpu::LoadOp::Load
+						;
+					depthStencil.depthStoreOp = m_mainFrameBuffer.m_swapChain->m_backBufferColorMsaa
+						? wgpu::StoreOp(0) //wgpu::StoreOp::DontCare
+						: wgpu::StoreOp::Store
+						;
+
+					depthStencil.clearStencil = clr.m_stencil;
+					depthStencil.stencilLoadOp = 0 != (BGFX_CLEAR_STENCIL & clr.m_flags)
+						? wgpu::LoadOp::Clear
+						: wgpu::LoadOp::Load
+						;
+					depthStencil.stencilStoreOp = m_mainFrameBuffer.m_swapChain->m_backBufferColorMsaa
+						? wgpu::StoreOp(0) //wgpu::StoreOp::DontCare
+						: wgpu::StoreOp::Store
+						;
+				}
+			}
+			else
+			{
+				for(uint32_t ii = 0; ii < g_caps.limits.maxFBAttachments; ++ii)
+				{
+					wgpu::RenderPassColorAttachmentDescriptor& color = renderPassDescriptor.colorAttachments[ii];
+					if(color.attachment)
+					{
+						color.loadOp = wgpu::LoadOp::Load;
+					}
+				}
+
+				wgpu::RenderPassDepthStencilAttachmentDescriptor& depthStencil = renderPassDescriptor.depthStencilAttachment;
+
+				if(depthStencil.attachment)
+				{
+					depthStencil.depthLoadOp = wgpu::LoadOp::Load;
+					depthStencil.depthStoreOp = wgpu::StoreOp::Store;
+
+					depthStencil.stencilLoadOp = wgpu::LoadOp::Load;
+					depthStencil.stencilStoreOp = wgpu::StoreOp::Store;
+				}
+			}
+
+			wgpu::RenderPassEncoder rce = m_cmd.m_encoder.BeginRenderPass(&renderPassDescriptor.desc);
+			m_renderEncoder = rce;
+			return rce;
+		}
+
+		void endEncoding()
+		{
+			if (m_renderEncoder)
+			{
+				m_renderEncoder.EndPass();
+				m_renderEncoder = nullptr;
+			}
+
+			if (m_computeEncoder)
+			{
+				m_computeEncoder.EndPass();
+				m_computeEncoder = nullptr;
+			}
+		}
+
+		void* m_renderDocDll;
+
+#if !BX_PLATFORM_EMSCRIPTEN
+		dawn_native::Instance m_instance;
+#endif
+		wgpu::Device       m_device;
+		wgpu::Queue        m_queue;
+		TimerQueryWgpu     m_gpuTimer;
+		CommandQueueWgpu   m_cmd;
+
+		ScratchBufferWgpu   m_scratchBuffers[WEBGPU_MAX_FRAMES_IN_FLIGHT];
+
+		BindStateCacheWgpu  m_bindStateCache[WEBGPU_MAX_FRAMES_IN_FLIGHT];
+
+		uint8_t m_frameIndex;
+
+		uint16_t          m_numWindows;
+		FrameBufferHandle m_windows[BGFX_CONFIG_MAX_FRAME_BUFFERS];
+
+		IndexBufferWgpu  m_indexBuffers[BGFX_CONFIG_MAX_INDEX_BUFFERS];
+		VertexBufferWgpu m_vertexBuffers[BGFX_CONFIG_MAX_VERTEX_BUFFERS];
+		ShaderWgpu       m_shaders[BGFX_CONFIG_MAX_SHADERS];
+		ProgramWgpu      m_program[BGFX_CONFIG_MAX_PROGRAMS];
+		TextureWgpu      m_textures[BGFX_CONFIG_MAX_TEXTURES];
+		ReadbackWgpu     m_readbacks[BGFX_CONFIG_MAX_TEXTURES];
+		FrameBufferWgpu  m_mainFrameBuffer;
+		FrameBufferWgpu  m_frameBuffers[BGFX_CONFIG_MAX_FRAME_BUFFERS];
+		VertexLayout     m_vertexDecls[BGFX_CONFIG_MAX_VERTEX_LAYOUTS];
+		UniformRegistry  m_uniformReg;
+		void*            m_uniforms[BGFX_CONFIG_MAX_UNIFORMS];
+
+		//StateCacheT<BindStateWgpu*>   m_bindStateCache;
+		StateCacheT<RenderPassStateWgpu*> m_renderPassStateCache;
+		StateCacheT<PipelineStateWgpu*> m_pipelineStateCache;
+		StateCacheT<SamplerStateWgpu*>  m_samplerStateCache;
+
+		TextVideoMem m_textVideoMem;
+
+		uint8_t m_fsScratch[64 << 10];
+		uint8_t m_vsScratch[64 << 10];
+
+		FrameBufferHandle m_fbh;
+		bool m_rtMsaa;
+
+		Resolution m_resolution;
+		void* m_capture;
+		uint32_t m_captureSize;
+
+		wgpu::RenderPassEncoder     m_renderEncoder;
+		wgpu::ComputePassEncoder    m_computeEncoder;
+	};
+
+	RendererContextI* rendererCreate(const Init& _init)
+	{
+		s_renderWgpu = BX_NEW(g_allocator, RendererContextWgpu);
+		if (!s_renderWgpu->init(_init) )
+		{
+			BX_DELETE(g_allocator, s_renderWgpu);
+			s_renderWgpu = NULL;
+		}
+		return s_renderWgpu;
+	}
+
+	void rendererDestroy()
+	{
+		s_renderWgpu->shutdown();
+		BX_DELETE(g_allocator, s_renderWgpu);
+		s_renderWgpu = NULL;
+	}
+
+	void writeString(bx::WriterI* _writer, const char* _str)
+	{
+		bx::write(_writer, _str, (int32_t)bx::strLen(_str) );
+	}
+
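+	// ShaderWgpu::create parses the compiled shader blob: the header (magic and input/output
+	// hashes) is followed by the uniform table, which is split into predefined uniforms,
+	// storage buffers/images, sampled textures (each with an implicit sampler binding) and
+	// regular uniforms written to the constant buffer, then the shader code itself (consumed
+	// as 32-bit words by wgpu::ShaderModuleDescriptor), the vertex attribute remap table and
+	// the size of the shader's uniform data.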
+	void ShaderWgpu::create(ShaderHandle _handle, const Memory* _mem)
+	{
+		m_handle = _handle;
+
+		BX_TRACE("Creating shader %s", getName(_handle));
+
+		bx::MemoryReader reader(_mem->data, _mem->size);
+
+		uint32_t magic;
+		bx::read(&reader, magic);
+
+		wgpu::ShaderStage shaderStage;
+
+		if (isShaderType(magic, 'C'))
+		{
+			shaderStage = wgpu::ShaderStage::Compute;
+		}
+		else if (isShaderType(magic, 'F'))
+		{
+			shaderStage = wgpu::ShaderStage::Fragment;
+		}
+		else if (isShaderType(magic, 'G'))
+		{
+			//shaderStage = wgpu::ShaderStage::Geometry;
+		}
+		else if (isShaderType(magic, 'V'))
+		{
+			shaderStage = wgpu::ShaderStage::Vertex;
+		}
+
+		m_stage = shaderStage;
+
+		uint32_t hashIn;
+		bx::read(&reader, hashIn);
+
+		uint32_t hashOut;
+
+		if (isShaderVerLess(magic, 6) )
+		{
+			hashOut = hashIn;
+		}
+		else
+		{
+			bx::read(&reader, hashOut);
+		}
+
+		uint16_t count;
+		bx::read(&reader, count);
+
+		m_numPredefined = 0;
+		m_numUniforms = count;
+
+		BX_TRACE("%s Shader consts %d"
+			, getShaderTypeName(magic)
+			, count
+			);
+
+		const bool fragment = isShaderType(magic, 'F');
+		uint8_t fragmentBit = fragment ? BGFX_UNIFORM_FRAGMENTBIT : 0;
+
+		BX_CHECK(!isShaderVerLess(magic, 7), "WebGPU backend supports only shader binary version >= 7");
+
+		if (0 < count)
+		{
+			for (uint32_t ii = 0; ii < count; ++ii)
+			{
+				uint8_t nameSize = 0;
+				bx::read(&reader, nameSize);
+
+				char name[256];
+				bx::read(&reader, &name, nameSize);
+				name[nameSize] = '\0';
+
+				uint8_t type = 0;
+				bx::read(&reader, type);
+
+				uint8_t num;
+				bx::read(&reader, num);
+
+				uint16_t regIndex;
+				bx::read(&reader, regIndex);
+
+				uint16_t regCount;
+				bx::read(&reader, regCount);
+
+				uint8_t texComponent;
+				bx::read(&reader, texComponent);
+
+				uint8_t texDimension;
+				bx::read(&reader, texDimension);
+
+				const char* kind = "invalid";
+
+				PredefinedUniform::Enum predefined = nameToPredefinedUniformEnum(name);
+				if (PredefinedUniform::Count != predefined)
+				{
+					kind = "predefined";
+					m_predefined[m_numPredefined].m_loc   = regIndex;
+					m_predefined[m_numPredefined].m_count = regCount;
+					m_predefined[m_numPredefined].m_type  = uint8_t(predefined|fragmentBit);
+					m_numPredefined++;
+				}
+				else if (UniformType::End == (~BGFX_UNIFORM_MASK & type))
+				{
+					// regCount is used for descriptor type
+					const bool buffer = regCount == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+					const bool readonly = (type & BGFX_UNIFORM_READONLYBIT) != 0;
+
+					const uint8_t stage = regIndex - (buffer ? 16 : 32) - (fragment ? 48 : 0);
+
+					m_bindInfo[stage].m_index = m_numBuffers;
+					m_bindInfo[stage].m_binding = regIndex;
+					m_bindInfo[stage].m_uniform = { 0 };
+
+					m_buffers[m_numBuffers] = wgpu::BindGroupLayoutBinding();
+					m_buffers[m_numBuffers].binding = regIndex;
+					m_buffers[m_numBuffers].visibility = shaderStage;
+
+					if (buffer)
+					{
+						m_buffers[m_numBuffers].type = readonly
+							? wgpu::BindingType::ReadonlyStorageBuffer
+							: wgpu::BindingType::StorageBuffer;
+					}
+					else
+					{
+						m_buffers[m_numBuffers].type = readonly
+							? wgpu::BindingType::ReadonlyStorageTexture
+							: wgpu::BindingType::WriteonlyStorageTexture;
+					}
+
+					m_numBuffers++;
+
+					kind = "storage";
+				}
+				else if (UniformType::Sampler == (~BGFX_UNIFORM_MASK & type))
+				{
+					const UniformRegInfo* info = s_renderWgpu->m_uniformReg.find(name);
+					BX_CHECK(NULL != info, "User defined uniform '%s' is not found, it won't be set.", name);
+
+					const uint8_t stage = regIndex - 16 - (fragment ? 48 : 0);
+
+					m_bindInfo[stage].m_index = m_numSamplers;
+					m_bindInfo[stage].m_binding = regIndex;
+					m_bindInfo[stage].m_uniform = info->m_handle;
+
+					m_textures[m_numSamplers] = wgpu::BindGroupLayoutBinding();
+					m_textures[m_numSamplers].binding = regIndex;
+					m_textures[m_numSamplers].visibility = shaderStage;
+					m_textures[m_numSamplers].type = wgpu::BindingType::SampledTexture;
+					m_textures[m_numSamplers].viewDimension = wgpu::TextureViewDimension(texDimension);
+					m_textures[m_numSamplers].textureComponentType = wgpu::TextureComponentType(texComponent);
+
+					const bool comparisonSampler = (type & BGFX_UNIFORM_COMPAREBIT) != 0;
+
+					m_samplers[m_numSamplers] = wgpu::BindGroupLayoutBinding();
+					m_samplers[m_numSamplers].binding = regIndex + 16;
+					m_samplers[m_numSamplers].visibility = shaderStage;
+					m_samplers[m_numSamplers].type = comparisonSampler
+						? wgpu::BindingType::ComparisonSampler
+						: wgpu::BindingType::Sampler;
+
+					m_numSamplers++;
+
+					kind = "sampler";
+				}
+				else
+				{
+					const UniformRegInfo* info = s_renderWgpu->m_uniformReg.find(name);
+					BX_CHECK(NULL != info, "User defined uniform '%s' is not found, it won't be set.", name);
+
+					if(NULL == m_constantBuffer)
+					{
+						m_constantBuffer = UniformBuffer::create(1024);
+					}
+
+					kind = "user";
+					m_constantBuffer->writeUniformHandle((UniformType::Enum)(type | fragmentBit), regIndex, info->m_handle, regCount);
+				}
+
+				BX_TRACE("\t%s: %s (%s), r.index %3d, r.count %2d"
+					, kind
+					, name
+					, getUniformTypeName(UniformType::Enum(type&~BGFX_UNIFORM_MASK) )
+					, regIndex
+					, regCount
+					);
+				BX_UNUSED(kind);
+			}
+
+			if (NULL != m_constantBuffer)
+			{
+				m_constantBuffer->finish();
+			}
+		}
+
+		uint32_t shaderSize;
+		bx::read(&reader, shaderSize);
+
+		BX_TRACE("Shader body is at %lld size %u remaining %lld", reader.getPos(), shaderSize, reader.remaining());
+
+		const uint32_t* code = (const uint32_t*)reader.getDataPtr();
+		bx::skip(&reader, shaderSize+1);
+
+		m_code = (uint32_t*)BX_ALLOC(g_allocator, shaderSize);
+		m_codeSize = shaderSize;
+
+		bx::memCopy(m_code, code, shaderSize);
+		// TODO (hugoam) delete this
+
+		BX_TRACE("First word %08" PRIx32, code[0]);
+
+		uint8_t numAttrs = 0;
+		bx::read(&reader, numAttrs);
+
+		m_numAttrs = numAttrs;
+
+		bx::memSet(m_attrMask, 0, sizeof(m_attrMask));
+		bx::memSet(m_attrRemap, UINT8_MAX, sizeof(m_attrRemap));
+
+		for(uint8_t ii = 0; ii < numAttrs; ++ii)
+		{
+			uint16_t id;
+			bx::read(&reader, id);
+
+			auto toString = [](Attrib::Enum attr)
+			{
+				if (attr == Attrib::Position) return "Position";
+				else if (attr == Attrib::Normal) return "Normal";
+				else if (attr == Attrib::Tangent) return "Tangent";
+				else if (attr == Attrib::Bitangent) return "Bitangent";
+				else if (attr == Attrib::Color0) return "Color0";
+				else if (attr == Attrib::Color1) return "Color1";
+				else if (attr == Attrib::Color2) return "Color2";
+				else if (attr == Attrib::Color3) return "Color3";
+				else if (attr == Attrib::Indices) return "Indices";
+				else if (attr == Attrib::Weight) return "Weight";
+				else if (attr == Attrib::TexCoord0) return "TexCoord0";
+				else if (attr == Attrib::TexCoord1) return "TexCoord1";
+				else if (attr == Attrib::TexCoord2) return "TexCoord2";
+				else if (attr == Attrib::TexCoord3) return "TexCoord3";
+				else if (attr == Attrib::TexCoord4) return "TexCoord4";
+				else if (attr == Attrib::TexCoord5) return "TexCoord5";
+				else if (attr == Attrib::TexCoord6) return "TexCoord6";
+				else if (attr == Attrib::TexCoord7) return "TexCoord7";
+				return "Invalid";
+			};
+
+			Attrib::Enum attr = idToAttrib(id);
+
+			if(Attrib::Count != attr)
+			{
+				m_attrMask[attr] = UINT16_MAX;
+				m_attrRemap[attr] = ii;
+				BX_TRACE("\tattrib: %s (%i) at index %i", toString(attr), attr, ii);
+			}
+		}
+
+		wgpu::ShaderModuleDescriptor desc;
+		desc.label = getName(_handle);
+		desc.code = m_code;
+		desc.codeSize = shaderSize/4;
+
+		m_module = s_renderWgpu->m_device.CreateShaderModule(&desc);
+		
+		BGFX_FATAL(m_module
+			, bgfx::Fatal::InvalidShader
+			, "Failed to create %s shader."
+			, getShaderTypeName(magic)
+			);
+
+		bx::HashMurmur2A murmur;
+		murmur.begin();
+		murmur.add(hashIn);
+		murmur.add(hashOut);
+		murmur.add(code, shaderSize);
+		murmur.add(numAttrs);
+		murmur.add(m_attrMask, numAttrs);
+		m_hash = murmur.end();
+
+		auto roundUp = [](auto value, auto multiple)
+		{
+			return ((value + multiple - 1) / multiple) * multiple;
+		};
+
+		bx::read(&reader, m_size);
+
+		const uint32_t align = kMinUniformBufferOffsetAlignment;
+		m_gpuSize = bx::strideAlign(m_size, align);
+
+		BX_TRACE("shader size %d (used=%d) (prev=%d)", (int)m_size, (int)m_gpuSize, (int)bx::strideAlign(roundUp(m_size, 4), align));
+	}
+
+	void ProgramWgpu::create(const ShaderWgpu* _vsh, const ShaderWgpu* _fsh)
+	{
+		BX_CHECK(_vsh->m_module, "Vertex shader doesn't exist.");
+		m_vsh = _vsh;
+		m_fsh = _fsh;
+		m_gpuSize = _vsh->m_gpuSize + (_fsh ? _fsh->m_gpuSize : 0);
+
+		//BX_CHECK(NULL != _vsh->m_code, "Vertex shader doesn't exist.");
+		m_vsh = _vsh;
+		bx::memCopy(&m_predefined[0], _vsh->m_predefined, _vsh->m_numPredefined * sizeof(PredefinedUniform));
+		m_numPredefined = _vsh->m_numPredefined;
+
+		if(NULL != _fsh)
+		{
+			//BX_CHECK(NULL != _fsh->m_code, "Fragment shader doesn't exist.");
+			m_fsh = _fsh;
+			bx::memCopy(&m_predefined[m_numPredefined], _fsh->m_predefined, _fsh->m_numPredefined * sizeof(PredefinedUniform));
+			m_numPredefined += _fsh->m_numPredefined;
+		}
+
+		wgpu::BindGroupLayoutEntry bindings[2 + BGFX_CONFIG_MAX_TEXTURE_SAMPLERS * 3];
+
+		m_numUniforms = 0 + (_vsh->m_size > 0 ? 1 : 0) + (NULL != _fsh && _fsh->m_size > 0 ? 1 : 0);
+
+		uint8_t numBindings = 0;
+
+		// bind uniform buffer at slot 0
+		bindings[numBindings].binding = 0;
+		bindings[numBindings].visibility = _vsh->m_stage;
+		bindings[numBindings].type = wgpu::BindingType::UniformBuffer;
+		bindings[numBindings].hasDynamicOffset = true;
+		numBindings++;
+
+		if (m_numUniforms > 1)
+		{
+			bindings[numBindings].binding = 48;
+			bindings[numBindings].visibility = wgpu::ShaderStage::Fragment;
+			bindings[numBindings].type = wgpu::BindingType::UniformBuffer;
+			bindings[numBindings].hasDynamicOffset = true;
+			numBindings++;
+		}
+
+		uint8_t numSamplers = 0;
+
+		for (uint32_t ii = 0; ii < _vsh->m_numSamplers; ++ii)
+		{
+			m_textures[ii] = _vsh->m_textures[ii];
+			m_samplers[ii] = _vsh->m_samplers[ii];
+			bindings[numBindings++] = _vsh->m_textures[ii];
+			bindings[numBindings++] = _vsh->m_samplers[ii];
+		}
+
+		numSamplers += _vsh->m_numSamplers;
+
+		if (NULL != _fsh)
+		{
+			for (uint32_t ii = 0; ii < _fsh->m_numSamplers; ++ii)
+			{
+				m_textures[numSamplers + ii] = _fsh->m_textures[ii];
+				m_samplers[numSamplers + ii] = _fsh->m_samplers[ii];
+				bindings[numBindings++] = _fsh->m_textures[ii];
+				bindings[numBindings++] = _fsh->m_samplers[ii];
+			}
+
+			numSamplers += _fsh->m_numSamplers;
+		}
+
+		for (uint8_t stage = 0; stage < BGFX_CONFIG_MAX_TEXTURE_SAMPLERS; ++stage)
+		{
+			if (isValid(m_vsh->m_bindInfo[stage].m_uniform))
+			{
+				m_bindInfo[stage] = m_vsh->m_bindInfo[stage];
+			}
+			else if (NULL != m_fsh && isValid(m_fsh->m_bindInfo[stage].m_uniform))
+			{
+				m_bindInfo[stage] = m_fsh->m_bindInfo[stage];
+				m_bindInfo[stage].m_index += _vsh->m_numSamplers;
+			}
+		}
+
+		m_numSamplers = numSamplers;
+
+		for (uint32_t ii = 0; ii < _vsh->m_numBuffers; ++ii)
+		{
+			m_buffers[ii] = _vsh->m_buffers[ii];
+			bindings[numBindings++] = _vsh->m_buffers[ii];
+		}
+
+		m_numBuffers = _vsh->m_numBuffers;
+
+		BX_CHECK(m_numUniforms + m_numSamplers * 2 + m_numBuffers == numBindings, "Mismatch between the number of bindings and the uniform/sampler/buffer counts.");
+
+		wgpu::BindGroupLayoutDescriptor bindGroupDesc;
+		bindGroupDesc.entryCount = numBindings;
+		bindGroupDesc.entries = bindings;
+		m_bindGroupLayout = s_renderWgpu->m_device.CreateBindGroupLayout(&bindGroupDesc);
+
+		bx::HashMurmur2A murmur;
+		murmur.begin();
+		murmur.add(m_numUniforms);
+		murmur.add(m_textures, sizeof(wgpu::BindGroupLayoutEntry) * numSamplers);
+		murmur.add(m_samplers, sizeof(wgpu::BindGroupLayoutEntry) * numSamplers);
+		murmur.add(m_buffers,  sizeof(wgpu::BindGroupLayoutEntry) * m_numBuffers);
+		m_bindGroupLayoutHash = murmur.end();
+	}
+
+	void ProgramWgpu::destroy()
+	{
+		m_vsh = NULL;
+		m_fsh = NULL;
+		if ( NULL != m_computePS )
+		{
+			BX_DELETE(g_allocator, m_computePS);
+			m_computePS = NULL;
+		}
+	}
+
+	void BufferWgpu::create(uint32_t _size, void* _data, uint16_t _flags, uint16_t _stride, bool _vertex)
+	{
+		BX_UNUSED(_stride);
+
+		m_size = _size;
+		m_flags = _flags;
+		m_vertex = _vertex;
+
+		const uint32_t paddedSize = bx::strideAlign(_size, 4);
+
+		bool storage = m_flags & BGFX_BUFFER_COMPUTE_READ_WRITE;
+		bool indirect = m_flags & BGFX_BUFFER_DRAW_INDIRECT;
+
+		wgpu::BufferDescriptor desc;
+		desc.size = paddedSize;
+		desc.usage = _vertex ? wgpu::BufferUsage::Vertex : wgpu::BufferUsage::Index;
+		desc.usage |= (storage || indirect) ? wgpu::BufferUsage::Storage : wgpu::BufferUsage(0);
+		desc.usage |= indirect ? wgpu::BufferUsage::Indirect : wgpu::BufferUsage(0);
+		desc.usage |= wgpu::BufferUsage::CopyDst;
+
+		m_ptr = s_renderWgpu->m_device.CreateBuffer(&desc);
+
+		if(NULL != _data)
+		{
+			if(_size % 4 != 0)
+			{
+				uint8_t* temp = (uint8_t*)BX_ALLOC(g_allocator, paddedSize);
+				bx::memCopy(temp, _data, _size);
+				m_ptr.SetSubData(0, paddedSize, temp);
+				BX_FREE(g_allocator, temp);
+			}
+			else
+			{
+				m_ptr.SetSubData(0, _size, (const uint8_t*)_data);
+			}
+		}
+	}
+
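+	// Buffer updates below go through a transient staging buffer: the data is written into a
+	// freshly created CopySrc buffer with SetSubData and then copied into the destination
+	// buffer with CopyBufferToBuffer on the blit command encoder; the staging buffer is then
+	// handed back to the command queue via m_cmd.release().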
+	void BufferWgpu::update(uint32_t _offset, uint32_t _size, void* _data, bool _discard)
+	{
+		wgpu::CommandEncoder& bce = s_renderWgpu->getBlitCommandEncoder();
+
+		if (!m_vertex && !_discard)
+		{
+			if ( m_dynamic == NULL )
+			{
+				m_dynamic = (uint8_t*)BX_ALLOC(g_allocator, m_size);
+			}
+
+			bx::memCopy(m_dynamic + _offset, _data, _size);
+			uint32_t start = _offset & 4;
+			uint32_t end = bx::strideAlign(_offset + _size, 4);
+
+			wgpu::BufferDescriptor desc;
+			desc.size = end - start;
+			desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
+
+			wgpu::Buffer staging = s_renderWgpu->m_device.CreateBuffer(&desc); // m_dynamic, , 0);
+			staging.SetSubData(0, _size, reinterpret_cast<const uint8_t*>(_data));
+
+			// TODO pad to 4 bytes
+			bce.CopyBufferToBuffer(staging, 0, m_ptr, start, end - start);
+			s_renderWgpu->m_cmd.release(staging);
+		}
+		else
+		{
+			wgpu::BufferDescriptor desc;
+			desc.size = _size;
+			desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
+
+			wgpu::Buffer staging = s_renderWgpu->m_device.CreateBuffer(&desc);
+			staging.SetSubData(0, _size, reinterpret_cast<const uint8_t*>(_data));
+
+			bce.CopyBufferToBuffer(staging, 0, m_ptr, _offset, _size);
+			s_renderWgpu->m_cmd.release(staging);
+		}
+	}
+
+	void VertexBufferWgpu::create(uint32_t _size, void* _data, VertexLayoutHandle _layoutHandle, uint16_t _flags)
+	{
+		m_layoutHandle = _layoutHandle;
+		uint16_t stride = isValid(_layoutHandle)
+			? s_renderWgpu->m_vertexDecls[_layoutHandle.idx].m_stride
+			: 0
+			;
+
+		BufferWgpu::create(_size, _data, _flags, stride, true);
+	}
+
+	void TextureWgpu::create(TextureHandle _handle, const Memory* _mem, uint64_t _flags, uint8_t _skip)
+	{
+		m_handle = _handle;
+
+		m_sampler = s_renderWgpu->getSamplerState(uint32_t(_flags) );
+
+		bimg::ImageContainer imageContainer;
+
+		if (bimg::imageParse(imageContainer, _mem->data, _mem->size) )
+		{
+			const bimg::ImageBlockInfo& blockInfo = getBlockInfo(bimg::TextureFormat::Enum(imageContainer.m_format) );
+			const uint8_t startLod = bx::min<uint8_t>(_skip, imageContainer.m_numMips-1);
+
+			bimg::TextureInfo ti;
+			bimg::imageGetSize(
+				  &ti
+				, uint16_t(imageContainer.m_width >>startLod)
+				, uint16_t(imageContainer.m_height>>startLod)
+				, uint16_t(imageContainer.m_depth >>startLod)
+				, imageContainer.m_cubeMap
+				, 1 < imageContainer.m_numMips
+				, imageContainer.m_numLayers
+				, imageContainer.m_format
+				);
+			ti.numMips = bx::min<uint8_t>(imageContainer.m_numMips-startLod, ti.numMips);
+
+			m_flags     = _flags;
+			m_width     = ti.width;
+			m_height    = ti.height;
+			m_depth     = ti.depth;
+			m_numLayers = ti.numLayers;
+			m_numMips   = ti.numMips;
+			m_numSides  = ti.numLayers * (imageContainer.m_cubeMap ? 6 : 1);
+			m_requestedFormat  = TextureFormat::Enum(imageContainer.m_format);
+			m_textureFormat    = getViableTextureFormat(imageContainer);
+			
+			if (m_requestedFormat == bgfx::TextureFormat::D16)
+				m_textureFormat = bgfx::TextureFormat::D32F;
+
+			const bool compressed = bimg::isCompressed(bimg::TextureFormat::Enum(imageContainer.m_format));
+
+			if (compressed)
+				m_textureFormat = bgfx::TextureFormat::BGRA8;
+
+			const bool convert = m_textureFormat != m_requestedFormat;
+			const uint8_t bpp  = bimg::getBitsPerPixel(bimg::TextureFormat::Enum(m_textureFormat) );
+
+			wgpu::TextureDescriptor desc = defaultDescriptor<wgpu::TextureDescriptor>();
+			//desc.label = getName(_handle);
+
+			if (1 < ti.numLayers)
+			{
+				if (imageContainer.m_cubeMap)
+				{
+					m_type = TextureCube;
+					desc.dimension = wgpu::TextureDimension::e2D;
+				}
+				else
+				{
+					m_type = Texture2D;
+					desc.dimension = wgpu::TextureDimension::e2D;
+				}
+			}
+			else if (imageContainer.m_cubeMap)
+			{
+				m_type = TextureCube;
+				desc.dimension = wgpu::TextureDimension::e2D;
+			}
+			else if (1 < imageContainer.m_depth)
+			{
+				m_type = Texture3D;
+				desc.dimension = wgpu::TextureDimension::e3D;
+			}
+			else
+			{
+				m_type = Texture2D;
+				desc.dimension = wgpu::TextureDimension::e2D;
+			}
+
+			const uint16_t numSides = ti.numLayers * (imageContainer.m_cubeMap ? 6 : 1);
+			const uint32_t numSrd = numSides * ti.numMips;
+
+			const bool writeOnly    = 0 != (_flags&BGFX_TEXTURE_RT_WRITE_ONLY);
+			const bool computeWrite = 0 != (_flags&BGFX_TEXTURE_COMPUTE_WRITE);
+			const bool renderTarget = 0 != (_flags&BGFX_TEXTURE_RT_MASK);
+			const bool srgb         = 0 != (_flags&BGFX_TEXTURE_SRGB);
+
+			BX_TRACE("Texture %3d: %s (requested: %s), layers %d, %dx%d%s RT[%c], WO[%c], CW[%c], sRGB[%c]"
+				, this - s_renderWgpu->m_textures
+				, getName( (TextureFormat::Enum)m_textureFormat)
+				, getName( (TextureFormat::Enum)m_requestedFormat)
+				, ti.numLayers
+				, ti.width
+				, ti.height
+				, imageContainer.m_cubeMap ? "x6" : ""
+				, renderTarget ? 'x' : ' '
+				, writeOnly    ? 'x' : ' '
+				, computeWrite ? 'x' : ' '
+				, srgb         ? 'x' : ' '
+				);
+
+			const uint32_t msaaQuality = bx::uint32_satsub( (_flags&BGFX_TEXTURE_RT_MSAA_MASK)>>BGFX_TEXTURE_RT_MSAA_SHIFT, 1);
+			const int32_t  sampleCount = s_msaa[msaaQuality];
+
+			
+			wgpu::TextureFormat format = wgpu::TextureFormat::Undefined;
+			if (srgb)
+			{
+				format = s_textureFormat[m_textureFormat].m_fmtSrgb;
+				BX_WARN(format != wgpu::TextureFormat::Undefined
+					, "sRGB not supported for texture format %d"
+					, m_textureFormat
+					);
+			}
+
+			if (format == wgpu::TextureFormat::Undefined)
+			{
+				// not swizzled and not sRGB, or sRGB unsupported
+				format = s_textureFormat[m_textureFormat].m_fmt;
+			}
+
+			desc.format = format;
+			desc.size.width  = m_width;
+			desc.size.height = m_height;
+			desc.size.depth  = bx::uint32_max(1,imageContainer.m_depth);
+			desc.mipLevelCount    = m_numMips;
+			desc.sampleCount      = 1;
+			desc.arrayLayerCount  = m_numSides;
+
+			desc.usage = wgpu::TextureUsage::Sampled;
+			desc.usage |= wgpu::TextureUsage::CopyDst;
+			desc.usage |= wgpu::TextureUsage::CopySrc;
+
+			if (computeWrite)
+			{
+				desc.usage |= wgpu::TextureUsage::Storage;
+			}
+
+			if (renderTarget)
+			{
+				desc.usage |= wgpu::TextureUsage::OutputAttachment;
+			}
+
+			m_ptr = s_renderWgpu->m_device.CreateTexture(&desc);
+
+			if (sampleCount > 1)
+			{
+				desc.sampleCount = sampleCount;
+
+				m_ptrMsaa = s_renderWgpu->m_device.CreateTexture(&desc);
+			}
+
+			// decode images
+			struct ImageInfo
+			{
+				uint8_t* data;
+				uint32_t width;
+				uint32_t height;
+				uint32_t depth;
+				uint32_t pitch;
+				uint32_t slice;
+				uint32_t size;
+				uint8_t mipLevel;
+				uint8_t layer;
+			};
+
+			ImageInfo* imageInfos = (ImageInfo*)BX_ALLOC(g_allocator, sizeof(ImageInfo) * numSrd);
+			bx::memSet(imageInfos, 0, sizeof(ImageInfo) * numSrd);
+			uint32_t alignment = 1; // decoded images are tightly packed at this stage
+
+			uint32_t kk = 0;
+
+			for (uint8_t side = 0; side < numSides; ++side)
+			{
+				for (uint8_t lod = 0; lod < ti.numMips; ++lod)
+				{
+					bimg::ImageMip mip;
+					if (bimg::imageGetRawData(imageContainer, side, lod + startLod, _mem->data, _mem->size, mip))
+					{
+						if (convert)
+						{
+							const uint32_t pitch = bx::strideAlign(bx::max<uint32_t>(mip.m_width, 4) * bpp / 8, alignment);
+							const uint32_t slice = bx::strideAlign(bx::max<uint32_t>(mip.m_height, 4) * pitch, alignment);
+							const uint32_t size = slice * mip.m_depth;
+
+							uint8_t* temp = (uint8_t*)BX_ALLOC(g_allocator, size);
+							bimg::imageDecodeToBgra8(
+								  g_allocator
+								, temp
+								, mip.m_data
+								, mip.m_width
+								, mip.m_height
+								, pitch
+								, mip.m_format
+								);
+
+							imageInfos[kk].data = temp;
+							imageInfos[kk].width = mip.m_width;
+							imageInfos[kk].height = mip.m_height;
+							imageInfos[kk].depth = mip.m_depth;
+							imageInfos[kk].pitch = pitch;
+							imageInfos[kk].slice = slice;
+							imageInfos[kk].size = size;
+							imageInfos[kk].mipLevel = lod;
+							imageInfos[kk].layer = side;
+						}
+						else if (compressed)
+						{
+							const uint32_t pitch = bx::strideAlign((mip.m_width / blockInfo.blockWidth) * mip.m_blockSize, alignment);
+							const uint32_t slice = bx::strideAlign((mip.m_height / blockInfo.blockHeight) * pitch, alignment);
+							const uint32_t size = slice * mip.m_depth;
+
+							uint8_t* temp = (uint8_t*)BX_ALLOC(g_allocator, size);
+							bimg::imageCopy(
+								  temp
+								, mip.m_height / blockInfo.blockHeight
+								, (mip.m_width / blockInfo.blockWidth) * mip.m_blockSize
+								, mip.m_depth
+								, mip.m_data
+								, pitch
+								);
+
+							imageInfos[kk].data = temp;
+							imageInfos[kk].width = mip.m_width;
+							imageInfos[kk].height = mip.m_height;
+							imageInfos[kk].depth = mip.m_depth;
+							imageInfos[kk].pitch = pitch;
+							imageInfos[kk].slice = slice;
+							imageInfos[kk].size = size;
+							imageInfos[kk].mipLevel = lod;
+							imageInfos[kk].layer = side;
+						}
+						else
+						{
+							const uint32_t pitch = bx::strideAlign(mip.m_width * mip.m_bpp / 8, alignment);
+							const uint32_t slice = bx::strideAlign(mip.m_height * pitch, alignment);
+							const uint32_t size = slice * mip.m_depth;
+
+							uint8_t* temp = (uint8_t*)BX_ALLOC(g_allocator, size);
+							bimg::imageCopy(temp
+								, mip.m_height
+								, mip.m_width * mip.m_bpp / 8
+								, mip.m_depth
+								, mip.m_data
+								, pitch
+							);
+
+							imageInfos[kk].data = temp;
+							imageInfos[kk].width = mip.m_width;
+							imageInfos[kk].height = mip.m_height;
+							imageInfos[kk].depth = mip.m_depth;
+							imageInfos[kk].pitch = pitch;
+							imageInfos[kk].slice = slice;
+							imageInfos[kk].size = size;
+							imageInfos[kk].mipLevel = lod;
+							imageInfos[kk].layer = side;
+						}
+					}
+					++kk;
+				}
+			}
+
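+			// bytesPerRow for buffer-to-texture copies must be a multiple of 256,
+			// so each image row is re-pitched when packed into the staging buffer.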
+			uint32_t totalMemSize = 0;
+			for (uint32_t ii = 0; ii < numSrd; ++ii)
+			{
+				const uint32_t dstpitch = bx::strideAlign(imageInfos[ii].pitch, 256);
+				totalMemSize += dstpitch * imageInfos[ii].height;
+				//totalMemSize += imageInfos[ii].size;
+			}
+
+			wgpu::Buffer stagingBuffer;
+			if (totalMemSize > 0)
+			{
+				wgpu::BufferDescriptor stagingBufferDesc;
+				stagingBufferDesc.size = totalMemSize;
+				stagingBufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
+
+				stagingBuffer = s_renderWgpu->m_device.CreateBuffer(&stagingBufferDesc);
+
+				uint64_t offset = 0;
+
+				for (uint32_t ii = 0; ii < numSrd; ++ii)
+				{
+					const uint32_t dstpitch = bx::strideAlign(imageInfos[ii].pitch, 256);
+
+					const uint8_t* src = (uint8_t*)imageInfos[ii].data;
+					//uint64_t offset = 0;
+
+					for (uint32_t yy = 0; yy < imageInfos[ii].height; ++yy, src += imageInfos[ii].pitch, offset += dstpitch)
+					{
+						stagingBuffer.SetSubData(offset, imageInfos[ii].pitch, src);
+					}
+
+					//stagingBuffer.SetSubData(offset, imageInfos[ii].size, imageInfos[ii].data);
+					//offset += imageInfos[ii].size;
+				}
+			}
+			
+			wgpu::BufferCopyView* bufferCopyView = (wgpu::BufferCopyView*)BX_ALLOC(g_allocator, sizeof(wgpu::BufferCopyView) * numSrd);
+			wgpu::TextureCopyView* textureCopyView = (wgpu::TextureCopyView*)BX_ALLOC(g_allocator, sizeof(wgpu::TextureCopyView) * numSrd);
+			wgpu::Extent3D* textureCopySize = (wgpu::Extent3D*)BX_ALLOC(g_allocator, sizeof(wgpu::Extent3D) * numSrd);
+
+			uint64_t offset = 0;
+
+			for (uint32_t ii = 0; ii < numSrd; ++ii)
+			{
+				const uint32_t dstpitch = bx::strideAlign(imageInfos[ii].pitch, 256);
+
+				uint32_t idealWidth  = bx::max<uint32_t>(1, m_width  >> imageInfos[ii].mipLevel);
+				uint32_t idealHeight = bx::max<uint32_t>(1, m_height >> imageInfos[ii].mipLevel);
+				new (&bufferCopyView[ii]) wgpu::BufferCopyView();
+				new (&textureCopyView[ii]) wgpu::TextureCopyView();
+				new (&textureCopySize[ii]) wgpu::Extent3D();
+				bufferCopyView[ii].buffer       = stagingBuffer;
+				bufferCopyView[ii].offset       = offset;
+				bufferCopyView[ii].bytesPerRow  = dstpitch; // rows were re-pitched to a 256-byte multiple above
+				bufferCopyView[ii].rowsPerImage = 0;        // 0 defaults to the copy height
+				textureCopyView[ii].texture     = m_ptr;
+			  //textureCopyView[ii].imageSubresource.aspectMask = m_vkTextureAspect;
+				textureCopyView[ii].mipLevel    = imageInfos[ii].mipLevel;
+				textureCopyView[ii].arrayLayer  = imageInfos[ii].layer;
+			  //textureCopyView[ii].layerCount  = 1;
+				textureCopyView[ii].origin = { 0, 0, 0 };
+				textureCopySize[ii] = { idealWidth, idealHeight, imageInfos[ii].depth };
+
+				offset += dstpitch * imageInfos[ii].height;
+				//offset += imageInfos[ii].size;
+			}
+
+
+			if (stagingBuffer)
+			{
+				wgpu::CommandEncoder encoder = s_renderWgpu->getBlitCommandEncoder();
+				//wgpu::CommandEncoder encoder = s_renderWgpu->m_cmd.m_encoder;
+				for (uint32_t ii = 0; ii < numSrd; ++ii)
+				{
+					encoder.CopyBufferToTexture(&bufferCopyView[ii], &textureCopyView[ii], &textureCopySize[ii]);
+				}
+			}
+			else
+			{
+				//VkCommandBuffer commandBuffer = s_renderVK->beginNewCommand();
+				//setImageMemoryBarrier(
+				//	commandBuffer
+				//	, (m_flags & BGFX_TEXTURE_COMPUTE_WRITE
+				//		? VK_IMAGE_LAYOUT_GENERAL
+				//		: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+				//		)
+				//);
+				//s_renderVK->submitCommandAndWait(commandBuffer);
+			}
+
+			//vkFreeMemory(device, stagingDeviceMem, allocatorCb);
+			//vkDestroy(stagingBuffer);
+
+			BX_FREE(g_allocator, bufferCopyView);
+			BX_FREE(g_allocator, textureCopyView);
+			BX_FREE(g_allocator, textureCopySize);
+			for (uint32_t ii = 0; ii < numSrd; ++ii)
+			{
+				BX_FREE(g_allocator, imageInfos[ii].data);
+			}
+			BX_FREE(g_allocator, imageInfos);
+		}
+	}
+
+	void TextureWgpu::update(uint8_t _side, uint8_t _mip, const Rect& _rect, uint16_t _z, uint16_t _depth, uint16_t _pitch, const Memory* _mem)
+	{
+		BX_UNUSED(_side); BX_UNUSED(_mip); BX_UNUSED(_depth); BX_UNUSED(_z);
+
+		const uint32_t bpp       = bimg::getBitsPerPixel(bimg::TextureFormat::Enum(m_textureFormat) );
+		const uint32_t rectpitch = _rect.m_width*bpp/8;
+		const uint32_t srcpitch  = UINT16_MAX == _pitch ? rectpitch : _pitch;
+		const uint32_t slice     = ( (m_type == Texture3D) ? 0 : _side + _z * (m_type == TextureCube ? 6 : 1) );
+		const uint16_t zz        = (m_type == Texture3D) ? _z : 0 ;
+
+		const bool convert = m_textureFormat != m_requestedFormat;
+
+		uint8_t* data = _mem->data;
+		uint8_t* temp = NULL;
+
+		if (convert)
+		{
+			temp = (uint8_t*)BX_ALLOC(g_allocator, rectpitch*_rect.m_height);
+			bimg::imageDecodeToBgra8(
+				  g_allocator
+				, temp
+				, data
+				, _rect.m_width
+				, _rect.m_height
+				, srcpitch
+				, bimg::TextureFormat::Enum(m_requestedFormat)
+				);
+			data = temp;
+		}
+
+		//const uint32_t dstpitch = bx::strideAlign(rectpitch, 64);
+		const uint32_t dstpitch = bx::strideAlign(rectpitch, 256);
+
+		wgpu::BufferDescriptor desc;
+		desc.size = dstpitch * _rect.m_height;
+		desc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
+
+		wgpu::Buffer staging = s_renderWgpu->m_device.CreateBuffer(&desc);
+
+		const uint8_t* src = (uint8_t*)data;
+		uint64_t offset = 0;
+
+		for (uint32_t yy = 0; yy < _rect.m_height; ++yy, src += srcpitch, offset += dstpitch)
+		{
+			const uint32_t size = bx::strideAlign(rectpitch, 4);
+			staging.SetSubData(offset, size, src);
+		}
+
+		wgpu::BufferCopyView srcView;
+		srcView.buffer = staging;
+		srcView.offset = 0;
+		srcView.bytesPerRow = dstpitch;
+		srcView.rowsPerImage = 0;
+
+		wgpu::TextureCopyView destView;
+		destView.texture = m_ptr;
+		destView.mipLevel = _mip;
+		destView.arrayLayer = _side;
+		destView.origin = { _rect.m_x, _rect.m_y, zz };
+		//destView.origin = { _rect.m_x, _rect.m_y, _z };
+
+
+		wgpu::Extent3D destExtent = { _rect.m_width, _rect.m_height, _depth };
+
+		//region.imageSubresource.aspectMask = m_vkTextureAspect;
+
+		wgpu::CommandEncoder encoder = s_renderWgpu->getBlitCommandEncoder();
+		//wgpu::CommandEncoder encoder = s_renderWgpu->m_cmd.m_encoder;
+		encoder.CopyBufferToTexture(&srcView, &destView, &destExtent);
+
+		//wgpu::CommandBuffer copy = encoder.Finish();
+		//wgpu::Queue queue = s_renderWgpu->m_queue;
+		//queue.Submit(1, &copy);
+
+		//staging.Destroy();
+
+		if (NULL != temp)
+		{
+			BX_FREE(g_allocator, temp);
+		}
+	}
+
+	void BindStateWgpu::clear()
+	{
+		m_bindGroup = nullptr;
+	}
+
+	void ScratchBufferWgpu::create(uint32_t _size)
+	{
+		m_offset = 0;
+		m_size = _size;
+
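+		// Per-frame uniform scratch buffer; draw/compute constants are appended to it
+		// and bound with dynamic offsets, which are expected to respect the 256-byte
+		// minimum uniform buffer offset alignment (kMinBufferOffsetAlignment).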
+		wgpu::BufferDescriptor desc;
+		desc.size = _size; // UNIFORM_BUFFER_SIZE
+		// TODO (webgpu) use map and wgpu::BufferUsage::MapWrite ?
+		desc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst; //wgpu::BufferUsage::TransferDst;
+		m_buffer = s_renderWgpu->m_device.CreateBuffer(&desc);
+	}
+
+	void ScratchBufferWgpu::destroy()
+	{
+		reset();
+
+		m_buffer.Release();
+	}
+
+	void ScratchBufferWgpu::reset()
+	{
+		m_offset = 0;
+	}
+
+	void BindStateCacheWgpu::create() //(uint32_t _maxBindGroups)
+	{
+		//m_maxBindStates = 1024; // _maxBindStates;
+		m_currentBindState = 0;
+	}
+
+	void BindStateCacheWgpu::destroy()
+	{
+		reset();
+	}
+
+	void BindStateCacheWgpu::reset()
+	{
+		for (size_t i = 0; i < m_currentBindState; ++i)
+		{
+			m_bindStates[i] = {};
+		}
+
+		m_currentBindState = 0;
+	}
+
+	wgpu::TextureView TextureWgpu::getTextureMipLevel(int _mip)
+	{
+		if (_mip >= 0
+		&&  _mip <  m_numMips
+		&&  m_ptr)
+		{
+			if (!m_ptrMips[_mip])
+			{
+				wgpu::TextureViewDescriptor desc;
+				desc.baseMipLevel = _mip;
+				desc.mipLevelCount = 1;
+
+				desc.format = s_textureFormat[m_textureFormat].m_fmt;
+
+				if (TextureCube == m_type)
+				{
+					//desc.dimension = MTLTextureType2DArray;
+					desc.baseArrayLayer = 0;
+					desc.arrayLayerCount = m_numLayers * 6;
+				}
+				else
+				{
+					desc.baseArrayLayer = 0;
+					desc.arrayLayerCount = m_numLayers;
+				}
+
+				m_ptrMips[_mip] = m_ptr.CreateView(&desc);
+			}
+
+			return m_ptrMips[_mip];
+		}
+
+		return wgpu::TextureView();
+	}
+
+	void SwapChainWgpu::init(wgpu::Device _device, void* _nwh, uint32_t _width, uint32_t _height)
+	{
+		BX_UNUSED(_nwh);
+
+		wgpu::SwapChainDescriptor desc;
+		desc.usage = wgpu::TextureUsage::OutputAttachment;
+		desc.width = _width;
+		desc.height = _height;
+
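+		// Native builds create the swap chain through a Dawn swap chain
+		// implementation bound to the window handle; Emscripten builds create it
+		// from an HTML canvas surface instead.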
+#if !BX_PLATFORM_EMSCRIPTEN
+		m_impl = createSwapChain(_device, _nwh);
+
+		desc.presentMode = wgpu::PresentMode::Immediate;
+		desc.format = wgpu::TextureFormat::RGBA8Unorm;
+		desc.implementation = reinterpret_cast<uint64_t>(&m_impl);
+		m_swapChain = _device.CreateSwapChain(nullptr, &desc);
+#else
+		wgpu::SurfaceDescriptorFromHTMLCanvasId canvasDesc{};
+		canvasDesc.id = "canvas";
+
+		wgpu::SurfaceDescriptor surfDesc{};
+		surfDesc.nextInChain = &canvasDesc;
+		wgpu::Surface surface = wgpu::Instance().CreateSurface(&surfDesc);
+
+		desc.presentMode = wgpu::PresentMode::Immediate;
+		desc.format = wgpu::TextureFormat::BGRA8Unorm;
+		m_swapChain = _device.CreateSwapChain(surface, &desc);
+#endif
+
+		m_colorFormat = desc.format;
+		m_depthFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+	}
+
+	void SwapChainWgpu::resize(FrameBufferWgpu& _frameBuffer, uint32_t _width, uint32_t _height, uint32_t _flags)
+	{
+		BX_TRACE("SwapChainWgpu::resize");
+		
+		const int32_t sampleCount = s_msaa[(_flags&BGFX_RESET_MSAA_MASK)>>BGFX_RESET_MSAA_SHIFT];
+
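+		// The backbuffer format depends on the backend: Dawn's Vulkan backend
+		// exposes BGRA8, the other backends use RGBA8.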
+		wgpu::TextureFormat format = (_flags & BGFX_RESET_SRGB_BACKBUFFER)
+#ifdef DAWN_ENABLE_BACKEND_VULKAN
+			? wgpu::TextureFormat::BGRA8UnormSrgb
+			: wgpu::TextureFormat::BGRA8Unorm
+#else
+			? wgpu::TextureFormat::RGBA8UnormSrgb
+			: wgpu::TextureFormat::RGBA8Unorm
+#endif
+			;
+
+#if !BX_PLATFORM_EMSCRIPTEN
+		m_swapChain.Configure(format, wgpu::TextureUsage::OutputAttachment, _width, _height);
+#endif
+
+		m_colorFormat = format;
+		m_depthFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+
+		bx::HashMurmur2A murmur;
+		murmur.begin();
+		murmur.add(1);
+		murmur.add((uint32_t)m_colorFormat);
+		murmur.add((uint32_t)m_depthFormat);
+		murmur.add((uint32_t)sampleCount);
+		_frameBuffer.m_pixelFormatHash = murmur.end();
+
+		wgpu::TextureDescriptor desc;
+
+		desc.dimension = wgpu::TextureDimension::e2D;
+
+		desc.size.width  = _width;
+		desc.size.height = _height;
+		desc.size.depth  = 1;
+		desc.mipLevelCount = 1;
+		desc.sampleCount = sampleCount;
+		desc.arrayLayerCount = 1;
+		desc.usage = wgpu::TextureUsage::OutputAttachment;
+
+		if (m_backBufferDepth)
+		{
+			m_backBufferDepth.Destroy();
+		}
+
+		desc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+
+		m_backBufferDepth = s_renderWgpu->m_device.CreateTexture(&desc);
+
+		if (sampleCount > 1)
+		{
+			if (m_backBufferColorMsaa)
+			{
+				m_backBufferColorMsaa.Destroy();
+			}
+
+			desc.format = m_colorFormat;
+			desc.sampleCount = sampleCount;
+
+			m_backBufferColorMsaa = s_renderWgpu->m_device.CreateTexture(&desc);
+		}
+	}
+
+	void SwapChainWgpu::flip()
+	{
+		m_drawable = m_swapChain.GetCurrentTextureView();
+	}
+
+	wgpu::TextureView SwapChainWgpu::current()
+	{
+		if (!m_drawable)
+			m_drawable = m_swapChain.GetCurrentTextureView();
+		return m_drawable;
+	}
+
+	void FrameBufferWgpu::create(uint8_t _num, const Attachment* _attachment)
+	{
+		m_swapChain = NULL;
+		m_denseIdx  = UINT16_MAX;
+		m_num       = 0;
+		m_width     = 0;
+		m_height    = 0;
+
+		for (uint32_t ii = 0; ii < _num; ++ii)
+		{
+			const Attachment& at = _attachment[ii];
+			TextureHandle handle = at.handle;
+
+			if (isValid(handle) )
+			{
+				const TextureWgpu& texture = s_renderWgpu->m_textures[handle.idx];
+
+				if (0 == m_width)
+				{
+					m_width = texture.m_width;
+					m_height = texture.m_height;
+				}
+
+				if (bimg::isDepth(bimg::TextureFormat::Enum(texture.m_textureFormat) ) )
+				{
+					m_depthHandle = handle;
+					m_depthAttachment = at;
+				}
+				else
+				{
+					m_colorHandle[m_num] = handle;
+					m_colorAttachment[m_num] = at;
+					m_num++;
+				}
+			}
+		}
+
+		bx::HashMurmur2A murmur;
+		murmur.begin();
+		murmur.add(m_num);
+
+		for (uint32_t ii = 0; ii < m_num; ++ii)
+		{
+			const TextureWgpu& texture = s_renderWgpu->m_textures[m_colorHandle[ii].idx];
+			murmur.add(uint32_t(s_textureFormat[texture.m_textureFormat].m_fmt) );
+		}
+
+		if (!isValid(m_depthHandle) )
+		{
+			murmur.add(uint32_t(wgpu::TextureFormat::Undefined) );
+		}
+		else
+		{
+			const TextureWgpu& depthTexture = s_renderWgpu->m_textures[m_depthHandle.idx];
+			murmur.add(uint32_t(s_textureFormat[depthTexture.m_textureFormat].m_fmt) );
+		}
+
+		murmur.add(1); // SampleCount
+
+		m_pixelFormatHash = murmur.end();
+	}
+
+	bool FrameBufferWgpu::create(uint16_t _denseIdx, void* _nwh, uint32_t _width, uint32_t _height, TextureFormat::Enum _format, TextureFormat::Enum _depthFormat)
+	{
+		BX_UNUSED(_format, _depthFormat);
+		m_swapChain = BX_NEW(g_allocator, SwapChainWgpu);
+		m_num       = 0;
+		m_width     = _width;
+		m_height    = _height;
+		m_nwh       = _nwh;
+		m_denseIdx  = _denseIdx;
+
+		m_swapChain->init(s_renderWgpu->m_device, _nwh, _width, _height);
+		m_swapChain->resize(*this, _width, _height, 0);
+
+		return m_swapChain->m_swapChain != nullptr;
+	}
+
+	void FrameBufferWgpu::postReset()
+	{
+	}
+
+	uint16_t FrameBufferWgpu::destroy()
+	{
+		if (NULL != m_swapChain)
+		{
+			BX_DELETE(g_allocator, m_swapChain);
+			m_swapChain = NULL;
+		}
+
+		m_num = 0;
+		m_nwh = NULL;
+		m_depthHandle.idx = kInvalidHandle;
+
+		uint16_t denseIdx = m_denseIdx;
+		m_denseIdx = UINT16_MAX;
+
+		return denseIdx;
+	}
+
+	void CommandQueueWgpu::init(wgpu::Queue _queue)
+	{
+		m_queue = _queue;
+#if BGFX_CONFIG_MULTITHREADED
+		//m_framesSemaphore.post(WEBGPU_MAX_FRAMES_IN_FLIGHT);
+#endif
+	}
+
+	void CommandQueueWgpu::shutdown()
+	{
+		finish(true);
+	}
+
+	void CommandQueueWgpu::begin()
+	{
+		m_encoder = s_renderWgpu->m_device.CreateCommandEncoder();
+	}
+
+	inline void commandBufferFinishedCallback(void* _data)
+	{
+#if BGFX_CONFIG_MULTITHREADED
+		CommandQueueWgpu* queue = (CommandQueueWgpu*)_data;
+		if (queue)
+		{
+			//queue->m_framesSemaphore.post();
+		}
+#else
+		BX_UNUSED(_data);
+#endif
+	}
+
+	void CommandQueueWgpu::kick(bool _endFrame, bool _waitForFinish)
+	{
+		if (m_encoder)
+		{
+			if (_endFrame)
+			{
+				m_releaseWriteIndex = (m_releaseWriteIndex + 1) % WEBGPU_MAX_FRAMES_IN_FLIGHT;
+				//m_encoder.addCompletedHandler(commandBufferFinishedCallback, this);
+			}
+
+			wgpu::CommandBuffer commands = m_encoder.Finish();
+			m_queue.Submit(1, &commands);
+
+			if (_waitForFinish)
+			{
+#if BGFX_CONFIG_MULTITHREADED
+				//m_framesSemaphore.post();
+#endif
+			}
+
+			m_encoder = nullptr;
+		}
+	}
+
+	void CommandQueueWgpu::finish(bool _finishAll)
+	{
+		if (_finishAll)
+		{
+			uint32_t count = m_encoder
+				? 2
+				: 3
+				;
+
+			for (uint32_t ii = 0; ii < count; ++ii)
+			{
+				consume();
+			}
+
+#if BGFX_CONFIG_MULTITHREADED
+			//m_framesSemaphore.post(count);
+#endif
+		}
+		else
+		{
+			consume();
+		}
+	}
+
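+	// Buffers released during a frame are parked in a small ring of lists (one per
+	// frame in flight) and only destroyed once that frame is consumed, so in-flight
+	// GPU work never references a destroyed staging buffer.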
+	void CommandQueueWgpu::release(wgpu::Buffer _buffer)
+	{
+		m_release[m_releaseWriteIndex].push_back(_buffer);
+	}
+
+	void CommandQueueWgpu::consume()
+	{
+#if BGFX_CONFIG_MULTITHREADED
+		//m_framesSemaphore.wait();
+#endif
+
+		m_releaseReadIndex = (m_releaseReadIndex + 1) % WEBGPU_MAX_FRAMES_IN_FLIGHT;
+
+		for (wgpu::Buffer& buffer : m_release[m_releaseReadIndex])
+		{
+			buffer.Destroy();
+		}
+		
+		m_release[m_releaseReadIndex].clear();
+	}
+
+	void TimerQueryWgpu::init()
+	{
+		m_frequency = bx::getHPFrequency();
+	}
+
+	void TimerQueryWgpu::shutdown()
+	{
+	}
+
+	uint32_t TimerQueryWgpu::begin(uint32_t _resultIdx)
+	{
+		BX_UNUSED(_resultIdx);
+		return 0;
+	}
+
+	void TimerQueryWgpu::end(uint32_t _idx)
+	{
+		BX_UNUSED(_idx);
+	}
+
+#if 0
+	static void setTimestamp(void* _data)
+	{
+		*( (int64_t*)_data) = bx::getHPCounter();
+	}
+#endif
+
+	void TimerQueryWgpu::addHandlers(wgpu::CommandBuffer& _commandBuffer)
+	{
+		BX_UNUSED(_commandBuffer);
+
+		while (0 == m_control.reserve(1) )
+		{
+			m_control.consume(1);
+		}
+
+		//uint32_t offset = m_control.m_current;
+		//_commandBuffer.addScheduledHandler(setTimestamp, &m_result[offset].m_begin);
+		//_commandBuffer.addCompletedHandler(setTimestamp, &m_result[offset].m_end);
+		m_control.commit(1);
+	}
+
+	bool TimerQueryWgpu::get()
+	{
+		if (0 != m_control.available() )
+		{
+			uint32_t offset = m_control.m_read;
+			m_begin = m_result[offset].m_begin;
+			m_end   = m_result[offset].m_end;
+			m_elapsed = m_end - m_begin;
+
+			m_control.consume(1);
+
+			return true;
+		}
+
+		return false;
+	}
+
+	void RendererContextWgpu::submitBlit(BlitState& _bs, uint16_t _view)
+	{
+		if (!_bs.hasItem(_view) )
+		{
+			return;
+		}
+
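+		// Flush pending blits for this view: close any active pass encoder and
+		// record texture-to-texture copies on the shared blit command encoder.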
+		endEncoding();
+
+		wgpu::CommandEncoder& bce = getBlitCommandEncoder();
+
+		while (_bs.hasItem(_view) )
+		{
+			const BlitItem& blit = _bs.advance();
+
+			const TextureWgpu& src = m_textures[blit.m_src.idx];
+			const TextureWgpu& dst = m_textures[blit.m_dst.idx];
+
+			uint32_t srcWidth  = bx::uint32_min(src.m_width,  blit.m_srcX + blit.m_width)  - blit.m_srcX;
+			uint32_t srcHeight = bx::uint32_min(src.m_height, blit.m_srcY + blit.m_height) - blit.m_srcY;
+			uint32_t srcDepth  = bx::uint32_min(src.m_depth,  blit.m_srcZ + blit.m_depth)  - blit.m_srcZ;
+			uint32_t dstWidth  = bx::uint32_min(dst.m_width,  blit.m_dstX + blit.m_width)  - blit.m_dstX;
+			uint32_t dstHeight = bx::uint32_min(dst.m_height, blit.m_dstY + blit.m_height) - blit.m_dstY;
+			uint32_t dstDepth  = bx::uint32_min(dst.m_depth,  blit.m_dstZ + blit.m_depth)  - blit.m_dstZ;
+			uint32_t width     = bx::uint32_min(srcWidth,  dstWidth);
+			uint32_t height    = bx::uint32_min(srcHeight, dstHeight);
+			uint32_t depth     = bx::uint32_min(srcDepth,  dstDepth);
+			bool     readBack  = !!(dst.m_flags & BGFX_TEXTURE_READ_BACK);
+
+			wgpu::TextureCopyView srcView;
+			srcView.texture = src.m_ptr;
+			srcView.origin = { blit.m_srcX, blit.m_srcY, 0 };
+			srcView.mipLevel = blit.m_srcMip;
+			srcView.arrayLayer = blit.m_srcZ;
+
+			wgpu::TextureCopyView dstView;
+			dstView.texture = dst.m_ptr;
+			dstView.origin = { blit.m_dstX, blit.m_dstY, 0 };
+			dstView.mipLevel = blit.m_dstMip;
+			dstView.arrayLayer = blit.m_dstZ;
+
+			if (depth == 0)
+			{
+				wgpu::Extent3D copyExtent = { width, height, 1 };
+				bce.CopyTextureToTexture(&srcView, &dstView, &copyExtent);
+			}
+			else
+			{
+				wgpu::Extent3D copyExtent = { width, height, depth };
+				bce.CopyTextureToTexture(&srcView, &dstView, &copyExtent);
+			}
+
+			if (readBack)
+			{
+				//bce..synchronizeTexture(dst.m_ptr, 0, blit.m_dstMip);
+			}
+		}
+
+		//if (bce)
+		//{
+		//	bce.endEncoding();
+		//	bce = 0;
+		//}
+	}
+
+	void RendererContextWgpu::submit(Frame* _render, ClearQuad& _clearQuad, TextVideoMemBlitter& _textVideoMemBlitter)
+	{
+		if(_render->m_capture)
+		{
+			renderDocTriggerCapture();
+		}
+
+		m_cmd.finish(false);
+
+		if (!m_cmd.m_encoder)
+		{
+			m_cmd.begin();
+		}
+
+		BGFX_WEBGPU_PROFILER_BEGIN_LITERAL("rendererSubmit", kColorFrame);
+
+		int64_t timeBegin = bx::getHPCounter();
+		int64_t captureElapsed = 0;
+
+		//m_gpuTimer.addHandlers(m_encoder);
+
+		updateResolution(_render->m_resolution);
+
+		m_frameIndex = 0; // (m_frameIndex + 1) % WEBGPU_MAX_FRAMES_IN_FLIGHT;
+
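+		// Per-frame resources: the uniform scratch buffer and the bind group cache
+		// are indexed by frame and reset at the start of each submitted frame.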
+		ScratchBufferWgpu& scratchBuffer = m_scratchBuffers[m_frameIndex];
+		scratchBuffer.reset();
+
+		BindStateCacheWgpu& bindStates = m_bindStateCache[m_frameIndex];
+		bindStates.reset();
+
+		if (0 < _render->m_iboffset)
+		{
+			BGFX_PROFILER_SCOPE("bgfx/Update transient index buffer", kColorResource);
+			TransientIndexBuffer* ib = _render->m_transientIb;
+			m_indexBuffers[ib->handle.idx].update(0, bx::strideAlign(_render->m_iboffset,4), ib->data, true);
+		}
+
+		if (0 < _render->m_vboffset)
+		{
+			BGFX_PROFILER_SCOPE("bgfx/Update transient vertex buffer", kColorResource);
+			TransientVertexBuffer* vb = _render->m_transientVb;
+			m_vertexBuffers[vb->handle.idx].update(0, bx::strideAlign(_render->m_vboffset,4), vb->data, true);
+		}
+
+		_render->sort();
+
+		RenderDraw currentState;
+		currentState.clear();
+		currentState.m_stateFlags = BGFX_STATE_NONE;
+		currentState.m_stencil    = packStencil(BGFX_STENCIL_NONE, BGFX_STENCIL_NONE);
+
+		RenderBind currentBind;
+		currentBind.clear();
+
+		static ViewState viewState;
+		viewState.reset(_render);
+		uint32_t blendFactor = 0;
+
+		//bool wireframe = !!(_render->m_debug&BGFX_DEBUG_WIREFRAME);
+
+		ProgramHandle currentProgram = BGFX_INVALID_HANDLE;
+		uint32_t currentBindHash = 0;
+		uint32_t currentBindLayoutHash = 0;
+		BindStateWgpu* previousBindState = nullptr;
+		SortKey key;
+		uint16_t view = UINT16_MAX;
+		FrameBufferHandle fbh = { BGFX_CONFIG_MAX_FRAME_BUFFERS };
+
+		BlitState bs(_render);
+
+		const uint64_t primType = 0;
+		uint8_t primIndex = uint8_t(primType >> BGFX_STATE_PT_SHIFT);
+		PrimInfo prim = s_primInfo[primIndex];
+		const uint32_t maxComputeBindings = g_caps.limits.maxComputeBindings;
+
+		// TODO store this
+		static wgpu::RenderPassEncoder rce;
+		
+		PipelineStateWgpu* currentPso = NULL;
+
+		bool wasCompute     = false;
+		bool viewHasScissor = false;
+		Rect viewScissorRect;
+		viewScissorRect.clear();
+
+		uint32_t statsNumPrimsSubmitted[BX_COUNTOF(s_primInfo)] = {};
+		uint32_t statsNumPrimsRendered[BX_COUNTOF(s_primInfo)]  = {};
+		uint32_t statsNumInstances[BX_COUNTOF(s_primInfo)]      = {};
+		uint32_t statsNumDrawIndirect[BX_COUNTOF(s_primInfo)]   = {};
+		uint32_t statsNumIndices = 0;
+		uint32_t statsKeyType[2] = {};
+
+		Profiler<TimerQueryWgpu> profiler(
+			  _render
+			, m_gpuTimer
+			, s_viewName
+			);
+
+		if (0 == (_render->m_debug & BGFX_DEBUG_IFH))
+		{
+			viewState.m_rect = _render->m_view[0].m_rect;
+			int32_t numItems = _render->m_numRenderItems;
+
+			for (int32_t item = 0; item < numItems;)
+			{
+				const uint64_t encodedKey = _render->m_sortKeys[item];
+				const bool isCompute = key.decode(encodedKey, _render->m_viewRemap);
+				statsKeyType[isCompute]++;
+
+				const bool viewChanged = 0
+					|| key.m_view != view
+					|| item == numItems
+					;
+
+				const uint32_t itemIdx = _render->m_sortValues[item];
+				const RenderItem& renderItem = _render->m_renderItem[itemIdx];
+				const RenderBind& renderBind = _render->m_renderItemBind[itemIdx];
+				++item;
+
+				if (viewChanged
+					|| (!isCompute && wasCompute))
+				{
+					view = key.m_view;
+					currentProgram = BGFX_INVALID_HANDLE;
+
+					if (item > 1)
+					{
+						profiler.end();
+					}
+
+					BGFX_WEBGPU_PROFILER_END();
+					setViewType(view, "  ");
+					BGFX_WEBGPU_PROFILER_BEGIN(view, kColorView);
+
+					profiler.begin(view);
+
+					viewState.m_rect = _render->m_view[view].m_rect;
+
+					submitBlit(bs, view);
+
+					if (!isCompute)
+					{
+						const Rect& scissorRect = _render->m_view[view].m_scissor;
+						viewHasScissor = !scissorRect.isZero();
+						viewScissorRect = viewHasScissor ? scissorRect : viewState.m_rect;
+						Clear& clr = _render->m_view[view].m_clear;
+
+						Rect viewRect = viewState.m_rect;
+						bool clearWithRenderPass = false;
+
+						if (!m_renderEncoder
+							|| fbh.idx != _render->m_view[view].m_fbh.idx)
+						{
+							endEncoding();
+
+							fbh = _render->m_view[view].m_fbh;
+
+							uint32_t width = m_resolution.width;
+							uint32_t height = m_resolution.height;
+
+							if (isValid(fbh))
+							{
+								FrameBufferWgpu& frameBuffer = m_frameBuffers[fbh.idx];
+								width = frameBuffer.m_width;
+								height = frameBuffer.m_height;
+							}
+
+							clearWithRenderPass = true
+								&& 0 == viewRect.m_x
+								&& 0 == viewRect.m_y
+								&& width == viewRect.m_width
+								&& height == viewRect.m_height
+								;
+
+							rce = renderPass(_render, fbh, clearWithRenderPass, clr, s_viewName[view]);
+						}
+						else if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+						{
+							rce.PopDebugGroup();
+						}
+
+						if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+						{
+							rce.PushDebugGroup(s_viewName[view]);
+						}
+
+						//rce.setTriangleFillMode(wireframe ? MTLTriangleFillModeLines : MTLTriangleFillModeFill);
+
+						// TODO (webgpu) check other renderers
+						const Rect& rect = viewState.m_rect;
+						rce.SetViewport(rect.m_x, rect.m_y, rect.m_width, rect.m_height, 0.0f, 1.0f);
+
+						 // can't disable: set to view rect
+						rce.SetScissorRect(rect.m_x, rect.m_y, rect.m_width, rect.m_height);
+
+
+						if (BGFX_CLEAR_NONE != (clr.m_flags & BGFX_CLEAR_MASK)
+							&& !clearWithRenderPass)
+						{
+							clearQuad(_clearQuad, viewState.m_rect, clr, _render->m_colorPalette);
+						}
+					}
+				}
+
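+				// Compute items cannot be recorded inside a render pass: the current
+				// encoding is ended and work is recorded into a compute pass encoder.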
+				if (isCompute)
+				{
+					if (!wasCompute)
+					{
+						wasCompute = true;
+
+						endEncoding();
+						rce = nullptr;
+
+						setViewType(view, "C");
+						BGFX_WEBGPU_PROFILER_END();
+						BGFX_WEBGPU_PROFILER_BEGIN(view, kColorCompute);
+
+						m_computeEncoder = m_cmd.m_encoder.BeginComputePass();
+					}
+					else if (viewChanged)
+					{
+						if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+						{
+							m_computeEncoder.PopDebugGroup();
+						}
+
+						endEncoding();
+						m_computeEncoder = m_cmd.m_encoder.BeginComputePass();
+					}
+
+					if (viewChanged)
+					{
+						if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+						{
+							s_viewName[view][3] = L'C';
+							m_computeEncoder.PushDebugGroup(s_viewName[view]);
+							s_viewName[view][3] = L' ';
+						}
+					}
+
+					const RenderCompute& compute = renderItem.compute;
+
+					bool programChanged = false;
+					bool constantsChanged = compute.m_uniformBegin < compute.m_uniformEnd;
+					rendererUpdateUniforms(this, _render->m_uniformBuffer[compute.m_uniformIdx], compute.m_uniformBegin, compute.m_uniformEnd);
+
+					if (key.m_program.idx != currentProgram.idx)
+					{
+						currentProgram = key.m_program;
+
+						currentPso = getComputePipelineState(currentProgram);
+
+						if (NULL == currentPso)
+						{
+							currentProgram = BGFX_INVALID_HANDLE;
+							continue;
+						}
+
+						m_computeEncoder.SetPipeline(currentPso->m_cps);
+						programChanged =
+							constantsChanged = true;
+					}
+
+					if (!isValid(currentProgram)
+					||  NULL == currentPso)
+					{
+						BX_WARN(false, "Invalid program / No PSO");
+						continue;
+					}
+
+					const ProgramWgpu& program = m_program[currentProgram.idx];
+
+					if (constantsChanged)
+					{
+						UniformBuffer* vcb = program.m_vsh->m_constantBuffer;
+						if (NULL != vcb)
+						{
+							commit(*vcb);
+						}
+					}
+
+					viewState.setPredefined<4>(this, view, program, _render, compute);
+
+					uint32_t numOffset = 1;
+					uint32_t offsets[2] = { scratchBuffer.m_offset, 0 };
+					if (program.m_vsh->m_size > 0)
+					{
+						scratchBuffer.m_buffer.SetSubData(scratchBuffer.m_offset, program.m_vsh->m_gpuSize, m_vsScratch);
+						scratchBuffer.m_offset += program.m_vsh->m_gpuSize;
+					}
+
+					BindingsWgpu b;
+					BindStateWgpu& bindState = allocBindState(program, bindStates, b, scratchBuffer);
+
+					bindGroups(program, bindState, b);
+
+					bindProgram(m_computeEncoder, program, bindState, numOffset, offsets);
+
+					if (isValid(compute.m_indirectBuffer))
+					{
+						const VertexBufferWgpu& vb = m_vertexBuffers[compute.m_indirectBuffer.idx];
+						
+						uint32_t numDrawIndirect = UINT16_MAX == compute.m_numIndirect
+						? vb.m_size/BGFX_CONFIG_DRAW_INDIRECT_STRIDE
+						: compute.m_numIndirect
+						;
+						
+						uint32_t args = compute.m_startIndirect * BGFX_CONFIG_DRAW_INDIRECT_STRIDE;
+						for (uint32_t ii = 0; ii < numDrawIndirect; ++ii)
+						{
+							m_computeEncoder.DispatchIndirect(
+								  vb.m_ptr
+								, args
+								);
+							args += BGFX_CONFIG_DRAW_INDIRECT_STRIDE;
+						}
+					}
+					else
+					{
+						m_computeEncoder.Dispatch(compute.m_numX, compute.m_numY, compute.m_numZ);
+					}
+
+					continue;
+				}
+
+
+				bool resetState = viewChanged || wasCompute;
+
+				if (wasCompute)
+				{
+					wasCompute = false;
+					currentProgram = BGFX_INVALID_HANDLE;
+
+					setViewType(view, " ");
+					BGFX_WEBGPU_PROFILER_END();
+					BGFX_WEBGPU_PROFILER_BEGIN(view, kColorDraw);
+				}
+
+				const RenderDraw& draw = renderItem.draw;
+
+				// TODO (hugoam)
+				//const bool depthWrite = !!(BGFX_STATE_WRITE_Z & draw.m_stateFlags);
+				const uint64_t newFlags = draw.m_stateFlags;
+				uint64_t changedFlags = currentState.m_stateFlags ^ draw.m_stateFlags;
+				currentState.m_stateFlags = newFlags;
+
+				const uint64_t newStencil = draw.m_stencil;
+				uint64_t changedStencil = (currentState.m_stencil ^ draw.m_stencil) & BGFX_STENCIL_FUNC_REF_MASK;
+				currentState.m_stencil = newStencil;
+
+				if (resetState)
+				{
+					wasCompute = false;
+
+					currentState.clear();
+					currentState.m_scissor = !draw.m_scissor;
+					changedFlags = BGFX_STATE_MASK;
+					changedStencil = packStencil(BGFX_STENCIL_MASK, BGFX_STENCIL_MASK);
+					currentState.m_stateFlags = newFlags;
+					currentState.m_stencil = newStencil;
+
+					currentBind.clear();
+
+					currentProgram = BGFX_INVALID_HANDLE;
+					const uint64_t pt = newFlags & BGFX_STATE_PT_MASK;
+					primIndex = uint8_t(pt >> BGFX_STATE_PT_SHIFT);
+				}
+
+				if (prim.m_type != s_primInfo[primIndex].m_type)
+				{
+					prim = s_primInfo[primIndex];
+				}
+
+				uint16_t scissor = draw.m_scissor;
+				if (currentState.m_scissor != scissor)
+				{
+					currentState.m_scissor = scissor;
+
+					if (UINT16_MAX == scissor)
+					{
+						if (viewHasScissor)
+						{
+							const auto& r = viewScissorRect;
+							rce.SetScissorRect(r.m_x, r.m_y, r.m_width, r.m_height);
+						}
+						else
+						{   // can't disable: set to view rect
+							const auto& r = viewState.m_rect;
+							rce.SetScissorRect(r.m_x, r.m_y, r.m_width, r.m_height);
+						}
+					}
+					else
+					{
+						Rect scissorRect;
+						scissorRect.setIntersect(viewScissorRect, _render->m_frameCache.m_rectCache.m_cache[scissor]);
+
+						const auto& r = scissorRect;
+						rce.SetScissorRect(r.m_x, r.m_y, r.m_width, r.m_height);
+					}
+
+				}
+
+				if (0 != changedStencil)
+				{
+					const uint32_t fstencil = unpackStencil(0, draw.m_stencil);
+					const uint32_t ref = (fstencil & BGFX_STENCIL_FUNC_REF_MASK) >> BGFX_STENCIL_FUNC_REF_SHIFT;
+					rce.SetStencilReference(ref);
+				}
+
+				if ((0 | BGFX_STATE_PT_MASK) & changedFlags)
+				{
+					const uint64_t pt = newFlags & BGFX_STATE_PT_MASK;
+					primIndex = uint8_t(pt >> BGFX_STATE_PT_SHIFT);
+					if (prim.m_type != s_primInfo[primIndex].m_type)
+					{
+						prim = s_primInfo[primIndex];
+					}
+				}
+
+				if (blendFactor != draw.m_rgba
+					&& !(newFlags & BGFX_STATE_BLEND_INDEPENDENT))
+				{
+					const uint32_t rgba = draw.m_rgba;
+					float rr = ((rgba >> 24)) / 255.0f;
+					float gg = ((rgba >> 16) & 0xff) / 255.0f;
+					float bb = ((rgba >> 8) & 0xff) / 255.0f;
+					float aa = ((rgba) & 0xff) / 255.0f;
+					wgpu::Color color = { rr, gg, bb, aa };
+					rce.SetBlendColor(&color);
+
+					blendFactor = draw.m_rgba;
+				}
+
+				bool programChanged = false;
+				bool constantsChanged = draw.m_uniformBegin < draw.m_uniformEnd;
+				rendererUpdateUniforms(this, _render->m_uniformBuffer[draw.m_uniformIdx], draw.m_uniformBegin, draw.m_uniformEnd);
+
+				bool vertexStreamChanged = hasVertexStreamChanged(currentState, draw);
+
+				if (key.m_program.idx != currentProgram.idx
+					|| vertexStreamChanged
+					|| (0
+						| BGFX_STATE_BLEND_MASK
+						| BGFX_STATE_BLEND_EQUATION_MASK
+						| BGFX_STATE_WRITE_RGB
+						| BGFX_STATE_WRITE_A
+						| BGFX_STATE_BLEND_INDEPENDENT
+						| BGFX_STATE_MSAA
+						| BGFX_STATE_BLEND_ALPHA_TO_COVERAGE
+						) & changedFlags
+					|| ((blendFactor != draw.m_rgba) && !!(newFlags & BGFX_STATE_BLEND_INDEPENDENT)))
+				{
+					currentProgram = key.m_program;
+
+					currentState.m_streamMask = draw.m_streamMask;
+					currentState.m_instanceDataBuffer.idx = draw.m_instanceDataBuffer.idx;
+					currentState.m_instanceDataOffset = draw.m_instanceDataOffset;
+					currentState.m_instanceDataStride = draw.m_instanceDataStride;
+
+					const VertexLayout* decls[BGFX_CONFIG_MAX_VERTEX_STREAMS];
+
+					uint32_t numVertices = draw.m_numVertices;
+					uint8_t  numStreams = 0;
+					for (uint32_t idx = 0, streamMask = draw.m_streamMask
+						; 0 != streamMask
+						; streamMask >>= 1, idx += 1, ++numStreams
+						)
+					{
+						const uint32_t ntz = bx::uint32_cnttz(streamMask);
+						streamMask >>= ntz;
+						idx += ntz;
+
+						currentState.m_stream[idx].m_layoutHandle = draw.m_stream[idx].m_layoutHandle;
+						currentState.m_stream[idx].m_handle = draw.m_stream[idx].m_handle;
+						currentState.m_stream[idx].m_startVertex = draw.m_stream[idx].m_startVertex;
+
+						const uint16_t handle = draw.m_stream[idx].m_handle.idx;
+						const VertexBufferWgpu& vb = m_vertexBuffers[handle];
+						const uint16_t decl = isValid(draw.m_stream[idx].m_layoutHandle)
+							? draw.m_stream[idx].m_layoutHandle.idx
+							: vb.m_layoutHandle.idx;
+						const VertexLayout& vertexDecl = m_vertexDecls[decl];
+						const uint32_t stride = vertexDecl.m_stride;
+
+						decls[numStreams] = &vertexDecl;
+
+						numVertices = bx::uint32_min(UINT32_MAX == draw.m_numVertices
+							? vb.m_size / stride
+							: draw.m_numVertices
+							, numVertices
+						);
+						const uint32_t offset = draw.m_stream[idx].m_startVertex * stride;
+
+						rce.SetVertexBuffer(idx, vb.m_ptr, offset);
+					}
+
+					bool index32 = false;
+
+					if (isValid(draw.m_indexBuffer))
+					{
+						const IndexBufferWgpu& ib = m_indexBuffers[draw.m_indexBuffer.idx];
+						index32 = 0 != (ib.m_flags & BGFX_BUFFER_INDEX32);
+					}
+
+					currentState.m_numVertices = numVertices;
+
+					if (!isValid(currentProgram))
+					{
+						continue;
+					}
+					else
+					{
+						currentPso = NULL;
+
+						if (0 < numStreams)
+						{
+							currentPso = getPipelineState(
+								newFlags
+								, newStencil
+								, draw.m_rgba
+								, fbh
+								, numStreams
+								, decls
+								, index32
+								, currentProgram
+								, uint8_t(draw.m_instanceDataStride / 16)
+							);
+						}
+
+						if (NULL == currentPso)
+						{
+							currentProgram = BGFX_INVALID_HANDLE;
+							continue;
+						}
+
+						rce.SetPipeline(currentPso->m_rps);
+					}
+
+					if (isValid(draw.m_instanceDataBuffer))
+					{
+						const VertexBufferWgpu& inst = m_vertexBuffers[draw.m_instanceDataBuffer.idx];
+						rce.SetVertexBuffer(numStreams/*+1*/, inst.m_ptr, draw.m_instanceDataOffset);
+					}
+
+					programChanged =
+						constantsChanged = true;
+				}
+
+				if (isValid(currentProgram))
+				{
+					const ProgramWgpu& program = m_program[currentProgram.idx];
+
+					if (constantsChanged)
+					{
+						UniformBuffer* vcb = program.m_vsh->m_constantBuffer;
+						if (NULL != vcb)
+						{
+							commit(*vcb);
+						}
+					}
+
+					if (constantsChanged)
+					{
+						UniformBuffer* fcb = program.m_fsh->m_constantBuffer;
+						if (NULL != fcb)
+						{
+							commit(*fcb);
+						}
+					}
+
+					viewState.setPredefined<4>(this, view, program, _render, draw);
+
+					bool hasPredefined = 0 < program.m_numPredefined;
+
+					uint32_t numOffset = 0;
+					uint32_t offsets[2] = { 0, 0 };
+					if (constantsChanged
+						|| hasPredefined)
+					{
+						//viewState.setPredefined<4>(this, view, program, _render, draw, programChanged || viewChanged);
+						//commitShaderConstants(scratchBuffer, program, voffset, foffset);
+
+						const uint32_t vsize = program.m_vsh->m_gpuSize;
+						const uint32_t fsize = (NULL != program.m_fsh ? program.m_fsh->m_gpuSize : 0);
+
+						if (program.m_vsh->m_size > 0)
+						{
+							offsets[numOffset++] = scratchBuffer.m_offset;
+							scratchBuffer.m_buffer.SetSubData(scratchBuffer.m_offset, vsize, m_vsScratch);
+						}
+						if (fsize > 0)
+						{
+							offsets[numOffset++] = scratchBuffer.m_offset + vsize;
+							scratchBuffer.m_buffer.SetSubData(scratchBuffer.m_offset + vsize, fsize, m_fsScratch);
+						}
+
+						scratchBuffer.m_offset += program.m_gpuSize;
+					}
+
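+					// Bind groups are cached: a new bind state is allocated only when
+					// the hash of the bound resources or the program's bind group
+					// layout hash changes.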
+					uint32_t bindHash = bx::hash<bx::HashMurmur2A>(renderBind.m_bind, sizeof(renderBind.m_bind));
+					if (currentBindHash != bindHash
+					||  currentBindLayoutHash != program.m_bindGroupLayoutHash)
+					{
+						currentBindHash = bindHash;
+						currentBindLayoutHash = program.m_bindGroupLayoutHash;
+						previousBindState = &bindStates.m_bindStates[bindStates.m_currentBindState];
+
+						allocBindState(program, bindStates, scratchBuffer, renderBind);
+					}
+
+					BindStateWgpu& bindState = bindStates.m_bindStates[bindStates.m_currentBindState-1];
+
+					bindProgram(rce, program, bindState, numOffset, offsets);
+				}
+
+				if (0 != currentState.m_streamMask)
+				{
+					uint32_t numVertices = draw.m_numVertices;
+					if (UINT32_MAX == numVertices)
+					{
+						const VertexBufferWgpu& vb = m_vertexBuffers[currentState.m_stream[0].m_handle.idx];
+						uint16_t decl = !isValid(vb.m_layoutHandle) ? draw.m_stream[0].m_layoutHandle.idx : vb.m_layoutHandle.idx;
+						const VertexLayout& vertexDecl = m_vertexDecls[decl];
+						numVertices = vb.m_size/vertexDecl.m_stride;
+					}
+
+					uint32_t numIndices        = 0;
+					uint32_t numPrimsSubmitted = 0;
+					uint32_t numInstances      = 0;
+					uint32_t numPrimsRendered  = 0;
+					uint32_t numDrawIndirect   = 0;
+
+					if (isValid(draw.m_indirectBuffer) )
+					{
+						const VertexBufferWgpu& vb = m_vertexBuffers[draw.m_indirectBuffer.idx];
+
+						if (isValid(draw.m_indexBuffer) )
+						{
+							const IndexBufferWgpu& ib = m_indexBuffers[draw.m_indexBuffer.idx];
+
+							numDrawIndirect = UINT16_MAX == draw.m_numIndirect
+							? vb.m_size/BGFX_CONFIG_DRAW_INDIRECT_STRIDE
+							: draw.m_numIndirect
+							;
+
+							for (uint32_t ii = 0; ii < numDrawIndirect; ++ii)
+							{
+								rce.SetIndexBuffer(ib.m_ptr, 0);
+								rce.DrawIndexedIndirect(vb.m_ptr, (draw.m_startIndirect + ii)* BGFX_CONFIG_DRAW_INDIRECT_STRIDE);
+							}
+						}
+						else
+						{
+							numDrawIndirect = UINT16_MAX == draw.m_numIndirect
+							? vb.m_size/BGFX_CONFIG_DRAW_INDIRECT_STRIDE
+							: draw.m_numIndirect
+							;
+							for (uint32_t ii = 0; ii < numDrawIndirect; ++ii)
+							{
+								rce.DrawIndirect(vb.m_ptr, (draw.m_startIndirect + ii)* BGFX_CONFIG_DRAW_INDIRECT_STRIDE);
+							}
+						}
+					}
+					else
+					{
+						if (isValid(draw.m_indexBuffer) )
+						{
+							const IndexBufferWgpu& ib = m_indexBuffers[draw.m_indexBuffer.idx];
+
+							const uint32_t indexSize = 0 == (ib.m_flags & BGFX_BUFFER_INDEX32) ? 2 : 4;
+
+							if (UINT32_MAX == draw.m_numIndices)
+							{
+								numIndices        = ib.m_size/indexSize;
+								numPrimsSubmitted = numIndices/prim.m_div - prim.m_sub;
+								numInstances      = draw.m_numInstances;
+								numPrimsRendered  = numPrimsSubmitted*draw.m_numInstances;
+
+								rce.SetIndexBuffer(ib.m_ptr, 0);
+								rce.DrawIndexed(numIndices, draw.m_numInstances, 0, 0, 0);
+							}
+							else if (prim.m_min <= draw.m_numIndices)
+							{
+								numIndices        = draw.m_numIndices;
+								numPrimsSubmitted = numIndices/prim.m_div - prim.m_sub;
+								numInstances      = draw.m_numInstances;
+								numPrimsRendered  = numPrimsSubmitted*draw.m_numInstances;
+
+								rce.SetIndexBuffer(ib.m_ptr, 0);
+								rce.DrawIndexed(numIndices, numInstances, draw.m_startIndex, 0, 0);
+							}
+						}
+						else
+						{
+							numPrimsSubmitted = numVertices/prim.m_div - prim.m_sub;
+							numInstances      = draw.m_numInstances;
+							numPrimsRendered  = numPrimsSubmitted*draw.m_numInstances;
+
+							rce.Draw(numVertices, draw.m_numInstances, 0, 0);
+						}
+					}
+
+					statsNumPrimsSubmitted[primIndex] += numPrimsSubmitted;
+					statsNumPrimsRendered[primIndex]  += numPrimsRendered;
+					statsNumInstances[primIndex]      += numInstances;
+					statsNumDrawIndirect[primIndex]   += numDrawIndirect;
+					statsNumIndices                   += numIndices;
+				}
+			}
+
+			if (wasCompute)
+			{
+				invalidateCompute();
+
+				setViewType(view, "C");
+				BGFX_WEBGPU_PROFILER_END();
+				BGFX_WEBGPU_PROFILER_BEGIN(view, kColorCompute);
+			}
+
+			submitBlit(bs, BGFX_CONFIG_MAX_VIEWS);
+
+			if (0 < _render->m_numRenderItems)
+			{
+				captureElapsed = -bx::getHPCounter();
+				capture();
+				rce = m_renderEncoder;
+				captureElapsed += bx::getHPCounter();
+
+				profiler.end();
+			}
+		}
+
+		if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION) )
+		{
+			if (0 < _render->m_numRenderItems)
+			{
+				rce.PopDebugGroup();
+			}
+		}
+
+		BGFX_WEBGPU_PROFILER_END();
+
+		int64_t timeEnd = bx::getHPCounter();
+		int64_t frameTime = timeEnd - timeBegin;
+
+		static int64_t min = frameTime;
+		static int64_t max = frameTime;
+		min = bx::min<int64_t>(min, frameTime);
+		max = bx::max<int64_t>(max, frameTime);
+
+		static uint32_t maxGpuLatency = 0;
+		static double   maxGpuElapsed = 0.0f;
+		double elapsedGpuMs = 0.0;
+
+		do
+		{
+			double toGpuMs = 1000.0 / double(m_gpuTimer.m_frequency);
+			elapsedGpuMs   = m_gpuTimer.m_elapsed * toGpuMs;
+			maxGpuElapsed  = elapsedGpuMs > maxGpuElapsed ? elapsedGpuMs : maxGpuElapsed;
+		}
+		while (m_gpuTimer.get() );
+
+		maxGpuLatency = bx::uint32_imax(maxGpuLatency, m_gpuTimer.m_control.available()-1);
+
+		const int64_t timerFreq = bx::getHPFrequency();
+
+		Stats& perfStats = _render->m_perfStats;
+		perfStats.cpuTimeBegin  = timeBegin;
+		perfStats.cpuTimeEnd    = timeEnd;
+		perfStats.cpuTimerFreq  = timerFreq;
+		perfStats.gpuTimeBegin  = m_gpuTimer.m_begin;
+		perfStats.gpuTimeEnd    = m_gpuTimer.m_end;
+		perfStats.gpuTimerFreq  = m_gpuTimer.m_frequency;
+		perfStats.numDraw       = statsKeyType[0];
+		perfStats.numCompute    = statsKeyType[1];
+		perfStats.numBlit       = _render->m_numBlitItems;
+		perfStats.maxGpuLatency = maxGpuLatency;
+		bx::memCopy(perfStats.numPrims, statsNumPrimsRendered, sizeof(perfStats.numPrims) );
+		perfStats.gpuMemoryMax  = -INT64_MAX;
+		perfStats.gpuMemoryUsed = -INT64_MAX;
+
+		//rce.setTriangleFillMode(MTLTriangleFillModeFill);
+		if (_render->m_debug & (BGFX_DEBUG_IFH|BGFX_DEBUG_STATS) )
+		{
+			rce = renderPass(_render, BGFX_INVALID_HANDLE, false, Clear());
+
+			if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+			{
+				rce.PushDebugGroup("debugstats");
+			}
+
+			TextVideoMem& tvm = m_textVideoMem;
+
+			static int64_t next = timeEnd;
+
+			if (timeEnd >= next)
+			{
+				next = timeEnd + timerFreq;
+
+				double freq = double(timerFreq);
+				double toMs = 1000.0/freq;
+
+				tvm.clear();
+				uint16_t pos = 0;
+				tvm.printf(0, pos++, BGFX_CONFIG_DEBUG ? 0x8c : 0x8f
+					, " %s / " BX_COMPILER_NAME " / " BX_CPU_NAME " / " BX_ARCH_NAME " / " BX_PLATFORM_NAME " "
+					, getRendererName()
+					);
+
+				pos = 10;
+				tvm.printf(10, pos++, 0x8b, "        Frame: %7.3f, % 7.3f \x1f, % 7.3f \x1e [ms] / % 6.2f FPS "
+					, double(frameTime)*toMs
+					, double(min)*toMs
+					, double(max)*toMs
+					, freq/frameTime
+					);
+
+				const uint32_t msaa = (m_resolution.reset&BGFX_RESET_MSAA_MASK)>>BGFX_RESET_MSAA_SHIFT;
+				tvm.printf(10, pos++, 0x8b, "  Reset flags: [%c] vsync, [%c] MSAAx%d, [%c] MaxAnisotropy "
+					, !!(m_resolution.reset&BGFX_RESET_VSYNC) ? '\xfe' : ' '
+					, 0 != msaa ? '\xfe' : ' '
+					, 1<<msaa
+					, !!(m_resolution.reset&BGFX_RESET_MAXANISOTROPY) ? '\xfe' : ' '
+					);
+
+				double elapsedCpuMs = double(frameTime)*toMs;
+				tvm.printf(10, pos++, 0x8b, "    Submitted: %4d (draw %4d, compute %4d) / CPU %3.4f [ms] %c GPU %3.4f [ms] (latency %d)"
+					, _render->m_numRenderItems
+					, statsKeyType[0]
+					, statsKeyType[1]
+					, elapsedCpuMs
+					, elapsedCpuMs > maxGpuElapsed ? '>' : '<'
+					, maxGpuElapsed
+					, maxGpuLatency
+					);
+				maxGpuLatency = 0;
+				maxGpuElapsed = 0.0;
+
+				for (uint32_t ii = 0; ii < Topology::Count; ++ii)
+				{
+					tvm.printf(10, pos++, 0x8b, "   %10s: %7d (#inst: %5d), submitted: %7d"
+						, getName(Topology::Enum(ii) )
+						, statsNumPrimsRendered[ii]
+						, statsNumInstances[ii]
+						, statsNumPrimsSubmitted[ii]
+						);
+				}
+
+				tvm.printf(10, pos++, 0x8b, "      Indices: %7d ", statsNumIndices);
+//				tvm.printf(10, pos++, 0x8b, " Uniform size: %7d, Max: %7d ", _render->m_uniformEnd, _render->m_uniformMax);
+				tvm.printf(10, pos++, 0x8b, "     DVB size: %7d ", _render->m_vboffset);
+				tvm.printf(10, pos++, 0x8b, "     DIB size: %7d ", _render->m_iboffset);
+
+				pos++;
+				double captureMs = double(captureElapsed)*toMs;
+				tvm.printf(10, pos++, 0x8b, "     Capture: %3.4f [ms]", captureMs);
+
+				uint8_t attr[2] = { 0x8c, 0x8a };
+				uint8_t attrIndex = _render->m_waitSubmit < _render->m_waitRender;
+
+				tvm.printf(10, pos++, attr[attrIndex    &1], " Submit wait: %3.4f [ms]", _render->m_waitSubmit*toMs);
+				tvm.printf(10, pos++, attr[(attrIndex+1)&1], " Render wait: %3.4f [ms]", _render->m_waitRender*toMs);
+
+				min = frameTime;
+				max = frameTime;
+			}
+
+			blit(this, _textVideoMemBlitter, tvm);
+			rce = m_renderEncoder;
+
+			if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+			{
+				rce.PopDebugGroup();
+			}
+		}
+		else if (_render->m_debug & BGFX_DEBUG_TEXT)
+		{
+			if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+			{
+				rce.PushDebugGroup("debugtext");
+			}
+
+			blit(this, _textVideoMemBlitter, _render->m_textVideoMem);
+			rce = m_renderEncoder;
+
+			if (BX_ENABLED(BGFX_CONFIG_DEBUG_ANNOTATION))
+			{
+				rce.PopDebugGroup();
+			}
+		}
+
+		endEncoding();
+
+		m_cmd.kick(true);
+
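+		// Present the main and per-window swap chains on native builds; on
+		// Emscripten this is skipped and the browser presents the canvas.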
+#if !BX_PLATFORM_EMSCRIPTEN
+		for (uint32_t ii = 0, num = m_numWindows; ii < num; ++ii)
+		{
+			FrameBufferWgpu& frameBuffer = ii == 0 ? m_mainFrameBuffer : m_frameBuffers[m_windows[ii].idx];
+			if (NULL != frameBuffer.m_swapChain
+			&& frameBuffer.m_swapChain->m_drawable)
+			{
+				SwapChainWgpu& swapChain = *frameBuffer.m_swapChain;
+				swapChain.m_swapChain.Present();
+			}
+		}
+#endif
+	}
+
+} /* namespace webgpu */ } // namespace bgfx
+
+#else
+
+namespace bgfx { namespace webgpu
+	{
+		RendererContextI* rendererCreate(const Init& _init)
+		{
+			BX_UNUSED(_init);
+			return NULL;
+		}
+
+		void rendererDestroy()
+		{
+		}
+	} /* namespace webgpu */ } // namespace bgfx
+
+#endif // BGFX_CONFIG_RENDERER_WEBGPU

+ 568 - 0
src/renderer_webgpu.h

@@ -0,0 +1,568 @@
+/*
+ * Copyright 2011-2019 Branimir Karadzic. All rights reserved.
+ * License: https://github.com/bkaradzic/bgfx#license-bsd-2-clause
+ */
+
+#ifndef BGFX_RENDERER_WEBGPU_H_HEADER_GUARD
+#define BGFX_RENDERER_WEBGPU_H_HEADER_GUARD
+
+#include "bgfx_p.h"
+
+#if BGFX_CONFIG_RENDERER_WEBGPU
+
+#if !BX_PLATFORM_EMSCRIPTEN
+#include <dawn/webgpu_cpp.h>
+#include <dawn/dawn_wsi.h>
+#else
+#include <webgpu/webgpu_cpp.h>
+#endif
+
+#define BGFX_WEBGPU_PROFILER_BEGIN(_view, _abgr)         \
+	BX_MACRO_BLOCK_BEGIN                              \
+		BGFX_PROFILER_BEGIN(s_viewName[_view], _abgr); \
+	BX_MACRO_BLOCK_END
+
+#define BGFX_WEBGPU_PROFILER_BEGIN_LITERAL(_name, _abgr)   \
+	BX_MACRO_BLOCK_BEGIN                                \
+		BGFX_PROFILER_BEGIN_LITERAL("" # _name, _abgr); \
+	BX_MACRO_BLOCK_END
+
+#define BGFX_WEBGPU_PROFILER_END() \
+	BX_MACRO_BLOCK_BEGIN        \
+		BGFX_PROFILER_END();    \
+	BX_MACRO_BLOCK_END
+
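+// WEBGPU_MAX_FRAMES_IN_FLIGHT sizes the deferred-release ring in CommandQueueWgpu,
+// keeping buffers alive for the frames that may still reference them on the GPU.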
+#define WEBGPU_MAX_FRAMES_IN_FLIGHT 3
+#define WEBGPU_NUM_UNIFORM_BUFFERS 8
+
+namespace bgfx {
+	namespace webgpu
+	{
+
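+		// Generic cache keyed by a 64-bit hash; entries are passed to release()
+		// when invalidated or when the whole cache is cleared.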
+		template <typename Ty>
+		class StateCacheT
+		{
+		public:
+			void add(uint64_t _id, Ty _item)
+			{
+				invalidate(_id);
+				m_hashMap.insert(stl::make_pair(_id, _item));
+			}
+
+			Ty find(uint64_t _id)
+			{
+				typename HashMap::iterator it = m_hashMap.find(_id);
+				if(it != m_hashMap.end())
+				{
+					return it->second;
+				}
+
+				return NULL;
+			}
+
+			void invalidate(uint64_t _id)
+			{
+				typename HashMap::iterator it = m_hashMap.find(_id);
+				if(it != m_hashMap.end())
+				{
+					release(it->second);
+					m_hashMap.erase(it);
+				}
+			}
+
+			void invalidate()
+			{
+				for(typename HashMap::iterator it = m_hashMap.begin(), itEnd = m_hashMap.end(); it != itEnd; ++it)
+				{
+					release(it->second);
+				}
+
+				m_hashMap.clear();
+			}
+
+			uint32_t getCount() const
+			{
+				return uint32_t(m_hashMap.size());
+			}
+
+		private:
+			typedef stl::unordered_map<uint64_t, Ty> HashMap;
+			HashMap m_hashMap;
+		};
+
+		struct BufferWgpu
+		{
+			void create(uint32_t _size, void* _data, uint16_t _flags, uint16_t _stride = 0, bool _vertex = false);
+			void update(uint32_t _offset, uint32_t _size, void* _data, bool _discard = false);
+
+			void destroy()
+			{
+				m_ptr.Destroy();
+
+				if(NULL != m_dynamic)
+				{
+					BX_DELETE(g_allocator, m_dynamic);
+					m_dynamic = NULL;
+				}
+			}
+
+			uint32_t m_size;
+			uint16_t m_flags = BGFX_BUFFER_NONE;
+			bool     m_vertex;
+
+			String       m_label;
+			wgpu::Buffer m_ptr;
+			uint8_t*     m_dynamic = NULL;
+		};
+
+		typedef BufferWgpu IndexBufferWgpu;
+
+		struct VertexBufferWgpu : public BufferWgpu
+		{
+			void create(uint32_t _size, void* _data, VertexLayoutHandle _declHandle, uint16_t _flags);
+
+			VertexLayoutHandle m_layoutHandle;
+		};
+
+		struct BindInfo
+		{
+			uint32_t      m_index = UINT32_MAX;
+			uint32_t      m_binding = UINT32_MAX;
+			UniformHandle m_uniform = BGFX_INVALID_HANDLE;
+		};
+
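+		// One compiled shader stage: the wgpu::ShaderModule, its code blob, and the sampler/texture/buffer bind group layout entries reflected from it.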
+		struct ShaderWgpu
+		{
+			void create(ShaderHandle _handle, const Memory* _mem);
+			void destroy()
+			{
+				if (NULL != m_constantBuffer)
+				{
+					UniformBuffer::destroy(m_constantBuffer);
+					m_constantBuffer = NULL;
+				}
+
+				m_module = nullptr;
+			}
+
+			const char* name() const { return getName(m_handle); }
+
+			ShaderHandle m_handle;
+			String m_label;
+
+			wgpu::ShaderStage m_stage;
+			wgpu::ShaderModule m_module;
+
+			uint32_t* m_code = nullptr;
+			size_t m_codeSize = 0;
+
+			UniformBuffer* m_constantBuffer = nullptr;
+
+			PredefinedUniform m_predefined[PredefinedUniform::Count];
+			uint16_t m_attrMask[Attrib::Count];
+			uint8_t m_attrRemap[Attrib::Count];
+
+			uint32_t m_hash = 0;
+			uint16_t m_numUniforms = 0;
+			uint16_t m_size = 0;
+			uint16_t m_gpuSize = 0;
+			uint8_t m_numPredefined = 0;
+			uint8_t m_numAttrs = 0;
+
+			BindInfo					m_bindInfo[BGFX_CONFIG_MAX_TEXTURE_SAMPLERS];
+			wgpu::BindGroupLayoutEntry	m_samplers[BGFX_CONFIG_MAX_TEXTURE_SAMPLERS];
+			wgpu::BindGroupLayoutEntry	m_textures[BGFX_CONFIG_MAX_TEXTURE_SAMPLERS];
+			uint8_t						m_numSamplers = 0;
+			wgpu::BindGroupLayoutEntry	m_buffers[BGFX_CONFIG_MAX_TEXTURE_SAMPLERS];
+			uint32_t					m_numBuffers = 0;
+		};
+
+		struct PipelineStateWgpu;
+
+		struct ProgramWgpu
+		{
+			void create(const ShaderWgpu* _vsh, const ShaderWgpu* _fsh);
+			void destroy();
+
+			const ShaderWgpu* m_vsh = NULL;
+			const ShaderWgpu* m_fsh = NULL;
+
+			PredefinedUniform m_predefined[PredefinedUniform::Count * 2];
+			uint8_t m_numPredefined;
+
+			PipelineStateWgpu* m_computePS = NULL;
+
+			wgpu::BindGroupLayout m_bindGroupLayout;
+			uint16_t              m_gpuSize = 0;
+			uint32_t              m_numUniforms;
+			uint32_t              m_bindGroupLayoutHash;
+
+			BindInfo                   m_bindInfo[BGFX_CONFIG_MAX_TEXTURE_SAMPLERS];
+			wgpu::BindGroupLayoutEntry m_samplers[BGFX_CONFIG_MAX_TEXTURE_SAMPLERS];
+			wgpu::BindGroupLayoutEntry m_textures[BGFX_CONFIG_MAX_TEXTURE_SAMPLERS];
+			uint32_t                   m_numSamplers = 0;
+			wgpu::BindGroupLayoutEntry m_buffers[BGFX_CONFIG_MAX_TEXTURE_SAMPLERS];
+			uint32_t                   m_numBuffers = 0;
+		};
+
+		constexpr size_t kMaxVertexInputs = 16;
+		constexpr size_t kMaxVertexAttributes = 16;
+		constexpr size_t kMaxColorAttachments = BGFX_CONFIG_MAX_FRAME_BUFFER_ATTACHMENTS;
+
+		constexpr uint32_t kMinBufferOffsetAlignment = 256;
+
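+		// The wgpu descriptor structs only point at their attachment/attribute arrays, so these wrappers keep each descriptor and its backing storage together.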
+		struct RenderPassDescriptor
+		{
+			RenderPassDescriptor();
+
+			wgpu::RenderPassDescriptor desc;
+
+			wgpu::RenderPassColorAttachmentDescriptor colorAttachments[kMaxColorAttachments];
+			wgpu::RenderPassDepthStencilAttachmentDescriptor depthStencilAttachment;
+		};
+
+		struct VertexStateDescriptor
+		{
+			VertexStateDescriptor();
+
+			wgpu::VertexStateDescriptor desc;
+
+			wgpu::VertexBufferLayoutDescriptor vertexBuffers[kMaxVertexInputs];
+			wgpu::VertexAttributeDescriptor attributes[kMaxVertexAttributes];
+		};
+
+		struct RenderPipelineDescriptor
+		{
+			RenderPipelineDescriptor();
+
+			wgpu::RenderPipelineDescriptor desc;
+
+			//wgpu::ProgrammableStageDescriptor vertexStage;
+			wgpu::ProgrammableStageDescriptor fragmentStage;
+
+			wgpu::VertexStateDescriptor inputState;
+
+			wgpu::RasterizationStateDescriptor rasterizationState;
+			wgpu::DepthStencilStateDescriptor depthStencilState;
+			wgpu::ColorStateDescriptor colorStates[kMaxColorAttachments];
+		};
+
+		struct BindingsWgpu
+		{
+			uint32_t numEntries = 0;
+			wgpu::BindGroupEntry m_entries[2 + BGFX_CONFIG_MAX_TEXTURE_SAMPLERS*3];
+		};
+
+		struct BindStateWgpu
+		{
+			void clear();
+
+			uint32_t numOffset;
+
+			wgpu::BindGroup m_bindGroup;
+		};
+
+		struct RenderPassStateWgpu
+		{
+			RenderPassDescriptor m_rpd;
+		};
+
+		struct PipelineStateWgpu
+		{
+			RenderPipelineDescriptor m_rpd;
+
+			wgpu::PipelineLayout m_layout;
+
+			wgpu::RenderPipeline m_rps;
+			wgpu::ComputePipeline m_cps;
+		};
+
+		void release(RenderPassStateWgpu* _ptr)
+		{
+			BX_DELETE(g_allocator, _ptr);
+		}
+
+		void release(PipelineStateWgpu* _ptr)
+		{
+			BX_DELETE(g_allocator, _ptr);
+		}
+
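+		// Upload helpers: StagingBufferWgpu owns a mappable buffer, while ScratchBufferWgpu hands out offsets into it via write() and flushes the frame's data with submit().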
+		class StagingBufferWgpu
+		{
+		public:
+			void create(uint32_t _size, bool mapped);
+			void map();
+			void unmap();
+			void destroy();
+
+			void mapped(void* _data, uint64_t _size);
+
+			wgpu::Buffer m_buffer;
+			void* m_data = nullptr;
+			uint64_t m_size = 0;
+		};
+
+		class ScratchBufferWgpu
+		{
+		public:
+			void create(uint32_t _size); // , uint32_t _maxBindGroups);
+			void destroy();
+			void begin();
+			uint32_t write(void* data, uint64_t _size, uint64_t _offset);
+			uint32_t write(void* data, uint64_t _size);
+			void submit();
+			void release();
+
+			StagingBufferWgpu* m_staging = nullptr;
+			wgpu::Buffer m_stagingAlloc;
+			wgpu::Buffer m_buffer;
+			uint32_t m_offset;
+			uint32_t m_size;
+		};
+
+		class BindStateCacheWgpu
+		{
+		public:
+			void create(); // , uint32_t _maxBindGroups);
+			void destroy();
+			void reset();
+
+			BindStateWgpu m_bindStates[1024] = {};
+			uint32_t m_currentBindState;
+			//uint32_t m_maxBindStates;
+		};
+
+		struct ReadbackWgpu
+		{
+			void create(TextureHandle _texture) { m_texture = _texture; }
+
+			void destroy()
+			{
+				m_buffer.Destroy();
+			}
+
+			void readback(void const* data, uint64_t size)
+			{
+				bx::memCopy(m_data, data, m_size < size ? m_size : size);
+				m_buffer.Unmap();
+				m_mapped = false;
+			}
+
+			TextureHandle m_texture;
+			wgpu::Buffer m_buffer;
+			uint32_t m_mip = 0;
+			bool m_mapped = false;
+			void* m_data = NULL;
+			size_t m_size = 0;
+		};
+
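+		// Texture resource with optional MSAA storage (m_ptrMsaa); views for individual mips are returned by getTextureMipLevel() and kept in m_ptrMips.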
+		struct TextureWgpu
+		{
+			enum Enum
+			{
+				Texture2D,
+				Texture3D,
+				TextureCube,
+			};
+
+			void create(TextureHandle _handle, const Memory* _mem, uint64_t _flags, uint8_t _skip);
+
+			void destroy()
+			{
+				m_ptr.Destroy();
+			}
+
+			void update(
+				uint8_t _side
+				, uint8_t _mip
+				, const Rect& _rect
+				, uint16_t _z
+				, uint16_t _depth
+				, uint16_t _pitch
+				, const Memory* _mem
+			);
+
+			TextureHandle m_handle;
+			String m_label;
+
+			wgpu::TextureView m_view;
+			wgpu::TextureView getTextureMipLevel(int _mip);
+
+			wgpu::Texture m_ptr;
+			wgpu::Texture m_ptrMsaa;
+			wgpu::TextureView m_ptrMips[14] = {};
+			wgpu::Sampler m_sampler;
+			uint64_t m_flags = 0;
+			uint32_t m_width = 0;
+			uint32_t m_height = 0;
+			uint32_t m_depth = 0;
+			uint8_t m_type;
+			TextureFormat::Enum m_requestedFormat;
+			TextureFormat::Enum m_textureFormat;
+			uint8_t m_numMips = 0;
+			uint8_t m_numLayers;
+			uint32_t m_numSides;
+			uint8_t m_sampleCount;
+
+			ReadbackWgpu m_readback;
+		};
+
+		struct SamplerStateWgpu
+		{
+			wgpu::Sampler m_sampler;
+		};
+
+		void release(SamplerStateWgpu* _ptr)
+		{
+			BX_DELETE(g_allocator, _ptr);
+		}
+
+		struct FrameBufferWgpu;
+
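+		// Swap chain wrapper; native builds drive it through Dawn's DawnSwapChainImplementation, while Emscripten keeps only the wgpu::SwapChain itself.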
+		struct SwapChainWgpu
+		{
+			void init(wgpu::Device _device, void* _nwh, uint32_t _width, uint32_t _height);
+			void resize(FrameBufferWgpu& _frameBuffer, uint32_t _width, uint32_t _height, uint32_t _flags);
+
+			void flip();
+
+			wgpu::TextureView current();
+
+#if !BX_PLATFORM_EMSCRIPTEN
+			DawnSwapChainImplementation m_impl;
+#endif
+
+			wgpu::SwapChain m_swapChain;
+
+			wgpu::TextureView m_drawable;
+
+			wgpu::Texture m_backBufferColorMsaa;
+			wgpu::Texture m_backBufferDepth;
+
+			wgpu::TextureFormat m_colorFormat;
+			wgpu::TextureFormat m_depthFormat;
+
+			uint32_t m_maxAnisotropy = 0;
+			uint8_t m_sampleCount;
+		};
+
+		struct FrameBufferWgpu
+		{
+			void create(uint8_t _num, const Attachment* _attachment);
+			bool create(
+				uint16_t _denseIdx
+				, void* _nwh
+				, uint32_t _width
+				, uint32_t _height
+				, TextureFormat::Enum _format
+				, TextureFormat::Enum _depthFormat
+			);
+			void postReset();
+			uint16_t destroy();
+
+			SwapChainWgpu* m_swapChain = NULL;
+			void* m_nwh = NULL;
+			uint32_t m_width;
+			uint32_t m_height;
+			uint16_t m_denseIdx = UINT16_MAX;
+
+			uint32_t m_pixelFormatHash = 0;
+
+			TextureHandle m_colorHandle[BGFX_CONFIG_MAX_FRAME_BUFFER_ATTACHMENTS - 1];
+			TextureHandle m_depthHandle = { kInvalidHandle };
+			Attachment m_colorAttachment[BGFX_CONFIG_MAX_FRAME_BUFFER_ATTACHMENTS - 1];
+			Attachment m_depthAttachment;
+			uint8_t m_num = 0; // number of color handles
+		};
+
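+		// Queue/encoder wrapper; buffers handed to release() are parked per frame in m_release and destroyed later by consume().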
+		struct CommandQueueWgpu
+		{
+			void init(wgpu::Queue _queue);
+			void shutdown();
+			void begin();
+			void kick(bool _endFrame, bool _waitForFinish = false);
+			void finish(bool _finishAll = false);
+			void release(wgpu::Buffer _buffer);
+			void consume();
+
+#if BGFX_CONFIG_MULTITHREADED
+			//bx::Semaphore 		 m_framesSemaphore;
+#endif
+
+			wgpu::Queue		     m_queue;
+			wgpu::CommandEncoder m_encoder;
+
+			int m_releaseWriteIndex = 0;
+			int m_releaseReadIndex = 0;
+
+			typedef stl::vector<wgpu::Buffer> ResourceArray;
+			ResourceArray m_release[WEBGPU_MAX_FRAMES_IN_FLIGHT];
+		};
+
+		struct TimerQueryWgpu
+		{
+			TimerQueryWgpu()
+				: m_control(4)
+			{
+			}
+
+			void init();
+			void shutdown();
+			uint32_t begin(uint32_t _resultIdx);
+			void end(uint32_t _idx);
+			void addHandlers(wgpu::CommandBuffer& _commandBuffer);
+			bool get();
+
+			struct Result
+			{
+				void reset()
+				{
+					m_begin = 0;
+					m_end = 0;
+					m_pending = 0;
+				}
+
+				uint64_t m_begin;
+				uint64_t m_end;
+				uint32_t m_pending;
+			};
+
+			uint64_t m_begin;
+			uint64_t m_end;
+			uint64_t m_elapsed;
+			uint64_t m_frequency;
+
+			Result m_result[4 * 2];
+			bx::RingBufferControl m_control;
+		};
+
+		struct OcclusionQueryWgpu
+		{
+			OcclusionQueryWgpu()
+				: m_control(BX_COUNTOF(m_query))
+			{
+			}
+
+			void postReset();
+			void preReset();
+			void begin(wgpu::RenderPassEncoder& _rce, Frame* _render, OcclusionQueryHandle _handle);
+			void end(wgpu::RenderPassEncoder& _rce);
+			void resolve(Frame* _render, bool _wait = false);
+			void invalidate(OcclusionQueryHandle _handle);
+
+			struct Query
+			{
+				OcclusionQueryHandle m_handle;
+			};
+
+			wgpu::Buffer m_buffer;
+			Query m_query[BGFX_CONFIG_MAX_OCCLUSION_QUERIES];
+			bx::RingBufferControl m_control;
+		};
+
+	} /* namespace webgpu */
+} // namespace bgfx
+
+#endif // BGFX_CONFIG_RENDERER_WEBGPU
+
+#endif // BGFX_RENDERER_WEBGPU_H_HEADER_GUARD

+ 6 - 0
src/shader.cpp

@@ -120,6 +120,12 @@ namespace bgfx
 
 				uint16_t regCount;
 				bx::read(_reader, regCount, _err);
+
+				if (!isShaderVerLess(magic, 8) )
+				{
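+					// Shader binary version 8 appends packed texComponent/texDimension info per uniform; read it here to keep the stream in sync.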
+					uint16_t texInfo;
+					bx::read(_reader, texInfo, _err);
+				}
 			}
 
 			uint32_t shaderSize;

+ 1 - 0
src/vertexlayout.cpp

@@ -53,6 +53,7 @@ namespace bgfx
 		&s_attribTypeSizeGl,    // OpenGLES
 		&s_attribTypeSizeGl,    // OpenGL
 		&s_attribTypeSizeD3D1x, // Vulkan
+		&s_attribTypeSizeD3D1x, // WebGPU
 		&s_attribTypeSizeD3D9,  // Count
 	};
 	BX_STATIC_ASSERT(BX_COUNTOF(s_attribTypeSize) == RendererType::Count+1);

+ 1 - 1
tools/shaderc/shaderc.cpp

@@ -13,7 +13,7 @@ extern "C"
 #include <fpp.h>
 } // extern "C"
 
-#define BGFX_SHADER_BIN_VERSION 6
+#define BGFX_SHADER_BIN_VERSION 8
 #define BGFX_CHUNK_MAGIC_CSH BX_MAKEFOURCC('C', 'S', 'H', BGFX_SHADER_BIN_VERSION)
 #define BGFX_CHUNK_MAGIC_FSH BX_MAKEFOURCC('F', 'S', 'H', BGFX_SHADER_BIN_VERSION)
 #define BGFX_CHUNK_MAGIC_VSH BX_MAKEFOURCC('V', 'S', 'H', BGFX_SHADER_BIN_VERSION)

+ 5 - 1
tools/shaderc/shaderc.h

@@ -109,7 +109,9 @@ namespace bgfx
 
 #define BGFX_UNIFORM_FRAGMENTBIT UINT8_C(0x10)
 #define BGFX_UNIFORM_SAMPLERBIT  UINT8_C(0x20)
-#define BGFX_UNIFORM_MASK (BGFX_UNIFORM_FRAGMENTBIT|BGFX_UNIFORM_SAMPLERBIT)
+#define BGFX_UNIFORM_READONLYBIT UINT8_C(0x40)
+#define BGFX_UNIFORM_COMPAREBIT  UINT8_C(0x80)
+#define BGFX_UNIFORM_MASK (BGFX_UNIFORM_FRAGMENTBIT|BGFX_UNIFORM_SAMPLERBIT|BGFX_UNIFORM_READONLYBIT|BGFX_UNIFORM_COMPAREBIT)
 
 	const char* getUniformTypeName(UniformType::Enum _enum);
 	UniformType::Enum nameToUniformTypeEnum(const char* _name);
@@ -121,6 +123,8 @@ namespace bgfx
 		uint8_t num;
 		uint16_t regIndex;
 		uint16_t regCount;
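+		// Reflected texture info for the WebGPU backend: wgpu::TextureComponentType and wgpu::TextureViewDimension stored as bytes.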
+		uint8_t texComponent;
+		uint8_t texDimension;
 	};
 
 	struct Options

+ 2 - 0
tools/shaderc/shaderc_glsl.cpp

@@ -339,6 +339,8 @@ namespace bgfx { namespace glsl
 			bx::write(_writer, un.num);
 			bx::write(_writer, un.regIndex);
 			bx::write(_writer, un.regCount);
+			bx::write(_writer, un.texComponent);
+			bx::write(_writer, un.texDimension);
 
 			BX_TRACE("%s, %s, %d, %d, %d"
 				, un.name.c_str()

+ 2 - 0
tools/shaderc/shaderc_hlsl.cpp

@@ -736,6 +736,8 @@ namespace bgfx { namespace hlsl
 				bx::write(_writer, un.num);
 				bx::write(_writer, un.regIndex);
 				bx::write(_writer, un.regCount);
+				bx::write(_writer, un.texComponent);
+				bx::write(_writer, un.texDimension);
 
 				BX_TRACE("%s, %s, %d, %d, %d"
 					, un.name.c_str()

+ 2 - 0
tools/shaderc/shaderc_metal.cpp

@@ -602,6 +602,8 @@ namespace bgfx { namespace metal
 			bx::write(_writer, un.num);
 			bx::write(_writer, un.regIndex);
 			bx::write(_writer, un.regCount);
+			bx::write(_writer, un.texComponent);
+			bx::write(_writer, un.texDimension);
 
 			BX_TRACE("%s, %s, %d, %d, %d"
 				, un.name.c_str()

+ 80 - 8
tools/shaderc/shaderc_spirv.cpp

@@ -14,6 +14,7 @@ BX_PRAGMA_DIAGNOSTIC_IGNORED_CLANG_GCC("-Wshadow") // warning: declaration of 'u
 #include <ResourceLimits.h>
 #include <SPIRV/SPVRemapper.h>
 #include <SPIRV/GlslangToSpv.h>
+#include <webgpu/webgpu_cpp.h>
 #define SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS
 #include <spirv_msl.hpp>
 #include <spirv_reflect.hpp>
@@ -175,6 +176,42 @@ namespace bgfx { namespace spirv
 		return true;
 	}
 
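+	// Map SPIRV-Cross reflection data to the WebGPU enums that end up in Uniform::texComponent and Uniform::texDimension.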
+	wgpu::TextureComponentType SpirvCrossBaseTypeToFormatType(spirv_cross::SPIRType::BaseType spirvBaseType)
+	{
+		switch (spirvBaseType)
+		{
+		case spirv_cross::SPIRType::Float:
+			return wgpu::TextureComponentType::Float;
+		case spirv_cross::SPIRType::Int:
+			return wgpu::TextureComponentType::Sint;
+		case spirv_cross::SPIRType::UInt:
+			return wgpu::TextureComponentType::Uint;
+		default:
+			return wgpu::TextureComponentType::Float;
+		}
+	}
+
+	wgpu::TextureViewDimension SpirvDimToTextureViewDimension(spv::Dim dim, bool arrayed)
+	{
+		switch (dim)
+		{
+		case spv::Dim::Dim1D:
+			return wgpu::TextureViewDimension::e1D;
+		case spv::Dim::Dim2D:
+			return arrayed
+				? wgpu::TextureViewDimension::e2DArray
+				: wgpu::TextureViewDimension::e2D;
+		case spv::Dim::Dim3D:
+			return wgpu::TextureViewDimension::e3D;
+		case spv::Dim::DimCube:
+			return arrayed
+				? wgpu::TextureViewDimension::CubeArray
+				: wgpu::TextureViewDimension::Cube;
+		default:
+			return wgpu::TextureViewDimension::Undefined;
+		}
+	}
+
 	struct SpvReflection
 	{
 		struct TypeId
@@ -619,6 +656,8 @@ namespace bgfx { namespace spirv
 			bx::write(_writer, un.num);
 			bx::write(_writer, un.regIndex);
 			bx::write(_writer, un.regCount);
+			bx::write(_writer, un.texComponent);
+			bx::write(_writer, un.texDimension);
 
 			BX_TRACE("%s, %s, %d, %d, %d"
 				, un.name.c_str()
@@ -938,15 +977,32 @@ namespace bgfx { namespace spirv
 						if (name.size() > 7
 						&&  0 == bx::strCmp(name.c_str() + name.length() - 7, "Texture") )
 						{
-							auto uniform_name = name.substr(0, name.length() - 7);
+							std::string uniform_name = name.substr(0, name.length() - 7);
+							uint32_t binding_index = refl.get_decoration(resource.id, spv::Decoration::DecorationBinding);
+
+							auto imageType = refl.get_type(resource.base_type_id).image;
+							auto componentType = refl.get_type(imageType.type).basetype;
+
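+							// The separate sampler paired with this texture is expected at binding_index + 16; a depth/comparison sampler marks the uniform with BGFX_UNIFORM_COMPAREBIT.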
+							bool isCompareSampler = false;
+							for (auto& sampler : resourcesrefl.separate_samplers)
+							{
+								if (binding_index + 16 == refl.get_decoration(sampler.id, spv::Decoration::DecorationBinding))
+								{
+									std::string samplerName = refl.get_name(sampler.id);
+									isCompareSampler = refl.variable_is_depth_or_compare(sampler.id) || samplerName.find("Comparison") != std::string::npos;
+									break;
+								}
+							}
 
 							Uniform un;
 							un.name = uniform_name;
-							un.type = UniformType::Enum(BGFX_UNIFORM_SAMPLERBIT | UniformType::Sampler);
+							if (isCompareSampler)
+								un.type = UniformType::Enum(BGFX_UNIFORM_SAMPLERBIT | BGFX_UNIFORM_COMPAREBIT | UniformType::Sampler);
+							else
+								un.type = UniformType::Enum(BGFX_UNIFORM_SAMPLERBIT | UniformType::Sampler);
 
-							const uint32_t binding = refl.get_decoration(resource.id, spv::DecorationBinding);
-
-							uint32_t binding_index = refl.get_decoration(resource.id, spv::Decoration::DecorationBinding);
+							un.texComponent = uint8_t(SpirvCrossBaseTypeToFormatType(componentType));
+							un.texDimension = uint8_t(SpirvDimToTextureViewDimension(imageType.dim, imageType.arrayed));
 
 							un.regIndex = binding_index;
 							un.regCount = 0; // unused
@@ -963,13 +1019,24 @@ namespace bgfx { namespace spirv
 						if (name.size() > 7
 						&&  0 == bx::strCmp(name.c_str() + name.length() - 7, "Texture") )
 						{
-							auto uniform_name = name.substr(0, name.length() - 7);
+							std::string uniform_name = name.substr(0, name.length() - 7);
 							uint32_t binding_index = refl.get_decoration(resource.id, spv::Decoration::DecorationBinding);
-							std::string sampler_name = uniform_name + "Sampler";
+
+							auto imageType = refl.get_type(resource.base_type_id).image;
+							auto componentType = refl.get_type(imageType.type).basetype;
+
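+							// Storage images decorated NonWritable are flagged read-only via BGFX_UNIFORM_READONLYBIT.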
+							spirv_cross::Bitset flags = refl.get_buffer_block_flags(resource.id);
+							UniformType::Enum type = flags.get(spv::DecorationNonWritable)
+								? UniformType::Enum(BGFX_UNIFORM_READONLYBIT | UniformType::End)
+								: UniformType::End;
 
 							Uniform un;
 							un.name = uniform_name;
-							un.type = UniformType::End;
+							un.type = type;
+
+							un.texComponent = uint8_t(SpirvCrossBaseTypeToFormatType(componentType));
+							un.texDimension = uint8_t(SpirvDimToTextureViewDimension(imageType.dim, imageType.arrayed));
+
 							un.regIndex = binding_index;
 							un.regCount = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;	// for descriptor type
 
@@ -986,6 +1053,11 @@ namespace bgfx { namespace spirv
 						{
 							if (!bx::strFind(uniform.name.c_str(), name.c_str()).isEmpty())
 							{
+								spirv_cross::Bitset flags = refl.get_buffer_block_flags(resource.id);
+								UniformType::Enum type = flags.get(spv::DecorationNonWritable)
+									? UniformType::Enum(BGFX_UNIFORM_READONLYBIT | UniformType::End)
+									: UniformType::End;
+
 								uint32_t binding_index = refl.get_decoration(resource.id, spv::Decoration::DecorationBinding);
 								uniform.name = name;
 								uniform.type = UniformType::End;