Explorar el Código

Added glslang.

Branimir Karadžić hace 9 años
padre
commit
f46d6a5803
Se han modificado 100 ficheros con 26222 adiciones y 0 borrados
  1. 40 0
      3rdparty/glslang/.appveyor.yml
  2. 12 0
      3rdparty/glslang/.clang-format
  3. 17 0
      3rdparty/glslang/.gitattributes
  4. 13 0
      3rdparty/glslang/.gitignore
  5. 60 0
      3rdparty/glslang/.travis.yml
  6. 58 0
      3rdparty/glslang/CMakeLists.txt
  7. 105 0
      3rdparty/glslang/ChooseMSVCCRT.cmake
  8. 34 0
      3rdparty/glslang/External/CMakeLists.txt
  9. 11 0
      3rdparty/glslang/OGLCompilersDLL/CMakeLists.txt
  10. 155 0
      3rdparty/glslang/OGLCompilersDLL/InitializeDll.cpp
  11. 50 0
      3rdparty/glslang/OGLCompilersDLL/InitializeDll.h
  12. 137 0
      3rdparty/glslang/README-spirv-remap.txt
  13. 306 0
      3rdparty/glslang/README.md
  14. 48 0
      3rdparty/glslang/SPIRV/CMakeLists.txt
  15. 116 0
      3rdparty/glslang/SPIRV/GLSL.ext.AMD.h
  16. 36 0
      3rdparty/glslang/SPIRV/GLSL.ext.KHR.h
  17. 131 0
      3rdparty/glslang/SPIRV/GLSL.std.450.h
  18. 5131 0
      3rdparty/glslang/SPIRV/GlslangToSpv.cpp
  19. 54 0
      3rdparty/glslang/SPIRV/GlslangToSpv.h
  20. 113 0
      3rdparty/glslang/SPIRV/InReadableOrder.cpp
  21. 68 0
      3rdparty/glslang/SPIRV/Logger.cpp
  22. 74 0
      3rdparty/glslang/SPIRV/Logger.h
  23. 1311 0
      3rdparty/glslang/SPIRV/SPVRemapper.cpp
  24. 295 0
      3rdparty/glslang/SPIRV/SPVRemapper.h
  25. 2542 0
      3rdparty/glslang/SPIRV/SpvBuilder.cpp
  26. 598 0
      3rdparty/glslang/SPIRV/SpvBuilder.h
  27. 81 0
      3rdparty/glslang/SPIRV/bitutils.h
  28. 643 0
      3rdparty/glslang/SPIRV/disassemble.cpp
  29. 52 0
      3rdparty/glslang/SPIRV/disassemble.h
  30. 2804 0
      3rdparty/glslang/SPIRV/doc.cpp
  31. 260 0
      3rdparty/glslang/SPIRV/doc.h
  32. 1078 0
      3rdparty/glslang/SPIRV/hex_float.h
  33. 925 0
      3rdparty/glslang/SPIRV/spirv.hpp
  34. 400 0
      3rdparty/glslang/SPIRV/spvIR.h
  35. 49 0
      3rdparty/glslang/StandAlone/CMakeLists.txt
  36. 458 0
      3rdparty/glslang/StandAlone/ResourceLimits.cpp
  37. 57 0
      3rdparty/glslang/StandAlone/ResourceLimits.h
  38. 1082 0
      3rdparty/glslang/StandAlone/StandAlone.cpp
  39. 98 0
      3rdparty/glslang/StandAlone/Worklist.h
  40. 344 0
      3rdparty/glslang/StandAlone/spirv-remap.cpp
  41. 28 0
      3rdparty/glslang/Test/100.conf
  42. 227 0
      3rdparty/glslang/Test/100.frag
  43. 76 0
      3rdparty/glslang/Test/100Limits.vert
  44. 76 0
      3rdparty/glslang/Test/100scope.vert
  45. 74 0
      3rdparty/glslang/Test/110scope.vert
  46. 238 0
      3rdparty/glslang/Test/120.frag
  47. 203 0
      3rdparty/glslang/Test/120.vert
  48. 169 0
      3rdparty/glslang/Test/130.frag
  49. 78 0
      3rdparty/glslang/Test/130.vert
  50. 53 0
      3rdparty/glslang/Test/140.frag
  51. 59 0
      3rdparty/glslang/Test/140.vert
  52. 50 0
      3rdparty/glslang/Test/150.frag
  53. 139 0
      3rdparty/glslang/Test/150.geom
  54. 34 0
      3rdparty/glslang/Test/150.tesc
  55. 35 0
      3rdparty/glslang/Test/150.tese
  56. 25 0
      3rdparty/glslang/Test/150.vert
  57. 161 0
      3rdparty/glslang/Test/300.frag
  58. 187 0
      3rdparty/glslang/Test/300.vert
  59. 76 0
      3rdparty/glslang/Test/300BuiltIns.frag
  60. 58 0
      3rdparty/glslang/Test/300block.frag
  61. 19 0
      3rdparty/glslang/Test/300layout.frag
  62. 57 0
      3rdparty/glslang/Test/300layout.vert
  63. 8 0
      3rdparty/glslang/Test/300link.frag
  64. 11 0
      3rdparty/glslang/Test/300link2.frag
  65. 7 0
      3rdparty/glslang/Test/300link3.frag
  66. 135 0
      3rdparty/glslang/Test/300operations.frag
  67. 74 0
      3rdparty/glslang/Test/300scope.vert
  68. 240 0
      3rdparty/glslang/Test/310.comp
  69. 431 0
      3rdparty/glslang/Test/310.frag
  70. 152 0
      3rdparty/glslang/Test/310.geom
  71. 169 0
      3rdparty/glslang/Test/310.tesc
  72. 128 0
      3rdparty/glslang/Test/310.tese
  73. 403 0
      3rdparty/glslang/Test/310.vert
  74. 115 0
      3rdparty/glslang/Test/310AofA.vert
  75. 8 0
      3rdparty/glslang/Test/310implicitSizeArrayError.vert
  76. 152 0
      3rdparty/glslang/Test/330.frag
  77. 12 0
      3rdparty/glslang/Test/330comp.frag
  78. 197 0
      3rdparty/glslang/Test/400.frag
  79. 330 0
      3rdparty/glslang/Test/400.geom
  80. 105 0
      3rdparty/glslang/Test/400.tesc
  81. 105 0
      3rdparty/glslang/Test/400.tese
  82. 106 0
      3rdparty/glslang/Test/400.vert
  83. 39 0
      3rdparty/glslang/Test/410.geom
  84. 11 0
      3rdparty/glslang/Test/410.tesc
  85. 9 0
      3rdparty/glslang/Test/410.vert
  86. 30 0
      3rdparty/glslang/Test/420.comp
  87. 14 0
      3rdparty/glslang/Test/420.frag
  88. 55 0
      3rdparty/glslang/Test/420.geom
  89. 43 0
      3rdparty/glslang/Test/420.tesc
  90. 90 0
      3rdparty/glslang/Test/420.tese
  91. 161 0
      3rdparty/glslang/Test/420.vert
  92. 21 0
      3rdparty/glslang/Test/420_size_gl_in.geom
  93. 87 0
      3rdparty/glslang/Test/430.comp
  94. 223 0
      3rdparty/glslang/Test/430.vert
  95. 108 0
      3rdparty/glslang/Test/430AofA.frag
  96. 74 0
      3rdparty/glslang/Test/430scope.vert
  97. 153 0
      3rdparty/glslang/Test/440.frag
  98. 191 0
      3rdparty/glslang/Test/440.vert
  99. 1 0
      3rdparty/glslang/Test/450.comp
  100. 56 0
      3rdparty/glslang/Test/450.frag

+ 40 - 0
3rdparty/glslang/.appveyor.yml

@@ -0,0 +1,40 @@
+# Windows Build Configuration for AppVeyor
+# http://www.appveyor.com/docs/appveyor-yml
+
+# build version format
+version: "{build}"
+
+os: Visual Studio 2013
+
+platform:
+  - Any CPU
+
+configuration:
+  - Debug
+  - Release
+
+branches:
+  only:
+    - master
+
+clone_depth: 5
+
+matrix:
+  fast_finish: true # Show final status immediately if a test fails.
+
+# scripts that run after cloning repository
+install:
+  - git clone https://github.com/google/googletest.git External/googletest
+
+build:
+  parallel: true  # enable MSBuild parallel builds
+  verbosity: minimal
+
+build_script:
+  - mkdir build && cd build
+  - cmake .. -DCMAKE_INSTALL_PREFIX=install
+  - cmake --build . --config %CONFIGURATION% --target install
+
+test_script:
+  - ctest -C %CONFIGURATION% --output-on-failure
+  - cd ../Test && bash runtests

+ 12 - 0
3rdparty/glslang/.clang-format

@@ -0,0 +1,12 @@
+Language: Cpp
+IndentWidth: 4
+BreakBeforeBraces: Custom
+BraceWrapping: { AfterFunction: true, AfterControlStatement: true }
+IndentCaseLabels: false
+ReflowComments: false
+ColumnLimit: 120
+AccessModifierOffset: -4
+AlignTrailingComments: true
+AllowShortBlocksOnASingleLine: false
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false

+ 17 - 0
3rdparty/glslang/.gitattributes

@@ -0,0 +1,17 @@
+# test files have a mix of lf/crlf, and that's a good thing, for testing, don't mess with it
+# bash scripts need lines ending with lf, and that's correct for Windows too, e.g., under Cygwin
+# (scripts often don't have a suffix)
+* -text
+*.sh text eol=lf
+
+# txt files should be native and normalized
+*.txt text
+
+# source code can be native and normalized, but simpler if lf everywhere; will try that way
+*.h text eol=lf
+*.c text eol=lf
+*.cpp text eol=lf
+*.y text eol=lf
+*.out text eol=lf
+*.conf text eol=lf
+*.err text eol=lf

+ 13 - 0
3rdparty/glslang/.gitignore

@@ -0,0 +1,13 @@
+*.o
+*.a
+*.so
+*.exe
+tags
+TAGS
+build/
+Test/localResults/
+Test/multiThread.out
+Test/singleThread.out
+Test/vert.spv
+Test/frag.spv
+External/googletest

+ 60 - 0
3rdparty/glslang/.travis.yml

@@ -0,0 +1,60 @@
+# Linux and Mac Build Configuration for Travis
+
+language: cpp
+
+os:
+  - linux
+  - osx
+
+# Use Ubuntu 14.04 LTS (Trusty) as the Linux testing environment.
+sudo: required
+dist: trusty
+
+env:
+  - GLSLANG_BUILD_TYPE=Release
+  - GLSLANG_BUILD_TYPE=Debug
+
+compiler:
+  - clang
+  - gcc
+
+matrix:
+  fast_finish: true # Show final status immediately if a test fails.
+  exclude:
+    # Skip GCC builds on Mac OS X.
+    - os: osx
+      compiler: gcc
+
+cache:
+  apt: true
+
+branches:
+  only:
+    - master
+
+addons:
+  apt:
+    packages:
+      - clang-3.6
+      - ninja-build
+
+install:
+  # Install ninja on Mac OS X.
+  - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update && brew install ninja; fi
+  # Make sure that clang-3.6 is selected.
+  - if [[ "$TRAVIS_OS_NAME" == "linux" && "$CC" == "clang" ]]; then
+      export CC=clang-3.6 CXX=clang++-3.6;
+    fi
+
+before_script:
+  - git clone https://github.com/google/googletest.git External/googletest
+
+script:
+  - mkdir build && cd build
+  # We need to install the compiled binaries so the paths in the runtests script can resolve correctly.
+  - cmake -GNinja -DCMAKE_BUILD_TYPE=${GLSLANG_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX=`pwd`/install ..
+  - ninja install
+  # Run Google-Test-based tests.
+  - ctest --output-on-failure
+  # Run runtests-based tests.
+  - cd ../Test && ./runtests

+ 58 - 0
3rdparty/glslang/CMakeLists.txt

@@ -0,0 +1,58 @@
+cmake_minimum_required(VERSION 2.8.11)
+set_property(GLOBAL PROPERTY USE_FOLDERS ON)
+
+option(ENABLE_AMD_EXTENSIONS "Enables support of AMD-specific extensions" ON)
+
+enable_testing()
+
+set(CMAKE_INSTALL_PREFIX "install" CACHE STRING "prefix")
+
+project(glslang)
+
+if(ENABLE_AMD_EXTENSIONS)
+    add_definitions(-DAMD_EXTENSIONS)
+endif(ENABLE_AMD_EXTENSIONS)
+
+if(WIN32)
+    set(CMAKE_DEBUG_POSTFIX "d")
+    include(ChooseMSVCCRT.cmake)
+    add_definitions(-DGLSLANG_OSINCLUDE_WIN32)
+elseif(UNIX)
+    add_definitions(-fPIC)
+    add_definitions(-DGLSLANG_OSINCLUDE_UNIX)
+else(WIN32)
+    message("unknown platform")
+endif(WIN32)
+
+if(CMAKE_COMPILER_IS_GNUCXX)
+    add_definitions(-Wall -Wmaybe-uninitialized -Wuninitialized -Wunused -Wunused-local-typedefs
+      -Wunused-parameter -Wunused-value  -Wunused-variable -Wunused-but-set-parameter -Wunused-but-set-variable)
+    add_definitions(-Wno-reorder)  # disable this from -Wall, since it happens all over.
+    add_definitions(-std=c++11)
+elseif(${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
+    add_definitions(-Wall -Wuninitialized -Wunused -Wunused-local-typedefs
+      -Wunused-parameter -Wunused-value  -Wunused-variable)
+    add_definitions(-Wno-reorder)  # disable this from -Wall, since it happens all over.
+    add_definitions(-std=c++11)
+endif()
+
+function(glslang_set_link_args TARGET)
+    # For MinGW compiles, statically link against the GCC and C++ runtimes.
+    # This avoids the need to ship those runtimes as DLLs.
+    if(WIN32)
+	if(${CMAKE_CXX_COMPILER_ID} MATCHES "GNU")
+	    set_target_properties(${TARGET} PROPERTIES
+		    LINK_FLAGS "-static -static-libgcc -static-libstdc++")
+        endif()
+    endif(WIN32)
+endfunction(glslang_set_link_args)
+
+# We depend on these for later projects, so they should come first.
+add_subdirectory(External)
+
+add_subdirectory(glslang)
+add_subdirectory(OGLCompilersDLL)
+add_subdirectory(StandAlone)
+add_subdirectory(SPIRV)
+add_subdirectory(hlsl)
+add_subdirectory(gtests)

+ 105 - 0
3rdparty/glslang/ChooseMSVCCRT.cmake

@@ -0,0 +1,105 @@
+# The macro choose_msvc_crt() takes a list of possible
+# C runtimes to choose from, in the form of compiler flags,
+# to present to the user. (MTd for /MTd, etc)
+#
+# The macro is invoked at the end of the file.
+#
+# CMake already sets CRT flags in the CMAKE_CXX_FLAGS_* and
+# CMAKE_C_FLAGS_* variables by default. To let the user
+# override that for each build type:
+# 1. Detect which CRT is already selected, and reflect this in
+# LLVM_USE_CRT_* so the user can have a better idea of what
+# changes they're making.
+# 2. Replace the flags in both variables with the new flag via a regex.
+# 3. set() the variables back into the cache so the changes
+# are user-visible.
+
+### Helper macros: ###
+macro(make_crt_regex regex crts)
+  set(${regex} "")
+  foreach(crt ${${crts}})
+    # Trying to match the beginning or end of the string with stuff
+    # like [ ^]+ didn't work, so use a bunch of parentheses instead.
+    set(${regex} "${${regex}}|(^| +)/${crt}($| +)")
+  endforeach(crt)
+  string(REGEX REPLACE "^\\|" "" ${regex} "${${regex}}")
+endmacro(make_crt_regex)
+
+macro(get_current_crt crt_current regex flagsvar)
+  # Find the selected-by-CMake CRT for each build type, if any.
+  # Strip off the leading slash and any whitespace.
+  string(REGEX MATCH "${${regex}}" ${crt_current} "${${flagsvar}}")
+  string(REPLACE "/" " " ${crt_current} "${${crt_current}}")
+  string(STRIP "${${crt_current}}" ${crt_current})
+endmacro(get_current_crt)
+
+# Replaces or adds a flag to a variable.
+# Expects 'flag' to be padded with spaces.
+macro(set_flag_in_var flagsvar regex flag)
+  string(REGEX MATCH "${${regex}}" current_flag "${${flagsvar}}")
+  if("${current_flag}" STREQUAL "")
+    set(${flagsvar} "${${flagsvar}}${${flag}}")
+  else()
+    string(REGEX REPLACE "${${regex}}" "${${flag}}" ${flagsvar} "${${flagsvar}}")
+  endif()
+  string(STRIP "${${flagsvar}}" ${flagsvar})
+  # Make sure this change gets reflected in the cache/gui.
+  # CMake requires the docstring parameter whenever set() touches the cache,
+  # so get the existing docstring and re-use that.
+  get_property(flagsvar_docs CACHE ${flagsvar} PROPERTY HELPSTRING)
+  set(${flagsvar} "${${flagsvar}}" CACHE STRING "${flagsvar_docs}" FORCE)
+endmacro(set_flag_in_var)
+
+
+macro(choose_msvc_crt MSVC_CRT)
+  if(LLVM_USE_CRT)
+    message(FATAL_ERROR
+      "LLVM_USE_CRT is deprecated. Use the CMAKE_BUILD_TYPE-specific
+variables (LLVM_USE_CRT_DEBUG, etc) instead.")
+  endif()
+
+  make_crt_regex(MSVC_CRT_REGEX ${MSVC_CRT})
+
+  foreach(build_type ${CMAKE_CONFIGURATION_TYPES} ${CMAKE_BUILD_TYPE})
+    string(TOUPPER "${build_type}" build)
+    if (NOT LLVM_USE_CRT_${build})
+      get_current_crt(LLVM_USE_CRT_${build}
+        MSVC_CRT_REGEX
+        CMAKE_CXX_FLAGS_${build})
+      set(LLVM_USE_CRT_${build}
+        "${LLVM_USE_CRT_${build}}"
+        CACHE STRING "Specify VC++ CRT to use for ${build_type} configurations."
+        FORCE)
+      set_property(CACHE LLVM_USE_CRT_${build}
+        PROPERTY STRINGS ;${${MSVC_CRT}})
+    endif(NOT LLVM_USE_CRT_${build})
+  endforeach(build_type)
+
+  foreach(build_type ${CMAKE_CONFIGURATION_TYPES} ${CMAKE_BUILD_TYPE})
+    string(TOUPPER "${build_type}" build)
+    if ("${LLVM_USE_CRT_${build}}" STREQUAL "")
+      set(flag_string " ")
+    else()
+      set(flag_string " /${LLVM_USE_CRT_${build}} ")
+      list(FIND ${MSVC_CRT} ${LLVM_USE_CRT_${build}} idx)
+      if (idx LESS 0)
+        message(FATAL_ERROR
+          "Invalid value for LLVM_USE_CRT_${build}: ${LLVM_USE_CRT_${build}}. Valid options are one of: ${${MSVC_CRT}}")
+      endif (idx LESS 0)
+      message(STATUS "Using ${build_type} VC++ CRT: ${LLVM_USE_CRT_${build}}")
+    endif()
+    foreach(lang C CXX)
+      set_flag_in_var(CMAKE_${lang}_FLAGS_${build} MSVC_CRT_REGEX flag_string)
+    endforeach(lang)
+  endforeach(build_type)
+endmacro(choose_msvc_crt MSVC_CRT)
+
+
+# List of valid CRTs for MSVC
+set(MSVC_CRT
+  MD
+  MDd
+  MT
+  MTd)
+
+choose_msvc_crt(MSVC_CRT)

+ 34 - 0
3rdparty/glslang/External/CMakeLists.txt

@@ -0,0 +1,34 @@
+# Suppress all warnings from external projects.
+set_property(DIRECTORY APPEND PROPERTY COMPILE_OPTIONS -w)
+
+if (TARGET gmock)
+  message(STATUS "Google Mock already configured - use it")
+elseif(IS_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/googletest)
+  # We need to make sure Google Test does not mess up with the
+  # global CRT settings on Windows.
+  if(WIN32)
+    set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
+  endif(WIN32)
+  add_subdirectory(googletest)
+  set(GTEST_TARGETS
+    gtest
+    gtest_main
+    gmock
+    gmock_main
+  )
+  foreach(target ${GTEST_TARGETS})
+    set_property(TARGET ${target} PROPERTY FOLDER gtest)
+  endforeach()
+  mark_as_advanced(gmock_build_tests
+    BUILD_GMOCK
+    BUILD_GTEST
+    BUILD_SHARED_LIBS
+    gtest_build_samples
+    gtest_build_tests
+    gtest_disable_pthreads
+    gtest_force_shared_crt
+    gtest_hide_internal_symbols)
+else()
+  message(STATUS
+    "Google Mock was not found - tests based on that will not build")
+endif()

+ 11 - 0
3rdparty/glslang/OGLCompilersDLL/CMakeLists.txt

@@ -0,0 +1,11 @@
+set(SOURCES InitializeDll.cpp InitializeDll.h)
+
+add_library(OGLCompiler STATIC ${SOURCES})
+set_property(TARGET OGLCompiler PROPERTY FOLDER glslang)
+
+if(WIN32)
+    source_group("Source" FILES ${SOURCES})
+endif(WIN32)
+
+install(TARGETS OGLCompiler 
+        ARCHIVE DESTINATION lib)

+ 155 - 0
3rdparty/glslang/OGLCompilersDLL/InitializeDll.cpp

@@ -0,0 +1,155 @@
+//
+//Copyright (C) 2002-2005  3Dlabs Inc. Ltd.
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+#define SH_EXPORTING
+
+#include <cassert>
+
+#include "InitializeDll.h"
+#include "../glslang/Include/InitializeGlobals.h"
+
+#include "../glslang/Public/ShaderLang.h"
+
+namespace glslang {
+
+OS_TLSIndex ThreadInitializeIndex = OS_INVALID_TLS_INDEX;
+
+bool InitProcess()
+{
+    glslang::GetGlobalLock();
+
+    if (ThreadInitializeIndex != OS_INVALID_TLS_INDEX) {
+        //
+        // Function is re-entrant.
+        //
+
+        glslang::ReleaseGlobalLock();
+        return true;
+    }
+
+    ThreadInitializeIndex = OS_AllocTLSIndex();
+
+    if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX) {
+        assert(0 && "InitProcess(): Failed to allocate TLS area for init flag");
+
+        glslang::ReleaseGlobalLock();
+        return false;
+    }
+
+    if (! InitializePoolIndex()) {
+        assert(0 && "InitProcess(): Failed to initialize global pool");
+
+        glslang::ReleaseGlobalLock();
+        return false;
+    }
+
+    if (! InitThread()) {
+        assert(0 && "InitProcess(): Failed to initialize thread");
+
+        glslang::ReleaseGlobalLock();
+        return false;
+    }
+
+    glslang::ReleaseGlobalLock();
+    return true;
+}
+
+
+bool InitThread()
+{
+    //
+    // This function is re-entrant
+    //
+    if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX) {
+        assert(0 && "InitThread(): Process hasn't been initalised.");
+        return false;
+    }
+
+    if (OS_GetTLSValue(ThreadInitializeIndex) != 0)
+        return true;
+
+    InitializeMemoryPools();
+
+    if (! OS_SetTLSValue(ThreadInitializeIndex, (void *)1)) {
+        assert(0 && "InitThread(): Unable to set init flag.");
+        return false;
+    }
+
+    return true;
+}
+
+
+bool DetachThread()
+{
+    bool success = true;
+
+    if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX)
+        return true;
+
+    //
+    // Function is re-entrant and this thread may not have been initialized.
+    //
+    if (OS_GetTLSValue(ThreadInitializeIndex) != 0) {
+        if (!OS_SetTLSValue(ThreadInitializeIndex, (void *)0)) {
+            assert(0 && "DetachThread(): Unable to clear init flag.");
+            success = false;
+        }
+
+        FreeGlobalPools();
+
+    }
+
+    return success;
+}
+
+bool DetachProcess()
+{
+    bool success = true;
+
+    if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX)
+        return true;
+
+    ShFinalize();
+
+    success = DetachThread();
+
+    FreePoolIndex();
+
+    OS_FreeTLSIndex(ThreadInitializeIndex);
+    ThreadInitializeIndex = OS_INVALID_TLS_INDEX;
+
+    return success;
+}
+
+} // end namespace glslang

+ 50 - 0
3rdparty/glslang/OGLCompilersDLL/InitializeDll.h

@@ -0,0 +1,50 @@
+//
+//Copyright (C) 2002-2005  3Dlabs Inc. Ltd.
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+#ifndef __INITIALIZEDLL_H
+#define __INITIALIZEDLL_H
+
+
+#include "../glslang/OSDependent/osinclude.h"
+
+namespace glslang {
+
+bool InitProcess();
+bool InitThread();
+bool DetachThread();
+bool DetachProcess();
+
+} // end namespace glslang
+
+#endif // __INITIALIZEDLL_H
+

+ 137 - 0
3rdparty/glslang/README-spirv-remap.txt

@@ -0,0 +1,137 @@
+
+VERSION
+--------------------------------------------------------------------------------
+spirv-remap 0.97
+
+INTRO:
+--------------------------------------------------------------------------------
+spirv-remap is a utility to improve compression of SPIR-V binary files via
+entropy reduction, plus optional stripping of debug information and
+load/store optimization.  It transforms SPIR-V to SPIR-V, remapping IDs.  The
+resulting modules have an increased ID range (IDs are not as tightly packed
+around zero), but will compress better when multiple modules are compressed
+together, since compressor's dictionary can find better cross module
+commonality.
+
+Remapping is accomplished via canonicalization.  Thus, modules can be
+compressed one at a time with no loss of quality relative to operating on
+many modules at once.  The command line tool operates on multiple modules
+only in the trivial repetition sense, for ease of use.  The remapper API
+only accepts a single module at a time.
+
+There are two modes of use: command line, and a C++11 API.  Both are
+described below.
+
+spirv-remap is currently in an alpha state.  Although there are no known
+remapping defects, it has only been exercised on one real world game shader
+workload.
+
+
+FEEDBACK
+--------------------------------------------------------------------------------
+Report defects, enhancements requests, code improvements, etc to:
+   [email protected]
+
+
+COMMAND LINE USAGE:
+--------------------------------------------------------------------------------
+Examples are given with a verbosity of one (-v), but more verbosity can be
+had via -vv, -vvv, etc, or an integer parameter to --verbose, such as
+"--verbose 4".  With no verbosity, the command is silent and returns 0 on
+success, and a positive integer error on failure.
+
+Pre-built binaries for several OSs are available.  Examples presented are
+for Linux.  Command line arguments can be provided in any order.
+
+1. Basic ID remapping
+
+Perform ID remapping on all shaders in "*.spv", writing new files with
+the same basenames to /tmp/out_dir.
+
+  spirv-remap -v --map all --input *.spv --output /tmp/out_dir
+
+2. Perform all possible size reductions
+
+  spirv-remap-linux-64 -v --do-everything --input *.spv --output /tmp/out_dir
+
+Note that --do-everything is a synonym for:
+
+  --map all --dce all --opt all --strip all
+
+API USAGE:
+--------------------------------------------------------------------------------
+
+The public interface to the remapper is defined in SPIRV/SPVRemapper.h as follows:
+
+namespace spv {
+
+class spirvbin_t
+{
+public:
+   enum Options { ... };
+   spirvbin_t(int verbose = 0);  // construct
+
+   // remap an existing binary in memory
+   void remap(std::vector<std::uint32_t>& spv, std::uint32_t opts = DO_EVERYTHING);
+
+   // Type for error/log handler functions
+   typedef std::function<void(const std::string&)> errorfn_t;
+   typedef std::function<void(const std::string&)> logfn_t;
+
+   // Register error/log handling functions (can be c/c++ fn, lambda fn, or functor)
+   static void registerErrorHandler(errorfn_t handler) { errorHandler = handler; }
+   static void registerLogHandler(logfn_t handler)     { logHandler   = handler; }
+};
+
+} // namespace spv
+
+The class definition is in SPVRemapper.cpp.
+
+remap() accepts an std::vector of SPIR-V words, modifies them per the
+request given in 'opts', and leaves the 'spv' container with the result.
+It is safe to instantiate one spirvbin_t per thread and process a different
+SPIR-V in each.
+
+The "opts" parameter to remap() accepts a bit mask of desired remapping
+options.  See REMAPPING AND OPTIMIZATION OPTIONS.
+
+On error, the function supplied to registerErrorHandler() will be invoked.
+This can be a standard C/C++ function, a lambda function, or a functor.
+The default handler simply calls exit(5). The error handler is a static
+member, so need only be set up once, not once per spirvbin_t instance.
+
+Log messages are supplied to registerLogHandler().  By default, log
+messages are eaten silently.  The log handler is also a static member.
+
+BUILD DEPENDENCIES:
+--------------------------------------------------------------------------------
+ 1. C++11 compatible compiler
+ 2. cmake
+ 3. glslang
+
+
+BUILDING
+--------------------------------------------------------------------------------
+The standalone remapper is built alongside glslangValidator through its
+normal build process.
+
+
+REMAPPING AND OPTIMIZATION OPTIONS
+--------------------------------------------------------------------------------
+API:
+   These are bits defined under spv::spirvbin_t::, and can be
+   bitwise or-ed together as desired.
+
+   MAP_TYPES      = canonicalize type IDs
+   MAP_NAMES      = canonicalize named data
+   MAP_FUNCS      = canonicalize function bodies
+   DCE_FUNCS      = remove dead functions
+   DCE_VARS       = remove dead variables
+   DCE_TYPES      = remove dead types
+   OPT_LOADSTORE  = optimize unneeded load/stores
+   MAP_ALL        = (MAP_TYPES | MAP_NAMES | MAP_FUNCS)
+   DCE_ALL        = (DCE_FUNCS | DCE_VARS | DCE_TYPES)
+   OPT_ALL        = (OPT_LOADSTORE)
+   ALL_BUT_STRIP  = (MAP_ALL | DCE_ALL | OPT_ALL)
+   DO_EVERYTHING  = (STRIP | ALL_BUT_STRIP)
+

+ 306 - 0
3rdparty/glslang/README.md

@@ -0,0 +1,306 @@
+Also see the Khronos landing page for glslang as a reference front end:
+
+https://www.khronos.org/opengles/sdk/tools/Reference-Compiler/
+
+The above page includes where to get binaries, and is kept up to date
+regarding the feature level of glslang.
+
+glslang
+=======
+
+[![Build Status](https://travis-ci.org/KhronosGroup/glslang.svg?branch=master)](https://travis-ci.org/KhronosGroup/glslang)
+[![Build status](https://ci.appveyor.com/api/projects/status/q6fi9cb0qnhkla68/branch/master?svg=true)](https://ci.appveyor.com/project/Khronoswebmaster/glslang/branch/master)
+
+An OpenGL and OpenGL ES shader front end and validator.
+
+There are several components:
+
+1. A GLSL/ESSL front-end for reference validation and translation of GLSL/ESSL into an AST.
+
+2. An HLSL front-end for translation of a broad generic HLL into the AST.
+
+3. A SPIR-V back end for translating the AST to SPIR-V.
+
+4. A standalone wrapper, `glslangValidator`, that can be used as a command-line tool for the above.
+
+How to add a feature protected by a version/extension/stage/profile:  See the
+comment in `glslang/MachineIndependent/Versions.cpp`.
+
+Tasks waiting to be done are documented as GitHub issues.
+
+Execution of Standalone Wrapper
+-------------------------------
+
+To use the standalone binary form, execute `glslangValidator`, and it will print
+a usage statement.  Basic operation is to give it a file containing a shader,
+and it will print out warnings/errors and optionally an AST.
+
+The applied stage-specific rules are based on the file extension:
+* `.vert` for a vertex shader
+* `.tesc` for a tessellation control shader
+* `.tese` for a tessellation evaluation shader
+* `.geom` for a geometry shader
+* `.frag` for a fragment shader
+* `.comp` for a compute shader
+
+There is also a non-shader extension
+* `.conf` for a configuration file of limits, see usage statement for example
+
+Building
+--------
+
+### Dependencies
+
+* [CMake][cmake]: for generating compilation targets.
+* [bison][bison]: _optional_, but needed when changing the grammar (glslang.y).
+* [googletest][googletest]: _optional_, but should use if making any changes to glslang.
+
+### Build steps
+
+#### 1) Check-Out this project 
+
+```bash
+cd <parent of where you want glslang to be>
+# If using SSH
+git clone [email protected]:KhronosGroup/glslang.git
+# Or if using HTTPS
+git clone https://github.com/KhronosGroup/glslang.git
+```
+
+#### 2) Check-Out External Projects
+
+```bash
+cd <the directory glslang was cloned to, "External" will be a subdirectory>
+git clone https://github.com/google/googletest.git External/googletest
+```
+
+#### 3) Configure
+
+Assume the source directory is `$SOURCE_DIR` and
+the build directory is `$BUILD_DIR`:
+
+For building on Linux (assuming using the Ninja generator):
+
+```bash
+cd $BUILD_DIR
+
+cmake -GNinja -DCMAKE_BUILD_TYPE={Debug|Release|RelWithDebInfo} \
+      -DCMAKE_INSTALL_PREFIX=`pwd`/install $SOURCE_DIR
+```
+
+For building on Windows:
+
+```bash
+cmake $SOURCE_DIR -DCMAKE_INSTALL_PREFIX=`pwd`/install
+# The CMAKE_INSTALL_PREFIX part is for testing (explained later).
+```
+
+The CMake GUI also works for Windows (version 3.4.1 tested).
+
+#### 4) Build and Install
+
+```bash
+# for Linux:
+ninja install
+
+# for Windows:
+cmake --build . --config {Release|Debug|MinSizeRel|RelWithDebInfo} \
+      --target install
+```
+
+If using MSVC, after running CMake to configure, use the
+Configuration Manager to check the `INSTALL` project.
+
+### If you need to change the GLSL grammar
+
+The grammar in `glslang/MachineIndependent/glslang.y` has to be recompiled
+with bison if it changes. The output files are committed to the repo to avoid
+every developer needing to have bison configured to compile the project, since
+grammar changes are quite infrequent. For Windows you can get binaries from
+[GnuWin32][bison-gnu-win32].
+
+The command to rebuild is:
+
+```bash
+bison --defines=MachineIndependent/glslang_tab.cpp.h
+      -t MachineIndependent/glslang.y
+      -o MachineIndependent/glslang_tab.cpp
+```
+
+The above command is also available in the bash script at
+`glslang/updateGrammar`.
+
+Testing
+-------
+
+Right now, there are two test harnesses existing in glslang: one is [Google
+Test](gtests/), one is the [`runtests` script](Test/runtests). The former
+runs unit tests and single-shader single-threaded integration tests, while
+the latter runs multiple-shader linking tests and multi-threaded tests.
+
+### Running tests
+
+The [`runtests` script](Test/runtests) requires compiled binaries to be
+installed into `$BUILD_DIR/install`. Please make sure you have supplied the
+correct configuration to CMake (using `-DCMAKE_INSTALL_PREFIX`) when building;
+otherwise, you may want to modify the path in the `runtests` script.
+
+Running Google Test-backed tests:
+
+```bash
+cd $BUILD_DIR
+
+# for Linux:
+ctest
+
+# for Windows:
+ctest -C {Debug|Release|RelWithDebInfo|MinSizeRel}
+
+# or, run the test binary directly
+# (which gives more fine-grained control like filtering):
+<dir-to-glslangtests-in-build-dir>/glslangtests
+```
+
+Running `runtests` script-backed tests:
+
+```bash
+cd $SOURCE_DIR/Test && ./runtests
+```
+
+### Contributing tests
+
+Test results should always be included with a pull request that modifies
+functionality.
+
+If you are writing unit tests, please use the Google Test framework and
+place the tests under the `gtests/` directory.
+
+Integration tests are placed in the `Test/` directory. It contains test input
+and a subdirectory `baseResults/` that contains the expected results of the
+tests.  Both the tests and `baseResults/` are under source-code control.
+
+Google Test runs those integration tests by reading the test input, compiling
+it, and then comparing the output against the expected results in
+`baseResults/`. The integration tests to run via Google Test are registered in
+various `gtests/*.FromFile.cpp` source files. `glslangtests` provides a
+command-line option `--update-mode`, which, if supplied, will overwrite the
+golden files under the `baseResults/` directory with real output from that
+invocation.
+For more information, please check `gtests/` directory's
+[README](gtests/README.md).
+
+For the `runtests` script, it will generate current results in the
+`localResults/` directory and `diff` them against the `baseResults/`.
+When you want to update the tracked test results, they need to be
+copied from `localResults/` to `baseResults/`.  This can be done by
+the `bump` shell script.
+
+You can add your own private list of tests, not tracked publicly, by using
+`localtestlist` to list non-tracked tests.  This is automatically read
+by `runtests` and included in the `diff` and `bump` process.
+
+Programmatic Interfaces
+-----------------------
+
+Another piece of software can programmatically translate shaders to an AST
+using one of two different interfaces:
+* A new C++ class-oriented interface, or
+* The original C functional interface
+
+The `main()` in `StandAlone/StandAlone.cpp` shows examples using both styles.
+
+### C++ Class Interface (new, preferred)
+
+This interface is in roughly the last 1/3 of `ShaderLang.h`.  It is in the
+glslang namespace and contains the following.
+
+```cxx
+const char* GetEsslVersionString();
+const char* GetGlslVersionString();
+bool InitializeProcess();
+void FinalizeProcess();
+
+class TShader
+    bool parse(...);
+    void setStrings(...);
+    const char* getInfoLog();
+
+class TProgram
+    void addShader(...);
+    bool link(...);
+    const char* getInfoLog();
+    Reflection queries
+```
+
+See `ShaderLang.h` and the usage of it in `StandAlone/StandAlone.cpp` for more
+details.
+
+### C Functional Interface (original)
+
+This interface is in roughly the first 2/3 of `ShaderLang.h`, and referred to
+as the `Sh*()` interface, as all the entry points start `Sh`.
+
+The `Sh*()` interface takes a "compiler" call-back object, which it calls
+after building the AST. The call-back is passed the AST and can then execute
+a back end on it.
+
+The following is a simplified resulting run-time call stack:
+
+```c
+ShCompile(shader, compiler) -> compiler(AST) -> <back end>
+```
+
+In practice, `ShCompile()` takes shader strings, default version, and
+warning/error and other options for controlling compilation.
+
+Basic Internal Operation
+------------------------
+
+* Initial lexical analysis is done by the preprocessor in
+  `MachineIndependent/Preprocessor`, and then refined by a GLSL scanner
+  in `MachineIndependent/Scan.cpp`.  There is currently no use of flex.
+
+* Code is parsed using bison on `MachineIndependent/glslang.y` with the
+  aid of a symbol table and an AST.  The symbol table is not passed on to
+  the back-end; the intermediate representation stands on its own.
+  The tree is built by the grammar productions, many of which are
+  offloaded into `ParseHelper.cpp`, and by `Intermediate.cpp`.
+
+* The intermediate representation is very high-level, and represented
+  as an in-memory tree.   This serves to lose no information from the
+  original program, and to have efficient transfer of the result from
+  parsing to the back-end.  In the AST, constants are propagated and
+  folded, and a very small amount of dead code is eliminated.
+
+  To aid linking and reflection, the last top-level branch in the AST
+  lists all global symbols.
+
+* The primary algorithm of the back-end compiler is to traverse the
+  tree (high-level intermediate representation), and create an internal
+  object code representation.  There is an example of how to do this
+  in `MachineIndependent/intermOut.cpp`.
+
+* Reduction of the tree to a linear byte-code style low-level intermediate
+  representation is likely a good way to generate fully optimized code.
+
+* There is currently some dead old-style linker-type code still lying around.
+
+* Memory pool: parsing uses types derived from C++ `std` types, using a
+  custom allocator that puts them in a memory pool.  This makes allocation
+  of individual container/contents just a few cycles and deallocation free.
+  This pool is popped after the AST is made and processed.
+
+  The use is simple: if you are going to call `new`, there are three cases:
+
+  - the object comes from the pool (its base class has the macro
+    `POOL_ALLOCATOR_NEW_DELETE` in it) and you do not have to call `delete`
+
+  - it is a `TString`, in which case call `NewPoolTString()`, which gets
+    it from the pool, and there is no corresponding `delete`
+
+  - the object does not come from the pool, and you have to do normal
+    C++ memory management of what you `new`
+
+
+[cmake]: https://cmake.org/
+[bison]: https://www.gnu.org/software/bison/
+[googletest]: https://github.com/google/googletest
+[bison-gnu-win32]: http://gnuwin32.sourceforge.net/packages/bison.htm

+ 48 - 0
3rdparty/glslang/SPIRV/CMakeLists.txt

@@ -0,0 +1,48 @@
+# Sources for the SPIRV library proper: the glslang-AST-to-SPIR-V translator
+# plus its builder, logging, block-ordering, and disassembly support.
+set(SOURCES
+    GlslangToSpv.cpp
+    InReadableOrder.cpp
+    Logger.cpp
+    SpvBuilder.cpp
+    doc.cpp
+    disassemble.cpp)
+
+# Sources for the standalone SPIR-V remapper library (doc.cpp is shared
+# with the SPIRV library above).
+set(SPVREMAP_SOURCES
+    SPVRemapper.cpp
+    doc.cpp)
+
+set(HEADERS
+    bitutils.h
+    spirv.hpp
+    GLSL.std.450.h
+    GLSL.ext.KHR.h
+    GlslangToSpv.h
+    hex_float.h
+    Logger.h
+    SpvBuilder.h
+    spvIR.h
+    doc.h
+    disassemble.h)
+
+set(SPVREMAP_HEADERS
+    SPVRemapper.h
+    doc.h)
+
+# The AMD-extension header is only part of the build when AMD extensions
+# are enabled via the ENABLE_AMD_EXTENSIONS option.
+if(ENABLE_AMD_EXTENSIONS)
+    list(APPEND
+         HEADERS
+         GLSL.ext.AMD.h)
+endif(ENABLE_AMD_EXTENSIONS)
+
+# Headers are listed with the library targets so IDE generators (e.g. MSVC)
+# show them alongside the sources.
+add_library(SPIRV STATIC ${SOURCES} ${HEADERS})
+set_property(TARGET SPIRV PROPERTY FOLDER glslang)
+
+add_library(SPVRemapper STATIC ${SPVREMAP_SOURCES} ${SPVREMAP_HEADERS})
+set_property(TARGET SPVRemapper PROPERTY FOLDER glslang)
+
+# Group all files under a single "Source" folder in Visual Studio projects.
+if(WIN32)
+    source_group("Source" FILES ${SOURCES} ${HEADERS})
+    source_group("Source" FILES ${SPVREMAP_SOURCES} ${SPVREMAP_HEADERS})
+endif(WIN32)
+
+# Only static archives are installed; there are no shared-library targets here.
+install(TARGETS SPIRV SPVRemapper
+        ARCHIVE DESTINATION lib)

+ 116 - 0
3rdparty/glslang/SPIRV/GLSL.ext.AMD.h

@@ -0,0 +1,116 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLextAMD_H
+#define GLSLextAMD_H
+
+// Forward declarations only; the real enum definitions come from the core
+// SPIR-V header.  NOTE(review): this header appears to be included inside
+// the spv namespace after spirv.hpp (see GlslangToSpv.cpp) -- confirm that
+// context before including it anywhere else.
+enum BuiltIn;
+enum Decoration;
+enum Op;
+
+// Version/revision of the AMD extension set described by this header.
+static const int GLSLextAMDVersion = 100;
+static const int GLSLextAMDRevision = 2;
+
+// SPV_AMD_shader_ballot
+static const char* const E_SPV_AMD_shader_ballot = "SPV_AMD_shader_ballot";
+
+// Vendor-specific group opcodes added by SPV_AMD_shader_ballot; the values
+// live in the 5000+ vendor range, outside the core Op enumerants.
+static const Op OpGroupIAddNonUniformAMD = static_cast<Op>(5000);
+static const Op OpGroupFAddNonUniformAMD = static_cast<Op>(5001);
+static const Op OpGroupFMinNonUniformAMD = static_cast<Op>(5002);
+static const Op OpGroupUMinNonUniformAMD = static_cast<Op>(5003);
+static const Op OpGroupSMinNonUniformAMD = static_cast<Op>(5004);
+static const Op OpGroupFMaxNonUniformAMD = static_cast<Op>(5005);
+static const Op OpGroupUMaxNonUniformAMD = static_cast<Op>(5006);
+static const Op OpGroupSMaxNonUniformAMD = static_cast<Op>(5007);
+
+// Extended-instruction numbers for the SPV_AMD_shader_ballot instruction set.
+enum ShaderBallotAMD {
+    ShaderBallotBadAMD = 0, // Don't use
+
+    SwizzleInvocationsAMD = 1,
+    SwizzleInvocationsMaskedAMD = 2,
+    WriteInvocationAMD = 3,
+    MbcntAMD = 4,
+
+    ShaderBallotCountAMD // one past the last valid instruction number
+};
+
+// SPV_AMD_shader_trinary_minmax
+static const char* const E_SPV_AMD_shader_trinary_minmax = "SPV_AMD_shader_trinary_minmax";
+
+// Extended-instruction numbers for SPV_AMD_shader_trinary_minmax:
+// three-operand min/max/mid for float (F), unsigned (U), and signed (S) types.
+enum ShaderTrinaryMinMaxAMD {
+    ShaderTrinaryMinMaxBadAMD = 0, // Don't use
+
+    FMin3AMD = 1,
+    UMin3AMD = 2,
+    SMin3AMD = 3,
+    FMax3AMD = 4,
+    UMax3AMD = 5,
+    SMax3AMD = 6,
+    FMid3AMD = 7,
+    UMid3AMD = 8,
+    SMid3AMD = 9,
+
+    ShaderTrinaryMinMaxCountAMD // one past the last valid instruction number
+};
+
+// SPV_AMD_shader_explicit_vertex_parameter
+static const char* const E_SPV_AMD_shader_explicit_vertex_parameter = "SPV_AMD_shader_explicit_vertex_parameter";
+
+// Vendor-range built-in IDs and decoration added by
+// SPV_AMD_shader_explicit_vertex_parameter.
+static const BuiltIn BuiltInBaryCoordNoPerspAMD           = static_cast<BuiltIn>(4992);
+static const BuiltIn BuiltInBaryCoordNoPerspCentroidAMD   = static_cast<BuiltIn>(4993);
+static const BuiltIn BuiltInBaryCoordNoPerspSampleAMD     = static_cast<BuiltIn>(4994);
+static const BuiltIn BuiltInBaryCoordSmoothAMD            = static_cast<BuiltIn>(4995);
+static const BuiltIn BuiltInBaryCoordSmoothCentroidAMD    = static_cast<BuiltIn>(4996);
+static const BuiltIn BuiltInBaryCoordSmoothSampleAMD      = static_cast<BuiltIn>(4997);
+static const BuiltIn BuiltInBaryCoordPullModelAMD         = static_cast<BuiltIn>(4998);
+
+static const Decoration DecorationExplicitInterpAMD       = static_cast<Decoration>(4999);
+
+// Extended-instruction numbers for SPV_AMD_shader_explicit_vertex_parameter.
+enum ShaderExplicitVertexParameterAMD {
+    ShaderExplicitVertexParameterBadAMD = 0, // Don't use
+
+    InterpolateAtVertexAMD = 1,
+
+    ShaderExplicitVertexParameterCountAMD // one past the last valid number
+};
+
+// SPV_AMD_gcn_shader
+static const char* const E_SPV_AMD_gcn_shader = "SPV_AMD_gcn_shader";
+
+// Extended-instruction numbers for SPV_AMD_gcn_shader.
+enum GcnShaderAMD {
+    GcnShaderBadAMD = 0, // Don't use
+
+    CubeFaceIndexAMD = 1,
+    CubeFaceCoordAMD = 2,
+    TimeAMD = 3,
+
+    GcnShaderCountAMD // one past the last valid instruction number
+};
+
+// SPV_AMD_gpu_shader_half_float (extension name string only; it defines no
+// additional enumerants in this header).
+static const char* const E_SPV_AMD_gpu_shader_half_float = "SPV_AMD_gpu_shader_half_float";
+
+#endif  // #ifndef GLSLextAMD_H

+ 36 - 0
3rdparty/glslang/SPIRV/GLSL.ext.KHR.h

@@ -0,0 +1,36 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLextKHR_H
+#define GLSLextKHR_H
+
+// Extension name strings for KHR SPIR-V extensions.
+
+// SPV_KHR_shader_ballot
+static const char* const E_SPV_KHR_shader_ballot                = "SPV_KHR_shader_ballot";
+
+// SPV_KHR_shader_draw_parameters
+static const char* const E_SPV_KHR_shader_draw_parameters       = "SPV_KHR_shader_draw_parameters";
+
+#endif  // #ifndef GLSLextKHR_H

+ 131 - 0
3rdparty/glslang/SPIRV/GLSL.std.450.h

@@ -0,0 +1,131 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ 
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLstd450_H
+#define GLSLstd450_H
+
+// Version/revision of the GLSL.std.450 extended instruction set that this
+// header describes.
+static const int GLSLstd450Version = 100;
+static const int GLSLstd450Revision = 1;
+
+// Instruction numbers for the GLSL.std.450 extended instruction set.
+enum GLSLstd450 {
+    GLSLstd450Bad = 0,              // Don't use
+
+    GLSLstd450Round = 1,
+    GLSLstd450RoundEven = 2,
+    GLSLstd450Trunc = 3,
+    GLSLstd450FAbs = 4,
+    GLSLstd450SAbs = 5,
+    GLSLstd450FSign = 6,
+    GLSLstd450SSign = 7,
+    GLSLstd450Floor = 8,
+    GLSLstd450Ceil = 9,
+    GLSLstd450Fract = 10,
+
+    GLSLstd450Radians = 11,
+    GLSLstd450Degrees = 12,
+    GLSLstd450Sin = 13,
+    GLSLstd450Cos = 14,
+    GLSLstd450Tan = 15,
+    GLSLstd450Asin = 16,
+    GLSLstd450Acos = 17,
+    GLSLstd450Atan = 18,
+    GLSLstd450Sinh = 19,
+    GLSLstd450Cosh = 20,
+    GLSLstd450Tanh = 21,
+    GLSLstd450Asinh = 22,
+    GLSLstd450Acosh = 23,
+    GLSLstd450Atanh = 24,
+    GLSLstd450Atan2 = 25,
+
+    GLSLstd450Pow = 26,
+    GLSLstd450Exp = 27,
+    GLSLstd450Log = 28,
+    GLSLstd450Exp2 = 29,
+    GLSLstd450Log2 = 30,
+    GLSLstd450Sqrt = 31,
+    GLSLstd450InverseSqrt = 32,
+
+    GLSLstd450Determinant = 33,
+    GLSLstd450MatrixInverse = 34,
+
+    GLSLstd450Modf = 35,            // second operand needs an OpVariable to write to
+    GLSLstd450ModfStruct = 36,      // no OpVariable operand
+    GLSLstd450FMin = 37,
+    GLSLstd450UMin = 38,
+    GLSLstd450SMin = 39,
+    GLSLstd450FMax = 40,
+    GLSLstd450UMax = 41,
+    GLSLstd450SMax = 42,
+    GLSLstd450FClamp = 43,
+    GLSLstd450UClamp = 44,
+    GLSLstd450SClamp = 45,
+    GLSLstd450FMix = 46,
+    GLSLstd450IMix = 47,            // Reserved
+    GLSLstd450Step = 48,
+    GLSLstd450SmoothStep = 49,
+
+    GLSLstd450Fma = 50,
+    GLSLstd450Frexp = 51,            // second operand needs an OpVariable to write to
+    GLSLstd450FrexpStruct = 52,      // no OpVariable operand
+    GLSLstd450Ldexp = 53,
+
+    GLSLstd450PackSnorm4x8 = 54,
+    GLSLstd450PackUnorm4x8 = 55,
+    GLSLstd450PackSnorm2x16 = 56,
+    GLSLstd450PackUnorm2x16 = 57,
+    GLSLstd450PackHalf2x16 = 58,
+    GLSLstd450PackDouble2x32 = 59,
+    GLSLstd450UnpackSnorm2x16 = 60,
+    GLSLstd450UnpackUnorm2x16 = 61,
+    GLSLstd450UnpackHalf2x16 = 62,
+    GLSLstd450UnpackSnorm4x8 = 63,
+    GLSLstd450UnpackUnorm4x8 = 64,
+    GLSLstd450UnpackDouble2x32 = 65,
+
+    GLSLstd450Length = 66,
+    GLSLstd450Distance = 67,
+    GLSLstd450Cross = 68,
+    GLSLstd450Normalize = 69,
+    GLSLstd450FaceForward = 70,
+    GLSLstd450Reflect = 71,
+    GLSLstd450Refract = 72,
+
+    GLSLstd450FindILsb = 73,
+    GLSLstd450FindSMsb = 74,
+    GLSLstd450FindUMsb = 75,
+
+    GLSLstd450InterpolateAtCentroid = 76,
+    GLSLstd450InterpolateAtSample = 77,
+    GLSLstd450InterpolateAtOffset = 78,
+
+    GLSLstd450NMin = 79,
+    GLSLstd450NMax = 80,
+    GLSLstd450NClamp = 81,
+
+    GLSLstd450Count // one past the last valid instruction number
+};
+
+#endif  // #ifndef GLSLstd450_H

+ 5131 - 0
3rdparty/glslang/SPIRV/GlslangToSpv.cpp

@@ -0,0 +1,5131 @@
+//
+//Copyright (C) 2014-2016 LunarG, Inc.
+//Copyright (C) 2015-2016 Google, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Visit the nodes in the glslang intermediate tree representation to
+// translate them to SPIR-V.
+//
+
+#include "spirv.hpp"
+#include "GlslangToSpv.h"
+#include "SpvBuilder.h"
+namespace spv {
+    #include "GLSL.std.450.h"
+    #include "GLSL.ext.KHR.h"
+#ifdef AMD_EXTENSIONS
+    #include "GLSL.ext.AMD.h"
+#endif
+}
+
+// Glslang includes
+#include "../glslang/MachineIndependent/localintermediate.h"
+#include "../glslang/MachineIndependent/SymbolTable.h"
+#include "../glslang/Include/Common.h"
+#include "../glslang/Include/revision.h"
+
+#include <fstream>
+#include <iomanip>
+#include <list>
+#include <map>
+#include <stack>
+#include <string>
+#include <vector>
+
+namespace {
+
+// For low-order part of the generator's magic number. Bump up
+// when there is a change in the style (e.g., if SSA form changes,
+// or a different instruction sequence to do something gets used).
+const int GeneratorVersion = 1;
+
+namespace {
+// RAII guard around the builder's spec-constant-op code-generation mode:
+// records the mode at construction and restores that recorded mode at
+// destruction.  In between, turnOnSpecConstantOpMode() can switch the
+// builder into spec-constant mode for a scoped stretch of code generation.
+class SpecConstantOpModeGuard {
+public:
+    SpecConstantOpModeGuard(spv::Builder* builder)
+        : builder_(builder) {
+        previous_flag_ = builder->isInSpecConstCodeGenMode();
+    }
+    // Restore whichever mode was in effect when the guard was created.
+    ~SpecConstantOpModeGuard() {
+        previous_flag_ ? builder_->setToSpecConstCodeGenMode()
+                       : builder_->setToNormalCodeGenMode();
+    }
+    void turnOnSpecConstantOpMode() {
+        builder_->setToSpecConstCodeGenMode();
+    }
+
+private:
+    spv::Builder* builder_;
+    bool previous_flag_;  // mode captured at construction time
+};
+}
+
+//
+// The main holder of information for translating glslang to SPIR-V.
+//
+// Derives from the AST walking base class.
+//
+class TGlslangToSpvTraverser : public glslang::TIntermTraverser {
+public:
+    TGlslangToSpvTraverser(const glslang::TIntermediate*, spv::SpvBuildLogger* logger);
+    virtual ~TGlslangToSpvTraverser();
+
+    // TIntermTraverser overrides: one visit* entry per AST node kind.
+    bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate*);
+    bool visitBinary(glslang::TVisit, glslang::TIntermBinary*);
+    void visitConstantUnion(glslang::TIntermConstantUnion*);
+    bool visitSelection(glslang::TVisit, glslang::TIntermSelection*);
+    bool visitSwitch(glslang::TVisit, glslang::TIntermSwitch*);
+    void visitSymbol(glslang::TIntermSymbol* symbol);
+    bool visitUnary(glslang::TVisit, glslang::TIntermUnary*);
+    bool visitLoop(glslang::TVisit, glslang::TIntermLoop*);
+    bool visitBranch(glslang::TVisit visit, glslang::TIntermBranch*);
+
+    // Emit the accumulated SPIR-V words into 'out'.
+    void dumpSpv(std::vector<unsigned int>& out);
+
+protected:
+    // glslang qualifier/type -> SPIR-V decoration/enumerant translators.
+    spv::Decoration TranslateInterpolationDecoration(const glslang::TQualifier& qualifier);
+    spv::Decoration TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier);
+    spv::BuiltIn TranslateBuiltInDecoration(glslang::TBuiltInVariable, bool memberDeclaration);
+    spv::ImageFormat TranslateImageFormat(const glslang::TType& type);
+    spv::Id createSpvVariable(const glslang::TIntermSymbol*);
+    spv::Id getSampledType(const glslang::TSampler&);
+    spv::Id getInvertedSwizzleType(const glslang::TIntermTyped&);
+    spv::Id createInvertedSwizzle(spv::Decoration precision, const glslang::TIntermTyped&, spv::Id parentResult);
+    void convertSwizzle(const glslang::TIntermAggregate&, std::vector<unsigned>& swizzle);
+    // Type conversion: glslang TType -> SPIR-V type id (with optional explicit layout).
+    spv::Id convertGlslangToSpvType(const glslang::TType& type);
+    spv::Id convertGlslangToSpvType(const glslang::TType& type, glslang::TLayoutPacking, const glslang::TQualifier&);
+    spv::Id convertGlslangStructToSpvType(const glslang::TType&, const glslang::TTypeList* glslangStruct,
+                                          glslang::TLayoutPacking, const glslang::TQualifier&);
+    void decorateStructType(const glslang::TType&, const glslang::TTypeList* glslangStruct, glslang::TLayoutPacking,
+                            const glslang::TQualifier&, spv::Id);
+    spv::Id makeArraySizeId(const glslang::TArraySizes&, int dim);
+    // Access-chain helpers: load/store through the builder's current access chain.
+    spv::Id accessChainLoad(const glslang::TType& type);
+    void    accessChainStore(const glslang::TType& type, spv::Id rvalue);
+    void multiTypeStore(const glslang::TType&, spv::Id rValue);
+    // Explicit layout (std140/std430/...) queries for offsets and strides.
+    glslang::TLayoutPacking getExplicitLayout(const glslang::TType& type) const;
+    int getArrayStride(const glslang::TType& arrayType, glslang::TLayoutPacking, glslang::TLayoutMatrix);
+    int getMatrixStride(const glslang::TType& matrixType, glslang::TLayoutPacking, glslang::TLayoutMatrix);
+    void updateMemberOffset(const glslang::TType& structType, const glslang::TType& memberType, int& currentOffset, int& nextOffset, glslang::TLayoutPacking, glslang::TLayoutMatrix);
+    void declareUseOfStructMember(const glslang::TTypeList& members, int glslangMember);
+
+    // Function/call translation.
+    bool isShaderEntryPoint(const glslang::TIntermAggregate* node);
+    void makeFunctions(const glslang::TIntermSequence&);
+    void makeGlobalInitializers(const glslang::TIntermSequence&);
+    void visitFunctions(const glslang::TIntermSequence&);
+    void handleFunctionEntry(const glslang::TIntermAggregate* node);
+    void translateArguments(const glslang::TIntermAggregate& node, std::vector<spv::Id>& arguments);
+    void translateArguments(glslang::TIntermUnary& node, std::vector<spv::Id>& arguments);
+    spv::Id createImageTextureFunctionCall(glslang::TIntermOperator* node);
+    spv::Id handleUserFunctionCall(const glslang::TIntermAggregate*);
+
+    // Operation builders: map glslang operators onto SPIR-V instructions.
+    spv::Id createBinaryOperation(glslang::TOperator op, spv::Decoration precision, spv::Decoration noContraction, spv::Id typeId, spv::Id left, spv::Id right, glslang::TBasicType typeProxy, bool reduceComparison = true);
+    spv::Id createBinaryMatrixOperation(spv::Op, spv::Decoration precision, spv::Decoration noContraction, spv::Id typeId, spv::Id left, spv::Id right);
+    spv::Id createUnaryOperation(glslang::TOperator op, spv::Decoration precision, spv::Decoration noContraction, spv::Id typeId, spv::Id operand,glslang::TBasicType typeProxy);
+    spv::Id createUnaryMatrixOperation(spv::Op op, spv::Decoration precision, spv::Decoration noContraction, spv::Id typeId, spv::Id operand,glslang::TBasicType typeProxy);
+    spv::Id createConversion(glslang::TOperator op, spv::Decoration precision, spv::Decoration noContraction, spv::Id destTypeId, spv::Id operand, glslang::TBasicType typeProxy);
+    spv::Id makeSmearedConstant(spv::Id constant, int vectorSize);
+    spv::Id createAtomicOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy);
+    spv::Id createInvocationsOperation(glslang::TOperator op, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy);
+    spv::Id CreateInvocationsVectorOperation(spv::Op op, spv::Id typeId, std::vector<spv::Id>& operands);
+    spv::Id createMiscOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy);
+    spv::Id createNoArgOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId);
+    spv::Id getSymbolId(const glslang::TIntermSymbol* node);
+    void addDecoration(spv::Id id, spv::Decoration dec);
+    void addDecoration(spv::Id id, spv::Decoration dec, unsigned value);
+    void addMemberDecoration(spv::Id id, int member, spv::Decoration dec);
+    void addMemberDecoration(spv::Id id, int member, spv::Decoration dec, unsigned value);
+    spv::Id createSpvConstant(const glslang::TIntermTyped&);
+    spv::Id createSpvConstantFromConstUnionArray(const glslang::TType& type, const glslang::TConstUnionArray&, int& nextConst, bool specConstant);
+    bool isTrivialLeaf(const glslang::TIntermTyped* node);
+    bool isTrivial(const glslang::TIntermTyped* node);
+    spv::Id createShortCircuit(glslang::TOperator, glslang::TIntermTyped& left, glslang::TIntermTyped& right);
+    spv::Id getExtBuiltins(const char* name);
+
+    // Translation state.
+    spv::Function* shaderEntry;
+    spv::Function* currentFunction;
+    spv::Instruction* entryPoint;
+    int sequenceDepth;
+
+    spv::SpvBuildLogger* logger;
+
+    // There is a 1:1 mapping between a spv builder and a module; this is thread safe
+    spv::Builder builder;
+    bool inMain;
+    bool mainTerminated;
+    bool linkageOnly;                  // true when visiting the set of objects in the AST present only for establishing interface, whether or not they were statically used
+    std::set<spv::Id> iOSet;           // all input/output variables from either static use or declaration of interface
+    const glslang::TIntermediate* glslangIntermediate;
+    spv::Id stdBuiltins;
+    std::unordered_map<const char*, spv::Id> extBuiltinMap;
+
+    std::unordered_map<int, spv::Id> symbolValues;
+    std::unordered_set<int> rValueParameters;  // set of formal function parameters passed as rValues, rather than a pointer
+    std::unordered_map<std::string, spv::Function*> functionMap;
+    std::unordered_map<const glslang::TTypeList*, spv::Id> structMap[glslang::ElpCount][glslang::ElmCount];
+    std::unordered_map<const glslang::TTypeList*, std::vector<int> > memberRemapper;  // for mapping glslang block indices to spv indices (e.g., due to hidden members)
+    std::stack<bool> breakForLoop;  // false means break for switch
+};
+
+//
+// Helper functions for translating glslang representations to SPIR-V enumerants.
+//
+
+// Translate glslang profile to SPIR-V source language.
+spv::SourceLanguage TranslateSourceLanguage(glslang::EShSource source, EProfile profile)
+{
+    if (source == glslang::EShSourceHlsl) {
+        // Use SourceLanguageUnknown instead of SourceLanguageHLSL for now,
+        // until Vulkan knows what HLSL is.
+        return spv::SourceLanguageUnknown;
+    }
+
+    if (source == glslang::EShSourceGlsl) {
+        switch (profile) {
+        case ENoProfile:
+        case ECoreProfile:
+        case ECompatibilityProfile:
+            return spv::SourceLanguageGLSL;
+        case EEsProfile:
+            return spv::SourceLanguageESSL;
+        default:
+            break;
+        }
+    }
+
+    // Unrecognized source language or profile.
+    return spv::SourceLanguageUnknown;
+}
+
+// Translate glslang language (stage) to SPIR-V execution model.
+spv::ExecutionModel TranslateExecutionModel(EShLanguage stage)
+{
+    switch (stage) {
+    case EShLangVertex:
+        return spv::ExecutionModelVertex;
+    case EShLangTessControl:
+        return spv::ExecutionModelTessellationControl;
+    case EShLangTessEvaluation:
+        return spv::ExecutionModelTessellationEvaluation;
+    case EShLangGeometry:
+        return spv::ExecutionModelGeometry;
+    case EShLangFragment:
+        return spv::ExecutionModelFragment;
+    case EShLangCompute:
+        return spv::ExecutionModelGLCompute;
+    default:
+        break;
+    }
+
+    // Unknown stage: flag it in debug builds, fall back to fragment.
+    assert(0);
+    return spv::ExecutionModelFragment;
+}
+
+// Translate glslang type to SPIR-V storage class.
+//
+// Pipeline inputs/outputs, samplers, and atomic counters are classified
+// first; uniform/buffer objects next; everything else is classified from
+// the raw storage qualifier.
+spv::StorageClass TranslateStorageClass(const glslang::TType& type)
+{
+    if (type.getQualifier().isPipeInput())
+        return spv::StorageClassInput;
+    if (type.getQualifier().isPipeOutput())
+        return spv::StorageClassOutput;
+    if (type.getBasicType() == glslang::EbtSampler)
+        return spv::StorageClassUniformConstant;
+    if (type.getBasicType() == glslang::EbtAtomicUint)
+        return spv::StorageClassAtomicCounter;
+
+    if (type.getQualifier().isUniformOrBuffer()) {
+        if (type.getQualifier().layoutPushConstant)
+            return spv::StorageClassPushConstant;
+        if (type.getBasicType() == glslang::EbtBlock)
+            return spv::StorageClassUniform;
+        return spv::StorageClassUniformConstant;
+        // TODO: how are we distinguishing between default and non-default non-writable uniforms?  Do default uniforms even exist?
+    }
+
+    switch (type.getQualifier().storage) {
+    case glslang::EvqShared:        return spv::StorageClassWorkgroup;
+    case glslang::EvqGlobal:        return spv::StorageClassPrivate;
+    case glslang::EvqConstReadOnly: return spv::StorageClassFunction;
+    case glslang::EvqTemporary:     return spv::StorageClassFunction;
+    default:
+        assert(0);
+        return spv::StorageClassFunction;
+    }
+}
+
+// Translate glslang sampler type to SPIR-V dimensionality.
+spv::Dim TranslateDimensionality(const glslang::TSampler& sampler)
+{
+    switch (sampler.dim) {
+    case glslang::Esd1D:
+        return spv::Dim1D;
+    case glslang::Esd2D:
+        return spv::Dim2D;
+    case glslang::Esd3D:
+        return spv::Dim3D;
+    case glslang::EsdCube:
+        return spv::DimCube;
+    case glslang::EsdRect:
+        return spv::DimRect;
+    case glslang::EsdBuffer:
+        return spv::DimBuffer;
+    case glslang::EsdSubpass:
+        return spv::DimSubpassData;
+    default:
+        break;
+    }
+
+    // Unknown dimensionality: flag it in debug builds, fall back to 2D.
+    assert(0);
+    return spv::Dim2D;
+}
+
+// Translate glslang precision to SPIR-V precision decorations.
+spv::Decoration TranslatePrecisionDecoration(glslang::TPrecisionQualifier glslangPrecision)
+{
+    // lowp and mediump both map to RelaxedPrecision; highp and
+    // "no precision" carry no decoration at all.
+    const bool relaxed = glslangPrecision == glslang::EpqLow ||
+                         glslangPrecision == glslang::EpqMedium;
+    return relaxed ? spv::DecorationRelaxedPrecision : spv::NoPrecision;
+}
+
+// Translate glslang type to SPIR-V precision decorations.
+spv::Decoration TranslatePrecisionDecoration(const glslang::TType& type)
+{
+    // Convenience overload: pull the precision qualifier out of the type.
+    const glslang::TPrecisionQualifier precision = type.getQualifier().precision;
+    return TranslatePrecisionDecoration(precision);
+}
+
+// Translate glslang type to SPIR-V block decorations.
+// Only block types get a decoration; everything else yields DecorationMax.
+spv::Decoration TranslateBlockDecoration(const glslang::TType& type)
+{
+    if (type.getBasicType() != glslang::EbtBlock)
+        return spv::DecorationMax;
+
+    switch (type.getQualifier().storage) {
+    case glslang::EvqUniform:    return spv::DecorationBlock;
+    case glslang::EvqBuffer:     return spv::DecorationBufferBlock;
+    case glslang::EvqVaryingIn:  return spv::DecorationBlock;
+    case glslang::EvqVaryingOut: return spv::DecorationBlock;
+    default:
+        // A block with any other storage qualifier is unexpected.
+        assert(0);
+        break;
+    }
+
+    return spv::DecorationMax;
+}
+
+// Translate glslang memory qualifiers to SPIR-V memory decorations,
+// appending one decoration per set qualifier flag.
+void TranslateMemoryDecoration(const glslang::TQualifier& qualifier, std::vector<spv::Decoration>& memory)
+{
+    const struct {
+        bool present;
+        spv::Decoration decoration;
+    } mappings[] = {
+        { qualifier.coherent,  spv::DecorationCoherent },
+        { qualifier.volatil,   spv::DecorationVolatile },
+        { qualifier.restrict,  spv::DecorationRestrict },
+        { qualifier.readonly,  spv::DecorationNonWritable },
+        { qualifier.writeonly, spv::DecorationNonReadable },
+    };
+
+    for (const auto& mapping : mappings) {
+        if (mapping.present)
+            memory.push_back(mapping.decoration);
+    }
+}
+
+// Translate glslang type to SPIR-V layout decorations.
+// Matrices get a majorness decoration (from the given matrixLayout);
+// uniform/buffer blocks get shared/packed decorations from their layout
+// qualifier; all other cases produce no decoration (spv::DecorationMax).
+spv::Decoration TranslateLayoutDecoration(const glslang::TType& type, glslang::TLayoutMatrix matrixLayout)
+{
+    if (type.isMatrix()) {
+        switch (matrixLayout) {
+        case glslang::ElmRowMajor:
+            return spv::DecorationRowMajor;
+        case glslang::ElmColumnMajor:
+            return spv::DecorationColMajor;
+        default:
+            // opaque layouts don't need a majorness
+            return spv::DecorationMax;
+        }
+    } else {
+        switch (type.getBasicType()) {
+        default:
+            // non-matrix, non-block types carry no layout decoration
+            return spv::DecorationMax;
+            break;
+        case glslang::EbtBlock:
+            switch (type.getQualifier().storage) {
+            case glslang::EvqUniform:
+            case glslang::EvqBuffer:
+                switch (type.getQualifier().layoutPacking) {
+                case glslang::ElpShared:  return spv::DecorationGLSLShared;
+                case glslang::ElpPacked:  return spv::DecorationGLSLPacked;
+                default:
+                    // other packings produce no block-level decoration here
+                    return spv::DecorationMax;
+                }
+            case glslang::EvqVaryingIn:
+            case glslang::EvqVaryingOut:
+                // in/out blocks are expected to have no packing layout
+                assert(type.getQualifier().layoutPacking == glslang::ElpNone);
+                return spv::DecorationMax;
+            default:
+                assert(0);
+                return spv::DecorationMax;
+            }
+        }
+    }
+}
+
+// Translate glslang type to SPIR-V interpolation decorations.
+// Returns spv::DecorationMax when no decoration
+// should be applied.
+// Note: emitting the AMD explicit-interpolation decoration also registers
+// the corresponding SPIR-V extension on the module as a side effect.
+spv::Decoration TGlslangToSpvTraverser::TranslateInterpolationDecoration(const glslang::TQualifier& qualifier)
+{
+    if (qualifier.smooth)
+        // Smooth decoration doesn't exist in SPIR-V 1.0
+        return spv::DecorationMax;
+    else if (qualifier.nopersp)
+        return spv::DecorationNoPerspective;
+    else if (qualifier.flat)
+        return spv::DecorationFlat;
+#ifdef AMD_EXTENSIONS
+    else if (qualifier.explicitInterp) {
+        builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+        return spv::DecorationExplicitInterpAMD;
+    }
+#endif
+    else
+        return spv::DecorationMax;
+}
+
+// Translate glslang auxiliary storage qualifiers to SPIR-V decorations.
+// Returns spv::DecorationMax when no decoration should be applied.
+spv::Decoration TGlslangToSpvTraverser::TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier)
+{
+    if (qualifier.patch)
+        return spv::DecorationPatch;
+    if (qualifier.centroid)
+        return spv::DecorationCentroid;
+    if (qualifier.sample) {
+        // Per-sample interpolation requires the sample-rate-shading capability.
+        builder.addCapability(spv::CapabilitySampleRateShading);
+        return spv::DecorationSample;
+    }
+    return spv::DecorationMax;
+}
+
+// If glslang type is invariant, return SPIR-V invariant decoration.
+spv::Decoration TranslateInvariantDecoration(const glslang::TQualifier& qualifier)
+{
+    return qualifier.invariant ? spv::DecorationInvariant : spv::DecorationMax;
+}
+
+// If glslang type is noContraction, return SPIR-V NoContraction decoration.
+spv::Decoration TranslateNoContractionDecoration(const glslang::TQualifier& qualifier)
+{
+    return qualifier.noContraction ? spv::DecorationNoContraction : spv::DecorationMax;
+}
+
+// Translate a glslang built-in variable to a SPIR-V built in decoration.  Also generate
+// associated capabilities when required.  For some built-in variables, a capability
+// is generated only when using the variable in an executable instruction, but not when
+// just declaring a struct member variable with it.  This is true for PointSize,
+// ClipDistance, and CullDistance.
+spv::BuiltIn TGlslangToSpvTraverser::TranslateBuiltInDecoration(glslang::TBuiltInVariable builtIn, bool memberDeclaration)
+{
+    switch (builtIn) {
+    case glslang::EbvPointSize:
+        // Defer adding the capability until the built-in is actually used.
+        if (! memberDeclaration) {
+            switch (glslangIntermediate->getStage()) {
+            case EShLangGeometry:
+                builder.addCapability(spv::CapabilityGeometryPointSize);
+                break;
+            case EShLangTessControl:
+            case EShLangTessEvaluation:
+                builder.addCapability(spv::CapabilityTessellationPointSize);
+                break;
+            default:
+                break;
+            }
+        }
+        return spv::BuiltInPointSize;
+
+    // These *Distance capabilities logically belong here, but if the member is declared and
+    // then never used, consumers of SPIR-V prefer the capability not be declared.
+    // They are now generated when used, rather than here when declared.
+    // Potentially, the specification should be more clear what the minimum
+    // use needed is to trigger the capability.
+    //
+    case glslang::EbvClipDistance:
+        if (!memberDeclaration) {
+            builder.addCapability(spv::CapabilityClipDistance);
+        }
+        return spv::BuiltInClipDistance;
+
+    case glslang::EbvCullDistance:
+        if (!memberDeclaration) {
+            builder.addCapability(spv::CapabilityCullDistance);
+        }
+        return spv::BuiltInCullDistance;
+
+    case glslang::EbvViewportIndex:
+        builder.addCapability(spv::CapabilityMultiViewport);
+        return spv::BuiltInViewportIndex;
+
+    case glslang::EbvSampleId:
+        builder.addCapability(spv::CapabilitySampleRateShading);
+        return spv::BuiltInSampleId;
+
+    case glslang::EbvSamplePosition:
+        builder.addCapability(spv::CapabilitySampleRateShading);
+        return spv::BuiltInSamplePosition;
+
+    case glslang::EbvSampleMask:
+        builder.addCapability(spv::CapabilitySampleRateShading);
+        return spv::BuiltInSampleMask;
+
+    case glslang::EbvLayer:
+        builder.addCapability(spv::CapabilityGeometry);
+        return spv::BuiltInLayer;
+
+    case glslang::EbvPosition:             return spv::BuiltInPosition;
+    case glslang::EbvVertexId:             return spv::BuiltInVertexId;
+    case glslang::EbvInstanceId:           return spv::BuiltInInstanceId;
+    case glslang::EbvVertexIndex:          return spv::BuiltInVertexIndex;
+    case glslang::EbvInstanceIndex:        return spv::BuiltInInstanceIndex;
+
+    // Draw parameters require the SPV_KHR_shader_draw_parameters extension.
+    case glslang::EbvBaseVertex:
+        builder.addExtension(spv::E_SPV_KHR_shader_draw_parameters);
+        builder.addCapability(spv::CapabilityDrawParameters);
+        return spv::BuiltInBaseVertex;
+
+    case glslang::EbvBaseInstance:
+        builder.addExtension(spv::E_SPV_KHR_shader_draw_parameters);
+        builder.addCapability(spv::CapabilityDrawParameters);
+        return spv::BuiltInBaseInstance;
+
+    case glslang::EbvDrawId:
+        builder.addExtension(spv::E_SPV_KHR_shader_draw_parameters);
+        builder.addCapability(spv::CapabilityDrawParameters);
+        return spv::BuiltInDrawIndex;
+
+    case glslang::EbvPrimitiveId:
+        // PrimitiveId in the fragment stage requires the Geometry capability.
+        if (glslangIntermediate->getStage() == EShLangFragment)
+            builder.addCapability(spv::CapabilityGeometry);
+        return spv::BuiltInPrimitiveId;
+
+    case glslang::EbvInvocationId:         return spv::BuiltInInvocationId;
+    case glslang::EbvTessLevelInner:       return spv::BuiltInTessLevelInner;
+    case glslang::EbvTessLevelOuter:       return spv::BuiltInTessLevelOuter;
+    case glslang::EbvTessCoord:            return spv::BuiltInTessCoord;
+    case glslang::EbvPatchVertices:        return spv::BuiltInPatchVertices;
+    case glslang::EbvFragCoord:            return spv::BuiltInFragCoord;
+    case glslang::EbvPointCoord:           return spv::BuiltInPointCoord;
+    case glslang::EbvFace:                 return spv::BuiltInFrontFacing;
+    case glslang::EbvFragDepth:            return spv::BuiltInFragDepth;
+    case glslang::EbvHelperInvocation:     return spv::BuiltInHelperInvocation;
+    case glslang::EbvNumWorkGroups:        return spv::BuiltInNumWorkgroups;
+    case glslang::EbvWorkGroupSize:        return spv::BuiltInWorkgroupSize;
+    case glslang::EbvWorkGroupId:          return spv::BuiltInWorkgroupId;
+    case glslang::EbvLocalInvocationId:    return spv::BuiltInLocalInvocationId;
+    case glslang::EbvLocalInvocationIndex: return spv::BuiltInLocalInvocationIndex;
+    case glslang::EbvGlobalInvocationId:   return spv::BuiltInGlobalInvocationId;
+
+    // Subgroup built-ins require the SPV_KHR_shader_ballot extension.
+    case glslang::EbvSubGroupSize:
+        builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+        builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+        return spv::BuiltInSubgroupSize;
+
+    case glslang::EbvSubGroupInvocation:
+        builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+        builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+        return spv::BuiltInSubgroupLocalInvocationId;
+
+    case glslang::EbvSubGroupEqMask:
+        builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+        builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+        return spv::BuiltInSubgroupEqMaskKHR;
+
+    case glslang::EbvSubGroupGeMask:
+        builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+        builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+        return spv::BuiltInSubgroupGeMaskKHR;
+
+    case glslang::EbvSubGroupGtMask:
+        builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+        builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+        return spv::BuiltInSubgroupGtMaskKHR;
+
+    case glslang::EbvSubGroupLeMask:
+        builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+        builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+        return spv::BuiltInSubgroupLeMaskKHR;
+
+    case glslang::EbvSubGroupLtMask:
+        builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+        builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+        return spv::BuiltInSubgroupLtMaskKHR;
+
+#ifdef AMD_EXTENSIONS
+    // Barycentric built-ins require SPV_AMD_shader_explicit_vertex_parameter.
+    case glslang::EbvBaryCoordNoPersp:
+        builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+        return spv::BuiltInBaryCoordNoPerspAMD;
+
+    case glslang::EbvBaryCoordNoPerspCentroid:
+        builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+        return spv::BuiltInBaryCoordNoPerspCentroidAMD;
+
+    case glslang::EbvBaryCoordNoPerspSample:
+        builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+        return spv::BuiltInBaryCoordNoPerspSampleAMD;
+
+    case glslang::EbvBaryCoordSmooth:
+        builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+        return spv::BuiltInBaryCoordSmoothAMD;
+
+    case glslang::EbvBaryCoordSmoothCentroid:
+        builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+        return spv::BuiltInBaryCoordSmoothCentroidAMD;
+
+    case glslang::EbvBaryCoordSmoothSample:
+        builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+        return spv::BuiltInBaryCoordSmoothSampleAMD;
+
+    case glslang::EbvBaryCoordPullModel:
+        builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+        return spv::BuiltInBaryCoordPullModelAMD;
+#endif
+    default:                               return spv::BuiltInMax;
+    }
+}
+
+// Translate glslang image layout format to SPIR-V image format.
+// Also adds StorageImageExtendedFormats to the module when the format
+// requires it; formats with no mapping yield spv::ImageFormatMax.
+spv::ImageFormat TGlslangToSpvTraverser::TranslateImageFormat(const glslang::TType& type)
+{
+    assert(type.getBasicType() == glslang::EbtSampler);
+
+    // Check for capabilities
+    switch (type.getQualifier().layoutFormat) {
+    case glslang::ElfRg32f:
+    case glslang::ElfRg16f:
+    case glslang::ElfR11fG11fB10f:
+    case glslang::ElfR16f:
+    case glslang::ElfRgba16:
+    case glslang::ElfRgb10A2:
+    case glslang::ElfRg16:
+    case glslang::ElfRg8:
+    case glslang::ElfR16:
+    case glslang::ElfR8:
+    case glslang::ElfRgba16Snorm:
+    case glslang::ElfRg16Snorm:
+    case glslang::ElfRg8Snorm:
+    case glslang::ElfR16Snorm:
+    case glslang::ElfR8Snorm:
+
+    case glslang::ElfRg32i:
+    case glslang::ElfRg16i:
+    case glslang::ElfRg8i:
+    case glslang::ElfR16i:
+    case glslang::ElfR8i:
+
+    case glslang::ElfRgb10a2ui:
+    case glslang::ElfRg32ui:
+    case glslang::ElfRg16ui:
+    case glslang::ElfRg8ui:
+    case glslang::ElfR16ui:
+    case glslang::ElfR8ui:
+        // these formats are only available with the extended-formats capability
+        builder.addCapability(spv::CapabilityStorageImageExtendedFormats);
+        break;
+
+    default:
+        break;
+    }
+
+    // do the translation
+    switch (type.getQualifier().layoutFormat) {
+    case glslang::ElfNone:          return spv::ImageFormatUnknown;
+    case glslang::ElfRgba32f:       return spv::ImageFormatRgba32f;
+    case glslang::ElfRgba16f:       return spv::ImageFormatRgba16f;
+    case glslang::ElfR32f:          return spv::ImageFormatR32f;
+    case glslang::ElfRgba8:         return spv::ImageFormatRgba8;
+    case glslang::ElfRgba8Snorm:    return spv::ImageFormatRgba8Snorm;
+    case glslang::ElfRg32f:         return spv::ImageFormatRg32f;
+    case glslang::ElfRg16f:         return spv::ImageFormatRg16f;
+    case glslang::ElfR11fG11fB10f:  return spv::ImageFormatR11fG11fB10f;
+    case glslang::ElfR16f:          return spv::ImageFormatR16f;
+    case glslang::ElfRgba16:        return spv::ImageFormatRgba16;
+    case glslang::ElfRgb10A2:       return spv::ImageFormatRgb10A2;
+    case glslang::ElfRg16:          return spv::ImageFormatRg16;
+    case glslang::ElfRg8:           return spv::ImageFormatRg8;
+    case glslang::ElfR16:           return spv::ImageFormatR16;
+    case glslang::ElfR8:            return spv::ImageFormatR8;
+    case glslang::ElfRgba16Snorm:   return spv::ImageFormatRgba16Snorm;
+    case glslang::ElfRg16Snorm:     return spv::ImageFormatRg16Snorm;
+    case glslang::ElfRg8Snorm:      return spv::ImageFormatRg8Snorm;
+    case glslang::ElfR16Snorm:      return spv::ImageFormatR16Snorm;
+    case glslang::ElfR8Snorm:       return spv::ImageFormatR8Snorm;
+    case glslang::ElfRgba32i:       return spv::ImageFormatRgba32i;
+    case glslang::ElfRgba16i:       return spv::ImageFormatRgba16i;
+    case glslang::ElfRgba8i:        return spv::ImageFormatRgba8i;
+    case glslang::ElfR32i:          return spv::ImageFormatR32i;
+    case glslang::ElfRg32i:         return spv::ImageFormatRg32i;
+    case glslang::ElfRg16i:         return spv::ImageFormatRg16i;
+    case glslang::ElfRg8i:          return spv::ImageFormatRg8i;
+    case glslang::ElfR16i:          return spv::ImageFormatR16i;
+    case glslang::ElfR8i:           return spv::ImageFormatR8i;
+    case glslang::ElfRgba32ui:      return spv::ImageFormatRgba32ui;
+    case glslang::ElfRgba16ui:      return spv::ImageFormatRgba16ui;
+    case glslang::ElfRgba8ui:       return spv::ImageFormatRgba8ui;
+    case glslang::ElfR32ui:         return spv::ImageFormatR32ui;
+    case glslang::ElfRg32ui:        return spv::ImageFormatRg32ui;
+    case glslang::ElfRg16ui:        return spv::ImageFormatRg16ui;
+    case glslang::ElfRgb10a2ui:     return spv::ImageFormatRgb10a2ui;
+    case glslang::ElfRg8ui:         return spv::ImageFormatRg8ui;
+    case glslang::ElfR16ui:         return spv::ImageFormatR16ui;
+    case glslang::ElfR8ui:          return spv::ImageFormatR8ui;
+    default:                        return spv::ImageFormatMax;
+    }
+}
+
+// Return whether or not the given type is something that should be tied to a
+// descriptor set.
+bool IsDescriptorResource(const glslang::TType& type)
+{
+    const glslang::TQualifier& qualifier = type.getQualifier();
+
+    switch (type.getBasicType()) {
+    case glslang::EbtBlock:
+        // uniform and buffer blocks are included, unless it is a push_constant
+        return qualifier.isUniformOrBuffer() && ! qualifier.layoutPushConstant;
+    case glslang::EbtSampler:
+        // samplerXXX/subpass/sampler/texture are all included when they are
+        // the global-scope-class, not the function parameter
+        // (or local, if they ever exist) class
+        return qualifier.isUniformOrBuffer();
+    default:
+        // None of the above.
+        return false;
+    }
+}
+
+// Propagate inheritable qualifiers from a parent (e.g., a block declaration)
+// onto a member's qualifier.  Boolean qualifiers are "sticky": once set on
+// the parent, they are forced on in the child.  Matrix layout is inherited
+// only when the child declares none of its own.
+void InheritQualifiers(glslang::TQualifier& child, const glslang::TQualifier& parent)
+{
+    if (child.layoutMatrix == glslang::ElmNone)
+        child.layoutMatrix = parent.layoutMatrix;
+
+    child.invariant = child.invariant || parent.invariant;
+    child.nopersp   = child.nopersp   || parent.nopersp;
+#ifdef AMD_EXTENSIONS
+    child.explicitInterp = child.explicitInterp || parent.explicitInterp;
+#endif
+    child.flat      = child.flat      || parent.flat;
+    child.centroid  = child.centroid  || parent.centroid;
+    child.patch     = child.patch     || parent.patch;
+    child.sample    = child.sample    || parent.sample;
+    child.coherent  = child.coherent  || parent.coherent;
+    child.volatil   = child.volatil   || parent.volatil;
+    child.restrict  = child.restrict  || parent.restrict;
+    child.readonly  = child.readonly  || parent.readonly;
+    child.writeonly = child.writeonly || parent.writeonly;
+}
+
+bool HasNonLayoutQualifiers(const glslang::TType& type, const glslang::TQualifier& qualifier)
+{
+    // Qualifiers listed here must simultaneously:
+    // - be something struct members might inherit from a struct declaration
+    //     (note that non-block structs don't explicitly inherit,
+    //      only implicitly, meaning no decoration involved)
+    // - affect decorations on the struct members
+    //     (note smooth does not, and something like volatile is expected
+    //      to affect the whole object)
+    // - not be part of the offset/std430/etc. or row/column-major layout
+    if (qualifier.invariant)
+        return true;
+    // a per-member location on a block member also needs its own decoration
+    return qualifier.hasLocation() && type.getBasicType() == glslang::EbtBlock;
+}
+
+//
+// Implement the TGlslangToSpvTraverser class.
+//
+
+// Set up the SPIR-V generation session for one shader: construct the builder,
+// declare the module-level state and entry point, and translate the glslang
+// intermediate's stage-wide settings (input/output primitives, tessellation
+// spacing/order, depth modes, local size, ...) into SPIR-V capabilities and
+// execution modes on that entry point.
+TGlslangToSpvTraverser::TGlslangToSpvTraverser(const glslang::TIntermediate* glslangIntermediate, spv::SpvBuildLogger* buildLogger)
+    : TIntermTraverser(true, false, true), shaderEntry(nullptr), currentFunction(nullptr),
+      sequenceDepth(0), logger(buildLogger),
+      // generator word: Khronos tool id in the high 16 bits, version below
+      builder((glslang::GetKhronosToolId() << 16) | GeneratorVersion, logger),
+      inMain(false), mainTerminated(false), linkageOnly(false),
+      glslangIntermediate(glslangIntermediate)
+{
+    spv::ExecutionModel executionModel = TranslateExecutionModel(glslangIntermediate->getStage());
+
+    builder.clearAccessChain();
+    builder.setSource(TranslateSourceLanguage(glslangIntermediate->getSource(), glslangIntermediate->getProfile()), glslangIntermediate->getVersion());
+    // import the GLSL.std.450 extended instruction set, used for built-ins
+    stdBuiltins = builder.import("GLSL.std.450");
+    builder.setMemoryModel(spv::AddressingModelLogical, spv::MemoryModelGLSL450);
+    shaderEntry = builder.makeEntryPoint(glslangIntermediate->getEntryPointName().c_str());
+    entryPoint = builder.addEntryPoint(executionModel, shaderEntry, glslangIntermediate->getEntryPointName().c_str());
+
+    // Add the source extensions
+    const auto& sourceExtensions = glslangIntermediate->getRequestedExtensions();
+    for (auto it = sourceExtensions.begin(); it != sourceExtensions.end(); ++it)
+        builder.addSourceExtension(it->c_str());
+
+    // Add the top-level modes for this shader.
+
+    if (glslangIntermediate->getXfbMode()) {
+        builder.addCapability(spv::CapabilityTransformFeedback);
+        builder.addExecutionMode(shaderEntry, spv::ExecutionModeXfb);
+    }
+
+    // 'mode' is assigned by each stage below before it is read;
+    // spv::ExecutionModeMax serves as the "nothing to add" sentinel.
+    unsigned int mode;
+    switch (glslangIntermediate->getStage()) {
+    case EShLangVertex:
+        builder.addCapability(spv::CapabilityShader);
+        break;
+
+    case EShLangTessControl:
+        builder.addCapability(spv::CapabilityTessellation);
+        builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputVertices, glslangIntermediate->getVertices());
+        break;
+
+    case EShLangTessEvaluation:
+        builder.addCapability(spv::CapabilityTessellation);
+        // tessellation domain (input primitive)
+        switch (glslangIntermediate->getInputPrimitive()) {
+        case glslang::ElgTriangles:           mode = spv::ExecutionModeTriangles;     break;
+        case glslang::ElgQuads:               mode = spv::ExecutionModeQuads;         break;
+        case glslang::ElgIsolines:            mode = spv::ExecutionModeIsolines;      break;
+        default:                              mode = spv::ExecutionModeMax;           break;
+        }
+        if (mode != spv::ExecutionModeMax)
+            builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+        // vertex spacing
+        switch (glslangIntermediate->getVertexSpacing()) {
+        case glslang::EvsEqual:            mode = spv::ExecutionModeSpacingEqual;          break;
+        case glslang::EvsFractionalEven:   mode = spv::ExecutionModeSpacingFractionalEven; break;
+        case glslang::EvsFractionalOdd:    mode = spv::ExecutionModeSpacingFractionalOdd;  break;
+        default:                           mode = spv::ExecutionModeMax;                   break;
+        }
+        if (mode != spv::ExecutionModeMax)
+            builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+        // winding order
+        switch (glslangIntermediate->getVertexOrder()) {
+        case glslang::EvoCw:     mode = spv::ExecutionModeVertexOrderCw;  break;
+        case glslang::EvoCcw:    mode = spv::ExecutionModeVertexOrderCcw; break;
+        default:                 mode = spv::ExecutionModeMax;            break;
+        }
+        if (mode != spv::ExecutionModeMax)
+            builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+        if (glslangIntermediate->getPointMode())
+            builder.addExecutionMode(shaderEntry, spv::ExecutionModePointMode);
+        break;
+
+    case EShLangGeometry:
+        builder.addCapability(spv::CapabilityGeometry);
+        // input primitive
+        switch (glslangIntermediate->getInputPrimitive()) {
+        case glslang::ElgPoints:             mode = spv::ExecutionModeInputPoints;             break;
+        case glslang::ElgLines:              mode = spv::ExecutionModeInputLines;              break;
+        case glslang::ElgLinesAdjacency:     mode = spv::ExecutionModeInputLinesAdjacency;     break;
+        case glslang::ElgTriangles:          mode = spv::ExecutionModeTriangles;               break;
+        case glslang::ElgTrianglesAdjacency: mode = spv::ExecutionModeInputTrianglesAdjacency; break;
+        default:                             mode = spv::ExecutionModeMax;                     break;
+        }
+        if (mode != spv::ExecutionModeMax)
+            builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+        builder.addExecutionMode(shaderEntry, spv::ExecutionModeInvocations, glslangIntermediate->getInvocations());
+
+        // output primitive
+        switch (glslangIntermediate->getOutputPrimitive()) {
+        case glslang::ElgPoints:        mode = spv::ExecutionModeOutputPoints;                 break;
+        case glslang::ElgLineStrip:     mode = spv::ExecutionModeOutputLineStrip;              break;
+        case glslang::ElgTriangleStrip: mode = spv::ExecutionModeOutputTriangleStrip;          break;
+        default:                        mode = spv::ExecutionModeMax;                          break;
+        }
+        if (mode != spv::ExecutionModeMax)
+            builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+        builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputVertices, glslangIntermediate->getVertices());
+        break;
+
+    case EShLangFragment:
+        builder.addCapability(spv::CapabilityShader);
+        if (glslangIntermediate->getPixelCenterInteger())
+            builder.addExecutionMode(shaderEntry, spv::ExecutionModePixelCenterInteger);
+
+        if (glslangIntermediate->getOriginUpperLeft())
+            builder.addExecutionMode(shaderEntry, spv::ExecutionModeOriginUpperLeft);
+        else
+            builder.addExecutionMode(shaderEntry, spv::ExecutionModeOriginLowerLeft);
+
+        if (glslangIntermediate->getEarlyFragmentTests())
+            builder.addExecutionMode(shaderEntry, spv::ExecutionModeEarlyFragmentTests);
+
+        // depth layout (depth_greater/depth_less)
+        switch(glslangIntermediate->getDepth()) {
+        case glslang::EldGreater:  mode = spv::ExecutionModeDepthGreater; break;
+        case glslang::EldLess:     mode = spv::ExecutionModeDepthLess;    break;
+        default:                   mode = spv::ExecutionModeMax;          break;
+        }
+        if (mode != spv::ExecutionModeMax)
+            builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+        if (glslangIntermediate->getDepth() != glslang::EldUnchanged && glslangIntermediate->isDepthReplacing())
+            builder.addExecutionMode(shaderEntry, spv::ExecutionModeDepthReplacing);
+        break;
+
+    case EShLangCompute:
+        builder.addCapability(spv::CapabilityShader);
+        builder.addExecutionMode(shaderEntry, spv::ExecutionModeLocalSize, glslangIntermediate->getLocalSize(0),
+                                                                           glslangIntermediate->getLocalSize(1),
+                                                                           glslangIntermediate->getLocalSize(2));
+        break;
+
+    default:
+        break;
+    }
+
+}
+
+// Finish everything and dump
+void TGlslangToSpvTraverser::dumpSpv(std::vector<unsigned int>& out)
+{
+    // complete the OpEntryPoint instruction with every Input/Output <id>
+    // gathered during traversal
+    for (auto id : iOSet)
+        entryPoint->addIdOperand(id);
+
+    // strip decorations whose targets are gone, then serialize the module
+    builder.eliminateDeadDecorations();
+    builder.dump(out);
+}
+
+TGlslangToSpvTraverser::~TGlslangToSpvTraverser()
+{
+    if (mainTerminated)
+        return;
+
+    // main never hit an explicit termination during traversal; close it
+    // out now at the end of its last block
+    builder.setBuildPoint(shaderEntry->getLastBlock());
+    builder.leaveFunction();
+}
+
+//
+// Implement the traversal functions.
+//
+// Return true from interior nodes to have the external traversal
+// continue on to children.  Return false if children were
+// already processed.
+//
+
+//
+// Symbols can turn into
+//  - uniform/input reads
+//  - output writes
+//  - complex lvalue base setups:  foo.bar[3]....  , where we see foo and start up an access chain
+//  - something simple that degenerates into the last bullet
+//
+void TGlslangToSpvTraverser::visitSymbol(glslang::TIntermSymbol* symbol)
+{
+    SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+    if (symbol->getType().getQualifier().isSpecConstant())
+        spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+    // getSymbolId() will set up all the IO decorations on the first call.
+    // Formal function parameters were mapped during makeFunctions().
+    const spv::Id symbolId = getSymbolId(symbol);
+
+    // Every "static use" and "linkage only" Input/Output variable belongs on
+    // the OpEntryPoint instruction's interface list.
+    if (builder.isPointer(symbolId)) {
+        const spv::StorageClass storageClass = builder.getStorageClass(symbolId);
+        if (storageClass == spv::StorageClassInput || storageClass == spv::StorageClassOutput)
+            iOSet.insert(symbolId);
+    }
+
+    // Linkage-only nodes generate no actual static use
+    // (spec constants are the exception).
+    if (linkageOnly && ! symbol->getQualifier().isSpecConstant())
+        return;
+
+    // Prepare to generate code for the access.
+    // L-value chains are computed left to right; the symbol is the left-most
+    // part of the chain, so this is "clear" time, followed by setting the base.
+    builder.clearAccessChain();
+
+    // User variables are considered to be in memory, hence pointers, except:
+    // A) R-Value arguments to a function, which are an intermediate object
+    //    (see comments in handleUserFunctionCall()), and
+    // B) specialization constants, which are pure R-values (normal constants
+    //    don't even come in as a variable).
+    const glslang::TQualifier& qualifier = symbol->getQualifier();
+    const bool treatAsRValue = qualifier.isSpecConstant() ||
+                               rValueParameters.find(symbol->getId()) != rValueParameters.end();
+    if (treatAsRValue)
+        builder.setAccessChainRValue(symbolId);
+    else
+        builder.setAccessChainLValue(symbolId);
+}
+
+// Translate a glslang binary node into SPIR-V.
+// Special-cases assignments (including op-assigns), direct/indirect indexing
+// (which extend the current access chain), swizzles, and short-circuit
+// logicals; everything else falls through to a generic binary operation.
+// Returns true to let the traverser visit children, false when children
+// were already processed here.
+bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::TIntermBinary* node)
+{
+    SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+    if (node->getType().getQualifier().isSpecConstant())
+        spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+    // First, handle special cases
+    switch (node->getOp()) {
+    case glslang::EOpAssign:
+    case glslang::EOpAddAssign:
+    case glslang::EOpSubAssign:
+    case glslang::EOpMulAssign:
+    case glslang::EOpVectorTimesMatrixAssign:
+    case glslang::EOpVectorTimesScalarAssign:
+    case glslang::EOpMatrixTimesScalarAssign:
+    case glslang::EOpMatrixTimesMatrixAssign:
+    case glslang::EOpDivAssign:
+    case glslang::EOpModAssign:
+    case glslang::EOpAndAssign:
+    case glslang::EOpInclusiveOrAssign:
+    case glslang::EOpExclusiveOrAssign:
+    case glslang::EOpLeftShiftAssign:
+    case glslang::EOpRightShiftAssign:
+        // A bin-op assign "a += b" means the same thing as "a = a + b"
+        // where a is evaluated before b. For a simple assignment, GLSL
+        // says to evaluate the left before the right.  So, always, left
+        // node then right node.
+        {
+            // get the left l-value, save it away
+            builder.clearAccessChain();
+            node->getLeft()->traverse(this);
+            spv::Builder::AccessChain lValue = builder.getAccessChain();
+
+            // evaluate the right
+            builder.clearAccessChain();
+            node->getRight()->traverse(this);
+            spv::Id rValue = accessChainLoad(node->getRight()->getType());
+
+            if (node->getOp() != glslang::EOpAssign) {
+                // the left is also an r-value
+                builder.setAccessChain(lValue);
+                spv::Id leftRValue = accessChainLoad(node->getLeft()->getType());
+
+                // do the operation
+                rValue = createBinaryOperation(node->getOp(), TranslatePrecisionDecoration(node->getOperationPrecision()),
+                                               TranslateNoContractionDecoration(node->getType().getQualifier()),
+                                               convertGlslangToSpvType(node->getType()), leftRValue, rValue,
+                                               node->getType().getBasicType());
+
+                // these all need their counterparts in createBinaryOperation()
+                assert(rValue != spv::NoResult);
+            }
+
+            // store the result
+            builder.setAccessChain(lValue);
+            multiTypeStore(node->getType(), rValue);
+
+            // assignments are expressions having an rValue after they are evaluated...
+            builder.clearAccessChain();
+            builder.setAccessChainRValue(rValue);
+        }
+        return false;
+    case glslang::EOpIndexDirect:
+    case glslang::EOpIndexDirectStruct:
+        {
+            // Get the left part of the access chain.
+            node->getLeft()->traverse(this);
+
+            // Add the next element in the chain
+
+            const int glslangIndex = node->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+            if (! node->getLeft()->getType().isArray() &&
+                node->getLeft()->getType().isVector() &&
+                node->getOp() == glslang::EOpIndexDirect) {
+                // This is essentially a hard-coded vector swizzle of size 1,
+                // so short circuit the access-chain stuff with a swizzle.
+                std::vector<unsigned> swizzle;
+                swizzle.push_back(glslangIndex);
+                builder.accessChainPushSwizzle(swizzle, convertGlslangToSpvType(node->getLeft()->getType()));
+            } else {
+                int spvIndex = glslangIndex;
+                if (node->getLeft()->getBasicType() == glslang::EbtBlock &&
+                    node->getOp() == glslang::EOpIndexDirectStruct)
+                {
+                    // This may be, e.g., an anonymous block-member selection, which generally need
+                    // index remapping due to hidden members in anonymous blocks.
+                    std::vector<int>& remapper = memberRemapper[node->getLeft()->getType().getStruct()];
+                    assert(remapper.size() > 0);
+                    spvIndex = remapper[glslangIndex];
+                }
+
+                // normal case for indexing array or structure or block
+                builder.accessChainPush(builder.makeIntConstant(spvIndex));
+
+                // Add capabilities here for accessing PointSize and clip/cull distance.
+                // We have deferred generation of associated capabilities until now.
+                if (node->getLeft()->getType().isStruct() && ! node->getLeft()->getType().isArray())
+                    declareUseOfStructMember(*(node->getLeft()->getType().getStruct()), glslangIndex);
+            }
+        }
+        return false;
+    case glslang::EOpIndexIndirect:
+        {
+            // Structure or array or vector indirection.
+            // Will use native SPIR-V access-chain for struct and array indirection;
+            // matrices are arrays of vectors, so will also work for a matrix.
+            // Will use the access chain's 'component' for variable index into a vector.
+
+            // This adapter is building access chains left to right.
+            // Set up the access chain to the left.
+            node->getLeft()->traverse(this);
+
+            // save it so that computing the right side doesn't trash it
+            spv::Builder::AccessChain partial = builder.getAccessChain();
+
+            // compute the next index in the chain
+            builder.clearAccessChain();
+            node->getRight()->traverse(this);
+            spv::Id index = accessChainLoad(node->getRight()->getType());
+
+            // restore the saved access chain
+            builder.setAccessChain(partial);
+
+            if (! node->getLeft()->getType().isArray() && node->getLeft()->getType().isVector())
+                builder.accessChainPushComponent(index, convertGlslangToSpvType(node->getLeft()->getType()));
+            else
+                builder.accessChainPush(index);
+        }
+        return false;
+    case glslang::EOpVectorSwizzle:
+        {
+            // defer the swizzle onto the access chain rather than loading now
+            node->getLeft()->traverse(this);
+            std::vector<unsigned> swizzle;
+            convertSwizzle(*node->getRight()->getAsAggregate(), swizzle);
+            builder.accessChainPushSwizzle(swizzle, convertGlslangToSpvType(node->getLeft()->getType()));
+        }
+        return false;
+    case glslang::EOpLogicalOr:
+    case glslang::EOpLogicalAnd:
+        {
+
+            // These may require short circuiting, but can sometimes be done as straight
+            // binary operations.  The right operand must be short circuited if it has
+            // side effects, and should probably be if it is complex.
+            if (isTrivial(node->getRight()->getAsTyped()))
+                break; // handle below as a normal binary operation
+            // otherwise, we need to do dynamic short circuiting on the right operand
+            spv::Id result = createShortCircuit(node->getOp(), *node->getLeft()->getAsTyped(), *node->getRight()->getAsTyped());
+            builder.clearAccessChain();
+            builder.setAccessChainRValue(result);
+        }
+        return false;
+    default:
+        break;
+    }
+
+    // Assume generic binary op...
+
+    // get left operand
+    builder.clearAccessChain();
+    node->getLeft()->traverse(this);
+    spv::Id left = accessChainLoad(node->getLeft()->getType());
+
+    // get right operand
+    builder.clearAccessChain();
+    node->getRight()->traverse(this);
+    spv::Id right = accessChainLoad(node->getRight()->getType());
+
+    // get result
+    spv::Id result = createBinaryOperation(node->getOp(), TranslatePrecisionDecoration(node->getOperationPrecision()),
+                                           TranslateNoContractionDecoration(node->getType().getQualifier()),
+                                           convertGlslangToSpvType(node->getType()), left, right,
+                                           node->getLeft()->getType().getBasicType());
+
+    builder.clearAccessChain();
+    if (! result) {
+        logger->missingFunctionality("unknown glslang binary operation");
+        return true;  // pick up a child as the place-holder result
+    } else {
+        builder.setAccessChainRValue(result);
+        return false;
+    }
+}
+
+// Translate a glslang unary node into SPIR-V.
+// Tries texturing first, then the .length() special case, then conversions
+// and generic unary operations, and finally the inc/dec and geometry-stream
+// special cases.  Returns false when the node (and its children) are fully
+// handled here; true lets the traverser pick up the operand as a placeholder.
+bool TGlslangToSpvTraverser::visitUnary(glslang::TVisit /* visit */, glslang::TIntermUnary* node)
+{
+    SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+    if (node->getType().getQualifier().isSpecConstant())
+        spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+    spv::Id result = spv::NoResult;
+
+    // try texturing first
+    result = createImageTextureFunctionCall(node);
+    if (result != spv::NoResult) {
+        builder.clearAccessChain();
+        builder.setAccessChainRValue(result);
+
+        return false; // done with this node
+    }
+
+    // Non-texturing.
+
+    if (node->getOp() == glslang::EOpArrayLength) {
+        // Quite special; won't want to evaluate the operand.
+
+        // Normal .length() would have been constant folded by the front-end.
+        // So, this has to be block.lastMember.length().
+        // SPV wants "block" and member number as the operands, go get them.
+        assert(node->getOperand()->getType().isRuntimeSizedArray());
+        glslang::TIntermTyped* block = node->getOperand()->getAsBinaryNode()->getLeft();
+        block->traverse(this);
+        unsigned int member = node->getOperand()->getAsBinaryNode()->getRight()->getAsConstantUnion()->getConstArray()[0].getUConst();
+        spv::Id length = builder.createArrayLength(builder.accessChainGetLValue(), member);
+
+        builder.clearAccessChain();
+        builder.setAccessChainRValue(length);
+
+        return false;
+    }
+
+    // Start by evaluating the operand
+
+    // Does it need a swizzle inversion?  If so, evaluation is inverted;
+    // operate first on the swizzle base, then apply the swizzle.
+    spv::Id invertedType = spv::NoType;
+    auto resultType = [&invertedType, &node, this](){ return invertedType != spv::NoType ? invertedType : convertGlslangToSpvType(node->getType()); };
+    if (node->getOp() == glslang::EOpInterpolateAtCentroid)
+        invertedType = getInvertedSwizzleType(*node->getOperand());
+
+    builder.clearAccessChain();
+    if (invertedType != spv::NoType)
+        node->getOperand()->getAsBinaryNode()->getLeft()->traverse(this);
+    else
+        node->getOperand()->traverse(this);
+
+    spv::Id operand = spv::NoResult;
+
+    // atomics and interpolateAtCentroid consume the operand as an l-value
+    // (a pointer); everything else wants a loaded r-value
+    if (node->getOp() == glslang::EOpAtomicCounterIncrement ||
+        node->getOp() == glslang::EOpAtomicCounterDecrement ||
+        node->getOp() == glslang::EOpAtomicCounter          ||
+        node->getOp() == glslang::EOpInterpolateAtCentroid)
+        operand = builder.accessChainGetLValue(); // Special case l-value operands
+    else
+        operand = accessChainLoad(node->getOperand()->getType());
+
+    spv::Decoration precision = TranslatePrecisionDecoration(node->getOperationPrecision());
+    spv::Decoration noContraction = TranslateNoContractionDecoration(node->getType().getQualifier());
+
+    // it could be a conversion
+    if (! result)
+        result = createConversion(node->getOp(), precision, noContraction, resultType(), operand, node->getOperand()->getBasicType());
+
+    // if not, then possibly an operation
+    if (! result)
+        result = createUnaryOperation(node->getOp(), precision, noContraction, resultType(), operand, node->getOperand()->getBasicType());
+
+    if (result) {
+        // re-apply the inverted swizzle, if evaluation was inverted above
+        if (invertedType)
+            result = createInvertedSwizzle(precision, *node->getOperand(), result);
+
+        builder.clearAccessChain();
+        builder.setAccessChainRValue(result);
+
+        return false; // done with this node
+    }
+
+    // it must be a special case, check...
+    switch (node->getOp()) {
+    case glslang::EOpPostIncrement:
+    case glslang::EOpPostDecrement:
+    case glslang::EOpPreIncrement:
+    case glslang::EOpPreDecrement:
+        {
+            // we need the integer value "1" or the floating point "1.0" to add/subtract
+            spv::Id one = 0;
+            if (node->getBasicType() == glslang::EbtFloat)
+                one = builder.makeFloatConstant(1.0F);
+            else if (node->getBasicType() == glslang::EbtDouble)
+                one = builder.makeDoubleConstant(1.0);
+#ifdef AMD_EXTENSIONS
+            else if (node->getBasicType() == glslang::EbtFloat16)
+                one = builder.makeFloat16Constant(1.0F);
+#endif
+            else if (node->getBasicType() == glslang::EbtInt64 || node->getBasicType() == glslang::EbtUint64)
+                one = builder.makeInt64Constant(1);
+            else
+                one = builder.makeIntConstant(1);
+            // inc/dec lowers to a regular add/sub by one
+            glslang::TOperator op;
+            if (node->getOp() == glslang::EOpPreIncrement ||
+                node->getOp() == glslang::EOpPostIncrement)
+                op = glslang::EOpAdd;
+            else
+                op = glslang::EOpSub;
+
+            spv::Id result = createBinaryOperation(op, precision,
+                                                   TranslateNoContractionDecoration(node->getType().getQualifier()),
+                                                   convertGlslangToSpvType(node->getType()), operand, one,
+                                                   node->getType().getBasicType());
+            assert(result != spv::NoResult);
+
+            // The result of operation is always stored, but conditionally the
+            // consumed result.  The consumed result is always an r-value.
+            // Pre-inc/dec yields the new value; post-inc/dec yields the
+            // original (already-loaded) operand.
+            builder.accessChainStore(result);
+            builder.clearAccessChain();
+            if (node->getOp() == glslang::EOpPreIncrement ||
+                node->getOp() == glslang::EOpPreDecrement)
+                builder.setAccessChainRValue(result);
+            else
+                builder.setAccessChainRValue(operand);
+        }
+
+        return false;
+
+    case glslang::EOpEmitStreamVertex:
+        builder.createNoResultOp(spv::OpEmitStreamVertex, operand);
+        return false;
+    case glslang::EOpEndStreamPrimitive:
+        builder.createNoResultOp(spv::OpEndStreamPrimitive, operand);
+        return false;
+
+    default:
+        logger->missingFunctionality("unknown glslang unary");
+        return true;  // pick up operand as placeholder result
+    }
+}
+
+bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TIntermAggregate* node)
+{
+    SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+    if (node->getType().getQualifier().isSpecConstant())
+        spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+    spv::Id result = spv::NoResult;
+    spv::Id invertedType = spv::NoType;  // to use to override the natural type of the node
+    auto resultType = [&invertedType, &node, this](){ return invertedType != spv::NoType ? invertedType : convertGlslangToSpvType(node->getType()); };
+
+    // try texturing
+    result = createImageTextureFunctionCall(node);
+    if (result != spv::NoResult) {
+        builder.clearAccessChain();
+        builder.setAccessChainRValue(result);
+
+        return false;
+    } else if (node->getOp() == glslang::EOpImageStore) {
+        // "imageStore" is a special case, which has no result
+        return false;
+    }
+
+    glslang::TOperator binOp = glslang::EOpNull;
+    bool reduceComparison = true;
+    bool isMatrix = false;
+    bool noReturnValue = false;
+    bool atomic = false;
+
+    assert(node->getOp());
+
+    spv::Decoration precision = TranslatePrecisionDecoration(node->getOperationPrecision());
+
+    switch (node->getOp()) {
+    case glslang::EOpSequence:
+    {
+        if (preVisit)
+            ++sequenceDepth;
+        else
+            --sequenceDepth;
+
+        if (sequenceDepth == 1) {
+            // If this is the parent node of all the functions, we want to see them
+            // early, so all call points have actual SPIR-V functions to reference.
+            // In all cases, still let the traverser visit the children for us.
+            makeFunctions(node->getAsAggregate()->getSequence());
+
+            // Also, we want all globals initializers to go into the beginning of the entry point, before
+            // anything else gets there, so visit out of order, doing them all now.
+            makeGlobalInitializers(node->getAsAggregate()->getSequence());
+
+            // Initializers are done, don't want to visit again, but functions link objects need to be processed,
+            // so do them manually.
+            visitFunctions(node->getAsAggregate()->getSequence());
+
+            return false;
+        }
+
+        return true;
+    }
+    case glslang::EOpLinkerObjects:
+    {
+        if (visit == glslang::EvPreVisit)
+            linkageOnly = true;
+        else
+            linkageOnly = false;
+
+        return true;
+    }
+    case glslang::EOpComma:
+    {
+        // processing from left to right naturally leaves the right-most
+        // lying around in the access chain
+        glslang::TIntermSequence& glslangOperands = node->getSequence();
+        for (int i = 0; i < (int)glslangOperands.size(); ++i)
+            glslangOperands[i]->traverse(this);
+
+        return false;
+    }
+    case glslang::EOpFunction:
+        if (visit == glslang::EvPreVisit) {
+            if (isShaderEntryPoint(node)) {
+                inMain = true;
+                builder.setBuildPoint(shaderEntry->getLastBlock());
+                currentFunction = shaderEntry;
+            } else {
+                handleFunctionEntry(node);
+            }
+        } else {
+            if (inMain)
+                mainTerminated = true;
+            builder.leaveFunction();
+            inMain = false;
+        }
+
+        return true;
+    case glslang::EOpParameters:
+        // Parameters will have been consumed by EOpFunction processing, but not
+        // the body, so we still visited the function node's children, making this
+        // child redundant.
+        return false;
+    case glslang::EOpFunctionCall:
+    {
+        if (node->isUserDefined())
+            result = handleUserFunctionCall(node);
+        //assert(result);  // this can happen for bad shaders because the call graph completeness checking is not yet done
+        if (result) {
+            builder.clearAccessChain();
+            builder.setAccessChainRValue(result);
+        } else
+            logger->missingFunctionality("missing user function; linker needs to catch that");
+
+        return false;
+    }
+    case glslang::EOpConstructMat2x2:
+    case glslang::EOpConstructMat2x3:
+    case glslang::EOpConstructMat2x4:
+    case glslang::EOpConstructMat3x2:
+    case glslang::EOpConstructMat3x3:
+    case glslang::EOpConstructMat3x4:
+    case glslang::EOpConstructMat4x2:
+    case glslang::EOpConstructMat4x3:
+    case glslang::EOpConstructMat4x4:
+    case glslang::EOpConstructDMat2x2:
+    case glslang::EOpConstructDMat2x3:
+    case glslang::EOpConstructDMat2x4:
+    case glslang::EOpConstructDMat3x2:
+    case glslang::EOpConstructDMat3x3:
+    case glslang::EOpConstructDMat3x4:
+    case glslang::EOpConstructDMat4x2:
+    case glslang::EOpConstructDMat4x3:
+    case glslang::EOpConstructDMat4x4:
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpConstructF16Mat2x2:
+    case glslang::EOpConstructF16Mat2x3:
+    case glslang::EOpConstructF16Mat2x4:
+    case glslang::EOpConstructF16Mat3x2:
+    case glslang::EOpConstructF16Mat3x3:
+    case glslang::EOpConstructF16Mat3x4:
+    case glslang::EOpConstructF16Mat4x2:
+    case glslang::EOpConstructF16Mat4x3:
+    case glslang::EOpConstructF16Mat4x4:
+#endif
+        isMatrix = true;
+        // fall through
+    case glslang::EOpConstructFloat:
+    case glslang::EOpConstructVec2:
+    case glslang::EOpConstructVec3:
+    case glslang::EOpConstructVec4:
+    case glslang::EOpConstructDouble:
+    case glslang::EOpConstructDVec2:
+    case glslang::EOpConstructDVec3:
+    case glslang::EOpConstructDVec4:
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpConstructFloat16:
+    case glslang::EOpConstructF16Vec2:
+    case glslang::EOpConstructF16Vec3:
+    case glslang::EOpConstructF16Vec4:
+#endif
+    case glslang::EOpConstructBool:
+    case glslang::EOpConstructBVec2:
+    case glslang::EOpConstructBVec3:
+    case glslang::EOpConstructBVec4:
+    case glslang::EOpConstructInt:
+    case glslang::EOpConstructIVec2:
+    case glslang::EOpConstructIVec3:
+    case glslang::EOpConstructIVec4:
+    case glslang::EOpConstructUint:
+    case glslang::EOpConstructUVec2:
+    case glslang::EOpConstructUVec3:
+    case glslang::EOpConstructUVec4:
+    case glslang::EOpConstructInt64:
+    case glslang::EOpConstructI64Vec2:
+    case glslang::EOpConstructI64Vec3:
+    case glslang::EOpConstructI64Vec4:
+    case glslang::EOpConstructUint64:
+    case glslang::EOpConstructU64Vec2:
+    case glslang::EOpConstructU64Vec3:
+    case glslang::EOpConstructU64Vec4:
+    case glslang::EOpConstructStruct:
+    case glslang::EOpConstructTextureSampler:
+    {
+        std::vector<spv::Id> arguments;
+        translateArguments(*node, arguments);
+        spv::Id constructed;
+        if (node->getOp() == glslang::EOpConstructTextureSampler)
+            constructed = builder.createOp(spv::OpSampledImage, resultType(), arguments);
+        else if (node->getOp() == glslang::EOpConstructStruct || node->getType().isArray()) {
+            std::vector<spv::Id> constituents;
+            for (int c = 0; c < (int)arguments.size(); ++c)
+                constituents.push_back(arguments[c]);
+            constructed = builder.createCompositeConstruct(resultType(), constituents);
+        } else if (isMatrix)
+            constructed = builder.createMatrixConstructor(precision, arguments, resultType());
+        else
+            constructed = builder.createConstructor(precision, arguments, resultType());
+
+        builder.clearAccessChain();
+        builder.setAccessChainRValue(constructed);
+
+        return false;
+    }
+
+    // These six are component-wise compares with component-wise results.
+    // Forward on to createBinaryOperation(), requesting a vector result.
+    case glslang::EOpLessThan:
+    case glslang::EOpGreaterThan:
+    case glslang::EOpLessThanEqual:
+    case glslang::EOpGreaterThanEqual:
+    case glslang::EOpVectorEqual:
+    case glslang::EOpVectorNotEqual:
+    {
+        // Map the operation to a binary
+        binOp = node->getOp();
+        reduceComparison = false;
+        switch (node->getOp()) {
+        case glslang::EOpVectorEqual:     binOp = glslang::EOpVectorEqual;      break;
+        case glslang::EOpVectorNotEqual:  binOp = glslang::EOpVectorNotEqual;   break;
+        default:                          binOp = node->getOp();                break;
+        }
+
+        break;
+    }
+    case glslang::EOpMul:
+        // component-wise matrix multiply
+        binOp = glslang::EOpMul;
+        break;
+    case glslang::EOpOuterProduct:
+        // two vectors multiplied to make a matrix
+        binOp = glslang::EOpOuterProduct;
+        break;
+    case glslang::EOpDot:
+    {
+        // for scalar dot product, use multiply
+        glslang::TIntermSequence& glslangOperands = node->getSequence();
+        if (glslangOperands[0]->getAsTyped()->getVectorSize() == 1)
+            binOp = glslang::EOpMul;
+        break;
+    }
+    case glslang::EOpMod:
+        // when an aggregate, this is the floating-point mod built-in function,
+        // which can be emitted by the one in createBinaryOperation()
+        binOp = glslang::EOpMod;
+        break;
+    case glslang::EOpEmitVertex:
+    case glslang::EOpEndPrimitive:
+    case glslang::EOpBarrier:
+    case glslang::EOpMemoryBarrier:
+    case glslang::EOpMemoryBarrierAtomicCounter:
+    case glslang::EOpMemoryBarrierBuffer:
+    case glslang::EOpMemoryBarrierImage:
+    case glslang::EOpMemoryBarrierShared:
+    case glslang::EOpGroupMemoryBarrier:
+    case glslang::EOpAllMemoryBarrierWithGroupSync:
+    case glslang::EOpGroupMemoryBarrierWithGroupSync:
+    case glslang::EOpWorkgroupMemoryBarrier:
+    case glslang::EOpWorkgroupMemoryBarrierWithGroupSync:
+        noReturnValue = true;
+        // These all have 0 operands and will naturally finish up in the code below for 0 operands
+        break;
+
+    case glslang::EOpAtomicAdd:
+    case glslang::EOpAtomicMin:
+    case glslang::EOpAtomicMax:
+    case glslang::EOpAtomicAnd:
+    case glslang::EOpAtomicOr:
+    case glslang::EOpAtomicXor:
+    case glslang::EOpAtomicExchange:
+    case glslang::EOpAtomicCompSwap:
+        atomic = true;
+        break;
+
+    default:
+        break;
+    }
+
+    //
+    // See if it maps to a regular operation.
+    //
+    if (binOp != glslang::EOpNull) {
+        glslang::TIntermTyped* left = node->getSequence()[0]->getAsTyped();
+        glslang::TIntermTyped* right = node->getSequence()[1]->getAsTyped();
+        assert(left && right);
+
+        builder.clearAccessChain();
+        left->traverse(this);
+        spv::Id leftId = accessChainLoad(left->getType());
+
+        builder.clearAccessChain();
+        right->traverse(this);
+        spv::Id rightId = accessChainLoad(right->getType());
+
+        result = createBinaryOperation(binOp, precision, TranslateNoContractionDecoration(node->getType().getQualifier()),
+                                       resultType(), leftId, rightId,
+                                       left->getType().getBasicType(), reduceComparison);
+
+        // code above should only make binOp that exists in createBinaryOperation
+        assert(result != spv::NoResult);
+        builder.clearAccessChain();
+        builder.setAccessChainRValue(result);
+
+        return false;
+    }
+
+    //
+    // Create the list of operands.
+    //
+    glslang::TIntermSequence& glslangOperands = node->getSequence();
+    std::vector<spv::Id> operands;
+    for (int arg = 0; arg < (int)glslangOperands.size(); ++arg) {
+        // special case l-value operands; there are just a few
+        bool lvalue = false;
+        switch (node->getOp()) {
+        case glslang::EOpFrexp:
+        case glslang::EOpModf:
+            if (arg == 1)
+                lvalue = true;
+            break;
+        case glslang::EOpInterpolateAtSample:
+        case glslang::EOpInterpolateAtOffset:
+#ifdef AMD_EXTENSIONS
+        case glslang::EOpInterpolateAtVertex:
+#endif
+            if (arg == 0) {
+                lvalue = true;
+
+                // Does it need a swizzle inversion?  If so, evaluation is inverted;
+                // operate first on the swizzle base, then apply the swizzle.
+                if (glslangOperands[0]->getAsOperator() && 
+                    glslangOperands[0]->getAsOperator()->getOp() == glslang::EOpVectorSwizzle)
+                    invertedType = convertGlslangToSpvType(glslangOperands[0]->getAsBinaryNode()->getLeft()->getType());
+            }
+            break;
+        case glslang::EOpAtomicAdd:
+        case glslang::EOpAtomicMin:
+        case glslang::EOpAtomicMax:
+        case glslang::EOpAtomicAnd:
+        case glslang::EOpAtomicOr:
+        case glslang::EOpAtomicXor:
+        case glslang::EOpAtomicExchange:
+        case glslang::EOpAtomicCompSwap:
+            if (arg == 0)
+                lvalue = true;
+            break;
+        case glslang::EOpAddCarry:
+        case glslang::EOpSubBorrow:
+            if (arg == 2)
+                lvalue = true;
+            break;
+        case glslang::EOpUMulExtended:
+        case glslang::EOpIMulExtended:
+            if (arg >= 2)
+                lvalue = true;
+            break;
+        default:
+            break;
+        }
+        builder.clearAccessChain();
+        if (invertedType != spv::NoType && arg == 0)
+            glslangOperands[0]->getAsBinaryNode()->getLeft()->traverse(this);
+        else
+            glslangOperands[arg]->traverse(this);
+        if (lvalue)
+            operands.push_back(builder.accessChainGetLValue());
+        else
+            operands.push_back(accessChainLoad(glslangOperands[arg]->getAsTyped()->getType()));
+    }
+
+    if (atomic) {
+        // Handle all atomics
+        result = createAtomicOperation(node->getOp(), precision, resultType(), operands, node->getBasicType());
+    } else {
+        // Pass through to generic operations.
+        switch (glslangOperands.size()) {
+        case 0:
+            result = createNoArgOperation(node->getOp(), precision, resultType());
+            break;
+        case 1:
+            result = createUnaryOperation(
+                node->getOp(), precision,
+                TranslateNoContractionDecoration(node->getType().getQualifier()),
+                resultType(), operands.front(),
+                glslangOperands[0]->getAsTyped()->getBasicType());
+            break;
+        default:
+            result = createMiscOperation(node->getOp(), precision, resultType(), operands, node->getBasicType());
+            break;
+        }
+        if (invertedType)
+            result = createInvertedSwizzle(precision, *glslangOperands[0]->getAsBinaryNode(), result);
+    }
+
+    if (noReturnValue)
+        return false;
+
+    if (! result) {
+        logger->missingFunctionality("unknown glslang aggregate");
+        return true;  // pick up a child as a placeholder operand
+    } else {
+        builder.clearAccessChain();
+        builder.setAccessChainRValue(result);
+        return false;
+    }
+}
+
+// Translate an if-then-else statement or a ?: selection expression.
+//
+// For ?: (non-void node type) a Function-storage temporary is created,
+// stored to from whichever arm executes, and left as the current access
+// chain l-value. For plain if-then-else no result variable is made.
+// Returns false: children are traversed explicitly here.
+bool TGlslangToSpvTraverser::visitSelection(glslang::TVisit /* visit */, glslang::TIntermSelection* node)
+{
+    // This path handles both if-then-else and ?:
+    // The if-then-else has a node type of void, while
+    // ?: has a non-void node type
+    spv::Id result = 0;
+    if (node->getBasicType() != glslang::EbtVoid) {
+        // don't handle this as just on-the-fly temporaries, because there will be two names
+        // and better to leave SSA to later passes
+        result = builder.createVariable(spv::StorageClassFunction, convertGlslangToSpvType(node->getType()));
+    }
+
+    // emit the condition before doing anything with selection
+    node->getCondition()->traverse(this);
+
+    // make an "if" based on the value created by the condition
+    spv::Builder::If ifBuilder(accessChainLoad(node->getCondition()->getType()), builder);
+
+    if (node->getTrueBlock()) {
+        // emit the "then" statement
+        node->getTrueBlock()->traverse(this);
+        if (result)
+            builder.createStore(accessChainLoad(node->getTrueBlock()->getAsTyped()->getType()), result);
+    }
+
+    if (node->getFalseBlock()) {
+        ifBuilder.makeBeginElse();
+        // emit the "else" statement
+        node->getFalseBlock()->traverse(this);
+        if (result)
+            builder.createStore(accessChainLoad(node->getFalseBlock()->getAsTyped()->getType()), result);
+    }
+
+    // closes the selection construct (emits the merge block)
+    ifBuilder.makeEndIf();
+
+    if (result) {
+        // GLSL only has r-values as the result of a :?, but
+        // if we have an l-value, that can be more efficient if it will
+        // become the base of a complex r-value expression, because the
+        // next layer copies r-values into memory to use the access-chain mechanism
+        builder.clearAccessChain();
+        builder.setAccessChainLValue(result);
+    }
+
+    return false;
+}
+
+// Translate a switch statement into a SPIR-V OpSwitch construct.
+//
+// The body's children are partitioned into "code segments" (runs of
+// statements between case/default labels); caseValues holds the literal
+// for each case label and valueIndexToSegment maps it to the segment that
+// follows it, so fall-through between cases is preserved.
+// Returns false: children are traversed explicitly here.
+bool TGlslangToSpvTraverser::visitSwitch(glslang::TVisit /* visit */, glslang::TIntermSwitch* node)
+{
+    // emit and get the condition before doing anything with switch
+    node->getCondition()->traverse(this);
+    spv::Id selector = accessChainLoad(node->getCondition()->getAsTyped()->getType());
+
+    // browse the children to sort out code segments
+    int defaultSegment = -1;
+    std::vector<TIntermNode*> codeSegments;
+    glslang::TIntermSequence& sequence = node->getBody()->getSequence();
+    std::vector<int> caseValues;
+    std::vector<int> valueIndexToSegment(sequence.size());  // note: probably not all are used, it is an overestimate
+    for (glslang::TIntermSequence::iterator c = sequence.begin(); c != sequence.end(); ++c) {
+        TIntermNode* child = *c;
+        if (child->getAsBranchNode() && child->getAsBranchNode()->getFlowOp() == glslang::EOpDefault)
+            defaultSegment = (int)codeSegments.size();
+        else if (child->getAsBranchNode() && child->getAsBranchNode()->getFlowOp() == glslang::EOpCase) {
+            valueIndexToSegment[caseValues.size()] = (int)codeSegments.size();
+            caseValues.push_back(child->getAsBranchNode()->getExpression()->getAsConstantUnion()->getConstArray()[0].getIConst());
+        } else
+            codeSegments.push_back(child);
+    }
+
+    // handle the case where the last code segment is missing, due to no code
+    // statements between the last case and the end of the switch statement
+    if ((caseValues.size() && (int)codeSegments.size() == valueIndexToSegment[caseValues.size() - 1]) ||
+        (int)codeSegments.size() == defaultSegment)
+        codeSegments.push_back(nullptr);
+
+    // make the switch statement
+    std::vector<spv::Block*> segmentBlocks; // returned, as the blocks allocated in the call
+    builder.makeSwitch(selector, (int)codeSegments.size(), caseValues, valueIndexToSegment, defaultSegment, segmentBlocks);
+
+    // emit all the code in the segments
+    // push false so a 'break' inside the switch targets the switch merge,
+    // not an enclosing loop (see visitBranch)
+    breakForLoop.push(false);
+    for (unsigned int s = 0; s < codeSegments.size(); ++s) {
+        builder.nextSwitchSegment(segmentBlocks, s);
+        if (codeSegments[s])
+            codeSegments[s]->traverse(this);
+        else
+            // empty trailing segment: just branch to the merge block
+            builder.addSwitchBreak();
+    }
+    breakForLoop.pop();
+
+    builder.endSwitch(segmentBlocks);
+
+    return false;
+}
+
+// Translate a literal-constant AST node: build (or reuse) the matching
+// SPIR-V constant and expose it as the current r-value on a fresh access chain.
+void TGlslangToSpvTraverser::visitConstantUnion(glslang::TIntermConstantUnion* node)
+{
+    int constIndex = 0;
+    const spv::Id constantId =
+        createSpvConstantFromConstUnionArray(node->getType(), node->getConstArray(), constIndex, false);
+
+    builder.clearAccessChain();
+    builder.setAccessChainRValue(constantId);
+}
+
+// Translate a loop node (for/while/do-while) into a SPIR-V structured loop.
+//
+// Emits the header block with OpLoopMerge, then either a test-first shape
+// (while/for: test -> body -> continue -> back edge) or a test-last shape
+// (do-while: body -> continue -> conditional back edge). The back edge
+// always targets the header, and the header dominates the merge block,
+// as the SPIR-V structured-control-flow rules require.
+// Returns false: children are traversed explicitly here.
+bool TGlslangToSpvTraverser::visitLoop(glslang::TVisit /* visit */, glslang::TIntermLoop* node)
+{
+    auto blocks = builder.makeNewLoop();
+    builder.createBranch(&blocks.head);
+    // Spec requires back edges to target header blocks, and every header block
+    // must dominate its merge block.  Make a header block first to ensure these
+    // conditions are met.  By definition, it will contain OpLoopMerge, followed
+    // by a block-ending branch.  But we don't want to put any other body/test
+    // instructions in it, since the body/test may have arbitrary instructions,
+    // including merges of its own.
+    builder.setBuildPoint(&blocks.head)
+    builder.createLoopMerge(&blocks.merge, &blocks.continue_target, spv::LoopControlMaskNone);
+    if (node->testFirst() && node->getTest()) {
+        // while/for shape: evaluate the test in its own block after the header
+        spv::Block& test = builder.makeNewBlock();
+        builder.createBranch(&test);
+
+        builder.setBuildPoint(&test);
+        node->getTest()->traverse(this);
+        spv::Id condition =
+            accessChainLoad(node->getTest()->getType());
+        builder.createConditionalBranch(condition, &blocks.body, &blocks.merge);
+
+        builder.setBuildPoint(&blocks.body);
+        // push true so a 'break' in the body exits this loop (see visitBranch)
+        breakForLoop.push(true);
+        if (node->getBody())
+            node->getBody()->traverse(this);
+        builder.createBranch(&blocks.continue_target);
+        breakForLoop.pop();
+
+        // continue target: the 'for' terminal expression, then the back edge
+        builder.setBuildPoint(&blocks.continue_target);
+        if (node->getTerminal())
+            node->getTerminal()->traverse(this);
+        builder.createBranch(&blocks.head);
+    } else {
+        // do-while shape (or no test at all): body first
+        builder.createBranch(&blocks.body);
+
+        breakForLoop.push(true);
+        builder.setBuildPoint(&blocks.body);
+        if (node->getBody())
+            node->getBody()->traverse(this);
+        builder.createBranch(&blocks.continue_target);
+        breakForLoop.pop();
+
+        builder.setBuildPoint(&blocks.continue_target);
+        if (node->getTerminal())
+            node->getTerminal()->traverse(this);
+        if (node->getTest()) {
+            node->getTest()->traverse(this);
+            spv::Id condition =
+                accessChainLoad(node->getTest()->getType());
+            builder.createConditionalBranch(condition, &blocks.head, &blocks.merge);
+        } else {
+            // TODO: unless there was a break/return/discard instruction
+            // somewhere in the body, this is an infinite loop, so we should
+            // issue a warning.
+            builder.createBranch(&blocks.head);
+        }
+    }
+    builder.setBuildPoint(&blocks.merge);
+    builder.closeLoop();
+    return false;
+}
+
+// Translate a branch node: discard (kill), break, continue, or return.
+//
+// 'break' is disambiguated via the breakForLoop stack: loops push true and
+// switches push false, so the top tells which construct a break exits.
+// For 'return', if the loaded value's type differs from the function's
+// declared SPIR-V return type, it is copied member-wise through a temporary
+// via multiTypeStore() to coerce it.
+// Returns false: the expression child (if any) is traversed explicitly here.
+bool TGlslangToSpvTraverser::visitBranch(glslang::TVisit /* visit */, glslang::TIntermBranch* node)
+{
+    if (node->getExpression())
+        node->getExpression()->traverse(this);
+
+    switch (node->getFlowOp()) {
+    case glslang::EOpKill:
+        builder.makeDiscard();
+        break;
+    case glslang::EOpBreak:
+        if (breakForLoop.top())
+            builder.createLoopExit();
+        else
+            builder.addSwitchBreak();
+        break;
+    case glslang::EOpContinue:
+        builder.createLoopContinue();
+        break;
+    case glslang::EOpReturn:
+        if (node->getExpression()) {
+            const glslang::TType& glslangReturnType = node->getExpression()->getType();
+            spv::Id returnId = accessChainLoad(glslangReturnType);
+            if (builder.getTypeId(returnId) != currentFunction->getReturnType()) {
+                // type mismatch (e.g. differing layouts): copy through a
+                // function-local temporary of the declared return type
+                builder.clearAccessChain();
+                spv::Id copyId = builder.createVariable(spv::StorageClassFunction, currentFunction->getReturnType());
+                builder.setAccessChainLValue(copyId);
+                multiTypeStore(glslangReturnType, returnId);
+                returnId = builder.createLoad(copyId);
+            }
+            builder.makeReturn(false, returnId);
+        } else
+            builder.makeReturn(false);
+
+        builder.clearAccessChain();
+        break;
+
+    default:
+        assert(0);
+        break;
+    }
+
+    return false;
+}
+
+// Make a SPIR-V Id for the given glslang symbol.
+//
+// Constants (including specialization constants) are not SPIR-V variables
+// but still map to an Id, so they are steered off first; everything else
+// becomes an OpVariable in the translated storage class.
+spv::Id TGlslangToSpvTraverser::createSpvVariable(const glslang::TIntermSymbol* node)
+{
+    // Constants, including spec constants, need no variable.
+    if (node->getQualifier().isConstant())
+        return createSpvConstant(*node);
+
+    // A real variable: translate its storage class and type.
+    const spv::StorageClass storageClass = TranslateStorageClass(node->getType());
+    const spv::Id spvType = convertGlslangToSpvType(node->getType());
+
+    // Anonymous interface members get an empty name.
+    const char* name = node->getName().c_str();
+    if (glslang::IsAnonymous(name))
+        name = "";
+
+    return builder.createVariable(storageClass, spvType, name);
+}
+
+// Return the SPIR-V type Id of the component type sampled by 'sampler'
+// (float, int, or uint; anything else asserts and falls back to float).
+spv::Id TGlslangToSpvTraverser::getSampledType(const glslang::TSampler& sampler)
+{
+    if (sampler.type == glslang::EbtFloat)
+        return builder.makeFloatType(32);
+    if (sampler.type == glslang::EbtInt)
+        return builder.makeIntType(32);
+    if (sampler.type == glslang::EbtUint)
+        return builder.makeUintType(32);
+
+    assert(0);
+    return builder.makeFloatType(32);
+}
+
+// If node is a swizzle operation, return the type that should be used if
+// the swizzle base is first consumed by another operation, before the swizzle
+// is applied; otherwise return spv::NoType.
+spv::Id TGlslangToSpvTraverser::getInvertedSwizzleType(const glslang::TIntermTyped& node)
+{
+    const auto* op = node.getAsOperator();
+    if (op == nullptr || op->getOp() != glslang::EOpVectorSwizzle)
+        return spv::NoType;
+
+    // the type of the swizzle's base (the binary node's left child)
+    return convertGlslangToSpvType(node.getAsBinaryNode()->getLeft()->getType());
+}
+
+// When inverting a swizzle with a parent op, apply the swizzle operation
+// to the completed parent result and return the swizzled value.
+spv::Id TGlslangToSpvTraverser::createInvertedSwizzle(spv::Decoration precision, const glslang::TIntermTyped& node, spv::Id parentResult)
+{
+    std::vector<unsigned> channels;
+    convertSwizzle(*node.getAsBinaryNode()->getRight()->getAsAggregate(), channels);
+    const spv::Id swizzledType = convertGlslangToSpvType(node.getType());
+    return builder.createRvalueSwizzle(precision, swizzledType, parentResult, channels);
+}
+
+// Convert a glslang AST swizzle node into a vector of component indices
+// for building SPIR-V; each sequence entry is a constant channel index.
+void TGlslangToSpvTraverser::convertSwizzle(const glslang::TIntermAggregate& node, std::vector<unsigned>& swizzle)
+{
+    const glslang::TIntermSequence& components = node.getSequence();
+    for (size_t c = 0; c < components.size(); ++c)
+        swizzle.push_back(components[c]->getAsConstantUnion()->getConstArray()[0].getIConst());
+}
+
+// Convert from a glslang type to a SPIR-V type Id by delegating to the
+// recursive overload, seeding it with the explicit layout and qualifier
+// inherited from this top-level type.
+spv::Id TGlslangToSpvTraverser::convertGlslangToSpvType(const glslang::TType& type)
+{
+    const glslang::TLayoutPacking explicitLayout = getExplicitLayout(type);
+    return convertGlslangToSpvType(type, explicitLayout, type.getQualifier());
+}
+
+// Do full recursive conversion of an arbitrary glslang type to a SPIR-V Id.
+// explicitLayout can be kept the same throughout the hierarchical recursive walk.
+// Mutually recursive with convertGlslangStructToSpvType().
+//
+// Order matters: the scalar/struct base type is made first, then wrapped
+// in matrix/vector types, then in array types (inner dimensions before
+// the outer one). Capabilities/extensions are declared as each feature
+// (64-bit ints, half float, atomics) is encountered.
+spv::Id TGlslangToSpvTraverser::convertGlslangToSpvType(const glslang::TType& type, glslang::TLayoutPacking explicitLayout, const glslang::TQualifier& qualifier)
+{
+    spv::Id spvType = spv::NoResult;
+
+    switch (type.getBasicType()) {
+    case glslang::EbtVoid:
+        spvType = builder.makeVoidType();
+        assert (! type.isArray());
+        break;
+    case glslang::EbtFloat:
+        spvType = builder.makeFloatType(32);
+        break;
+    case glslang::EbtDouble:
+        spvType = builder.makeFloatType(64);
+        break;
+#ifdef AMD_EXTENSIONS
+    case glslang::EbtFloat16:
+        builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+        builder.addCapability(spv::CapabilityFloat16);
+        spvType = builder.makeFloatType(16);
+        break;
+#endif
+    case glslang::EbtBool:
+        // "transparent" bool doesn't exist in SPIR-V.  The GLSL convention is
+        // a 32-bit int where non-0 means true.
+        if (explicitLayout != glslang::ElpNone)
+            spvType = builder.makeUintType(32);
+        else
+            spvType = builder.makeBoolType();
+        break;
+    case glslang::EbtInt:
+        spvType = builder.makeIntType(32);
+        break;
+    case glslang::EbtUint:
+        spvType = builder.makeUintType(32);
+        break;
+    case glslang::EbtInt64:
+        builder.addCapability(spv::CapabilityInt64);
+        spvType = builder.makeIntType(64);
+        break;
+    case glslang::EbtUint64:
+        builder.addCapability(spv::CapabilityInt64);
+        spvType = builder.makeUintType(64);
+        break;
+    case glslang::EbtAtomicUint:
+        builder.addCapability(spv::CapabilityAtomicStorage);
+        spvType = builder.makeUintType(32);
+        break;
+    case glslang::EbtSampler:
+        {
+            const glslang::TSampler& sampler = type.getSampler();
+            if (sampler.sampler) {
+                // pure sampler
+                spvType = builder.makeSamplerType();
+            } else {
+                // an image is present, make its type
+                spvType = builder.makeImageType(getSampledType(sampler), TranslateDimensionality(sampler), sampler.shadow, sampler.arrayed, sampler.ms,
+                                                sampler.image ? 2 : 1, TranslateImageFormat(type));
+                if (sampler.combined) {
+                    // already has both image and sampler, make the combined type
+                    spvType = builder.makeSampledImageType(spvType);
+                }
+            }
+        }
+        break;
+    case glslang::EbtStruct:
+    case glslang::EbtBlock:
+        {
+            // If we've seen this struct type, return it
+            const glslang::TTypeList* glslangMembers = type.getStruct();
+
+            // Try to share structs for different layouts, but not yet for other
+            // kinds of qualification (primarily not yet including interpolant qualification).
+            if (! HasNonLayoutQualifiers(type, qualifier))
+                spvType = structMap[explicitLayout][qualifier.layoutMatrix][glslangMembers];
+            if (spvType != spv::NoResult)
+                break;
+
+            // else, we haven't seen it...
+            if (type.getBasicType() == glslang::EbtBlock)
+                memberRemapper[glslangMembers].resize(glslangMembers->size());
+            spvType = convertGlslangStructToSpvType(type, glslangMembers, explicitLayout, qualifier);
+        }
+        break;
+    default:
+        assert(0);
+        break;
+    }
+
+    if (type.isMatrix())
+        spvType = builder.makeMatrixType(spvType, type.getMatrixCols(), type.getMatrixRows());
+    else {
+        // If this variable has a vector element count greater than 1, create a SPIR-V vector
+        if (type.getVectorSize() > 1)
+            spvType = builder.makeVectorType(spvType, type.getVectorSize());
+    }
+
+    if (type.isArray()) {
+        int stride = 0;  // keep this 0 unless doing an explicit layout; 0 will mean no decoration, no stride
+
+        // Do all but the outer dimension
+        if (type.getArraySizes()->getNumDims() > 1) {
+            // We need to decorate array strides for types needing explicit layout, except blocks.
+            if (explicitLayout != glslang::ElpNone && type.getBasicType() != glslang::EbtBlock) {
+                // Use a dummy glslang type for querying internal strides of
+                // arrays of arrays, but using just a one-dimensional array.
+                glslang::TType simpleArrayType(type, 0); // deference type of the array
+                while (simpleArrayType.getArraySizes().getNumDims() > 1)
+                    simpleArrayType.getArraySizes().dereference();
+
+                // Will compute the higher-order strides here, rather than making a whole
+                // pile of types and doing repetitive recursion on their contents.
+                stride = getArrayStride(simpleArrayType, explicitLayout, qualifier.layoutMatrix);
+            }
+
+            // make the arrays
+            // walk from the innermost extra dimension outward, scaling the
+            // stride by each dimension's size as we go
+            for (int dim = type.getArraySizes()->getNumDims() - 1; dim > 0; --dim) {
+                spvType = builder.makeArrayType(spvType, makeArraySizeId(*type.getArraySizes(), dim), stride);
+                if (stride > 0)
+                    builder.addDecoration(spvType, spv::DecorationArrayStride, stride);
+                stride *= type.getArraySizes()->getDimSize(dim);
+            }
+        } else {
+            // single-dimensional array, and don't yet have stride
+
+            // We need to decorate array strides for types needing explicit layout, except blocks.
+            if (explicitLayout != glslang::ElpNone && type.getBasicType() != glslang::EbtBlock)
+                stride = getArrayStride(type, explicitLayout, qualifier.layoutMatrix);
+        }
+
+        // Do the outer dimension, which might not be known for a runtime-sized array
+        if (type.isRuntimeSizedArray()) {
+            spvType = builder.makeRuntimeArray(spvType);
+        } else {
+            assert(type.getOuterArraySize() > 0);
+            spvType = builder.makeArrayType(spvType, makeArraySizeId(*type.getArraySizes(), 0), stride);
+        }
+        if (stride > 0)
+            builder.addDecoration(spvType, spv::DecorationArrayStride, stride);
+    }
+
+    return spvType;
+}
+
+
+// Do full recursive conversion of a glslang structure (or block) type to a SPIR-V Id.
+// explicitLayout can be kept the same throughout the hierarchical recursive walk.
+// Mutually recursive with convertGlslangToSpvType().
+//
+// Hidden members are skipped; for blocks, memberRemapper records either the
+// compacted SPIR-V member index or -1 for hidden members, so later lookups
+// (e.g. decorateStructType) can map glslang member indices to SPIR-V ones.
+spv::Id TGlslangToSpvTraverser::convertGlslangStructToSpvType(const glslang::TType& type,
+                                                              const glslang::TTypeList* glslangMembers,
+                                                              glslang::TLayoutPacking explicitLayout,
+                                                              const glslang::TQualifier& qualifier)
+{
+    // Create a vector of struct types for SPIR-V to consume
+    std::vector<spv::Id> spvMembers;
+    int memberDelta = 0;  // how much the member's index changes from glslang to SPIR-V, normally 0, except sometimes for blocks
+    int locationOffset = 0;  // for use across struct members, when they are called recursively
+    for (int i = 0; i < (int)glslangMembers->size(); i++) {
+        glslang::TType& glslangMember = *(*glslangMembers)[i].type;
+        if (glslangMember.hiddenMember()) {
+            ++memberDelta;
+            if (type.getBasicType() == glslang::EbtBlock)
+                memberRemapper[glslangMembers][i] = -1;
+        } else {
+            if (type.getBasicType() == glslang::EbtBlock)
+                memberRemapper[glslangMembers][i] = i - memberDelta;
+            // modify just this child's view of the qualifier
+            glslang::TQualifier memberQualifier = glslangMember.getQualifier();
+            InheritQualifiers(memberQualifier, qualifier);
+
+            // manually inherit location; it's more complex
+            if (! memberQualifier.hasLocation() && qualifier.hasLocation())
+                memberQualifier.layoutLocation = qualifier.layoutLocation + locationOffset;
+            if (qualifier.hasLocation())
+                locationOffset += glslangIntermediate->computeTypeLocationSize(glslangMember);
+
+            // recurse
+            spvMembers.push_back(convertGlslangToSpvType(glslangMember, explicitLayout, memberQualifier));
+        }
+    }
+
+    // Make the SPIR-V type
+    spv::Id spvType = builder.makeStructType(spvMembers, type.getTypeName().c_str());
+    if (! HasNonLayoutQualifiers(type, qualifier))
+        structMap[explicitLayout][qualifier.layoutMatrix][glslangMembers] = spvType;
+
+    // Decorate it
+    decorateStructType(type, glslangMembers, explicitLayout, qualifier, spvType);
+
+    return spvType;
+}
+
+// Apply member names and decorations (layout, interpolation, memory, location,
+// XFB, built-in) to an already-built SPIR-V struct type, then decorate the
+// struct itself (Block/BufferBlock, stream, XFB buffer/stride).
+// 'qualifier' carries qualification inherited from the containing object;
+// 'explicitLayout' selects std140/std430 offset and matrix-stride decoration.
+void TGlslangToSpvTraverser::decorateStructType(const glslang::TType& type,
+                                                const glslang::TTypeList* glslangMembers,
+                                                glslang::TLayoutPacking explicitLayout,
+                                                const glslang::TQualifier& qualifier,
+                                                spv::Id spvType)
+{
+    // Name and decorate the non-hidden members
+    int offset = -1;         // accumulating byte offset for explicit layouts; -1 means "no forced offset yet"
+    int locationOffset = 0;  // for use within the members of this struct
+    for (int i = 0; i < (int)glslangMembers->size(); i++) {
+        glslang::TType& glslangMember = *(*glslangMembers)[i].type;
+        int member = i;
+        if (type.getBasicType() == glslang::EbtBlock)
+            member = memberRemapper[glslangMembers][i];
+
+        // modify just this child's view of the qualifier
+        glslang::TQualifier memberQualifier = glslangMember.getQualifier();
+        InheritQualifiers(memberQualifier, qualifier);
+
+        // using -1 above to indicate a hidden member
+        if (member >= 0) {
+            builder.addMemberName(spvType, member, glslangMember.getFieldName().c_str());
+            addMemberDecoration(spvType, member, TranslateLayoutDecoration(glslangMember, memberQualifier.layoutMatrix));
+            addMemberDecoration(spvType, member, TranslatePrecisionDecoration(glslangMember));
+            // Add interpolation and auxiliary storage decorations only to top-level members of Input and Output storage classes
+            if (type.getQualifier().storage == glslang::EvqVaryingIn || type.getQualifier().storage == glslang::EvqVaryingOut) {
+                if (type.getBasicType() == glslang::EbtBlock) {
+                    addMemberDecoration(spvType, member, TranslateInterpolationDecoration(memberQualifier));
+                    addMemberDecoration(spvType, member, TranslateAuxiliaryStorageDecoration(memberQualifier));
+                }
+            }
+            addMemberDecoration(spvType, member, TranslateInvariantDecoration(memberQualifier));
+
+            if (qualifier.storage == glslang::EvqBuffer) {
+                std::vector<spv::Decoration> memory;
+                TranslateMemoryDecoration(memberQualifier, memory);
+                // note: index renamed from 'i' to avoid shadowing the member-loop index above
+                for (unsigned int d = 0; d < memory.size(); ++d)
+                    addMemberDecoration(spvType, member, memory[d]);
+            }
+
+            // Compute location decoration; tricky based on whether inheritance is at play and
+            // what kind of container we have, etc.
+            // TODO: This algorithm (and its cousin above doing almost the same thing) should
+            //       probably move to the linker stage of the front end proper, and just have the
+            //       answer sitting already distributed throughout the individual member locations.
+            int location = -1;                // will only decorate if present or inherited
+            // Ignore member locations if the container is an array, as that's
+            // ill-specified and decisions have been made to not allow this anyway.
+            // The object itself must have a location, and that comes out from decorating the object,
+            // not the type (this code decorates types).
+            if (! type.isArray()) {
+                if (memberQualifier.hasLocation()) { // no inheritance, or override of inheritance
+                    // struct members should not have explicit locations
+                    assert(type.getBasicType() != glslang::EbtStruct);
+                    location = memberQualifier.layoutLocation;
+                } else if (type.getBasicType() != glslang::EbtBlock) {
+                    // If it is a not a Block, (...) Its members are assigned consecutive locations (...)
+                    // The members, and their nested types, must not themselves have Location decorations.
+                } else if (qualifier.hasLocation()) // inheritance
+                    location = qualifier.layoutLocation + locationOffset;
+            }
+            if (location >= 0)
+                builder.addMemberDecoration(spvType, member, spv::DecorationLocation, location);
+
+            if (qualifier.hasLocation())      // track for upcoming inheritance
+                locationOffset += glslangIntermediate->computeTypeLocationSize(glslangMember);
+
+            // component, XFB, others
+            if (glslangMember.getQualifier().hasComponent())
+                builder.addMemberDecoration(spvType, member, spv::DecorationComponent, glslangMember.getQualifier().layoutComponent);
+            if (glslangMember.getQualifier().hasXfbOffset())
+                builder.addMemberDecoration(spvType, member, spv::DecorationOffset, glslangMember.getQualifier().layoutXfbOffset);
+            else if (explicitLayout != glslang::ElpNone) {
+                // figure out what to do with offset, which is accumulating
+                int nextOffset;
+                updateMemberOffset(type, glslangMember, offset, nextOffset, explicitLayout, memberQualifier.layoutMatrix);
+                if (offset >= 0)
+                    builder.addMemberDecoration(spvType, member, spv::DecorationOffset, offset);
+                offset = nextOffset;
+            }
+
+            if (glslangMember.isMatrix() && explicitLayout != glslang::ElpNone)
+                builder.addMemberDecoration(spvType, member, spv::DecorationMatrixStride, getMatrixStride(glslangMember, explicitLayout, memberQualifier.layoutMatrix));
+
+            // built-in variable decorations
+            spv::BuiltIn builtIn = TranslateBuiltInDecoration(glslangMember.getQualifier().builtIn, true);
+            if (builtIn != spv::BuiltInMax)
+                addMemberDecoration(spvType, member, spv::DecorationBuiltIn, (int)builtIn);
+        }
+    }
+
+    // Decorate the structure
+    addDecoration(spvType, TranslateLayoutDecoration(type, qualifier.layoutMatrix));
+    addDecoration(spvType, TranslateBlockDecoration(type));
+    if (type.getQualifier().hasStream() && glslangIntermediate->isMultiStream()) {
+        builder.addCapability(spv::CapabilityGeometryStreams);
+        builder.addDecoration(spvType, spv::DecorationStream, type.getQualifier().layoutStream);
+    }
+    if (glslangIntermediate->getXfbMode()) {
+        builder.addCapability(spv::CapabilityTransformFeedback);
+        if (type.getQualifier().hasXfbStride())
+            builder.addDecoration(spvType, spv::DecorationXfbStride, type.getQualifier().layoutXfbStride);
+        if (type.getQualifier().hasXfbBuffer())
+            builder.addDecoration(spvType, spv::DecorationXfbBuffer, type.getQualifier().layoutXfbBuffer);
+    }
+}
+
+// Produce the SPIR-V id for one dimension of an array size.
+// A dimension may be either a specialization-constant expression (carried as a
+// front-end node) or a plain compile-time constant; both end up as an id here.
+spv::Id TGlslangToSpvTraverser::makeArraySizeId(const glslang::TArraySizes& arraySizes, int dim)
+{
+    // A node attached to the dimension means a specialization constant expression.
+    glslang::TIntermTyped* sizeNode = arraySizes.getDimNode(dim);
+    if (sizeNode != nullptr) {
+        builder.clearAccessChain();
+        sizeNode->traverse(this);
+        return accessChainLoad(sizeNode->getAsTyped()->getType());
+    }
+
+    // No node: the front end already folded this to a plain constant.
+    const int constSize = arraySizes.getDimSize(dim);
+    assert(constSize > 0);
+    return builder.makeUintConstant(constSize);
+}
+
+// Wrap the builder's accessChainLoad to:
+//  - localize handling of RelaxedPrecision
+//  - use the SPIR-V inferred type instead of another conversion of the glslang type
+//    (avoids unnecessary work and possible type punning for structures)
+//  - do conversion of concrete to abstract type (uint-backed bool -> OpTypeBool)
+spv::Id TGlslangToSpvTraverser::accessChainLoad(const glslang::TType& type)
+{
+    const spv::Id nominalTypeId = builder.accessChainGetInferredType();
+    spv::Id result = builder.accessChainLoad(TranslatePrecisionDecoration(type), nominalTypeId);
+
+    // Only booleans need a concrete-to-abstract conversion.
+    if (type.getBasicType() != glslang::EbtBool)
+        return result;
+
+    if (builder.isScalarType(nominalTypeId)) {
+        // scalar bool: compare the loaded uint against 0
+        const spv::Id boolType = builder.makeBoolType();
+        if (boolType != nominalTypeId)
+            result = builder.createBinOp(spv::OpINotEqual, boolType, result, builder.makeUintConstant(0));
+    } else if (builder.isVectorType(nominalTypeId)) {
+        // bvec: compare component-wise against a smeared 0
+        const int numComponents = builder.getNumTypeComponents(nominalTypeId);
+        const spv::Id bvecType = builder.makeVectorType(builder.makeBoolType(), numComponents);
+        if (bvecType != nominalTypeId)
+            result = builder.createBinOp(spv::OpINotEqual, bvecType, result,
+                                         makeSmearedConstant(builder.makeUintConstant(0), numComponents));
+    }
+
+    return result;
+}
+
+// Wrap the builder's accessChainStore to:
+//  - do conversion of abstract to concrete type (OpTypeBool -> uint-backed bool)
+//
+// Implicitly uses the existing builder.accessChain as the storage target.
+void TGlslangToSpvTraverser::accessChainStore(const glslang::TType& type, spv::Id rvalue)
+{
+    // Only booleans need conversion before the store.
+    if (type.getBasicType() == glslang::EbtBool) {
+        const spv::Id nominalTypeId = builder.accessChainGetInferredType();
+
+        if (builder.isScalarType(nominalTypeId)) {
+            // scalar bool: select 1 or 0 in the nominal (uint) type
+            const spv::Id boolType = builder.makeBoolType();
+            if (boolType != nominalTypeId) {
+                const spv::Id zeroId = builder.makeUintConstant(0);
+                const spv::Id oneId  = builder.makeUintConstant(1);
+                rvalue = builder.createTriOp(spv::OpSelect, nominalTypeId, rvalue, oneId, zeroId);
+            }
+        } else if (builder.isVectorType(nominalTypeId)) {
+            // bvec: select smeared 1s or 0s component-wise
+            const int numComponents = builder.getNumTypeComponents(nominalTypeId);
+            const spv::Id bvecType = builder.makeVectorType(builder.makeBoolType(), numComponents);
+            if (bvecType != nominalTypeId) {
+                const spv::Id zeroId = makeSmearedConstant(builder.makeUintConstant(0), numComponents);
+                const spv::Id oneId  = makeSmearedConstant(builder.makeUintConstant(1), numComponents);
+                rvalue = builder.createTriOp(spv::OpSelect, nominalTypeId, rvalue, oneId, zeroId);
+            }
+        }
+    }
+
+    builder.accessChainStore(rvalue);
+}
+
+// For storing when types match at the glslang level, but might not match at the
+// SPIR-V level.
+//
+// This especially happens when a single glslang type expands to multiple
+// SPIR-V types, like a struct that is used in a member-undecorated way as well
+// as in a member-decorated way.
+//
+// NOTE: This function can handle any store request; if it's not special it
+// simplifies to a simple OpStore.
+//
+// Implicitly uses the existing builder.accessChain as the storage target.
+void TGlslangToSpvTraverser::multiTypeStore(const glslang::TType& type, spv::Id rValue)
+{
+    // we only do the complex path here if it's an aggregate
+    if (! type.isStruct() && ! type.isArray()) {
+        accessChainStore(type, rValue);
+        return;
+    }
+
+    // and, it has to be a case of type aliasing
+    spv::Id rType = builder.getTypeId(rValue);
+    spv::Id lValue = builder.accessChainGetLValue();
+    // lValue is a pointer; its contained type is the pointee type being stored into
+    spv::Id lType = builder.getContainedTypeId(builder.getTypeId(lValue));
+    if (lType == rType) {
+        accessChainStore(type, rValue);
+        return;
+    }
+
+    // Recursively (as needed) copy an aggregate type to a different aggregate type,
+    // where the two types were the same type in GLSL. This requires member
+    // by member copy, recursively.
+
+    // If an array, copy element by element.
+    if (type.isArray()) {
+        glslang::TType glslangElementType(type, 0);
+        spv::Id elementRType = builder.getContainedTypeId(rType);
+        for (int index = 0; index < type.getOuterArraySize(); ++index) {
+            // get the source member
+            spv::Id elementRValue = builder.createCompositeExtract(rValue, elementRType, index);
+
+            // set up the target storage: re-anchor the access chain at the l-value
+            // and push this element's index (the chain was consumed/cleared above)
+            builder.clearAccessChain();
+            builder.setAccessChainLValue(lValue);
+            builder.accessChainPush(builder.makeIntConstant(index));
+
+            // store the member (recursing for nested aggregates)
+            multiTypeStore(glslangElementType, elementRValue);
+        }
+    } else {
+        assert(type.isStruct());
+
+        // loop over structure members
+        const glslang::TTypeList& members = *type.getStruct();
+        for (int m = 0; m < (int)members.size(); ++m) {
+            const glslang::TType& glslangMemberType = *members[m].type;
+
+            // get the source member; member m's SPIR-V type may differ from the target's
+            spv::Id memberRType = builder.getContainedTypeId(rType, m);
+            spv::Id memberRValue = builder.createCompositeExtract(rValue, memberRType, m);
+
+            // set up the target storage: re-anchor the access chain at member m
+            builder.clearAccessChain();
+            builder.setAccessChainLValue(lValue);
+            builder.accessChainPush(builder.makeIntConstant(m));
+
+            // store the member (recursing for nested aggregates)
+            multiTypeStore(glslangMemberType, memberRValue);
+        }
+    }
+}
+
+// Decide whether this type should be decorated with offsets and strides,
+// and if so, whether std140 or std430 rules apply.
+// Returns ElpNone when no explicit layout is required.
+glslang::TLayoutPacking TGlslangToSpvTraverser::getExplicitLayout(const glslang::TType& type) const
+{
+    // only blocks get explicit layouts
+    if (type.getBasicType() != glslang::EbtBlock)
+        return glslang::ElpNone;
+
+    // and only uniform or buffer blocks at that
+    const glslang::TStorageQualifier storage = type.getQualifier().storage;
+    if (storage != glslang::EvqUniform && storage != glslang::EvqBuffer)
+        return glslang::ElpNone;
+
+    // only std140/std430 packings translate to explicit offsets/strides
+    const glslang::TLayoutPacking packing = type.getQualifier().layoutPacking;
+    if (packing == glslang::ElpStd140 || packing == glslang::ElpStd430)
+        return packing;
+
+    return glslang::ElpNone;
+}
+
+// Given an array type, return the integer stride (in bytes) required for that
+// array under the given explicit layout and matrix orientation.
+int TGlslangToSpvTraverser::getArrayStride(const glslang::TType& arrayType, glslang::TLayoutPacking explicitLayout, glslang::TLayoutMatrix matrixLayout)
+{
+    int elementSize;   // unused here; getBaseAlignment reports it alongside the stride
+    int arrayStride;
+    glslangIntermediate->getBaseAlignment(arrayType, elementSize, arrayStride,
+                                          explicitLayout == glslang::ElpStd140,
+                                          matrixLayout == glslang::ElmRowMajor);
+    return arrayStride;
+}
+
+// Given a matrix type, or array (of array) of matrixes type, return the integer
+// stride required for that matrix when used as a member of an interface block.
+int TGlslangToSpvTraverser::getMatrixStride(const glslang::TType& matrixType, glslang::TLayoutPacking explicitLayout, glslang::TLayoutMatrix matrixLayout)
+{
+    // strip any array dimensions so alignment is computed for the matrix itself
+    glslang::TType bareMatrix;
+    bareMatrix.shallowCopy(matrixType);
+    bareMatrix.clearArraySizes();
+
+    int matrixSize;   // unused here; getBaseAlignment reports it alongside the stride
+    int matrixStride;
+    glslangIntermediate->getBaseAlignment(bareMatrix, matrixSize, matrixStride,
+                                          explicitLayout == glslang::ElpStd140,
+                                          matrixLayout == glslang::ElmRowMajor);
+    return matrixStride;
+}
+
+// Given a member type of a struct, realign the current offset for it, and compute
+// the next (not yet aligned) offset for the next member, which will get aligned
+// on the next call.
+// 'currentOffset' should be passed in already initialized, ready to modify, and reflecting
+// the migration of data from nextOffset -> currentOffset.  It should be -1 on the first call.
+// -1 means a non-forced member offset (no decoration needed).
+void TGlslangToSpvTraverser::updateMemberOffset(const glslang::TType& /*structType*/, const glslang::TType& memberType, int& currentOffset, int& nextOffset,
+                                                glslang::TLayoutPacking explicitLayout, glslang::TLayoutMatrix matrixLayout)
+{
+    // this will get a positive value when deemed necessary
+    nextOffset = -1;
+
+    // override anything in currentOffset with user-set offset
+    if (memberType.getQualifier().hasOffset())
+        currentOffset = memberType.getQualifier().layoutOffset;
+
+    // It could be that current linker usage in glslang updated all the layoutOffset,
+    // in which case the following code does not matter.  But, that's not quite right
+    // once cross-compilation unit GLSL validation is done, as the original user
+    // settings are needed in layoutOffset, and then the following will come into play.
+
+    if (explicitLayout == glslang::ElpNone) {
+        // no explicit layout: only a user-supplied offset decorates the member
+        if (! memberType.getQualifier().hasOffset())
+            currentOffset = -1;
+
+        return;
+    }
+
+    // Getting this far means we need explicit offsets
+    if (currentOffset < 0)
+        currentOffset = 0;
+
+    // Now, currentOffset is valid (either 0, or from a previous nextOffset),
+    // but possibly not yet correctly aligned.
+
+    int memberSize;
+    int dummyStride;  // stride reported by getBaseAlignment but not needed here
+    int memberAlignment = glslangIntermediate->getBaseAlignment(memberType, memberSize, dummyStride, explicitLayout == glslang::ElpStd140, matrixLayout == glslang::ElmRowMajor);
+    // round the running offset up to this member's required alignment, then
+    // advance past the member for the next call
+    glslang::RoundToPow2(currentOffset, memberAlignment);
+    nextOffset = currentOffset + memberSize;
+}
+
+// Record use of a built-in struct member so any capability it implies gets declared.
+// Delegates capability generation to TranslateBuiltInDecoration, which already
+// guards against duplicates.
+void TGlslangToSpvTraverser::declareUseOfStructMember(const glslang::TTypeList& members, int glslangMember)
+{
+    const glslang::TBuiltInVariable builtIn = members[glslangMember].type->getQualifier().builtIn;
+    switch (builtIn) {
+    case glslang::EbvClipDistance:
+    case glslang::EbvCullDistance:
+    case glslang::EbvPointSize:
+        // These members imply a capability; emit it via the decoration translator.
+        // (We could do this for any built-in; duplicates are already guarded against.)
+        TranslateBuiltInDecoration(builtIn, false);
+        break;
+    default:
+        // Capabilities were already generated when the struct was declared.
+        break;
+    }
+}
+
+// True when 'node' is the shader's entry-point function (matched by mangled name).
+bool TGlslangToSpvTraverser::isShaderEntryPoint(const glslang::TIntermAggregate* node)
+{
+    return 0 == node->getName().compare(glslangIntermediate->getEntryPointMangledName().c_str());
+}
+
+// Make all the functions, skeletally, without actually visiting their bodies.
+// Populates functionMap (name -> spv::Function) and symbolValues (front-end
+// symbol id -> parameter id) so calls can be generated before bodies exist.
+void TGlslangToSpvTraverser::makeFunctions(const glslang::TIntermSequence& glslFunctions)
+{
+    for (int f = 0; f < (int)glslFunctions.size(); ++f) {
+        glslang::TIntermAggregate* glslFunction = glslFunctions[f]->getAsAggregate();
+        // skip non-functions and the entry point (handled separately)
+        if (! glslFunction || glslFunction->getOp() != glslang::EOpFunction || isShaderEntryPoint(glslFunction))
+            continue;
+
+        // We're on a user function.  Set up the basic interface for the function now,
+        // so that it's available to call.  Translating the body will happen later.
+        //
+        // Typically (except for a "const in" parameter), an address will be passed to the
+        // function.  What it is an address of varies:
+        //
+        // - "in" parameters not marked as "const" can be written to without modifying the calling
+        //   argument so that write needs to be to a copy, hence the address of a copy works.
+        //
+        // - "const in" parameters can just be the r-value, as no writes need occur.
+        //
+        // - "out" and "inout" arguments can't be done as pointers to the calling argument, because
+        //   GLSL has copy-in/copy-out semantics.  They can be handled though with a pointer to a copy.
+
+        std::vector<spv::Id> paramTypes;
+        std::vector<spv::Decoration> paramPrecisions;
+        // sequence[0] of a function node is the parameter list aggregate
+        glslang::TIntermSequence& parameters = glslFunction->getSequence()[0]->getAsAggregate()->getSequence();
+
+        for (int p = 0; p < (int)parameters.size(); ++p) {
+            const glslang::TType& paramType = parameters[p]->getAsTyped()->getType();
+            spv::Id typeId = convertGlslangToSpvType(paramType);
+            if (paramType.isOpaque())
+                typeId = builder.makePointer(TranslateStorageClass(paramType), typeId);
+            else if (paramType.getQualifier().storage != glslang::EvqConstReadOnly)
+                typeId = builder.makePointer(spv::StorageClassFunction, typeId);
+            else
+                rValueParameters.insert(parameters[p]->getAsSymbolNode()->getId());  // pass by value
+            paramPrecisions.push_back(TranslatePrecisionDecoration(paramType));
+            paramTypes.push_back(typeId);
+        }
+
+        spv::Block* functionBlock;
+        spv::Function *function = builder.makeFunctionEntry(TranslatePrecisionDecoration(glslFunction->getType()),
+                                                            convertGlslangToSpvType(glslFunction->getType()),
+                                                            glslFunction->getName().c_str(), paramTypes, paramPrecisions, &functionBlock);
+
+        // Track function to emit/call later
+        functionMap[glslFunction->getName().c_str()] = function;
+
+        // Set the parameter id's
+        for (int p = 0; p < (int)parameters.size(); ++p) {
+            symbolValues[parameters[p]->getAsSymbolNode()->getId()] = function->getParamId(p);
+            // give a name too
+            builder.addName(function->getParamId(p), parameters[p]->getAsSymbolNode()->getName().c_str());
+        }
+    }
+}
+
+// Process all the global initializers, skipping functions and link objects.
+// Initializer code is emitted at the beginning of the shader entry point.
+void TGlslangToSpvTraverser::makeGlobalInitializers(const glslang::TIntermSequence& initializers)
+{
+    builder.setBuildPoint(shaderEntry->getLastBlock());
+    for (int i = 0; i < (int)initializers.size(); ++i) {
+        glslang::TIntermAggregate* node = initializers[i]->getAsAggregate();
+        if (node == nullptr)
+            continue;
+        if (node->getOp() == glslang::EOpFunction || node->getOp() == glslang::EOpLinkerObjects)
+            continue;
+
+        // A top-level node that's not a function: treat as an initializer whose
+        // code goes into the beginning of the entry point.
+        node->traverse(this);
+    }
+}
+
+// Process all the function bodies and the linker-object list, skipping
+// top-level initializers (those were handled by makeGlobalInitializers).
+void TGlslangToSpvTraverser::visitFunctions(const glslang::TIntermSequence& glslFunctions)
+{
+    for (int f = 0; f < (int)glslFunctions.size(); ++f) {
+        glslang::TIntermAggregate* candidate = glslFunctions[f]->getAsAggregate();
+        if (candidate == nullptr)
+            continue;
+        if (candidate->getOp() == glslang::EOpFunction || candidate->getOp() == glslang::EOpLinkerObjects)
+            candidate->traverse(this);
+    }
+}
+
+// Begin translating a function body: look up the skeleton built by
+// makeFunctions() and point the builder at its entry block.
+void TGlslangToSpvTraverser::handleFunctionEntry(const glslang::TIntermAggregate* node)
+{
+    // SPIR-V functions should already be in the functionMap from the prepass
+    // that called makeFunctions().
+    currentFunction = functionMap[node->getName().c_str()];
+    builder.setBuildPoint(currentFunction->getEntryBlock());
+}
+
+// Evaluate the arguments of a texture/image call aggregate and append their
+// SPIR-V ids to 'arguments'.  Most arguments are loaded as r-values; a few
+// positions (atomic image operand, sparse-texel out-parameters) must stay
+// l-values, decided per opcode below.
+void TGlslangToSpvTraverser::translateArguments(const glslang::TIntermAggregate& node, std::vector<spv::Id>& arguments)
+{
+    const glslang::TIntermSequence& glslangArguments = node.getSequence();
+
+    glslang::TSampler sampler = {};
+    bool cubeCompare = false;
+    if (node.isTexture() || node.isImage()) {
+        // argument 0 carries the sampler/image; cube-array shadow forms shift
+        // later argument positions by one (explicit Dref argument)
+        sampler = glslangArguments[0]->getAsTyped()->getType().getSampler();
+        cubeCompare = sampler.dim == glslang::EsdCube && sampler.arrayed && sampler.shadow;
+    }
+
+    for (int i = 0; i < (int)glslangArguments.size(); ++i) {
+        builder.clearAccessChain();
+        glslangArguments[i]->traverse(this);
+
+        // Special case l-value operands; the exact index of the l-value argument
+        // depends on the opcode and on ms/shadow/rect variations of the sampler.
+        bool lvalue = false;
+        switch (node.getOp()) {
+        case glslang::EOpImageAtomicAdd:
+        case glslang::EOpImageAtomicMin:
+        case glslang::EOpImageAtomicMax:
+        case glslang::EOpImageAtomicAnd:
+        case glslang::EOpImageAtomicOr:
+        case glslang::EOpImageAtomicXor:
+        case glslang::EOpImageAtomicExchange:
+        case glslang::EOpImageAtomicCompSwap:
+            // the image itself becomes an OpImageTexelPointer operand
+            if (i == 0)
+                lvalue = true;
+            break;
+        case glslang::EOpSparseImageLoad:
+            if ((sampler.ms && i == 3) || (! sampler.ms && i == 2))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTexture:
+            if ((cubeCompare && i == 3) || (! cubeCompare && i == 2))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureClamp:
+            if ((cubeCompare && i == 4) || (! cubeCompare && i == 3))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureLod:
+        case glslang::EOpSparseTextureOffset:
+            if (i == 3)
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureFetch:
+            if ((sampler.dim != glslang::EsdRect && i == 3) || (sampler.dim == glslang::EsdRect && i == 2))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureFetchOffset:
+            if ((sampler.dim != glslang::EsdRect && i == 4) || (sampler.dim == glslang::EsdRect && i == 3))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureLodOffset:
+        case glslang::EOpSparseTextureGrad:
+        case glslang::EOpSparseTextureOffsetClamp:
+            if (i == 4)
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureGradOffset:
+        case glslang::EOpSparseTextureGradClamp:
+            if (i == 5)
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureGradOffsetClamp:
+            if (i == 6)
+                lvalue = true;
+            break;
+         case glslang::EOpSparseTextureGather:
+            if ((sampler.shadow && i == 3) || (! sampler.shadow && i == 2))
+                lvalue = true;
+            break;
+        case glslang::EOpSparseTextureGatherOffset:
+        case glslang::EOpSparseTextureGatherOffsets:
+            if ((sampler.shadow && i == 4) || (! sampler.shadow && i == 3))
+                lvalue = true;
+            break;
+        default:
+            break;
+        }
+
+        if (lvalue)
+            arguments.push_back(builder.accessChainGetLValue());
+        else
+            arguments.push_back(accessChainLoad(glslangArguments[i]->getAsTyped()->getType()));
+    }
+}
+
+// Unary overload: a single operand, always loaded as an r-value.
+void TGlslangToSpvTraverser::translateArguments(glslang::TIntermUnary& node, std::vector<spv::Id>& arguments)
+{
+    glslang::TIntermTyped* operand = node.getOperand();
+    builder.clearAccessChain();
+    operand->traverse(this);
+    arguments.push_back(accessChainLoad(operand->getType()));
+}
+
+spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermOperator* node)
+{
+    if (! node->isImage() && ! node->isTexture()) {
+        return spv::NoResult;
+    }
+    auto resultType = [&node,this]{ return convertGlslangToSpvType(node->getType()); };
+
+    // Process a GLSL texturing op (will be SPV image)
+    const glslang::TSampler sampler = node->getAsAggregate() ? node->getAsAggregate()->getSequence()[0]->getAsTyped()->getType().getSampler()
+                                                             : node->getAsUnaryNode()->getOperand()->getAsTyped()->getType().getSampler();
+    std::vector<spv::Id> arguments;
+    if (node->getAsAggregate())
+        translateArguments(*node->getAsAggregate(), arguments);
+    else
+        translateArguments(*node->getAsUnaryNode(), arguments);
+    spv::Decoration precision = TranslatePrecisionDecoration(node->getOperationPrecision());
+
+    spv::Builder::TextureParameters params = { };
+    params.sampler = arguments[0];
+
+    glslang::TCrackedTextureOp cracked;
+    node->crackTexture(sampler, cracked);
+
+    // Check for queries
+    if (cracked.query) {
+        // OpImageQueryLod works on a sampled image, for other queries the image has to be extracted first
+        if (node->getOp() != glslang::EOpTextureQueryLod && builder.isSampledImage(params.sampler))
+            params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
+
+        switch (node->getOp()) {
+        case glslang::EOpImageQuerySize:
+        case glslang::EOpTextureQuerySize:
+            if (arguments.size() > 1) {
+                params.lod = arguments[1];
+                return builder.createTextureQueryCall(spv::OpImageQuerySizeLod, params);
+            } else
+                return builder.createTextureQueryCall(spv::OpImageQuerySize, params);
+        case glslang::EOpImageQuerySamples:
+        case glslang::EOpTextureQuerySamples:
+            return builder.createTextureQueryCall(spv::OpImageQuerySamples, params);
+        case glslang::EOpTextureQueryLod:
+            params.coords = arguments[1];
+            return builder.createTextureQueryCall(spv::OpImageQueryLod, params);
+        case glslang::EOpTextureQueryLevels:
+            return builder.createTextureQueryCall(spv::OpImageQueryLevels, params);
+        case glslang::EOpSparseTexelsResident:
+            return builder.createUnaryOp(spv::OpImageSparseTexelsResident, builder.makeBoolType(), arguments[0]);
+        default:
+            assert(0);
+            break;
+        }
+    }
+
+    // Check for image functions other than queries
+    if (node->isImage()) {
+        std::vector<spv::Id> operands;
+        auto opIt = arguments.begin();
+        operands.push_back(*(opIt++));
+
+        // Handle subpass operations
+        // TODO: GLSL should change to have the "MS" only on the type rather than the
+        // built-in function.
+        if (cracked.subpass) {
+            // add on the (0,0) coordinate
+            spv::Id zero = builder.makeIntConstant(0);
+            std::vector<spv::Id> comps;
+            comps.push_back(zero);
+            comps.push_back(zero);
+            operands.push_back(builder.makeCompositeConstant(builder.makeVectorType(builder.makeIntType(32), 2), comps));
+            if (sampler.ms) {
+                operands.push_back(spv::ImageOperandsSampleMask);
+                operands.push_back(*(opIt++));
+            }
+            return builder.createOp(spv::OpImageRead, resultType(), operands);
+        }
+
+        operands.push_back(*(opIt++));
+        if (node->getOp() == glslang::EOpImageLoad) {
+            if (sampler.ms) {
+                operands.push_back(spv::ImageOperandsSampleMask);
+                operands.push_back(*opIt);
+            }
+            if (builder.getImageTypeFormat(builder.getImageType(operands.front())) == spv::ImageFormatUnknown)
+                builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat);
+            return builder.createOp(spv::OpImageRead, resultType(), operands);
+        } else if (node->getOp() == glslang::EOpImageStore) {
+            if (sampler.ms) {
+                operands.push_back(*(opIt + 1));
+                operands.push_back(spv::ImageOperandsSampleMask);
+                operands.push_back(*opIt);
+            } else
+                operands.push_back(*opIt);
+            builder.createNoResultOp(spv::OpImageWrite, operands);
+            if (builder.getImageTypeFormat(builder.getImageType(operands.front())) == spv::ImageFormatUnknown)
+                builder.addCapability(spv::CapabilityStorageImageWriteWithoutFormat);
+            return spv::NoResult;
+        } else if (node->getOp() == glslang::EOpSparseImageLoad) {
+            builder.addCapability(spv::CapabilitySparseResidency);
+            if (builder.getImageTypeFormat(builder.getImageType(operands.front())) == spv::ImageFormatUnknown)
+                builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat);
+
+            if (sampler.ms) {
+                operands.push_back(spv::ImageOperandsSampleMask);
+                operands.push_back(*opIt++);
+            }
+
+            // Create the return type that was a special structure
+            spv::Id texelOut = *opIt;
+            spv::Id typeId0 = resultType();
+            spv::Id typeId1 = builder.getDerefTypeId(texelOut);
+            spv::Id resultTypeId = builder.makeStructResultType(typeId0, typeId1);
+
+            spv::Id resultId = builder.createOp(spv::OpImageSparseRead, resultTypeId, operands);
+
+            // Decode the return type
+            builder.createStore(builder.createCompositeExtract(resultId, typeId1, 1), texelOut);
+            return builder.createCompositeExtract(resultId, typeId0, 0);
+        } else {
+            // Process image atomic operations
+
+            // GLSL "IMAGE_PARAMS" will involve in constructing an image texel pointer and this pointer,
+            // as the first source operand, is required by SPIR-V atomic operations.
+            operands.push_back(sampler.ms ? *(opIt++) : builder.makeUintConstant(0)); // For non-MS, the value should be 0
+
+            spv::Id resultTypeId = builder.makePointer(spv::StorageClassImage, resultType());
+            spv::Id pointer = builder.createOp(spv::OpImageTexelPointer, resultTypeId, operands);
+
+            std::vector<spv::Id> operands;
+            operands.push_back(pointer);
+            for (; opIt != arguments.end(); ++opIt)
+                operands.push_back(*opIt);
+
+            return createAtomicOperation(node->getOp(), precision, resultType(), operands, node->getBasicType());
+        }
+    }
+
+    // Check for texture functions other than queries
+    bool sparse = node->isSparseTexture();
+    bool cubeCompare = sampler.dim == glslang::EsdCube && sampler.arrayed && sampler.shadow;
+
+    // check for bias argument
+    bool bias = false;
+    if (! cracked.lod && ! cracked.gather && ! cracked.grad && ! cracked.fetch && ! cubeCompare) {
+        int nonBiasArgCount = 2;
+        if (cracked.offset)
+            ++nonBiasArgCount;
+        if (cracked.grad)
+            nonBiasArgCount += 2;
+        if (cracked.lodClamp)
+            ++nonBiasArgCount;
+        if (sparse)
+            ++nonBiasArgCount;
+
+        if ((int)arguments.size() > nonBiasArgCount)
+            bias = true;
+    }
+
+    // See if the sampler param should really be just the SPV image part
+    if (cracked.fetch) {
+        // a fetch needs to have the image extracted first
+        if (builder.isSampledImage(params.sampler))
+            params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
+    }
+
+    // set the rest of the arguments
+
+    params.coords = arguments[1];
+    int extraArgs = 0;
+    bool noImplicitLod = false;
+
+    // sort out where Dref is coming from
+    if (cubeCompare) {
+        params.Dref = arguments[2];
+        ++extraArgs;
+    } else if (sampler.shadow && cracked.gather) {
+        params.Dref = arguments[2];
+        ++extraArgs;
+    } else if (sampler.shadow) {
+        std::vector<spv::Id> indexes;
+        int dRefComp;
+        if (cracked.proj)
+            dRefComp = 2;  // "The resulting 3rd component of P in the shadow forms is used as Dref"
+        else
+            dRefComp = builder.getNumComponents(params.coords) - 1;
+        indexes.push_back(dRefComp);
+        params.Dref = builder.createCompositeExtract(params.coords, builder.getScalarTypeId(builder.getTypeId(params.coords)), indexes);
+    }
+
+    // lod
+    if (cracked.lod) {
+        params.lod = arguments[2];
+        ++extraArgs;
+    } else if (glslangIntermediate->getStage() != EShLangFragment) {
+        // we need to invent the default lod for an explicit lod instruction for a non-fragment stage
+        noImplicitLod = true;
+    }
+
+    // multisample
+    if (sampler.ms) {
+        params.sample = arguments[2]; // For MS, "sample" should be specified
+        ++extraArgs;
+    }
+
+    // gradient
+    if (cracked.grad) {
+        params.gradX = arguments[2 + extraArgs];
+        params.gradY = arguments[3 + extraArgs];
+        extraArgs += 2;
+    }
+
+    // offset and offsets
+    if (cracked.offset) {
+        params.offset = arguments[2 + extraArgs];
+        ++extraArgs;
+    } else if (cracked.offsets) {
+        params.offsets = arguments[2 + extraArgs];
+        ++extraArgs;
+    }
+
+    // lod clamp
+    if (cracked.lodClamp) {
+        params.lodClamp = arguments[2 + extraArgs];
+        ++extraArgs;
+    }
+
+    // sparse
+    if (sparse) {
+        params.texelOut = arguments[2 + extraArgs];
+        ++extraArgs;
+    }
+
+    // bias
+    if (bias) {
+        params.bias = arguments[2 + extraArgs];
+        ++extraArgs;
+    }
+
+    // gather component
+    if (cracked.gather && ! sampler.shadow) {
+        // default component is 0, if missing, otherwise an argument
+        if (2 + extraArgs < (int)arguments.size()) {
+            params.component = arguments[2 + extraArgs];
+            ++extraArgs;
+        } else {
+            params.component = builder.makeIntConstant(0);
+        }
+    }
+
+    // projective component (might not to move)
+    // GLSL: "The texture coordinates consumed from P, not including the last component of P,
+    //       are divided by the last component of P."
+    // SPIR-V:  "... (u [, v] [, w], q)... It may be a vector larger than needed, but all
+    //          unused components will appear after all used components."
+    if (cracked.proj) {
+        int projSourceComp = builder.getNumComponents(params.coords) - 1;
+        int projTargetComp;
+        switch (sampler.dim) {
+        case glslang::Esd1D:   projTargetComp = 1;              break;
+        case glslang::Esd2D:   projTargetComp = 2;              break;
+        case glslang::EsdRect: projTargetComp = 2;              break;
+        default:               projTargetComp = projSourceComp; break;
+        }
+        // copy the projective coordinate if we have to
+        if (projTargetComp != projSourceComp) {
+            spv::Id projComp = builder.createCompositeExtract(params.coords, 
+                                                              builder.getScalarTypeId(builder.getTypeId(params.coords)),
+                                                              projSourceComp);
+            params.coords = builder.createCompositeInsert(projComp, params.coords,
+                                                          builder.getTypeId(params.coords), projTargetComp);
+        }
+    }
+
+    return builder.createTextureCall(precision, resultType(), sparse, cracked.fetch, cracked.proj, cracked.gather, noImplicitLod, params);
+}
+
+// Generate SPIR-V for a call to a user-defined (non-built-in) function.
+// Implements glslang's copy-in/copy-out parameter-passing semantics; see the
+// comments in makeFunctions() for the rationale.
+// Returns the id of the call's result, or 0 if the callee was never
+// registered in functionMap.
+spv::Id TGlslangToSpvTraverser::handleUserFunctionCall(const glslang::TIntermAggregate* node)
+{
+    // Grab the function's pointer from the previously created function
+    spv::Function* function = functionMap[node->getName().c_str()];
+    if (! function)
+        return 0;
+
+    const glslang::TIntermSequence& glslangArgs = node->getSequence();
+    const glslang::TQualifierList& qualifiers = node->getQualifierList();
+
+    //  See comments in makeFunctions() for details about the semantics for parameter passing.
+    //
+    // These imply we need a four step process:
+    // 1. Evaluate the arguments
+    // 2. Allocate and make copies of in, out, and inout arguments
+    // 3. Make the call
+    // 4. Copy back the results
+
+    // 1. Evaluate the arguments
+    std::vector<spv::Builder::AccessChain> lValues;
+    std::vector<spv::Id> rValues;
+    std::vector<const glslang::TType*> argTypes;
+    for (int a = 0; a < (int)glslangArgs.size(); ++a) {
+        const glslang::TType& paramType = glslangArgs[a]->getAsTyped()->getType();
+        // build l-value
+        builder.clearAccessChain();
+        glslangArgs[a]->traverse(this);
+        argTypes.push_back(&paramType);
+        // keep outputs and opaque objects as l-values, evaluate input-only as r-values
+        if (qualifiers[a] != glslang::EvqConstReadOnly || paramType.isOpaque()) {
+            // save l-value
+            lValues.push_back(builder.getAccessChain());
+        } else {
+            // process r-value
+            rValues.push_back(accessChainLoad(*argTypes.back()));
+        }
+    }
+
+    // 2. Allocate space for anything needing a copy, and if it's "in" or "inout"
+    // copy the original into that space.
+    //
+    // Also, build up the list of actual arguments to pass in for the call
+    // lValueCount/rValueCount walk lValues/rValues in the same order they
+    // were pushed in step 1.
+    int lValueCount = 0;
+    int rValueCount = 0;
+    std::vector<spv::Id> spvArgs;
+    for (int a = 0; a < (int)glslangArgs.size(); ++a) {
+        const glslang::TType& paramType = glslangArgs[a]->getAsTyped()->getType();
+        spv::Id arg;
+        if (paramType.isOpaque()) {
+            // opaque objects are passed by their l-value directly; no copy is made
+            builder.setAccessChain(lValues[lValueCount]);
+            arg = builder.accessChainGetLValue();
+            ++lValueCount;
+        } else if (qualifiers[a] != glslang::EvqConstReadOnly) {
+            // need space to hold the copy
+            arg = builder.createVariable(spv::StorageClassFunction, convertGlslangToSpvType(paramType), "param");
+            if (qualifiers[a] == glslang::EvqIn || qualifiers[a] == glslang::EvqInOut) {
+                // need to copy the input into output space
+                builder.setAccessChain(lValues[lValueCount]);
+                spv::Id copy = accessChainLoad(*argTypes[a]);
+                builder.clearAccessChain();
+                builder.setAccessChainLValue(arg);
+                multiTypeStore(paramType, copy);
+            }
+            ++lValueCount;
+        } else {
+            // input-only, non-opaque: pass the already-evaluated r-value
+            arg = rValues[rValueCount];
+            ++rValueCount;
+        }
+        spvArgs.push_back(arg);
+    }
+
+    // 3. Make the call.
+    spv::Id result = builder.createFunctionCall(function, spvArgs);
+    builder.setPrecision(result, TranslatePrecisionDecoration(node->getType()));
+
+    // 4. Copy back the "out" and "inout" arguments.
+    lValueCount = 0;
+    for (int a = 0; a < (int)glslangArgs.size(); ++a) {
+        const glslang::TType& paramType = glslangArgs[a]->getAsTyped()->getType();
+        if (qualifiers[a] != glslang::EvqConstReadOnly) {
+            if (qualifiers[a] == glslang::EvqOut || qualifiers[a] == glslang::EvqInOut) {
+                spv::Id copy = builder.createLoad(spvArgs[a]);
+                builder.setAccessChain(lValues[lValueCount]);
+                multiTypeStore(paramType, copy);
+            }
+            ++lValueCount;
+        }
+    }
+
+    return result;
+}
+
+// Translate AST operation to SPV operation, already having SPV-based operands/types.
+//
+// 'typeProxy' selects among the signed-int / unsigned-int / float / bool
+// instruction flavors; 'noContraction' and 'precision' are decorations to
+// attach to the result.  'reduceComparison' requests that aggregate ==/!=
+// be collapsed to a single boolean via createCompositeCompare().
+// Returns 0 if 'op' is not a binary operation handled here.
+spv::Id TGlslangToSpvTraverser::createBinaryOperation(glslang::TOperator op, spv::Decoration precision,
+                                                      spv::Decoration noContraction,
+                                                      spv::Id typeId, spv::Id left, spv::Id right,
+                                                      glslang::TBasicType typeProxy, bool reduceComparison)
+{
+    bool isUnsigned = typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64;
+#ifdef AMD_EXTENSIONS
+    bool isFloat = typeProxy == glslang::EbtFloat || typeProxy == glslang::EbtDouble || typeProxy == glslang::EbtFloat16;
+#else
+    bool isFloat = typeProxy == glslang::EbtFloat || typeProxy == glslang::EbtDouble;
+#endif
+    bool isBool = typeProxy == glslang::EbtBool;
+
+    spv::Op binOp = spv::OpNop;
+    bool needMatchingVectors = true;  // for non-matrix ops, would a scalar need to smear to match a vector?
+    bool comparison = false;
+
+    // First pass: map arithmetic/bitwise/logical operators directly to a
+    // SPIR-V opcode; comparisons are only flagged here and mapped below.
+    switch (op) {
+    case glslang::EOpAdd:
+    case glslang::EOpAddAssign:
+        if (isFloat)
+            binOp = spv::OpFAdd;
+        else
+            binOp = spv::OpIAdd;
+        break;
+    case glslang::EOpSub:
+    case glslang::EOpSubAssign:
+        if (isFloat)
+            binOp = spv::OpFSub;
+        else
+            binOp = spv::OpISub;
+        break;
+    case glslang::EOpMul:
+    case glslang::EOpMulAssign:
+        if (isFloat)
+            binOp = spv::OpFMul;
+        else
+            binOp = spv::OpIMul;
+        break;
+    case glslang::EOpVectorTimesScalar:
+    case glslang::EOpVectorTimesScalarAssign:
+        if (isFloat && (builder.isVector(left) || builder.isVector(right))) {
+            // canonicalize so the vector is on the left, as OpVectorTimesScalar requires
+            if (builder.isVector(right))
+                std::swap(left, right);
+            assert(builder.isScalar(right));
+            needMatchingVectors = false;
+            binOp = spv::OpVectorTimesScalar;
+        } else
+            binOp = spv::OpIMul;
+        break;
+    case glslang::EOpVectorTimesMatrix:
+    case glslang::EOpVectorTimesMatrixAssign:
+        binOp = spv::OpVectorTimesMatrix;
+        break;
+    case glslang::EOpMatrixTimesVector:
+        binOp = spv::OpMatrixTimesVector;
+        break;
+    case glslang::EOpMatrixTimesScalar:
+    case glslang::EOpMatrixTimesScalarAssign:
+        binOp = spv::OpMatrixTimesScalar;
+        break;
+    case glslang::EOpMatrixTimesMatrix:
+    case glslang::EOpMatrixTimesMatrixAssign:
+        binOp = spv::OpMatrixTimesMatrix;
+        break;
+    case glslang::EOpOuterProduct:
+        binOp = spv::OpOuterProduct;
+        needMatchingVectors = false;
+        break;
+
+    case glslang::EOpDiv:
+    case glslang::EOpDivAssign:
+        if (isFloat)
+            binOp = spv::OpFDiv;
+        else if (isUnsigned)
+            binOp = spv::OpUDiv;
+        else
+            binOp = spv::OpSDiv;
+        break;
+    case glslang::EOpMod:
+    case glslang::EOpModAssign:
+        if (isFloat)
+            binOp = spv::OpFMod;
+        else if (isUnsigned)
+            binOp = spv::OpUMod;
+        else
+            binOp = spv::OpSMod;
+        break;
+    case glslang::EOpRightShift:
+    case glslang::EOpRightShiftAssign:
+        if (isUnsigned)
+            binOp = spv::OpShiftRightLogical;
+        else
+            binOp = spv::OpShiftRightArithmetic;
+        break;
+    case glslang::EOpLeftShift:
+    case glslang::EOpLeftShiftAssign:
+        binOp = spv::OpShiftLeftLogical;
+        break;
+    case glslang::EOpAnd:
+    case glslang::EOpAndAssign:
+        binOp = spv::OpBitwiseAnd;
+        break;
+    case glslang::EOpLogicalAnd:
+        needMatchingVectors = false;
+        binOp = spv::OpLogicalAnd;
+        break;
+    case glslang::EOpInclusiveOr:
+    case glslang::EOpInclusiveOrAssign:
+        binOp = spv::OpBitwiseOr;
+        break;
+    case glslang::EOpLogicalOr:
+        needMatchingVectors = false;
+        binOp = spv::OpLogicalOr;
+        break;
+    case glslang::EOpExclusiveOr:
+    case glslang::EOpExclusiveOrAssign:
+        binOp = spv::OpBitwiseXor;
+        break;
+    case glslang::EOpLogicalXor:
+        needMatchingVectors = false;
+        // there is no OpLogicalXor; logical-not-equal is equivalent for booleans
+        binOp = spv::OpLogicalNotEqual;
+        break;
+
+    case glslang::EOpLessThan:
+    case glslang::EOpGreaterThan:
+    case glslang::EOpLessThanEqual:
+    case glslang::EOpGreaterThanEqual:
+    case glslang::EOpEqual:
+    case glslang::EOpNotEqual:
+    case glslang::EOpVectorEqual:
+    case glslang::EOpVectorNotEqual:
+        comparison = true;
+        break;
+    default:
+        break;
+    }
+
+    // handle mapped binary operations (should be non-comparison)
+    if (binOp != spv::OpNop) {
+        assert(comparison == false);
+        if (builder.isMatrix(left) || builder.isMatrix(right))
+            return createBinaryMatrixOperation(binOp, precision, noContraction, typeId, left, right);
+
+        // No matrix involved; make both operands be the same number of components, if needed
+        if (needMatchingVectors)
+            builder.promoteScalar(precision, left, right);
+
+        spv::Id result = builder.createBinOp(binOp, typeId, left, right);
+        addDecoration(result, noContraction);
+        return builder.setPrecision(result, precision);
+    }
+
+    if (! comparison)
+        return 0;
+
+    // Handle comparison instructions
+
+    if (reduceComparison && (op == glslang::EOpEqual || op == glslang::EOpNotEqual)
+                         && (builder.isVector(left) || builder.isMatrix(left) || builder.isAggregate(left)))
+        return builder.createCompositeCompare(precision, left, right, op == glslang::EOpEqual);
+
+    // Second pass: map the comparison operators; floats use the ordered
+    // (FOrd*) forms, booleans the Logical* forms.
+    switch (op) {
+    case glslang::EOpLessThan:
+        if (isFloat)
+            binOp = spv::OpFOrdLessThan;
+        else if (isUnsigned)
+            binOp = spv::OpULessThan;
+        else
+            binOp = spv::OpSLessThan;
+        break;
+    case glslang::EOpGreaterThan:
+        if (isFloat)
+            binOp = spv::OpFOrdGreaterThan;
+        else if (isUnsigned)
+            binOp = spv::OpUGreaterThan;
+        else
+            binOp = spv::OpSGreaterThan;
+        break;
+    case glslang::EOpLessThanEqual:
+        if (isFloat)
+            binOp = spv::OpFOrdLessThanEqual;
+        else if (isUnsigned)
+            binOp = spv::OpULessThanEqual;
+        else
+            binOp = spv::OpSLessThanEqual;
+        break;
+    case glslang::EOpGreaterThanEqual:
+        if (isFloat)
+            binOp = spv::OpFOrdGreaterThanEqual;
+        else if (isUnsigned)
+            binOp = spv::OpUGreaterThanEqual;
+        else
+            binOp = spv::OpSGreaterThanEqual;
+        break;
+    case glslang::EOpEqual:
+    case glslang::EOpVectorEqual:
+        if (isFloat)
+            binOp = spv::OpFOrdEqual;
+        else if (isBool)
+            binOp = spv::OpLogicalEqual;
+        else
+            binOp = spv::OpIEqual;
+        break;
+    case glslang::EOpNotEqual:
+    case glslang::EOpVectorNotEqual:
+        if (isFloat)
+            binOp = spv::OpFOrdNotEqual;
+        else if (isBool)
+            binOp = spv::OpLogicalNotEqual;
+        else
+            binOp = spv::OpINotEqual;
+        break;
+    default:
+        break;
+    }
+
+    if (binOp != spv::OpNop) {
+        spv::Id result = builder.createBinOp(binOp, typeId, left, right);
+        addDecoration(result, noContraction);
+        return builder.setPrecision(result, precision);
+    }
+
+    return 0;
+}
+
+//
+// Translate AST matrix operation to SPV operation, already having SPV-based operands/types.
+// These can be any of:
+//
+//   matrix * scalar
+//   scalar * matrix
+//   matrix * matrix     linear algebraic
+//   matrix * vector
+//   vector * matrix
+//   matrix * matrix     componentwise
+//   matrix op matrix    op in {+, -, /}
+//   matrix op scalar    op in {+, -, /}
+//   scalar op matrix    op in {+, -, /}
+//
+// 'op' is the already-selected SPIR-V opcode; 'precision' and 'noContraction'
+// are decorations applied to every generated instruction.
+spv::Id TGlslangToSpvTraverser::createBinaryMatrixOperation(spv::Op op, spv::Decoration precision, spv::Decoration noContraction, spv::Id typeId, spv::Id left, spv::Id right)
+{
+    bool firstClass = true;
+
+    // First, handle first-class matrix operations (* and matrix/scalar)
+    switch (op) {
+    case spv::OpFDiv:
+        if (builder.isMatrix(left) && builder.isScalar(right)) {
+            // turn matrix / scalar into a multiply...
+            // ...by the scalar's reciprocal, so OpMatrixTimesScalar can be used
+            right = builder.createBinOp(spv::OpFDiv, builder.getTypeId(right), builder.makeFloatConstant(1.0F), right);
+            op = spv::OpMatrixTimesScalar;
+        } else
+            firstClass = false;
+        break;
+    case spv::OpMatrixTimesScalar:
+        // canonicalize to matrix-on-the-left, as the opcode requires
+        if (builder.isMatrix(right))
+            std::swap(left, right);
+        assert(builder.isScalar(right));
+        break;
+    case spv::OpVectorTimesMatrix:
+        assert(builder.isVector(left));
+        assert(builder.isMatrix(right));
+        break;
+    case spv::OpMatrixTimesVector:
+        assert(builder.isMatrix(left));
+        assert(builder.isVector(right));
+        break;
+    case spv::OpMatrixTimesMatrix:
+        assert(builder.isMatrix(left));
+        assert(builder.isMatrix(right));
+        break;
+    default:
+        firstClass = false;
+        break;
+    }
+
+    if (firstClass) {
+        // a single SPIR-V instruction covers the whole operation
+        spv::Id result = builder.createBinOp(op, typeId, left, right);
+        addDecoration(result, noContraction);
+        return builder.setPrecision(result, precision);
+    }
+
+    // Handle component-wise +, -, *, %, and / for all combinations of type.
+    // The result type of all of them is the same type as the (a) matrix operand.
+    // The algorithm is to:
+    //   - break the matrix(es) into vectors
+    //   - smear any scalar to a vector
+    //   - do vector operations
+    //   - make a matrix out the vector results
+    switch (op) {
+    case spv::OpFAdd:
+    case spv::OpFSub:
+    case spv::OpFDiv:
+    case spv::OpFMod:
+    case spv::OpFMul:
+    {
+        // one time set up...
+        bool  leftMat = builder.isMatrix(left);
+        bool rightMat = builder.isMatrix(right);
+        unsigned int numCols = leftMat ? builder.getNumColumns(left) : builder.getNumColumns(right);
+        int numRows = leftMat ? builder.getNumRows(left) : builder.getNumRows(right);
+        spv::Id scalarType = builder.getScalarTypeId(typeId);
+        spv::Id vecType = builder.makeVectorType(scalarType, numRows);
+        std::vector<spv::Id> results;
+        spv::Id smearVec = spv::NoResult;
+        // at most one operand can be a scalar here; pre-smear it to a column vector
+        if (builder.isScalar(left))
+            smearVec = builder.smearScalar(precision, left, vecType);
+        else if (builder.isScalar(right))
+            smearVec = builder.smearScalar(precision, right, vecType);
+
+        // do each vector op
+        for (unsigned int c = 0; c < numCols; ++c) {
+            std::vector<unsigned int> indexes;
+            indexes.push_back(c);
+            spv::Id  leftVec =  leftMat ? builder.createCompositeExtract( left, vecType, indexes) : smearVec;
+            spv::Id rightVec = rightMat ? builder.createCompositeExtract(right, vecType, indexes) : smearVec;
+            spv::Id result = builder.createBinOp(op, vecType, leftVec, rightVec);
+            addDecoration(result, noContraction);
+            results.push_back(builder.setPrecision(result, precision));
+        }
+
+        // put the pieces together
+        return  builder.setPrecision(builder.createCompositeConstruct(typeId, results), precision);
+    }
+    default:
+        assert(0);
+        return spv::NoResult;
+    }
+}
+
+// Translate a unary AST operation to SPIR-V, given SPV-based operand/type ids.
+// The result is produced one of three ways: as a core SPIR-V instruction
+// (unaryOp), as a GLSL.std.450 or vendor extended-instruction call (libCall),
+// or by delegating to a specialized helper (matrix negate, atomics,
+// invocation/ballot operations).
+// Returns 0 if 'op' is not a unary operation handled here.
+spv::Id TGlslangToSpvTraverser::createUnaryOperation(glslang::TOperator op, spv::Decoration precision, spv::Decoration noContraction, spv::Id typeId, spv::Id operand, glslang::TBasicType typeProxy)
+{
+    spv::Op unaryOp = spv::OpNop;
+    int extBuiltins = -1;   // extended-instruction-set id, if not the GLSL.std.450 default
+    int libCall = -1;       // extended-instruction number, or -1 for a core opcode
+    bool isUnsigned = typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64;
+#ifdef AMD_EXTENSIONS
+    bool isFloat = typeProxy == glslang::EbtFloat || typeProxy == glslang::EbtDouble || typeProxy == glslang::EbtFloat16;
+#else
+    bool isFloat = typeProxy == glslang::EbtFloat || typeProxy == glslang::EbtDouble;
+#endif
+
+    switch (op) {
+    case glslang::EOpNegative:
+        if (isFloat) {
+            unaryOp = spv::OpFNegate;
+            // matrix negation is done column by column in a helper
+            if (builder.isMatrixType(typeId))
+                return createUnaryMatrixOperation(unaryOp, precision, noContraction, typeId, operand, typeProxy);
+        } else
+            unaryOp = spv::OpSNegate;
+        break;
+
+    case glslang::EOpLogicalNot:
+    case glslang::EOpVectorLogicalNot:
+        unaryOp = spv::OpLogicalNot;
+        break;
+    case glslang::EOpBitwiseNot:
+        unaryOp = spv::OpNot;
+        break;
+
+    case glslang::EOpDeterminant:
+        libCall = spv::GLSLstd450Determinant;
+        break;
+    case glslang::EOpMatrixInverse:
+        libCall = spv::GLSLstd450MatrixInverse;
+        break;
+    case glslang::EOpTranspose:
+        unaryOp = spv::OpTranspose;
+        break;
+
+    case glslang::EOpRadians:
+        libCall = spv::GLSLstd450Radians;
+        break;
+    case glslang::EOpDegrees:
+        libCall = spv::GLSLstd450Degrees;
+        break;
+    case glslang::EOpSin:
+        libCall = spv::GLSLstd450Sin;
+        break;
+    case glslang::EOpCos:
+        libCall = spv::GLSLstd450Cos;
+        break;
+    case glslang::EOpTan:
+        libCall = spv::GLSLstd450Tan;
+        break;
+    case glslang::EOpAcos:
+        libCall = spv::GLSLstd450Acos;
+        break;
+    case glslang::EOpAsin:
+        libCall = spv::GLSLstd450Asin;
+        break;
+    case glslang::EOpAtan:
+        libCall = spv::GLSLstd450Atan;
+        break;
+
+    case glslang::EOpAcosh:
+        libCall = spv::GLSLstd450Acosh;
+        break;
+    case glslang::EOpAsinh:
+        libCall = spv::GLSLstd450Asinh;
+        break;
+    case glslang::EOpAtanh:
+        libCall = spv::GLSLstd450Atanh;
+        break;
+    case glslang::EOpTanh:
+        libCall = spv::GLSLstd450Tanh;
+        break;
+    case glslang::EOpCosh:
+        libCall = spv::GLSLstd450Cosh;
+        break;
+    case glslang::EOpSinh:
+        libCall = spv::GLSLstd450Sinh;
+        break;
+
+    case glslang::EOpLength:
+        libCall = spv::GLSLstd450Length;
+        break;
+    case glslang::EOpNormalize:
+        libCall = spv::GLSLstd450Normalize;
+        break;
+
+    case glslang::EOpExp:
+        libCall = spv::GLSLstd450Exp;
+        break;
+    case glslang::EOpLog:
+        libCall = spv::GLSLstd450Log;
+        break;
+    case glslang::EOpExp2:
+        libCall = spv::GLSLstd450Exp2;
+        break;
+    case glslang::EOpLog2:
+        libCall = spv::GLSLstd450Log2;
+        break;
+    case glslang::EOpSqrt:
+        libCall = spv::GLSLstd450Sqrt;
+        break;
+    case glslang::EOpInverseSqrt:
+        libCall = spv::GLSLstd450InverseSqrt;
+        break;
+
+    case glslang::EOpFloor:
+        libCall = spv::GLSLstd450Floor;
+        break;
+    case glslang::EOpTrunc:
+        libCall = spv::GLSLstd450Trunc;
+        break;
+    case glslang::EOpRound:
+        libCall = spv::GLSLstd450Round;
+        break;
+    case glslang::EOpRoundEven:
+        libCall = spv::GLSLstd450RoundEven;
+        break;
+    case glslang::EOpCeil:
+        libCall = spv::GLSLstd450Ceil;
+        break;
+    case glslang::EOpFract:
+        libCall = spv::GLSLstd450Fract;
+        break;
+
+    case glslang::EOpIsNan:
+        unaryOp = spv::OpIsNan;
+        break;
+    case glslang::EOpIsInf:
+        unaryOp = spv::OpIsInf;
+        break;
+    case glslang::EOpIsFinite:
+        unaryOp = spv::OpIsFinite;
+        break;
+
+    // bit-pattern-preserving reinterpretations map to OpBitcast
+    case glslang::EOpFloatBitsToInt:
+    case glslang::EOpFloatBitsToUint:
+    case glslang::EOpIntBitsToFloat:
+    case glslang::EOpUintBitsToFloat:
+    case glslang::EOpDoubleBitsToInt64:
+    case glslang::EOpDoubleBitsToUint64:
+    case glslang::EOpInt64BitsToDouble:
+    case glslang::EOpUint64BitsToDouble:
+        unaryOp = spv::OpBitcast;
+        break;
+
+    case glslang::EOpPackSnorm2x16:
+        libCall = spv::GLSLstd450PackSnorm2x16;
+        break;
+    case glslang::EOpUnpackSnorm2x16:
+        libCall = spv::GLSLstd450UnpackSnorm2x16;
+        break;
+    case glslang::EOpPackUnorm2x16:
+        libCall = spv::GLSLstd450PackUnorm2x16;
+        break;
+    case glslang::EOpUnpackUnorm2x16:
+        libCall = spv::GLSLstd450UnpackUnorm2x16;
+        break;
+    case glslang::EOpPackHalf2x16:
+        libCall = spv::GLSLstd450PackHalf2x16;
+        break;
+    case glslang::EOpUnpackHalf2x16:
+        libCall = spv::GLSLstd450UnpackHalf2x16;
+        break;
+    case glslang::EOpPackSnorm4x8:
+        libCall = spv::GLSLstd450PackSnorm4x8;
+        break;
+    case glslang::EOpUnpackSnorm4x8:
+        libCall = spv::GLSLstd450UnpackSnorm4x8;
+        break;
+    case glslang::EOpPackUnorm4x8:
+        libCall = spv::GLSLstd450PackUnorm4x8;
+        break;
+    case glslang::EOpUnpackUnorm4x8:
+        libCall = spv::GLSLstd450UnpackUnorm4x8;
+        break;
+    case glslang::EOpPackDouble2x32:
+        libCall = spv::GLSLstd450PackDouble2x32;
+        break;
+    case glslang::EOpUnpackDouble2x32:
+        libCall = spv::GLSLstd450UnpackDouble2x32;
+        break;
+
+    // 32<->64-bit integer (re)packing is also a pure bitcast
+    case glslang::EOpPackInt2x32:
+    case glslang::EOpUnpackInt2x32:
+    case glslang::EOpPackUint2x32:
+    case glslang::EOpUnpackUint2x32:
+        unaryOp = spv::OpBitcast;
+        break;
+
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpPackFloat2x16:
+    case glslang::EOpUnpackFloat2x16:
+        unaryOp = spv::OpBitcast;
+        break;
+#endif
+
+    case glslang::EOpDPdx:
+        unaryOp = spv::OpDPdx;
+        break;
+    case glslang::EOpDPdy:
+        unaryOp = spv::OpDPdy;
+        break;
+    case glslang::EOpFwidth:
+        unaryOp = spv::OpFwidth;
+        break;
+    // fine/coarse derivative variants additionally require the
+    // DerivativeControl capability
+    case glslang::EOpDPdxFine:
+        builder.addCapability(spv::CapabilityDerivativeControl);
+        unaryOp = spv::OpDPdxFine;
+        break;
+    case glslang::EOpDPdyFine:
+        builder.addCapability(spv::CapabilityDerivativeControl);
+        unaryOp = spv::OpDPdyFine;
+        break;
+    case glslang::EOpFwidthFine:
+        builder.addCapability(spv::CapabilityDerivativeControl);
+        unaryOp = spv::OpFwidthFine;
+        break;
+    case glslang::EOpDPdxCoarse:
+        builder.addCapability(spv::CapabilityDerivativeControl);
+        unaryOp = spv::OpDPdxCoarse;
+        break;
+    case glslang::EOpDPdyCoarse:
+        builder.addCapability(spv::CapabilityDerivativeControl);
+        unaryOp = spv::OpDPdyCoarse;
+        break;
+    case glslang::EOpFwidthCoarse:
+        builder.addCapability(spv::CapabilityDerivativeControl);
+        unaryOp = spv::OpFwidthCoarse;
+        break;
+    case glslang::EOpInterpolateAtCentroid:
+        builder.addCapability(spv::CapabilityInterpolationFunction);
+        libCall = spv::GLSLstd450InterpolateAtCentroid;
+        break;
+    case glslang::EOpAny:
+        unaryOp = spv::OpAny;
+        break;
+    case glslang::EOpAll:
+        unaryOp = spv::OpAll;
+        break;
+
+    case glslang::EOpAbs:
+        if (isFloat)
+            libCall = spv::GLSLstd450FAbs;
+        else
+            libCall = spv::GLSLstd450SAbs;
+        break;
+    case glslang::EOpSign:
+        if (isFloat)
+            libCall = spv::GLSLstd450FSign;
+        else
+            libCall = spv::GLSLstd450SSign;
+        break;
+
+    case glslang::EOpAtomicCounterIncrement:
+    case glslang::EOpAtomicCounterDecrement:
+    case glslang::EOpAtomicCounter:
+    {
+        // Handle all of the atomics in one place, in createAtomicOperation()
+        std::vector<spv::Id> operands;
+        operands.push_back(operand);
+        return createAtomicOperation(op, precision, typeId, operands, typeProxy);
+    }
+
+    case glslang::EOpBitFieldReverse:
+        unaryOp = spv::OpBitReverse;
+        break;
+    case glslang::EOpBitCount:
+        unaryOp = spv::OpBitCount;
+        break;
+    case glslang::EOpFindLSB:
+        libCall = spv::GLSLstd450FindILsb;
+        break;
+    case glslang::EOpFindMSB:
+        if (isUnsigned)
+            libCall = spv::GLSLstd450FindUMsb;
+        else
+            libCall = spv::GLSLstd450FindSMsb;
+        break;
+
+    // cross-invocation (ballot/reduction) operations go through a helper
+    case glslang::EOpBallot:
+    case glslang::EOpReadFirstInvocation:
+    case glslang::EOpAnyInvocation:
+    case glslang::EOpAllInvocations:
+    case glslang::EOpAllInvocationsEqual:
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpMinInvocations:
+    case glslang::EOpMaxInvocations:
+    case glslang::EOpAddInvocations:
+    case glslang::EOpMinInvocationsNonUniform:
+    case glslang::EOpMaxInvocationsNonUniform:
+    case glslang::EOpAddInvocationsNonUniform:
+#endif
+    {
+        std::vector<spv::Id> operands;
+        operands.push_back(operand);
+        return createInvocationsOperation(op, typeId, operands, typeProxy);
+    }
+
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpMbcnt:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+        libCall = spv::MbcntAMD;
+        break;
+
+    case glslang::EOpCubeFaceIndex:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_gcn_shader);
+        libCall = spv::CubeFaceIndexAMD;
+        break;
+
+    case glslang::EOpCubeFaceCoord:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_gcn_shader);
+        libCall = spv::CubeFaceCoordAMD;
+        break;
+#endif
+
+    default:
+        return 0;
+    }
+
+    // emit either the extended-instruction call or the core instruction
+    spv::Id id;
+    if (libCall >= 0) {
+        std::vector<spv::Id> args;
+        args.push_back(operand);
+        id = builder.createBuiltinCall(typeId, extBuiltins >= 0 ? extBuiltins : stdBuiltins, libCall, args);
+    } else {
+        id = builder.createUnaryOp(unaryOp, typeId, operand);
+    }
+
+    addDecoration(id, noContraction);
+    return builder.setPrecision(id, precision);
+}
+
+// Create a unary operation on a matrix by applying 'op' to each column
+// vector independently and rebuilding the matrix from the results.
+spv::Id TGlslangToSpvTraverser::createUnaryMatrixOperation(spv::Op op, spv::Decoration precision, spv::Decoration noContraction, spv::Id typeId, spv::Id operand, glslang::TBasicType /* typeProxy */)
+{
+    // The result has the same shape as the operand.  Work out the column
+    // vector types for both the source matrix and the destination matrix.
+    const int columnCount = builder.getNumColumns(operand);
+    const int rowCount    = builder.getNumRows(operand);
+    spv::Id sourceColumnType = builder.makeVectorType(builder.getScalarTypeId(builder.getTypeId(operand)), rowCount);
+    spv::Id resultColumnType = builder.makeVectorType(builder.getScalarTypeId(typeId), rowCount);
+
+    // Apply the operation column by column.
+    std::vector<spv::Id> columns;
+    for (int col = 0; col < columnCount; ++col) {
+        std::vector<unsigned int> index(1, col);
+        spv::Id sourceColumn = builder.createCompositeExtract(operand, sourceColumnType, index);
+        spv::Id resultColumn = builder.createUnaryOp(op, resultColumnType, sourceColumn);
+        addDecoration(resultColumn, noContraction);
+        columns.push_back(builder.setPrecision(resultColumn, precision));
+    }
+
+    // Reassemble the processed columns into the result matrix.
+    return builder.setPrecision(builder.createCompositeConstruct(typeId, columns), precision);
+}
+
+// Translate a glslang type-conversion operator into SPIR-V.
+//
+// Handles scalar/vector conversions between bool, int/uint, int64/uint64,
+// float, double (and, under AMD_EXTENSIONS, float16):
+//   - *-to-bool:  compare the operand against zero
+//   - bool-to-*:  OpSelect between one and zero
+//   - everything else: the matching SPIR-V conversion instruction
+// Returns the id of the converted value, or 0 if 'op' is not a conversion
+// this function knows about.  'typeProxy' is only consulted indirectly, via
+// the matrix path that forwards to createUnaryMatrixOperation.
+spv::Id TGlslangToSpvTraverser::createConversion(glslang::TOperator op, spv::Decoration precision, spv::Decoration noContraction, spv::Id destType, spv::Id operand, glslang::TBasicType typeProxy)
+{
+    spv::Op convOp = spv::OpNop;
+    spv::Id zero = 0;
+    spv::Id one = 0;
+    spv::Id type = 0;
+
+    // 0 means "scalar context"; otherwise constants below get smeared to this width
+    int vectorSize = builder.isVectorType(destType) ? builder.getNumTypeComponents(destType) : 0;
+
+    switch (op) {
+    // numeric-to-bool: result is (operand != 0)
+    case glslang::EOpConvIntToBool:
+    case glslang::EOpConvUintToBool:
+    case glslang::EOpConvInt64ToBool:
+    case glslang::EOpConvUint64ToBool:
+        zero = (op == glslang::EOpConvInt64ToBool ||
+                op == glslang::EOpConvUint64ToBool) ? builder.makeUint64Constant(0) : builder.makeUintConstant(0);
+        zero = makeSmearedConstant(zero, vectorSize)
+        return builder.createBinOp(spv::OpINotEqual, destType, operand, zero);
+
+    case glslang::EOpConvFloatToBool:
+        zero = builder.makeFloatConstant(0.0F);
+        zero = makeSmearedConstant(zero, vectorSize);
+        return builder.createBinOp(spv::OpFOrdNotEqual, destType, operand, zero);
+
+    case glslang::EOpConvDoubleToBool:
+        zero = builder.makeDoubleConstant(0.0);
+        zero = makeSmearedConstant(zero, vectorSize);
+        return builder.createBinOp(spv::OpFOrdNotEqual, destType, operand, zero);
+
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpConvFloat16ToBool:
+        zero = builder.makeFloat16Constant(0.0F);
+        zero = makeSmearedConstant(zero, vectorSize);
+        return builder.createBinOp(spv::OpFOrdNotEqual, destType, operand, zero);
+#endif
+
+    // bool-to-numeric: select(operand, 1, 0), emitted after the switch
+    case glslang::EOpConvBoolToFloat:
+        convOp = spv::OpSelect;
+        zero = builder.makeFloatConstant(0.0F);
+        one  = builder.makeFloatConstant(1.0F);
+        break;
+
+    case glslang::EOpConvBoolToDouble:
+        convOp = spv::OpSelect;
+        zero = builder.makeDoubleConstant(0.0);
+        one  = builder.makeDoubleConstant(1.0);
+        break;
+
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpConvBoolToFloat16:
+        convOp = spv::OpSelect;
+        zero = builder.makeFloat16Constant(0.0F);
+        one = builder.makeFloat16Constant(1.0F);
+        break;
+#endif
+
+    case glslang::EOpConvBoolToInt:
+    case glslang::EOpConvBoolToInt64:
+        zero = (op == glslang::EOpConvBoolToInt64) ? builder.makeInt64Constant(0) : builder.makeIntConstant(0);
+        one  = (op == glslang::EOpConvBoolToInt64) ? builder.makeInt64Constant(1) : builder.makeIntConstant(1);
+        convOp = spv::OpSelect;
+        break;
+
+    case glslang::EOpConvBoolToUint:
+    case glslang::EOpConvBoolToUint64:
+        zero = (op == glslang::EOpConvBoolToUint64) ? builder.makeUint64Constant(0) : builder.makeUintConstant(0);
+        one  = (op == glslang::EOpConvBoolToUint64) ? builder.makeUint64Constant(1) : builder.makeUintConstant(1);
+        convOp = spv::OpSelect;
+        break;
+
+    case glslang::EOpConvIntToFloat:
+    case glslang::EOpConvIntToDouble:
+    case glslang::EOpConvInt64ToFloat:
+    case glslang::EOpConvInt64ToDouble:
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpConvIntToFloat16:
+    case glslang::EOpConvInt64ToFloat16:
+#endif
+        convOp = spv::OpConvertSToF;
+        break;
+
+    case glslang::EOpConvUintToFloat:
+    case glslang::EOpConvUintToDouble:
+    case glslang::EOpConvUint64ToFloat:
+    case glslang::EOpConvUint64ToDouble:
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpConvUintToFloat16:
+    case glslang::EOpConvUint64ToFloat16:
+#endif
+        convOp = spv::OpConvertUToF;
+        break;
+
+    case glslang::EOpConvDoubleToFloat:
+    case glslang::EOpConvFloatToDouble:
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpConvDoubleToFloat16:
+    case glslang::EOpConvFloat16ToDouble:
+    case glslang::EOpConvFloatToFloat16:
+    case glslang::EOpConvFloat16ToFloat:
+#endif
+        convOp = spv::OpFConvert;
+        // matrices must be converted column by column
+        if (builder.isMatrixType(destType))
+            return createUnaryMatrixOperation(convOp, precision, noContraction, destType, operand, typeProxy);
+        break;
+
+    case glslang::EOpConvFloatToInt:
+    case glslang::EOpConvDoubleToInt:
+    case glslang::EOpConvFloatToInt64:
+    case glslang::EOpConvDoubleToInt64:
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpConvFloat16ToInt:
+    case glslang::EOpConvFloat16ToInt64:
+#endif
+        convOp = spv::OpConvertFToS;
+        break;
+
+    // same-width signed<->unsigned: a pure bit reinterpretation
+    case glslang::EOpConvUintToInt:
+    case glslang::EOpConvIntToUint:
+    case glslang::EOpConvUint64ToInt64:
+    case glslang::EOpConvInt64ToUint64:
+        if (builder.isInSpecConstCodeGenMode()) {
+            // Build zero scalar or vector for OpIAdd.
+            zero = (op == glslang::EOpConvUint64ToInt64 ||
+                    op == glslang::EOpConvInt64ToUint64) ? builder.makeUint64Constant(0) : builder.makeUintConstant(0);
+            zero = makeSmearedConstant(zero, vectorSize);
+            // Use OpIAdd, instead of OpBitcast to do the conversion when
+            // generating for OpSpecConstantOp instruction.
+            return builder.createBinOp(spv::OpIAdd, destType, operand, zero);
+        }
+        // For normal run-time conversion instruction, use OpBitcast.
+        convOp = spv::OpBitcast;
+        break;
+
+    case glslang::EOpConvFloatToUint:
+    case glslang::EOpConvDoubleToUint:
+    case glslang::EOpConvFloatToUint64:
+    case glslang::EOpConvDoubleToUint64:
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpConvFloat16ToUint:
+    case glslang::EOpConvFloat16ToUint64:
+#endif
+        convOp = spv::OpConvertFToU;
+        break;
+
+    case glslang::EOpConvIntToInt64:
+    case glslang::EOpConvInt64ToInt:
+        convOp = spv::OpSConvert;
+        break;
+
+    case glslang::EOpConvUintToUint64:
+    case glslang::EOpConvUint64ToUint:
+        convOp = spv::OpUConvert;
+        break;
+
+    // conversions that change both width and signedness need two steps:
+    // a width conversion in the source signedness, then a bitcast
+    case glslang::EOpConvIntToUint64:
+    case glslang::EOpConvInt64ToUint:
+    case glslang::EOpConvUint64ToInt:
+    case glslang::EOpConvUintToInt64:
+        // OpSConvert/OpUConvert + OpBitCast
+        switch (op) {
+        case glslang::EOpConvIntToUint64:
+            convOp = spv::OpSConvert;
+            type   = builder.makeIntType(64);
+            break;
+        case glslang::EOpConvInt64ToUint:
+            convOp = spv::OpSConvert;
+            type   = builder.makeIntType(32);
+            break;
+        case glslang::EOpConvUint64ToInt:
+            convOp = spv::OpUConvert;
+            type   = builder.makeUintType(32);
+            break;
+        case glslang::EOpConvUintToInt64:
+            convOp = spv::OpUConvert;
+            type   = builder.makeUintType(64);
+            break;
+        default:
+            assert(0);
+            break;
+        }
+
+        if (vectorSize > 0)
+            type = builder.makeVectorType(type, vectorSize);
+
+        // step 1: width conversion, keeping the source signedness
+        operand = builder.createUnaryOp(convOp, type, operand);
+
+        if (builder.isInSpecConstCodeGenMode()) {
+            // Build zero scalar or vector for OpIAdd.
+            zero = (op == glslang::EOpConvIntToUint64 ||
+                    op == glslang::EOpConvUintToInt64) ? builder.makeUint64Constant(0) : builder.makeUintConstant(0);
+            zero = makeSmearedConstant(zero, vectorSize);
+            // Use OpIAdd, instead of OpBitcast to do the conversion when
+            // generating for OpSpecConstantOp instruction.
+            return builder.createBinOp(spv::OpIAdd, destType, operand, zero);
+        }
+        // For normal run-time conversion instruction, use OpBitcast.
+        convOp = spv::OpBitcast;
+        break;
+    default:
+        break;
+    }
+
+    spv::Id result = 0;
+    if (convOp == spv::OpNop)
+        return result;
+
+    if (convOp == spv::OpSelect) {
+        // bool-to-numeric path: select(operand, one, zero) component-wise
+        zero = makeSmearedConstant(zero, vectorSize);
+        one  = makeSmearedConstant(one, vectorSize);
+        result = builder.createTriOp(convOp, destType, operand, one, zero);
+    } else
+        result = builder.createUnaryOp(convOp, destType, operand);
+
+    return builder.setPrecision(result, precision);
+}
+
+// Replicate a scalar constant across a vector of 'vectorSize' components.
+// A vectorSize of 0 means the context is scalar, so the constant is returned
+// unchanged.
+spv::Id TGlslangToSpvTraverser::makeSmearedConstant(spv::Id constant, int vectorSize)
+{
+    if (vectorSize == 0)
+        return constant;
+
+    // build a composite constant whose components are all 'constant'
+    const spv::Id vectorTypeId = builder.makeVectorType(builder.getTypeId(constant), vectorSize);
+    const std::vector<spv::Id> replicated(vectorSize, constant);
+    return builder.makeCompositeConstant(vectorTypeId, replicated);
+}
+
+// For glslang ops that map to SPV atomic opCodes
+//
+// Selects the SPIR-V atomic instruction for a glslang atomic, image-atomic,
+// or atomic-counter operator and emits it.  'operands' arrive in glslang
+// order (pointer first); the SPIR-V-only scope and memory-semantics operands
+// are inserted here.  'typeProxy' picks the signed vs. unsigned min/max form.
+spv::Id TGlslangToSpvTraverser::createAtomicOperation(glslang::TOperator op, spv::Decoration /*precision*/, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+    spv::Op opCode = spv::OpNop;
+
+    switch (op) {
+    case glslang::EOpAtomicAdd:
+    case glslang::EOpImageAtomicAdd:
+        opCode = spv::OpAtomicIAdd;
+        break;
+    case glslang::EOpAtomicMin:
+    case glslang::EOpImageAtomicMin:
+        // data-type signedness selects the signed/unsigned SPIR-V form
+        opCode = typeProxy == glslang::EbtUint ? spv::OpAtomicUMin : spv::OpAtomicSMin;
+        break;
+    case glslang::EOpAtomicMax:
+    case glslang::EOpImageAtomicMax:
+        opCode = typeProxy == glslang::EbtUint ? spv::OpAtomicUMax : spv::OpAtomicSMax;
+        break;
+    case glslang::EOpAtomicAnd:
+    case glslang::EOpImageAtomicAnd:
+        opCode = spv::OpAtomicAnd;
+        break;
+    case glslang::EOpAtomicOr:
+    case glslang::EOpImageAtomicOr:
+        opCode = spv::OpAtomicOr;
+        break;
+    case glslang::EOpAtomicXor:
+    case glslang::EOpImageAtomicXor:
+        opCode = spv::OpAtomicXor;
+        break;
+    case glslang::EOpAtomicExchange:
+    case glslang::EOpImageAtomicExchange:
+        opCode = spv::OpAtomicExchange;
+        break;
+    case glslang::EOpAtomicCompSwap:
+    case glslang::EOpImageAtomicCompSwap:
+        opCode = spv::OpAtomicCompareExchange;
+        break;
+    case glslang::EOpAtomicCounterIncrement:
+        opCode = spv::OpAtomicIIncrement;
+        break;
+    case glslang::EOpAtomicCounterDecrement:
+        opCode = spv::OpAtomicIDecrement;
+        break;
+    case glslang::EOpAtomicCounter:
+        // reading an atomic counter is an atomic load
+        opCode = spv::OpAtomicLoad;
+        break;
+    default:
+        assert(0);
+        break;
+    }
+
+    // Sort out the operands
+    //  - mapping from glslang -> SPV
+    //  - there are extra SPV operands with no glslang source
+    //  - compare-exchange swaps the value and comparator
+    //  - compare-exchange has an extra memory semantics
+    std::vector<spv::Id> spvAtomicOperands;  // hold the spv operands
+    auto opIt = operands.begin();            // walk the glslang operands
+    spvAtomicOperands.push_back(*(opIt++));  // the pointer operand
+    spvAtomicOperands.push_back(builder.makeUintConstant(spv::ScopeDevice));     // TBD: what is the correct scope?
+    spvAtomicOperands.push_back(builder.makeUintConstant(spv::MemorySemanticsMaskNone)); // TBD: what are the correct memory semantics?
+    if (opCode == spv::OpAtomicCompareExchange) {
+        // There are 2 memory semantics for compare-exchange. And the operand order of "comparator" and "new value" in GLSL
+        // differs from that in SPIR-V. Hence, special processing is required.
+        spvAtomicOperands.push_back(builder.makeUintConstant(spv::MemorySemanticsMaskNone)); // "unequal" semantics
+        spvAtomicOperands.push_back(*(opIt + 1));  // SPIR-V wants value before comparator
+        spvAtomicOperands.push_back(*opIt);
+        opIt += 2;
+    }
+
+    // Add the rest of the operands, skipping any that were dealt with above.
+    for (; opIt != operands.end(); ++opIt)
+        spvAtomicOperands.push_back(*opIt);
+
+    return builder.createOp(opCode, typeId, spvAtomicOperands);
+}
+
+// Create group invocation operations.
+//
+// Emits the SPIR-V group/subgroup instruction for a glslang invocation
+// operator (anyInvocation/allInvocations/ballot/read[First]Invocation, plus
+// the AMD min/max/add reductions), adding the required capabilities and
+// extensions as a side effect.  Vector-typed reductions/broadcasts are
+// forwarded to CreateInvocationsVectorOperation, which works per component.
+spv::Id TGlslangToSpvTraverser::createInvocationsOperation(glslang::TOperator op, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+#ifdef AMD_EXTENSIONS
+    bool isUnsigned = typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64;
+    bool isFloat = typeProxy == glslang::EbtFloat || typeProxy == glslang::EbtDouble || typeProxy == glslang::EbtFloat16;
+#endif
+
+    spv::Op opCode = spv::OpNop;
+
+    // Build the instruction's leading operands.  The KHR ballot ops take no
+    // explicit scope operand; the group ops take a subgroup scope and, for
+    // reductions, a GroupOperation.
+    std::vector<spv::Id> spvGroupOperands;
+    if (op == glslang::EOpBallot || op == glslang::EOpReadFirstInvocation) {
+        builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+        builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+    } else {
+        builder.addCapability(spv::CapabilityGroups);
+#ifdef AMD_EXTENSIONS
+        if (op == glslang::EOpMinInvocationsNonUniform ||
+            op == glslang::EOpMaxInvocationsNonUniform ||
+            op == glslang::EOpAddInvocationsNonUniform)
+            builder.addExtension(spv::E_SPV_AMD_shader_ballot);
+#endif
+
+        spvGroupOperands.push_back(builder.makeUintConstant(spv::ScopeSubgroup));
+#ifdef AMD_EXTENSIONS
+        if (op == glslang::EOpMinInvocations || op == glslang::EOpMaxInvocations || op == glslang::EOpAddInvocations ||
+            op == glslang::EOpMinInvocationsNonUniform || op == glslang::EOpMaxInvocationsNonUniform || op == glslang::EOpAddInvocationsNonUniform)
+            spvGroupOperands.push_back(spv::GroupOperationReduce);
+#endif
+    }
+
+    for (auto opIt = operands.begin(); opIt != operands.end(); ++opIt)
+        spvGroupOperands.push_back(*opIt);
+
+    switch (op) {
+    case glslang::EOpAnyInvocation:
+        opCode = spv::OpGroupAny;
+        break;
+    case glslang::EOpAllInvocations:
+        opCode = spv::OpGroupAll;
+        break;
+    case glslang::EOpAllInvocationsEqual:
+    {
+        // "all equal" == all true, or all false: GroupAll || !GroupAny
+        spv::Id groupAll = builder.createOp(spv::OpGroupAll, typeId, spvGroupOperands);
+        spv::Id groupAny = builder.createOp(spv::OpGroupAny, typeId, spvGroupOperands);
+
+        return builder.createBinOp(spv::OpLogicalOr, typeId, groupAll,
+                                   builder.createUnaryOp(spv::OpLogicalNot, typeId, groupAny));
+    }
+
+    case glslang::EOpReadInvocation:
+        opCode = spv::OpGroupBroadcast;
+        if (builder.isVectorType(typeId))
+            return CreateInvocationsVectorOperation(opCode, typeId, operands);
+        break;
+    case glslang::EOpReadFirstInvocation:
+        opCode = spv::OpSubgroupFirstInvocationKHR;
+        break;
+    case glslang::EOpBallot:
+    {
+        // NOTE: According to the spec, the result type of "OpSubgroupBallotKHR" must be a 4 component vector of 32
+        // bit integer types. The GLSL built-in function "ballotARB()" assumes the maximum number of invocations in
+        // a subgroup is 64. Thus, we have to convert uvec4.xy to uint64_t as follow:
+        //
+        //     result = Bitcast(SubgroupBallotKHR(Predicate).xy)
+        //
+        spv::Id uintType  = builder.makeUintType(32);
+        spv::Id uvec4Type = builder.makeVectorType(uintType, 4);
+        spv::Id result = builder.createOp(spv::OpSubgroupBallotKHR, uvec4Type, spvGroupOperands);
+
+        std::vector<spv::Id> components;
+        components.push_back(builder.createCompositeExtract(result, uintType, 0));
+        components.push_back(builder.createCompositeExtract(result, uintType, 1));
+
+        spv::Id uvec2Type = builder.makeVectorType(uintType, 2);
+        return builder.createUnaryOp(spv::OpBitcast, typeId,
+                                     builder.createCompositeConstruct(uvec2Type, components));
+    }
+
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpMinInvocations:
+    case glslang::EOpMaxInvocations:
+    case glslang::EOpAddInvocations:
+        // pick the float/unsigned/signed variant of the group reduction
+        if (op == glslang::EOpMinInvocations) {
+            if (isFloat)
+                opCode = spv::OpGroupFMin;
+            else {
+                if (isUnsigned)
+                    opCode = spv::OpGroupUMin;
+                else
+                    opCode = spv::OpGroupSMin;
+            }
+        } else if (op == glslang::EOpMaxInvocations) {
+            if (isFloat)
+                opCode = spv::OpGroupFMax;
+            else {
+                if (isUnsigned)
+                    opCode = spv::OpGroupUMax;
+                else
+                    opCode = spv::OpGroupSMax;
+            }
+        } else {
+            if (isFloat)
+                opCode = spv::OpGroupFAdd;
+            else
+                opCode = spv::OpGroupIAdd;
+        }
+
+        if (builder.isVectorType(typeId))
+            return CreateInvocationsVectorOperation(opCode, typeId, operands);
+
+        break;
+    case glslang::EOpMinInvocationsNonUniform:
+    case glslang::EOpMaxInvocationsNonUniform:
+    case glslang::EOpAddInvocationsNonUniform:
+        // AMD non-uniform variants; same float/unsigned/signed selection
+        if (op == glslang::EOpMinInvocationsNonUniform) {
+            if (isFloat)
+                opCode = spv::OpGroupFMinNonUniformAMD;
+            else {
+                if (isUnsigned)
+                    opCode = spv::OpGroupUMinNonUniformAMD;
+                else
+                    opCode = spv::OpGroupSMinNonUniformAMD;
+            }
+        }
+        else if (op == glslang::EOpMaxInvocationsNonUniform) {
+            if (isFloat)
+                opCode = spv::OpGroupFMaxNonUniformAMD;
+            else {
+                if (isUnsigned)
+                    opCode = spv::OpGroupUMaxNonUniformAMD;
+                else
+                    opCode = spv::OpGroupSMaxNonUniformAMD;
+            }
+        }
+        else {
+            if (isFloat)
+                opCode = spv::OpGroupFAddNonUniformAMD;
+            else
+                opCode = spv::OpGroupIAddNonUniformAMD;
+        }
+
+        if (builder.isVectorType(typeId))
+            return CreateInvocationsVectorOperation(opCode, typeId, operands);
+
+        break;
+#endif
+    default:
+        logger->missingFunctionality("invocation operation");
+        return spv::NoResult;
+    }
+
+    assert(opCode != spv::OpNop);
+    return builder.createOp(opCode, typeId, spvGroupOperands);
+}
+
+// Create a group invocation operation over a vector operand.  The group
+// instructions handled here operate on scalars, so the vector is processed
+// one component at a time and the results are recombined.
+spv::Id TGlslangToSpvTraverser::CreateInvocationsVectorOperation(spv::Op op, spv::Id typeId, std::vector<spv::Id>& operands)
+{
+#ifdef AMD_EXTENSIONS
+    assert(op == spv::OpGroupFMin || op == spv::OpGroupUMin || op == spv::OpGroupSMin ||
+           op == spv::OpGroupFMax || op == spv::OpGroupUMax || op == spv::OpGroupSMax ||
+           op == spv::OpGroupFAdd || op == spv::OpGroupIAdd || op == spv::OpGroupBroadcast ||
+           op == spv::OpGroupFMinNonUniformAMD || op == spv::OpGroupUMinNonUniformAMD || op == spv::OpGroupSMinNonUniformAMD ||
+           op == spv::OpGroupFMaxNonUniformAMD || op == spv::OpGroupUMaxNonUniformAMD || op == spv::OpGroupSMaxNonUniformAMD ||
+           op == spv::OpGroupFAddNonUniformAMD || op == spv::OpGroupIAddNonUniformAMD);
+#else
+    assert(op == spv::OpGroupFMin || op == spv::OpGroupUMin || op == spv::OpGroupSMin ||
+           op == spv::OpGroupFMax || op == spv::OpGroupUMax || op == spv::OpGroupSMax ||
+           op == spv::OpGroupFAdd || op == spv::OpGroupIAdd || op == spv::OpGroupBroadcast);
+#endif
+
+    // Strategy:
+    //   - extract each scalar component of operands[0]
+    //   - emit the scalar group operation for it
+    //   - construct the result vector from the per-component results
+
+    const int componentCount = builder.getNumComponents(operands[0]);
+    const spv::Id scalarTypeId = builder.getScalarTypeId(builder.getTypeId(operands[0]));
+
+    std::vector<spv::Id> componentResults;
+    for (int i = 0; i < componentCount; ++i) {
+        // pull out one scalar component of the source vector
+        std::vector<unsigned int> extractIndex(1, i);
+        spv::Id component = builder.createCompositeExtract(operands[0], scalarTypeId, extractIndex);
+
+        // assemble the operand list for the scalar group instruction
+        std::vector<spv::Id> groupOperands;
+        groupOperands.push_back(builder.makeUintConstant(spv::ScopeSubgroup));
+        if (op == spv::OpGroupBroadcast) {
+            // broadcast takes (value, invocation id)
+            groupOperands.push_back(component);
+            groupOperands.push_back(operands[1]);
+        } else {
+            // reductions take (group operation, value)
+            groupOperands.push_back(spv::GroupOperationReduce);
+            groupOperands.push_back(component);
+        }
+
+        componentResults.push_back(builder.createOp(op, scalarTypeId, groupOperands));
+    }
+
+    // recombine the per-component results into the vector result
+    return builder.createCompositeConstruct(typeId, componentResults);
+}
+
+// Translate multi-operand GLSL built-ins that don't fit the unary/binary
+// patterns (min/max/clamp/mix, geometry helpers, extended-arithmetic ops,
+// frexp, invocation reads, and the AMD extended-instruction built-ins).
+//
+// Most cases dispatch either to a GLSL.std.450 / vendor extended instruction
+// ('libCall') or to a core SPIR-V opcode ('opCode').  'consumedOperands'
+// tracks how many leading operands the instruction itself takes; the
+// remaining operands are l-values that receive pieces of a struct result
+// (addCarry, subBorrow, [iu]mulExtended, frexp) in the decode step at the end.
+spv::Id TGlslangToSpvTraverser::createMiscOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+    bool isUnsigned = typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64;
+#ifdef AMD_EXTENSIONS
+    bool isFloat = typeProxy == glslang::EbtFloat || typeProxy == glslang::EbtDouble || typeProxy == glslang::EbtFloat16;
+#else
+    bool isFloat = typeProxy == glslang::EbtFloat || typeProxy == glslang::EbtDouble;
+#endif
+
+    spv::Op opCode = spv::OpNop;
+    int extBuiltins = -1;
+    int libCall = -1;
+    size_t consumedOperands = operands.size();
+    spv::Id typeId0 = 0;
+    if (consumedOperands > 0)
+        typeId0 = builder.getTypeId(operands[0]);
+    spv::Id frexpIntType = 0;
+
+    switch (op) {
+    case glslang::EOpMin:
+        if (isFloat)
+            libCall = spv::GLSLstd450FMin;
+        else if (isUnsigned)
+            libCall = spv::GLSLstd450UMin;
+        else
+            libCall = spv::GLSLstd450SMin;
+        // NOTE(review): promoteScalar appears to reconcile mixed
+        // scalar/vector operand shapes — see Builder::promoteScalar
+        builder.promoteScalar(precision, operands.front(), operands.back());
+        break;
+    case glslang::EOpModf:
+        libCall = spv::GLSLstd450Modf;
+        break;
+    case glslang::EOpMax:
+        if (isFloat)
+            libCall = spv::GLSLstd450FMax;
+        else if (isUnsigned)
+            libCall = spv::GLSLstd450UMax;
+        else
+            libCall = spv::GLSLstd450SMax;
+        builder.promoteScalar(precision, operands.front(), operands.back());
+        break;
+    case glslang::EOpPow:
+        libCall = spv::GLSLstd450Pow;
+        break;
+    case glslang::EOpDot:
+        opCode = spv::OpDot;
+        break;
+    case glslang::EOpAtan:
+        // two-argument atan(y, x)
+        libCall = spv::GLSLstd450Atan2;
+        break;
+
+    case glslang::EOpClamp:
+        if (isFloat)
+            libCall = spv::GLSLstd450FClamp;
+        else if (isUnsigned)
+            libCall = spv::GLSLstd450UClamp;
+        else
+            libCall = spv::GLSLstd450SClamp;
+        builder.promoteScalar(precision, operands.front(), operands[1]);
+        builder.promoteScalar(precision, operands.front(), operands[2]);
+        break;
+    case glslang::EOpMix:
+        if (! builder.isBoolType(builder.getScalarTypeId(builder.getTypeId(operands.back())))) {
+            assert(isFloat);
+            libCall = spv::GLSLstd450FMix;
+        } else {
+            // mix() with a bool selector maps to OpSelect, whose condition
+            // comes first, so move the selector to the front
+            opCode = spv::OpSelect;
+            std::swap(operands.front(), operands.back());
+        }
+        builder.promoteScalar(precision, operands.front(), operands.back());
+        break;
+    case glslang::EOpStep:
+        libCall = spv::GLSLstd450Step;
+        builder.promoteScalar(precision, operands.front(), operands.back());
+        break;
+    case glslang::EOpSmoothStep:
+        libCall = spv::GLSLstd450SmoothStep;
+        builder.promoteScalar(precision, operands[0], operands[2]);
+        builder.promoteScalar(precision, operands[1], operands[2]);
+        break;
+
+    case glslang::EOpDistance:
+        libCall = spv::GLSLstd450Distance;
+        break;
+    case glslang::EOpCross:
+        libCall = spv::GLSLstd450Cross;
+        break;
+    case glslang::EOpFaceForward:
+        libCall = spv::GLSLstd450FaceForward;
+        break;
+    case glslang::EOpReflect:
+        libCall = spv::GLSLstd450Reflect;
+        break;
+    case glslang::EOpRefract:
+        libCall = spv::GLSLstd450Refract;
+        break;
+    case glslang::EOpInterpolateAtSample:
+        builder.addCapability(spv::CapabilityInterpolationFunction);
+        libCall = spv::GLSLstd450InterpolateAtSample;
+        break;
+    case glslang::EOpInterpolateAtOffset:
+        builder.addCapability(spv::CapabilityInterpolationFunction);
+        libCall = spv::GLSLstd450InterpolateAtOffset;
+        break;
+    // extended-arithmetic ops return a two-member struct; the extra
+    // glslang operand is the l-value receiving the second member
+    case glslang::EOpAddCarry:
+        opCode = spv::OpIAddCarry;
+        typeId = builder.makeStructResultType(typeId0, typeId0);
+        consumedOperands = 2;
+        break;
+    case glslang::EOpSubBorrow:
+        opCode = spv::OpISubBorrow;
+        typeId = builder.makeStructResultType(typeId0, typeId0);
+        consumedOperands = 2;
+        break;
+    case glslang::EOpUMulExtended:
+        opCode = spv::OpUMulExtended;
+        typeId = builder.makeStructResultType(typeId0, typeId0);
+        consumedOperands = 2;
+        break;
+    case glslang::EOpIMulExtended:
+        opCode = spv::OpSMulExtended;
+        typeId = builder.makeStructResultType(typeId0, typeId0);
+        consumedOperands = 2;
+        break;
+    case glslang::EOpBitfieldExtract:
+        if (isUnsigned)
+            opCode = spv::OpBitFieldUExtract;
+        else
+            opCode = spv::OpBitFieldSExtract;
+        break;
+    case glslang::EOpBitfieldInsert:
+        opCode = spv::OpBitFieldInsert;
+        break;
+
+    case glslang::EOpFma:
+        libCall = spv::GLSLstd450Fma;
+        break;
+    case glslang::EOpFrexp:
+        // FrexpStruct returns { significand, exponent }; the exponent is an
+        // int (or int vector) matching the operand's component count and is
+        // stored through the second glslang operand below
+        libCall = spv::GLSLstd450FrexpStruct;
+        if (builder.getNumComponents(operands[0]) == 1)
+            frexpIntType = builder.makeIntegerType(32, true);
+        else
+            frexpIntType = builder.makeVectorType(builder.makeIntegerType(32, true), builder.getNumComponents(operands[0]));
+        typeId = builder.makeStructResultType(typeId0, frexpIntType);
+        consumedOperands = 1;
+        break;
+    case glslang::EOpLdexp:
+        libCall = spv::GLSLstd450Ldexp;
+        break;
+
+    case glslang::EOpReadInvocation:
+        return createInvocationsOperation(op, typeId, operands, typeProxy);
+
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpSwizzleInvocations:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+        libCall = spv::SwizzleInvocationsAMD;
+        break;
+    case glslang::EOpSwizzleInvocationsMasked:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+        libCall = spv::SwizzleInvocationsMaskedAMD;
+        break;
+    case glslang::EOpWriteInvocation:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+        libCall = spv::WriteInvocationAMD;
+        break;
+
+    case glslang::EOpMin3:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax);
+        if (isFloat)
+            libCall = spv::FMin3AMD;
+        else {
+            if (isUnsigned)
+                libCall = spv::UMin3AMD;
+            else
+                libCall = spv::SMin3AMD;
+        }
+        break;
+    case glslang::EOpMax3:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax);
+        if (isFloat)
+            libCall = spv::FMax3AMD;
+        else {
+            if (isUnsigned)
+                libCall = spv::UMax3AMD;
+            else
+                libCall = spv::SMax3AMD;
+        }
+        break;
+    case glslang::EOpMid3:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax);
+        if (isFloat)
+            libCall = spv::FMid3AMD;
+        else {
+            if (isUnsigned)
+                libCall = spv::UMid3AMD;
+            else
+                libCall = spv::SMid3AMD;
+        }
+        break;
+
+    case glslang::EOpInterpolateAtVertex:
+        extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+        libCall = spv::InterpolateAtVertexAMD;
+        break;
+#endif
+
+    default:
+        return 0;
+    }
+
+    spv::Id id = 0;
+    if (libCall >= 0) {
+        // Use an extended instruction from the standard library.
+        // Construct the call arguments, without modifying the original operands vector.
+        // We might need the remaining arguments, e.g. in the EOpFrexp case.
+        std::vector<spv::Id> callArguments(operands.begin(), operands.begin() + consumedOperands);
+        id = builder.createBuiltinCall(typeId, extBuiltins >= 0 ? extBuiltins : stdBuiltins, libCall, callArguments);
+    } else {
+        switch (consumedOperands) {
+        case 0:
+            // should all be handled by visitAggregate and createNoArgOperation
+            assert(0);
+            return 0;
+        case 1:
+            // should all be handled by createUnaryOperation
+            assert(0);
+            return 0;
+        case 2:
+            id = builder.createBinOp(opCode, typeId, operands[0], operands[1]);
+            break;
+        default:
+            // anything 3 or over doesn't have l-value operands, so all should be consumed
+            assert(consumedOperands == operands.size());
+            id = builder.createOp(opCode, typeId, operands);
+            break;
+        }
+    }
+
+    // Decode the return types that were structures
+    switch (op) {
+    case glslang::EOpAddCarry:
+    case glslang::EOpSubBorrow:
+        // store the carry/borrow (member 1) to the out-parameter, return the sum/difference
+        builder.createStore(builder.createCompositeExtract(id, typeId0, 1), operands[2]);
+        id = builder.createCompositeExtract(id, typeId0, 0);
+        break;
+    case glslang::EOpUMulExtended:
+    case glslang::EOpIMulExtended:
+        // low half (member 0) goes to operands[3], high half (member 1) to operands[2]
+        builder.createStore(builder.createCompositeExtract(id, typeId0, 0), operands[3]);
+        builder.createStore(builder.createCompositeExtract(id, typeId0, 1), operands[2]);
+        break;
+    case glslang::EOpFrexp:
+        // store the exponent (member 1) to the out-parameter, return the significand
+        assert(operands.size() == 2);
+        builder.createStore(builder.createCompositeExtract(id, frexpIntType, 1), operands[1]);
+        id = builder.createCompositeExtract(id, typeId0, 0);
+        break;
+    default:
+        break;
+    }
+
+    return builder.setPrecision(id, precision);
+}
+
+// Intrinsics with no arguments (or no return value, and no precision).
+//
+// Emits the side-effecting instruction for the operator and returns 0 for
+// ops that produce no value (the AMD "time" query is the one exception that
+// returns a result id).
+spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId)
+{
+    // TODO: get the barrier operands correct
+
+    switch (op) {
+    case glslang::EOpEmitVertex:
+        builder.createNoResultOp(spv::OpEmitVertex);
+        return 0;
+    case glslang::EOpEndPrimitive:
+        builder.createNoResultOp(spv::OpEndPrimitive);
+        return 0;
+    case glslang::EOpBarrier:
+        builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeDevice, spv::MemorySemanticsMaskNone);
+        return 0;
+    case glslang::EOpMemoryBarrier:
+        builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsAllMemory);
+        return 0;
+    case glslang::EOpMemoryBarrierAtomicCounter:
+        builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsAtomicCounterMemoryMask);
+        return 0;
+    case glslang::EOpMemoryBarrierBuffer:
+        builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsUniformMemoryMask);
+        return 0;
+    case glslang::EOpMemoryBarrierImage:
+        builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsImageMemoryMask);
+        return 0;
+    case glslang::EOpMemoryBarrierShared:
+        builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsWorkgroupMemoryMask);
+        return 0;
+    case glslang::EOpGroupMemoryBarrier:
+        builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsCrossWorkgroupMemoryMask);
+        return 0;
+    // the *WithGroupSync forms are HLSL-style: a control barrier whose
+    // memory semantics make it a memory barrier as well
+    case glslang::EOpAllMemoryBarrierWithGroupSync:
+        // Control barrier with non-"None" semantic is also a memory barrier.
+        builder.createControlBarrier(spv::ScopeDevice, spv::ScopeDevice, spv::MemorySemanticsAllMemory);
+        return 0;
+    case glslang::EOpGroupMemoryBarrierWithGroupSync:
+        // Control barrier with non-"None" semantic is also a memory barrier.
+        builder.createControlBarrier(spv::ScopeDevice, spv::ScopeDevice, spv::MemorySemanticsCrossWorkgroupMemoryMask);
+        return 0;
+    case glslang::EOpWorkgroupMemoryBarrier:
+        builder.createMemoryBarrier(spv::ScopeWorkgroup, spv::MemorySemanticsWorkgroupMemoryMask);
+        return 0;
+    case glslang::EOpWorkgroupMemoryBarrierWithGroupSync:
+        // Control barrier with non-"None" semantic is also a memory barrier.
+        builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup, spv::MemorySemanticsWorkgroupMemoryMask);
+        return 0;
+#ifdef AMD_EXTENSIONS
+    case glslang::EOpTime:
+    {
+        std::vector<spv::Id> args; // Dummy arguments
+        spv::Id id = builder.createBuiltinCall(typeId, getExtBuiltins(spv::E_SPV_AMD_gcn_shader), spv::TimeAMD, args);
+        return builder.setPrecision(id, precision);
+    }
+#endif
+    default:
+        logger->missingFunctionality("unknown operation with no arguments");
+        return 0;
+    }
+}
+
+// Return the SPIR-V id for 'symbol', creating the variable and all of its
+// decorations on first encounter; later lookups are served from the
+// symbolValues cache so decorations are only emitted once.
+spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol)
+{
+    auto iter = symbolValues.find(symbol->getId());
+    spv::Id id;
+    if (symbolValues.end() != iter) {
+        id = iter->second;
+        return id;
+    }
+
+    // it was not found, create it
+    id = createSpvVariable(symbol);
+    symbolValues[symbol->getId()] = id;
+
+    // decorations that apply to non-block objects only
+    if (symbol->getBasicType() != glslang::EbtBlock) {
+        addDecoration(id, TranslatePrecisionDecoration(symbol->getType()));
+        addDecoration(id, TranslateInterpolationDecoration(symbol->getType().getQualifier()));
+        addDecoration(id, TranslateAuxiliaryStorageDecoration(symbol->getType().getQualifier()));
+        if (symbol->getType().getQualifier().hasSpecConstantId())
+            addDecoration(id, spv::DecorationSpecId, symbol->getType().getQualifier().layoutSpecConstantId);
+        if (symbol->getQualifier().hasIndex())
+            builder.addDecoration(id, spv::DecorationIndex, symbol->getQualifier().layoutIndex);
+        if (symbol->getQualifier().hasComponent())
+            builder.addDecoration(id, spv::DecorationComponent, symbol->getQualifier().layoutComponent);
+        if (glslangIntermediate->getXfbMode()) {
+            builder.addCapability(spv::CapabilityTransformFeedback);
+            if (symbol->getQualifier().hasXfbStride())
+                builder.addDecoration(id, spv::DecorationXfbStride, symbol->getQualifier().layoutXfbStride);
+            if (symbol->getQualifier().hasXfbBuffer())
+                builder.addDecoration(id, spv::DecorationXfbBuffer, symbol->getQualifier().layoutXfbBuffer);
+            if (symbol->getQualifier().hasXfbOffset())
+                builder.addDecoration(id, spv::DecorationOffset, symbol->getQualifier().layoutXfbOffset);
+        }
+        // atomic counters use this:
+        if (symbol->getQualifier().hasOffset())
+            builder.addDecoration(id, spv::DecorationOffset, symbol->getQualifier().layoutOffset);
+    }
+
+    // decorations that apply to all objects, blocks included
+    if (symbol->getQualifier().hasLocation())
+        builder.addDecoration(id, spv::DecorationLocation, symbol->getQualifier().layoutLocation);
+    addDecoration(id, TranslateInvariantDecoration(symbol->getType().getQualifier()));
+    if (symbol->getQualifier().hasStream() && glslangIntermediate->isMultiStream()) {
+        builder.addCapability(spv::CapabilityGeometryStreams);
+        builder.addDecoration(id, spv::DecorationStream, symbol->getQualifier().layoutStream);
+    }
+    if (symbol->getQualifier().hasSet())
+        builder.addDecoration(id, spv::DecorationDescriptorSet, symbol->getQualifier().layoutSet);
+    else if (IsDescriptorResource(symbol->getType())) {
+        // default to 0
+        builder.addDecoration(id, spv::DecorationDescriptorSet, 0);
+    }
+    if (symbol->getQualifier().hasBinding())
+        builder.addDecoration(id, spv::DecorationBinding, symbol->getQualifier().layoutBinding);
+    if (symbol->getQualifier().hasAttachment())
+        builder.addDecoration(id, spv::DecorationInputAttachmentIndex, symbol->getQualifier().layoutAttachment);
+    // NOTE(review): XfbStride/XfbBuffer are also decorated in the non-block branch
+    // above, so non-block xfb symbols may receive these decorations twice —
+    // confirm whether duplicate decorations are intended/harmless here.
+    if (glslangIntermediate->getXfbMode()) {
+        builder.addCapability(spv::CapabilityTransformFeedback);
+        if (symbol->getQualifier().hasXfbStride())
+            builder.addDecoration(id, spv::DecorationXfbStride, symbol->getQualifier().layoutXfbStride);
+        if (symbol->getQualifier().hasXfbBuffer())
+            builder.addDecoration(id, spv::DecorationXfbBuffer, symbol->getQualifier().layoutXfbBuffer);
+    }
+
+    // memory qualifiers (coherent/volatile/restrict/readonly/writeonly) for images
+    if (symbol->getType().isImage()) {
+        std::vector<spv::Decoration> memory;
+        TranslateMemoryDecoration(symbol->getType().getQualifier(), memory);
+        for (unsigned int i = 0; i < memory.size(); ++i)
+            addDecoration(id, memory[i]);
+    }
+
+    // built-in variable decorations
+    spv::BuiltIn builtIn = TranslateBuiltInDecoration(symbol->getQualifier().builtIn, false);
+    if (builtIn != spv::BuiltInMax)
+        addDecoration(id, spv::DecorationBuiltIn, (int)builtIn);
+
+    return id;
+}
+
+// Attach a no-operand decoration to an object, ignoring the
+// DecorationMax sentinel that the Translate* helpers use to mean "none".
+void TGlslangToSpvTraverser::addDecoration(spv::Id id, spv::Decoration dec)
+{
+    if (dec == spv::DecorationMax)
+        return;
+
+    builder.addDecoration(id, dec);
+}
+
+// Attach a one-operand decoration (with literal 'value') to an object,
+// ignoring the DecorationMax sentinel that means "no decoration".
+void TGlslangToSpvTraverser::addDecoration(spv::Id id, spv::Decoration dec, unsigned value)
+{
+    if (dec == spv::DecorationMax)
+        return;
+
+    builder.addDecoration(id, dec, value);
+}
+
+// Attach a no-operand decoration to struct member 'member' of type 'id',
+// ignoring the DecorationMax sentinel that means "no decoration".
+void TGlslangToSpvTraverser::addMemberDecoration(spv::Id id, int member, spv::Decoration dec)
+{
+    if (dec == spv::DecorationMax)
+        return;
+
+    builder.addMemberDecoration(id, (unsigned)member, dec);
+}
+
+// Attach a one-operand decoration (with literal 'value') to struct member
+// 'member' of type 'id', ignoring the DecorationMax "no decoration" sentinel.
+void TGlslangToSpvTraverser::addMemberDecoration(spv::Id id, int member, spv::Decoration dec, unsigned value)
+{
+    if (dec == spv::DecorationMax)
+        return;
+
+    builder.addMemberDecoration(id, (unsigned)member, dec, value);
+}
+
+// Make a full tree of instructions to build a SPIR-V specialization constant,
+// or regular constant if possible.
+//
+// TBD: this is not yet done, nor verified to be the best design, it does do the leaf symbols though
+//
+// Recursively walk the nodes.  The nodes form a tree whose leaves are
+// regular constants, which themselves are trees that createSpvConstant()
+// recursively walks.  So, this function walks the "top" of the tree:
+//  - emit specialization constant-building instructions for specConstant
+//  - when running into a non-spec-constant, switch to createSpvConstant()
+spv::Id TGlslangToSpvTraverser::createSpvConstant(const glslang::TIntermTyped& node)
+{
+    assert(node.getQualifier().isConstant());
+
+    // Handle front-end constants first (non-specialization constants).
+    if (! node.getQualifier().specConstant) {
+        // hand off to the non-spec-constant path
+        assert(node.getAsConstantUnion() != nullptr || node.getAsSymbolNode() != nullptr);
+        int nextConst = 0;
+        return createSpvConstantFromConstUnionArray(node.getType(), node.getAsConstantUnion() ? node.getAsConstantUnion()->getConstArray() : node.getAsSymbolNode()->getConstArray(),
+                                 nextConst, false);
+    }
+
+    // We now know we have a specialization constant to build
+
+    // gl_WorkGroupSize is a special case until the front-end handles hierarchical specialization constants,
+    // even then, its specialization ids are handled by special case syntax in GLSL: layout(local_size_x = ...
+    if (node.getType().getQualifier().builtIn == glslang::EbvWorkGroupSize) {
+        // build a uvec3 composite from the three local-size dimensions,
+        // each a spec constant only if the shader gave it a spec id
+        std::vector<spv::Id> dimConstId;
+        for (int dim = 0; dim < 3; ++dim) {
+            bool specConst = (glslangIntermediate->getLocalSizeSpecId(dim) != glslang::TQualifier::layoutNotSet);
+            dimConstId.push_back(builder.makeUintConstant(glslangIntermediate->getLocalSize(dim), specConst));
+            if (specConst)
+                addDecoration(dimConstId.back(), spv::DecorationSpecId, glslangIntermediate->getLocalSizeSpecId(dim));
+        }
+        return builder.makeCompositeConstant(builder.makeVectorType(builder.makeUintType(32), 3), dimConstId, true);
+    }
+
+    // An AST node labelled as specialization constant should be a symbol node.
+    // Its initializer should either be a sub tree with constant nodes, or a constant union array.
+    if (auto* sn = node.getAsSymbolNode()) {
+        if (auto* sub_tree = sn->getConstSubtree()) {
+            // Traverse the constant constructor sub tree like generating normal run-time instructions.
+            // During the AST traversal, if the node is marked as 'specConstant', SpecConstantOpModeGuard
+            // will set the builder into spec constant op instruction generating mode.
+            sub_tree->traverse(this);
+            return accessChainLoad(sub_tree->getType());
+        } else if (auto* const_union_array = &sn->getConstArray()){
+            int nextConst = 0;
+            return createSpvConstantFromConstUnionArray(sn->getType(), *const_union_array, nextConst, true);
+        }
+    }
+
+    // Neither a front-end constant node, nor a specialization constant node with constant union array or
+    // constant sub tree as initializer.
+    // NOTE(review): this hard-exits the process on an unexpected AST shape rather
+    // than reporting through the logger and returning — confirm this is intended.
+    logger->missingFunctionality("Neither a front-end constant nor a spec constant.");
+    exit(1);
+    return spv::NoResult;
+}
+
+// Use 'consts' as the flattened glslang source of scalar constants to recursively
+// build the aggregate SPIR-V constant.
+//
+// If there are not enough elements present in 'consts', 0 will be substituted;
+// an empty 'consts' can be used to create a fully zeroed SPIR-V constant.
+//
+// 'nextConst' is the cursor into 'consts', advanced across recursive calls.
+// 'specConstant' only applies to the scalar case; aggregate elements are
+// recursed into with specConstant=false (see each recursive call below).
+spv::Id TGlslangToSpvTraverser::createSpvConstantFromConstUnionArray(const glslang::TType& glslangType, const glslang::TConstUnionArray& consts, int& nextConst, bool specConstant)
+{
+    // vector of constants for SPIR-V
+    std::vector<spv::Id> spvConsts;
+
+    // Type is used for struct and array constants
+    spv::Id typeId = convertGlslangToSpvType(glslangType);
+
+    if (glslangType.isArray()) {
+        // one element constant per outer array element
+        glslang::TType elementType(glslangType, 0);
+        for (int i = 0; i < glslangType.getOuterArraySize(); ++i)
+            spvConsts.push_back(createSpvConstantFromConstUnionArray(elementType, consts, nextConst, false));
+    } else if (glslangType.isMatrix()) {
+        // one column-vector constant per matrix column
+        glslang::TType vectorType(glslangType, 0);
+        for (int col = 0; col < glslangType.getMatrixCols(); ++col)
+            spvConsts.push_back(createSpvConstantFromConstUnionArray(vectorType, consts, nextConst, false));
+    } else if (glslangType.getStruct()) {
+        // one member constant per struct member, in declaration order
+        glslang::TVector<glslang::TTypeLoc>::const_iterator iter;
+        for (iter = glslangType.getStruct()->begin(); iter != glslangType.getStruct()->end(); ++iter)
+            spvConsts.push_back(createSpvConstantFromConstUnionArray(*iter->type, consts, nextConst, false));
+    } else if (glslangType.getVectorSize() > 1) {
+        // vector of scalars: consume one flattened const per component,
+        // substituting zero once 'consts' is exhausted
+        for (unsigned int i = 0; i < (unsigned int)glslangType.getVectorSize(); ++i) {
+            bool zero = nextConst >= consts.size();
+            switch (glslangType.getBasicType()) {
+            case glslang::EbtInt:
+                spvConsts.push_back(builder.makeIntConstant(zero ? 0 : consts[nextConst].getIConst()));
+                break;
+            case glslang::EbtUint:
+                spvConsts.push_back(builder.makeUintConstant(zero ? 0 : consts[nextConst].getUConst()));
+                break;
+            case glslang::EbtInt64:
+                spvConsts.push_back(builder.makeInt64Constant(zero ? 0 : consts[nextConst].getI64Const()));
+                break;
+            case glslang::EbtUint64:
+                spvConsts.push_back(builder.makeUint64Constant(zero ? 0 : consts[nextConst].getU64Const()));
+                break;
+            case glslang::EbtFloat:
+                spvConsts.push_back(builder.makeFloatConstant(zero ? 0.0F : (float)consts[nextConst].getDConst()));
+                break;
+            case glslang::EbtDouble:
+                spvConsts.push_back(builder.makeDoubleConstant(zero ? 0.0 : consts[nextConst].getDConst()));
+                break;
+#ifdef AMD_EXTENSIONS
+            case glslang::EbtFloat16:
+                spvConsts.push_back(builder.makeFloat16Constant(zero ? 0.0F : (float)consts[nextConst].getDConst()));
+                break;
+#endif
+            case glslang::EbtBool:
+                spvConsts.push_back(builder.makeBoolConstant(zero ? false : consts[nextConst].getBConst()));
+                break;
+            default:
+                assert(0);
+                break;
+            }
+            ++nextConst;
+        }
+    } else {
+        // we have a non-aggregate (scalar) constant; this is the only case
+        // where 'specConstant' takes effect
+        bool zero = nextConst >= consts.size();
+        spv::Id scalar = 0;
+        switch (glslangType.getBasicType()) {
+        case glslang::EbtInt:
+            scalar = builder.makeIntConstant(zero ? 0 : consts[nextConst].getIConst(), specConstant);
+            break;
+        case glslang::EbtUint:
+            scalar = builder.makeUintConstant(zero ? 0 : consts[nextConst].getUConst(), specConstant);
+            break;
+        case glslang::EbtInt64:
+            scalar = builder.makeInt64Constant(zero ? 0 : consts[nextConst].getI64Const(), specConstant);
+            break;
+        case glslang::EbtUint64:
+            scalar = builder.makeUint64Constant(zero ? 0 : consts[nextConst].getU64Const(), specConstant);
+            break;
+        case glslang::EbtFloat:
+            scalar = builder.makeFloatConstant(zero ? 0.0F : (float)consts[nextConst].getDConst(), specConstant);
+            break;
+        case glslang::EbtDouble:
+            scalar = builder.makeDoubleConstant(zero ? 0.0 : consts[nextConst].getDConst(), specConstant);
+            break;
+#ifdef AMD_EXTENSIONS
+        case glslang::EbtFloat16:
+            scalar = builder.makeFloat16Constant(zero ? 0.0F : (float)consts[nextConst].getDConst(), specConstant);
+            break;
+#endif
+        case glslang::EbtBool:
+            scalar = builder.makeBoolConstant(zero ? false : consts[nextConst].getBConst(), specConstant);
+            break;
+        default:
+            assert(0);
+            break;
+        }
+        ++nextConst;
+        return scalar;
+    }
+
+    return builder.makeCompositeConstant(typeId, spvConsts);
+}
+
+// Return true if the node is a constant or symbol whose reading has no
+// non-trivial observable cost or effect.
+bool TGlslangToSpvTraverser::isTrivialLeaf(const glslang::TIntermTyped* node)
+{
+    // an absent node is not something we can vouch for
+    if (node == nullptr)
+        return false;
+
+    // constants are always safe to read
+    if (node->getAsConstantUnion() != nullptr)
+        return true;
+
+    // anything that is not a symbol is considered non-trivial
+    if (node->getAsSymbolNode() == nullptr)
+        return false;
+
+    // a symbol's triviality depends on the storage class being read
+    const glslang::TStorageQualifier storage = node->getType().getQualifier().storage;
+    return storage == glslang::EvqTemporary     ||
+           storage == glslang::EvqGlobal        ||
+           storage == glslang::EvqIn            ||
+           storage == glslang::EvqInOut         ||
+           storage == glslang::EvqConst         ||
+           storage == glslang::EvqConstReadOnly ||
+           storage == glslang::EvqUniform;
+}
+
+// A node is trivial if it is a single operation with no side effects.
+// Error on the side of saying non-trivial.
+// Return true if trivial.
+//
+// Used to decide whether short-circuit operands can be evaluated
+// unconditionally (flattened) instead of emitting control flow.
+bool TGlslangToSpvTraverser::isTrivial(const glslang::TIntermTyped* node)
+{
+    if (node == nullptr)
+        return false;
+
+    // symbols and constants are trivial
+    if (isTrivialLeaf(node))
+        return true;
+
+    // otherwise, it needs to be a simple operation or one or two leaf nodes
+
+    // not a simple operation
+    const glslang::TIntermBinary* binaryNode = node->getAsBinaryNode();
+    const glslang::TIntermUnary* unaryNode = node->getAsUnaryNode();
+    if (binaryNode == nullptr && unaryNode == nullptr)
+        return false;
+
+    // not on leaf nodes
+    if (binaryNode && (! isTrivialLeaf(binaryNode->getLeft()) || ! isTrivialLeaf(binaryNode->getRight())))
+        return false;
+
+    if (unaryNode && ! isTrivialLeaf(unaryNode->getOperand())) {
+        return false;
+    }
+
+    // node is binary or unary here, so getAsOperator() is non-null;
+    // only a whitelist of side-effect-free operations counts as trivial
+    switch (node->getAsOperator()->getOp()) {
+    case glslang::EOpLogicalNot:
+    case glslang::EOpConvIntToBool:
+    case glslang::EOpConvUintToBool:
+    case glslang::EOpConvFloatToBool:
+    case glslang::EOpConvDoubleToBool:
+    case glslang::EOpEqual:
+    case glslang::EOpNotEqual:
+    case glslang::EOpLessThan:
+    case glslang::EOpGreaterThan:
+    case glslang::EOpLessThanEqual:
+    case glslang::EOpGreaterThanEqual:
+    case glslang::EOpIndexDirect:
+    case glslang::EOpIndexDirectStruct:
+    case glslang::EOpLogicalXor:
+    case glslang::EOpAny:
+    case glslang::EOpAll:
+        return true;
+    default:
+        return false;
+    }
+}
+
+// Emit short-circuiting code, where 'right' is never evaluated unless
+// the left side is true (for &&) or false (for ||).
+//
+// Returns the id of a boolean OpPhi selecting the left result when the right
+// side was skipped, or the right result otherwise.
+spv::Id TGlslangToSpvTraverser::createShortCircuit(glslang::TOperator op, glslang::TIntermTyped& left, glslang::TIntermTyped& right)
+{
+    spv::Id boolTypeId = builder.makeBoolType();
+
+    // emit left operand
+    builder.clearAccessChain();
+    left.traverse(this);
+    spv::Id leftId = accessChainLoad(left.getType());
+
+    // Operands to accumulate OpPhi operands
+    std::vector<spv::Id> phiOperands;
+    // accumulate left operand's phi information (value, predecessor-block pairs)
+    phiOperands.push_back(leftId);
+    phiOperands.push_back(builder.getBuildPoint()->getId());
+
+    // Make the two kinds of operation symmetric with a "!"
+    //   || => emit "if (! left) result = right"
+    //   && => emit "if (  left) result = right"
+    //
+    // TODO: this runtime "not" for || could be avoided by adding functionality
+    // to 'builder' to have an "else" without an "then"
+    if (op == glslang::EOpLogicalOr)
+        leftId = builder.createUnaryOp(spv::OpLogicalNot, boolTypeId, leftId);
+
+    // make an "if" based on the left value
+    spv::Builder::If ifBuilder(leftId, builder);
+
+    // emit right operand as the "then" part of the "if"
+    builder.clearAccessChain();
+    right.traverse(this);
+    spv::Id rightId = accessChainLoad(right.getType());
+
+    // accumulate right operand's phi information
+    phiOperands.push_back(rightId);
+    phiOperands.push_back(builder.getBuildPoint()->getId());
+
+    // finish the "if"
+    ifBuilder.makeEndIf();
+
+    // phi together the two results
+    return builder.createOp(spv::OpPhi, boolTypeId, phiOperands);
+}
+
+// Return the id of the imported extended-instruction set named 'name',
+// importing it (and recording the matching extension) on first use.
+spv::Id TGlslangToSpvTraverser::getExtBuiltins(const char* name)
+{
+    // single lookup instead of a find() followed by operator[]
+    auto found = extBuiltinMap.find(name);
+    if (found != extBuiltinMap.end())
+        return found->second;
+
+    builder.addExtension(name);
+    const spv::Id extBuiltins = builder.import(name);
+    extBuiltinMap[name] = extBuiltins;
+    return extBuiltins;
+}
+
+};  // end anonymous namespace
+
+namespace glslang {
+
+// Fill 'version' with a human-readable SPIR-V version string built from the
+// spv::Version and spv::Revision header constants, e.g. "0x00010000, Revision 6".
+void GetSpirvVersion(std::string& version)
+{
+    const int bufSize = 100;
+    char buf[bufSize];
+    snprintf(buf, bufSize, "0x%08x, Revision %d", spv::Version, spv::Revision);
+    version = buf;
+}
+
+// Write SPIR-V out to a binary file, one 32-bit word at a time in host byte
+// order.  No error reporting: failures leave a short or absent file.
+void OutputSpvBin(const std::vector<unsigned int>& spirv, const char* baseName)
+{
+    std::ofstream out(baseName, std::ios::binary | std::ios::out);
+    for (size_t i = 0; i < spirv.size(); ++i) {
+        const unsigned int word = spirv[i];
+        out.write((const char*)&word, 4);
+    }
+    out.close();
+}
+
+// Write SPIR-V out to a text file with 32-bit hexadecimal words, formatted as
+// a comma-separated list (8 words per line) suitable for embedding in C/C++,
+// preceded by a comment line recording the GLSLANG_REVISION/GLSLANG_DATE macros.
+void OutputSpvHex(const std::vector<unsigned int>& spirv, const char* baseName)
+{
+    std::ofstream out;
+    out.open(baseName, std::ios::binary | std::ios::out);
+    out << "\t// " GLSLANG_REVISION " " GLSLANG_DATE << std::endl;
+    const int WORDS_PER_LINE = 8;
+    for (int i = 0; i < (int)spirv.size(); i += WORDS_PER_LINE) {
+        out << "\t";
+        for (int j = 0; j < WORDS_PER_LINE && i + j < (int)spirv.size(); ++j) {
+            const unsigned int word = spirv[i + j];
+            // setw/setfill make each word a fixed-width 8-digit hex literal
+            out << "0x" << std::hex << std::setw(8) << std::setfill('0') << word;
+            // no trailing comma after the very last word
+            if (i + j + 1 < (int)spirv.size()) {
+                out << ",";
+            }
+        }
+        out << std::endl;
+    }
+    out.close();
+}
+
+//
+// Set up the glslang traversal
+//
+// Convenience overload: runs the translation with a local throwaway logger,
+// discarding any build-log messages.
+void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv)
+{
+    spv::SpvBuildLogger logger;
+    GlslangToSpv(intermediate, spirv, &logger);
+}
+
+// Translate the glslang AST held by 'intermediate' into SPIR-V words written
+// to 'spirv'; diagnostics accumulate in 'logger'.  A null tree root is a no-op.
+void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv, spv::SpvBuildLogger* logger)
+{
+    TIntermNode* root = intermediate.getTreeRoot();
+
+    if (root == 0)
+        return;
+
+    // bracket the traversal in a pool-allocator frame so AST-side temporaries
+    // allocated during translation are reclaimed on pop
+    glslang::GetThreadPoolAllocator().push();
+
+    TGlslangToSpvTraverser it(&intermediate, logger);
+
+    root->traverse(&it);
+
+    it.dumpSpv(spirv);
+
+    glslang::GetThreadPoolAllocator().pop();
+}
+
+}; // end namespace glslang

+ 54 - 0
3rdparty/glslang/SPIRV/GlslangToSpv.h

@@ -0,0 +1,54 @@
+//
+//Copyright (C) 2014 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+
+#if _MSC_VER >= 1900
+    #pragma warning(disable : 4464) // relative include path contains '..'
+#endif
+
+#include "../glslang/Include/intermediate.h"
+
+#include <string>
+#include <vector>
+
+#include "Logger.h"
+
+namespace glslang {
+
+void GetSpirvVersion(std::string&);
+void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv);
+void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv, spv::SpvBuildLogger* logger);
+void OutputSpvBin(const std::vector<unsigned int>& spirv, const char* baseName);
+void OutputSpvHex(const std::vector<unsigned int>& spirv, const char* baseName);
+
+}

+ 113 - 0
3rdparty/glslang/SPIRV/InReadableOrder.cpp

@@ -0,0 +1,113 @@
+//
+//Copyright (C) 2016 Google, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+
+// The SPIR-V spec requires code blocks to appear in an order satisfying the
+// dominator-tree direction (ie, dominator before the dominated).  This is,
+// actually, easy to achieve: any pre-order CFG traversal algorithm will do it.
+// Because such algorithms visit a block only after traversing some path to it
+// from the root, they necessarily visit the block's idom first.
+//
+// But not every graph-traversal algorithm outputs blocks in an order that
+// appears logical to human readers.  The problem is that unrelated branches may
+// be interspersed with each other, and merge blocks may come before some of the
+// branches being merged.
+//
+// A good, human-readable order of blocks may be achieved by performing
+// depth-first search but delaying merge nodes until after all their branches
+// have been visited.  This is implemented below by the inReadableOrder()
+// function.
+
+#include "spvIR.h"
+
+#include <cassert>
+#include <unordered_map>
+
+using spv::Block;
+using spv::Id;
+
+namespace {
+// Traverses CFG in a readable order, invoking a pre-set callback on each block.
+// Use by calling visit() on the root block.
+class ReadableOrderTraverser {
+public:
+    explicit ReadableOrderTraverser(std::function<void(Block*)> callback) : callback_(callback) {}
+    // Visits the block if it hasn't been visited already and isn't currently
+    // being delayed.  Invokes callback(block), then descends into its
+    // successors.  Delays merge-block and continue-block processing until all
+    // the branches have been completed.
+    // NOTE(review): visit() recurses per CFG edge, so stack depth grows with
+    // control-flow nesting/depth — confirm this is acceptable for the largest
+    // expected shaders.
+    void visit(Block* block)
+    {
+        assert(block);
+        if (visited_[block] || delayed_[block])
+            return;
+        callback_(block);
+        visited_[block] = true;
+        Block* mergeBlock = nullptr;
+        Block* continueBlock = nullptr;
+        auto mergeInst = block->getMergeInstruction();
+        if (mergeInst) {
+            // id operand 0 of OpSelectionMerge/OpLoopMerge is the merge block
+            Id mergeId = mergeInst->getIdOperand(0);
+            mergeBlock = block->getParent().getParent().getInstruction(mergeId)->getBlock();
+            delayed_[mergeBlock] = true;
+            if (mergeInst->getOpCode() == spv::OpLoopMerge) {
+                // id operand 1 of OpLoopMerge is the continue target
+                Id continueId = mergeInst->getIdOperand(1);
+                continueBlock =
+                    block->getParent().getParent().getInstruction(continueId)->getBlock();
+                delayed_[continueBlock] = true;
+            }
+        }
+        // visit all branch successors first, then release and visit the
+        // continue block, and finally the merge block, so merges appear
+        // after everything they merge
+        const auto successors = block->getSuccessors();
+        for (auto it = successors.cbegin(); it != successors.cend(); ++it)
+            visit(*it);
+        if (continueBlock) {
+            delayed_[continueBlock] = false;
+            visit(continueBlock);
+        }
+        if (mergeBlock) {
+            delayed_[mergeBlock] = false;
+            visit(mergeBlock);
+        }
+    }
+
+private:
+    std::function<void(Block*)> callback_;
+    // Whether a block has already been visited or is being delayed.
+    std::unordered_map<Block *, bool> visited_, delayed_;
+};
+}
+
+// Public entry point: traverse the CFG rooted at 'root', invoking 'callback'
+// on each block in the human-readable order described at the top of this file.
+void spv::inReadableOrder(Block* root, std::function<void(Block*)> callback)
+{
+    ReadableOrderTraverser(callback).visit(root);
+}

+ 68 - 0
3rdparty/glslang/SPIRV/Logger.cpp

@@ -0,0 +1,68 @@
+//
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of Google Inc. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#include "Logger.h"
+
+#include <algorithm>
+#include <iterator>
+#include <sstream>
+
+namespace spv {
+
+// Record a "to be done" functionality message, keeping the list duplicate-free.
+void SpvBuildLogger::tbdFunctionality(const std::string& f)
+{
+    for (const auto& feature : tbdFeatures)
+        if (feature == f)
+            return;
+
+    tbdFeatures.push_back(f);
+}
+
+// Record a missing-functionality message, keeping the list duplicate-free.
+void SpvBuildLogger::missingFunctionality(const std::string& f)
+{
+    for (const auto& feature : missingFeatures)
+        if (feature == f)
+            return;
+
+    missingFeatures.push_back(f);
+}
+
+// Concatenate all collected diagnostics, one per line, grouped in
+// ascending-severity order: TBD functionality, missing functionality,
+// warnings, then errors.
+std::string SpvBuildLogger::getAllMessages() const {
+    std::ostringstream out;
+    for (const auto& feature : tbdFeatures)
+        out << "TBD functionality: " << feature << "\n";
+    for (const auto& feature : missingFeatures)
+        out << "Missing functionality: " << feature << "\n";
+    for (const auto& warning : warnings)
+        out << "warning: " << warning << "\n";
+    for (const auto& error : errors)
+        out << "error: " << error << "\n";
+    return out.str();
+}
+
+} // end spv namespace

+ 74 - 0
3rdparty/glslang/SPIRV/Logger.h

@@ -0,0 +1,74 @@
+//
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of Google Inc. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GLSLANG_SPIRV_LOGGER_H
+#define GLSLANG_SPIRV_LOGGER_H
+
+#include <string>
+#include <vector>
+
+namespace spv {
+
+// A class for holding all SPIR-V build status messages, including
+// missing/TBD functionalities, warnings, and errors.
+class SpvBuildLogger {
+public:
+    SpvBuildLogger() {}
+
+    // Registers a TBD functionality.
+    void tbdFunctionality(const std::string& f);
+    // Registers a missing functionality.
+    void missingFunctionality(const std::string& f);
+
+    // Logs a warning.
+    void warning(const std::string& w) { warnings.push_back(w); }
+    // Logs an error.
+    void error(const std::string& e) { errors.push_back(e); }
+
+    // Returns all messages accumulated in the order of:
+    // TBD functionalities, missing functionalities, warnings, errors.
+    std::string getAllMessages() const;
+
+private:
+    // Declared-but-undefined copy constructor: pre-C++11 idiom making the
+    // logger non-copyable (copying would silently fork the message streams).
+    SpvBuildLogger(const SpvBuildLogger&);
+
+    // Message stores, one per category; order of insertion is preserved.
+    std::vector<std::string> tbdFeatures;
+    std::vector<std::string> missingFeatures;
+    std::vector<std::string> warnings;
+    std::vector<std::string> errors;
+};
+
+} // end spv namespace
+
+#endif // GLSLANG_SPIRV_LOGGER_H

+ 1311 - 0
3rdparty/glslang/SPIRV/SPVRemapper.cpp

@@ -0,0 +1,1311 @@
+//
+//Copyright (C) 2015 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "SPVRemapper.h"
+#include "doc.h"
+
+#if !defined (use_cpp11)
+// ... not supported before C++11
+#else // defined (use_cpp11)
+
+#include <algorithm>
+#include <cassert>
+#include "../glslang/Include/Common.h"
+
+namespace spv {
+
+    // Out-of-line definitions of spirvbin_t's static handler members
+    // (declared in SPVRemapper.h).  exit(5) is an arbitrary nonzero status.
+    // By default, just abort on error.  Can be overridden via RegisterErrorHandler
+    spirvbin_t::errorfn_t spirvbin_t::errorHandler = [](const std::string&) { exit(5); };
+    // By default, eat log messages.  Can be overridden via RegisterLogHandler
+    spirvbin_t::logfn_t   spirvbin_t::logHandler   = [](const std::string&) { };
+
+    // This can be overridden to provide other message behavior if needed
+    // Emits txt through logHandler, prefixed with 'indent' spaces, but only
+    // when the current verbosity level is at least minVerbosity.
+    void spirvbin_t::msg(int minVerbosity, int indent, const std::string& txt) const
+    {
+        if (verbose >= minVerbosity)
+            logHandler(std::string(indent, ' ') + txt);
+    }
+
+    // hash opcode, with special handling for OpExtInst
+    // For OpExtInst the hash also mixes in the extended-instruction number
+    // (word+4), so different ext instructions hash differently.
+    std::uint32_t spirvbin_t::asOpCodeHash(unsigned word)
+    {
+        const spv::Op opCode = asOpCode(word);
+
+        std::uint32_t offset = 0;
+
+        switch (opCode) {
+        case spv::OpExtInst:
+            offset += asId(word + 4); break;
+        default:
+            break;
+        }
+
+        return opCode * 19 + offset; // 19 = small prime
+    }
+
+    // Returns the [begin, end) word range, relative to the instruction start,
+    // that holds raw literal operands for the given opcode; (0,0) if none.
+    spirvbin_t::range_t spirvbin_t::literalRange(spv::Op opCode) const
+    {
+        static const int maxCount = 1<<30;  // effectively "to end of instruction"
+
+        switch (opCode) {
+        case spv::OpTypeFloat:        // fall through...
+        case spv::OpTypePointer:      return range_t(2, 3);
+        case spv::OpTypeInt:          return range_t(2, 4);
+        // TODO: case spv::OpTypeImage:
+        // TODO: case spv::OpTypeSampledImage:
+        case spv::OpTypeSampler:      return range_t(3, 8);
+        case spv::OpTypeVector:       // fall through
+        case spv::OpTypeMatrix:       // ...
+        case spv::OpTypePipe:         return range_t(3, 4);
+        case spv::OpConstant:         return range_t(3, maxCount);
+        default:                      return range_t(0, 0);
+        }
+    }
+
+    // Returns the [begin, end) word range, relative to the instruction start,
+    // holding type-ID operands for the given opcode; (0,0) if none.
+    // Constants are handled uniformly: their type ID is always word 1.
+    spirvbin_t::range_t spirvbin_t::typeRange(spv::Op opCode) const
+    {
+        static const int maxCount = 1<<30;
+
+        if (isConstOp(opCode))
+            return range_t(1, 2);
+
+        switch (opCode) {
+        case spv::OpTypeVector:       // fall through
+        case spv::OpTypeMatrix:       // ... 
+        case spv::OpTypeSampler:      // ... 
+        case spv::OpTypeArray:        // ... 
+        case spv::OpTypeRuntimeArray: // ... 
+        case spv::OpTypePipe:         return range_t(2, 3);
+        case spv::OpTypeStruct:       // fall through
+        case spv::OpTypeFunction:     return range_t(2, maxCount);
+        case spv::OpTypePointer:      return range_t(3, 4);
+        default:                      return range_t(0, 0);
+        }
+    }
+
+    // Returns the [begin, end) word range, relative to the instruction start,
+    // holding constant-ID operands for the given opcode; (0,0) if none.
+    spirvbin_t::range_t spirvbin_t::constRange(spv::Op opCode) const
+    {
+        static const int maxCount = 1<<30;
+
+        switch (opCode) {
+        case spv::OpTypeArray:         // fall through...
+        case spv::OpTypeRuntimeArray:  return range_t(3, 4);
+        case spv::OpConstantComposite: return range_t(3, maxCount);
+        default:                       return range_t(0, 0);
+        }
+    }
+
+    // Return the size of a type in 32-bit words.  This currently only
+    // handles ints and floats, and is only invoked by queries which must be
+    // integer types.  If ever needed, it can be generalized.
+    // Word typeStart+2 is the type's bit width; (width+31)/32 rounds up
+    // to whole 32-bit words.  Returns 0 for any other type opcode.
+    unsigned spirvbin_t::typeSizeInWords(spv::Id id) const
+    {
+        const unsigned typeStart = idPos(id);
+        const spv::Op  opCode    = asOpCode(typeStart);
+
+        switch (opCode) {
+        case spv::OpTypeInt:   // fall through...
+        case spv::OpTypeFloat: return (spv[typeStart+2]+31)/32;
+        default:
+            return 0;
+        }
+    }
+
+    // Looks up the type of a given const or variable ID, and
+    // returns its size in 32-bit words.
+    // NOTE(review): if the ID is absent, error() is invoked and then the end
+    // iterator is dereferenced; this is only safe because the default error
+    // handler exits.  A user-registered handler that returns would make this
+    // undefined behavior — confirm upstream intent before relying on it.
+    unsigned spirvbin_t::idTypeSizeInWords(spv::Id id) const
+    {
+        const auto tid_it = idTypeSizeMap.find(id);
+        if (tid_it == idTypeSizeMap.end())
+            error("type size for ID not found");
+
+        return tid_it->second;
+    }
+
+    // Is this an opcode we should remove when using --strip?
+    // These are the debug/source-annotation instructions; removing them
+    // does not change module semantics.
+    bool spirvbin_t::isStripOp(spv::Op opCode) const
+    {
+        switch (opCode) {
+        case spv::OpSource:
+        case spv::OpSourceExtension:
+        case spv::OpName:
+        case spv::OpMemberName:
+        case spv::OpLine:           return true;
+        default:                    return false;
+        }
+    }
+
+    // Return true if this opcode is flow control
+    // (used e.g. by optLoadStore to count basic-block boundaries).
+    bool spirvbin_t::isFlowCtrl(spv::Op opCode) const
+    {
+        switch (opCode) {
+        case spv::OpBranchConditional:
+        case spv::OpBranch:
+        case spv::OpSwitch:
+        case spv::OpLoopMerge:
+        case spv::OpSelectionMerge:
+        case spv::OpLabel:
+        case spv::OpFunction:
+        case spv::OpFunctionEnd:    return true;
+        default:                    return false;
+        }
+    }
+
+    // Return true if this opcode defines a type
+    bool spirvbin_t::isTypeOp(spv::Op opCode) const
+    {
+        switch (opCode) {
+        case spv::OpTypeVoid:
+        case spv::OpTypeBool:
+        case spv::OpTypeInt:
+        case spv::OpTypeFloat:
+        case spv::OpTypeVector:
+        case spv::OpTypeMatrix:
+        case spv::OpTypeImage:
+        case spv::OpTypeSampler:
+        case spv::OpTypeArray:
+        case spv::OpTypeRuntimeArray:
+        case spv::OpTypeStruct:
+        case spv::OpTypeOpaque:
+        case spv::OpTypePointer:
+        case spv::OpTypeFunction:
+        case spv::OpTypeEvent:
+        case spv::OpTypeDeviceEvent:
+        case spv::OpTypeReserveId:
+        case spv::OpTypeQueue:
+        case spv::OpTypeSampledImage:
+        case spv::OpTypePipe:         return true;
+        default:                      return false;
+        }
+    }
+
+    // Return true if this opcode defines a constant
+    // OpConstantNull/OpConstantSampler are unimplemented: they report via
+    // error() and then deliberately fall through, so a non-exiting error
+    // handler still sees them classified as constants (return true).
+    bool spirvbin_t::isConstOp(spv::Op opCode) const
+    {
+        switch (opCode) {
+        case spv::OpConstantNull:       error("unimplemented constant type");
+        case spv::OpConstantSampler:    error("unimplemented constant type");
+
+        case spv::OpConstantTrue:
+        case spv::OpConstantFalse:
+        case spv::OpConstantComposite:
+        case spv::OpConstant:         return true;
+        default:                      return false;
+        }
+    }
+
+    // No-op callbacks for process(): an instruction function that claims
+    // nothing (so operand scanning continues) and an ID function that
+    // leaves IDs untouched.
+    const auto inst_fn_nop = [](spv::Op, unsigned) { return false; };
+    const auto op_fn_nop   = [](spv::Id&)          { };
+
+    // g++ doesn't like these defined in the class proper in an anonymous namespace.
+    // Dunno why.  Also MSVC doesn't like the constexpr keyword.  Also dunno why.
+    // Defining them externally seems to please both compilers, so, here they are.
+    // Sentinel ID values (wrap around as large unsigned values) plus the
+    // fixed 5-word SPIR-V module header size.
+    const spv::Id spirvbin_t::unmapped    = spv::Id(-10000);
+    const spv::Id spirvbin_t::unused      = spv::Id(-10001);
+    const int     spirvbin_t::header_size = 5;
+
+    // Returns the first ID >= id that is not yet mapped in the new ID space.
+    spv::Id spirvbin_t::nextUnusedId(spv::Id id)
+    {
+        while (isNewIdMapped(id))  // search for an unused ID
+            ++id;
+
+        return id;
+    }
+
+    // Records (and returns) the mapping old-ID -> new-ID in idMapL, growing
+    // the table on demand.  Passing the sentinels 'unmapped'/'unused' as
+    // newId just stores the sentinel; a real newId is validated (old ID must
+    // be used and not yet mapped, new ID must be free) and then marked used.
+    spv::Id spirvbin_t::localId(spv::Id id, spv::Id newId)
+    {
+        assert(id != spv::NoResult && newId != spv::NoResult);
+
+        if (id >= idMapL.size())
+            idMapL.resize(id+1, unused);
+
+        if (newId != unmapped && newId != unused) {
+            if (isOldIdUnused(id))
+                error(std::string("ID unused in module: ") + std::to_string(id));
+
+            if (!isOldIdUnmapped(id))
+                error(std::string("ID already mapped: ") + std::to_string(id) + " -> "
+                + std::to_string(localId(id)));
+
+            if (isNewIdMapped(newId))
+                error(std::string("ID already used in module: ") + std::to_string(newId));
+
+            msg(4, 4, std::string("map: ") + std::to_string(id) + " -> " + std::to_string(newId));
+            setMapped(newId);
+            largestNewId = std::max(largestNewId, newId);
+        }
+
+        return idMapL[id] = newId;
+    }
+
+    // Parse a literal string from the SPIR binary and return it as an std::string
+    // (returned by value; move semantics / RVO avoid a copy at the call site).
+    // 'word' is an index into the spv word stream; reads bytes until NUL.
+    std::string spirvbin_t::literalString(unsigned word) const
+    {
+        std::string literal;
+
+        literal.reserve(16);  // avoid reallocation for typical short names
+
+        const char* bytes = reinterpret_cast<const char*>(spv.data() + word);
+
+        while (bytes && *bytes)
+            literal += *bytes++;
+
+        return literal;
+    }
+
+
+    // Rewrites every ID operand in the binary through the accumulated
+    // old->new ID map; every ID must be fully mapped by this point.
+    void spirvbin_t::applyMap()
+    {
+        msg(3, 2, std::string("Applying map: "));
+
+        // Map local IDs through the ID map
+        process(inst_fn_nop, // ignore instructions
+            [this](spv::Id& id) {
+                id = localId(id);
+                assert(id != unused && id != unmapped);
+            }
+        );
+    }
+
+
+    // Find free IDs for anything we haven't mapped
+    // Assigns sequentially-searched free IDs to every used-but-unmapped old
+    // ID, then rewrites the header's ID bound to the new maximum + 1.
+    void spirvbin_t::mapRemainder()
+    {
+        msg(3, 2, std::string("Remapping remainder: "));
+
+        spv::Id     unusedId  = 1;  // can't use 0: that's NoResult
+        spirword_t  maxBound  = 0;
+
+        for (spv::Id id = 0; id < idMapL.size(); ++id) {
+            if (isOldIdUnused(id))
+                continue;
+
+            // Find a new mapping for any used but unmapped IDs
+            if (isOldIdUnmapped(id))
+                localId(id, unusedId = nextUnusedId(unusedId));
+
+            if (isOldIdUnmapped(id))
+                error(std::string("old ID not mapped: ") + std::to_string(id));
+
+            // Track max bound
+            maxBound = std::max(maxBound, localId(id) + 1);
+        }
+
+        bound(maxBound); // reset header ID bound to as big as it now needs to be
+    }
+
+    // Marks all debug instructions (see isStripOp) for removal; a no-op
+    // unless the STRIP option is set.  Actual removal happens in strip().
+    void spirvbin_t::stripDebug()
+    {
+        if ((options & STRIP) == 0)
+            return;
+
+        // build local Id and name maps
+        process(
+            [&](spv::Op opCode, unsigned start) {
+                // remember opcodes we want to strip later
+                if (isStripOp(opCode))
+                    stripInst(start);
+                return true;
+            },
+            op_fn_nop);
+    }
+
+    // Single pass over the module that (re)builds all per-module tables:
+    // name map entries, function position ranges, call counts, entry point,
+    // type/const instruction positions, result-ID positions and type sizes.
+    // Also marks every referenced ID as 'unmapped' (i.e. used) in idMapL,
+    // and strips debug opcodes when the STRIP option is set.
+    void spirvbin_t::buildLocalMaps()
+    {
+        msg(2, 2, std::string("build local maps: "));
+
+        mapped.clear();
+        idMapL.clear();
+//      preserve nameMap, so we don't clear that.
+        fnPos.clear();
+        fnPosDCE.clear();
+        fnCalls.clear();
+        typeConstPos.clear();
+        idPosR.clear();
+        entryPoint = spv::NoResult;
+        largestNewId = 0;
+
+        idMapL.resize(bound(), unused);
+
+        int         fnStart = 0;
+        spv::Id     fnRes   = spv::NoResult;
+
+        // build local Id and name maps
+        process(
+            [&](spv::Op opCode, unsigned start) {
+                // remember opcodes we want to strip later
+                if ((options & STRIP) && isStripOp(opCode))
+                    stripInst(start);
+
+                unsigned word = start+1;
+                spv::Id  typeId = spv::NoResult;
+
+                if (spv::InstructionDesc[opCode].hasType())
+                    typeId = asId(word++);
+
+                // If there's a result ID, remember the size of its type
+                if (spv::InstructionDesc[opCode].hasResult()) {
+                    const spv::Id resultId = asId(word++);
+                    idPosR[resultId] = start;
+                    
+                    if (typeId != spv::NoResult) {
+                        const unsigned idTypeSize = typeSizeInWords(typeId);
+
+                        if (idTypeSize != 0)
+                            idTypeSizeMap[resultId] = idTypeSize;
+                    }
+                }
+
+                if (opCode == spv::Op::OpName) {
+                    const spv::Id    target = asId(start+1);
+                    const std::string  name = literalString(start+2);
+                    nameMap[name] = target;
+
+                } else if (opCode == spv::Op::OpFunctionCall) {
+                    ++fnCalls[asId(start + 3)];
+                } else if (opCode == spv::Op::OpEntryPoint) {
+                    entryPoint = asId(start + 2);
+                } else if (opCode == spv::Op::OpFunction) {
+                    if (fnStart != 0)
+                        error("nested function found");
+                    fnStart = start;
+                    fnRes   = asId(start + 2);
+                } else if (opCode == spv::Op::OpFunctionEnd) {
+                    assert(fnRes != spv::NoResult);
+                    if (fnStart == 0)
+                        error("function end without function start");
+                    fnPos[fnRes] = range_t(fnStart, start + asWordCount(start));
+                    fnStart = 0;
+                } else if (isConstOp(opCode)) {
+                    assert(asId(start + 2) != spv::NoResult);
+                    typeConstPos.insert(start);
+                } else if (isTypeOp(opCode)) {
+                    assert(asId(start + 1) != spv::NoResult);
+                    typeConstPos.insert(start);
+                }
+
+                // false: let process() continue scanning this instruction's IDs
+                return false;
+            },
+
+            [this](spv::Id& id) { localId(id, unmapped); }
+        );
+    }
+
+    // Validate the SPIR header
+    // Checks minimum size, magic number, and that the schema field is 0;
+    // version/generator/bound fields are not validated here.
+    void spirvbin_t::validate() const
+    {
+        msg(2, 2, std::string("validating: "));
+
+        if (spv.size() < header_size)
+            error("file too short: ");
+
+        if (magic() != spv::MagicNumber)
+            error("bad magic number");
+
+        // field 1 = version
+        // field 2 = generator magic
+        // field 3 = result <id> bound
+
+        if (schemaNum() != 0)
+            error("bad schema, must be 0");
+    }
+
+
+    // Decodes one instruction starting at 'word': first offers it to instFn
+    // (returning true claims the instruction and skips operand scanning),
+    // then walks its operands per the InstructionDesc operand-class table,
+    // invoking idFn on every ID operand.  Returns the start of the next
+    // instruction.
+    int spirvbin_t::processInstruction(unsigned word, instfn_t instFn, idfn_t idFn)
+    {
+        const auto     instructionStart = word;
+        const unsigned wordCount = asWordCount(instructionStart);
+        const spv::Op  opCode    = asOpCode(instructionStart);
+        const int      nextInst  = word++ + wordCount;
+
+        if (nextInst > int(spv.size()))
+            error("spir instruction terminated too early");
+
+        // Base for computing number of operands; will be updated as more is learned
+        unsigned numOperands = wordCount - 1;
+
+        if (instFn(opCode, instructionStart))
+            return nextInst;
+
+        // Read type and result ID from instruction desc table
+        if (spv::InstructionDesc[opCode].hasType()) {
+            idFn(asId(word++));
+            --numOperands;
+        }
+
+        if (spv::InstructionDesc[opCode].hasResult()) {
+            idFn(asId(word++));
+            --numOperands;
+        }
+
+        // Extended instructions: currently, assume everything is an ID.
+        // TODO: add whatever data we need for exceptions to that
+        if (opCode == spv::OpExtInst) {
+            word        += 2; // instruction set, and instruction from set
+            numOperands -= 2;
+
+            for (unsigned op=0; op < numOperands; ++op)
+                idFn(asId(word++)); // ID
+
+            return nextInst;
+        }
+
+        // Circular buffer so we can look back at previous unmapped values during the mapping pass.
+        // NOTE(review): holds pre-remap ID values; only the OpSwitch case below
+        // reads it (to find the selector's type while IDs are in flux).
+        static const unsigned idBufferSize = 4;
+        spv::Id idBuffer[idBufferSize];
+        unsigned idBufferPos = 0;
+
+        // Store IDs from instruction in our map
+        for (int op = 0; numOperands > 0; ++op, --numOperands) {
+            switch (spv::InstructionDesc[opCode].operands.getClass(op)) {
+            case spv::OperandId:
+            case spv::OperandScope:
+            case spv::OperandMemorySemantics:
+                idBuffer[idBufferPos] = asId(word);
+                idBufferPos = (idBufferPos + 1) % idBufferSize;
+                idFn(asId(word++));
+                break;
+
+            case spv::OperandVariableIds:
+                // Remaining operands are all IDs; consume them and finish.
+                for (unsigned i = 0; i < numOperands; ++i)
+                    idFn(asId(word++));
+                return nextInst;
+
+            case spv::OperandVariableLiterals:
+                // Remaining operands are raw literals: nothing to remap.
+                // for clarity
+                // if (opCode == spv::OpDecorate && asDecoration(word - 1) == spv::DecorationBuiltIn) {
+                //     ++word;
+                //     --numOperands;
+                // }
+                // word += numOperands;
+                return nextInst;
+
+            case spv::OperandVariableLiteralId: {
+                if (opCode == OpSwitch) {
+                    // word-2 is the position of the selector ID.  OpSwitch Literals match its type.
+                    // In case the IDs are currently being remapped, we get the word[-2] ID from
+                    // the circular idBuffer.
+                    const unsigned literalSizePos = (idBufferPos+idBufferSize-2) % idBufferSize;
+                    const unsigned literalSize = idTypeSizeInWords(idBuffer[literalSizePos]);
+                    const unsigned numLiteralIdPairs = (nextInst-word) / (1+literalSize);
+
+                    for (unsigned arg=0; arg<numLiteralIdPairs; ++arg) {
+                        word += literalSize;  // literal
+                        idFn(asId(word++));   // label
+                    }
+                } else {
+                    assert(0); // currently, only OpSwitch uses OperandVariableLiteralId
+                }
+
+                return nextInst;
+            }
+
+            case spv::OperandLiteralString: {
+                const int stringWordCount = literalStringWords(literalString(word));
+                word += stringWordCount;
+                numOperands -= (stringWordCount-1); // -1 because for() header post-decrements
+                break;
+            }
+
+            // Execution mode might have extra literal operands.  Skip them.
+            case spv::OperandExecutionMode:
+                return nextInst;
+
+            // Single word operands we simply ignore, as they hold no IDs
+            case spv::OperandLiteralNumber:
+            case spv::OperandSource:
+            case spv::OperandExecutionModel:
+            case spv::OperandAddressing:
+            case spv::OperandMemory:
+            case spv::OperandStorage:
+            case spv::OperandDimensionality:
+            case spv::OperandSamplerAddressingMode:
+            case spv::OperandSamplerFilterMode:
+            case spv::OperandSamplerImageFormat:
+            case spv::OperandImageChannelOrder:
+            case spv::OperandImageChannelDataType:
+            case spv::OperandImageOperands:
+            case spv::OperandFPFastMath:
+            case spv::OperandFPRoundingMode:
+            case spv::OperandLinkageType:
+            case spv::OperandAccessQualifier:
+            case spv::OperandFuncParamAttr:
+            case spv::OperandDecoration:
+            case spv::OperandBuiltIn:
+            case spv::OperandSelect:
+            case spv::OperandLoop:
+            case spv::OperandFunction:
+            case spv::OperandMemoryAccess:
+            case spv::OperandGroupOperation:
+            case spv::OperandKernelEnqueueFlags:
+            case spv::OperandKernelProfilingInfo:
+            case spv::OperandCapability:
+                ++word;
+                break;
+
+            default:
+                assert(0 && "Unhandled Operand Class");
+                break;
+            }
+        }
+
+        return nextInst;
+    }
+
+    // Make a pass over all the instructions and process them given appropriate functions
+    // begin/end of 0 mean "after the header" and "end of module" respectively.
+    // Returns *this to allow call chaining.
+    spirvbin_t& spirvbin_t::process(instfn_t instFn, idfn_t idFn, unsigned begin, unsigned end)
+    {
+        // For efficiency, reserve name map space.  It can grow if needed.
+        nameMap.reserve(32);
+
+        // If begin or end == 0, use defaults
+        begin = (begin == 0 ? header_size          : begin);
+        end   = (end   == 0 ? unsigned(spv.size()) : end);
+
+        // basic parsing and InstructionDesc table borrowed from SpvDisassemble.cpp...
+        unsigned nextInst = unsigned(spv.size());
+
+        for (unsigned word = begin; word < end; word = nextInst)
+            nextInst = processInstruction(word, instFn, idFn);
+
+        return *this;
+    }
+
+    // Apply global name mapping to a single module
+    // Hashes each OpName string and maps the named ID to a hash-derived slot,
+    // so identically-named objects get identical IDs across modules.
+    void spirvbin_t::mapNames()
+    {
+        static const std::uint32_t softTypeIdLimit = 3011;  // small prime.  TODO: get from options
+        static const std::uint32_t firstMappedID   = 3019;  // offset into ID space
+
+        for (const auto& name : nameMap) {
+            std::uint32_t hashval = 1911;
+            for (const char c : name.first)
+                hashval = hashval * 1009 + c;
+
+            if (isOldIdUnmapped(name.second))
+                localId(name.second, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+        }
+    }
+
+    // Map fn contents to IDs of similar functions in other modules
+    // Two passes: (1) hash a small window of surrounding opcodes around each
+    // result ID so similar code sequences get similar IDs across modules;
+    // (2) hash per-function opcode/operand counters for a set of common
+    // opcodes to map any IDs the first pass left unmapped.
+    void spirvbin_t::mapFnBodies()
+    {
+        static const std::uint32_t softTypeIdLimit = 19071;  // small prime.  TODO: get from options
+        static const std::uint32_t firstMappedID   =  6203;  // offset into ID space
+
+        // Initial approach: go through some high priority opcodes first and assign them
+        // hash values.
+
+        spv::Id               fnId       = spv::NoResult;
+        std::vector<unsigned> instPos;
+        instPos.reserve(unsigned(spv.size()) / 16); // initial estimate; can grow if needed.
+
+        // Build local table of instruction start positions
+        process(
+            [&](spv::Op, unsigned start) { instPos.push_back(start); return true; },
+            op_fn_nop);
+
+        // Window size for context-sensitive canonicalization values
+        // Empirical best size from a single data set.  TODO: Would be a good tunable.
+        // We essentially perform a little convolution around each instruction,
+        // to capture the flavor of nearby code, to hopefully match to similar
+        // code in other modules.
+        static const unsigned windowSize = 2;
+
+        for (unsigned entry = 0; entry < unsigned(instPos.size()); ++entry) {
+            const unsigned start  = instPos[entry];
+            const spv::Op  opCode = asOpCode(start);
+
+            if (opCode == spv::OpFunction)
+                fnId   = asId(start + 2);
+
+            if (opCode == spv::OpFunctionEnd)
+                fnId = spv::NoResult;
+
+            if (fnId != spv::NoResult) { // if inside a function
+                if (spv::InstructionDesc[opCode].hasResult()) {
+                    const unsigned word    = start + (spv::InstructionDesc[opCode].hasType() ? 2 : 1);
+                    const spv::Id  resId   = asId(word);
+                    std::uint32_t  hashval = fnId * 17; // small prime
+
+                    // NOTE(review): both window loops use unsigned arithmetic
+                    // (entry-1, entry-windowSize) and stop at OpFunction /
+                    // OpFunctionEnd; they appear to rely on those sentinels to
+                    // stay in bounds near function edges — confirm upstream.
+                    for (unsigned i = entry-1; i >= entry-windowSize; --i) {
+                        if (asOpCode(instPos[i]) == spv::OpFunction)
+                            break;
+                        hashval = hashval * 30103 + asOpCodeHash(instPos[i]); // 30103 = semiarbitrary prime
+                    }
+
+                    for (unsigned i = entry; i <= entry + windowSize; ++i) {
+                        if (asOpCode(instPos[i]) == spv::OpFunctionEnd)
+                            break;
+                        hashval = hashval * 30103 + asOpCodeHash(instPos[i]); // 30103 = semiarbitrary prime
+                    }
+
+                    if (isOldIdUnmapped(resId))
+                        localId(resId, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+                }
+            }
+        }
+
+        spv::Op          thisOpCode(spv::OpNop);
+        std::unordered_map<int, int> opCounter;
+        int              idCounter(0);
+        fnId = spv::NoResult;
+
+        process(
+            [&](spv::Op opCode, unsigned start) {
+                switch (opCode) {
+                case spv::OpFunction:
+                    // Reset counters at each function
+                    idCounter = 0;
+                    opCounter.clear();
+                    fnId = asId(start + 2);
+                    break;
+
+                // High-frequency opcodes whose operand IDs get counter-hashed:
+                case spv::OpImageSampleImplicitLod:
+                case spv::OpImageSampleExplicitLod:
+                case spv::OpImageSampleDrefImplicitLod:
+                case spv::OpImageSampleDrefExplicitLod:
+                case spv::OpImageSampleProjImplicitLod:
+                case spv::OpImageSampleProjExplicitLod:
+                case spv::OpImageSampleProjDrefImplicitLod:
+                case spv::OpImageSampleProjDrefExplicitLod:
+                case spv::OpDot:
+                case spv::OpCompositeExtract:
+                case spv::OpCompositeInsert:
+                case spv::OpVectorShuffle:
+                case spv::OpLabel:
+                case spv::OpVariable:
+
+                case spv::OpAccessChain:
+                case spv::OpLoad:
+                case spv::OpStore:
+                case spv::OpCompositeConstruct:
+                case spv::OpFunctionCall:
+                    ++opCounter[opCode];
+                    idCounter = 0;
+                    thisOpCode = opCode;
+                    break;
+                default:
+                    thisOpCode = spv::OpNop;
+                }
+
+                return false;
+            },
+
+            [&](spv::Id& id) {
+                if (thisOpCode != spv::OpNop) {
+                    ++idCounter;
+                    const std::uint32_t hashval = opCounter[thisOpCode] * thisOpCode * 50047 + idCounter + fnId * 117;
+
+                    if (isOldIdUnmapped(id))
+                        localId(id, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+                }
+            });
+    }
+
+    // EXPERIMENTAL: forward IO and uniform load/stores into operands
+    // This produces invalid Schema-0 SPIRV
+    // Pass 1 forwards loads of inputs/uniforms (and access chains rooted in
+    // them) directly into consumers; pass 2 does the analogous forwarding for
+    // output stores; finally strip() removes the now-dead instructions.
+    void spirvbin_t::forwardLoadStores()
+    {
+        idset_t fnLocalVars; // set of function local vars
+        idmap_t idMap;       // Map of load result IDs to what they load
+
+        // EXPERIMENTAL: Forward input and access chain loads into consumptions
+        process(
+            [&](spv::Op opCode, unsigned start) {
+                // Add inputs and uniforms to the map
+                if ((opCode == spv::OpVariable && asWordCount(start) == 4) &&
+                    (spv[start+3] == spv::StorageClassUniform ||
+                    spv[start+3] == spv::StorageClassUniformConstant ||
+                    spv[start+3] == spv::StorageClassInput))
+                    fnLocalVars.insert(asId(start+2));
+
+                if (opCode == spv::OpAccessChain && fnLocalVars.count(asId(start+3)) > 0)
+                    fnLocalVars.insert(asId(start+2));
+
+                if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) {
+                    idMap[asId(start+2)] = asId(start+3);
+                    stripInst(start);
+                }
+
+                return false;
+            },
+
+            [&](spv::Id& id) { if (idMap.find(id) != idMap.end()) id = idMap[id]; }
+        );
+
+        // EXPERIMENTAL: Implicit output stores
+        fnLocalVars.clear();
+        idMap.clear();
+
+        process(
+            [&](spv::Op opCode, unsigned start) {
+                // Add inputs and uniforms to the map
+                if ((opCode == spv::OpVariable && asWordCount(start) == 4) &&
+                    (spv[start+3] == spv::StorageClassOutput))
+                    fnLocalVars.insert(asId(start+2));
+
+                if (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) {
+                    idMap[asId(start+2)] = asId(start+1);
+                    stripInst(start);
+                }
+
+                return false;
+            },
+            op_fn_nop);
+
+        // Second sweep applies the output-store substitutions to all operands.
+        process(
+            inst_fn_nop,
+            [&](spv::Id& id) { if (idMap.find(id) != idMap.end()) id = idMap[id]; }
+        );
+
+        strip();          // strip out data we decided to eliminate
+    }
+
+    // optimize loads and stores
+    //
+    // Finds function-local pointers that are stored exactly once, never accessed
+    // through an access chain, never used outside load/store, and whose accesses
+    // stay in a single basic block; then forwards the stored value to all loads
+    // and removes the variable plus its loads/stores.
+    void spirvbin_t::optLoadStore()
+    {
+        idset_t    fnLocalVars;  // candidates for removal (only locals)
+        idmap_t    idMap;        // Map of load result IDs to what they load
+        blockmap_t blockMap;     // Map of IDs to blocks they first appear in
+        int        blockNum = 0; // block count, to avoid crossing flow control
+
+        // Find all the function local pointers stored at most once, and not via access chains
+        process(
+            [&](spv::Op opCode, unsigned start) {
+                const int wordCount = asWordCount(start);
+
+                // Count blocks, so we can avoid crossing flow control
+                if (isFlowCtrl(opCode))
+                    ++blockNum;
+
+                // Add local variables to the map
+                if ((opCode == spv::OpVariable && spv[start+3] == spv::StorageClassFunction && asWordCount(start) == 4)) {
+                    fnLocalVars.insert(asId(start+2));
+                    return true;
+                }
+
+                // Ignore process vars referenced via access chain
+                if ((opCode == spv::OpAccessChain || opCode == spv::OpInBoundsAccessChain) && fnLocalVars.count(asId(start+3)) > 0) {
+                    fnLocalVars.erase(asId(start+3));
+                    idMap.erase(asId(start+3));
+                    return true;
+                }
+
+                if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) {
+                    const spv::Id varId = asId(start+3);
+
+                    // Avoid loads before stores
+                    // (no store seen yet for this var => drop it as a candidate)
+                    if (idMap.find(varId) == idMap.end()) {
+                        fnLocalVars.erase(varId);
+                        idMap.erase(varId);
+                    }
+
+                    // don't do for volatile references
+                    if (wordCount > 4 && (spv[start+4] & spv::MemoryAccessVolatileMask)) {
+                        fnLocalVars.erase(varId);
+                        idMap.erase(varId);
+                    }
+
+                    // Handle flow control
+                    if (blockMap.find(varId) == blockMap.end()) {
+                        blockMap[varId] = blockNum;  // track block we found it in.
+                    } else if (blockMap[varId] != blockNum) {
+                        fnLocalVars.erase(varId);  // Ignore if crosses flow control
+                        idMap.erase(varId);
+                    }
+
+                    return true;
+                }
+
+                if (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) {
+                    const spv::Id varId = asId(start+1);
+
+                    if (idMap.find(varId) == idMap.end()) {
+                        idMap[varId] = asId(start+2);
+                    } else {
+                        // Remove if it has more than one store to the same pointer
+                        fnLocalVars.erase(varId);
+                        idMap.erase(varId);
+                    }
+
+                    // don't do for volatile references
+                    // NOTE(review): this tests and erases asId(start+3) (the word after
+                    // the stored object), not varId as the load case does — confirm
+                    // this asymmetry is intended.
+                    if (wordCount > 3 && (spv[start+3] & spv::MemoryAccessVolatileMask)) {
+                        fnLocalVars.erase(asId(start+3));
+                        idMap.erase(asId(start+3));
+                    }
+
+                    // Handle flow control
+                    if (blockMap.find(varId) == blockMap.end()) {
+                        blockMap[varId] = blockNum;  // track block we found it in.
+                    } else if (blockMap[varId] != blockNum) {
+                        fnLocalVars.erase(varId);  // Ignore if crosses flow control
+                        idMap.erase(varId);
+                    }
+
+                    return true;
+                }
+
+                return false;
+            },
+
+            // If local var id used anywhere else, don't eliminate
+            [&](spv::Id& id) { 
+                if (fnLocalVars.count(id) > 0) {
+                    fnLocalVars.erase(id);
+                    idMap.erase(id);
+                }
+            }
+        );
+
+        // Map each surviving load result to the value stored into its variable
+        process(
+            [&](spv::Op opCode, unsigned start) {
+                if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0)
+                    idMap[asId(start+2)] = idMap[asId(start+3)];
+                return false;
+            },
+            op_fn_nop);
+
+        // Chase replacements to their origins, in case there is a chain such as:
+        //   2 = store 1
+        //   3 = load 2
+        //   4 = store 3
+        //   5 = load 4
+        // We want to replace uses of 5 with 1.
+        for (const auto& idPair : idMap) {
+            spv::Id id = idPair.first;
+            while (idMap.find(id) != idMap.end())  // Chase to end of chain
+                id = idMap[id];
+
+            idMap[idPair.first] = id;              // replace with final result
+        }
+
+        // Remove the load/store/variables for the ones we've discovered
+        process(
+            [&](spv::Op opCode, unsigned start) {
+                if ((opCode == spv::OpLoad  && fnLocalVars.count(asId(start+3)) > 0) ||
+                    (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) ||
+                    (opCode == spv::OpVariable && fnLocalVars.count(asId(start+2)) > 0)) {
+
+                    stripInst(start);
+                    return true;
+                }
+
+                return false;
+            },
+
+            [&](spv::Id& id) {
+                if (idMap.find(id) != idMap.end()) id = idMap[id];
+            }
+        );
+
+        strip();          // strip out data we decided to eliminate
+    }
+
+    // remove bodies of uncalled functions
+    //
+    // Iterates to a fixed point: removing one dead function decrements the call
+    // counts of everything it called, which may make further functions dead.
+    void spirvbin_t::dceFuncs()
+    {
+        msg(3, 2, std::string("Removing Dead Functions: "));
+
+        // TODO: There are more efficient ways to do this.
+        bool changed = true;
+
+        while (changed) {
+            changed = false;
+
+            for (auto fn = fnPos.begin(); fn != fnPos.end(); ) {
+                if (fn->first == entryPoint) { // don't DCE away the entry point!
+                    ++fn;
+                    continue;
+                }
+
+                const auto call_it = fnCalls.find(fn->first);
+
+                // Dead if never called (no entry, or a zero count)
+                if (call_it == fnCalls.end() || call_it->second == 0) {
+                    changed = true;
+                    stripRange.push_back(fn->second);
+                    fnPosDCE.insert(*fn);
+
+                    // decrease counts of called functions
+                    // (scan only this function's body: [fn->second.first, fn->second.second))
+                    process(
+                        [&](spv::Op opCode, unsigned start) {
+                            if (opCode == spv::Op::OpFunctionCall) {
+                                const auto call_it = fnCalls.find(asId(start + 3));
+                                if (call_it != fnCalls.end()) {
+                                    if (--call_it->second <= 0)
+                                        fnCalls.erase(call_it);
+                                }
+                            }
+
+                            return true;
+                        },
+                        op_fn_nop,
+                        fn->second.first,
+                        fn->second.second);
+
+                    // erase returns the next valid iterator; don't also increment
+                    fn = fnPos.erase(fn);
+                } else ++fn;
+            }
+        }
+    }
+
+    // remove unused function variables + decorations
+    //
+    // A variable whose use count is exactly 1 (its own definition, or its
+    // OpEntryPoint interface mention) is referenced nowhere else and can be
+    // stripped along with its OpDecorate/OpName instructions.
+    void spirvbin_t::dceVars()
+    {
+        msg(3, 2, std::string("DCE Vars: "));
+
+        std::unordered_map<spv::Id, int> varUseCount;
+
+        // Count function variable use
+        process(
+            [&](spv::Op opCode, unsigned start) {
+                if (opCode == spv::OpVariable) {
+                    ++varUseCount[asId(start+2)];
+                    return true;
+                } else if (opCode == spv::OpEntryPoint) {
+                    const int wordCount = asWordCount(start);
+                    // interface IDs start at word 4 of OpEntryPoint
+                    for (int i = 4; i < wordCount; i++) {
+                        ++varUseCount[asId(start+i)];
+                    }
+                    return true;
+                } else
+                    return false;
+            },
+
+            // operator[] default-inserts 0, so only IDs seeded above (variable
+            // results / entry-point interface IDs) are ever incremented here;
+            // all other operand IDs stay at 0 and are ignored.
+            [&](spv::Id& id) { if (varUseCount[id]) ++varUseCount[id]; }
+        );
+
+        // Remove single-use function variables + associated decorations and names
+        process(
+            [&](spv::Op opCode, unsigned start) {
+                if ((opCode == spv::OpVariable && varUseCount[asId(start+2)] == 1)  ||
+                    (opCode == spv::OpDecorate && varUseCount[asId(start+1)] == 1)  ||
+                    (opCode == spv::OpName     && varUseCount[asId(start+1)] == 1)) {
+                        stripInst(start);
+                }
+                return true;
+            },
+            op_fn_nop);
+    }
+
+    // remove unused types
+    //
+    // Repeatedly strips type/constant definitions whose only reference is their
+    // own definition (use count == 1), iterating because removing one type can
+    // orphan the types it referenced.
+    void spirvbin_t::dceTypes()
+    {
+        std::vector<bool> isType(bound(), false);
+
+        // for speed, make O(1) way to get to type query (map is log(n))
+        for (const auto typeStart : typeConstPos)
+            isType[asTypeConstId(typeStart)] = true;
+
+        std::unordered_map<spv::Id, int> typeUseCount;
+
+        // This is not the most efficient algorithm, but this is an offline tool, and
+        // it's easy to write this way.  Can be improved opportunistically if needed.
+        bool changed = true;
+        while (changed) {
+            changed = false;
+            strip();   // apply pending removals so counts reflect the live binary
+            typeUseCount.clear();
+
+            // Count total type usage
+            process(inst_fn_nop,
+                    [&](spv::Id& id) { if (isType[id]) ++typeUseCount[id]; }
+                    );
+
+            // Remove single reference types
+            for (const auto typeStart : typeConstPos) {
+                const spv::Id typeId = asTypeConstId(typeStart);
+                if (typeUseCount[typeId] == 1) {
+                    changed = true;
+                    --typeUseCount[typeId];
+                    stripInst(typeStart);
+                }
+            }
+        }
+    }
+
+
+#ifdef NOTDEF
+    // Recursively compare a local type (lt) against a global type (gt):
+    // opcodes and word counts must match, then literals, constants, and
+    // sub-types are compared per the opcode's operand classification.
+    //
+    // NOTE(review): this function is compiled out (#ifdef NOTDEF) and refers to
+    // a member 'spir' that does not exist (the binary member is 'spv') — it
+    // would not compile if re-enabled without fixing that.
+    bool spirvbin_t::matchType(const spirvbin_t::globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const
+    {
+        // Find the local type id "lt" and global type id "gt"
+        const auto lt_it = typeConstPosR.find(lt);
+        if (lt_it == typeConstPosR.end())
+            return false;
+
+        const auto typeStart = lt_it->second;
+
+        // Search for entry in global table
+        const auto gtype = globalTypes.find(gt);
+        if (gtype == globalTypes.end())
+            return false;
+
+        const auto& gdata = gtype->second;
+
+        // local wordcount and opcode
+        const int     wordCount   = asWordCount(typeStart);
+        const spv::Op opCode      = asOpCode(typeStart);
+
+        // no type match if opcodes don't match, or operand count doesn't match
+        if (opCode != opOpCode(gdata[0]) || wordCount != opWordCount(gdata[0]))
+            return false;
+
+        const unsigned numOperands = wordCount - 2; // all types have a result
+
+        // Recursively compare the ID operands in the given word range
+        const auto cmpIdRange = [&](range_t range) {
+            for (int x=range.first; x<std::min(range.second, wordCount); ++x)
+                if (!matchType(globalTypes, asId(typeStart+x), gdata[x]))
+                    return false;
+            return true;
+        };
+
+        const auto cmpConst   = [&]() { return cmpIdRange(constRange(opCode)); };
+        const auto cmpSubType = [&]() { return cmpIdRange(typeRange(opCode));  };
+
+        // Compare literals in range [start,end)
+        const auto cmpLiteral = [&]() {
+            const auto range = literalRange(opCode);
+            return std::equal(spir.begin() + typeStart + range.first,
+                spir.begin() + typeStart + std::min(range.second, wordCount),
+                gdata.begin() + range.first);
+        };
+
+        assert(isTypeOp(opCode) || isConstOp(opCode));
+
+        switch (opCode) {
+        case spv::OpTypeOpaque:       // TODO: disable until we compare the literal strings.
+        case spv::OpTypeQueue:        return false;
+        case spv::OpTypeEvent:        // fall through...
+        case spv::OpTypeDeviceEvent:  // ...
+        case spv::OpTypeReserveId:    return false;
+            // for samplers, we don't handle the optional parameters yet
+        case spv::OpTypeSampler:      return cmpLiteral() && cmpConst() && cmpSubType() && wordCount == 8;
+        default:                      return cmpLiteral() && cmpConst() && cmpSubType();
+        }
+    }
+
+
+    // Look for an equivalent type in the globalTypes map
+    spv::Id spirvbin_t::findType(const spirvbin_t::globaltypes_t& globalTypes, spv::Id lt) const
+    {
+        // Walk the global type table; the first entry that recursively matches
+        // the local type "lt" wins.
+        for (auto entry = globalTypes.cbegin(); entry != globalTypes.cend(); ++entry) {
+            if (matchType(globalTypes, lt, entry->first))
+                return entry->first;
+        }
+
+        return spv::NoType;  // no equivalent global type exists
+    }
+#endif // NOTDEF
+
+    // Return start position in SPV of given Id.  error if not found.
+    //
+    // The registered error handler is a std::function supplied by the client;
+    // it may return rather than abort, so we must not dereference the end
+    // iterator after reporting the error.
+    unsigned spirvbin_t::idPos(spv::Id id) const
+    {
+        const auto tid_it = idPosR.find(id);
+        if (tid_it == idPosR.end()) {
+            error("ID not found");
+            return 0;   // avoid undefined behavior if the error handler returns
+        }
+
+        return tid_it->second;
+    }
+
+    // Hash types to canonical values.  This can return ID collisions (it's a bit
+    // inevitable): it's up to the caller to handle that gracefully.
+    //
+    // Recurses through operand type IDs via idPos(), so composite types hash as
+    // a function of their component types.  The per-opcode base constants just
+    // spread the hash space; they carry no semantic meaning.
+    std::uint32_t spirvbin_t::hashType(unsigned typeStart) const
+    {
+        const unsigned wordCount   = asWordCount(typeStart);
+        const spv::Op  opCode      = asOpCode(typeStart);
+
+        switch (opCode) {
+        case spv::OpTypeVoid:         return 0;
+        case spv::OpTypeBool:         return 1;
+        case spv::OpTypeInt:          return 3 + (spv[typeStart+3]);  // mix in signedness
+        case spv::OpTypeFloat:        return 5;
+        case spv::OpTypeVector:
+            // component type x component count
+            return 6 + hashType(idPos(spv[typeStart+2])) * (spv[typeStart+3] - 1);
+        case spv::OpTypeMatrix:
+            // column type x column count
+            return 30 + hashType(idPos(spv[typeStart+2])) * (spv[typeStart+3] - 1);
+        case spv::OpTypeImage:
+            return 120 + hashType(idPos(spv[typeStart+2])) +
+                spv[typeStart+3] +            // dimensionality
+                spv[typeStart+4] * 8 * 16 +   // depth
+                spv[typeStart+5] * 4 * 16 +   // arrayed
+                spv[typeStart+6] * 2 * 16 +   // multisampled
+                spv[typeStart+7] * 1 * 16;    // format
+        case spv::OpTypeSampler:
+            return 500;
+        case spv::OpTypeSampledImage:
+            return 502;
+        case spv::OpTypeArray:
+            return 501 + hashType(idPos(spv[typeStart+2])) * spv[typeStart+3];
+        case spv::OpTypeRuntimeArray:
+            return 5000  + hashType(idPos(spv[typeStart+2]));
+        case spv::OpTypeStruct:
+            {
+                // fold in every member type, weighted by member position
+                std::uint32_t hash = 10000;
+                for (unsigned w=2; w < wordCount; ++w)
+                    hash += w * hashType(idPos(spv[typeStart+w]));
+                return hash;
+            }
+
+        case spv::OpTypeOpaque:         return 6000 + spv[typeStart+2];
+        case spv::OpTypePointer:        return 100000  + hashType(idPos(spv[typeStart+3]));
+        case spv::OpTypeFunction:
+            {
+                // fold in return type and every parameter type
+                std::uint32_t hash = 200000;
+                for (unsigned w=2; w < wordCount; ++w)
+                    hash += w * hashType(idPos(spv[typeStart+w]));
+                return hash;
+            }
+
+        case spv::OpTypeEvent:           return 300000;
+        case spv::OpTypeDeviceEvent:     return 300001;
+        case spv::OpTypeReserveId:       return 300002;
+        case spv::OpTypeQueue:           return 300003;
+        case spv::OpTypePipe:            return 300004;
+
+        case spv::OpConstantNull:        return 300005;
+        case spv::OpConstantSampler:     return 300006;
+
+        case spv::OpConstantTrue:        return 300007;
+        case spv::OpConstantFalse:       return 300008;
+        case spv::OpConstantComposite:
+            {
+                // constants fold in their result type, then component IDs' types
+                std::uint32_t hash = 300011 + hashType(idPos(spv[typeStart+1]));
+                for (unsigned w=3; w < wordCount; ++w)
+                    hash += w * hashType(idPos(spv[typeStart+w]));
+                return hash;
+            }
+        case spv::OpConstant:
+            {
+                // scalar constants fold in the literal value words directly
+                std::uint32_t hash = 400011 + hashType(idPos(spv[typeStart+1]));
+                for (unsigned w=3; w < wordCount; ++w)
+                    hash += w * spv[typeStart+w];
+                return hash;
+            }
+
+        default:
+            error("unknown type opcode");
+            return 0;
+        }
+    }
+
+    // Map type and constant result IDs to canonical hash-derived values, so that
+    // structurally identical types get identical IDs across separately remapped
+    // modules.  Collisions are resolved by nextUnusedId(); IDs already mapped
+    // (e.g. by name) are left alone.
+    void spirvbin_t::mapTypeConst()
+    {
+        msg(3, 2, std::string("Remapping Consts & Types: "));
+
+        static const std::uint32_t softTypeIdLimit = 3011; // small prime.  TODO: get from options
+        static const std::uint32_t firstMappedID   = 8;    // offset into ID space
+
+        for (auto& typeStart : typeConstPos) {
+            const spv::Id       resId     = asTypeConstId(typeStart);
+            const std::uint32_t hashval   = hashType(typeStart);
+
+            if (isOldIdUnmapped(resId))
+                localId(resId, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+        }
+    }
+
+
+    // Strip a single binary by removing ranges given in stripRange
+    //
+    // Compacts the binary in place: words covered by any [first,second) range
+    // are dropped, everything else is shifted down, then the local maps are
+    // rebuilt against the new word positions.
+    // NOTE(review): the single ++strip_it per word assumes ranges are disjoint
+    // once sorted; overlapping or nested ranges could leave words un-stripped —
+    // confirm callers only ever push disjoint instruction ranges.
+    void spirvbin_t::strip()
+    {
+        if (stripRange.empty()) // nothing to do
+            return;
+
+        // Sort strip ranges in order of traversal
+        std::sort(stripRange.begin(), stripRange.end());
+
+        // Allocate a new binary big enough to hold old binary
+        // We'll step this iterator through the strip ranges as we go through the binary
+        auto strip_it = stripRange.begin();
+
+        int strippedPos = 0;
+        for (unsigned word = 0; word < unsigned(spv.size()); ++word) {
+            // Advance past any range that ends at or before this word
+            if (strip_it != stripRange.end() && word >= strip_it->second)
+                ++strip_it;
+
+            // Keep the word if it's outside the current strip range
+            if (strip_it == stripRange.end() || word < strip_it->first || word >= strip_it->second)
+                spv[strippedPos++] = spv[word];
+        }
+
+        spv.resize(strippedPos);
+        stripRange.clear();
+
+        // word positions changed: all position-based maps must be regenerated
+        buildLocalMaps();
+    }
+
+    // Run the remapping passes selected by 'opts' on the binary currently held
+    // in 'spv': validate, build maps, optional optimization and DCE passes,
+    // then ID canonicalization and the final ID rewrite.
+    // (Pass ordering matters: DCE runs before ID mapping so dead IDs are never
+    // assigned, and strip() is called before/after to keep positions current.)
+    void spirvbin_t::remap(std::uint32_t opts)
+    {
+        options = opts;
+
+        // Set up opcode tables from SpvDoc
+        spv::Parameterize();
+
+        validate();  // validate header
+        buildLocalMaps();
+
+        msg(3, 4, std::string("ID bound: ") + std::to_string(bound()));
+
+        strip();        // strip out data we decided to eliminate
+        if (options & OPT_LOADSTORE) optLoadStore();
+        if (options & OPT_FWD_LS)    forwardLoadStores();
+        if (options & DCE_FUNCS)     dceFuncs();
+        if (options & DCE_VARS)      dceVars();
+        if (options & DCE_TYPES)     dceTypes();
+        strip();        // strip out data we decided to eliminate
+
+        if (options & MAP_TYPES)     mapTypeConst();
+        if (options & MAP_NAMES)     mapNames();
+        if (options & MAP_FUNCS)     mapFnBodies();
+
+        mapRemainder(); // map any unmapped IDs
+        applyMap();     // Now remap each shader to the new IDs we've come up with
+    }
+
+    // remap from a memory image
+    void spirvbin_t::remap(std::vector<std::uint32_t>& in_spv, std::uint32_t opts)
+    {
+        // Adopt the caller's binary, remap it in place, then hand it back.
+        in_spv.swap(spv);
+        remap(opts);
+        in_spv.swap(spv);
+    }
+
+} // namespace SPV
+
+#endif // defined (use_cpp11)
+

+ 295 - 0
3rdparty/glslang/SPIRV/SPVRemapper.h

@@ -0,0 +1,295 @@
+//
+//Copyright (C) 2015 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef SPIRVREMAPPER_H
+#define SPIRVREMAPPER_H
+
+#include <string>
+#include <vector>
+#include <cstdlib>
+
+namespace spv {
+
+// MSVC defines __cplusplus as an older value, even when it supports almost all of 11.
+// We handle that here by making our own symbol.
+#if __cplusplus >= 201103L || _MSC_VER >= 1700
+#   define use_cpp11 1
+#endif
+
+// Base class holding the public option bit-flags; kept outside the C++11-only
+// section so non-C++11 builds can still see the Options enum.
+class spirvbin_base_t
+{
+public:
+   enum Options {
+      NONE          = 0,
+      STRIP         = (1<<0),  // strip debug info (names, source)
+      MAP_TYPES     = (1<<1),  // canonicalize type & constant IDs
+      MAP_NAMES     = (1<<2),  // map IDs by OpName strings
+      MAP_FUNCS     = (1<<3),  // map function body IDs
+      DCE_FUNCS     = (1<<4),  // remove uncalled functions
+      DCE_VARS      = (1<<5),  // remove unused variables
+      DCE_TYPES     = (1<<6),  // remove unused types
+      OPT_LOADSTORE = (1<<7),  // forward single-store locals into loads
+      OPT_FWD_LS    = (1<<8), // EXPERIMENTAL: PRODUCES INVALID SCHEMA-0 SPIRV
+      MAP_ALL       = (MAP_TYPES | MAP_NAMES | MAP_FUNCS),
+      DCE_ALL       = (DCE_FUNCS | DCE_VARS | DCE_TYPES),
+      OPT_ALL       = (OPT_LOADSTORE),
+
+      ALL_BUT_STRIP = (MAP_ALL | DCE_ALL | OPT_ALL),
+      DO_EVERYTHING = (STRIP | ALL_BUT_STRIP)
+   };
+};
+
+} // namespace SPV
+
+#if !defined (use_cpp11)
+#include <cstdio>
+#include <cstdint>
+
+namespace spv {
+// Stub used when the compiler lacks C++11 support: presents the same public
+// interface but aborts at runtime if remapping is actually requested.
+class spirvbin_t : public spirvbin_base_t
+{
+public:
+    spirvbin_t(int /*verbose = 0*/) { }
+
+    void remap(std::vector<std::uint32_t>& /*spv*/, unsigned int /*opts = 0*/)
+    {
+        printf("Tool not compiled for C++11, which is required for SPIR-V remapping.\n");
+        exit(5);
+    }
+};
+
+} // namespace SPV
+
+#else // defined (use_cpp11)
+
+#include <functional>
+#include <cstdint>
+#include <unordered_map>
+#include <unordered_set>
+#include <map>
+#include <set>
+#include <cassert>
+
+#include "spirv.hpp"
+#include "spvIR.h"
+
+namespace spv {
+
+// class to hold SPIR-V binary data for remapping, DCE, and debug stripping
+class spirvbin_t : public spirvbin_base_t
+{
+public:
+   spirvbin_t(int verbose = 0) : entryPoint(spv::NoResult), largestNewId(0), verbose(verbose) { }
+   
+   // remap on an existing binary in memory
+   void remap(std::vector<std::uint32_t>& spv, std::uint32_t opts = DO_EVERYTHING);
+
+   // Type for error/log handler functions
+   typedef std::function<void(const std::string&)> errorfn_t;
+   typedef std::function<void(const std::string&)> logfn_t;
+
+   // Register error/log handling functions (can be lambda fn / functor / etc)
+   static void registerErrorHandler(errorfn_t handler) { errorHandler = handler; }
+   static void registerLogHandler(logfn_t handler)     { logHandler   = handler; }
+
+protected:
+   // This can be overridden to provide other message behavior if needed
+   virtual void msg(int minVerbosity, int indent, const std::string& txt) const;
+
+private:
+   // Local to global, or global to local ID map
+   typedef std::unordered_map<spv::Id, spv::Id> idmap_t;
+   typedef std::unordered_set<spv::Id>          idset_t;
+   typedef std::unordered_map<spv::Id, int>     blockmap_t;
+
+   void remap(std::uint32_t opts = DO_EVERYTHING);
+
+   // Map of names to IDs
+   typedef std::unordered_map<std::string, spv::Id> namemap_t;
+
+   typedef std::uint32_t spirword_t;
+
+   typedef std::pair<unsigned, unsigned> range_t;
+   typedef std::function<void(spv::Id&)>                idfn_t;
+   typedef std::function<bool(spv::Op, unsigned start)> instfn_t;
+
+   // Special Values for ID map:
+   static const spv::Id unmapped;     // unchanged from default value
+   static const spv::Id unused;       // unused ID
+   static const int     header_size;  // SPIR header = 5 words
+
+   class id_iterator_t;
+
+   // For mapping type entries between different shaders
+   typedef std::vector<spirword_t>        typeentry_t;
+   typedef std::map<spv::Id, typeentry_t> globaltypes_t;
+
+   // A set that preserves position order, and a reverse map
+   typedef std::set<int>                    posmap_t;
+   typedef std::unordered_map<spv::Id, int> posmap_rev_t;
+
+   // Maps an ID to the size of its base type, if known.
+   typedef std::unordered_map<spv::Id, unsigned> typesize_map_t;
+
+   // handle error
+   void error(const std::string& txt) const { errorHandler(txt); }
+
+   // Opcode classification and operand-range queries (implemented in .cpp)
+   bool     isConstOp(spv::Op opCode)      const;
+   bool     isTypeOp(spv::Op opCode)       const;
+   bool     isStripOp(spv::Op opCode)      const;
+   bool     isFlowCtrl(spv::Op opCode)     const;
+   range_t  literalRange(spv::Op opCode)   const;
+   range_t  typeRange(spv::Op opCode)      const;
+   range_t  constRange(spv::Op opCode)     const;
+   unsigned typeSizeInWords(spv::Id id)    const;
+   unsigned idTypeSizeInWords(spv::Id id)  const;
+   
+   // Raw word accessors: reinterpret a word of the binary at the given position
+   spv::Id&        asId(unsigned word)                { return spv[word]; }
+   const spv::Id&  asId(unsigned word)          const { return spv[word]; }
+   spv::Op         asOpCode(unsigned word)      const { return opOpCode(spv[word]); }
+   std::uint32_t   asOpCodeHash(unsigned word);
+   spv::Decoration asDecoration(unsigned word)  const { return spv::Decoration(spv[word]); }
+   unsigned        asWordCount(unsigned word)   const { return opWordCount(spv[word]); }
+   spv::Id         asTypeConstId(unsigned word) const { return asId(word + (isTypeOp(asOpCode(word)) ? 1 : 2)); }
+   unsigned        idPos(spv::Id id)            const;
+
+   static unsigned opWordCount(spirword_t data) { return data >> spv::WordCountShift; }
+   static spv::Op  opOpCode(spirword_t data)    { return spv::Op(data & spv::OpCodeMask); }
+
+   // Header access & set methods
+   spirword_t  magic()    const       { return spv[0]; } // return magic number
+   spirword_t  bound()    const       { return spv[3]; } // return Id bound from header
+   spirword_t  bound(spirword_t b)    { return spv[3] = b; };
+   spirword_t  genmagic() const       { return spv[2]; } // generator magic
+   spirword_t  genmagic(spirword_t m) { return spv[2] = m; }
+   spirword_t  schemaNum() const      { return spv[4]; } // schema number from header
+
+   // Mapping fns: get
+   spv::Id     localId(spv::Id id) const { return idMapL[id]; }
+
+   // Mapping fns: set
+   inline spv::Id   localId(spv::Id id, spv::Id newId);
+   void             countIds(spv::Id id);
+
+   // Return next unused new local ID.
+   // NOTE: boost::dynamic_bitset would be more efficient due to find_next(),
+   // which std::vector<bool> doesn't have.
+   inline spv::Id   nextUnusedId(spv::Id id);
+
+   void buildLocalMaps();
+   std::string literalString(unsigned word) const; // Return literal as a std::string
+   int literalStringWords(const std::string& str) const { return (int(str.size())+4)/4; }
+
+   bool isNewIdMapped(spv::Id newId)   const { return isMapped(newId);            }
+   bool isOldIdUnmapped(spv::Id oldId) const { return localId(oldId) == unmapped; }
+   bool isOldIdUnused(spv::Id oldId)   const { return localId(oldId) == unused;   }
+   bool isOldIdMapped(spv::Id oldId)   const { return !isOldIdUnused(oldId) && !isOldIdUnmapped(oldId); }
+   bool isFunction(spv::Id oldId)      const { return fnPos.find(oldId) != fnPos.end(); }
+
+   // bool    matchType(const globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const;
+   // spv::Id findType(const globaltypes_t& globalTypes, spv::Id lt) const;
+   std::uint32_t hashType(unsigned typeStart) const;
+
+   // Core traversal: walk instructions in [begin,end), invoking the instruction
+   // callback and, unless it returns true, the per-ID callback on each operand.
+   spirvbin_t& process(instfn_t, idfn_t, unsigned begin = 0, unsigned end = 0);
+   int         processInstruction(unsigned word, instfn_t, idfn_t);
+
+   void        validate() const;
+   void        mapTypeConst();
+   void        mapFnBodies();
+   void        optLoadStore();
+   void        dceFuncs();
+   void        dceVars();
+   void        dceTypes();
+   void        mapNames();
+   void        foldIds();  // fold IDs to smallest space
+   void        forwardLoadStores(); // load store forwarding (EXPERIMENTAL)
+   void        offsetIds(); // create relative offset IDs
+
+   void        applyMap();            // remap per local name map
+   void        mapRemainder();        // map any IDs we haven't touched yet
+   void        stripDebug();          // strip debug info
+   void        strip();               // remove debug symbols
+   
+   std::vector<spirword_t> spv;      // SPIR words
+
+   namemap_t               nameMap;  // ID names from OpName
+
+   // Since we want to also do binary ops, we can't use std::vector<bool>.  we could use
+   // boost::dynamic_bitset, but we're trying to avoid a boost dependency.
+   typedef std::uint64_t bits_t;
+   std::vector<bits_t> mapped; // which new IDs have been mapped
+   // NOTE(review): sizeof(bits_t) * 4 == 32, so only the low half of each
+   // 64-bit word is used.  Consistent (isMapped/setMapped agree) but wastes
+   // half the bitmap — confirm whether * 8 was intended.
+   static const int mBits = sizeof(bits_t) * 4;
+
+   bool isMapped(spv::Id id) const  { return id < maxMappedId() && ((mapped[id/mBits] & (1LL<<(id%mBits))) != 0); }
+   void setMapped(spv::Id id) { resizeMapped(id); mapped[id/mBits] |= (1LL<<(id%mBits)); }
+   void resizeMapped(spv::Id id) { if (id >= maxMappedId()) mapped.resize(id/mBits+1, 0); }
+   size_t maxMappedId() const { return mapped.size() * mBits; }
+
+   // Add a strip range for a given instruction starting at 'start'
+   // Note: avoiding brace initializers to please older versions of MSVC.
+   void stripInst(unsigned start) { stripRange.push_back(range_t(start, start + asWordCount(start))); }
+
+   // Function start and end.  use unordered_map because we'll have
+   // many fewer functions than IDs.
+   std::unordered_map<spv::Id, range_t> fnPos;
+   std::unordered_map<spv::Id, range_t> fnPosDCE; // deleted functions
+
+   // Which functions are called, anywhere in the module, with a call count
+   std::unordered_map<spv::Id, int> fnCalls;
+   
+   posmap_t       typeConstPos;  // word positions that define types & consts (ordered)
+   posmap_rev_t   idPosR;        // reverse map from IDs to positions
+   typesize_map_t idTypeSizeMap; // maps each ID to its type size, if known.
+   
+   std::vector<spv::Id>  idMapL;   // ID {M}ap from {L}ocal to {G}lobal IDs
+
+   spv::Id entryPoint;      // module entry point
+   spv::Id largestNewId;    // biggest new ID we have mapped anything to
+
+   // Sections of the binary to strip, given as [begin,end)
+   std::vector<range_t> stripRange;
+
+   // processing options:
+   std::uint32_t options;
+   int           verbose;     // verbosity level
+
+   static errorfn_t errorHandler;
+   static logfn_t   logHandler;
+};
+
+} // namespace SPV
+
+#endif // defined (use_cpp11)
+#endif // SPIRVREMAPPER_H

+ 2542 - 0
3rdparty/glslang/SPIRV/SpvBuilder.cpp

@@ -0,0 +1,2542 @@
+//
+//Copyright (C) 2014-2015 LunarG, Inc.
+//Copyright (C) 2015-2016 Google, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Helper for making SPIR-V IR.  Generally, this is documented in the header
+// SpvBuilder.h.
+//
+
+#include <cassert>
+#include <cstdlib>
+
+#include <unordered_set>
+#include <algorithm>
+
+#include "SpvBuilder.h"
+
+#ifdef AMD_EXTENSIONS
+    #include "hex_float.h"
+#endif
+
+#ifndef _WIN32
+    #include <cstdio>
+#endif
+
+namespace spv {
+
+// Construct a builder for one SPIR-V module.
+// 'magicNumber' is the generator's magic word recorded in the module header;
+// 'buildLogger' receives diagnostics produced while building.
+Builder::Builder(unsigned int magicNumber, SpvBuildLogger* buildLogger) :
+    source(SourceLanguageUnknown),
+    sourceVersion(0),
+    addressModel(AddressingModelLogical),
+    memoryModel(MemoryModelGLSL450),
+    builderNumber(magicNumber),
+    buildPoint(0),
+    uniqueId(0),
+    mainFunction(0),
+    generatingOpCodeForSpecConst(false),
+    logger(buildLogger)
+{
+    // Start with an empty/clean access chain state.
+    clearAccessChain();
+}
+
+// Nothing explicit to release; owned instructions are held in
+// unique_ptr containers and clean up automatically.
+Builder::~Builder()
+{
+}
+
+// Import an extended-instruction set by name; returns the result <id>
+// of the generated OpExtInstImport instruction.
+Id Builder::import(const char* name)
+{
+    Instruction* importInst = new Instruction(getUniqueId(), NoType, OpExtInstImport);
+    importInst->addStringOperand(name);
+
+    const Id resultId = importInst->getResultId();
+    imports.push_back(std::unique_ptr<Instruction>(importInst));
+
+    return resultId;
+}
+
+// For creating new groupedTypes (will return old type if the requested one was already made).
+// Return the module's single OpTypeVoid type, creating it on first use.
+Id Builder::makeVoidType()
+{
+    auto& existing = groupedTypes[OpTypeVoid];
+    if (! existing.empty())
+        return existing.back()->getResultId();
+
+    // first request: create and register the type
+    Instruction* voidType = new Instruction(getUniqueId(), NoType, OpTypeVoid);
+    existing.push_back(voidType);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(voidType));
+    module.mapInstruction(voidType);
+
+    return voidType->getResultId();
+}
+
+// Return the module's single OpTypeBool type, creating it on first use.
+Id Builder::makeBoolType()
+{
+    Instruction* type;
+    if (groupedTypes[OpTypeBool].size() == 0) {
+        type = new Instruction(getUniqueId(), NoType, OpTypeBool);
+        groupedTypes[OpTypeBool].push_back(type);
+        constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+        module.mapInstruction(type);
+    } else
+        type = groupedTypes[OpTypeBool].back();
+
+    return type->getResultId();
+}
+
+// Return the module's single OpTypeSampler type, creating it on first use.
+Id Builder::makeSamplerType()
+{
+    Instruction* type;
+    if (groupedTypes[OpTypeSampler].size() == 0) {
+        type = new Instruction(getUniqueId(), NoType, OpTypeSampler);
+        groupedTypes[OpTypeSampler].push_back(type);
+        constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+        module.mapInstruction(type);
+    } else
+        type = groupedTypes[OpTypeSampler].back();
+
+    return type->getResultId();
+}
+
+// Return an OpTypePointer type of (storageClass, pointee), reusing an
+// existing identical one when available.
+Id Builder::makePointer(StorageClass storageClass, Id pointee)
+{
+    // reuse an existing matching pointer type, if any
+    for (size_t t = 0; t < groupedTypes[OpTypePointer].size(); ++t) {
+        Instruction* candidate = groupedTypes[OpTypePointer][t];
+        if (candidate->getImmediateOperand(0) == (unsigned)storageClass &&
+            candidate->getIdOperand(1) == pointee)
+            return candidate->getResultId();
+    }
+
+    // none found; create and register a new one
+    Instruction* type = new Instruction(getUniqueId(), NoType, OpTypePointer);
+    type->addImmediateOperand(storageClass);
+    type->addIdOperand(pointee);
+    groupedTypes[OpTypePointer].push_back(type);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+    module.mapInstruction(type);
+
+    return type->getResultId();
+}
+
+// Return an OpTypeInt of the given bit 'width' and signedness, reusing an
+// existing identical one when available.  Declares the Int16/Int64
+// capability when a non-32-bit width is first used.
+Id Builder::makeIntegerType(int width, bool hasSign)
+{
+    // try to find it
+    Instruction* type;
+    for (int t = 0; t < (int)groupedTypes[OpTypeInt].size(); ++t) {
+        type = groupedTypes[OpTypeInt][t];
+        if (type->getImmediateOperand(0) == (unsigned)width &&
+            type->getImmediateOperand(1) == (hasSign ? 1u : 0u))
+            return type->getResultId();
+    }
+
+    // not found, make it
+    type = new Instruction(getUniqueId(), NoType, OpTypeInt);
+    type->addImmediateOperand(width);
+    type->addImmediateOperand(hasSign ? 1 : 0);
+    groupedTypes[OpTypeInt].push_back(type);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+    module.mapInstruction(type);
+
+    // deal with capabilities
+    switch (width) {
+    case 16:
+        addCapability(CapabilityInt16);
+        break;
+    case 64:
+        addCapability(CapabilityInt64);
+        break;
+    default:
+        break;
+    }
+
+    return type->getResultId();
+}
+
+// Return an OpTypeFloat of the given bit 'width', reusing an existing one
+// when available.  Declares the Float16/Float64 capability when a
+// non-32-bit width is first used.
+Id Builder::makeFloatType(int width)
+{
+    // try to find it
+    Instruction* type;
+    for (int t = 0; t < (int)groupedTypes[OpTypeFloat].size(); ++t) {
+        type = groupedTypes[OpTypeFloat][t];
+        if (type->getImmediateOperand(0) == (unsigned)width)
+            return type->getResultId();
+    }
+
+    // not found, make it
+    type = new Instruction(getUniqueId(), NoType, OpTypeFloat);
+    type->addImmediateOperand(width);
+    groupedTypes[OpTypeFloat].push_back(type);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+    module.mapInstruction(type);
+
+    // deal with capabilities
+    switch (width) {
+    case 16:
+        addCapability(CapabilityFloat16);
+        break;
+    case 64:
+        addCapability(CapabilityFloat64);
+        break;
+    default:
+        break;
+    }
+
+    return type->getResultId();
+}
+
+// Make a struct without checking for duplication.
+// See makeStructResultType() for non-decorated structs
+// needed as the result of some instructions, which does
+// check for duplicates.
+Id Builder::makeStructType(const std::vector<Id>& members, const char* name)
+{
+    // Don't look for a previous one, because in the general case,
+    // structs can be duplicated except for decorations.
+
+    // always make a new struct type
+    Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeStruct);
+    for (int op = 0; op < (int)members.size(); ++op)
+        type->addIdOperand(members[op]);
+    groupedTypes[OpTypeStruct].push_back(type);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+    module.mapInstruction(type);
+    // attach the debug name to the new type
+    addName(type->getResultId(), name);
+
+    return type->getResultId();
+}
+
+// Make (or reuse) a two-member struct type used as the composite result
+// of certain instructions, checking existing struct types for an exact
+// {type0, type1} match before creating a new one.
+Id Builder::makeStructResultType(Id type0, Id type1)
+{
+    // search existing struct types for an exact two-member match
+    for (size_t t = 0; t < groupedTypes[OpTypeStruct].size(); ++t) {
+        Instruction* candidate = groupedTypes[OpTypeStruct][t];
+        if (candidate->getNumOperands() == 2 &&
+            candidate->getIdOperand(0) == type0 &&
+            candidate->getIdOperand(1) == type1)
+            return candidate->getResultId();
+    }
+
+    // no match; build a fresh struct named "ResType"
+    std::vector<spv::Id> members;
+    members.push_back(type0);
+    members.push_back(type1);
+
+    return makeStructType(members, "ResType");
+}
+
+// Return an OpTypeVector of 'size' components of type 'component',
+// reusing an existing identical one when available.
+Id Builder::makeVectorType(Id component, int size)
+{
+    // try to find it
+    Instruction* type;
+    for (int t = 0; t < (int)groupedTypes[OpTypeVector].size(); ++t) {
+        type = groupedTypes[OpTypeVector][t];
+        if (type->getIdOperand(0) == component &&
+            type->getImmediateOperand(1) == (unsigned)size)
+            return type->getResultId();
+    }
+
+    // not found, make it
+    type = new Instruction(getUniqueId(), NoType, OpTypeVector);
+    type->addIdOperand(component);
+    type->addImmediateOperand(size);
+    groupedTypes[OpTypeVector].push_back(type);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+    module.mapInstruction(type);
+
+    return type->getResultId();
+}
+
+// Return an OpTypeMatrix of 'cols' columns, each a vector of 'rows'
+// components of 'component'; reuses an existing identical type.
+Id Builder::makeMatrixType(Id component, int cols, int rows)
+{
+    assert(cols <= maxMatrixSize && rows <= maxMatrixSize);
+
+    // the column type (a vector) is created/reused first
+    Id column = makeVectorType(component, rows);
+
+    // try to find it
+    Instruction* type;
+    for (int t = 0; t < (int)groupedTypes[OpTypeMatrix].size(); ++t) {
+        type = groupedTypes[OpTypeMatrix][t];
+        if (type->getIdOperand(0) == column &&
+            type->getImmediateOperand(1) == (unsigned)cols)
+            return type->getResultId();
+    }
+
+    // not found, make it
+    type = new Instruction(getUniqueId(), NoType, OpTypeMatrix);
+    type->addIdOperand(column);
+    type->addImmediateOperand(cols);
+    groupedTypes[OpTypeMatrix].push_back(type);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+    module.mapInstruction(type);
+
+    return type->getResultId();
+}
+
+// TODO: performance: track arrays per stride
+// If a stride is supplied (non-zero) make an array.
+// If no stride (0), reuse previous array types.
+// 'size' is an Id of a constant or specialization constant of the array size
+Id Builder::makeArrayType(Id element, Id sizeId, int stride)
+{
+    Instruction* type;
+    if (stride == 0) {
+        // try to find existing type; only stride-less arrays are deduplicated
+        for (int t = 0; t < (int)groupedTypes[OpTypeArray].size(); ++t) {
+            type = groupedTypes[OpTypeArray][t];
+            if (type->getIdOperand(0) == element &&
+                type->getIdOperand(1) == sizeId)
+                return type->getResultId();
+        }
+    }
+
+    // not found (or a stride was given), make a new one
+    type = new Instruction(getUniqueId(), NoType, OpTypeArray);
+    type->addIdOperand(element);
+    type->addIdOperand(sizeId);
+    groupedTypes[OpTypeArray].push_back(type);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+    module.mapInstruction(type);
+
+    return type->getResultId();
+}
+
+// Make an OpTypeRuntimeArray of 'element'.  Runtime arrays are never
+// deduplicated; each call creates a fresh type instruction.
+Id Builder::makeRuntimeArray(Id element)
+{
+    Instruction* arrayType = new Instruction(getUniqueId(), NoType, OpTypeRuntimeArray);
+    arrayType->addIdOperand(element);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(arrayType));
+    module.mapInstruction(arrayType);
+
+    return arrayType->getResultId();
+}
+
+// Return an OpTypeFunction with the given return and parameter types,
+// reusing an existing identical one when available.
+Id Builder::makeFunctionType(Id returnType, const std::vector<Id>& paramTypes)
+{
+    // try to find it
+    Instruction* type;
+    for (int t = 0; t < (int)groupedTypes[OpTypeFunction].size(); ++t) {
+        type = groupedTypes[OpTypeFunction][t];
+        // operand 0 is the return type; operands 1..N are parameter types
+        if (type->getIdOperand(0) != returnType || (int)paramTypes.size() != type->getNumOperands() - 1)
+            continue;
+        bool mismatch = false;
+        for (int p = 0; p < (int)paramTypes.size(); ++p) {
+            if (paramTypes[p] != type->getIdOperand(p + 1)) {
+                mismatch = true;
+                break;
+            }
+        }
+        if (! mismatch)
+            return type->getResultId();
+    }
+
+    // not found, make it
+    type = new Instruction(getUniqueId(), NoType, OpTypeFunction);
+    type->addIdOperand(returnType);
+    for (int p = 0; p < (int)paramTypes.size(); ++p)
+        type->addIdOperand(paramTypes[p]);
+    groupedTypes[OpTypeFunction].push_back(type);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+    module.mapInstruction(type);
+
+    return type->getResultId();
+}
+
+// Return an OpTypeImage with the given properties, reusing an existing
+// identical one when available.  Also declares the capabilities implied
+// by the dimensionality/multisampling combination.
+Id Builder::makeImageType(Id sampledType, Dim dim, bool depth, bool arrayed, bool ms, unsigned sampled, ImageFormat format)
+{
+    // try to find it: all seven operands must match
+    Instruction* type;
+    for (int t = 0; t < (int)groupedTypes[OpTypeImage].size(); ++t) {
+        type = groupedTypes[OpTypeImage][t];
+        if (type->getIdOperand(0) == sampledType &&
+            type->getImmediateOperand(1) == (unsigned int)dim &&
+            type->getImmediateOperand(2) == (  depth ? 1u : 0u) &&
+            type->getImmediateOperand(3) == (arrayed ? 1u : 0u) &&
+            type->getImmediateOperand(4) == (     ms ? 1u : 0u) &&
+            type->getImmediateOperand(5) == sampled &&
+            type->getImmediateOperand(6) == (unsigned int)format)
+            return type->getResultId();
+    }
+
+    // not found, make it
+    type = new Instruction(getUniqueId(), NoType, OpTypeImage);
+    type->addIdOperand(sampledType);
+    type->addImmediateOperand(   dim);
+    type->addImmediateOperand(  depth ? 1 : 0);
+    type->addImmediateOperand(arrayed ? 1 : 0);
+    type->addImmediateOperand(     ms ? 1 : 0);
+    type->addImmediateOperand(sampled);
+    type->addImmediateOperand((unsigned int)format);
+
+    groupedTypes[OpTypeImage].push_back(type);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+    module.mapInstruction(type);
+
+    // deal with capabilities: each dim/sampled combination implies one
+    switch (dim) {
+    case DimBuffer:
+        if (sampled)
+            addCapability(CapabilitySampledBuffer);
+        else
+            addCapability(CapabilityImageBuffer);
+        break;
+    case Dim1D:
+        if (sampled)
+            addCapability(CapabilitySampled1D);
+        else
+            addCapability(CapabilityImage1D);
+        break;
+    case DimCube:
+        // plain (non-arrayed) cube maps need no extra capability
+        if (arrayed) {
+            if (sampled)
+                addCapability(CapabilitySampledCubeArray);
+            else
+                addCapability(CapabilityImageCubeArray);
+        }
+        break;
+    case DimRect:
+        if (sampled)
+            addCapability(CapabilitySampledRect);
+        else
+            addCapability(CapabilityImageRect);
+        break;
+    case DimSubpassData:
+        addCapability(CapabilityInputAttachment);
+        break;
+    default:
+        break;
+    }
+
+    if (ms) {
+        if (arrayed)
+            addCapability(CapabilityImageMSArray);
+        if (! sampled)
+            addCapability(CapabilityStorageImageMultisample);
+    }
+
+    return type->getResultId();
+}
+
+// Return an OpTypeSampledImage wrapping 'imageType', reusing an
+// existing identical one when available.
+Id Builder::makeSampledImageType(Id imageType)
+{
+    // reuse an existing sampled-image type over the same image type
+    for (size_t t = 0; t < groupedTypes[OpTypeSampledImage].size(); ++t) {
+        Instruction* candidate = groupedTypes[OpTypeSampledImage][t];
+        if (candidate->getIdOperand(0) == imageType)
+            return candidate->getResultId();
+    }
+
+    // none found; create and register a new one
+    Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeSampledImage);
+    type->addIdOperand(imageType);
+
+    groupedTypes[OpTypeSampledImage].push_back(type);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+    module.mapInstruction(type);
+
+    return type->getResultId();
+}
+
+// Return the type pointed to by the (pointer-typed) result 'resultId',
+// i.e. strip one level of OpTypePointer.
+Id Builder::getDerefTypeId(Id resultId) const
+{
+    Id typeId = getTypeId(resultId);
+    assert(isPointerType(typeId));
+
+    // Operand 1 of OpTypePointer is the pointee <id>; read it as an Id
+    // operand (it is an <id>, not a literal) — consistent with how
+    // makePointer() compares the same operand.
+    return module.getInstruction(typeId)->getIdOperand(1);
+}
+
+// Recursively find the opcode of the most basic (deepest scalar/struct)
+// type underlying 'typeId', e.g. a pointer to an array of vec4 -> OpTypeFloat.
+Op Builder::getMostBasicTypeClass(Id typeId) const
+{
+    Instruction* instr = module.getInstruction(typeId);
+
+    Op typeClass = instr->getOpCode();
+    switch (typeClass)
+    {
+    case OpTypeVoid:
+    case OpTypeBool:
+    case OpTypeInt:
+    case OpTypeFloat:
+    case OpTypeStruct:
+        return typeClass;
+    case OpTypeVector:
+    case OpTypeMatrix:
+    case OpTypeArray:
+    case OpTypeRuntimeArray:
+        // operand 0 is the contained type
+        return getMostBasicTypeClass(instr->getIdOperand(0));
+    case OpTypePointer:
+        // operand 1 is the pointee type (operand 0 is the storage class)
+        return getMostBasicTypeClass(instr->getIdOperand(1));
+    default:
+        assert(0);
+        return OpTypeFloat;
+    }
+}
+
+// Return the number of top-level constituents of 'typeId':
+// components for vectors, columns for matrices, elements for arrays,
+// members for structs; scalars count as 1.
+int Builder::getNumTypeConstituents(Id typeId) const
+{
+    Instruction* instr = module.getInstruction(typeId);
+
+    switch (instr->getOpCode())
+    {
+    case OpTypeBool:
+    case OpTypeInt:
+    case OpTypeFloat:
+        return 1;
+    case OpTypeVector:
+    case OpTypeMatrix:
+        // component/column count is a literal operand
+        return instr->getImmediateOperand(1);
+    case OpTypeArray:
+    {
+        // The array length is the <id> of a constant instruction, so read
+        // it as an Id operand, then fetch the constant's literal value.
+        Id lengthId = instr->getIdOperand(1);
+        return module.getInstruction(lengthId)->getImmediateOperand(0);
+    }
+    case OpTypeStruct:
+        return instr->getNumOperands();
+    default:
+        assert(0);
+        return 1;
+    }
+}
+
+// Return the lowest-level type of scalar that an homogeneous composite is made out of.
+// Typically, this is just to find out if something is made out of ints or floats.
+// However, it includes returning a structure, if say, it is an array of structure.
+Id Builder::getScalarTypeId(Id typeId) const
+{
+    Instruction* instr = module.getInstruction(typeId);
+
+    Op typeClass = instr->getOpCode();
+    switch (typeClass)
+    {
+    case OpTypeVoid:
+    case OpTypeBool:
+    case OpTypeInt:
+    case OpTypeFloat:
+    case OpTypeStruct:
+        // already as basic as it gets
+        return instr->getResultId();
+    case OpTypeVector:
+    case OpTypeMatrix:
+    case OpTypeArray:
+    case OpTypeRuntimeArray:
+    case OpTypePointer:
+        // recurse into the contained/pointee type
+        return getScalarTypeId(getContainedTypeId(typeId));
+    default:
+        assert(0);
+        return NoResult;
+    }
+}
+
+// Return the type of 'member' of a composite.
+// 'member' is only meaningful for structs; for the other composites the
+// contained type is the same for every member.
+Id Builder::getContainedTypeId(Id typeId, int member) const
+{
+    Instruction* instr = module.getInstruction(typeId);
+
+    Op typeClass = instr->getOpCode();
+    switch (typeClass)
+    {
+    case OpTypeVector:
+    case OpTypeMatrix:
+    case OpTypeArray:
+    case OpTypeRuntimeArray:
+        return instr->getIdOperand(0);
+    case OpTypePointer:
+        // operand 0 is the storage class; operand 1 is the pointee
+        return instr->getIdOperand(1);
+    case OpTypeStruct:
+        return instr->getIdOperand(member);
+    default:
+        assert(0);
+        return NoResult;
+    }
+}
+
+// Convenience overload: the immediately contained type is member 0.
+Id Builder::getContainedTypeId(Id typeId) const
+{
+    const int firstMember = 0;
+    return getContainedTypeId(typeId, firstMember);
+}
+
+// See if a scalar constant of this type has already been created, so it
+// can be reused rather than duplicated.  (Required by the specification).
+// Returns the existing constant's result <id>, or 0 when none matches.
+Id Builder::findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value) const
+{
+    Instruction* constant;
+    for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) {
+        constant = groupedConstants[typeClass][i];
+        if (constant->getOpCode() == opcode &&
+            constant->getTypeId() == typeId &&
+            constant->getImmediateOperand(0) == value)
+            return constant->getResultId();
+    }
+
+    return 0;
+}
+
+// Version of findScalarConstant (see above) for scalars that take two operands (e.g. a 'double' or 'int64').
+// Returns the existing constant's result <id>, or 0 when none matches.
+Id Builder::findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2) const
+{
+    Instruction* constant;
+    for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) {
+        constant = groupedConstants[typeClass][i];
+        if (constant->getOpCode() == opcode &&
+            constant->getTypeId() == typeId &&
+            constant->getImmediateOperand(0) == v1 &&
+            constant->getImmediateOperand(1) == v2)
+            return constant->getResultId();
+    }
+
+    return 0;
+}
+
+// Return true if consuming 'opcode' means consuming a constant.
+// "constant" here means after final transform to executable code,
+// the value consumed will be a constant, so includes specialization.
+bool Builder::isConstantOpCode(Op opcode) const
+{
+    switch (opcode) {
+    case OpUndef:
+    case OpConstantTrue:
+    case OpConstantFalse:
+    case OpConstant:
+    case OpConstantComposite:
+    case OpConstantSampler:
+    case OpConstantNull:
+        return true;
+    default:
+        // the specialization-constant opcodes also count
+        return isSpecConstantOpCode(opcode);
+    }
+}
+
+// Return true if consuming 'opcode' means consuming a specialization constant.
+bool Builder::isSpecConstantOpCode(Op opcode) const
+{
+    const bool isSpec = opcode == OpSpecConstantTrue      ||
+                        opcode == OpSpecConstantFalse     ||
+                        opcode == OpSpecConstant          ||
+                        opcode == OpSpecConstantComposite ||
+                        opcode == OpSpecConstantOp;
+
+    return isSpec;
+}
+
+// Make (or reuse) an OpConstantTrue/False, or a distinct
+// OpSpecConstantTrue/False when 'specConstant' is set.
+Id Builder::makeBoolConstant(bool b, bool specConstant)
+{
+    Id typeId = makeBoolType();
+    Instruction* constant;
+    Op opcode = specConstant ? (b ? OpSpecConstantTrue : OpSpecConstantFalse) : (b ? OpConstantTrue : OpConstantFalse);
+
+    // See if we already made it. Applies only to regular constants, because specialization constants
+    // must remain distinct for the purpose of applying a SpecId decoration.
+    if (! specConstant) {
+        Id existing = 0;
+        for (int i = 0; i < (int)groupedConstants[OpTypeBool].size(); ++i) {
+            constant = groupedConstants[OpTypeBool][i];
+            if (constant->getTypeId() == typeId && constant->getOpCode() == opcode)
+                existing = constant->getResultId();
+        }
+
+        if (existing)
+            return existing;
+    }
+
+    // Make it
+    Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+    groupedConstants[OpTypeBool].push_back(c);
+    module.mapInstruction(c);
+
+    return c->getResultId();
+}
+
+// Make (or, for non-spec constants, reuse) a 32-bit integer constant of
+// type 'typeId' with bit pattern 'value'.
+Id Builder::makeIntConstant(Id typeId, unsigned value, bool specConstant)
+{
+    Op opcode = specConstant ? OpSpecConstant : OpConstant;
+
+    // See if we already made it. Applies only to regular constants, because specialization constants
+    // must remain distinct for the purpose of applying a SpecId decoration.
+    if (! specConstant) {
+        Id existing = findScalarConstant(OpTypeInt, opcode, typeId, value);
+        if (existing)
+            return existing;
+    }
+
+    Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+    c->addImmediateOperand(value);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+    groupedConstants[OpTypeInt].push_back(c);
+    module.mapInstruction(c);
+
+    return c->getResultId();
+}
+
+// Make (or, for non-spec constants, reuse) a 64-bit integer constant,
+// stored as two 32-bit words, low word first.
+Id Builder::makeInt64Constant(Id typeId, unsigned long long value, bool specConstant)
+{
+    Op opcode = specConstant ? OpSpecConstant : OpConstant;
+
+    // split into low/high 32-bit words (SPIR-V literal ordering)
+    unsigned op1 = value & 0xFFFFFFFF;
+    unsigned op2 = value >> 32;
+
+    // See if we already made it. Applies only to regular constants, because specialization constants
+    // must remain distinct for the purpose of applying a SpecId decoration.
+    if (! specConstant) {
+        Id existing = findScalarConstant(OpTypeInt, opcode, typeId, op1, op2);
+        if (existing)
+            return existing;
+    }
+
+    Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+    c->addImmediateOperand(op1);
+    c->addImmediateOperand(op2);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+    groupedConstants[OpTypeInt].push_back(c);
+    module.mapInstruction(c);
+
+    return c->getResultId();
+}
+
+// Make (or, for non-spec constants, reuse) a 32-bit float constant.
+Id Builder::makeFloatConstant(float f, bool specConstant)
+{
+    Op opcode = specConstant ? OpSpecConstant : OpConstant;
+    Id typeId = makeFloatType(32);
+    // type-pun through a union to get the raw bit pattern
+    // (NOTE(review): memcpy would be the strictly-conforming C++ form)
+    union { float fl; unsigned int ui; } u;
+    u.fl = f;
+    unsigned value = u.ui;
+
+    // See if we already made it. Applies only to regular constants, because specialization constants
+    // must remain distinct for the purpose of applying a SpecId decoration.
+    if (! specConstant) {
+        Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, value);
+        if (existing)
+            return existing;
+    }
+
+    Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+    c->addImmediateOperand(value);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+    groupedConstants[OpTypeFloat].push_back(c);
+    module.mapInstruction(c);
+
+    return c->getResultId();
+}
+
+// Make (or, for non-spec constants, reuse) a 64-bit float constant,
+// stored as two 32-bit words, low word first.
+Id Builder::makeDoubleConstant(double d, bool specConstant)
+{
+    Op opcode = specConstant ? OpSpecConstant : OpConstant;
+    Id typeId = makeFloatType(64);
+    // type-pun through a union to get the raw bit pattern
+    union { double db; unsigned long long ull; } u;
+    u.db = d;
+    unsigned long long value = u.ull;
+    unsigned op1 = value & 0xFFFFFFFF;
+    unsigned op2 = value >> 32;
+
+    // See if we already made it. Applies only to regular constants, because specialization constants
+    // must remain distinct for the purpose of applying a SpecId decoration.
+    if (! specConstant) {
+        Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, op1, op2);
+        if (existing)
+            return existing;
+    }
+
+    Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+    c->addImmediateOperand(op1);
+    c->addImmediateOperand(op2);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+    groupedConstants[OpTypeFloat].push_back(c);
+    module.mapInstruction(c);
+
+    return c->getResultId();
+}
+
+#ifdef AMD_EXTENSIONS
+// Make (or, for non-spec constants, reuse) a 16-bit float constant.
+// The incoming 32-bit float is converted to half precision with
+// round-toward-zero via the hex_float utilities.
+Id Builder::makeFloat16Constant(float f16, bool specConstant)
+{
+    Op opcode = specConstant ? OpSpecConstant : OpConstant;
+    Id typeId = makeFloatType(16);
+
+    // convert float -> float16 bit pattern
+    spvutils::HexFloat<spvutils::FloatProxy<float>> fVal(f16);
+    spvutils::HexFloat<spvutils::FloatProxy<spvutils::Float16>> f16Val(0);
+    fVal.castTo(f16Val, spvutils::kRoundToZero);
+
+    unsigned value = f16Val.value().getAsFloat().get_value();
+
+    // See if we already made it. Applies only to regular constants, because specialization constants
+    // must remain distinct for the purpose of applying a SpecId decoration.
+    if (!specConstant) {
+        Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, value);
+        if (existing)
+            return existing;
+    }
+
+    Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+    c->addImmediateOperand(value);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+    groupedConstants[OpTypeFloat].push_back(c);
+    module.mapInstruction(c);
+
+    return c->getResultId();
+}
+#endif
+
+// Search existing composite constants of class 'typeClass' for one whose
+// operands exactly match 'comps'; return its <id>, or NoResult.
+Id Builder::findCompositeConstant(Op typeClass, std::vector<Id>& comps) const
+{
+    Instruction* constant = 0;
+    bool found = false;
+    for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) {
+        constant = groupedConstants[typeClass][i];
+
+        // same shape?
+        if (constant->getNumOperands() != (int)comps.size())
+            continue;
+
+        // same contents?
+        bool mismatch = false;
+        for (int op = 0; op < constant->getNumOperands(); ++op) {
+            if (constant->getIdOperand(op) != comps[op]) {
+                mismatch = true;
+                break;
+            }
+        }
+        if (! mismatch) {
+            found = true;
+            break;
+        }
+    }
+
+    return found ? constant->getResultId() : NoResult;
+}
+
+// Comments in header
+Id Builder::makeCompositeConstant(Id typeId, std::vector<Id>& members, bool specConstant)
+{
+    Op opcode = specConstant ? OpSpecConstantComposite : OpConstantComposite;
+    assert(typeId);
+    Op typeClass = getTypeClass(typeId);
+
+    // only composite type classes are legal here
+    switch (typeClass) {
+    case OpTypeVector:
+    case OpTypeArray:
+    case OpTypeStruct:
+    case OpTypeMatrix:
+        break;
+    default:
+        assert(0);
+        return makeFloatConstant(0.0);
+    }
+
+    // Reuse applies only to regular constants; spec constants stay distinct.
+    if (! specConstant) {
+        Id existing = findCompositeConstant(typeClass, members);
+        if (existing)
+            return existing;
+    }
+
+    Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+    for (int op = 0; op < (int)members.size(); ++op)
+        c->addIdOperand(members[op]);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+    groupedConstants[typeClass].push_back(c);
+    module.mapInstruction(c);
+
+    return c->getResultId();
+}
+
+// Record an OpEntryPoint for 'function' under execution model 'model'
+// with entry-point name 'name'.  Returns the instruction so the caller
+// can append interface <id>s later.
+Instruction* Builder::addEntryPoint(ExecutionModel model, Function* function, const char* name)
+{
+    Instruction* entry = new Instruction(OpEntryPoint);
+    entry->addImmediateOperand(model);
+    entry->addIdOperand(function->getId());
+    entry->addStringOperand(name);
+    entryPoints.push_back(std::unique_ptr<Instruction>(entry));
+
+    return entry;
+}
+
+// Currently relying on the fact that all 'value' of interest are small non-negative values.
+// A negative value means "operand not present"; up to three optional
+// literal operands are supported.
+void Builder::addExecutionMode(Function* entryPoint, ExecutionMode mode, int value1, int value2, int value3)
+{
+    Instruction* instr = new Instruction(OpExecutionMode);
+    instr->addIdOperand(entryPoint->getId());
+    instr->addImmediateOperand(mode);
+    if (value1 >= 0)
+        instr->addImmediateOperand(value1);
+    if (value2 >= 0)
+        instr->addImmediateOperand(value2);
+    if (value3 >= 0)
+        instr->addImmediateOperand(value3);
+
+    executionModes.push_back(std::unique_ptr<Instruction>(instr));
+}
+
+// Attach an OpName debug name 'string' to <id> 'id'.
+void Builder::addName(Id id, const char* string)
+{
+    Instruction* nameInst = new Instruction(OpName);
+    nameInst->addIdOperand(id);
+    nameInst->addStringOperand(string);
+    names.push_back(std::unique_ptr<Instruction>(nameInst));
+}
+
+// Attach an OpMemberName debug name 'string' to struct member
+// 'memberNumber' of type <id> 'id'.
+void Builder::addMemberName(Id id, int memberNumber, const char* string)
+{
+    Instruction* name = new Instruction(OpMemberName);
+    name->addIdOperand(id);
+    name->addImmediateOperand(memberNumber);
+    name->addStringOperand(string);
+
+    names.push_back(std::unique_ptr<Instruction>(name));
+}
+
+// Record an OpLine source-location annotation for 'target':
+// file <id> 'fileName', line 'lineNum', column 'column'.
+void Builder::addLine(Id target, Id fileName, int lineNum, int column)
+{
+    Instruction* line = new Instruction(OpLine);
+    line->addIdOperand(target);
+    line->addIdOperand(fileName);
+    line->addImmediateOperand(lineNum);
+    line->addImmediateOperand(column);
+
+    lines.push_back(std::unique_ptr<Instruction>(line));
+}
+
+// Add an OpDecorate of 'decoration' (with optional non-negative literal
+// 'num') to <id> 'id'.  DecorationMax is a sentinel for "no decoration".
+void Builder::addDecoration(Id id, Decoration decoration, int num)
+{
+    if (decoration == spv::DecorationMax)
+        return;
+    Instruction* dec = new Instruction(OpDecorate);
+    dec->addIdOperand(id);
+    dec->addImmediateOperand(decoration);
+    if (num >= 0)
+        dec->addImmediateOperand(num);
+
+    decorations.push_back(std::unique_ptr<Instruction>(dec));
+}
+
+// Add an OpMemberDecorate of 'decoration' (with optional non-negative
+// literal 'num') to member 'member' of struct type <id> 'id'.
+// NOTE(review): unlike addDecoration(), DecorationMax is not filtered
+// here — confirm callers never pass it.
+void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, int num)
+{
+    Instruction* dec = new Instruction(OpMemberDecorate);
+    dec->addIdOperand(id);
+    dec->addImmediateOperand(member);
+    dec->addImmediateOperand(decoration);
+    if (num >= 0)
+        dec->addImmediateOperand(num);
+
+    decorations.push_back(std::unique_ptr<Instruction>(dec));
+}
+
+// Comments in header
+// Creates the main function (void return, no parameters) and remembers
+// it; must be called at most once per module.
+Function* Builder::makeEntryPoint(const char* entryPoint)
+{
+    assert(! mainFunction);
+
+    Block* entry;
+    std::vector<Id> params;
+    std::vector<Decoration> precisions;
+
+    mainFunction = makeFunctionEntry(NoPrecision, makeVoidType(), entryPoint, params, precisions, &entry);
+
+    return mainFunction;
+}
+
+// Comments in header
+// Makes a function with the given return/parameter types and precisions.
+// If 'entry' is non-null, also creates the function's entry block and
+// makes it the current build point.
+Function* Builder::makeFunctionEntry(Decoration precision, Id returnType, const char* name,
+                                     const std::vector<Id>& paramTypes, const std::vector<Decoration>& precisions, Block **entry)
+{
+    // Make the function and initial instructions in it
+    Id typeId = makeFunctionType(returnType, paramTypes);
+    // parameters get a contiguous run of ids starting at firstParamId
+    Id firstParamId = paramTypes.size() == 0 ? 0 : getUniqueIds((int)paramTypes.size());
+    Function* function = new Function(getUniqueId(), returnType, typeId, firstParamId, module);
+
+    // Set up the precisions
+    setPrecision(function->getId(), precision);
+    for (unsigned p = 0; p < (unsigned)precisions.size(); ++p)
+        setPrecision(firstParamId + p, precisions[p]);
+
+    // CFG
+    if (entry) {
+        *entry = new Block(getUniqueId(), *function);
+        function->addBlock(*entry);
+        setBuildPoint(*entry);
+    }
+
+    if (name)
+        addName(function->getId(), name);
+
+    functions.push_back(std::unique_ptr<Function>(function));
+
+    return function;
+}
+
+// Comments in header
+// Emit OpReturnValue if 'retVal' is non-zero, otherwise OpReturn. A return
+// terminates the current block, so for explicit (non-implicit) returns a
+// fresh, predecessor-less block is started for any code that follows.
+void Builder::makeReturn(bool implicit, Id retVal)
+{
+    if (retVal) {
+        Instruction* inst = new Instruction(NoResult, NoType, OpReturnValue);
+        inst->addIdOperand(retVal);
+        buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+    } else
+        buildPoint->addInstruction(std::unique_ptr<Instruction>(new Instruction(NoResult, NoType, OpReturn)));
+
+    if (! implicit)
+        createAndSetNoPredecessorBlock("post-return");
+}
+
+// Comments in header
+// Finish the current function: if its last block is not yet terminated,
+// append an implicit return (returning an OpUndef value for non-void
+// functions, since some value must be produced).
+void Builder::leaveFunction()
+{
+    Block* block = buildPoint;
+    Function& function = buildPoint->getParent();
+    assert(block);
+
+    // If our function did not contain a return, add a return void now.
+    if (! block->isTerminated()) {
+        if (function.getReturnType() == makeVoidType())
+            makeReturn(true);
+        else {
+            makeReturn(true, createUndefined(function.getReturnType()));
+        }
+    }
+}
+
+// Comments in header
+void Builder::makeDiscard()
+{
+    Instruction* kill = new Instruction(OpKill);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(kill));
+
+    // OpKill terminates the block; continue in a fresh, unreachable block.
+    createAndSetNoPredecessorBlock("post-discard");
+}
+
+// Comments in header
+// Create an OpVariable of 'type' in 'storageClass'. Function-local variables
+// are placed in the owning function (entry block, per validation rules);
+// all other storage classes go into the module-level globals section.
+Id Builder::createVariable(StorageClass storageClass, Id type, const char* name)
+{
+    Id pointerType = makePointer(storageClass, type);
+    Instruction* inst = new Instruction(getUniqueId(), pointerType, OpVariable);
+    inst->addImmediateOperand(storageClass);
+
+    switch (storageClass) {
+    case StorageClassFunction:
+        // Validation rules require the declaration in the entry block
+        buildPoint->getParent().addLocalVariable(std::unique_ptr<Instruction>(inst));
+        break;
+
+    default:
+        constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(inst));
+        module.mapInstruction(inst);
+        break;
+    }
+
+    if (name)
+        addName(inst->getResultId(), name);
+
+    return inst->getResultId();
+}
+
+// Comments in header
+Id Builder::createUndefined(Id type)
+{
+    // OpUndef yields an unspecified value of 'type'.
+    Instruction* undef = new Instruction(getUniqueId(), type, OpUndef);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(undef));
+
+    return undef->getResultId();
+}
+
+// Comments in header
+void Builder::createStore(Id rValue, Id lValue)
+{
+    // OpStore takes the pointer operand first, then the object to write.
+    Instruction* inst = new Instruction(OpStore);
+    inst->addIdOperand(lValue);
+    inst->addIdOperand(rValue);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+}
+
+// Comments in header
+Id Builder::createLoad(Id lValue)
+{
+    // The result type of the load is whatever the pointer points to.
+    Instruction* inst = new Instruction(getUniqueId(), getDerefTypeId(lValue), OpLoad);
+    inst->addIdOperand(lValue);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+
+    return inst->getResultId();
+}
+
+// Comments in header
+// Create an OpAccessChain from 'base' (a pointer) through 'offsets',
+// returning a pointer (in 'storageClass') to the selected element.
+Id Builder::createAccessChain(StorageClass storageClass, Id base, std::vector<Id>& offsets)
+{
+    // Figure out the final resulting type.
+    spv::Id typeId = getTypeId(base);
+    assert(isPointerType(typeId) && offsets.size() > 0);
+    typeId = getContainedTypeId(typeId);
+    for (int i = 0; i < (int)offsets.size(); ++i) {
+        if (isStructType(typeId)) {
+            // Struct members must be selected by compile-time constants.
+            assert(isConstantScalar(offsets[i]));
+            typeId = getContainedTypeId(typeId, getConstantScalar(offsets[i]));
+        } else
+            typeId = getContainedTypeId(typeId, offsets[i]);
+    }
+    // Result is a pointer to the final contained type.
+    typeId = makePointer(storageClass, typeId);
+
+    // Make the instruction
+    Instruction* chain = new Instruction(getUniqueId(), typeId, OpAccessChain);
+    chain->addIdOperand(base);
+    for (int i = 0; i < (int)offsets.size(); ++i)
+        chain->addIdOperand(offsets[i]);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(chain));
+
+    return chain->getResultId();
+}
+
+// Query the element count of the run-time array that is member 'member'
+// of the structure pointed to by 'base'; yields a 32-bit int.
+Id Builder::createArrayLength(Id base, unsigned int member)
+{
+    Instruction* inst = new Instruction(getUniqueId(), makeIntType(32), OpArrayLength);
+    inst->addIdOperand(base);
+    inst->addImmediateOperand(member);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+
+    return inst->getResultId();
+}
+
+// Extract the single constituent at literal 'index' from 'composite'.
+// 'typeId' must be the type of that constituent.
+Id Builder::createCompositeExtract(Id composite, Id typeId, unsigned index)
+{
+    // Generate code for spec constants if in spec constant operation
+    // generation mode.
+    if (generatingOpCodeForSpecConst) {
+        return createSpecConstantOp(OpCompositeExtract, typeId, std::vector<Id>(1, composite), std::vector<Id>(1, index));
+    }
+    Instruction* extract = new Instruction(getUniqueId(), typeId, OpCompositeExtract);
+    extract->addIdOperand(composite);
+    extract->addImmediateOperand(index);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(extract));
+
+    return extract->getResultId();
+}
+
+// Extract a (possibly nested) constituent from 'composite' by walking the
+// literal index chain 'indexes'. 'typeId' is the final constituent's type.
+Id Builder::createCompositeExtract(Id composite, Id typeId, std::vector<unsigned>& indexes)
+{
+    // Generate code for spec constants if in spec constant operation
+    // generation mode.
+    if (generatingOpCodeForSpecConst) {
+        return createSpecConstantOp(OpCompositeExtract, typeId, std::vector<Id>(1, composite), indexes);
+    }
+    Instruction* extract = new Instruction(getUniqueId(), typeId, OpCompositeExtract);
+    extract->addIdOperand(composite);
+    for (int i = 0; i < (int)indexes.size(); ++i)
+        extract->addImmediateOperand(indexes[i]);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(extract));
+
+    return extract->getResultId();
+}
+
+// Make a copy of 'composite' (of type 'typeId') with the constituent at
+// literal 'index' replaced by 'object'.
+Id Builder::createCompositeInsert(Id object, Id composite, Id typeId, unsigned index)
+{
+    Instruction* inst = new Instruction(getUniqueId(), typeId, OpCompositeInsert);
+    inst->addIdOperand(object);
+    inst->addIdOperand(composite);
+    inst->addImmediateOperand(index);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+
+    return inst->getResultId();
+}
+
+// Make a copy of 'composite' (of type 'typeId') with the nested constituent
+// selected by the literal index chain 'indexes' replaced by 'object'.
+Id Builder::createCompositeInsert(Id object, Id composite, Id typeId, std::vector<unsigned>& indexes)
+{
+    Instruction* inst = new Instruction(getUniqueId(), typeId, OpCompositeInsert);
+    inst->addIdOperand(object);
+    inst->addIdOperand(composite);
+    for (auto index : indexes)
+        inst->addImmediateOperand(index);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+
+    return inst->getResultId();
+}
+
+// Extract one component of 'vector' selected by the run-time (non-literal)
+// index 'componentIndex'; 'typeId' is the component type.
+Id Builder::createVectorExtractDynamic(Id vector, Id typeId, Id componentIndex)
+{
+    Instruction* inst = new Instruction(getUniqueId(), typeId, OpVectorExtractDynamic);
+    inst->addIdOperand(vector);
+    inst->addIdOperand(componentIndex);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+
+    return inst->getResultId();
+}
+
+// Make a copy of 'vector' (of type 'typeId') with the component selected by
+// the run-time index 'componentIndex' replaced by 'component'.
+Id Builder::createVectorInsertDynamic(Id vector, Id typeId, Id component, Id componentIndex)
+{
+    Instruction* inst = new Instruction(getUniqueId(), typeId, OpVectorInsertDynamic);
+    inst->addIdOperand(vector);
+    inst->addIdOperand(component);
+    inst->addIdOperand(componentIndex);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+
+    return inst->getResultId();
+}
+
+// An opcode that has no operands, no result id, and no type
+void Builder::createNoResultOp(Op opCode)
+{
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(new Instruction(opCode)));
+}
+
+// An opcode that has one operand, no result id, and no type
+void Builder::createNoResultOp(Op opCode, Id operand)
+{
+    Inst+ruction* inst = new Instruction(opCode);
+    inst->addIdOperand(operand);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+}
+
+// An opcode that has a list of id operands, no result id, and no type
+void Builder::createNoResultOp(Op opCode, const std::vector<Id>& operands)
+{
+    Instruction* op = new Instruction(opCode);
+    for (auto it = operands.cbegin(); it != operands.cend(); ++it)
+        op->addIdOperand(*it);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+// Emit OpControlBarrier with the given execution scope, memory scope, and
+// memory semantics.
+void Builder::createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask semantics)
+{
+    Instruction* op = new Instruction(OpControlBarrier);
+    // The scope and semantics operands of OpControlBarrier are <id>s of
+    // constant instructions, not literal words. Record them as id operands
+    // so id-aware consumers (e.g. the SPIR-V remapper) rewrite them.
+    op->addIdOperand(makeUintConstant(execution));
+    op->addIdOperand(makeUintConstant(memory));
+    op->addIdOperand(makeUintConstant(semantics));
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+// Emit OpMemoryBarrier with the given memory scope and memory semantics.
+void Builder::createMemoryBarrier(unsigned executionScope, unsigned memorySemantics)
+{
+    Instruction* op = new Instruction(OpMemoryBarrier);
+    // Both operands of OpMemoryBarrier are <id>s of constant instructions,
+    // not literal words; record them as id operands so id-aware consumers
+    // (e.g. the SPIR-V remapper) rewrite them.
+    op->addIdOperand(makeUintConstant(executionScope));
+    op->addIdOperand(makeUintConstant(memorySemantics));
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+// An opcode that has one operand, a result id, and a type
+Id Builder::createUnaryOp(Op opCode, Id typeId, Id operand)
+{
+    // Generate code for spec constants if in spec constant operation
+    // generation mode.
+    if (generatingOpCodeForSpecConst) {
+        return createSpecConstantOp(opCode, typeId, std::vector<Id>(1, operand), std::vector<Id>());
+    }
+    Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
+    op->addIdOperand(operand);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+    return op->getResultId();
+}
+
+// An opcode that has two id operands, a result id, and a type
+Id Builder::createBinOp(Op opCode, Id typeId, Id left, Id right)
+{
+    // Generate code for spec constants if in spec constant operation
+    // generation mode.
+    if (generatingOpCodeForSpecConst) {
+        std::vector<Id> operands(2);
+        operands[0] = left; operands[1] = right;
+        return createSpecConstantOp(opCode, typeId, operands, std::vector<Id>());
+    }
+    Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
+    op->addIdOperand(left);
+    op->addIdOperand(right);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+    return op->getResultId();
+}
+
+// An opcode that has three id operands, a result id, and a type
+Id Builder::createTriOp(Op opCode, Id typeId, Id op1, Id op2, Id op3)
+{
+    // Generate code for spec constants if in spec constant operation
+    // generation mode.
+    if (generatingOpCodeForSpecConst) {
+        std::vector<Id> operands(3);
+        operands[0] = op1;
+        operands[1] = op2;
+        operands[2] = op3;
+        return createSpecConstantOp(
+            opCode, typeId, operands, std::vector<Id>());
+    }
+    Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
+    op->addIdOperand(op1);
+    op->addIdOperand(op2);
+    op->addIdOperand(op3);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+    return op->getResultId();
+}
+
+// An opcode with an arbitrary list of id operands, a result id, and a type
+Id Builder::createOp(Op opCode, Id typeId, const std::vector<Id>& operands)
+{
+    Instruction* inst = new Instruction(getUniqueId(), typeId, opCode);
+    for (auto operand : operands)
+        inst->addIdOperand(operand);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+
+    return inst->getResultId();
+}
+
+// Wrap 'opCode' in an OpSpecConstantOp: the wrapped opcode is the first
+// literal, followed by its id operands, then any literal operands.
+// Spec-constant ops live in the module's constants/types section, not in
+// the current block.
+Id Builder::createSpecConstantOp(Op opCode, Id typeId, const std::vector<Id>& operands, const std::vector<unsigned>& literals)
+{
+    Instruction* op = new Instruction(getUniqueId(), typeId, OpSpecConstantOp);
+    op->addImmediateOperand((unsigned) opCode);
+    for (auto it = operands.cbegin(); it != operands.cend(); ++it)
+        op->addIdOperand(*it);
+    for (auto it = literals.cbegin(); it != literals.cend(); ++it)
+        op->addImmediateOperand(*it);
+    module.mapInstruction(op);
+    constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(op));
+
+    return op->getResultId();
+}
+
+// Emit an OpFunctionCall to 'function' with the given argument ids.
+Id Builder::createFunctionCall(spv::Function* function, std::vector<spv::Id>& args)
+{
+    Instruction* call = new Instruction(getUniqueId(), function->getReturnType(), OpFunctionCall);
+    call->addIdOperand(function->getId());
+    for (size_t a = 0; a < args.size(); ++a)
+        call->addIdOperand(args[a]);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(call));
+
+    return call->getResultId();
+}
+
+// Comments in header
+// Swizzle an r-value 'source' by 'channels', producing a value of 'typeId'.
+// A single channel becomes a plain extract; otherwise an OpVectorShuffle
+// with 'source' as both vector operands.
+Id Builder::createRvalueSwizzle(Decoration precision, Id typeId, Id source, std::vector<unsigned>& channels)
+{
+    if (channels.size() == 1)
+        return setPrecision(createCompositeExtract(source, typeId, channels.front()), precision);
+
+    if (generatingOpCodeForSpecConst) {
+        // Spec-constant path: OpVectorShuffle via OpSpecConstantOp.
+        std::vector<Id> operands(2);
+        operands[0] = operands[1] = source;
+        return setPrecision(createSpecConstantOp(OpVectorShuffle, typeId, operands, channels), precision);
+    }
+    Instruction* swizzle = new Instruction(getUniqueId(), typeId, OpVectorShuffle);
+    assert(isVector(source));
+    swizzle->addIdOperand(source);
+    swizzle->addIdOperand(source);
+    for (int i = 0; i < (int)channels.size(); ++i)
+        swizzle->addImmediateOperand(channels[i]);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(swizzle));
+
+    return setPrecision(swizzle->getResultId(), precision);
+}
+
+// Comments in header
+// Write 'source' into the 'channels' components of 'target', yielding the
+// new whole-vector value (of 'typeId'). Implemented as an OpVectorShuffle
+// that starts from an identity selection of 'target' and redirects the
+// swizzled components to come from 'source'.
+Id Builder::createLvalueSwizzle(Id typeId, Id target, Id source, std::vector<unsigned>& channels)
+{
+    // Single-component write of a scalar is just a composite insert.
+    if (channels.size() == 1 && getNumComponents(source) == 1)
+        return createCompositeInsert(source, target, typeId, channels.front());
+
+    Instruction* swizzle = new Instruction(getUniqueId(), typeId, OpVectorShuffle);
+    assert(isVector(target));
+    swizzle->addIdOperand(target);
+    if (accessChain.component != NoResult)
+        // For dynamic component selection, source does not involve in l-value swizzle
+        swizzle->addIdOperand(target);
+    else {
+        assert(getNumComponents(source) == (int)channels.size());
+        assert(isVector(source));
+        swizzle->addIdOperand(source);
+    }
+
+    // Set up an identity shuffle from the base value to the result value
+    unsigned int components[4];
+    int numTargetComponents = getNumComponents(target);
+    for (int i = 0; i < numTargetComponents; ++i)
+        components[i] = i;
+
+    // Punch in the l-value swizzle
+    for (int i = 0; i < (int)channels.size(); ++i) {
+        if (accessChain.component != NoResult)
+            components[i] = channels[i]; // Only shuffle the base value
+        else
+            // Selectors >= numTargetComponents index into the second
+            // shuffle operand, i.e. 'source'.
+            components[channels[i]] = numTargetComponents + i;
+    }
+
+    // finish the instruction with these components selectors
+    for (int i = 0; i < numTargetComponents; ++i)
+        swizzle->addImmediateOperand(components[i]);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(swizzle));
+
+    return swizzle->getResultId();
+}
+
+// Comments in header
+void Builder::promoteScalar(Decoration precision, Id& left, Id& right)
+{
+    int delta = getNumComponents(right) - getNumComponents(left);
+
+    // Smear whichever side is narrower up to the other side's vector size.
+    if (delta > 0)
+        left = smearScalar(precision, left, makeVectorType(getTypeId(left), getNumComponents(right)));
+    else if (delta < 0)
+        right = smearScalar(precision, right, makeVectorType(getTypeId(right), getNumComponents(left)));
+}
+
+// Comments in header
+// Replicate 'scalar' across all components of 'vectorType' via
+// OpCompositeConstruct (or a composite constant in spec-constant mode).
+// Returns 'scalar' unchanged if the type has a single component.
+Id Builder::smearScalar(Decoration precision, Id scalar, Id vectorType)
+{
+    assert(getNumComponents(scalar) == 1);
+    assert(getTypeId(scalar) == getScalarTypeId(vectorType));
+
+    int numComponents = getNumTypeComponents(vectorType);
+    if (numComponents == 1)
+        return scalar;
+
+    Instruction* smear = nullptr;
+    if (generatingOpCodeForSpecConst) {
+        auto members = std::vector<spv::Id>(numComponents, scalar);
+        // Sometime even in spec-constant-op mode, the temporary vector created by
+        // promoting a scalar might not be a spec constant. This should depend on
+        // the scalar.
+        // e.g.:
+        //  const vec2 spec_const_result = a_spec_const_vec2 + a_front_end_const_scalar;
+        // In such cases, the temporary vector created from a_front_end_const_scalar
+        // is not a spec constant vector, even though the binary operation node is marked
+        // as 'specConstant' and we are in spec-constant-op mode.
+        auto result_id = makeCompositeConstant(vectorType, members, isSpecConstant(scalar));
+        smear = module.getInstruction(result_id);
+    } else {
+        smear = new Instruction(getUniqueId(), vectorType, OpCompositeConstruct);
+        for (int c = 0; c < numComponents; ++c)
+            smear->addIdOperand(scalar);
+        buildPoint->addInstruction(std::unique_ptr<Instruction>(smear));
+    }
+
+    return setPrecision(smear->getResultId(), precision);
+}
+
+// Comments in header
+Id Builder::createBuiltinCall(Id resultType, Id builtins, int entryPoint, std::vector<Id>& args)
+{
+    // OpExtInst: the extended-instruction-set id, then the instruction
+    // number within that set (a literal), then the id arguments.
+    Instruction* call = new Instruction(getUniqueId(), resultType, OpExtInst);
+    call->addIdOperand(builtins);
+    call->addImmediateOperand(entryPoint);
+    for (size_t arg = 0; arg < args.size(); ++arg)
+        call->addIdOperand(args[arg]);
+
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(call));
+
+    return call->getResultId();
+}
+
+// Accept all parameters needed to create a texture instruction.
+// Create the correct instruction based on the inputs, and make the call.
+// The opcode is chosen from the fetch/gather/proj/Dref/sparse flags and
+// whether an explicit LOD is present; optional image operands are gathered
+// into a bitmask inserted between the fixed and optional arguments.
+Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather, bool noImplicitLod, const TextureParameters& parameters)
+{
+    static const int maxTextureArgs = 10;
+    Id texArgs[maxTextureArgs] = {};
+
+    //
+    // Set up the fixed arguments
+    //
+    int numArgs = 0;
+    bool explicitLod = false;
+    texArgs[numArgs++] = parameters.sampler;
+    texArgs[numArgs++] = parameters.coords;
+    if (parameters.Dref != NoResult)
+        texArgs[numArgs++] = parameters.Dref;
+    if (parameters.component != NoResult)
+        texArgs[numArgs++] = parameters.component;
+
+    //
+    // Set up the optional arguments
+    //
+    int optArgNum = numArgs;                        // track which operand, if it exists, is the mask of optional arguments
+    ++numArgs;                                      // speculatively make room for the mask operand
+    ImageOperandsMask mask = ImageOperandsMaskNone; // the mask operand
+    if (parameters.bias) {
+        mask = (ImageOperandsMask)(mask | ImageOperandsBiasMask);
+        texArgs[numArgs++] = parameters.bias;
+    }
+    if (parameters.lod) {
+        mask = (ImageOperandsMask)(mask | ImageOperandsLodMask);
+        texArgs[numArgs++] = parameters.lod;
+        explicitLod = true;
+    } else if (parameters.gradX) {
+        // Explicit gradients also select the ExplicitLod opcode forms.
+        mask = (ImageOperandsMask)(mask | ImageOperandsGradMask);
+        texArgs[numArgs++] = parameters.gradX;
+        texArgs[numArgs++] = parameters.gradY;
+        explicitLod = true;
+    } else if (noImplicitLod && ! fetch && ! gather) {
+        // have to explicitly use lod of 0 if not allowed to have them be implicit, and
+        // we would otherwise be about to issue an implicit instruction
+        mask = (ImageOperandsMask)(mask | ImageOperandsLodMask);
+        texArgs[numArgs++] = makeFloatConstant(0.0);
+        explicitLod = true;
+    }
+    if (parameters.offset) {
+        // Non-constant offsets need the ImageGatherExtended capability.
+        if (isConstant(parameters.offset))
+            mask = (ImageOperandsMask)(mask | ImageOperandsConstOffsetMask);
+        else {
+            addCapability(CapabilityImageGatherExtended);
+            mask = (ImageOperandsMask)(mask | ImageOperandsOffsetMask);
+        }
+        texArgs[numArgs++] = parameters.offset;
+    }
+    if (parameters.offsets) {
+        mask = (ImageOperandsMask)(mask | ImageOperandsConstOffsetsMask);
+        texArgs[numArgs++] = parameters.offsets;
+    }
+    if (parameters.sample) {
+        mask = (ImageOperandsMask)(mask | ImageOperandsSampleMask);
+        texArgs[numArgs++] = parameters.sample;
+    }
+    if (parameters.lodClamp) {
+        // capability if this bit is used
+        addCapability(CapabilityMinLod);
+
+        mask = (ImageOperandsMask)(mask | ImageOperandsMinLodMask);
+        texArgs[numArgs++] = parameters.lodClamp;
+    }
+    if (mask == ImageOperandsMaskNone)
+        --numArgs;  // undo speculative reservation for the mask argument
+    else
+        texArgs[optArgNum] = mask;
+
+    //
+    // Set up the instruction
+    //
+    Op opCode = OpNop;  // All paths below need to set this
+    if (fetch) {
+        if (sparse)
+            opCode = OpImageSparseFetch;
+        else
+            opCode = OpImageFetch;
+    } else if (gather) {
+        if (parameters.Dref)
+            if (sparse)
+                opCode = OpImageSparseDrefGather;
+            else
+                opCode = OpImageDrefGather;
+        else
+            if (sparse)
+                opCode = OpImageSparseGather;
+            else
+                opCode = OpImageGather;
+    } else if (explicitLod) {
+        if (parameters.Dref) {
+            if (proj)
+                if (sparse)
+                    opCode = OpImageSparseSampleProjDrefExplicitLod;
+                else
+                    opCode = OpImageSampleProjDrefExplicitLod;
+            else
+                if (sparse)
+                    opCode = OpImageSparseSampleDrefExplicitLod;
+                else
+                    opCode = OpImageSampleDrefExplicitLod;
+        } else {
+            if (proj)
+                if (sparse)
+                    opCode = OpImageSparseSampleProjExplicitLod;
+                else
+                    opCode = OpImageSampleProjExplicitLod;
+            else
+                if (sparse)
+                    opCode = OpImageSparseSampleExplicitLod;
+                else
+                    opCode = OpImageSampleExplicitLod;
+        }
+    } else {
+        if (parameters.Dref) {
+            if (proj)
+                if (sparse)
+                    opCode = OpImageSparseSampleProjDrefImplicitLod;
+                else
+                    opCode = OpImageSampleProjDrefImplicitLod;
+            else
+                if (sparse)
+                    opCode = OpImageSparseSampleDrefImplicitLod;
+                else
+                    opCode = OpImageSampleDrefImplicitLod;
+        } else {
+            if (proj)
+                if (sparse)
+                    opCode = OpImageSparseSampleProjImplicitLod;
+                else
+                    opCode = OpImageSampleProjImplicitLod;
+            else
+                if (sparse)
+                    opCode = OpImageSparseSampleImplicitLod;
+                else
+                    opCode = OpImageSampleImplicitLod;
+        }
+    }
+
+    // See if the result type is expecting a smeared result.
+    // This happens when a legacy shadow*() call is made, which
+    // gets a vec4 back instead of a float.
+    Id smearedType = resultType;
+    if (! isScalarType(resultType)) {
+        switch (opCode) {
+        case OpImageSampleDrefImplicitLod:
+        case OpImageSampleDrefExplicitLod:
+        case OpImageSampleProjDrefImplicitLod:
+        case OpImageSampleProjDrefExplicitLod:
+            resultType = getScalarTypeId(resultType);
+            break;
+        default:
+            break;
+        }
+    }
+
+    Id typeId0 = 0;
+    Id typeId1 = 0;
+
+    if (sparse) {
+        // Sparse forms return a struct of (residency code, texel); the texel
+        // itself is written through parameters.texelOut below.
+        typeId0 = resultType;
+        typeId1 = getDerefTypeId(parameters.texelOut);
+        resultType = makeStructResultType(typeId0, typeId1);
+    }
+
+    // Build the SPIR-V instruction
+    Instruction* textureInst = new Instruction(getUniqueId(), resultType, opCode);
+    for (int op = 0; op < optArgNum; ++op)
+        textureInst->addIdOperand(texArgs[op]);
+    if (optArgNum < numArgs)
+        textureInst->addImmediateOperand(texArgs[optArgNum]);
+    for (int op = optArgNum + 1; op < numArgs; ++op)
+        textureInst->addIdOperand(texArgs[op]);
+    setPrecision(textureInst->getResultId(), precision);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(textureInst));
+
+    Id resultId = textureInst->getResultId();
+
+    if (sparse) {
+        // set capability
+        addCapability(CapabilitySparseResidency);
+
+        // Decode the return type that was a special structure
+        createStore(createCompositeExtract(resultId, typeId1, 1), parameters.texelOut);
+        resultId = createCompositeExtract(resultId, typeId0, 0);
+        setPrecision(resultId, precision);
+    } else {
+        // When a smear is needed, do it, as per what was computed
+        // above when resultType was changed to a scalar type.
+        if (resultType != smearedType)
+            resultId = smearScalar(precision, resultId, smearedType);
+    }
+
+    return resultId;
+}
+
+// Comments in header
+// Emit an image query instruction (size/lod/levels/samples), deriving the
+// result type from the opcode and the image's dimensionality.
+Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameters)
+{
+    // All these need a capability
+    addCapability(CapabilityImageQuery);
+
+    // Figure out the result type
+    Id resultType = 0;
+    switch (opCode) {
+    case OpImageQuerySize:
+    case OpImageQuerySizeLod:
+    {
+        // Size queries return one int per dimension, plus one for arrayed
+        // images (the layer count).
+        int numComponents = 0;
+        switch (getTypeDimensionality(getImageType(parameters.sampler))) {
+        case Dim1D:
+        case DimBuffer:
+            numComponents = 1;
+            break;
+        case Dim2D:
+        case DimCube:
+        case DimRect:
+        case DimSubpassData:
+            numComponents = 2;
+            break;
+        case Dim3D:
+            numComponents = 3;
+            break;
+
+        default:
+            assert(0);
+            break;
+        }
+        if (isArrayedImageType(getImageType(parameters.sampler)))
+            ++numComponents;
+        if (numComponents == 1)
+            resultType = makeIntType(32);
+        else
+            resultType = makeVectorType(makeIntType(32), numComponents);
+
+        break;
+    }
+    case OpImageQueryLod:
+        // (computed LOD, clamped LOD) pair.
+        resultType = makeVectorType(makeFloatType(32), 2);
+        break;
+    case OpImageQueryLevels:
+    case OpImageQuerySamples:
+        resultType = makeIntType(32);
+        break;
+    default:
+        assert(0);
+        break;
+    }
+
+    Instruction* query = new Instruction(getUniqueId(), resultType, opCode);
+    query->addIdOperand(parameters.sampler);
+    if (parameters.coords)
+        query->addIdOperand(parameters.coords);
+    if (parameters.lod)
+        query->addIdOperand(parameters.lod);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(query));
+
+    return query->getResultId();
+}
+
+// External comments in header.
+// Operates recursively to visit the composite's hierarchy.
+// Scalars/vectors compare with one typed comparison (vectors reduced via
+// OpAll/OpAny); aggregates compare constituent-by-constituent and the
+// per-constituent results are combined with logical and/or.
+Id Builder::createCompositeCompare(Decoration precision, Id value1, Id value2, bool equal)
+{
+    Id boolType = makeBoolType();
+    Id valueType = getTypeId(value1);
+
+    Id resultId = NoResult;
+
+    int numConstituents = getNumTypeConstituents(valueType);
+
+    // Scalars and Vectors
+
+    if (isScalarType(valueType) || isVectorType(valueType)) {
+        assert(valueType == getTypeId(value2));
+        // These just need a single comparison, just have
+        // to figure out what it is.
+        Op op;
+        switch (getMostBasicTypeClass(valueType)) {
+        case OpTypeFloat:
+            op = equal ? OpFOrdEqual : OpFOrdNotEqual;
+            break;
+        case OpTypeInt:
+        default:
+            op = equal ? OpIEqual : OpINotEqual;
+            break;
+        case OpTypeBool:
+            op = equal ? OpLogicalEqual : OpLogicalNotEqual;
+            // Boolean results carry no precision decoration.
+            precision = NoPrecision;
+            break;
+        }
+
+        if (isScalarType(valueType)) {
+            // scalar
+            resultId = createBinOp(op, boolType, value1, value2);
+        } else {
+            // vector
+            resultId = createBinOp(op, makeVectorType(boolType, numConstituents), value1, value2);
+            setPrecision(resultId, precision);
+            // reduce vector compares...
+            resultId = createUnaryOp(equal ? OpAll : OpAny, boolType, resultId);
+        }
+
+        return setPrecision(resultId, precision);
+    }
+
+    // Only structs, arrays, and matrices should be left.
+    // They share in common the reduction operation across their constituents.
+    assert(isAggregateType(valueType) || isMatrixType(valueType));
+
+    // Compare each pair of constituents
+    for (int constituent = 0; constituent < numConstituents; ++constituent) {
+        std::vector<unsigned> indexes(1, constituent);
+        Id constituentType1 = getContainedTypeId(getTypeId(value1), constituent);
+        Id constituentType2 = getContainedTypeId(getTypeId(value2), constituent);
+        Id constituent1 = createCompositeExtract(value1, constituentType1, indexes);
+        Id constituent2 = createCompositeExtract(value2, constituentType2, indexes);
+
+        Id subResultId = createCompositeCompare(precision, constituent1, constituent2, equal);
+
+        if (constituent == 0)
+            resultId = subResultId;
+        else
+            resultId = setPrecision(createBinOp(equal ? OpLogicalAnd : OpLogicalOr, boolType, resultId, subResultId), precision);
+    }
+
+    return resultId;
+}
+
+// OpCompositeConstruct
+// Build a composite of type 'typeId' from 'constituents'. In
+// spec-constant-op mode the result is emitted as a composite constant
+// instead of an in-block instruction.
+Id Builder::createCompositeConstruct(Id typeId, std::vector<Id>& constituents)
+{
+    assert(isAggregateType(typeId) || (getNumTypeConstituents(typeId) > 1 && getNumTypeConstituents(typeId) == (int)constituents.size()));
+
+    if (generatingOpCodeForSpecConst) {
+        // Sometime, even in spec-constant-op mode, the constant composite to be
+        // constructed may not be a specialization constant.
+        // e.g.:
+        //  const mat2 m2 = mat2(a_spec_const, a_front_end_const, another_front_end_const, third_front_end_const);
+        // The first column vector should be a spec constant one, as a_spec_const is a spec constant.
+        // The second column vector should NOT be spec constant, as it does not contain any spec constants.
+        // To handle such cases, we check the constituents of the constant vector to determine whether this
+        // vector should be created as a spec constant.
+        return makeCompositeConstant(typeId, constituents,
+                                     std::any_of(constituents.begin(), constituents.end(),
+                                                 [&](spv::Id id) { return isSpecConstant(id); }));
+    }
+
+    Instruction* op = new Instruction(getUniqueId(), typeId, OpCompositeConstruct);
+    for (int c = 0; c < (int)constituents.size(); ++c)
+        op->addIdOperand(constituents[c]);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+    return op->getResultId();
+}
+
+// Vector or scalar constructor
+// Flatten 'sources' (scalars and/or vectors) component-by-component into a
+// value of 'resultTypeId', truncating extra source components once the
+// target is full. A single scalar source for a vector target is smeared.
+Id Builder::createConstructor(Decoration precision, const std::vector<Id>& sources, Id resultTypeId)
+{
+    Id result = NoResult;
+    unsigned int numTargetComponents = getNumTypeComponents(resultTypeId);
+    unsigned int targetComponent = 0;
+
+    // Special case: when calling a vector constructor with a single scalar
+    // argument, smear the scalar
+    if (sources.size() == 1 && isScalar(sources[0]) && numTargetComponents > 1)
+        return smearScalar(precision, sources[0], resultTypeId);
+
+    Id scalarTypeId = getScalarTypeId(resultTypeId);
+    std::vector<Id> constituents;  // accumulate the arguments for OpCompositeConstruct
+    for (unsigned int i = 0; i < sources.size(); ++i) {
+        assert(! isAggregate(sources[i]));
+        unsigned int sourceSize = getNumComponents(sources[i]);
+        unsigned int sourcesToUse = sourceSize;
+        // Don't read past the end of the target: clamp to remaining room.
+        if (sourcesToUse + targetComponent > numTargetComponents)
+            sourcesToUse = numTargetComponents - targetComponent;
+
+        for (unsigned int s = 0; s < sourcesToUse; ++s) {
+            Id arg = sources[i];
+            if (sourceSize > 1) {
+                // Pull out the s-th component of a vector source.
+                std::vector<unsigned> swiz;
+                swiz.push_back(s);
+                arg = createRvalueSwizzle(precision, scalarTypeId, arg, swiz);
+            }
+
+            if (numTargetComponents > 1)
+                constituents.push_back(arg);
+            else
+                result = arg;
+            ++targetComponent;
+        }
+
+        if (targetComponent >= numTargetComponents)
+            break;
+    }
+
+    if (constituents.size() > 0)
+        result = createCompositeConstruct(resultTypeId, constituents);
+
+    return setPrecision(result, precision);
+}
+
+// Comments in header
+// Matrix constructor: build a matrix of type 'resultTypeId' from 'sources',
+// which may be (a) a single scalar that sets the diagonal, (b) another
+// matrix whose overlapping region is copied (identity elsewhere), or
+// (c) a list of scalars/vectors filled in column-major order.
+Id Builder::createMatrixConstructor(Decoration precision, const std::vector<Id>& sources, Id resultTypeId)
+{
+    Id componentTypeId = getScalarTypeId(resultTypeId);
+    int numCols = getTypeNumColumns(resultTypeId);
+    int numRows = getTypeNumRows(resultTypeId);
+
+    // peek at the component type's bit width to choose float vs. double constants
+    Instruction* instr = module.getInstruction(componentTypeId);
+    Id bitCount = instr->getIdOperand(0);
+
+    // Will use a two step process
+    // 1. make a compile-time 2D array of values
+    // 2. construct a matrix from that array
+
+    // Step 1.
+
+    // Initialize the array to the identity matrix.  Loop bounds use
+    // maxMatrixSize (not a literal) so they always cover the full backing
+    // array, keeping them in sync with the declaration below.
+    Id ids[maxMatrixSize][maxMatrixSize];
+    Id  one = (bitCount == 64 ? makeDoubleConstant(1.0) : makeFloatConstant(1.0));
+    Id zero = (bitCount == 64 ? makeDoubleConstant(0.0) : makeFloatConstant(0.0));
+    for (int col = 0; col < maxMatrixSize; ++col) {
+        for (int row = 0; row < maxMatrixSize; ++row) {
+            if (col == row)
+                ids[col][row] = one;
+            else
+                ids[col][row] = zero;
+        }
+    }
+
+    // modify components as dictated by the arguments
+    if (sources.size() == 1 && isScalar(sources[0])) {
+        // a single scalar; resets the diagonals
+        for (int col = 0; col < maxMatrixSize; ++col)
+            ids[col][col] = sources[0];
+    } else if (isMatrix(sources[0])) {
+        // constructing from another matrix; copy over the parts that exist in both the argument and constructee
+        Id matrix = sources[0];
+        int minCols = std::min(numCols, getNumColumns(matrix));
+        int minRows = std::min(numRows, getNumRows(matrix));
+        for (int col = 0; col < minCols; ++col) {
+            std::vector<unsigned> indexes;
+            indexes.push_back(col);
+            for (int row = 0; row < minRows; ++row) {
+                indexes.push_back(row);
+                ids[col][row] = createCompositeExtract(matrix, componentTypeId, indexes);
+                indexes.pop_back();
+                setPrecision(ids[col][row], precision);
+            }
+        }
+    } else {
+        // fill in the matrix in column-major order with whatever argument components are available
+        int row = 0;
+        int col = 0;
+
+        for (int arg = 0; arg < (int)sources.size(); ++arg) {
+            Id argComp = sources[arg];
+            for (int comp = 0; comp < getNumComponents(sources[arg]); ++comp) {
+                if (getNumComponents(sources[arg]) > 1) {
+                    argComp = createCompositeExtract(sources[arg], componentTypeId, comp);
+                    setPrecision(argComp, precision);
+                }
+                ids[col][row++] = argComp;
+                if (row == numRows) {
+                    row = 0;
+                    col++;
+                }
+            }
+        }
+    }
+
+    // Step 2:  Construct a matrix from that array.
+    // First make the column vectors, then make the matrix.
+
+    // make the column vectors
+    Id columnTypeId = getContainedTypeId(resultTypeId);
+    std::vector<Id> matrixColumns;
+    for (int col = 0; col < numCols; ++col) {
+        std::vector<Id> vectorComponents;
+        for (int row = 0; row < numRows; ++row)
+            vectorComponents.push_back(ids[col][row]);
+        Id column = createCompositeConstruct(columnTypeId, vectorComponents);
+        setPrecision(column, precision);
+        matrixColumns.push_back(column);
+    }
+
+    // make the matrix
+    return setPrecision(createCompositeConstruct(resultTypeId, matrixColumns), precision);
+}
+
+// Comments in header
+// Begin an if: allocate the then/merge blocks, remember the header block so
+// makeEndIf() can go back and insert the selection merge and conditional
+// branch, and start emitting code into the then-block.
+Builder::If::If(Id cond, Builder& gb) :
+    builder(gb),
+    condition(cond),
+    elseBlock(0)
+{
+    function = &builder.getBuildPoint()->getParent();
+
+    // make the blocks, but only put the then-block into the function,
+    // the else-block and merge-block will be added later, in order, after
+    // earlier code is emitted
+    thenBlock = new Block(builder.getUniqueId(), *function);
+    mergeBlock = new Block(builder.getUniqueId(), *function);
+
+    // Save the current block, so that we can add in the flow control split when
+    // makeEndIf is called.
+    headerBlock = builder.getBuildPoint();
+
+    function->addBlock(thenBlock);
+    builder.setBuildPoint(thenBlock);
+}
+
+// Comments in header
+// Transition from emitting the then-part to emitting the else-part.
+void Builder::If::makeBeginElse()
+{
+    // Close out the "then" by having it jump to the mergeBlock
+    builder.createBranch(mergeBlock);
+
+    // Make the first else block and add it to the function
+    elseBlock = new Block(builder.getUniqueId(), *function);
+    function->addBlock(elseBlock);
+
+    // Start building the else block
+    builder.setBuildPoint(elseBlock);
+}
+
+// Comments in header
+// Finish the if: terminate the current (then or else) block, then revisit
+// the header block to emit OpSelectionMerge + OpBranchConditional, which
+// could not be emitted until both successor blocks existed.
+void Builder::If::makeEndIf()
+{
+    // jump to the merge block
+    builder.createBranch(mergeBlock);
+
+    // Go back to the headerBlock and make the flow control split
+    builder.setBuildPoint(headerBlock);
+    builder.createSelectionMerge(mergeBlock, SelectionControlMaskNone);
+    if (elseBlock)
+        builder.createConditionalBranch(condition, thenBlock, elseBlock);
+    else
+        // no else clause: a false condition falls straight through to the merge
+        builder.createConditionalBranch(condition, thenBlock, mergeBlock);
+
+    // add the merge block to the function
+    function->addBlock(mergeBlock);
+    builder.setBuildPoint(mergeBlock);
+}
+
+// Comments in header
+// Build the header of a switch: allocate one block per case segment plus a
+// merge block, emit OpSelectionMerge and OpSwitch, and push the merge block
+// so addSwitchBreak()/endSwitch() can find it later.
+void Builder::makeSwitch(Id selector, int numSegments, std::vector<int>& caseValues, std::vector<int>& valueIndexToSegment, int defaultSegment,
+                         std::vector<Block*>& segmentBlocks)
+{
+    Function& function = buildPoint->getParent();
+
+    // make all the blocks
+    for (int s = 0; s < numSegments; ++s)
+        segmentBlocks.push_back(new Block(getUniqueId(), function));
+
+    Block* mergeBlock = new Block(getUniqueId(), function);
+
+    // make and insert the switch's selection-merge instruction
+    createSelectionMerge(mergeBlock, SelectionControlMaskNone);
+
+    // make the switch instruction
+    Instruction* switchInst = new Instruction(NoResult, NoType, OpSwitch);
+    switchInst->addIdOperand(selector);
+    // with no explicit default segment, the default target is the merge block
+    auto defaultOrMerge = (defaultSegment >= 0) ? segmentBlocks[defaultSegment] : mergeBlock;
+    switchInst->addIdOperand(defaultOrMerge->getId());
+    defaultOrMerge->addPredecessor(buildPoint);
+    for (int i = 0; i < (int)caseValues.size(); ++i) {
+        switchInst->addImmediateOperand(caseValues[i]);
+        switchInst->addIdOperand(segmentBlocks[valueIndexToSegment[i]]->getId());
+        segmentBlocks[valueIndexToSegment[i]]->addPredecessor(buildPoint);
+    }
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(switchInst));
+
+    // push the merge block
+    switchMerges.push(mergeBlock);
+}
+
+// Comments in header
+// Branch out of the innermost switch to its merge block, then open an
+// unreachable block so any trailing (dead) code has somewhere to land.
+void Builder::addSwitchBreak()
+{
+    Block* merge = switchMerges.top();
+    createBranch(merge);
+    createAndSetNoPredecessorBlock("post-switch-break");
+}
+
+// Comments in header
+// Start emitting code for switch segment 'nextSegment'; if the previous
+// segment did not end in a break/return, make it fall through to this one.
+void Builder::nextSwitchSegment(std::vector<Block*>& segmentBlock, int nextSegment)
+{
+    int lastSegment = nextSegment - 1;
+    if (lastSegment >= 0) {
+        // Close out previous segment by jumping, if necessary, to next segment
+        if (! buildPoint->isTerminated())
+            createBranch(segmentBlock[nextSegment]);
+    }
+    Block* block = segmentBlock[nextSegment];
+    block->getParent().addBlock(block);
+    setBuildPoint(block);
+}
+
+// Comments in header
+// Finish the switch: give the final segment an implicit break if it did not
+// already terminate, then resume building in the merge block.
+void Builder::endSwitch(std::vector<Block*>& /*segmentBlock*/)
+{
+    // Close out the last segment with a break, if it didn't already terminate
+    if (! buildPoint->isTerminated())
+        addSwitchBreak();
+
+    switchMerges.top()->getParent().addBlock(switchMerges.top());
+    setBuildPoint(switchMerges.top());
+
+    switchMerges.pop();
+}
+
+// Allocate a fresh block in the current function, register it, and return it.
+Block& Builder::makeNewBlock()
+{
+    Function& fn = buildPoint->getParent();
+    Block* fresh = new Block(getUniqueId(), fn);
+    fn.addBlock(fresh);
+    return *fresh;
+}
+
+// Make the four blocks of a new structured loop (header, body, merge,
+// continue target) and push them on the loop stack.
+Builder::LoopBlocks& Builder::makeNewLoop()
+{
+    // This verbosity is needed to simultaneously get the same behavior
+    // everywhere (id's in the same order), have a syntax that works
+    // across lots of versions of C++, have no warnings from pedantic
+    // compilation modes, and leave the rest of the code alone.
+    Block& head            = makeNewBlock();
+    Block& body            = makeNewBlock();
+    Block& merge           = makeNewBlock();
+    Block& continue_target = makeNewBlock();
+    LoopBlocks blocks(head, body, merge, continue_target);
+    loops.push(blocks);
+    return loops.top();
+}
+
+// Branch to the continue target of the innermost loop, then open an
+// unreachable block to absorb any subsequent (dead) code.
+void Builder::createLoopContinue()
+{
+    Block* target = &loops.top().continue_target;
+    createBranch(target);
+    createAndSetNoPredecessorBlock("post-loop-continue");
+}
+
+// Branch to the merge block of the innermost loop (a "break"), then open an
+// unreachable block to absorb any subsequent (dead) code.
+void Builder::createLoopExit()
+{
+    Block* target = &loops.top().merge;
+    createBranch(target);
+    createAndSetNoPredecessorBlock("post-loop-break");
+}
+
+// Finish the innermost loop: its blocks were already emitted, so just pop
+// it off the loop stack.
+void Builder::closeLoop()
+{
+    loops.pop();
+}
+
+// Reset the active access chain to its empty/default state.
+void Builder::clearAccessChain()
+{
+    // scalar members first...
+    accessChain.base               = NoResult;
+    accessChain.instr              = NoResult;
+    accessChain.component          = NoResult;
+    accessChain.preSwizzleBaseType = NoType;
+    accessChain.isRValue           = false;
+    // ...then the container members
+    accessChain.indexChain.clear();
+    accessChain.swizzle.clear();
+}
+
+// Comments in header
+// Append a swizzle to the active access chain, composing with any swizzle
+// already pending rather than stacking a second one.
+void Builder::accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizzleBaseType)
+{
+    // swizzles can be stacked in GLSL, but simplified to a single
+    // one here; the base type doesn't change
+    if (accessChain.preSwizzleBaseType == NoType)
+        accessChain.preSwizzleBaseType = preSwizzleBaseType;
+
+    // if needed, propagate the swizzle for the current access chain
+    if (accessChain.swizzle.size()) {
+        // compose: the new swizzle selects from the old swizzle's selections
+        std::vector<unsigned> oldSwizzle = accessChain.swizzle;
+        accessChain.swizzle.resize(0);
+        for (unsigned int i = 0; i < swizzle.size(); ++i) {
+            accessChain.swizzle.push_back(oldSwizzle[swizzle[i]]);
+        }
+    } else
+        accessChain.swizzle = swizzle;
+
+    // determine if we need to track this swizzle anymore
+    simplifyAccessChainSwizzle();
+}
+
+// Comments in header
+// Store 'rvalue' through the active access chain, handling any residual
+// swizzle (write mask) or dynamic component selection via a
+// read-modify-write of the target vector.
+void Builder::accessChainStore(Id rvalue)
+{
+    assert(accessChain.isRValue == false);
+
+    transferAccessChainSwizzle(true);
+    Id base = collapseAccessChain();
+
+    // If swizzle still exists, it is out-of-order or not full, we must load the target vector,
+    // extract and insert elements to perform writeMask and/or swizzle.
+    Id source = NoResult;
+    if (accessChain.swizzle.size()) {
+        Id tempBaseId = createLoad(base);
+        source = createLvalueSwizzle(getTypeId(tempBaseId), tempBaseId, rvalue, accessChain.swizzle);
+    }
+
+    // dynamic component selection
+    if (accessChain.component != NoResult) {
+        // reuse the swizzled value if one was just built, else load the target
+        Id tempBaseId = (source == NoResult) ? createLoad(base) : source;
+        source = createVectorInsertDynamic(tempBaseId, getTypeId(tempBaseId), rvalue, accessChain.component);
+    }
+
+    // neither swizzle nor dynamic component: store the rvalue directly
+    if (source == NoResult)
+        source = rvalue;
+
+    createStore(source, base);
+}
+
+// Comments in header
+// Load the value designated by the active access chain, returning an id of
+// 'resultType'.  Handles both l-value chains (loaded through a pointer) and
+// r-value chains (extracted in registers), then applies any residual
+// swizzle or dynamic component selection.
+Id Builder::accessChainLoad(Decoration precision, Id resultType)
+{
+    Id id;
+
+    if (accessChain.isRValue) {
+        // transfer access chain, but keep it static, so we can stay in registers
+        transferAccessChainSwizzle(false);
+        if (accessChain.indexChain.size() > 0) {
+            Id swizzleBase = accessChain.preSwizzleBaseType != NoType ? accessChain.preSwizzleBaseType : resultType;
+
+            // if all the accesses are constants, we can use OpCompositeExtract
+            std::vector<unsigned> indexes;
+            bool constant = true;
+            for (int i = 0; i < (int)accessChain.indexChain.size(); ++i) {
+                if (isConstantScalar(accessChain.indexChain[i]))
+                    indexes.push_back(getConstantScalar(accessChain.indexChain[i]));
+                else {
+                    constant = false;
+                    break;
+                }
+            }
+
+            if (constant)
+                id = createCompositeExtract(accessChain.base, swizzleBase, indexes);
+            else {
+                // dynamic index into an r-value: spill to memory so OpAccessChain applies
+                // make a new function variable for this r-value
+                Id lValue = createVariable(StorageClassFunction, getTypeId(accessChain.base), "indexable");
+
+                // store into it
+                createStore(accessChain.base, lValue);
+
+                // move base to the new variable
+                accessChain.base = lValue;
+                accessChain.isRValue = false;
+
+                // load through the access chain
+                id = createLoad(collapseAccessChain());
+            }
+            setPrecision(id, precision);
+        } else
+            id = accessChain.base;  // no precision, it was set when this was defined
+    } else {
+        transferAccessChainSwizzle(true);
+        // load through the access chain
+        id = createLoad(collapseAccessChain());
+        setPrecision(id, precision);
+    }
+
+    // Done, unless there are swizzles to do
+    if (accessChain.swizzle.size() == 0 && accessChain.component == NoResult)
+        return id;
+
+    // Do remaining swizzling
+    // First, static swizzling
+    if (accessChain.swizzle.size()) {
+        // static swizzle
+        Id swizzledType = getScalarTypeId(getTypeId(id));
+        if (accessChain.swizzle.size() > 1)
+            swizzledType = makeVectorType(swizzledType, (int)accessChain.swizzle.size());
+        id = createRvalueSwizzle(precision, swizzledType, id, accessChain.swizzle);
+    }
+
+    // dynamic single-component selection
+    if (accessChain.component != NoResult)
+        id = setPrecision(createVectorExtractDynamic(id, resultType, accessChain.component), precision);
+
+    return id;
+}
+
+// Collapse the active access chain to a single pointer usable as an
+// l-value.  Only valid when no residual swizzle or dynamic component
+// selection remains on the chain.
+Id Builder::accessChainGetLValue()
+{
+    assert(accessChain.isRValue == false);
+
+    transferAccessChainSwizzle(true);
+    Id lvalue = collapseAccessChain();
+
+    // If swizzle exists, it is out-of-order or not full, we must load the target vector,
+    // extract and insert elements to perform writeMask and/or swizzle.  This does not
+    // go with getting a direct l-value pointer.
+    assert(accessChain.swizzle.size() == 0);
+    assert(accessChain.component == NoResult);
+
+    return lvalue;
+}
+
+// comment in header
+// Walk the active access chain and return the type id the chain would
+// produce if loaded, or NoType when there is no base to operate on.
+Id Builder::accessChainGetInferredType()
+{
+    // anything to operate on?
+    if (accessChain.base == NoResult)
+        return NoType;
+    Id type = getTypeId(accessChain.base);
+
+    // do initial dereference
+    if (! accessChain.isRValue)
+        type = getContainedTypeId(type);
+
+    // dereference each index
+    for (auto it = accessChain.indexChain.cbegin(); it != accessChain.indexChain.cend(); ++it) {
+        if (isStructType(type))
+            // struct members can differ in type, so select by constant index
+            type = getContainedTypeId(type, getConstantScalar(*it));
+        else
+            type = getContainedTypeId(type);
+    }
+
+    // dereference swizzle
+    if (accessChain.swizzle.size() == 1)
+        type = getContainedTypeId(type);
+    else if (accessChain.swizzle.size() > 1)
+        type = makeVectorType(getContainedTypeId(type), (int)accessChain.swizzle.size());
+
+    // dereference component selection
+    if (accessChain.component)
+        type = getContainedTypeId(type);
+
+    return type;
+}
+
+// comment in header
+// Remove decorations whose target ids are defined only in unreachable
+// blocks, so the emitted module carries no annotations for dead code.
+void Builder::eliminateDeadDecorations() {
+    std::unordered_set<const Block*> reachable_blocks;
+    std::unordered_set<Id> unreachable_definitions;
+    // For each function: mark reachable blocks first, then record the
+    // result ids of every instruction in a block that was never marked.
+    for (auto* f : module.getFunctions()) {
+        inReadableOrder(f->getEntryBlock(), [&reachable_blocks](const Block* b) {
+            reachable_blocks.insert(b);
+        });
+        for (auto* b : f->getBlocks()) {
+            if (reachable_blocks.count(b) == 0) {
+                for (const auto& inst : b->getInstructions())
+                    unreachable_definitions.insert(inst->getResultId());
+            }
+        }
+    }
+    // Erase-remove any decoration whose decorated id was defined in dead code.
+    decorations.erase(std::remove_if(decorations.begin(), decorations.end(),
+        [&unreachable_definitions](std::unique_ptr<Instruction>& I) -> bool {
+            return unreachable_definitions.count(I->getIdOperand(0)) != 0;
+        }),
+        decorations.end());
+}
+
+// Serialize the module, in the section order required by the SPIR-V
+// specification, into the word stream 'out'.
+void Builder::dump(std::vector<unsigned int>& out) const
+{
+    // Header, before first instructions:
+    out.push_back(MagicNumber);
+    out.push_back(Version);
+    out.push_back(builderNumber);
+    out.push_back(uniqueId + 1);   // id bound: all ids are smaller than this
+    out.push_back(0);              // reserved schema word
+
+    // Capabilities
+    for (auto it = capabilities.cbegin(); it != capabilities.cend(); ++it) {
+        Instruction capInst(0, 0, OpCapability);
+        capInst.addImmediateOperand(*it);
+        capInst.dump(out);
+    }
+
+    // Extensions
+    for (auto it = extensions.cbegin(); it != extensions.cend(); ++it) {
+        Instruction extInst(0, 0, OpExtension);
+        extInst.addStringOperand(*it);
+        extInst.dump(out);
+    }
+
+    // Extended-instruction-set imports, then the single memory model
+    dumpInstructions(out, imports);
+    Instruction memInst(0, 0, OpMemoryModel);
+    memInst.addImmediateOperand(addressModel);
+    memInst.addImmediateOperand(memoryModel);
+    memInst.dump(out);
+
+    // Instructions saved up while building:
+    dumpInstructions(out, entryPoints);
+    dumpInstructions(out, executionModes);
+
+    // Debug instructions
+    if (source != SourceLanguageUnknown) {
+        Instruction sourceInst(0, 0, OpSource);
+        sourceInst.addImmediateOperand(source);
+        sourceInst.addImmediateOperand(sourceVersion);
+        sourceInst.dump(out);
+    }
+    for (int e = 0; e < (int)sourceExtensions.size(); ++e) {
+        Instruction sourceExtInst(0, 0, OpSourceExtension);
+        sourceExtInst.addStringOperand(sourceExtensions[e]);
+        sourceExtInst.dump(out);
+    }
+    dumpInstructions(out, names);
+    dumpInstructions(out, lines);
+
+    // Annotation instructions
+    dumpInstructions(out, decorations);
+
+    // Types, constants, and module-scope variables, then externals
+    dumpInstructions(out, constantsTypesGlobals);
+    dumpInstructions(out, externals);
+
+    // The functions
+    module.dump(out);
+}
+
+//
+// Protected methods.
+//
+
+// Turn the described access chain in 'accessChain' into an instruction
+// computing its address.  This *cannot* include complex swizzles, which must
+// be handled after this is called, but it does include swizzles that select
+// an individual element, as a single address of a scalar type can be
+// computed by an OpAccessChain instruction.
+Id Builder::collapseAccessChain()
+{
+    assert(accessChain.isRValue == false);
+
+    if (accessChain.indexChain.size() > 0) {
+        // cache the OpAccessChain so repeated collapses reuse one instruction
+        if (accessChain.instr == 0) {
+            StorageClass storageClass = (StorageClass)module.getStorageClass(getTypeId(accessChain.base));
+            accessChain.instr = createAccessChain(storageClass, accessChain.base, accessChain.indexChain);
+        }
+
+        return accessChain.instr;
+    } else
+        return accessChain.base;
+
+    // note that non-trivial swizzling is left pending...
+}
+
+// clear out swizzle if it is redundant, that is reselecting the same components
+// that would be present without the swizzle.
+void Builder::simplifyAccessChainSwizzle()
+{
+    // If the swizzle has fewer components than the vector, it is subsetting, and must stay
+    // to preserve that fact.
+    if (getNumTypeComponents(accessChain.preSwizzleBaseType) > (int)accessChain.swizzle.size())
+        return;
+
+    // if components are out of order, it is a swizzle
+    for (unsigned int i = 0; i < accessChain.swizzle.size(); ++i) {
+        if (i != accessChain.swizzle[i])
+            return;
+    }
+
+    // otherwise, there is no need to track this swizzle
+    accessChain.swizzle.clear();
+    // only drop the pre-swizzle type if no dynamic component selection still needs it
+    if (accessChain.component == NoResult)
+        accessChain.preSwizzleBaseType = NoType;
+}
+
+// To the extent any swizzling can become part of the chain
+// of accesses instead of a post operation, make it so.
+// If 'dynamic' is true, include transferring a non-static component index,
+// otherwise, only transfer static indexes.
+//
+// Also, Boolean vectors are likely to be special.  While
+// for external storage, they should only be integer types,
+// function-local bool vectors could use sub-word indexing,
+// so keep that as a separate Insert/Extract on a loaded vector.
+void Builder::transferAccessChainSwizzle(bool dynamic)
+{
+    // too complex?  multi-component swizzles stay as post operations
+    if (accessChain.swizzle.size() > 1)
+        return;
+
+    // non existent?
+    if (accessChain.swizzle.size() == 0 && accessChain.component == NoResult)
+        return;
+
+    // single component...
+
+    // skip doing it for Boolean vectors
+    if (isBoolType(getContainedTypeId(accessChain.preSwizzleBaseType)))
+        return;
+
+    if (accessChain.swizzle.size() == 1) {
+        // handle static component: fold it into the chain as a constant index
+        accessChain.indexChain.push_back(makeUintConstant(accessChain.swizzle.front()));
+        accessChain.swizzle.clear();
+        // note, the only valid remaining dynamic access would be to this one
+        // component, so don't bother even looking at accessChain.component
+        accessChain.preSwizzleBaseType = NoType;
+        accessChain.component = NoResult;
+    } else if (dynamic && accessChain.component != NoResult) {
+        // handle dynamic component: fold the runtime index into the chain
+        accessChain.indexChain.push_back(accessChain.component);
+        accessChain.preSwizzleBaseType = NoType;
+        accessChain.component = NoResult;
+    }
+}
+
+// Utility method for creating a new block and setting the insert point to
+// be in it. This is useful for flow-control operations that need a "dummy"
+// block proceeding them (e.g. instructions after a discard, etc).
+void Builder::createAndSetNoPredecessorBlock(const char* /*name*/)
+{
+    Block* block = new Block(getUniqueId(), buildPoint->getParent());
+    // mark it unreachable: nothing branches here, it exists only to hold dead code
+    block->setUnreachable();
+    buildPoint->getParent().addBlock(block);
+    setBuildPoint(block);
+
+    //if (name)
+    //    addName(block->getId(), name);
+}
+
+// Comments in header
+// Emit an unconditional OpBranch to 'block' from the current build point,
+// and record the predecessor edge on the target.
+void Builder::createBranch(Block* block)
+{
+    auto* inst = new Instruction(OpBranch);
+    inst->addIdOperand(block->getId());
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+    block->addPredecessor(buildPoint);
+}
+
+// Emit OpSelectionMerge naming 'mergeBlock', with the given selection
+// control mask, into the current build point.
+void Builder::createSelectionMerge(Block* mergeBlock, unsigned int control)
+{
+    auto* inst = new Instruction(OpSelectionMerge);
+    inst->addIdOperand(mergeBlock->getId());
+    inst->addImmediateOperand(control);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+}
+
+// Emit OpLoopMerge naming the merge and continue blocks, with the given
+// loop control mask, into the current build point.
+void Builder::createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control)
+{
+    auto* inst = new Instruction(OpLoopMerge);
+    inst->addIdOperand(mergeBlock->getId());
+    inst->addIdOperand(continueBlock->getId());
+    inst->addImmediateOperand(control);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+}
+
+// Emit OpBranchConditional from the current build point and record the
+// predecessor edge on both successor blocks.
+void Builder::createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock)
+{
+    auto* inst = new Instruction(OpBranchConditional);
+    inst->addIdOperand(condition);
+    inst->addIdOperand(thenBlock->getId());
+    inst->addIdOperand(elseBlock->getId());
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+    thenBlock->addPredecessor(buildPoint);
+    elseBlock->addPredecessor(buildPoint);
+}
+
+// Serialize each instruction in 'instructions', in order, into 'out'.
+void Builder::dumpInstructions(std::vector<unsigned int>& out, const std::vector<std::unique_ptr<Instruction> >& instructions) const
+{
+    for (const auto& instruction : instructions)
+        instruction->dump(out);
+}
+
+}; // end spv namespace

+ 598 - 0
3rdparty/glslang/SPIRV/SpvBuilder.h

@@ -0,0 +1,598 @@
+//
+//Copyright (C) 2014-2015 LunarG, Inc.
+//Copyright (C) 2015-2016 Google, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+
+//
+// "Builder" is an interface to fully build SPIR-V IR.   Allocate one of
+// these to build (a thread safe) internal SPIR-V representation (IR),
+// and then dump it as a binary stream according to the SPIR-V specification.
+//
+// A Builder has a 1:1 relationship with a SPIR-V module.
+//
+
+#pragma once
+#ifndef SpvBuilder_H
+#define SpvBuilder_H
+
+#include "Logger.h"
+#include "spirv.hpp"
+#include "spvIR.h"
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <set>
+#include <sstream>
+#include <stack>
+
+namespace spv {
+
+class Builder {
+public:
+    Builder(unsigned int userNumber, SpvBuildLogger* logger);
+    virtual ~Builder();
+
+    static const int maxMatrixSize = 4;
+
+    void setSource(spv::SourceLanguage lang, int version)
+    {
+        source = lang;
+        sourceVersion = version;
+    }
+    void addSourceExtension(const char* ext) { sourceExtensions.push_back(ext); }
+    // NOTE(review): 'extensions' is a std::set<const char*>, so deduplication is
+    // by pointer identity, not string content — this only dedupes when callers
+    // pass the same string literal address.  TODO confirm all callers do.
+    void addExtension(const char* ext) { extensions.insert(ext); }
+    Id import(const char*);
+    void setMemoryModel(spv::AddressingModel addr, spv::MemoryModel mem)
+    {
+        addressModel = addr;
+        memoryModel = mem;
+    }
+
+    void addCapability(spv::Capability cap) { capabilities.insert(cap); }
+
+    // To get a new <id> for anything needing a new one.
+    Id getUniqueId() { return ++uniqueId; }
+
+    // To get a set of new <id>s, e.g., for a set of function parameters
+    // Returns the first id of a contiguous range of 'numIds' fresh ids.
+    Id getUniqueIds(int numIds)
+    {
+        Id id = uniqueId + 1;
+        uniqueId += numIds;
+        return id;
+    }
+
+    // For creating new types (will return old type if the requested one was already made).
+    Id makeVoidType();
+    Id makeBoolType();
+    Id makePointer(StorageClass, Id type);
+    Id makeIntegerType(int width, bool hasSign);   // generic
+    Id makeIntType(int width) { return makeIntegerType(width, true); }
+    Id makeUintType(int width) { return makeIntegerType(width, false); }
+    Id makeFloatType(int width);
+    Id makeStructType(const std::vector<Id>& members, const char*);
+    Id makeStructResultType(Id type0, Id type1);
+    Id makeVectorType(Id component, int size);
+    Id makeMatrixType(Id component, int cols, int rows);
+    Id makeArrayType(Id element, Id sizeId, int stride);  // 0 stride means no stride decoration
+    Id makeRuntimeArray(Id element);
+    Id makeFunctionType(Id returnType, const std::vector<Id>& paramTypes);
+    Id makeImageType(Id sampledType, Dim, bool depth, bool arrayed, bool ms, unsigned sampled, ImageFormat format);
+    Id makeSamplerType();
+    Id makeSampledImageType(Id imageType);
+
+    // For querying about types.
+    Id getTypeId(Id resultId) const { return module.getTypeId(resultId); }
+    Id getDerefTypeId(Id resultId) const;
+    Op getOpCode(Id id) const { return module.getInstruction(id)->getOpCode(); }
+    Op getTypeClass(Id typeId) const { return getOpCode(typeId); }
+    Op getMostBasicTypeClass(Id typeId) const;
+    int getNumComponents(Id resultId) const { return getNumTypeComponents(getTypeId(resultId)); }
+    int getNumTypeConstituents(Id typeId) const;
+    int getNumTypeComponents(Id typeId) const { return getNumTypeConstituents(typeId); }
+    Id getScalarTypeId(Id typeId) const;
+    Id getContainedTypeId(Id typeId) const;
+    Id getContainedTypeId(Id typeId, int) const;
+    StorageClass getTypeStorageClass(Id typeId) const { return module.getStorageClass(typeId); }
+    ImageFormat getImageTypeFormat(Id typeId) const { return (ImageFormat)module.getInstruction(typeId)->getImmediateOperand(6); }
+
+    bool isPointer(Id resultId)      const { return isPointerType(getTypeId(resultId)); }
+    bool isScalar(Id resultId)       const { return isScalarType(getTypeId(resultId)); }
+    bool isVector(Id resultId)       const { return isVectorType(getTypeId(resultId)); }
+    bool isMatrix(Id resultId)       const { return isMatrixType(getTypeId(resultId)); }
+    bool isAggregate(Id resultId)    const { return isAggregateType(getTypeId(resultId)); }
+    bool isSampledImage(Id resultId) const { return isSampledImageType(getTypeId(resultId)); }
+
+    // Only one OpTypeBool is ever created (types are deduplicated), so comparing
+    // against the last grouped bool type is sufficient.
+    bool isBoolType(Id typeId)         const { return groupedTypes[OpTypeBool].size() > 0 && typeId == groupedTypes[OpTypeBool].back()->getResultId(); }
+    bool isPointerType(Id typeId)      const { return getTypeClass(typeId) == OpTypePointer; }
+    bool isScalarType(Id typeId)       const { return getTypeClass(typeId) == OpTypeFloat  || getTypeClass(typeId) == OpTypeInt || getTypeClass(typeId) == OpTypeBool; }
+    bool isVectorType(Id typeId)       const { return getTypeClass(typeId) == OpTypeVector; }
+    bool isMatrixType(Id typeId)       const { return getTypeClass(typeId) == OpTypeMatrix; }
+    bool isStructType(Id typeId)       const { return getTypeClass(typeId) == OpTypeStruct; }
+    bool isArrayType(Id typeId)        const { return getTypeClass(typeId) == OpTypeArray; }
+    bool isAggregateType(Id typeId)    const { return isArrayType(typeId) || isStructType(typeId); }
+    bool isImageType(Id typeId)        const { return getTypeClass(typeId) == OpTypeImage; }
+    bool isSamplerType(Id typeId)      const { return getTypeClass(typeId) == OpTypeSampler; }
+    bool isSampledImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampledImage; }
+
+    bool isConstantOpCode(Op opcode) const;
+    bool isSpecConstantOpCode(Op opcode) const;
+    bool isConstant(Id resultId) const { return isConstantOpCode(getOpCode(resultId)); }
+    bool isConstantScalar(Id resultId) const { return getOpCode(resultId) == OpConstant; }
+    bool isSpecConstant(Id resultId) const { return isSpecConstantOpCode(getOpCode(resultId)); }
+    unsigned int getConstantScalar(Id resultId) const { return module.getInstruction(resultId)->getImmediateOperand(0); }
+    StorageClass getStorageClass(Id resultId) const { return getTypeStorageClass(getTypeId(resultId)); }
+
+    int getTypeNumColumns(Id typeId) const
+    {
+        assert(isMatrixType(typeId));
+        return getNumTypeConstituents(typeId);
+    }
+    int getNumColumns(Id resultId) const { return getTypeNumColumns(getTypeId(resultId)); }
+    int getTypeNumRows(Id typeId) const
+    {
+        assert(isMatrixType(typeId));
+        return getNumTypeComponents(getContainedTypeId(typeId));
+    }
+    int getNumRows(Id resultId) const { return getTypeNumRows(getTypeId(resultId)); }
+
+    Dim getTypeDimensionality(Id typeId) const
+    {
+        assert(isImageType(typeId));
+        return (Dim)module.getInstruction(typeId)->getImmediateOperand(1);
+    }
+    Id getImageType(Id resultId) const
+    {
+        Id typeId = getTypeId(resultId);
+        assert(isImageType(typeId) || isSampledImageType(typeId));
+        return isSampledImageType(typeId) ? module.getInstruction(typeId)->getIdOperand(0) : typeId;
+    }
+    bool isArrayedImageType(Id typeId) const
+    {
+        assert(isImageType(typeId));
+        return module.getInstruction(typeId)->getImmediateOperand(3) != 0;
+    }
+
+    // For making new constants (will return old constant if the requested one was already made).
+    Id makeBoolConstant(bool b, bool specConstant = false);
+    Id makeIntConstant(int i, bool specConstant = false)         { return makeIntConstant(makeIntType(32),  (unsigned)i, specConstant); }
+    Id makeUintConstant(unsigned u, bool specConstant = false)   { return makeIntConstant(makeUintType(32),           u, specConstant); }
+    Id makeInt64Constant(long long i, bool specConstant = false)            { return makeInt64Constant(makeIntType(64),  (unsigned long long)i, specConstant); }
+    Id makeUint64Constant(unsigned long long u, bool specConstant = false)  { return makeInt64Constant(makeUintType(64),                     u, specConstant); }
+    Id makeFloatConstant(float f, bool specConstant = false);
+    Id makeDoubleConstant(double d, bool specConstant = false);
+#ifdef AMD_EXTENSIONS
+    Id makeFloat16Constant(float f16, bool specConstant = false);
+#endif
+
+    // Turn the array of constants into a proper spv constant of the requested type.
+    Id makeCompositeConstant(Id type, std::vector<Id>& comps, bool specConst = false);
+
+    // Methods for adding information outside the CFG.
+    Instruction* addEntryPoint(ExecutionModel, Function*, const char* name);
+    void addExecutionMode(Function*, ExecutionMode mode, int value1 = -1, int value2 = -1, int value3 = -1);
+    void addName(Id, const char* name);
+    void addMemberName(Id, int member, const char* name);
+    void addLine(Id target, Id fileName, int line, int column);
+    // NOTE(review): a negative 'num' appears to mean "no literal operand" —
+    // confirm against the definitions in SpvBuilder.cpp.
+    void addDecoration(Id, Decoration, int num = -1);
+    void addMemberDecoration(Id, unsigned int member, Decoration, int num = -1);
+
+    // At the end of what block do the next create*() instructions go?
+    void setBuildPoint(Block* bp) { buildPoint = bp; }
+    Block* getBuildPoint() const { return buildPoint; }
+
+    // Make the entry-point function. The returned pointer is only valid
+    // for the lifetime of this builder.
+    Function* makeEntryPoint(const char*);
+
+    // Make a shader-style function, and create its entry block if entry is non-zero.
+    // Return the function, pass back the entry.
+    // The returned pointer is only valid for the lifetime of this builder.
+    Function* makeFunctionEntry(Decoration precision, Id returnType, const char* name, const std::vector<Id>& paramTypes,
+                                const std::vector<Decoration>& precisions, Block **entry = 0);
+
+    // Create a return. An 'implicit' return is one not appearing in the source
+    // code.  In the case of an implicit return, no post-return block is inserted.
+    void makeReturn(bool implicit, Id retVal = 0);
+
+    // Generate all the code needed to finish up a function.
+    void leaveFunction();
+
+    // Create a discard.
+    void makeDiscard();
+
+    // Create a global or function local or IO variable.
+    Id createVariable(StorageClass, Id type, const char* name = 0);
+
+    // Create an intermediate with an undefined value.
+    Id createUndefined(Id type);
+
+    // Store into an Id and return the l-value
+    void createStore(Id rValue, Id lValue);
+
+    // Load from an Id and return it
+    Id createLoad(Id lValue);
+
+    // Create an OpAccessChain instruction
+    Id createAccessChain(StorageClass, Id base, std::vector<Id>& offsets);
+
+    // Create an OpArrayLength instruction
+    Id createArrayLength(Id base, unsigned int member);
+
+    // Create an OpCompositeExtract instruction
+    Id createCompositeExtract(Id composite, Id typeId, unsigned index);
+    Id createCompositeExtract(Id composite, Id typeId, std::vector<unsigned>& indexes);
+    Id createCompositeInsert(Id object, Id composite, Id typeId, unsigned index);
+    Id createCompositeInsert(Id object, Id composite, Id typeId, std::vector<unsigned>& indexes);
+
+    Id createVectorExtractDynamic(Id vector, Id typeId, Id componentIndex);
+    Id createVectorInsertDynamic(Id vector, Id typeId, Id component, Id componentIndex);
+
+    void createNoResultOp(Op);
+    void createNoResultOp(Op, Id operand);
+    void createNoResultOp(Op, const std::vector<Id>& operands);
+    void createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask);
+    void createMemoryBarrier(unsigned executionScope, unsigned memorySemantics);
+    Id createUnaryOp(Op, Id typeId, Id operand);
+    Id createBinOp(Op, Id typeId, Id operand1, Id operand2);
+    Id createTriOp(Op, Id typeId, Id operand1, Id operand2, Id operand3);
+    Id createOp(Op, Id typeId, const std::vector<Id>& operands);
+    Id createFunctionCall(spv::Function*, std::vector<spv::Id>&);
+    Id createSpecConstantOp(Op, Id typeId, const std::vector<spv::Id>& operands, const std::vector<unsigned>& literals);
+
+    // Take an rvalue (source) and a set of channels to extract from it to
+    // make a new rvalue, which is returned.
+    Id createRvalueSwizzle(Decoration precision, Id typeId, Id source, std::vector<unsigned>& channels);
+
+    // Take a copy of an lvalue (target) and a source of components, and set the
+    // source components into the lvalue where the 'channels' say to put them.
+    // An updated version of the target is returned.
+    // (No true lvalue or stores are used.)
+    Id createLvalueSwizzle(Id typeId, Id target, Id source, std::vector<unsigned>& channels);
+
+    // If both the id and precision are valid, the id
+    // gets tagged with the requested precision.
+    // The passed in id is always the returned id, to simplify use patterns.
+    Id setPrecision(Id id, Decoration precision)
+    {
+        if (precision != NoPrecision && id != NoResult)
+            addDecoration(id, precision);
+
+        return id;
+    }
+
+    // Can smear a scalar to a vector for the following forms:
+    //   - promoteScalar(scalar, vector)  // smear scalar to width of vector
+    //   - promoteScalar(vector, scalar)  // smear scalar to width of vector
+    //   - promoteScalar(pointer, scalar) // smear scalar to width of what pointer points to
+    //   - promoteScalar(scalar, scalar)  // do nothing
+    // Other forms are not allowed.
+    //
+    // Generally, the type of 'scalar' does not need to be the same type as the components in 'vector'.
+    // The type of the created vector is a vector of components of the same type as the scalar.
+    //
+    // Note: One of the arguments will change, with the result coming back that way rather than 
+    // through the return value.
+    void promoteScalar(Decoration precision, Id& left, Id& right);
+
+    // Make a value by smearing the scalar to fill the type.
+    // vectorType should be the correct type for making a vector of scalarVal.
+    // (No conversions are done.)
+    Id smearScalar(Decoration precision, Id scalarVal, Id vectorType);
+
+    // Create a call to a built-in function.
+    Id createBuiltinCall(Id resultType, Id builtins, int entryPoint, std::vector<Id>& args);
+
+    // List of parameters used to create a texture operation
+    struct TextureParameters {
+        Id sampler;
+        Id coords;
+        Id bias;
+        Id lod;
+        Id Dref;
+        Id offset;
+        Id offsets;
+        Id gradX;
+        Id gradY;
+        Id sample;
+        Id component;
+        Id texelOut;
+        Id lodClamp;
+    };
+
+    // Select the correct texture operation based on all inputs, and emit the correct instruction
+    Id createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather, bool noImplicit, const TextureParameters&);
+
+    // Emit the OpTextureQuery* instruction that was passed in.
+    // Figure out the right return value and type, and return it.
+    Id createTextureQueryCall(Op, const TextureParameters&);
+
+    Id createSamplePositionCall(Decoration precision, Id, Id);
+
+    Id createBitFieldExtractCall(Decoration precision, Id, Id, Id, bool isSigned);
+    Id createBitFieldInsertCall(Decoration precision, Id, Id, Id, Id);
+
+    // Reduction comparison for composites:  For equal and not-equal resulting in a scalar.
+    Id createCompositeCompare(Decoration precision, Id, Id, bool /* true if for equal, false if for not-equal */);
+
+    // OpCompositeConstruct
+    Id createCompositeConstruct(Id typeId, std::vector<Id>& constituents);
+
+    // vector or scalar constructor
+    Id createConstructor(Decoration precision, const std::vector<Id>& sources, Id resultTypeId);
+
+    // matrix constructor
+    Id createMatrixConstructor(Decoration precision, const std::vector<Id>& sources, Id constructee);
+
+    // Helper to use for building nested control flow with if-then-else.
+    // Non-copyable: holds references into the Builder's CFG under construction.
+    class If {
+    public:
+        If(Id condition, Builder& builder);
+        ~If() {}
+
+        void makeBeginElse();
+        void makeEndIf();
+
+    private:
+        If(const If&);
+        If& operator=(If&);
+
+        Builder& builder;
+        Id condition;
+        Function* function;
+        Block* headerBlock;
+        Block* thenBlock;
+        Block* elseBlock;
+        Block* mergeBlock;
+    };
+
+    // Make a switch statement.  A switch has 'numSegments' of pieces of code, not containing
+    // any case/default labels, all separated by one or more case/default labels.  Each possible
+    // case value v is a jump to the caseValues[v] segment.  The defaultSegment is also in this
+    // number space.  How to compute the value is given by 'condition', as in switch(condition).
+    //
+    // The SPIR-V Builder will maintain the stack of post-switch merge blocks for nested switches.
+    //
+    // Use a defaultSegment < 0 if there is no default segment (to branch to post switch).
+    //
+    // Returns the right set of basic blocks to start each code segment with, so that the caller's
+    // recursion stack can hold the memory for it.
+    //
+    void makeSwitch(Id condition, int numSegments, std::vector<int>& caseValues, std::vector<int>& valueToSegment, int defaultSegment,
+                    std::vector<Block*>& segmentBB);  // return argument
+
+    // Add a branch to the innermost switch's merge block.
+    void addSwitchBreak();
+
+    // Move to the next code segment, passing in the return argument in makeSwitch()
+    void nextSwitchSegment(std::vector<Block*>& segmentBB, int segment);
+
+    // Finish off the innermost switch.
+    void endSwitch(std::vector<Block*>& segmentBB);
+
+    // Bundle of references to the four blocks that define a structured loop.
+    struct LoopBlocks {
+        LoopBlocks(Block& head, Block& body, Block& merge, Block& continue_target) :
+            head(head), body(body), merge(merge), continue_target(continue_target) { }
+        Block &head, &body, &merge, &continue_target;
+    private:
+        LoopBlocks();
+        LoopBlocks& operator=(const LoopBlocks&);
+    };
+
+    // Start a new loop and prepare the builder to generate code for it.  Until
+    // closeLoop() is called for this loop, createLoopContinue() and
+    // createLoopExit() will target its corresponding blocks.
+    LoopBlocks& makeNewLoop();
+
+    // Create a new block in the function containing the build point.  Memory is
+    // owned by the function object.
+    Block& makeNewBlock();
+
+    // Add a branch to the continue_target of the current (innermost) loop.
+    void createLoopContinue();
+
+    // Add an exit (e.g. "break") from the innermost loop that we're currently
+    // in.
+    void createLoopExit();
+
+    // Close the innermost loop that you're in
+    void closeLoop();
+
+    //
+    // Access chain design for an R-Value vs. L-Value:
+    //
+    // There is a single access chain the builder is building at
+    // any particular time.  Such a chain can be used to either to a load or
+    // a store, when desired.
+    //
+    // Expressions can be r-values, l-values, or both, or only r-values:
+    //    a[b.c].d = ....  // l-value
+    //    ... = a[b.c].d;  // r-value, that also looks like an l-value
+    //    ++a[b.c].d;      // r-value and l-value
+    //    (x + y)[2];      // r-value only, can't possibly be l-value
+    //
+    // Computing an r-value means generating code.  Hence,
+    // r-values should only be computed when they are needed, not speculatively.
+    //
+    // Computing an l-value means saving away information for later use in the compiler,
+    // no code is generated until the l-value is later dereferenced.  It is okay
+    // to speculatively generate an l-value, just not okay to speculatively dereference it.
+    //
+    // The base of the access chain (the left-most variable or expression
+    // from which everything is based) can be set either as an l-value
+    // or as an r-value.  Most efficient would be to set an l-value if one
+    // is available.  If an expression was evaluated, the resulting r-value
+    // can be set as the chain base.
+    //
+    // The users of this single access chain can save and restore if they
+    // want to nest or manage multiple chains.
+    //
+
+    struct AccessChain {
+        Id base;                       // for l-values, pointer to the base object, for r-values, the base object
+        std::vector<Id> indexChain;
+        Id instr;                      // cache the instruction that generates this access chain
+        std::vector<unsigned> swizzle; // each std::vector element selects the next GLSL component number
+        Id component;                  // a dynamic component index, can coexist with a swizzle, done after the swizzle, NoResult if not present
+        Id preSwizzleBaseType;         // dereferenced type, before swizzle or component is applied; NoType unless a swizzle or component is present
+        bool isRValue;                 // true if 'base' is an r-value, otherwise, base is an l-value
+    };
+
+    //
+    // the SPIR-V builder maintains a single active chain that
+    // the following methods operate on
+    //
+
+    // for external save and restore
+    AccessChain getAccessChain() { return accessChain; }
+    void setAccessChain(AccessChain newChain) { accessChain = newChain; }
+
+    // clear accessChain
+    void clearAccessChain();
+
+    // set new base as an l-value base
+    void setAccessChainLValue(Id lValue)
+    {
+        assert(isPointer(lValue));
+        accessChain.base = lValue;
+    }
+
+    // set new base value as an r-value
+    void setAccessChainRValue(Id rValue)
+    {
+        accessChain.isRValue = true;
+        accessChain.base = rValue;
+    }
+
+    // push offset onto the end of the chain
+    void accessChainPush(Id offset)
+    {
+        accessChain.indexChain.push_back(offset);
+    }
+
+    // push new swizzle onto the end of any existing swizzle, merging into a single swizzle
+    void accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizzleBaseType);
+
+    // push a variable component selection onto the access chain; supporting only one, so unsided
+    void accessChainPushComponent(Id component, Id preSwizzleBaseType)
+    {
+        accessChain.component = component;
+        if (accessChain.preSwizzleBaseType == NoType)
+            accessChain.preSwizzleBaseType = preSwizzleBaseType;
+    }
+
+    // use accessChain and swizzle to store value
+    void accessChainStore(Id rvalue);
+
+    // use accessChain and swizzle to load an r-value
+    Id accessChainLoad(Decoration precision, Id ResultType);
+
+    // get the direct pointer for an l-value
+    Id accessChainGetLValue();
+
+    // Get the inferred SPIR-V type of the result of the current access chain,
+    // based on the type of the base and the chain of dereferences.
+    Id accessChainGetInferredType();
+
+    // Remove OpDecorate instructions whose operands are defined in unreachable
+    // blocks.
+    void eliminateDeadDecorations();
+    void dump(std::vector<unsigned int>&) const;
+
+    void createBranch(Block* block);
+    void createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock);
+    void createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control);
+
+    // Sets to generate opcode for specialization constants.
+    void setToSpecConstCodeGenMode() { generatingOpCodeForSpecConst = true; }
+    // Sets to generate opcode for non-specialization constants (normal mode).
+    void setToNormalCodeGenMode() { generatingOpCodeForSpecConst = false; }
+    // Check if the builder is generating code for spec constants.
+    bool isInSpecConstCodeGenMode() { return generatingOpCodeForSpecConst; }
+
+ protected:
+    Id makeIntConstant(Id typeId, unsigned value, bool specConstant);
+    Id makeInt64Constant(Id typeId, unsigned long long value, bool specConstant);
+    Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value) const;
+    Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2) const;
+    Id findCompositeConstant(Op typeClass, std::vector<Id>& comps) const;
+    Id collapseAccessChain();
+    void transferAccessChainSwizzle(bool dynamic);
+    void simplifyAccessChainSwizzle();
+    void createAndSetNoPredecessorBlock(const char*);
+    void createSelectionMerge(Block* mergeBlock, unsigned int control);
+    void dumpInstructions(std::vector<unsigned int>&, const std::vector<std::unique_ptr<Instruction> >&) const;
+
+    SourceLanguage source;
+    int sourceVersion;
+    std::set<const char*> extensions;
+    std::vector<const char*> sourceExtensions;
+    AddressingModel addressModel;
+    MemoryModel memoryModel;
+    std::set<spv::Capability> capabilities;
+    int builderNumber;
+    Module module;
+    Block* buildPoint;
+    Id uniqueId;
+    Function* mainFunction;
+    bool generatingOpCodeForSpecConst;
+    AccessChain accessChain;
+
+    // special blocks of instructions for output
+    std::vector<std::unique_ptr<Instruction> > imports;
+    std::vector<std::unique_ptr<Instruction> > entryPoints;
+    std::vector<std::unique_ptr<Instruction> > executionModes;
+    std::vector<std::unique_ptr<Instruction> > names;
+    std::vector<std::unique_ptr<Instruction> > lines;
+    std::vector<std::unique_ptr<Instruction> > decorations;
+    std::vector<std::unique_ptr<Instruction> > constantsTypesGlobals;
+    std::vector<std::unique_ptr<Instruction> > externals;
+    std::vector<std::unique_ptr<Function> > functions;
+
+     // not output, internally used for quick & dirty canonical (unique) creation
+    // Arrays are indexed by opcode; sized OpConstant because all type opcodes
+    // precede OpConstant in the SPIR-V opcode enumeration.
+    std::vector<Instruction*> groupedConstants[OpConstant];  // all types appear before OpConstant
+    std::vector<Instruction*> groupedTypes[OpConstant];
+
+    // stack of switches
+    std::stack<Block*> switchMerges;
+
+    // Our loop stack.
+    std::stack<LoopBlocks> loops;
+
+    // The stream for outputing warnings and errors.
+    SpvBuildLogger* logger;
+};  // end Builder class
+
+};  // end spv namespace
+
+#endif // SpvBuilder_H

+ 81 - 0
3rdparty/glslang/SPIRV/bitutils.h

@@ -0,0 +1,81 @@
+// Copyright (c) 2015-2016 The Khronos Group Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef LIBSPIRV_UTIL_BITUTILS_H_
+#define LIBSPIRV_UTIL_BITUTILS_H_
+
+#include <cstdint>
+#include <cstring>
+
+namespace spvutils {
+
+// Performs a bitwise copy of source to the destination type Dest.
+// Uses std::memcpy rather than a pointer cast, which avoids the undefined
+// behavior of strict-aliasing violations when reinterpreting bit patterns
+// (e.g. float <-> uint32_t).
+template <typename Dest, typename Src>
+Dest BitwiseCast(Src source) {
+  Dest dest;
+  static_assert(sizeof(source) == sizeof(dest),
+                "BitwiseCast: Source and destination must have the same size");
+  std::memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+// SetBits<T, First, Num> returns an integer of type <T> with bits set
+// for position <First> through <First + Num - 1>, counting from the least
+// significant bit. In particular when Num == 0, no positions are set to 1.
+// A static assert will be triggered if First + Num > sizeof(T) * 8, that is,
+// a bit that will not fit in the underlying type is set.
+// Implemented as a compile-time recursion: each step ORs in bit <First> and
+// recurses with <First + 1, Num - 1> until the Num == 0 base case below.
+template <typename T, size_t First = 0, size_t Num = 0>
+struct SetBits {
+  static_assert(First < sizeof(T) * 8,
+                "Tried to set a bit that is shifted too far.");
+  const static T get = (T(1) << First) | SetBits<T, First + 1, Num - 1>::get;
+};
+
+// Base case: zero bits requested yields 0 (and stops the recursion before
+// the shift-too-far static_assert can fire for an exclusive end position).
+template <typename T, size_t Last>
+struct SetBits<T, Last, 0> {
+  const static T get = T(0);
+};
+
+// This is all compile-time so we can put our tests right here.
+static_assert(SetBits<uint32_t, 0, 0>::get == uint32_t(0x00000000),
+              "SetBits failed");
+static_assert(SetBits<uint32_t, 0, 1>::get == uint32_t(0x00000001),
+              "SetBits failed");
+static_assert(SetBits<uint32_t, 31, 1>::get == uint32_t(0x80000000),
+              "SetBits failed");
+static_assert(SetBits<uint32_t, 1, 2>::get == uint32_t(0x00000006),
+              "SetBits failed");
+static_assert(SetBits<uint32_t, 30, 2>::get == uint32_t(0xc0000000),
+              "SetBits failed");
+static_assert(SetBits<uint32_t, 0, 31>::get == uint32_t(0x7FFFFFFF),
+              "SetBits failed");
+static_assert(SetBits<uint32_t, 0, 32>::get == uint32_t(0xFFFFFFFF),
+              "SetBits failed");
+static_assert(SetBits<uint32_t, 16, 16>::get == uint32_t(0xFFFF0000),
+              "SetBits failed");
+
+static_assert(SetBits<uint64_t, 0, 1>::get == uint64_t(0x0000000000000001LL),
+              "SetBits failed");
+static_assert(SetBits<uint64_t, 63, 1>::get == uint64_t(0x8000000000000000LL),
+              "SetBits failed");
+static_assert(SetBits<uint64_t, 62, 2>::get == uint64_t(0xc000000000000000LL),
+              "SetBits failed");
+static_assert(SetBits<uint64_t, 31, 1>::get == uint64_t(0x0000000080000000LL),
+              "SetBits failed");
+static_assert(SetBits<uint64_t, 16, 16>::get == uint64_t(0x00000000FFFF0000LL),
+              "SetBits failed");
+
+}  // namespace spvutils
+
+#endif  // LIBSPIRV_UTIL_BITUTILS_H_

+ 643 - 0
3rdparty/glslang/SPIRV/disassemble.cpp

@@ -0,0 +1,643 @@
+//
+//Copyright (C) 2014-2015 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Disassembler for SPIR-V.
+//
+
+#include <cstdlib>
+#include <cstring>
+#include <cassert>
+#include <iomanip>
+#include <stack>
+#include <sstream>
+#include <cstring>
+
+#include "disassemble.h"
+#include "doc.h"
+
+namespace spv {
+    extern "C" {
+        // Include C-based headers that don't have a namespace
+        #include "GLSL.std.450.h"
+#ifdef AMD_EXTENSIONS
+        #include "GLSL.ext.AMD.h"
+#endif
+    }
+}
+// Debug-name table for the GLSL.std.450 extended instruction set; populated
+// by GLSLstd450GetDebugNames() at the start of Disassemble().
+const char* GlslStd450DebugNames[spv::GLSLstd450Count];
+
+namespace spv {
+
+#ifdef AMD_EXTENSIONS
+static const char* GLSLextAMDGetDebugNames(const char*, unsigned);
+#endif
+
+// Fatal-error path: report 'message' on the disassembly stream and stop.
+// NOTE(review): exit(1) terminates the whole host process, not just the
+// disassembler -- confirm that is acceptable for library embedding.
+static void Kill(std::ostream& out, const char* message)
+{
+    out << std::endl << "Disassembly failed: " << message << std::endl;
+    exit(1);
+}
+
+// used to identify the extended instruction library imported when printing
+enum ExtInstSet {
+    GLSL450Inst,
+#ifdef AMD_EXTENSIONS
+    GLSLextAMDInst,
+#endif
+    OpenCLExtInst,
+};
+
+// Container class for a single instance of a SPIR-V stream, with methods for disassembly.
+class SpirvStream {
+public:
+    SpirvStream(std::ostream& out, const std::vector<unsigned int>& stream) : out(out), stream(stream), word(0), nextNestedControl(0) { }
+    virtual ~SpirvStream() { }
+
+    // Consumes and prints the 5-word module header (magic, version,
+    // generator, id bound, schema); aborts via Kill() on a short stream or
+    // non-zero schema.  Must be called before processInstructions().
+    void validate();
+    // Disassembles every instruction from the current position to the end.
+    void processInstructions();
+
+protected:
+    // Noncopyable: declared but not defined to forbid copies of the reader.
+    SpirvStream(const SpirvStream&);
+    SpirvStream& operator=(const SpirvStream&);
+    // Opcode of the instruction that produced 'id', or OpNop if that
+    // instruction has not been seen yet (forward reference).
+    Op getOpCode(int id) const { return idInstruction[id] ? (Op)(stream[idInstruction[id]] & OpCodeMask) : OpNop; }
+
+    // Output methods
+    void outputIndent();
+    void formatId(Id id, std::stringstream&);
+    void outputResultId(Id id);
+    void outputTypeId(Id id);
+    void outputId(Id id);
+    void outputMask(OperandClass operandClass, unsigned mask);
+    void disassembleImmediates(int numOperands);
+    void disassembleIds(int numOperands);
+    int disassembleString();
+    void disassembleInstruction(Id resultId, Id typeId, Op opCode, int numOperands);
+
+    // Data (size, bound, and schema are filled in by validate())
+    std::ostream& out;                       // where to write the disassembly
+    const std::vector<unsigned int>& stream; // the actual word stream
+    int size;                                // the size of the word stream
+    int word;                                // the next word of the stream to read
+
+    // map each <id> to the instruction that created it
+    Id bound;
+    std::vector<unsigned int> idInstruction;  // the word offset into the stream where the instruction for result [id] starts; 0 if not yet seen (forward reference or function parameter)
+
+    std::vector<std::string> idDescriptor;    // the best text string known for explaining the <id>
+
+    // schema
+    unsigned int schema;
+
+    // stack of structured-merge points
+    std::stack<Id> nestedControl;
+    Id nextNestedControl;         // need a slight delay for when we are nested
+};
+
+// Prints the module header and sizes the per-id bookkeeping tables.
+// NOTE(review): the header consumes 5 words (magic, version, generator,
+// bound, schema) but the guard only requires size >= 4, so a 4-word stream
+// reads stream[4] out of bounds -- confirm and tighten the check upstream.
+// NOTE(review): a bad magic number prints a message and returns without
+// aborting, yet Disassemble() still calls processInstructions() on the
+// unvalidated words -- confirm that is intended.
+void SpirvStream::validate()
+{
+    size = (int)stream.size();
+    if (size < 4)
+        Kill(out, "stream is too short");
+
+    // Magic number
+    if (stream[word++] != MagicNumber) {
+        out << "Bad magic number";
+        return;
+    }
+
+    // Version
+    out << "// Module Version " << std::hex << stream[word++] << std::endl;
+
+    // Generator's magic number
+    out << "// Generated by (magic number): " << std::hex << stream[word++] << std::dec << std::endl;
+
+    // Result <id> bound
+    bound = stream[word++];
+    idInstruction.resize(bound);
+    idDescriptor.resize(bound);
+    out << "// Id's are bound by " << bound << std::endl;
+    out << std::endl;
+
+    // Reserved schema, must be 0 for now
+    schema = stream[word++];
+    if (schema != 0)
+        Kill(out, "bad schema, must be 0");
+}
+
+// Loop over all the instructions, in order, processing each.
+// Boiler plate for each is handled here directly, the rest is dispatched.
+void SpirvStream::processInstructions()
+{
+    // Instructions
+    while (word < size) {
+        int instructionStart = word;
+
+        // Instruction wordCount and opcode
+        unsigned int firstWord = stream[word];
+        unsigned wordCount = firstWord >> WordCountShift;
+        Op opCode = (Op)(firstWord & OpCodeMask);
+        int nextInst = word + wordCount;
+        ++word;
+
+        // Presence of full instruction
+        if (nextInst > size)
+            Kill(out, "stream instruction terminated too early");
+
+        // NOTE(review): a malformed wordCount of 0 makes nextInst equal
+        // instructionStart; the resynchronization below ("word = nextInst")
+        // then rewinds the cursor and this loop never terminates -- confirm
+        // input is trusted or add a wordCount check.
+
+        // Base for computing number of operands; will be updated as more is learned
+        unsigned numOperands = wordCount - 1;
+
+        // Type <id>
+        Id typeId = 0;
+        if (InstructionDesc[opCode].hasType()) {
+            typeId = stream[word++];
+            --numOperands;
+        }
+
+        // Result <id>
+        Id resultId = 0;
+        if (InstructionDesc[opCode].hasResult()) {
+            resultId = stream[word++];
+            --numOperands;
+
+            // save instruction for future reference
+            idInstruction[resultId] = instructionStart;
+        }
+
+        outputResultId(resultId);
+        outputTypeId(typeId);
+        outputIndent();
+
+        // Hand off the Op and all its operands
+        disassembleInstruction(resultId, typeId, opCode, numOperands);
+        if (word != nextInst) {
+            out << " ERROR, incorrect number of operands consumed.  At " << word << " instead of " << nextInst << " instruction start was " << instructionStart;
+            word = nextInst;
+        }
+        out << std::endl;
+    }
+}
+
+// Two spaces of indent per currently-open structured-control construct.
+void SpirvStream::outputIndent()
+{
+    for (int i = 0; i < (int)nestedControl.size(); ++i)
+        out << "  ";
+}
+
+// Appends "id(descriptor)" to idStream; an id of 0 prints nothing.
+void SpirvStream::formatId(Id id, std::stringstream& idStream)
+{
+    if (id != 0) {
+        // On instructions with no IDs, this is called with "0", which does not
+        // have to be within ID bounds on null shaders.
+        if (id >= bound)
+            Kill(out, "Bad <id>");
+
+        idStream << id;
+        if (idDescriptor[id].size() > 0)
+            idStream << "(" << idDescriptor[id] << ")";
+    }
+}
+
+// Prints the right-aligned result-id column (':' suffix when present), and
+// pops the structured-control stack when this id is the pending merge point.
+void SpirvStream::outputResultId(Id id)
+{
+    const int width = 16;
+    std::stringstream idStream;
+    formatId(id, idStream);
+    out << std::setw(width) << std::right << idStream.str();
+    if (id != 0)
+        out << ":";
+    else
+        out << " ";
+
+    if (nestedControl.size() && id == nestedControl.top())
+        nestedControl.pop();
+}
+
+// Prints the right-aligned, fixed-width type-id column.
+void SpirvStream::outputTypeId(Id id)
+{
+    const int width = 12;
+    std::stringstream idStream;
+    formatId(id, idStream);
+    out << std::setw(width) << std::right << idStream.str() << " ";
+}
+
+// Prints one operand id with its descriptor, e.g. "7(float)".
+void SpirvStream::outputId(Id id)
+{
+    if (id >= bound)
+        Kill(out, "Bad <id>");
+
+    out << id;
+    if (idDescriptor[id].size() > 0)
+        out << "(" << idDescriptor[id] << ")";
+}
+
+// Prints the name of every bit set in 'mask' for the given operand class,
+// or "None" when no bits are set.
+void SpirvStream::outputMask(OperandClass operandClass, unsigned mask)
+{
+    if (mask == 0)
+        out << "None";
+    else {
+        for (int m = 0; m < OperandClassParams[operandClass].ceiling; ++m) {
+            if (mask & (1 << m))
+                out << OperandClassParams[operandClass].getName(m) << " ";
+        }
+    }
+}
+
+// Prints 'numOperands' literal words from the stream, space-separated.
+void SpirvStream::disassembleImmediates(int numOperands)
+{
+    for (int i = 0; i < numOperands; ++i) {
+        out << stream[word++];
+        if (i < numOperands - 1)
+            out << " ";
+    }
+}
+
+// Prints 'numOperands' ids from the stream, space-separated.
+void SpirvStream::disassembleIds(int numOperands)
+{
+    for (int i = 0; i < numOperands; ++i) {
+        outputId(stream[word++]);
+        if (i < numOperands - 1)
+            out << " ";
+    }
+}
+
+// return the number of operands consumed by the string
+// (a nul-terminated literal packed 4 chars per word; the count includes the
+// word holding the terminating nul).
+// NOTE(review): there is no bounds check against 'size', so an unterminated
+// string in a truncated stream reads past the end of 'stream' -- confirm
+// input is trusted here.
+int SpirvStream::disassembleString()
+{
+    int startWord = word;
+
+    out << " \"";
+
+    const char* wordString;
+    bool done = false;
+    do {
+        // Copy the word to a local before the byte-wise walk; character
+        // order within the word follows the host's byte order.
+        unsigned int content = stream[word];
+        wordString = (const char*)&content;
+        for (int charCount = 0; charCount < 4; ++charCount) {
+            if (*wordString == 0) {
+                done = true;
+                break;
+            }
+            out << *(wordString++);
+        }
+        ++word;
+    } while (! done);
+
+    out << "\"";
+
+    return word - startWord;
+}
+
+// Prints the opcode name and all of its operands, consuming exactly the
+// operand words of this instruction (the caller checks the final cursor
+// position against the declared word count).  Also records readable
+// descriptor strings for ids produced by type-creating opcodes, and tracks
+// structured-control merge points for indentation.
+void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode, int numOperands)
+{
+    // Process the opcode
+
+    out << (OpcodeString(opCode) + 2);  // leave out the "Op"
+
+    if (opCode == OpLoopMerge || opCode == OpSelectionMerge)
+        nextNestedControl = stream[word];
+    else if (opCode == OpBranchConditional || opCode == OpSwitch) {
+        if (nextNestedControl) {
+            nestedControl.push(nextNestedControl);
+            nextNestedControl = 0;
+        }
+    } else if (opCode == OpExtInstImport) {
+        // The import's literal name string is read in place from the stream
+        // (host byte order), becoming the id's descriptor.
+        idDescriptor[resultId] = (const char*)(&stream[word]);
+    }
+    else {
+        if (resultId != 0 && idDescriptor[resultId].size() == 0) {
+            switch (opCode) {
+            case OpTypeInt:
+                idDescriptor[resultId] = "int";
+                break;
+            case OpTypeFloat:
+                idDescriptor[resultId] = "float";
+                break;
+            case OpTypeBool:
+                idDescriptor[resultId] = "bool";
+                break;
+            case OpTypeStruct:
+                idDescriptor[resultId] = "struct";
+                break;
+            case OpTypePointer:
+                idDescriptor[resultId] = "ptr";
+                break;
+            case OpTypeVector:
+                // Prefix with the first letter of the component type's
+                // descriptor ("i" of int, "f" of float, ...), then the count.
+                if (idDescriptor[stream[word]].size() > 0)
+                    idDescriptor[resultId].append(idDescriptor[stream[word]].begin(), idDescriptor[stream[word]].begin() + 1);
+                idDescriptor[resultId].append("vec");
+                switch (stream[word + 1]) {
+                case 2:   idDescriptor[resultId].append("2");   break;
+                case 3:   idDescriptor[resultId].append("3");   break;
+                case 4:   idDescriptor[resultId].append("4");   break;
+                case 8:   idDescriptor[resultId].append("8");   break;
+                case 16:  idDescriptor[resultId].append("16");  break;
+                case 32:  idDescriptor[resultId].append("32");  break;
+                default: break;
+                }
+                break;
+            default:
+                break;
+            }
+        }
+    }
+
+    // Process the operands.  Note, a new context-dependent set could be
+    // swapped in mid-traversal.
+
+    // Handle images specially, so can put out helpful strings.
+    if (opCode == OpTypeImage) {
+        out << " ";
+        disassembleIds(1);
+        out << " " << DimensionString((Dim)stream[word++]);
+        out << (stream[word++] != 0 ? " depth" : "");
+        out << (stream[word++] != 0 ? " array" : "");
+        out << (stream[word++] != 0 ? " multi-sampled" : "");
+        switch (stream[word++]) {
+        case 0: out << " runtime";    break;
+        case 1: out << " sampled";    break;
+        case 2: out << " nonsampled"; break;
+        }
+        out << " format:" << ImageFormatString((ImageFormat)stream[word++]);
+
+        // Optional trailing access qualifier (8th operand).
+        if (numOperands == 8) {
+            out << " " << AccessQualifierString(stream[word++]);
+        }
+        return;
+    }
+
+    // Handle all the parameterized operands
+    for (int op = 0; op < InstructionDesc[opCode].operands.getNum() && numOperands > 0; ++op) {
+        out << " ";
+        OperandClass operandClass = InstructionDesc[opCode].operands.getClass(op);
+        switch (operandClass) {
+        case OperandId:
+        case OperandScope:
+        case OperandMemorySemantics:
+            disassembleIds(1);
+            --numOperands;
+            // Get names for printing "(XXX)" for readability, *after* this id
+            if (opCode == OpName)
+                idDescriptor[stream[word - 1]] = (const char*)(&stream[word]);
+            break;
+        case OperandVariableIds:
+            disassembleIds(numOperands);
+            return;
+        case OperandImageOperands:
+            outputMask(OperandImageOperands, stream[word++]);
+            --numOperands;
+            disassembleIds(numOperands);
+            return;
+        case OperandOptionalLiteral:
+        case OperandVariableLiterals:
+            // BuiltIn decorations get their literal printed symbolically.
+            if ((opCode == OpDecorate && stream[word - 1] == DecorationBuiltIn) ||
+                (opCode == OpMemberDecorate && stream[word - 1] == DecorationBuiltIn)) {
+                out << BuiltInString(stream[word++]);
+                --numOperands;
+                ++op;
+            }
+            disassembleImmediates(numOperands);
+            return;
+        case OperandVariableIdLiteral:
+            // Pairs of (type id, member literal), one per line.
+            while (numOperands > 0) {
+                out << std::endl;
+                outputResultId(0);
+                outputTypeId(0);
+                outputIndent();
+                out << "     Type ";
+                disassembleIds(1);
+                out << ", member ";
+                disassembleImmediates(1);
+                numOperands -= 2;
+            }
+            return;
+        case OperandVariableLiteralId:
+            // Pairs of (case literal, target id), one per line (OpSwitch).
+            while (numOperands > 0) {
+                out << std::endl;
+                outputResultId(0);
+                outputTypeId(0);
+                outputIndent();
+                out << "     case ";
+                disassembleImmediates(1);
+                out << ": ";
+                disassembleIds(1);
+                numOperands -= 2;
+            }
+            return;
+        case OperandLiteralNumber:
+            disassembleImmediates(1);
+            --numOperands;
+            // For OpExtInst, annotate the literal entry point with its name
+            // from the imported instruction set (stream[word-2] is the set id).
+            if (opCode == OpExtInst) {
+                ExtInstSet extInstSet = GLSL450Inst;
+                const char* name = idDescriptor[stream[word - 2]].c_str();
+                if (0 == memcmp("OpenCL", name, 6)) {
+                    extInstSet = OpenCLExtInst;
+#ifdef AMD_EXTENSIONS
+                } else if (strcmp(spv::E_SPV_AMD_shader_ballot, name) == 0 ||
+                           strcmp(spv::E_SPV_AMD_shader_trinary_minmax, name) == 0 ||
+                           strcmp(spv::E_SPV_AMD_shader_explicit_vertex_parameter, name) == 0 ||
+                           strcmp(spv::E_SPV_AMD_gcn_shader, name) == 0) {
+                    extInstSet = GLSLextAMDInst;
+#endif
+                }
+                unsigned entrypoint = stream[word - 1];
+                if (extInstSet == GLSL450Inst) {
+                    if (entrypoint < GLSLstd450Count) {
+                        out << "(" << GlslStd450DebugNames[entrypoint] << ")";
+                    }
+#ifdef AMD_EXTENSIONS
+                } else if (extInstSet == GLSLextAMDInst) {
+                    out << "(" << GLSLextAMDGetDebugNames(name, entrypoint) << ")";
+#endif
+                }
+            }
+            break;
+        case OperandOptionalLiteralString:
+        case OperandLiteralString:
+            numOperands -= disassembleString();
+            break;
+        default:
+            assert(operandClass >= OperandSource && operandClass < OperandOpcode);
+
+            if (OperandClassParams[operandClass].bitmask)
+                outputMask(operandClass, stream[word++]);
+            else
+                out << OperandClassParams[operandClass].getName(stream[word++]);
+            --numOperands;
+
+            break;
+        }
+    }
+
+    return;
+}
+
+// Fills 'names' (an array of at least GLSLstd450Count entries) with the
+// printable name of each GLSL.std.450 extended instruction; any entry not
+// listed below reads "Unknown".
+static void GLSLstd450GetDebugNames(const char** names)
+{
+    for (int i = 0; i < GLSLstd450Count; ++i)
+        names[i] = "Unknown";
+
+    names[GLSLstd450Round]                   = "Round";
+    names[GLSLstd450RoundEven]               = "RoundEven";
+    names[GLSLstd450Trunc]                   = "Trunc";
+    names[GLSLstd450FAbs]                    = "FAbs";
+    names[GLSLstd450SAbs]                    = "SAbs";
+    names[GLSLstd450FSign]                   = "FSign";
+    names[GLSLstd450SSign]                   = "SSign";
+    names[GLSLstd450Floor]                   = "Floor";
+    names[GLSLstd450Ceil]                    = "Ceil";
+    names[GLSLstd450Fract]                   = "Fract";
+    names[GLSLstd450Radians]                 = "Radians";
+    names[GLSLstd450Degrees]                 = "Degrees";
+    names[GLSLstd450Sin]                     = "Sin";
+    names[GLSLstd450Cos]                     = "Cos";
+    names[GLSLstd450Tan]                     = "Tan";
+    names[GLSLstd450Asin]                    = "Asin";
+    names[GLSLstd450Acos]                    = "Acos";
+    names[GLSLstd450Atan]                    = "Atan";
+    names[GLSLstd450Sinh]                    = "Sinh";
+    names[GLSLstd450Cosh]                    = "Cosh";
+    names[GLSLstd450Tanh]                    = "Tanh";
+    names[GLSLstd450Asinh]                   = "Asinh";
+    names[GLSLstd450Acosh]                   = "Acosh";
+    names[GLSLstd450Atanh]                   = "Atanh";
+    names[GLSLstd450Atan2]                   = "Atan2";
+    names[GLSLstd450Pow]                     = "Pow";
+    names[GLSLstd450Exp]                     = "Exp";
+    names[GLSLstd450Log]                     = "Log";
+    names[GLSLstd450Exp2]                    = "Exp2";
+    names[GLSLstd450Log2]                    = "Log2";
+    names[GLSLstd450Sqrt]                    = "Sqrt";
+    names[GLSLstd450InverseSqrt]             = "InverseSqrt";
+    names[GLSLstd450Determinant]             = "Determinant";
+    names[GLSLstd450MatrixInverse]           = "MatrixInverse";
+    names[GLSLstd450Modf]                    = "Modf";
+    names[GLSLstd450ModfStruct]              = "ModfStruct";
+    names[GLSLstd450FMin]                    = "FMin";
+    names[GLSLstd450SMin]                    = "SMin";
+    names[GLSLstd450UMin]                    = "UMin";
+    names[GLSLstd450FMax]                    = "FMax";
+    names[GLSLstd450SMax]                    = "SMax";
+    names[GLSLstd450UMax]                    = "UMax";
+    names[GLSLstd450FClamp]                  = "FClamp";
+    names[GLSLstd450SClamp]                  = "SClamp";
+    names[GLSLstd450UClamp]                  = "UClamp";
+    names[GLSLstd450FMix]                    = "FMix";
+    names[GLSLstd450Step]                    = "Step";
+    names[GLSLstd450SmoothStep]              = "SmoothStep";
+    names[GLSLstd450Fma]                     = "Fma";
+    names[GLSLstd450Frexp]                   = "Frexp";
+    names[GLSLstd450FrexpStruct]             = "FrexpStruct";
+    names[GLSLstd450Ldexp]                   = "Ldexp";
+    names[GLSLstd450PackSnorm4x8]            = "PackSnorm4x8";
+    names[GLSLstd450PackUnorm4x8]            = "PackUnorm4x8";
+    names[GLSLstd450PackSnorm2x16]           = "PackSnorm2x16";
+    names[GLSLstd450PackUnorm2x16]           = "PackUnorm2x16";
+    names[GLSLstd450PackHalf2x16]            = "PackHalf2x16";
+    names[GLSLstd450PackDouble2x32]          = "PackDouble2x32";
+    names[GLSLstd450UnpackSnorm2x16]         = "UnpackSnorm2x16";
+    names[GLSLstd450UnpackUnorm2x16]         = "UnpackUnorm2x16";
+    names[GLSLstd450UnpackHalf2x16]          = "UnpackHalf2x16";
+    names[GLSLstd450UnpackSnorm4x8]          = "UnpackSnorm4x8";
+    names[GLSLstd450UnpackUnorm4x8]          = "UnpackUnorm4x8";
+    names[GLSLstd450UnpackDouble2x32]        = "UnpackDouble2x32";
+    names[GLSLstd450Length]                  = "Length";
+    names[GLSLstd450Distance]                = "Distance";
+    names[GLSLstd450Cross]                   = "Cross";
+    names[GLSLstd450Normalize]               = "Normalize";
+    names[GLSLstd450FaceForward]             = "FaceForward";
+    names[GLSLstd450Reflect]                 = "Reflect";
+    names[GLSLstd450Refract]                 = "Refract";
+    names[GLSLstd450FindILsb]                = "FindILsb";
+    names[GLSLstd450FindSMsb]                = "FindSMsb";
+    names[GLSLstd450FindUMsb]                = "FindUMsb";
+    names[GLSLstd450InterpolateAtCentroid]   = "InterpolateAtCentroid";
+    names[GLSLstd450InterpolateAtSample]     = "InterpolateAtSample";
+    names[GLSLstd450InterpolateAtOffset]     = "InterpolateAtOffset";
+}
+
+#ifdef AMD_EXTENSIONS
+// Maps an (extension-set name, entry point) pair to a printable instruction
+// name for the AMD extended instruction sets; returns "Bad" for any
+// unrecognized set or entry point.
+static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint)
+{
+    if (strcmp(name, spv::E_SPV_AMD_shader_ballot) == 0) {
+        switch (entrypoint) {
+        case SwizzleInvocationsAMD:         return "SwizzleInvocationsAMD";
+        case SwizzleInvocationsMaskedAMD:   return "SwizzleInvocationsMaskedAMD";
+        case WriteInvocationAMD:            return "WriteInvocationAMD";
+        case MbcntAMD:                      return "MbcntAMD";
+        default:                            return "Bad";
+        }
+    } else if (strcmp(name, spv::E_SPV_AMD_shader_trinary_minmax) == 0) {
+        switch (entrypoint) {
+        case FMin3AMD:      return "FMin3AMD";
+        case UMin3AMD:      return "UMin3AMD";
+        case SMin3AMD:      return "SMin3AMD";
+        case FMax3AMD:      return "FMax3AMD";
+        case UMax3AMD:      return "UMax3AMD";
+        case SMax3AMD:      return "SMax3AMD";
+        case FMid3AMD:      return "FMid3AMD";
+        case UMid3AMD:      return "UMid3AMD";
+        case SMid3AMD:      return "SMid3AMD";
+        default:            return "Bad";
+        }
+    } else if (strcmp(name, spv::E_SPV_AMD_shader_explicit_vertex_parameter) == 0) {
+        switch (entrypoint) {
+        case InterpolateAtVertexAMD:    return "InterpolateAtVertexAMD";
+        default:                        return "Bad";
+        }
+    }
+    else if (strcmp(name, spv::E_SPV_AMD_gcn_shader) == 0) {
+        switch (entrypoint) {
+        case CubeFaceIndexAMD:      return "CubeFaceIndexAMD";
+        case CubeFaceCoordAMD:      return "CubeFaceCoordAMD";
+        case TimeAMD:               return "TimeAMD";
+        default:
+            break;
+        }
+    }
+
+    return "Bad";
+}
+#endif
+
+// Public entry point: parameterize the instruction tables, build the
+// GLSL.std.450 debug-name table, then validate and disassemble 'stream'
+// to 'out'.
+void Disassemble(std::ostream& out, const std::vector<unsigned int>& stream)
+{
+    // NOTE(review): the local variable shares the name of its class; legal
+    // C++, but it shadows the type name for the rest of this scope.
+    SpirvStream SpirvStream(out, stream);
+    spv::Parameterize();
+    GLSLstd450GetDebugNames(GlslStd450DebugNames);
+    SpirvStream.validate();
+    SpirvStream.processInstructions();
+}
+
+}; // end namespace spv

+ 52 - 0
3rdparty/glslang/SPIRV/disassemble.h

@@ -0,0 +1,52 @@
+//
+//Copyright (C) 2014-2015 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Disassembler for SPIR-V.
+//
+
+#pragma once
+#ifndef disassembler_H
+#define disassembler_H
+
+#include <iostream>
+#include <vector>
+
+namespace spv {
+
+    // Writes a human-readable disassembly of the SPIR-V binary word stream
+    // to 'out'.  On malformed input the implementation's error handler may
+    // terminate the process (it calls exit()).
+    void Disassemble(std::ostream& out, const std::vector<unsigned int>&);
+
+};  // end namespace spv
+
+#endif // disassembler_H

+ 2804 - 0
3rdparty/glslang/SPIRV/doc.cpp

@@ -0,0 +1,2804 @@
+//
+//Copyright (C) 2014-2015 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+
+//
+// 1) Programmatically fill in instruction/operand information.
+//    This can be used for disassembly, printing documentation, etc.
+//
+// 2) Print documentation from this parameterization.
+//
+
+#include "doc.h"
+
+#include <cstdio>
+#include <cstring>
+#include <algorithm>
+
+namespace spv {
+    extern "C" {
+        // Include C-based headers that don't have a namespace
+#ifdef AMD_EXTENSIONS
+        #include "GLSL.ext.AMD.h"
+#endif
+    }
+}
+
+namespace spv {
+
+//
+// Whole set of functions that translate enumerants to their text strings for
+// the specification (or their sanitized versions for auto-generating the
+// spirv headers).
+//
+// Also, the ceilings are declared next to these, to help keep them in sync.
+// Ceilings should be
+//  - one more than the maximum value an enumerant takes on, for non-mask enumerants
+//    (for non-sparse enums, this is the number of enumerants)
+//  - the number of bits consumed by the set of masks
+//    (for non-sparse mask enums, this is the number of enumerants)
+//
+
+const int SourceLanguageCeiling = 6; // HLSL todo: need official enumerant
+
+// Printable name for a SourceLanguage enumerant.
+const char* SourceString(int source)
+{
+    switch (source) {
+    case 0:  return "Unknown";
+    case 1:  return "ESSL";
+    case 2:  return "GLSL";
+    case 3:  return "OpenCL_C";
+    case 4:  return "OpenCL_CPP";
+    case 5:  return "HLSL";
+
+    case SourceLanguageCeiling:
+    default: return "Bad";
+    }
+}
+
+const int ExecutionModelCeiling = 7;
+
+// Printable name for an ExecutionModel enumerant.
+const char* ExecutionModelString(int model)
+{
+    switch (model) {
+    case 0:  return "Vertex";
+    case 1:  return "TessellationControl";
+    case 2:  return "TessellationEvaluation";
+    case 3:  return "Geometry";
+    case 4:  return "Fragment";
+    case 5:  return "GLCompute";
+    case 6:  return "Kernel";
+
+    case ExecutionModelCeiling:
+    default: return "Bad";
+    }
+}
+
+const int AddressingModelCeiling = 3;
+
+// Printable name for an AddressingModel enumerant.
+const char* AddressingString(int addr)
+{
+    switch (addr) {
+    case 0:  return "Logical";
+    case 1:  return "Physical32";
+    case 2:  return "Physical64";
+
+    case AddressingModelCeiling:
+    default: return "Bad";
+    }
+}
+
+const int MemoryModelCeiling = 3;
+
+// Printable name for a MemoryModel enumerant.
+const char* MemoryString(int mem)
+{
+    switch (mem) {
+    case 0:  return "Simple";
+    case 1:  return "GLSL450";
+    case 2:  return "OpenCL";
+
+    case MemoryModelCeiling:
+    default: return "Bad";
+    }
+}
+
+const int ExecutionModeCeiling = 33;
+
+// Printable name for an ExecutionMode enumerant; gaps in the enum ("Bad")
+// are values not assigned by the core specification.
+const char* ExecutionModeString(int mode)
+{
+    switch (mode) {
+    case 0:  return "Invocations";
+    case 1:  return "SpacingEqual";
+    case 2:  return "SpacingFractionalEven";
+    case 3:  return "SpacingFractionalOdd";
+    case 4:  return "VertexOrderCw";
+    case 5:  return "VertexOrderCcw";
+    case 6:  return "PixelCenterInteger";
+    case 7:  return "OriginUpperLeft";
+    case 8:  return "OriginLowerLeft";
+    case 9:  return "EarlyFragmentTests";
+    case 10: return "PointMode";
+    case 11: return "Xfb";
+    case 12: return "DepthReplacing";
+    case 13: return "Bad";
+    case 14: return "DepthGreater";
+    case 15: return "DepthLess";
+    case 16: return "DepthUnchanged";
+    case 17: return "LocalSize";
+    case 18: return "LocalSizeHint";
+    case 19: return "InputPoints";
+    case 20: return "InputLines";
+    case 21: return "InputLinesAdjacency";
+    case 22: return "Triangles";
+    case 23: return "InputTrianglesAdjacency";
+    case 24: return "Quads";
+    case 25: return "Isolines";
+    case 26: return "OutputVertices";
+    case 27: return "OutputPoints";
+    case 28: return "OutputLineStrip";
+    case 29: return "OutputTriangleStrip";
+    case 30: return "VecTypeHint";
+    case 31: return "ContractionOff";
+    case 32: return "Bad";
+
+    case ExecutionModeCeiling:
+    default: return "Bad";
+    }
+}
+
+const int StorageClassCeiling = 12;
+
+// Printable name for a StorageClass enumerant.
+const char* StorageClassString(int StorageClass)
+{
+    switch (StorageClass) {
+    case 0:  return "UniformConstant";
+    case 1:  return "Input";
+    case 2:  return "Uniform";
+    case 3:  return "Output";
+    case 4:  return "Workgroup";
+    case 5:  return "CrossWorkgroup";
+    case 6:  return "Private";
+    case 7:  return "Function";
+    case 8:  return "Generic";
+    case 9:  return "PushConstant";
+    case 10: return "AtomicCounter";
+    case 11: return "Image";
+
+    case StorageClassCeiling:
+    default: return "Bad";
+    }
+}
+
+const int DecorationCeiling = 45;
+
+// Printable name for a Decoration enumerant; gaps in the core enum print
+// "Bad".
+const char* DecorationString(int decoration)
+{
+    switch (decoration) {
+    case 0:  return "RelaxedPrecision";
+    case 1:  return "SpecId";
+    case 2:  return "Block";
+    case 3:  return "BufferBlock";
+    case 4:  return "RowMajor";
+    case 5:  return "ColMajor";
+    case 6:  return "ArrayStride";
+    case 7:  return "MatrixStride";
+    case 8:  return "GLSLShared";
+    case 9:  return "GLSLPacked";
+    case 10: return "CPacked";
+    case 11: return "BuiltIn";
+    case 12: return "Bad";
+    case 13: return "NoPerspective";
+    case 14: return "Flat";
+    case 15: return "Patch";
+    case 16: return "Centroid";
+    case 17: return "Sample";
+    case 18: return "Invariant";
+    case 19: return "Restrict";
+    case 20: return "Aliased";
+    case 21: return "Volatile";
+    case 22: return "Constant";
+    case 23: return "Coherent";
+    case 24: return "NonWritable";
+    case 25: return "NonReadable";
+    case 26: return "Uniform";
+    case 27: return "Bad";
+    case 28: return "SaturatedConversion";
+    case 29: return "Stream";
+    case 30: return "Location";
+    case 31: return "Component";
+    case 32: return "Index";
+    case 33: return "Binding";
+    case 34: return "DescriptorSet";
+    case 35: return "Offset";
+    case 36: return "XfbBuffer";
+    case 37: return "XfbStride";
+    case 38: return "FuncParamAttr";
+    case 39: return "FP Rounding Mode";
+    case 40: return "FP Fast Math Mode";
+    case 41: return "Linkage Attributes";
+    case 42: return "NoContraction";
+    case 43: return "InputAttachmentIndex";
+    case 44: return "Alignment";
+
+    case DecorationCeiling:
+    default:  return "Bad";
+
+#ifdef AMD_EXTENSIONS
+    // Case labels may legally appear after 'default'; 4999 still matches
+    // before the default branch is taken.
+    case 4999: return "ExplicitInterpAMD";
+#endif
+    }
+}
+
const int BuiltInCeiling = 44;

// Map a SPIR-V BuiltIn value to its printable name; "Bad" for unknown values.
const char* BuiltInString(int builtIn)
{
    // Extension-range values first.
    switch (builtIn) {
    case 4416: return "SubgroupEqMaskKHR";
    case 4417: return "SubgroupGeMaskKHR";
    case 4418: return "SubgroupGtMaskKHR";
    case 4419: return "SubgroupLeMaskKHR";
    case 4420: return "SubgroupLtMaskKHR";
    case 4424: return "BaseVertex";
    case 4425: return "BaseInstance";
    case 4426: return "DrawIndex";
#ifdef AMD_EXTENSIONS
    case 4992: return "BaryCoordNoPerspAMD";
    case 4993: return "BaryCoordNoPerspCentroidAMD";
    case 4994: return "BaryCoordNoPerspSampleAMD";
    case 4995: return "BaryCoordSmoothAMD";
    case 4996: return "BaryCoordSmoothCentroidAMD";
    case 4997: return "BaryCoordSmoothSampleAMD";
    case 4998: return "BaryCoordPullModelAMD";
#endif
    default:
        break;
    }

    static const char* const names[] = {
        "Position", "PointSize",
        "Bad",                          // 2: unused value
        "ClipDistance", "CullDistance",
        "VertexId", "InstanceId", "PrimitiveId", "InvocationId",
        "Layer", "ViewportIndex",
        "TessLevelOuter", "TessLevelInner", "TessCoord", "PatchVertices",
        "FragCoord", "PointCoord", "FrontFacing",
        "SampleId", "SamplePosition", "SampleMask",
        "Bad",                          // 21: unused value
        "FragDepth", "HelperInvocation",
        "NumWorkgroups", "WorkgroupSize", "WorkgroupId",
        "LocalInvocationId", "GlobalInvocationId", "LocalInvocationIndex",
        "WorkDim", "GlobalSize", "EnqueuedWorkgroupSize", "GlobalOffset",
        "GlobalLinearId",
        "Bad",                          // 35: unused value
        "SubgroupSize", "SubgroupMaxSize", "NumSubgroups",
        "NumEnqueuedSubgroups", "SubgroupId", "SubgroupLocalInvocationId",
        "VertexIndex",                  // TBD: put next to VertexId?
        "InstanceIndex",                // TBD: put next to InstanceId?
    };

    if (builtIn < 0 || builtIn >= BuiltInCeiling)
        return "Bad";

    return names[builtIn];
}
+
const int DimensionCeiling = 7;

// Map a SPIR-V Dim value to its printable name ("Bad" when out of range).
const char* DimensionString(int dim)
{
    static const char* const names[] = {
        "1D", "2D", "3D", "Cube", "Rect", "Buffer", "SubpassData",
    };

    return (dim < 0 || dim >= DimensionCeiling) ? "Bad" : names[dim];
}
+
const int SamplerAddressingModeCeiling = 5;

// Map a sampler addressing-mode value to its printable name ("Bad" when out of range).
const char* SamplerAddressingModeString(int mode)
{
    static const char* const names[] = {
        "None", "ClampToEdge", "Clamp", "Repeat", "RepeatMirrored",
    };

    return (mode < 0 || mode >= SamplerAddressingModeCeiling) ? "Bad" : names[mode];
}
+
const int SamplerFilterModeCeiling = 2;

// Map a sampler filter-mode value to its printable name ("Bad" when out of range).
const char* SamplerFilterModeString(int mode)
{
    static const char* const names[] = {
        "Nearest", "Linear",
    };

    return (mode < 0 || mode >= SamplerFilterModeCeiling) ? "Bad" : names[mode];
}
+
const int ImageFormatCeiling = 40;

// Map a SPIR-V ImageFormat value to its printable name ("Bad" when out of range).
const char* ImageFormatString(int format)
{
    static const char* const names[] = {
        "Unknown",

        // ES/Desktop float
        "Rgba32f", "Rgba16f", "R32f", "Rgba8", "Rgba8Snorm",

        // Desktop float
        "Rg32f", "Rg16f", "R11fG11fB10f", "R16f", "Rgba16",
        "Rgb10A2", "Rg16", "Rg8", "R16", "R8",
        "Rgba16Snorm", "Rg16Snorm", "Rg8Snorm", "R16Snorm", "R8Snorm",

        // ES/Desktop int
        "Rgba32i", "Rgba16i", "Rgba8i", "R32i",

        // Desktop int
        "Rg32i", "Rg16i", "Rg8i", "R16i", "R8i",

        // ES/Desktop uint
        "Rgba32ui", "Rgba16ui", "Rgba8ui", "R32ui",

        // Desktop uint
        "Rgb10a2ui", "Rg32ui", "Rg16ui", "Rg8ui", "R16ui", "R8ui",
    };

    return (format < 0 || format >= ImageFormatCeiling) ? "Bad" : names[format];
}
+
const int ImageChannelOrderCeiling = 19;

// Map an OpenCL-style image channel order to its printable name ("Bad" when out of range).
const char* ImageChannelOrderString(int format)
{
    static const char* const names[] = {
        "R", "A", "RG", "RA", "RGB", "RGBA", "BGRA", "ARGB",
        "Intensity", "Luminance", "Rx", "RGx", "RGBx",
        "Depth", "DepthStencil", "sRGB", "sRGBx", "sRGBA", "sBGRA",
    };

    return (format < 0 || format >= ImageChannelOrderCeiling) ? "Bad" : names[format];
}
+
const int ImageChannelDataTypeCeiling = 17;

// Map an OpenCL-style image channel data type to its printable name ("Bad" when out of range).
const char* ImageChannelDataTypeString(int type)
{
    static const char* const names[] = {
        "SnormInt8", "SnormInt16",
        "UnormInt8", "UnormInt16",
        "UnormShort565", "UnormShort555", "UnormInt101010",
        "SignedInt8", "SignedInt16", "SignedInt32",
        "UnsignedInt8", "UnsignedInt16", "UnsignedInt32",
        "HalfFloat", "Float",
        "UnormInt24", "UnormInt101010_2",
    };

    return (type < 0 || type >= ImageChannelDataTypeCeiling) ? "Bad" : names[type];
}
+
const int ImageOperandsCeiling = 8;

// Map an image-operand bit position to its printable name ("Bad" when out of range).
const char* ImageOperandsString(int format)
{
    static const char* const names[] = {
        "Bias", "Lod", "Grad", "ConstOffset",
        "Offset", "ConstOffsets", "Sample", "MinLod",
    };

    return (format < 0 || format >= ImageOperandsCeiling) ? "Bad" : names[format];
}
+
const int FPFastMathCeiling = 5;

// Map a fast-math flag bit position to its printable name ("Bad" when out of range).
const char* FPFastMathString(int mode)
{
    static const char* const names[] = {
        "NotNaN", "NotInf", "NSZ", "AllowRecip", "Fast",
    };

    return (mode < 0 || mode >= FPFastMathCeiling) ? "Bad" : names[mode];
}
+
const int FPRoundingModeCeiling = 4;

// Map a floating-point rounding-mode value to its printable name ("Bad" when out of range).
const char* FPRoundingModeString(int mode)
{
    static const char* const names[] = {
        "RTE", "RTZ", "RTP", "RTN",
    };

    return (mode < 0 || mode >= FPRoundingModeCeiling) ? "Bad" : names[mode];
}
+
const int LinkageTypeCeiling = 2;

// Map a linkage-type value to its printable name ("Bad" when out of range).
const char* LinkageTypeString(int type)
{
    static const char* const names[] = {
        "Export", "Import",
    };

    return (type < 0 || type >= LinkageTypeCeiling) ? "Bad" : names[type];
}
+
const int FuncParamAttrCeiling = 8;

// Map a function-parameter attribute value to its printable name ("Bad" when out of range).
const char* FuncParamAttrString(int attr)
{
    static const char* const names[] = {
        "Zext", "Sext", "ByVal", "Sret",
        "NoAlias", "NoCapture", "NoWrite", "NoReadWrite",
    };

    return (attr < 0 || attr >= FuncParamAttrCeiling) ? "Bad" : names[attr];
}
+
const int AccessQualifierCeiling = 3;

// Map an access-qualifier value to its printable name ("Bad" when out of range).
const char* AccessQualifierString(int attr)
{
    static const char* const names[] = {
        "ReadOnly", "WriteOnly", "ReadWrite",
    };

    return (attr < 0 || attr >= AccessQualifierCeiling) ? "Bad" : names[attr];
}
+
const int SelectControlCeiling = 2;

// Map a selection-control bit position to its printable name ("Bad" when out of range).
const char* SelectControlString(int cont)
{
    static const char* const names[] = {
        "Flatten", "DontFlatten",
    };

    return (cont < 0 || cont >= SelectControlCeiling) ? "Bad" : names[cont];
}
+
const int LoopControlCeiling = 2;

// Map a loop-control bit position to its printable name ("Bad" when out of range).
const char* LoopControlString(int cont)
{
    static const char* const names[] = {
        "Unroll", "DontUnroll",
    };

    return (cont < 0 || cont >= LoopControlCeiling) ? "Bad" : names[cont];
}
+
const int FunctionControlCeiling = 4;

// Map a function-control bit position to its printable name ("Bad" when out of range).
const char* FunctionControlString(int cont)
{
    static const char* const names[] = {
        "Inline", "DontInline", "Pure", "Const",
    };

    return (cont < 0 || cont >= FunctionControlCeiling) ? "Bad" : names[cont];
}
+
const int MemorySemanticsCeiling = 12;

// Map a memory-semantics bit position to its printable name.
// Note: no bits set (None) means "Relaxed".
const char* MemorySemanticsString(int mem)
{
    static const char* const names[] = {
        "Bad",                      // 0: placeholder for 'Consume'
        "Acquire", "Release", "AcquireRelease", "SequentiallyConsistent",
        "Bad",                      // 5: reserved for future expansion
        "UniformMemory", "SubgroupMemory", "WorkgroupMemory",
        "CrossWorkgroupMemory", "AtomicCounterMemory", "ImageMemory",
    };

    return (mem < 0 || mem >= MemorySemanticsCeiling) ? "Bad" : names[mem];
}
+
const int MemoryAccessCeiling = 3;

// Map a memory-access bit position to its printable name ("Bad" when out of range).
const char* MemoryAccessString(int mem)
{
    static const char* const names[] = {
        "Volatile", "Aligned", "Nontemporal",
    };

    return (mem < 0 || mem >= MemoryAccessCeiling) ? "Bad" : names[mem];
}
+
const int ScopeCeiling = 5;

// Map a SPIR-V Scope value to its printable name ("Bad" when out of range).
const char* ScopeString(int mem)
{
    static const char* const names[] = {
        "CrossDevice", "Device", "Workgroup", "Subgroup", "Invocation",
    };

    return (mem < 0 || mem >= ScopeCeiling) ? "Bad" : names[mem];
}
+
const int GroupOperationCeiling = 3;

// Map a group-operation value to its printable name ("Bad" when out of range).
const char* GroupOperationString(int gop)
{
    static const char* const names[] = {
        "Reduce", "InclusiveScan", "ExclusiveScan",
    };

    return (gop < 0 || gop >= GroupOperationCeiling) ? "Bad" : names[gop];
}
+
const int KernelEnqueueFlagsCeiling = 3;

// Map a kernel-enqueue flag value to its printable name ("Bad" when out of range).
const char* KernelEnqueueFlagsString(int flag)
{
    static const char* const names[] = {
        "NoWait", "WaitKernel", "WaitWorkGroup",
    };

    return (flag < 0 || flag >= KernelEnqueueFlagsCeiling) ? "Bad" : names[flag];
}
+
const int KernelProfilingInfoCeiling = 1;

// Map a kernel-profiling-info bit position to its printable name ("Bad" when out of range).
const char* KernelProfilingInfoString(int info)
{
    static const char* const names[] = {
        "CmdExecTime",
    };

    return (info < 0 || info >= KernelProfilingInfoCeiling) ? "Bad" : names[info];
}
+
const int CapabilityCeiling = 58;

// Map a SPIR-V Capability value to its printable name; "Bad" for unknown values.
const char* CapabilityString(int info)
{
    // Extension-range values first.
    if (info == 4423)
        return "SubgroupBallotKHR";
    if (info == 4427)
        return "DrawParameters";

    static const char* const names[] = {
        "Matrix", "Shader", "Geometry", "Tessellation", "Addresses",
        "Linkage", "Kernel", "Vector16", "Float16Buffer", "Float16",
        "Float64", "Int64", "Int64Atomics", "ImageBasic", "ImageReadWrite",
        "ImageMipmap",
        "Bad",                          // 16: unused value
        "Pipes", "Groups", "DeviceEnqueue", "LiteralSampler",
        "AtomicStorage", "Int16", "TessellationPointSize",
        "GeometryPointSize", "ImageGatherExtended",
        "Bad",                          // 26: unused value
        "StorageImageMultisample",
        "UniformBufferArrayDynamicIndexing", "SampledImageArrayDynamicIndexing",
        "StorageBufferArrayDynamicIndexing", "StorageImageArrayDynamicIndexing",
        "ClipDistance", "CullDistance", "ImageCubeArray", "SampleRateShading",
        "ImageRect", "SampledRect", "GenericPointer", "Int8",
        "InputAttachment", "SparseResidency", "MinLod", "Sampled1D",
        "Image1D", "SampledCubeArray", "SampledBuffer", "ImageBuffer",
        "ImageMSArray", "StorageImageExtendedFormats", "ImageQuery",
        "DerivativeControl", "InterpolationFunction", "TransformFeedback",
        "GeometryStreams", "StorageImageReadWithoutFormat",
        "StorageImageWriteWithoutFormat", "MultiViewport",
    };

    if (info < 0 || info >= CapabilityCeiling)
        return "Bad";

    return names[info];
}
+
// Map a SPIR-V core opcode value to its printable name.
// "Bad" entries are unused/reserved opcode values; OpcodeCeiling (defined
// elsewhere in this file) marks one past the last core opcode.  The
// extension-range opcodes (>= 4421) appear after the default label, which
// is legal C++ — case order within a switch has no semantic effect.
const char* OpcodeString(int op)
{
    switch (op) {
    case 0:   return "OpNop";
    case 1:   return "OpUndef";
    case 2:   return "OpSourceContinued";
    case 3:   return "OpSource";
    case 4:   return "OpSourceExtension";
    case 5:   return "OpName";
    case 6:   return "OpMemberName";
    case 7:   return "OpString";
    case 8:   return "OpLine";
    case 9:   return "Bad";
    case 10:  return "OpExtension";
    case 11:  return "OpExtInstImport";
    case 12:  return "OpExtInst";
    case 13:  return "Bad";
    case 14:  return "OpMemoryModel";
    case 15:  return "OpEntryPoint";
    case 16:  return "OpExecutionMode";
    case 17:  return "OpCapability";
    case 18:  return "Bad";
    case 19:  return "OpTypeVoid";
    case 20:  return "OpTypeBool";
    case 21:  return "OpTypeInt";
    case 22:  return "OpTypeFloat";
    case 23:  return "OpTypeVector";
    case 24:  return "OpTypeMatrix";
    case 25:  return "OpTypeImage";
    case 26:  return "OpTypeSampler";
    case 27:  return "OpTypeSampledImage";
    case 28:  return "OpTypeArray";
    case 29:  return "OpTypeRuntimeArray";
    case 30:  return "OpTypeStruct";
    case 31:  return "OpTypeOpaque";
    case 32:  return "OpTypePointer";
    case 33:  return "OpTypeFunction";
    case 34:  return "OpTypeEvent";
    case 35:  return "OpTypeDeviceEvent";
    case 36:  return "OpTypeReserveId";
    case 37:  return "OpTypeQueue";
    case 38:  return "OpTypePipe";
    case 39:  return "OpTypeForwardPointer";
    case 40:  return "Bad";
    case 41:  return "OpConstantTrue";
    case 42:  return "OpConstantFalse";
    case 43:  return "OpConstant";
    case 44:  return "OpConstantComposite";
    case 45:  return "OpConstantSampler";
    case 46:  return "OpConstantNull";
    case 47:  return "Bad";
    case 48:  return "OpSpecConstantTrue";
    case 49:  return "OpSpecConstantFalse";
    case 50:  return "OpSpecConstant";
    case 51:  return "OpSpecConstantComposite";
    case 52:  return "OpSpecConstantOp";
    case 53:  return "Bad";
    case 54:  return "OpFunction";
    case 55:  return "OpFunctionParameter";
    case 56:  return "OpFunctionEnd";
    case 57:  return "OpFunctionCall";
    case 58:  return "Bad";
    case 59:  return "OpVariable";
    case 60:  return "OpImageTexelPointer";
    case 61:  return "OpLoad";
    case 62:  return "OpStore";
    case 63:  return "OpCopyMemory";
    case 64:  return "OpCopyMemorySized";
    case 65:  return "OpAccessChain";
    case 66:  return "OpInBoundsAccessChain";
    case 67:  return "OpPtrAccessChain";
    case 68:  return "OpArrayLength";
    case 69:  return "OpGenericPtrMemSemantics";
    case 70:  return "OpInBoundsPtrAccessChain";
    case 71:  return "OpDecorate";
    case 72:  return "OpMemberDecorate";
    case 73:  return "OpDecorationGroup";
    case 74:  return "OpGroupDecorate";
    case 75:  return "OpGroupMemberDecorate";
    case 76:  return "Bad";
    case 77:  return "OpVectorExtractDynamic";
    case 78:  return "OpVectorInsertDynamic";
    case 79:  return "OpVectorShuffle";
    case 80:  return "OpCompositeConstruct";
    case 81:  return "OpCompositeExtract";
    case 82:  return "OpCompositeInsert";
    case 83:  return "OpCopyObject";
    case 84:  return "OpTranspose";
    case 85:  return "Bad";
    case 86:  return "OpSampledImage";
    case 87:  return "OpImageSampleImplicitLod";
    case 88:  return "OpImageSampleExplicitLod";
    case 89:  return "OpImageSampleDrefImplicitLod";
    case 90:  return "OpImageSampleDrefExplicitLod";
    case 91:  return "OpImageSampleProjImplicitLod";
    case 92:  return "OpImageSampleProjExplicitLod";
    case 93:  return "OpImageSampleProjDrefImplicitLod";
    case 94:  return "OpImageSampleProjDrefExplicitLod";
    case 95:  return "OpImageFetch";
    case 96:  return "OpImageGather";
    case 97:  return "OpImageDrefGather";
    case 98:  return "OpImageRead";
    case 99:  return "OpImageWrite";
    case 100: return "OpImage";
    case 101: return "OpImageQueryFormat";
    case 102: return "OpImageQueryOrder";
    case 103: return "OpImageQuerySizeLod";
    case 104: return "OpImageQuerySize";
    case 105: return "OpImageQueryLod";
    case 106: return "OpImageQueryLevels";
    case 107: return "OpImageQuerySamples";
    case 108: return "Bad";
    case 109: return "OpConvertFToU";
    case 110: return "OpConvertFToS";
    case 111: return "OpConvertSToF";
    case 112: return "OpConvertUToF";
    case 113: return "OpUConvert";
    case 114: return "OpSConvert";
    case 115: return "OpFConvert";
    case 116: return "OpQuantizeToF16";
    case 117: return "OpConvertPtrToU";
    case 118: return "OpSatConvertSToU";
    case 119: return "OpSatConvertUToS";
    case 120: return "OpConvertUToPtr";
    case 121: return "OpPtrCastToGeneric";
    case 122: return "OpGenericCastToPtr";
    case 123: return "OpGenericCastToPtrExplicit";
    case 124: return "OpBitcast";
    case 125: return "Bad";
    case 126: return "OpSNegate";
    case 127: return "OpFNegate";
    case 128: return "OpIAdd";
    case 129: return "OpFAdd";
    case 130: return "OpISub";
    case 131: return "OpFSub";
    case 132: return "OpIMul";
    case 133: return "OpFMul";
    case 134: return "OpUDiv";
    case 135: return "OpSDiv";
    case 136: return "OpFDiv";
    case 137: return "OpUMod";
    case 138: return "OpSRem";
    case 139: return "OpSMod";
    case 140: return "OpFRem";
    case 141: return "OpFMod";
    case 142: return "OpVectorTimesScalar";
    case 143: return "OpMatrixTimesScalar";
    case 144: return "OpVectorTimesMatrix";
    case 145: return "OpMatrixTimesVector";
    case 146: return "OpMatrixTimesMatrix";
    case 147: return "OpOuterProduct";
    case 148: return "OpDot";
    case 149: return "OpIAddCarry";
    case 150: return "OpISubBorrow";
    case 151: return "OpUMulExtended";
    case 152: return "OpSMulExtended";
    case 153: return "Bad";
    case 154: return "OpAny";
    case 155: return "OpAll";
    case 156: return "OpIsNan";
    case 157: return "OpIsInf";
    case 158: return "OpIsFinite";
    case 159: return "OpIsNormal";
    case 160: return "OpSignBitSet";
    case 161: return "OpLessOrGreater";
    case 162: return "OpOrdered";
    case 163: return "OpUnordered";
    case 164: return "OpLogicalEqual";
    case 165: return "OpLogicalNotEqual";
    case 166: return "OpLogicalOr";
    case 167: return "OpLogicalAnd";
    case 168: return "OpLogicalNot";
    case 169: return "OpSelect";
    case 170: return "OpIEqual";
    case 171: return "OpINotEqual";
    case 172: return "OpUGreaterThan";
    case 173: return "OpSGreaterThan";
    case 174: return "OpUGreaterThanEqual";
    case 175: return "OpSGreaterThanEqual";
    case 176: return "OpULessThan";
    case 177: return "OpSLessThan";
    case 178: return "OpULessThanEqual";
    case 179: return "OpSLessThanEqual";
    case 180: return "OpFOrdEqual";
    case 181: return "OpFUnordEqual";
    case 182: return "OpFOrdNotEqual";
    case 183: return "OpFUnordNotEqual";
    case 184: return "OpFOrdLessThan";
    case 185: return "OpFUnordLessThan";
    case 186: return "OpFOrdGreaterThan";
    case 187: return "OpFUnordGreaterThan";
    case 188: return "OpFOrdLessThanEqual";
    case 189: return "OpFUnordLessThanEqual";
    case 190: return "OpFOrdGreaterThanEqual";
    case 191: return "OpFUnordGreaterThanEqual";
    case 192: return "Bad";
    case 193: return "Bad";
    case 194: return "OpShiftRightLogical";
    case 195: return "OpShiftRightArithmetic";
    case 196: return "OpShiftLeftLogical";
    case 197: return "OpBitwiseOr";
    case 198: return "OpBitwiseXor";
    case 199: return "OpBitwiseAnd";
    case 200: return "OpNot";
    case 201: return "OpBitFieldInsert";
    case 202: return "OpBitFieldSExtract";
    case 203: return "OpBitFieldUExtract";
    case 204: return "OpBitReverse";
    case 205: return "OpBitCount";
    case 206: return "Bad";
    case 207: return "OpDPdx";
    case 208: return "OpDPdy";
    case 209: return "OpFwidth";
    case 210: return "OpDPdxFine";
    case 211: return "OpDPdyFine";
    case 212: return "OpFwidthFine";
    case 213: return "OpDPdxCoarse";
    case 214: return "OpDPdyCoarse";
    case 215: return "OpFwidthCoarse";
    case 216: return "Bad";
    case 217: return "Bad";
    case 218: return "OpEmitVertex";
    case 219: return "OpEndPrimitive";
    case 220: return "OpEmitStreamVertex";
    case 221: return "OpEndStreamPrimitive";
    case 222: return "Bad";
    case 223: return "Bad";
    case 224: return "OpControlBarrier";
    case 225: return "OpMemoryBarrier";
    case 226: return "Bad";
    case 227: return "OpAtomicLoad";
    case 228: return "OpAtomicStore";
    case 229: return "OpAtomicExchange";
    case 230: return "OpAtomicCompareExchange";
    case 231: return "OpAtomicCompareExchangeWeak";
    case 232: return "OpAtomicIIncrement";
    case 233: return "OpAtomicIDecrement";
    case 234: return "OpAtomicIAdd";
    case 235: return "OpAtomicISub";
    case 236: return "OpAtomicSMin";
    case 237: return "OpAtomicUMin";
    case 238: return "OpAtomicSMax";
    case 239: return "OpAtomicUMax";
    case 240: return "OpAtomicAnd";
    case 241: return "OpAtomicOr";
    case 242: return "OpAtomicXor";
    case 243: return "Bad";
    case 244: return "Bad";
    case 245: return "OpPhi";
    case 246: return "OpLoopMerge";
    case 247: return "OpSelectionMerge";
    case 248: return "OpLabel";
    case 249: return "OpBranch";
    case 250: return "OpBranchConditional";
    case 251: return "OpSwitch";
    case 252: return "OpKill";
    case 253: return "OpReturn";
    case 254: return "OpReturnValue";
    case 255: return "OpUnreachable";
    case 256: return "OpLifetimeStart";
    case 257: return "OpLifetimeStop";
    case 258: return "Bad";
    case 259: return "OpGroupAsyncCopy";
    case 260: return "OpGroupWaitEvents";
    case 261: return "OpGroupAll";
    case 262: return "OpGroupAny";
    case 263: return "OpGroupBroadcast";
    case 264: return "OpGroupIAdd";
    case 265: return "OpGroupFAdd";
    case 266: return "OpGroupFMin";
    case 267: return "OpGroupUMin";
    case 268: return "OpGroupSMin";
    case 269: return "OpGroupFMax";
    case 270: return "OpGroupUMax";
    case 271: return "OpGroupSMax";
    case 272: return "Bad";
    case 273: return "Bad";
    case 274: return "OpReadPipe";
    case 275: return "OpWritePipe";
    case 276: return "OpReservedReadPipe";
    case 277: return "OpReservedWritePipe";
    case 278: return "OpReserveReadPipePackets";
    case 279: return "OpReserveWritePipePackets";
    case 280: return "OpCommitReadPipe";
    case 281: return "OpCommitWritePipe";
    case 282: return "OpIsValidReserveId";
    case 283: return "OpGetNumPipePackets";
    case 284: return "OpGetMaxPipePackets";
    case 285: return "OpGroupReserveReadPipePackets";
    case 286: return "OpGroupReserveWritePipePackets";
    case 287: return "OpGroupCommitReadPipe";
    case 288: return "OpGroupCommitWritePipe";
    case 289: return "Bad";
    case 290: return "Bad";
    case 291: return "OpEnqueueMarker";
    case 292: return "OpEnqueueKernel";
    case 293: return "OpGetKernelNDrangeSubGroupCount";
    case 294: return "OpGetKernelNDrangeMaxSubGroupSize";
    case 295: return "OpGetKernelWorkGroupSize";
    case 296: return "OpGetKernelPreferredWorkGroupSizeMultiple";
    case 297: return "OpRetainEvent";
    case 298: return "OpReleaseEvent";
    case 299: return "OpCreateUserEvent";
    case 300: return "OpIsValidEvent";
    case 301: return "OpSetUserEventStatus";
    case 302: return "OpCaptureEventProfilingInfo";
    case 303: return "OpGetDefaultQueue";
    case 304: return "OpBuildNDRange";
    case 305: return "OpImageSparseSampleImplicitLod";
    case 306: return "OpImageSparseSampleExplicitLod";
    case 307: return "OpImageSparseSampleDrefImplicitLod";
    case 308: return "OpImageSparseSampleDrefExplicitLod";
    case 309: return "OpImageSparseSampleProjImplicitLod";
    case 310: return "OpImageSparseSampleProjExplicitLod";
    case 311: return "OpImageSparseSampleProjDrefImplicitLod";
    case 312: return "OpImageSparseSampleProjDrefExplicitLod";
    case 313: return "OpImageSparseFetch";
    case 314: return "OpImageSparseGather";
    case 315: return "OpImageSparseDrefGather";
    case 316: return "OpImageSparseTexelsResident";
    case 317: return "OpNoLine";
    case 318: return "OpAtomicFlagTestAndSet";
    case 319: return "OpAtomicFlagClear";
    case 320: return "OpImageSparseRead";

    case OpcodeCeiling:
    default:
        return "Bad";

    // Extension-range opcodes (still reachable despite following 'default').
    case 4421: return "OpSubgroupBallotKHR";
    case 4422: return "OpSubgroupFirstInvocationKHR";

#ifdef AMD_EXTENSIONS
    case 5000: return "OpGroupIAddNonUniformAMD";
    case 5001: return "OpGroupFAddNonUniformAMD";
    case 5002: return "OpGroupFMinNonUniformAMD";
    case 5003: return "OpGroupUMinNonUniformAMD";
    case 5004: return "OpGroupSMinNonUniformAMD";
    case 5005: return "OpGroupFMaxNonUniformAMD";
    case 5006: return "OpGroupUMaxNonUniformAMD";
    case 5007: return "OpGroupSMaxNonUniformAMD";
#endif
    }
}
+
+// The set of objects that hold all the instruction/operand
+// parameterization information.
+InstructionParameters InstructionDesc[OpCodeMask + 1];
+OperandParameters ExecutionModeOperands[ExecutionModeCeiling];
+OperandParameters DecorationOperands[DecorationCeiling];
+
+EnumDefinition OperandClassParams[OperandCount];
+EnumParameters ExecutionModelParams[ExecutionModelCeiling];
+EnumParameters AddressingParams[AddressingModelCeiling];
+EnumParameters MemoryParams[MemoryModelCeiling];
+EnumParameters ExecutionModeParams[ExecutionModeCeiling];
+EnumParameters StorageParams[StorageClassCeiling];
+EnumParameters SamplerAddressingModeParams[SamplerAddressingModeCeiling];
+EnumParameters SamplerFilterModeParams[SamplerFilterModeCeiling];
+EnumParameters ImageFormatParams[ImageFormatCeiling];
+EnumParameters ImageChannelOrderParams[ImageChannelOrderCeiling];
+EnumParameters ImageChannelDataTypeParams[ImageChannelDataTypeCeiling];
+EnumParameters ImageOperandsParams[ImageOperandsCeiling];
+EnumParameters FPFastMathParams[FPFastMathCeiling];
+EnumParameters FPRoundingModeParams[FPRoundingModeCeiling];
+EnumParameters LinkageTypeParams[LinkageTypeCeiling];
+EnumParameters DecorationParams[DecorationCeiling];
+EnumParameters BuiltInParams[BuiltInCeiling];
+EnumParameters DimensionalityParams[DimensionCeiling];
+EnumParameters FuncParamAttrParams[FuncParamAttrCeiling];
+EnumParameters AccessQualifierParams[AccessQualifierCeiling];
+EnumParameters GroupOperationParams[GroupOperationCeiling];
+EnumParameters LoopControlParams[FunctionControlCeiling];
+EnumParameters SelectionControlParams[SelectControlCeiling];
+EnumParameters FunctionControlParams[FunctionControlCeiling];
+EnumParameters MemorySemanticsParams[MemorySemanticsCeiling];
+EnumParameters MemoryAccessParams[MemoryAccessCeiling];
+EnumParameters ScopeParams[ScopeCeiling];
+EnumParameters KernelEnqueueFlagsParams[KernelEnqueueFlagsCeiling];
+EnumParameters KernelProfilingInfoParams[KernelProfilingInfoCeiling];
+EnumParameters CapabilityParams[CapabilityCeiling];
+
+// Set up all the parameterizing descriptions of the opcodes, operands, etc.
+void Parameterize()
+{
+    // only do this once.
+    static bool initialized = false;
+    if (initialized)
+        return;
+    initialized = true;
+
+    // Exceptions to having a result <id> and a resulting type <id>.
+    // (Everything is initialized to have both).
+
+    InstructionDesc[OpNop].setResultAndType(false, false);
+    InstructionDesc[OpSource].setResultAndType(false, false);
+    InstructionDesc[OpSourceContinued].setResultAndType(false, false);
+    InstructionDesc[OpSourceExtension].setResultAndType(false, false);
+    InstructionDesc[OpExtension].setResultAndType(false, false);
+    InstructionDesc[OpExtInstImport].setResultAndType(true, false);
+    InstructionDesc[OpCapability].setResultAndType(false, false);
+    InstructionDesc[OpMemoryModel].setResultAndType(false, false);
+    InstructionDesc[OpEntryPoint].setResultAndType(false, false);
+    InstructionDesc[OpExecutionMode].setResultAndType(false, false);
+    InstructionDesc[OpTypeVoid].setResultAndType(true, false);
+    InstructionDesc[OpTypeBool].setResultAndType(true, false);
+    InstructionDesc[OpTypeInt].setResultAndType(true, false);
+    InstructionDesc[OpTypeFloat].setResultAndType(true, false);
+    InstructionDesc[OpTypeVector].setResultAndType(true, false);
+    InstructionDesc[OpTypeMatrix].setResultAndType(true, false);
+    InstructionDesc[OpTypeImage].setResultAndType(true, false);
+    InstructionDesc[OpTypeSampler].setResultAndType(true, false);
+    InstructionDesc[OpTypeSampledImage].setResultAndType(true, false);
+    InstructionDesc[OpTypeArray].setResultAndType(true, false);
+    InstructionDesc[OpTypeRuntimeArray].setResultAndType(true, false);
+    InstructionDesc[OpTypeStruct].setResultAndType(true, false);
+    InstructionDesc[OpTypeOpaque].setResultAndType(true, false);
+    InstructionDesc[OpTypePointer].setResultAndType(true, false);
+    InstructionDesc[OpTypeForwardPointer].setResultAndType(false, false);
+    InstructionDesc[OpTypeFunction].setResultAndType(true, false);
+    InstructionDesc[OpTypeEvent].setResultAndType(true, false);
+    InstructionDesc[OpTypeDeviceEvent].setResultAndType(true, false);
+    InstructionDesc[OpTypeReserveId].setResultAndType(true, false);
+    InstructionDesc[OpTypeQueue].setResultAndType(true, false);
+    InstructionDesc[OpTypePipe].setResultAndType(true, false);
+    InstructionDesc[OpFunctionEnd].setResultAndType(false, false);
+    InstructionDesc[OpStore].setResultAndType(false, false);
+    InstructionDesc[OpImageWrite].setResultAndType(false, false);
+    InstructionDesc[OpDecorationGroup].setResultAndType(true, false);
+    InstructionDesc[OpDecorate].setResultAndType(false, false);
+    InstructionDesc[OpMemberDecorate].setResultAndType(false, false);
+    InstructionDesc[OpGroupDecorate].setResultAndType(false, false);
+    InstructionDesc[OpGroupMemberDecorate].setResultAndType(false, false);
+    InstructionDesc[OpName].setResultAndType(false, false);
+    InstructionDesc[OpMemberName].setResultAndType(false, false);
+    InstructionDesc[OpString].setResultAndType(true, false);
+    InstructionDesc[OpLine].setResultAndType(false, false);
+    InstructionDesc[OpNoLine].setResultAndType(false, false);
+    InstructionDesc[OpCopyMemory].setResultAndType(false, false);
+    InstructionDesc[OpCopyMemorySized].setResultAndType(false, false);
+    InstructionDesc[OpEmitVertex].setResultAndType(false, false);
+    InstructionDesc[OpEndPrimitive].setResultAndType(false, false);
+    InstructionDesc[OpEmitStreamVertex].setResultAndType(false, false);
+    InstructionDesc[OpEndStreamPrimitive].setResultAndType(false, false);
+    InstructionDesc[OpControlBarrier].setResultAndType(false, false);
+    InstructionDesc[OpMemoryBarrier].setResultAndType(false, false);
+    InstructionDesc[OpAtomicStore].setResultAndType(false, false);
+    InstructionDesc[OpLoopMerge].setResultAndType(false, false);
+    InstructionDesc[OpSelectionMerge].setResultAndType(false, false);
+    InstructionDesc[OpLabel].setResultAndType(true, false);
+    InstructionDesc[OpBranch].setResultAndType(false, false);
+    InstructionDesc[OpBranchConditional].setResultAndType(false, false);
+    InstructionDesc[OpSwitch].setResultAndType(false, false);
+    InstructionDesc[OpKill].setResultAndType(false, false);
+    InstructionDesc[OpReturn].setResultAndType(false, false);
+    InstructionDesc[OpReturnValue].setResultAndType(false, false);
+    InstructionDesc[OpUnreachable].setResultAndType(false, false);
+    InstructionDesc[OpLifetimeStart].setResultAndType(false, false);
+    InstructionDesc[OpLifetimeStop].setResultAndType(false, false);
+    InstructionDesc[OpCommitReadPipe].setResultAndType(false, false);
+    InstructionDesc[OpCommitWritePipe].setResultAndType(false, false);
+    InstructionDesc[OpGroupCommitWritePipe].setResultAndType(false, false);
+    InstructionDesc[OpGroupCommitReadPipe].setResultAndType(false, false);
+    InstructionDesc[OpCaptureEventProfilingInfo].setResultAndType(false, false);
+    InstructionDesc[OpSetUserEventStatus].setResultAndType(false, false);
+    InstructionDesc[OpRetainEvent].setResultAndType(false, false);
+    InstructionDesc[OpReleaseEvent].setResultAndType(false, false);
+    InstructionDesc[OpGroupWaitEvents].setResultAndType(false, false);
+    InstructionDesc[OpAtomicFlagClear].setResultAndType(false, false);
+
+    // Specific additional context-dependent operands
+
+    ExecutionModeOperands[ExecutionModeInvocations].push(OperandLiteralNumber, "'Number of <<Invocation,invocations>>'");
+
+    ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'x size'");
+    ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'y size'");
+    ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'z size'");
+
+    ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'x size'");
+    ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'y size'");
+    ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'z size'");
+
+    ExecutionModeOperands[ExecutionModeOutputVertices].push(OperandLiteralNumber, "'Vertex count'");
+    ExecutionModeOperands[ExecutionModeVecTypeHint].push(OperandLiteralNumber, "'Vector type'");
+
+    DecorationOperands[DecorationStream].push(OperandLiteralNumber, "'Stream Number'");
+    DecorationOperands[DecorationLocation].push(OperandLiteralNumber, "'Location'");
+    DecorationOperands[DecorationComponent].push(OperandLiteralNumber, "'Component'");
+    DecorationOperands[DecorationIndex].push(OperandLiteralNumber, "'Index'");
+    DecorationOperands[DecorationBinding].push(OperandLiteralNumber, "'Binding Point'");
+    DecorationOperands[DecorationDescriptorSet].push(OperandLiteralNumber, "'Descriptor Set'");
+    DecorationOperands[DecorationOffset].push(OperandLiteralNumber, "'Byte Offset'");
+    DecorationOperands[DecorationXfbBuffer].push(OperandLiteralNumber, "'XFB Buffer Number'");
+    DecorationOperands[DecorationXfbStride].push(OperandLiteralNumber, "'XFB Stride'");
+    DecorationOperands[DecorationArrayStride].push(OperandLiteralNumber, "'Array Stride'");
+    DecorationOperands[DecorationMatrixStride].push(OperandLiteralNumber, "'Matrix Stride'");
+    DecorationOperands[DecorationBuiltIn].push(OperandLiteralNumber, "See <<BuiltIn,*BuiltIn*>>");
+    DecorationOperands[DecorationFPRoundingMode].push(OperandFPRoundingMode, "'Floating-Point Rounding Mode'");
+    DecorationOperands[DecorationFPFastMathMode].push(OperandFPFastMath, "'Fast-Math Mode'");
+    DecorationOperands[DecorationLinkageAttributes].push(OperandLiteralString, "'Name'");
+    DecorationOperands[DecorationLinkageAttributes].push(OperandLinkageType, "'Linkage Type'");
+    DecorationOperands[DecorationFuncParamAttr].push(OperandFuncParamAttr, "'Function Parameter Attribute'");
+    DecorationOperands[DecorationSpecId].push(OperandLiteralNumber, "'Specialization Constant ID'");
+    DecorationOperands[DecorationInputAttachmentIndex].push(OperandLiteralNumber, "'Attachment Index'");
+    DecorationOperands[DecorationAlignment].push(OperandLiteralNumber, "'Alignment'");
+
+    OperandClassParams[OperandSource].set(SourceLanguageCeiling, SourceString, 0);
+    OperandClassParams[OperandExecutionModel].set(ExecutionModelCeiling, ExecutionModelString, ExecutionModelParams);
+    OperandClassParams[OperandAddressing].set(AddressingModelCeiling, AddressingString, AddressingParams);
+    OperandClassParams[OperandMemory].set(MemoryModelCeiling, MemoryString, MemoryParams);
+    OperandClassParams[OperandExecutionMode].set(ExecutionModeCeiling, ExecutionModeString, ExecutionModeParams);
+    OperandClassParams[OperandExecutionMode].setOperands(ExecutionModeOperands);
+    OperandClassParams[OperandStorage].set(StorageClassCeiling, StorageClassString, StorageParams);
+    OperandClassParams[OperandDimensionality].set(DimensionCeiling, DimensionString, DimensionalityParams);
+    OperandClassParams[OperandSamplerAddressingMode].set(SamplerAddressingModeCeiling, SamplerAddressingModeString, SamplerAddressingModeParams);
+    OperandClassParams[OperandSamplerFilterMode].set(SamplerFilterModeCeiling, SamplerFilterModeString, SamplerFilterModeParams);
+    OperandClassParams[OperandSamplerImageFormat].set(ImageFormatCeiling, ImageFormatString, ImageFormatParams);
+    OperandClassParams[OperandImageChannelOrder].set(ImageChannelOrderCeiling, ImageChannelOrderString, ImageChannelOrderParams);
+    OperandClassParams[OperandImageChannelDataType].set(ImageChannelDataTypeCeiling, ImageChannelDataTypeString, ImageChannelDataTypeParams);
+    OperandClassParams[OperandImageOperands].set(ImageOperandsCeiling, ImageOperandsString, ImageOperandsParams, true);
+    OperandClassParams[OperandFPFastMath].set(FPFastMathCeiling, FPFastMathString, FPFastMathParams, true);
+    OperandClassParams[OperandFPRoundingMode].set(FPRoundingModeCeiling, FPRoundingModeString, FPRoundingModeParams);
+    OperandClassParams[OperandLinkageType].set(LinkageTypeCeiling, LinkageTypeString, LinkageTypeParams);
+    OperandClassParams[OperandFuncParamAttr].set(FuncParamAttrCeiling, FuncParamAttrString, FuncParamAttrParams);
+    OperandClassParams[OperandAccessQualifier].set(AccessQualifierCeiling, AccessQualifierString, AccessQualifierParams);
+    OperandClassParams[OperandDecoration].set(DecorationCeiling, DecorationString, DecorationParams);
+    OperandClassParams[OperandDecoration].setOperands(DecorationOperands);
+    OperandClassParams[OperandBuiltIn].set(BuiltInCeiling, BuiltInString, BuiltInParams);
+    OperandClassParams[OperandSelect].set(SelectControlCeiling, SelectControlString, SelectionControlParams, true);
+    OperandClassParams[OperandLoop].set(LoopControlCeiling, LoopControlString, LoopControlParams, true);
+    OperandClassParams[OperandFunction].set(FunctionControlCeiling, FunctionControlString, FunctionControlParams, true);
+    OperandClassParams[OperandMemorySemantics].set(MemorySemanticsCeiling, MemorySemanticsString, MemorySemanticsParams, true);
+    OperandClassParams[OperandMemoryAccess].set(MemoryAccessCeiling, MemoryAccessString, MemoryAccessParams, true);
+    OperandClassParams[OperandScope].set(ScopeCeiling, ScopeString, ScopeParams);
+    OperandClassParams[OperandGroupOperation].set(GroupOperationCeiling, GroupOperationString, GroupOperationParams);
+    OperandClassParams[OperandKernelEnqueueFlags].set(KernelEnqueueFlagsCeiling, KernelEnqueueFlagsString, KernelEnqueueFlagsParams);
+    OperandClassParams[OperandKernelProfilingInfo].set(KernelProfilingInfoCeiling, KernelProfilingInfoString, KernelProfilingInfoParams, true);
+    OperandClassParams[OperandCapability].set(CapabilityCeiling, CapabilityString, CapabilityParams);
+    OperandClassParams[OperandOpcode].set(OpcodeCeiling, OpcodeString, 0);
+
+    CapabilityParams[CapabilityShader].caps.push_back(CapabilityMatrix);
+    CapabilityParams[CapabilityGeometry].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityTessellation].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityVector16].caps.push_back(CapabilityKernel);
+    CapabilityParams[CapabilityFloat16Buffer].caps.push_back(CapabilityKernel);
+    CapabilityParams[CapabilityInt64Atomics].caps.push_back(CapabilityInt64);
+    CapabilityParams[CapabilityImageBasic].caps.push_back(CapabilityKernel);
+    CapabilityParams[CapabilityImageReadWrite].caps.push_back(CapabilityImageBasic);
+    CapabilityParams[CapabilityImageMipmap].caps.push_back(CapabilityImageBasic);
+    CapabilityParams[CapabilityPipes].caps.push_back(CapabilityKernel);
+    CapabilityParams[CapabilityDeviceEnqueue].caps.push_back(CapabilityKernel);
+    CapabilityParams[CapabilityLiteralSampler].caps.push_back(CapabilityKernel);
+    CapabilityParams[CapabilityAtomicStorage].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilitySampleRateShading].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityTessellationPointSize].caps.push_back(CapabilityTessellation);
+    CapabilityParams[CapabilityGeometryPointSize].caps.push_back(CapabilityGeometry);
+    CapabilityParams[CapabilityImageGatherExtended].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityStorageImageExtendedFormats].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityStorageImageMultisample].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityUniformBufferArrayDynamicIndexing].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilitySampledImageArrayDynamicIndexing].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityStorageBufferArrayDynamicIndexing].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityStorageImageArrayDynamicIndexing].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityClipDistance].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityCullDistance].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityGenericPointer].caps.push_back(CapabilityAddresses);
+    CapabilityParams[CapabilityInt8].caps.push_back(CapabilityKernel);
+    CapabilityParams[CapabilityInputAttachment].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityMinLod].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilitySparseResidency].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilitySampled1D].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilitySampledRect].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilitySampledBuffer].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilitySampledCubeArray].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityImageMSArray].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityImage1D].caps.push_back(CapabilitySampled1D);
+    CapabilityParams[CapabilityImageRect].caps.push_back(CapabilitySampledRect);
+    CapabilityParams[CapabilityImageBuffer].caps.push_back(CapabilitySampledBuffer);
+    CapabilityParams[CapabilityImageCubeArray].caps.push_back(CapabilitySampledCubeArray);
+    CapabilityParams[CapabilityImageQuery].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityDerivativeControl].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityInterpolationFunction].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityTransformFeedback].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityGeometryStreams].caps.push_back(CapabilityGeometry);
+    CapabilityParams[CapabilityStorageImageReadWithoutFormat].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityStorageImageWriteWithoutFormat].caps.push_back(CapabilityShader);
+    CapabilityParams[CapabilityMultiViewport].caps.push_back(CapabilityGeometry);
+
+    AddressingParams[AddressingModelPhysical32].caps.push_back(CapabilityAddresses);
+    AddressingParams[AddressingModelPhysical64].caps.push_back(CapabilityAddresses);
+
+    MemoryParams[MemoryModelSimple].caps.push_back(CapabilityShader);
+    MemoryParams[MemoryModelGLSL450].caps.push_back(CapabilityShader);
+    MemoryParams[MemoryModelOpenCL].caps.push_back(CapabilityKernel);
+
+    MemorySemanticsParams[MemorySemanticsUniformMemoryShift].caps.push_back(CapabilityShader);
+    MemorySemanticsParams[MemorySemanticsAtomicCounterMemoryShift].caps.push_back(CapabilityAtomicStorage);
+
+    ExecutionModelParams[ExecutionModelVertex].caps.push_back(CapabilityShader);
+    ExecutionModelParams[ExecutionModelTessellationControl].caps.push_back(CapabilityTessellation);
+    ExecutionModelParams[ExecutionModelTessellationEvaluation].caps.push_back(CapabilityTessellation);
+    ExecutionModelParams[ExecutionModelGeometry].caps.push_back(CapabilityGeometry);
+    ExecutionModelParams[ExecutionModelFragment].caps.push_back(CapabilityShader);
+    ExecutionModelParams[ExecutionModelGLCompute].caps.push_back(CapabilityShader);
+    ExecutionModelParams[ExecutionModelKernel].caps.push_back(CapabilityKernel);
+
+    // Storage capabilities
+    StorageParams[StorageClassInput].caps.push_back(CapabilityShader);
+    StorageParams[StorageClassUniform].caps.push_back(CapabilityShader);
+    StorageParams[StorageClassOutput].caps.push_back(CapabilityShader);
+    StorageParams[StorageClassPrivate].caps.push_back(CapabilityShader);
+    StorageParams[StorageClassGeneric].caps.push_back(CapabilityKernel);
+    StorageParams[StorageClassAtomicCounter].caps.push_back(CapabilityAtomicStorage);
+    StorageParams[StorageClassPushConstant].caps.push_back(CapabilityShader);
+
+    // Sampler Filter & Addressing mode capabilities
+    SamplerAddressingModeParams[SamplerAddressingModeNone].caps.push_back(CapabilityKernel);
+    SamplerAddressingModeParams[SamplerAddressingModeClampToEdge].caps.push_back(CapabilityKernel);
+    SamplerAddressingModeParams[SamplerAddressingModeClamp].caps.push_back(CapabilityKernel);
+    SamplerAddressingModeParams[SamplerAddressingModeRepeat].caps.push_back(CapabilityKernel);
+    SamplerAddressingModeParams[SamplerAddressingModeRepeatMirrored].caps.push_back(CapabilityKernel);
+
+    SamplerFilterModeParams[SamplerFilterModeNearest].caps.push_back(CapabilityKernel);
+    SamplerFilterModeParams[SamplerFilterModeLinear].caps.push_back(CapabilityKernel);
+
+    // image format capabilities
+
+    // ES/Desktop float
+    ImageFormatParams[ImageFormatRgba32f].caps.push_back(CapabilityShader);
+    ImageFormatParams[ImageFormatRgba16f].caps.push_back(CapabilityShader);
+    ImageFormatParams[ImageFormatR32f].caps.push_back(CapabilityShader);
+    ImageFormatParams[ImageFormatRgba8].caps.push_back(CapabilityShader);
+    ImageFormatParams[ImageFormatRgba8Snorm].caps.push_back(CapabilityShader);
+
+    // Desktop float
+    ImageFormatParams[ImageFormatRg32f].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatRg16f].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatR11fG11fB10f].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatR16f].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatRgba16].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatRgb10A2].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatRg16].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatRg8].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatR16].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatR8].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatRgba16Snorm].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatRg16Snorm].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatRg8Snorm].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatR16Snorm].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatR8Snorm].caps.push_back(CapabilityStorageImageExtendedFormats);
+
+    // ES/Desktop int
+    ImageFormatParams[ImageFormatRgba32i].caps.push_back(CapabilityShader);
+    ImageFormatParams[ImageFormatRgba16i].caps.push_back(CapabilityShader);
+    ImageFormatParams[ImageFormatRgba8i].caps.push_back(CapabilityShader);
+    ImageFormatParams[ImageFormatR32i].caps.push_back(CapabilityShader);
+
+    // Desktop int
+    ImageFormatParams[ImageFormatRg32i].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatRg16i].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatRg8i].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatR16i].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatR8i].caps.push_back(CapabilityStorageImageExtendedFormats);
+
+    // ES/Desktop uint
+    ImageFormatParams[ImageFormatRgba32ui].caps.push_back(CapabilityShader);
+    ImageFormatParams[ImageFormatRgba16ui].caps.push_back(CapabilityShader);
+    ImageFormatParams[ImageFormatRgba8ui].caps.push_back(CapabilityShader);
+    ImageFormatParams[ImageFormatR32ui].caps.push_back(CapabilityShader);
+
+    // Desktop uint
+    ImageFormatParams[ImageFormatRgb10a2ui].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatRg32ui].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatRg16ui].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatRg8ui].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatR16ui].caps.push_back(CapabilityStorageImageExtendedFormats);
+    ImageFormatParams[ImageFormatR8ui].caps.push_back(CapabilityStorageImageExtendedFormats);
+
+    // image channel order capabilities
+    for (int i = 0; i < ImageChannelOrderCeiling; ++i) {
+        ImageChannelOrderParams[i].caps.push_back(CapabilityKernel);
+    }
+
+    // image channel type capabilities
+    for (int i = 0; i < ImageChannelDataTypeCeiling; ++i) {
+        ImageChannelDataTypeParams[i].caps.push_back(CapabilityKernel);
+    }
+
+    // image lookup operands
+    ImageOperandsParams[ImageOperandsBiasShift].caps.push_back(CapabilityShader);
+    ImageOperandsParams[ImageOperandsOffsetShift].caps.push_back(CapabilityImageGatherExtended);
+    ImageOperandsParams[ImageOperandsMinLodShift].caps.push_back(CapabilityMinLod);
+
+    // fast math flags capabilities
+    for (int i = 0; i < FPFastMathCeiling; ++i) {
+        FPFastMathParams[i].caps.push_back(CapabilityKernel);
+    }
+
+    // fp rounding mode capabilities
+    for (int i = 0; i < FPRoundingModeCeiling; ++i) {
+        FPRoundingModeParams[i].caps.push_back(CapabilityKernel);
+    }
+
+    // linkage types
+    for (int i = 0; i < LinkageTypeCeiling; ++i) {
+        LinkageTypeParams[i].caps.push_back(CapabilityLinkage);
+    }
+
+    // function argument types
+    for (int i = 0; i < FuncParamAttrCeiling; ++i) {
+        FuncParamAttrParams[i].caps.push_back(CapabilityKernel);
+    }
+
+    // function argument types
+    for (int i = 0; i < AccessQualifierCeiling; ++i) {
+        AccessQualifierParams[i].caps.push_back(CapabilityKernel);
+    }
+
+    ExecutionModeParams[ExecutionModeInvocations].caps.push_back(CapabilityGeometry);
+    ExecutionModeParams[ExecutionModeSpacingEqual].caps.push_back(CapabilityTessellation);
+    ExecutionModeParams[ExecutionModeSpacingFractionalEven].caps.push_back(CapabilityTessellation);
+    ExecutionModeParams[ExecutionModeSpacingFractionalOdd].caps.push_back(CapabilityTessellation);
+    ExecutionModeParams[ExecutionModeVertexOrderCw].caps.push_back(CapabilityTessellation);
+    ExecutionModeParams[ExecutionModeVertexOrderCcw].caps.push_back(CapabilityTessellation);
+    ExecutionModeParams[ExecutionModePixelCenterInteger].caps.push_back(CapabilityShader);
+    ExecutionModeParams[ExecutionModeOriginUpperLeft].caps.push_back(CapabilityShader);
+    ExecutionModeParams[ExecutionModeOriginLowerLeft].caps.push_back(CapabilityShader);
+    ExecutionModeParams[ExecutionModeEarlyFragmentTests].caps.push_back(CapabilityShader);
+    ExecutionModeParams[ExecutionModePointMode].caps.push_back(CapabilityTessellation);
+    ExecutionModeParams[ExecutionModeXfb].caps.push_back(CapabilityTransformFeedback);
+    ExecutionModeParams[ExecutionModeDepthReplacing].caps.push_back(CapabilityShader);
+    ExecutionModeParams[ExecutionModeDepthGreater].caps.push_back(CapabilityShader);
+    ExecutionModeParams[ExecutionModeDepthLess].caps.push_back(CapabilityShader);
+    ExecutionModeParams[ExecutionModeDepthUnchanged].caps.push_back(CapabilityShader);
+    ExecutionModeParams[ExecutionModeLocalSizeHint].caps.push_back(CapabilityKernel);
+    ExecutionModeParams[ExecutionModeInputPoints].caps.push_back(CapabilityGeometry);
+    ExecutionModeParams[ExecutionModeInputLines].caps.push_back(CapabilityGeometry);
+    ExecutionModeParams[ExecutionModeInputLinesAdjacency].caps.push_back(CapabilityGeometry);
+    ExecutionModeParams[ExecutionModeTriangles].caps.push_back(CapabilityGeometry);
+    ExecutionModeParams[ExecutionModeTriangles].caps.push_back(CapabilityTessellation);
+    ExecutionModeParams[ExecutionModeInputTrianglesAdjacency].caps.push_back(CapabilityGeometry);
+    ExecutionModeParams[ExecutionModeQuads].caps.push_back(CapabilityTessellation);
+    ExecutionModeParams[ExecutionModeIsolines].caps.push_back(CapabilityTessellation);
+    ExecutionModeParams[ExecutionModeOutputVertices].caps.push_back(CapabilityGeometry);
+    ExecutionModeParams[ExecutionModeOutputVertices].caps.push_back(CapabilityTessellation);
+    ExecutionModeParams[ExecutionModeOutputPoints].caps.push_back(CapabilityGeometry);
+    ExecutionModeParams[ExecutionModeOutputLineStrip].caps.push_back(CapabilityGeometry);
+    ExecutionModeParams[ExecutionModeOutputTriangleStrip].caps.push_back(CapabilityGeometry);
+    ExecutionModeParams[ExecutionModeVecTypeHint].caps.push_back(CapabilityKernel);
+    ExecutionModeParams[ExecutionModeContractionOff].caps.push_back(CapabilityKernel);
+
+    DecorationParams[DecorationRelaxedPrecision].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationBlock].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationBufferBlock].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationRowMajor].caps.push_back(CapabilityMatrix);
+    DecorationParams[DecorationColMajor].caps.push_back(CapabilityMatrix);
+    DecorationParams[DecorationGLSLShared].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationGLSLPacked].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationNoPerspective].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationFlat].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationPatch].caps.push_back(CapabilityTessellation);
+    DecorationParams[DecorationCentroid].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationSample].caps.push_back(CapabilitySampleRateShading);
+    DecorationParams[DecorationInvariant].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationConstant].caps.push_back(CapabilityKernel);
+    DecorationParams[DecorationUniform].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationCPacked].caps.push_back(CapabilityKernel);
+    DecorationParams[DecorationSaturatedConversion].caps.push_back(CapabilityKernel);
+    DecorationParams[DecorationStream].caps.push_back(CapabilityGeometryStreams);
+    DecorationParams[DecorationLocation].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationComponent].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationOffset].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationIndex].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationBinding].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationDescriptorSet].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationXfbBuffer].caps.push_back(CapabilityTransformFeedback);
+    DecorationParams[DecorationXfbStride].caps.push_back(CapabilityTransformFeedback);
+    DecorationParams[DecorationArrayStride].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationMatrixStride].caps.push_back(CapabilityMatrix);
+    DecorationParams[DecorationFuncParamAttr].caps.push_back(CapabilityKernel);
+    DecorationParams[DecorationFPRoundingMode].caps.push_back(CapabilityKernel);
+    DecorationParams[DecorationFPFastMathMode].caps.push_back(CapabilityKernel);
+    DecorationParams[DecorationLinkageAttributes].caps.push_back(CapabilityLinkage);
+    DecorationParams[DecorationSpecId].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationNoContraction].caps.push_back(CapabilityShader);
+    DecorationParams[DecorationInputAttachmentIndex].caps.push_back(CapabilityInputAttachment);
+    DecorationParams[DecorationAlignment].caps.push_back(CapabilityKernel);
+
+    BuiltInParams[BuiltInPosition].caps.push_back(CapabilityShader);
+    BuiltInParams[BuiltInPointSize].caps.push_back(CapabilityShader);
+    BuiltInParams[BuiltInClipDistance].caps.push_back(CapabilityClipDistance);
+    BuiltInParams[BuiltInCullDistance].caps.push_back(CapabilityCullDistance);
+
+    BuiltInParams[BuiltInVertexId].caps.push_back(CapabilityShader);
+    BuiltInParams[BuiltInVertexId].desc = "Vertex ID, which takes on values 0, 1, 2, . . . .";
+
+    BuiltInParams[BuiltInInstanceId].caps.push_back(CapabilityShader);
+    BuiltInParams[BuiltInInstanceId].desc = "Instance ID, which takes on values 0, 1, 2, . . . .";
+
+    BuiltInParams[BuiltInVertexIndex].caps.push_back(CapabilityShader);
+    BuiltInParams[BuiltInVertexIndex].desc = "Vertex index, which takes on values base, base+1, base+2, . . . .";
+
+    BuiltInParams[BuiltInInstanceIndex].caps.push_back(CapabilityShader);
+    BuiltInParams[BuiltInInstanceIndex].desc = "Instance index, which takes on values base, base+1, base+2, . . . .";
+
+    BuiltInParams[BuiltInPrimitiveId].caps.push_back(CapabilityGeometry);
+    BuiltInParams[BuiltInPrimitiveId].caps.push_back(CapabilityTessellation);
+    BuiltInParams[BuiltInInvocationId].caps.push_back(CapabilityGeometry);
+    BuiltInParams[BuiltInInvocationId].caps.push_back(CapabilityTessellation);
+    BuiltInParams[BuiltInLayer].caps.push_back(CapabilityGeometry);
+    BuiltInParams[BuiltInViewportIndex].caps.push_back(CapabilityMultiViewport);
+    BuiltInParams[BuiltInTessLevelOuter].caps.push_back(CapabilityTessellation);
+    BuiltInParams[BuiltInTessLevelInner].caps.push_back(CapabilityTessellation);
+    BuiltInParams[BuiltInTessCoord].caps.push_back(CapabilityTessellation);
+    BuiltInParams[BuiltInPatchVertices].caps.push_back(CapabilityTessellation);
+    BuiltInParams[BuiltInFragCoord].caps.push_back(CapabilityShader);
+    BuiltInParams[BuiltInPointCoord].caps.push_back(CapabilityShader);
+    BuiltInParams[BuiltInFrontFacing].caps.push_back(CapabilityShader);
+    BuiltInParams[BuiltInSampleId].caps.push_back(CapabilitySampleRateShading);
+    BuiltInParams[BuiltInSamplePosition].caps.push_back(CapabilitySampleRateShading);
+    BuiltInParams[BuiltInSampleMask].caps.push_back(CapabilitySampleRateShading);
+    BuiltInParams[BuiltInFragDepth].caps.push_back(CapabilityShader);
+    BuiltInParams[BuiltInHelperInvocation].caps.push_back(CapabilityShader);
+    BuiltInParams[BuiltInWorkDim].caps.push_back(CapabilityKernel);
+    BuiltInParams[BuiltInGlobalSize].caps.push_back(CapabilityKernel);
+    BuiltInParams[BuiltInEnqueuedWorkgroupSize].caps.push_back(CapabilityKernel);
+    BuiltInParams[BuiltInGlobalOffset].caps.push_back(CapabilityKernel);
+    BuiltInParams[BuiltInGlobalLinearId].caps.push_back(CapabilityKernel);
+
+    BuiltInParams[BuiltInSubgroupSize].caps.push_back(CapabilityKernel);
+    BuiltInParams[BuiltInSubgroupMaxSize].caps.push_back(CapabilityKernel);
+    BuiltInParams[BuiltInNumSubgroups].caps.push_back(CapabilityKernel);
+    BuiltInParams[BuiltInNumEnqueuedSubgroups].caps.push_back(CapabilityKernel);
+    BuiltInParams[BuiltInSubgroupId].caps.push_back(CapabilityKernel);
+    BuiltInParams[BuiltInSubgroupLocalInvocationId].caps.push_back(CapabilityKernel);
+
+    DimensionalityParams[Dim1D].caps.push_back(CapabilitySampled1D);
+    DimensionalityParams[DimCube].caps.push_back(CapabilityShader);
+    DimensionalityParams[DimRect].caps.push_back(CapabilitySampledRect);
+    DimensionalityParams[DimBuffer].caps.push_back(CapabilitySampledBuffer);
+    DimensionalityParams[DimSubpassData].caps.push_back(CapabilityInputAttachment);
+
+    // Group Operations
+    for (int i = 0; i < GroupOperationCeiling; ++i) {
+        GroupOperationParams[i].caps.push_back(CapabilityKernel);
+    }
+
+    // Enqueue flags
+    for (int i = 0; i < KernelEnqueueFlagsCeiling; ++i) {
+        KernelEnqueueFlagsParams[i].caps.push_back(CapabilityKernel);
+    }
+
+    // Profiling info
+    KernelProfilingInfoParams[0].caps.push_back(CapabilityKernel);
+
+    // set name of operator, an initial set of <id> style operands, and the description
+
+    InstructionDesc[OpSource].operands.push(OperandSource, "");
+    InstructionDesc[OpSource].operands.push(OperandLiteralNumber, "'Version'");
+    InstructionDesc[OpSource].operands.push(OperandId, "'File'", true);
+    InstructionDesc[OpSource].operands.push(OperandLiteralString, "'Source'", true);
+
+    InstructionDesc[OpSourceContinued].operands.push(OperandLiteralString, "'Continued Source'");
+
+    InstructionDesc[OpSourceExtension].operands.push(OperandLiteralString, "'Extension'");
+
+    InstructionDesc[OpName].operands.push(OperandId, "'Target'");
+    InstructionDesc[OpName].operands.push(OperandLiteralString, "'Name'");
+
+    InstructionDesc[OpMemberName].operands.push(OperandId, "'Type'");
+    InstructionDesc[OpMemberName].operands.push(OperandLiteralNumber, "'Member'");
+    InstructionDesc[OpMemberName].operands.push(OperandLiteralString, "'Name'");
+
+    InstructionDesc[OpString].operands.push(OperandLiteralString, "'String'");
+
+    InstructionDesc[OpLine].operands.push(OperandId, "'File'");
+    InstructionDesc[OpLine].operands.push(OperandLiteralNumber, "'Line'");
+    InstructionDesc[OpLine].operands.push(OperandLiteralNumber, "'Column'");
+
+    InstructionDesc[OpExtension].operands.push(OperandLiteralString, "'Name'");
+
+    InstructionDesc[OpExtInstImport].operands.push(OperandLiteralString, "'Name'");
+
+    InstructionDesc[OpCapability].operands.push(OperandCapability, "'Capability'");
+
+    InstructionDesc[OpMemoryModel].operands.push(OperandAddressing, "");
+    InstructionDesc[OpMemoryModel].operands.push(OperandMemory, "");
+
+    InstructionDesc[OpEntryPoint].operands.push(OperandExecutionModel, "");
+    InstructionDesc[OpEntryPoint].operands.push(OperandId, "'Entry Point'");
+    InstructionDesc[OpEntryPoint].operands.push(OperandLiteralString, "'Name'");
+    InstructionDesc[OpEntryPoint].operands.push(OperandVariableIds, "'Interface'");
+
+    InstructionDesc[OpExecutionMode].operands.push(OperandId, "'Entry Point'");
+    InstructionDesc[OpExecutionMode].operands.push(OperandExecutionMode, "'Mode'");
+    InstructionDesc[OpExecutionMode].operands.push(OperandOptionalLiteral, "See <<Execution_Mode,Execution Mode>>");
+
+    InstructionDesc[OpTypeInt].operands.push(OperandLiteralNumber, "'Width'");
+    InstructionDesc[OpTypeInt].operands.push(OperandLiteralNumber, "'Signedness'");
+
+    InstructionDesc[OpTypeFloat].operands.push(OperandLiteralNumber, "'Width'");
+
+    InstructionDesc[OpTypeVector].operands.push(OperandId, "'Component Type'");
+    InstructionDesc[OpTypeVector].operands.push(OperandLiteralNumber, "'Component Count'");
+
+    InstructionDesc[OpTypeMatrix].capabilities.push_back(CapabilityMatrix);
+    InstructionDesc[OpTypeMatrix].operands.push(OperandId, "'Column Type'");
+    InstructionDesc[OpTypeMatrix].operands.push(OperandLiteralNumber, "'Column Count'");
+
+    InstructionDesc[OpTypeImage].operands.push(OperandId, "'Sampled Type'");
+    InstructionDesc[OpTypeImage].operands.push(OperandDimensionality, "");
+    InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Depth'");
+    InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Arrayed'");
+    InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'MS'");
+    InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Sampled'");
+    InstructionDesc[OpTypeImage].operands.push(OperandSamplerImageFormat, "");
+    InstructionDesc[OpTypeImage].operands.push(OperandAccessQualifier, "", true);
+
+    InstructionDesc[OpTypeSampledImage].operands.push(OperandId, "'Image Type'");
+
+    InstructionDesc[OpTypeArray].operands.push(OperandId, "'Element Type'");
+    InstructionDesc[OpTypeArray].operands.push(OperandId, "'Length'");
+
+    InstructionDesc[OpTypeRuntimeArray].capabilities.push_back(CapabilityShader);
+    InstructionDesc[OpTypeRuntimeArray].operands.push(OperandId, "'Element Type'");
+
+    InstructionDesc[OpTypeStruct].operands.push(OperandVariableIds, "'Member 0 type', +\n'member 1 type', +\n...");
+
+    InstructionDesc[OpTypeOpaque].capabilities.push_back(CapabilityKernel);
+    InstructionDesc[OpTypeOpaque].operands.push(OperandLiteralString, "The name of the opaque type.");
+
+    InstructionDesc[OpTypePointer].operands.push(OperandStorage, "");
+    InstructionDesc[OpTypePointer].operands.push(OperandId, "'Type'");
+
+    InstructionDesc[OpTypeForwardPointer].capabilities.push_back(CapabilityAddresses);
+    InstructionDesc[OpTypeForwardPointer].operands.push(OperandId, "'Pointer Type'");
+    InstructionDesc[OpTypeForwardPointer].operands.push(OperandStorage, "");
+
+    InstructionDesc[OpTypeEvent].capabilities.push_back(CapabilityKernel);
+
+    InstructionDesc[OpTypeDeviceEvent].capabilities.push_back(CapabilityDeviceEnqueue);
+
+    InstructionDesc[OpTypeReserveId].capabilities.push_back(CapabilityPipes);
+
+    InstructionDesc[OpTypeQueue].capabilities.push_back(CapabilityDeviceEnqueue);
+
+    InstructionDesc[OpTypePipe].operands.push(OperandAccessQualifier, "'Qualifier'");
+    InstructionDesc[OpTypePipe].capabilities.push_back(CapabilityPipes);
+
+    InstructionDesc[OpTypeFunction].operands.push(OperandId, "'Return Type'");
+    InstructionDesc[OpTypeFunction].operands.push(OperandVariableIds, "'Parameter 0 Type', +\n'Parameter 1 Type', +\n...");
+
+    InstructionDesc[OpConstant].operands.push(OperandVariableLiterals, "'Value'");
+
+    InstructionDesc[OpConstantComposite].operands.push(OperandVariableIds, "'Constituents'");
+
+    InstructionDesc[OpConstantSampler].capabilities.push_back(CapabilityLiteralSampler);
+    InstructionDesc[OpConstantSampler].operands.push(OperandSamplerAddressingMode, "");
+    InstructionDesc[OpConstantSampler].operands.push(OperandLiteralNumber, "'Param'");
+    InstructionDesc[OpConstantSampler].operands.push(OperandSamplerFilterMode, "");
+
+    InstructionDesc[OpSpecConstant].operands.push(OperandVariableLiterals, "'Value'");
+
+    InstructionDesc[OpSpecConstantComposite].operands.push(OperandVariableIds, "'Constituents'");
+
+    InstructionDesc[OpSpecConstantOp].operands.push(OperandLiteralNumber, "'Opcode'");
+    InstructionDesc[OpSpecConstantOp].operands.push(OperandVariableIds, "'Operands'");
+
+    InstructionDesc[OpVariable].operands.push(OperandStorage, "");
+    InstructionDesc[OpVariable].operands.push(OperandId, "'Initializer'", true);
+
+    InstructionDesc[OpFunction].operands.push(OperandFunction, "");
+    InstructionDesc[OpFunction].operands.push(OperandId, "'Function Type'");
+
+    InstructionDesc[OpFunctionCall].operands.push(OperandId, "'Function'");
+    InstructionDesc[OpFunctionCall].operands.push(OperandVariableIds, "'Argument 0', +\n'Argument 1', +\n...");
+
+    InstructionDesc[OpExtInst].operands.push(OperandId, "'Set'");
+    InstructionDesc[OpExtInst].operands.push(OperandLiteralNumber, "'Instruction'");
+    InstructionDesc[OpExtInst].operands.push(OperandVariableIds, "'Operand 1', +\n'Operand 2', +\n...");
+
+    InstructionDesc[OpLoad].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpLoad].operands.push(OperandMemoryAccess, "", true);
+
+    InstructionDesc[OpStore].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpStore].operands.push(OperandId, "'Object'");
+    InstructionDesc[OpStore].operands.push(OperandMemoryAccess, "", true);
+
+    InstructionDesc[OpPhi].operands.push(OperandVariableIds, "'Variable, Parent, ...'");
+
+    InstructionDesc[OpDecorate].operands.push(OperandId, "'Target'");
+    InstructionDesc[OpDecorate].operands.push(OperandDecoration, "");
+    InstructionDesc[OpDecorate].operands.push(OperandVariableLiterals, "See <<Decoration,'Decoration'>>.");
+
+    InstructionDesc[OpMemberDecorate].operands.push(OperandId, "'Structure Type'");
+    InstructionDesc[OpMemberDecorate].operands.push(OperandLiteralNumber, "'Member'");
+    InstructionDesc[OpMemberDecorate].operands.push(OperandDecoration, "");
+    InstructionDesc[OpMemberDecorate].operands.push(OperandVariableLiterals, "See <<Decoration,'Decoration'>>.");
+
+    InstructionDesc[OpGroupDecorate].operands.push(OperandId, "'Decoration Group'");
+    InstructionDesc[OpGroupDecorate].operands.push(OperandVariableIds, "'Targets'");
+
+    InstructionDesc[OpGroupMemberDecorate].operands.push(OperandId, "'Decoration Group'");
+    InstructionDesc[OpGroupMemberDecorate].operands.push(OperandVariableIdLiteral, "'Targets'");
+
+    InstructionDesc[OpVectorExtractDynamic].operands.push(OperandId, "'Vector'");
+    InstructionDesc[OpVectorExtractDynamic].operands.push(OperandId, "'Index'");
+
+    InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Vector'");
+    InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Component'");
+    InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Index'");
+
+    InstructionDesc[OpVectorShuffle].operands.push(OperandId, "'Vector 1'");
+    InstructionDesc[OpVectorShuffle].operands.push(OperandId, "'Vector 2'");
+    InstructionDesc[OpVectorShuffle].operands.push(OperandVariableLiterals, "'Components'");
+
+    InstructionDesc[OpCompositeConstruct].operands.push(OperandVariableIds, "'Constituents'");
+
+    InstructionDesc[OpCompositeExtract].operands.push(OperandId, "'Composite'");
+    InstructionDesc[OpCompositeExtract].operands.push(OperandVariableLiterals, "'Indexes'");
+
+    InstructionDesc[OpCompositeInsert].operands.push(OperandId, "'Object'");
+    InstructionDesc[OpCompositeInsert].operands.push(OperandId, "'Composite'");
+    InstructionDesc[OpCompositeInsert].operands.push(OperandVariableLiterals, "'Indexes'");
+
+    InstructionDesc[OpCopyObject].operands.push(OperandId, "'Operand'");
+
+    InstructionDesc[OpCopyMemory].operands.push(OperandId, "'Target'");
+    InstructionDesc[OpCopyMemory].operands.push(OperandId, "'Source'");
+    InstructionDesc[OpCopyMemory].operands.push(OperandMemoryAccess, "", true);
+
+    InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Target'");
+    InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Source'");
+    InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Size'");
+    InstructionDesc[OpCopyMemorySized].operands.push(OperandMemoryAccess, "", true);
+
+    InstructionDesc[OpCopyMemorySized].capabilities.push_back(CapabilityAddresses);
+
+    InstructionDesc[OpSampledImage].operands.push(OperandId, "'Image'");
+    InstructionDesc[OpSampledImage].operands.push(OperandId, "'Sampler'");
+
+    InstructionDesc[OpImage].operands.push(OperandId, "'Sampled Image'");
+
+    InstructionDesc[OpImageRead].operands.push(OperandId, "'Image'");
+    InstructionDesc[OpImageRead].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageRead].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageRead].operands.push(OperandVariableIds, "", true);
+
+    InstructionDesc[OpImageWrite].operands.push(OperandId, "'Image'");
+    InstructionDesc[OpImageWrite].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageWrite].operands.push(OperandId, "'Texel'");
+    InstructionDesc[OpImageWrite].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageWrite].operands.push(OperandVariableIds, "", true);
+
+    InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSampleImplicitLod].capabilities.push_back(CapabilityShader);
+
+    InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandVariableIds, "", true);
+
+    InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'D~ref~'");
+    InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSampleDrefImplicitLod].capabilities.push_back(CapabilityShader);
+
+    InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'D~ref~'");
+    InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSampleDrefExplicitLod].capabilities.push_back(CapabilityShader);
+
+    InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSampleProjImplicitLod].capabilities.push_back(CapabilityShader);
+
+    InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSampleProjExplicitLod].capabilities.push_back(CapabilityShader);
+
+    InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'D~ref~'");
+    InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSampleProjDrefImplicitLod].capabilities.push_back(CapabilityShader);
+
+    InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'D~ref~'");
+    InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSampleProjDrefExplicitLod].capabilities.push_back(CapabilityShader);
+
+    InstructionDesc[OpImageFetch].operands.push(OperandId, "'Image'");
+    InstructionDesc[OpImageFetch].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageFetch].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageFetch].operands.push(OperandVariableIds, "", true);
+
+    InstructionDesc[OpImageGather].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageGather].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageGather].operands.push(OperandId, "'Component'");
+    InstructionDesc[OpImageGather].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageGather].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageGather].capabilities.push_back(CapabilityShader);
+
+    InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'D~ref~'");
+    InstructionDesc[OpImageDrefGather].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageDrefGather].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageDrefGather].capabilities.push_back(CapabilityShader);
+
+    InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSparseSampleImplicitLod].capabilities.push_back(CapabilitySparseResidency);
+
+    InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSparseSampleExplicitLod].capabilities.push_back(CapabilitySparseResidency);
+
+    InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'D~ref~'");
+    InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSparseSampleDrefImplicitLod].capabilities.push_back(CapabilitySparseResidency);
+
+    InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'D~ref~'");
+    InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSparseSampleDrefExplicitLod].capabilities.push_back(CapabilitySparseResidency);
+
+    InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSparseSampleProjImplicitLod].capabilities.push_back(CapabilitySparseResidency);
+
+    InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSparseSampleProjExplicitLod].capabilities.push_back(CapabilitySparseResidency);
+
+    InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'D~ref~'");
+    InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].capabilities.push_back(CapabilitySparseResidency);
+
+    InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'D~ref~'");
+    InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].capabilities.push_back(CapabilitySparseResidency);
+
+    InstructionDesc[OpImageSparseFetch].operands.push(OperandId, "'Image'");
+    InstructionDesc[OpImageSparseFetch].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSparseFetch].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSparseFetch].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSparseFetch].capabilities.push_back(CapabilitySparseResidency);
+
+    InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Component'");
+    InstructionDesc[OpImageSparseGather].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSparseGather].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSparseGather].capabilities.push_back(CapabilitySparseResidency);
+
+    InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'Sampled Image'");
+    InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'D~ref~'");
+    InstructionDesc[OpImageSparseDrefGather].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSparseDrefGather].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSparseDrefGather].capabilities.push_back(CapabilitySparseResidency);
+
+    InstructionDesc[OpImageSparseRead].operands.push(OperandId, "'Image'");
+    InstructionDesc[OpImageSparseRead].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageSparseRead].operands.push(OperandImageOperands, "", true);
+    InstructionDesc[OpImageSparseRead].operands.push(OperandVariableIds, "", true);
+    InstructionDesc[OpImageSparseRead].capabilities.push_back(CapabilitySparseResidency);
+
+    InstructionDesc[OpImageSparseTexelsResident].operands.push(OperandId, "'Resident Code'");
+    InstructionDesc[OpImageSparseTexelsResident].capabilities.push_back(CapabilitySparseResidency);
+
+    InstructionDesc[OpImageQuerySizeLod].operands.push(OperandId, "'Image'");
+    InstructionDesc[OpImageQuerySizeLod].operands.push(OperandId, "'Level of Detail'");
+    InstructionDesc[OpImageQuerySizeLod].capabilities.push_back(CapabilityKernel);
+    InstructionDesc[OpImageQuerySizeLod].capabilities.push_back(CapabilityImageQuery);
+
+    InstructionDesc[OpImageQuerySize].operands.push(OperandId, "'Image'");
+    InstructionDesc[OpImageQuerySize].capabilities.push_back(CapabilityKernel);
+    InstructionDesc[OpImageQuerySize].capabilities.push_back(CapabilityImageQuery);
+
+    InstructionDesc[OpImageQueryLod].operands.push(OperandId, "'Image'");
+    InstructionDesc[OpImageQueryLod].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageQueryLod].capabilities.push_back(CapabilityImageQuery);
+
+    InstructionDesc[OpImageQueryLevels].operands.push(OperandId, "'Image'");
+    InstructionDesc[OpImageQueryLevels].capabilities.push_back(CapabilityKernel);
+    InstructionDesc[OpImageQueryLevels].capabilities.push_back(CapabilityImageQuery);
+
+    InstructionDesc[OpImageQuerySamples].operands.push(OperandId, "'Image'");
+    InstructionDesc[OpImageQuerySamples].capabilities.push_back(CapabilityKernel);
+    InstructionDesc[OpImageQuerySamples].capabilities.push_back(CapabilityImageQuery);
+
+    InstructionDesc[OpImageQueryFormat].operands.push(OperandId, "'Image'");
+    InstructionDesc[OpImageQueryFormat].capabilities.push_back(CapabilityKernel);
+
+    InstructionDesc[OpImageQueryOrder].operands.push(OperandId, "'Image'");
+    InstructionDesc[OpImageQueryOrder].capabilities.push_back(CapabilityKernel);
+
+    InstructionDesc[OpAccessChain].operands.push(OperandId, "'Base'");
+    InstructionDesc[OpAccessChain].operands.push(OperandVariableIds, "'Indexes'");
+
+    InstructionDesc[OpInBoundsAccessChain].operands.push(OperandId, "'Base'");
+    InstructionDesc[OpInBoundsAccessChain].operands.push(OperandVariableIds, "'Indexes'");
+
+    InstructionDesc[OpPtrAccessChain].operands.push(OperandId, "'Base'");
+    InstructionDesc[OpPtrAccessChain].operands.push(OperandId, "'Element'");
+    InstructionDesc[OpPtrAccessChain].operands.push(OperandVariableIds, "'Indexes'");
+    InstructionDesc[OpPtrAccessChain].capabilities.push_back(CapabilityAddresses);
+
+    InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandId, "'Base'");
+    InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandId, "'Element'");
+    InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandVariableIds, "'Indexes'");
+    InstructionDesc[OpInBoundsPtrAccessChain].capabilities.push_back(CapabilityAddresses);
+
+    InstructionDesc[OpSNegate].operands.push(OperandId, "'Operand'");
+
+    InstructionDesc[OpFNegate].operands.push(OperandId, "'Operand'");
+
+    InstructionDesc[OpNot].operands.push(OperandId, "'Operand'");
+
+    InstructionDesc[OpAny].operands.push(OperandId, "'Vector'");
+
+    InstructionDesc[OpAll].operands.push(OperandId, "'Vector'");
+
+    InstructionDesc[OpConvertFToU].operands.push(OperandId, "'Float Value'");
+
+    InstructionDesc[OpConvertFToS].operands.push(OperandId, "'Float Value'");
+
+    InstructionDesc[OpConvertSToF].operands.push(OperandId, "'Signed Value'");
+
+    InstructionDesc[OpConvertUToF].operands.push(OperandId, "'Unsigned Value'");
+
+    InstructionDesc[OpUConvert].operands.push(OperandId, "'Unsigned Value'");
+
+    InstructionDesc[OpSConvert].operands.push(OperandId, "'Signed Value'");
+
+    InstructionDesc[OpFConvert].operands.push(OperandId, "'Float Value'");
+
+    InstructionDesc[OpSatConvertSToU].operands.push(OperandId, "'Signed Value'");
+    InstructionDesc[OpSatConvertSToU].capabilities.push_back(CapabilityKernel);
+
+    InstructionDesc[OpSatConvertUToS].operands.push(OperandId, "'Unsigned Value'");
+    InstructionDesc[OpSatConvertUToS].capabilities.push_back(CapabilityKernel);
+
+    InstructionDesc[OpConvertPtrToU].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpConvertPtrToU].capabilities.push_back(CapabilityAddresses);
+
+    InstructionDesc[OpConvertUToPtr].operands.push(OperandId, "'Integer Value'");
+    InstructionDesc[OpConvertUToPtr].capabilities.push_back(CapabilityAddresses);
+
+    InstructionDesc[OpPtrCastToGeneric].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpPtrCastToGeneric].capabilities.push_back(CapabilityKernel);
+
+    InstructionDesc[OpGenericCastToPtr].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpGenericCastToPtr].capabilities.push_back(CapabilityKernel);
+
+    InstructionDesc[OpGenericCastToPtrExplicit].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpGenericCastToPtrExplicit].operands.push(OperandStorage, "'Storage'");
+    InstructionDesc[OpGenericCastToPtrExplicit].capabilities.push_back(CapabilityKernel);
+
+    InstructionDesc[OpGenericPtrMemSemantics].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpGenericPtrMemSemantics].capabilities.push_back(CapabilityKernel);
+
+    InstructionDesc[OpBitcast].operands.push(OperandId, "'Operand'");
+
+    InstructionDesc[OpQuantizeToF16].operands.push(OperandId, "'Value'");
+
+    InstructionDesc[OpTranspose].capabilities.push_back(CapabilityMatrix);
+    InstructionDesc[OpTranspose].operands.push(OperandId, "'Matrix'");
+
+    InstructionDesc[OpIsNan].operands.push(OperandId, "'x'");
+
+    InstructionDesc[OpIsInf].operands.push(OperandId, "'x'");
+
+    InstructionDesc[OpIsFinite].capabilities.push_back(CapabilityKernel);
+    InstructionDesc[OpIsFinite].operands.push(OperandId, "'x'");
+
+    InstructionDesc[OpIsNormal].capabilities.push_back(CapabilityKernel);
+    InstructionDesc[OpIsNormal].operands.push(OperandId, "'x'");
+
+    InstructionDesc[OpSignBitSet].capabilities.push_back(CapabilityKernel);
+    InstructionDesc[OpSignBitSet].operands.push(OperandId, "'x'");
+
+    InstructionDesc[OpLessOrGreater].capabilities.push_back(CapabilityKernel);
+    InstructionDesc[OpLessOrGreater].operands.push(OperandId, "'x'");
+    InstructionDesc[OpLessOrGreater].operands.push(OperandId, "'y'");
+
+    InstructionDesc[OpOrdered].capabilities.push_back(CapabilityKernel);
+    InstructionDesc[OpOrdered].operands.push(OperandId, "'x'");
+    InstructionDesc[OpOrdered].operands.push(OperandId, "'y'");
+
+    InstructionDesc[OpUnordered].capabilities.push_back(CapabilityKernel);
+    InstructionDesc[OpUnordered].operands.push(OperandId, "'x'");
+    InstructionDesc[OpUnordered].operands.push(OperandId, "'y'");
+
+    InstructionDesc[OpArrayLength].operands.push(OperandId, "'Structure'");
+    InstructionDesc[OpArrayLength].operands.push(OperandLiteralNumber, "'Array member'");
+    InstructionDesc[OpArrayLength].capabilities.push_back(CapabilityShader);
+
+    InstructionDesc[OpIAdd].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpIAdd].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFAdd].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFAdd].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpISub].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpISub].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFSub].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFSub].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpIMul].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpIMul].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFMul].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFMul].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpUDiv].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpUDiv].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpSDiv].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpSDiv].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFDiv].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFDiv].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpUMod].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpUMod].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpSRem].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpSRem].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpSMod].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpSMod].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFRem].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFRem].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFMod].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFMod].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpVectorTimesScalar].operands.push(OperandId, "'Vector'");
+    InstructionDesc[OpVectorTimesScalar].operands.push(OperandId, "'Scalar'");
+
+    InstructionDesc[OpMatrixTimesScalar].capabilities.push_back(CapabilityMatrix);
+    InstructionDesc[OpMatrixTimesScalar].operands.push(OperandId, "'Matrix'");
+    InstructionDesc[OpMatrixTimesScalar].operands.push(OperandId, "'Scalar'");
+
+    InstructionDesc[OpVectorTimesMatrix].capabilities.push_back(CapabilityMatrix);
+    InstructionDesc[OpVectorTimesMatrix].operands.push(OperandId, "'Vector'");
+    InstructionDesc[OpVectorTimesMatrix].operands.push(OperandId, "'Matrix'");
+
+    InstructionDesc[OpMatrixTimesVector].capabilities.push_back(CapabilityMatrix);
+    InstructionDesc[OpMatrixTimesVector].operands.push(OperandId, "'Matrix'");
+    InstructionDesc[OpMatrixTimesVector].operands.push(OperandId, "'Vector'");
+
+    InstructionDesc[OpMatrixTimesMatrix].capabilities.push_back(CapabilityMatrix);
+    InstructionDesc[OpMatrixTimesMatrix].operands.push(OperandId, "'LeftMatrix'");
+    InstructionDesc[OpMatrixTimesMatrix].operands.push(OperandId, "'RightMatrix'");
+
+    InstructionDesc[OpOuterProduct].capabilities.push_back(CapabilityMatrix);
+    InstructionDesc[OpOuterProduct].operands.push(OperandId, "'Vector 1'");
+    InstructionDesc[OpOuterProduct].operands.push(OperandId, "'Vector 2'");
+
+    InstructionDesc[OpDot].operands.push(OperandId, "'Vector 1'");
+    InstructionDesc[OpDot].operands.push(OperandId, "'Vector 2'");
+
+    InstructionDesc[OpIAddCarry].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpIAddCarry].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpISubBorrow].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpISubBorrow].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpUMulExtended].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpUMulExtended].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpSMulExtended].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpSMulExtended].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpShiftRightLogical].operands.push(OperandId, "'Base'");
+    InstructionDesc[OpShiftRightLogical].operands.push(OperandId, "'Shift'");
+
+    InstructionDesc[OpShiftRightArithmetic].operands.push(OperandId, "'Base'");
+    InstructionDesc[OpShiftRightArithmetic].operands.push(OperandId, "'Shift'");
+
+    InstructionDesc[OpShiftLeftLogical].operands.push(OperandId, "'Base'");
+    InstructionDesc[OpShiftLeftLogical].operands.push(OperandId, "'Shift'");
+
+    InstructionDesc[OpLogicalOr].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpLogicalOr].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpLogicalAnd].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpLogicalAnd].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpLogicalEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpLogicalEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpLogicalNotEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpLogicalNotEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpLogicalNot].operands.push(OperandId, "'Operand'");
+
+    InstructionDesc[OpBitwiseOr].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpBitwiseOr].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpBitwiseXor].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpBitwiseXor].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpBitwiseAnd].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpBitwiseAnd].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpBitFieldInsert].capabilities.push_back(CapabilityShader);
+    InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Base'");
+    InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Insert'");
+    InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Offset'");
+    InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Count'");
+
+    InstructionDesc[OpBitFieldSExtract].capabilities.push_back(CapabilityShader);
+    InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Base'");
+    InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Offset'");
+    InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Count'");
+
+    InstructionDesc[OpBitFieldUExtract].capabilities.push_back(CapabilityShader);
+    InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Base'");
+    InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Offset'");
+    InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Count'");
+
+    InstructionDesc[OpBitReverse].capabilities.push_back(CapabilityShader);
+    InstructionDesc[OpBitReverse].operands.push(OperandId, "'Base'");
+
+    InstructionDesc[OpBitCount].operands.push(OperandId, "'Base'");
+
+    InstructionDesc[OpSelect].operands.push(OperandId, "'Condition'");
+    InstructionDesc[OpSelect].operands.push(OperandId, "'Object 1'");
+    InstructionDesc[OpSelect].operands.push(OperandId, "'Object 2'");
+
+    InstructionDesc[OpIEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpIEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFOrdEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFOrdEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFUnordEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFUnordEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpINotEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpINotEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFOrdNotEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFOrdNotEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFUnordNotEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFUnordNotEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpULessThan].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpULessThan].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpSLessThan].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpSLessThan].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFOrdLessThan].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFOrdLessThan].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFUnordLessThan].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFUnordLessThan].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpUGreaterThan].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpUGreaterThan].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpSGreaterThan].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpSGreaterThan].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFOrdGreaterThan].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFOrdGreaterThan].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFUnordGreaterThan].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFUnordGreaterThan].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpULessThanEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpULessThanEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpSLessThanEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpSLessThanEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFOrdLessThanEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFOrdLessThanEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFUnordLessThanEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFUnordLessThanEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpUGreaterThanEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpUGreaterThanEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpSGreaterThanEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpSGreaterThanEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFOrdGreaterThanEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFOrdGreaterThanEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpFUnordGreaterThanEqual].operands.push(OperandId, "'Operand 1'");
+    InstructionDesc[OpFUnordGreaterThanEqual].operands.push(OperandId, "'Operand 2'");
+
+    InstructionDesc[OpDPdx].capabilities.push_back(CapabilityShader);
+    InstructionDesc[OpDPdx].operands.push(OperandId, "'P'");
+
+    InstructionDesc[OpDPdy].capabilities.push_back(CapabilityShader);
+    InstructionDesc[OpDPdy].operands.push(OperandId, "'P'");
+
+    InstructionDesc[OpFwidth].capabilities.push_back(CapabilityShader);
+    InstructionDesc[OpFwidth].operands.push(OperandId, "'P'");
+
+    InstructionDesc[OpDPdxFine].capabilities.push_back(CapabilityDerivativeControl);
+    InstructionDesc[OpDPdxFine].operands.push(OperandId, "'P'");
+
+    InstructionDesc[OpDPdyFine].capabilities.push_back(CapabilityDerivativeControl);
+    InstructionDesc[OpDPdyFine].operands.push(OperandId, "'P'");
+
+    InstructionDesc[OpFwidthFine].capabilities.push_back(CapabilityDerivativeControl);
+    InstructionDesc[OpFwidthFine].operands.push(OperandId, "'P'");
+
+    InstructionDesc[OpDPdxCoarse].capabilities.push_back(CapabilityDerivativeControl);
+    InstructionDesc[OpDPdxCoarse].operands.push(OperandId, "'P'");
+
+    InstructionDesc[OpDPdyCoarse].capabilities.push_back(CapabilityDerivativeControl);
+    InstructionDesc[OpDPdyCoarse].operands.push(OperandId, "'P'");
+
+    InstructionDesc[OpFwidthCoarse].capabilities.push_back(CapabilityDerivativeControl);
+    InstructionDesc[OpFwidthCoarse].operands.push(OperandId, "'P'");
+
+    InstructionDesc[OpEmitVertex].capabilities.push_back(CapabilityGeometry);
+
+    InstructionDesc[OpEndPrimitive].capabilities.push_back(CapabilityGeometry);
+
+    InstructionDesc[OpEmitStreamVertex].operands.push(OperandId, "'Stream'");
+    InstructionDesc[OpEmitStreamVertex].capabilities.push_back(CapabilityGeometryStreams);
+
+    InstructionDesc[OpEndStreamPrimitive].operands.push(OperandId, "'Stream'");
+    InstructionDesc[OpEndStreamPrimitive].capabilities.push_back(CapabilityGeometryStreams);
+
+    InstructionDesc[OpControlBarrier].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpControlBarrier].operands.push(OperandScope, "'Memory'");
+    InstructionDesc[OpControlBarrier].operands.push(OperandMemorySemantics, "'Semantics'");
+
+    InstructionDesc[OpMemoryBarrier].operands.push(OperandScope, "'Memory'");
+    InstructionDesc[OpMemoryBarrier].operands.push(OperandMemorySemantics, "'Semantics'");
+
+    InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Image'");
+    InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Coordinate'");
+    InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Sample'");
+
+    InstructionDesc[OpAtomicLoad].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicLoad].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicLoad].operands.push(OperandMemorySemantics, "'Semantics'");
+
+    InstructionDesc[OpAtomicStore].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicStore].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicStore].operands.push(OperandMemorySemantics, "'Semantics'");
+    InstructionDesc[OpAtomicStore].operands.push(OperandId, "'Value'");
+
+    InstructionDesc[OpAtomicExchange].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicExchange].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicExchange].operands.push(OperandMemorySemantics, "'Semantics'");
+    InstructionDesc[OpAtomicExchange].operands.push(OperandId, "'Value'");
+
+    InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicCompareExchange].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicCompareExchange].operands.push(OperandMemorySemantics, "'Equal'");
+    InstructionDesc[OpAtomicCompareExchange].operands.push(OperandMemorySemantics, "'Unequal'");
+    InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Value'");
+    InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Comparator'");
+
+    InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandMemorySemantics, "'Equal'");
+    InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandMemorySemantics, "'Unequal'");
+    InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, "'Value'");
+    InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, "'Comparator'");
+    InstructionDesc[OpAtomicCompareExchangeWeak].capabilities.push_back(CapabilityKernel);
+
+    InstructionDesc[OpAtomicIIncrement].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicIIncrement].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicIIncrement].operands.push(OperandMemorySemantics, "'Semantics'");
+
+    InstructionDesc[OpAtomicIDecrement].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicIDecrement].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicIDecrement].operands.push(OperandMemorySemantics, "'Semantics'");
+
+    InstructionDesc[OpAtomicIAdd].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicIAdd].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicIAdd].operands.push(OperandMemorySemantics, "'Semantics'");
+    InstructionDesc[OpAtomicIAdd].operands.push(OperandId, "'Value'");
+
+    InstructionDesc[OpAtomicISub].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicISub].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicISub].operands.push(OperandMemorySemantics, "'Semantics'");
+    InstructionDesc[OpAtomicISub].operands.push(OperandId, "'Value'");
+
+    InstructionDesc[OpAtomicUMin].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicUMin].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicUMin].operands.push(OperandMemorySemantics, "'Semantics'");
+    InstructionDesc[OpAtomicUMin].operands.push(OperandId, "'Value'");
+
+    InstructionDesc[OpAtomicUMax].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicUMax].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicUMax].operands.push(OperandMemorySemantics, "'Semantics'");
+    InstructionDesc[OpAtomicUMax].operands.push(OperandId, "'Value'");
+
+    InstructionDesc[OpAtomicSMin].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicSMin].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicSMin].operands.push(OperandMemorySemantics, "'Semantics'");
+    InstructionDesc[OpAtomicSMin].operands.push(OperandId, "'Value'");
+
+    InstructionDesc[OpAtomicSMax].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicSMax].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicSMax].operands.push(OperandMemorySemantics, "'Semantics'");
+    InstructionDesc[OpAtomicSMax].operands.push(OperandId, "'Value'");
+
+    InstructionDesc[OpAtomicAnd].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicAnd].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicAnd].operands.push(OperandMemorySemantics, "'Semantics'");
+    InstructionDesc[OpAtomicAnd].operands.push(OperandId, "'Value'");
+
+    InstructionDesc[OpAtomicOr].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicOr].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicOr].operands.push(OperandMemorySemantics, "'Semantics'");
+    InstructionDesc[OpAtomicOr].operands.push(OperandId, "'Value'");
+
+    InstructionDesc[OpAtomicXor].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicXor].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicXor].operands.push(OperandMemorySemantics, "'Semantics'");
+    InstructionDesc[OpAtomicXor].operands.push(OperandId, "'Value'");
+
+    InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandMemorySemantics, "'Semantics'");
+    InstructionDesc[OpAtomicFlagTestAndSet].capabilities.push_back(CapabilityKernel);
+
+    InstructionDesc[OpAtomicFlagClear].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpAtomicFlagClear].operands.push(OperandScope, "'Scope'");
+    InstructionDesc[OpAtomicFlagClear].operands.push(OperandMemorySemantics, "'Semantics'");
+    InstructionDesc[OpAtomicFlagClear].capabilities.push_back(CapabilityKernel);
+
+    InstructionDesc[OpLoopMerge].operands.push(OperandId, "'Merge Block'");
+    InstructionDesc[OpLoopMerge].operands.push(OperandId, "'Continue Target'");
+    InstructionDesc[OpLoopMerge].operands.push(OperandLoop, "");
+
+    InstructionDesc[OpSelectionMerge].operands.push(OperandId, "'Merge Block'");
+    InstructionDesc[OpSelectionMerge].operands.push(OperandSelect, "");
+
+    InstructionDesc[OpBranch].operands.push(OperandId, "'Target Label'");
+
+    InstructionDesc[OpBranchConditional].operands.push(OperandId, "'Condition'");
+    InstructionDesc[OpBranchConditional].operands.push(OperandId, "'True Label'");
+    InstructionDesc[OpBranchConditional].operands.push(OperandId, "'False Label'");
+    InstructionDesc[OpBranchConditional].operands.push(OperandVariableLiterals, "'Branch weights'");
+
+    InstructionDesc[OpSwitch].operands.push(OperandId, "'Selector'");
+    InstructionDesc[OpSwitch].operands.push(OperandId, "'Default'");
+    InstructionDesc[OpSwitch].operands.push(OperandVariableLiteralId, "'Target'");
+
+    InstructionDesc[OpKill].capabilities.push_back(CapabilityShader);
+
+    InstructionDesc[OpReturnValue].operands.push(OperandId, "'Value'");
+
+    InstructionDesc[OpLifetimeStart].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpLifetimeStart].operands.push(OperandLiteralNumber, "'Size'");
+    InstructionDesc[OpLifetimeStart].capabilities.push_back(CapabilityKernel);
+
+    InstructionDesc[OpLifetimeStop].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpLifetimeStop].operands.push(OperandLiteralNumber, "'Size'");
+    InstructionDesc[OpLifetimeStop].capabilities.push_back(CapabilityKernel);
+
+    InstructionDesc[OpGroupAsyncCopy].capabilities.push_back(CapabilityKernel);
+    InstructionDesc[OpGroupAsyncCopy].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Destination'");
+    InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Source'");
+    InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Num Elements'");
+    InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Stride'");
+    InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Event'");
+
+    InstructionDesc[OpGroupWaitEvents].capabilities.push_back(CapabilityKernel);
+    InstructionDesc[OpGroupWaitEvents].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupWaitEvents].operands.push(OperandId, "'Num Events'");
+    InstructionDesc[OpGroupWaitEvents].operands.push(OperandId, "'Events List'");
+
+    InstructionDesc[OpGroupAll].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupAll].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupAll].operands.push(OperandId, "'Predicate'");
+
+    InstructionDesc[OpGroupAny].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupAny].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupAny].operands.push(OperandId, "'Predicate'");
+
+    InstructionDesc[OpGroupBroadcast].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupBroadcast].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupBroadcast].operands.push(OperandId, "'Value'");
+    InstructionDesc[OpGroupBroadcast].operands.push(OperandId, "'LocalId'");
+
+    InstructionDesc[OpGroupIAdd].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupIAdd].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupIAdd].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupIAdd].operands.push(OperandId, "'X'");
+
+    InstructionDesc[OpGroupFAdd].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupFAdd].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupFAdd].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupFAdd].operands.push(OperandId, "'X'");
+
+    InstructionDesc[OpGroupUMin].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupUMin].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupUMin].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupUMin].operands.push(OperandId, "'X'");
+
+    InstructionDesc[OpGroupSMin].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupSMin].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupSMin].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupSMin].operands.push(OperandId, "'X'");
+
+    InstructionDesc[OpGroupFMin].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupFMin].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupFMin].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupFMin].operands.push(OperandId, "'X'");
+
+    InstructionDesc[OpGroupUMax].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupUMax].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupUMax].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupUMax].operands.push(OperandId, "'X'");
+
+    InstructionDesc[OpGroupSMax].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupSMax].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupSMax].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupSMax].operands.push(OperandId, "'X'");
+
+    InstructionDesc[OpGroupFMax].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupFMax].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupFMax].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupFMax].operands.push(OperandId, "'X'");
+
+    InstructionDesc[OpReadPipe].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpReadPipe].operands.push(OperandId, "'Pipe'");
+    InstructionDesc[OpReadPipe].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpReadPipe].operands.push(OperandId, "'Packet Size'");
+    InstructionDesc[OpReadPipe].operands.push(OperandId, "'Packet Alignment'");
+
+    InstructionDesc[OpWritePipe].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpWritePipe].operands.push(OperandId, "'Pipe'");
+    InstructionDesc[OpWritePipe].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpWritePipe].operands.push(OperandId, "'Packet Size'");
+    InstructionDesc[OpWritePipe].operands.push(OperandId, "'Packet Alignment'");
+
+    InstructionDesc[OpReservedReadPipe].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Pipe'");
+    InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Reserve Id'");
+    InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Index'");
+    InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Packet Size'");
+    InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Packet Alignment'");
+
+    InstructionDesc[OpReservedWritePipe].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Pipe'");
+    InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Reserve Id'");
+    InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Index'");
+    InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Pointer'");
+    InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Packet Size'");
+    InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Packet Alignment'");
+
+    InstructionDesc[OpReserveReadPipePackets].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Pipe'");
+    InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Num Packets'");
+    InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Packet Size'");
+    InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+    InstructionDesc[OpReserveWritePipePackets].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Pipe'");
+    InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Num Packets'");
+    InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Packet Size'");
+    InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+    InstructionDesc[OpCommitReadPipe].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Pipe'");
+    InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Reserve Id'");
+    InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Packet Size'");
+    InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Packet Alignment'");
+
+    InstructionDesc[OpCommitWritePipe].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Pipe'");
+    InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Reserve Id'");
+    InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Packet Size'");
+    InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Packet Alignment'");
+
+    InstructionDesc[OpIsValidReserveId].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpIsValidReserveId].operands.push(OperandId, "'Reserve Id'");
+
+    InstructionDesc[OpGetNumPipePackets].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Pipe'");
+    InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Packet Size'");
+    InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+    InstructionDesc[OpGetMaxPipePackets].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Pipe'");
+    InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Packet Size'");
+    InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+    InstructionDesc[OpGroupReserveReadPipePackets].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Pipe'");
+    InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Num Packets'");
+    InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Packet Size'");
+    InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+    InstructionDesc[OpGroupReserveWritePipePackets].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Pipe'");
+    InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Num Packets'");
+    InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Packet Size'");
+    InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+    InstructionDesc[OpGroupCommitReadPipe].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Pipe'");
+    InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Reserve Id'");
+    InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Packet Size'");
+    InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Packet Alignment'");
+
+    InstructionDesc[OpGroupCommitWritePipe].capabilities.push_back(CapabilityPipes);
+    InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Pipe'");
+    InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Reserve Id'");
+    InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Packet Size'");
+    InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Packet Alignment'");
+
+    InstructionDesc[OpBuildNDRange].capabilities.push_back(CapabilityDeviceEnqueue);
+    InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'GlobalWorkSize'");
+    InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'LocalWorkSize'");
+    InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'GlobalWorkOffset'");
+
+    InstructionDesc[OpGetDefaultQueue].capabilities.push_back(CapabilityDeviceEnqueue);
+
+    InstructionDesc[OpCaptureEventProfilingInfo].capabilities.push_back(CapabilityDeviceEnqueue);
+
+    InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Event'");
+    InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Profiling Info'");
+    InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Value'");
+
+    InstructionDesc[OpSetUserEventStatus].capabilities.push_back(CapabilityDeviceEnqueue);
+
+    InstructionDesc[OpSetUserEventStatus].operands.push(OperandId, "'Event'");
+    InstructionDesc[OpSetUserEventStatus].operands.push(OperandId, "'Status'");
+
+    InstructionDesc[OpIsValidEvent].capabilities.push_back(CapabilityDeviceEnqueue);
+    InstructionDesc[OpIsValidEvent].operands.push(OperandId, "'Event'");
+
+    InstructionDesc[OpCreateUserEvent].capabilities.push_back(CapabilityDeviceEnqueue);
+
+    InstructionDesc[OpRetainEvent].capabilities.push_back(CapabilityDeviceEnqueue);
+    InstructionDesc[OpRetainEvent].operands.push(OperandId, "'Event'");
+
+    InstructionDesc[OpReleaseEvent].capabilities.push_back(CapabilityDeviceEnqueue);
+    InstructionDesc[OpReleaseEvent].operands.push(OperandId, "'Event'");
+
+    InstructionDesc[OpGetKernelWorkGroupSize].capabilities.push_back(CapabilityDeviceEnqueue);
+    InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Invoke'");
+    InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param'");
+    InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param Size'");
+    InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param Align'");
+
+    InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].capabilities.push_back(CapabilityDeviceEnqueue);
+    InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Invoke'");
+    InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param'");
+    InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param Size'");
+    InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param Align'");
+
+    InstructionDesc[OpGetKernelNDrangeSubGroupCount].capabilities.push_back(CapabilityDeviceEnqueue);
+    InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'ND Range'");
+    InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Invoke'");
+    InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param'");
+    InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param Size'");
+    InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param Align'");
+
+    InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].capabilities.push_back(CapabilityDeviceEnqueue);
+    InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'ND Range'");
+    InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Invoke'");
+    InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param'");
+    InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param Size'");
+    InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param Align'");
+
+    InstructionDesc[OpEnqueueKernel].capabilities.push_back(CapabilityDeviceEnqueue);
+    InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Queue'");
+    InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Flags'");
+    InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'ND Range'");
+    InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Num Events'");
+    InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Wait Events'");
+    InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Ret Event'");
+    InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Invoke'");
+    InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param'");
+    InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param Size'");
+    InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param Align'");
+    InstructionDesc[OpEnqueueKernel].operands.push(OperandVariableIds, "'Local Size'");
+
+    InstructionDesc[OpEnqueueMarker].capabilities.push_back(CapabilityDeviceEnqueue);
+    InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Queue'");
+    InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Num Events'");
+    InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Wait Events'");
+    InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Ret Event'");
+
+    InstructionDesc[OpSubgroupBallotKHR].operands.push(OperandId, "'Predicate'");
+
+    InstructionDesc[OpSubgroupFirstInvocationKHR].operands.push(OperandId, "'Value'");
+
+#ifdef AMD_EXTENSIONS
+    InstructionDesc[OpGroupIAddNonUniformAMD].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandId, "'X'");
+
+    InstructionDesc[OpGroupFAddNonUniformAMD].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandId, "'X'");
+
+    InstructionDesc[OpGroupUMinNonUniformAMD].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandId, "'X'");
+
+    InstructionDesc[OpGroupSMinNonUniformAMD].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandId, "X");
+
+    InstructionDesc[OpGroupFMinNonUniformAMD].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandId, "X");
+
+    InstructionDesc[OpGroupUMaxNonUniformAMD].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandId, "X");
+
+    InstructionDesc[OpGroupSMaxNonUniformAMD].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandId, "X");
+
+    InstructionDesc[OpGroupFMaxNonUniformAMD].capabilities.push_back(CapabilityGroups);
+    InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandScope, "'Execution'");
+    InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+    InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandId, "X");
+#endif
+}
+
+}; // end spv namespace

+ 260 - 0
3rdparty/glslang/SPIRV/doc.h

@@ -0,0 +1,260 @@
+//
+//Copyright (C) 2014-2015 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Parameterize the SPIR-V enumerants.
+//
+
+#include "spirv.hpp"
+
+#include <vector>
+
+namespace spv {
+
// Fill in all the parameters
void Parameterize();

// Return the English names of all the enums.
// Each takes the raw enumerant value and returns a printable string;
// parameter names appearing below are informational only.
const char* SourceString(int);
const char* AddressingString(int);
const char* MemoryString(int);
const char* ExecutionModelString(int);
const char* ExecutionModeString(int);
const char* StorageClassString(int);
const char* DecorationString(int);
const char* BuiltInString(int);
const char* DimensionString(int);
const char* SelectControlString(int);
const char* LoopControlString(int);
const char* FunctionControlString(int);
const char* SamplerAddressingModeString(int);
const char* SamplerFilterModeString(int);
const char* ImageFormatString(int);
const char* ImageChannelOrderString(int);
// NOTE(review): both ImageChannelTypeString and ImageChannelDataTypeString
// are declared; they look like near-duplicates -- confirm both are defined
// and needed in doc.cpp.
const char* ImageChannelTypeString(int);
const char* ImageChannelDataTypeString(int type);
const char* ImageOperandsString(int format);
const char* ImageOperands(int);
const char* FPFastMathString(int);
const char* FPRoundingModeString(int);
const char* LinkageTypeString(int);
const char* FuncParamAttrString(int);
const char* AccessQualifierString(int);
const char* MemorySemanticsString(int);
const char* MemoryAccessString(int);
const char* ExecutionScopeString(int);
const char* GroupOperationString(int);
const char* KernelEnqueueFlagsString(int);
const char* KernelProfilingInfoString(int);
const char* CapabilityString(int);
const char* OpcodeString(int);
const char* ScopeString(int mem);
+
// For grouping opcodes into subsections.
// Values are contiguous from 0 (so they can index tables); OpClassCount is
// the number of real classes and OpClassMissing is a sentinel after it.
// Do not reorder: enumerator values are implicit.
enum OpcodeClass {
    OpClassMisc,
    OpClassDebug,
    OpClassAnnotate,
    OpClassExtension,
    OpClassMode,
    OpClassType,
    OpClassConstant,
    OpClassMemory,
    OpClassFunction,
    OpClassImage,
    OpClassConvert,
    OpClassComposite,
    OpClassArithmetic,
    OpClassBit,
    OpClassRelationalLogical,
    OpClassDerivative,
    OpClassFlowControl,
    OpClassAtomic,
    OpClassPrimitive,
    OpClassBarrier,
    OpClassGroup,
    OpClassDeviceSideEnqueue,
    OpClassPipe,

    OpClassCount,
    OpClassMissing             // all instructions start out as missing
};
+
// For parameterizing operands.
// Each value names the kind of operand an instruction position can take:
// ids, literal values/strings, or one of the enumerated operand sets below.
// Do not reorder: enumerator values are implicit and OperandCount is used
// as the table ceiling.
enum OperandClass {
    OperandNone,
    OperandId,
    OperandVariableIds,
    OperandOptionalLiteral,
    OperandOptionalLiteralString,
    OperandVariableLiterals,
    OperandVariableIdLiteral,
    OperandVariableLiteralId,
    OperandLiteralNumber,
    OperandLiteralString,
    OperandSource,
    OperandExecutionModel,
    OperandAddressing,
    OperandMemory,
    OperandExecutionMode,
    OperandStorage,
    OperandDimensionality,
    OperandSamplerAddressingMode,
    OperandSamplerFilterMode,
    OperandSamplerImageFormat,
    OperandImageChannelOrder,
    OperandImageChannelDataType,
    OperandImageOperands,
    OperandFPFastMath,
    OperandFPRoundingMode,
    OperandLinkageType,
    OperandAccessQualifier,
    OperandFuncParamAttr,
    OperandDecoration,
    OperandBuiltIn,
    OperandSelect,
    OperandLoop,
    OperandFunction,
    OperandMemorySemantics,
    OperandMemoryAccess,
    OperandScope,
    OperandGroupOperation,
    OperandKernelEnqueueFlags,
    OperandKernelProfilingInfo,
    OperandCapability,

    OperandOpcode,

    OperandCount
};
+
// Any specific enum can have a set of capabilities that allow it:
typedef std::vector<Capability> EnumCaps;

// Parameterize a set of operands with their OperandClass(es) and descriptions.
// Stored as three parallel vectors (class, description, optional flag),
// indexed by operand position [0, getNum()).
class OperandParameters {
public:
    OperandParameters() { }
    // Append one operand: its class, a human-readable description, and
    // whether it may be omitted.  Descriptions are retained as raw pointers,
    // so 'd' must outlive this object (string literals in practice -- see
    // the Parameterize() calls in doc.cpp).
    void push(OperandClass oc, const char* d, bool opt = false)
    {
        opClass.push_back(oc);
        desc.push_back(d);
        optional.push_back(opt);
    }
    void setOptional();  // defined out of line; presumably marks trailing operand(s) optional -- confirm in doc.cpp
    OperandClass getClass(int op) const { return opClass[op]; }  // no bounds check; index within getNum()
    const char* getDesc(int op) const { return desc[op]; }
    bool isOptional(int op) const { return optional[op]; }
    int getNum() const { return (int)opClass.size(); }

protected:
    std::vector<OperandClass> opClass;
    std::vector<const char*> desc;
    std::vector<bool> optional;
};
+
// Parameterize an enumerant: the set of capabilities required to use it,
// plus an optional description (null until one is provided).
class EnumParameters {
public:
    EnumParameters() : desc(0) { }
    EnumCaps caps;
    const char* desc;
};
+
// Parameterize a set of enumerants that form an enum.
// Aggregates the per-enumerant EnumParameters (external array 'ep'),
// the naming function, and whether values combine as a bitmask.
class EnumDefinition : public EnumParameters {
public:
    EnumDefinition() : 
        ceiling(0), bitmask(false), getName(0), enumParams(0), operandParams(0) { }
    // Install the basic parameterization; operand sets are attached
    // separately via setOperands().  Non-owning: 'ep' must outlive this.
    void set(int ceil, const char* (*name)(int), EnumParameters* ep, bool mask = false)
    {
        ceiling = ceil;
        getName = name;
        bitmask = mask;
        enumParams = ep;
    }
    void setOperands(OperandParameters* op) { operandParams = op; }
    int ceiling;   // ceiling of enumerants
    bool bitmask;  // true if these enumerants combine into a bitmask
    const char* (*getName)(int);      // a function that returns the name for each enumerant value (or shift)
    EnumParameters* enumParams;       // parameters for each individual enumerant
    OperandParameters* operandParams; // sets of operands
};
+
+// Parameterize an instruction's logical format, including its known set of operands,
+// per OperandParameters above.
+class InstructionParameters {
+public:
+    InstructionParameters() :
+        opDesc("TBD"),
+        opClass(OpClassMissing),
+        typePresent(true),         // most normal, only exceptions have to be spelled out
+        resultPresent(true)        // most normal, only exceptions have to be spelled out
+    { }
+
+    void setResultAndType(bool r, bool t)
+    {
+        resultPresent = r;
+        typePresent = t;
+    }
+
+    bool hasResult() const { return resultPresent != 0; }
+    bool hasType()   const { return typePresent != 0; }
+
+    const char* opDesc;
+    EnumCaps capabilities;
+    OpcodeClass opClass;
+    OperandParameters operands;
+
+protected:
+    int typePresent   : 1;
+    int resultPresent : 1;
+};
+
+const int OpcodeCeiling = 321;
+
+// The set of objects that hold all the instruction/operand
+// parameterization information.
+extern InstructionParameters InstructionDesc[];
+
+// These hold definitions of the enumerants used for operands
+extern EnumDefinition OperandClassParams[];
+
+const char* GetOperandDesc(OperandClass operand);
+void PrintImmediateRow(int imm, const char* name, const EnumParameters* enumParams, bool caps, bool hex = false);
+const char* AccessQualifierString(int attr);
+
+void PrintOperands(const OperandParameters& operands, int reservedOperands);
+
+};  // end namespace spv

+ 1078 - 0
3rdparty/glslang/SPIRV/hex_float.h

@@ -0,0 +1,1078 @@
+// Copyright (c) 2015-2016 The Khronos Group Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef LIBSPIRV_UTIL_HEX_FLOAT_H_
+#define LIBSPIRV_UTIL_HEX_FLOAT_H_
+
+#include <cassert>
+#include <cctype>
+#include <cmath>
+#include <cstdint>
+#include <iomanip>
+#include <limits>
+#include <sstream>
+
#if defined(_MSC_VER) && _MSC_VER < 1700
// Visual Studio 2010 and earlier ship no std::isnan/std::isinf; provide them
// in terms of the CRT's _isnan/_finite.  They must be 'inline': this is a
// header, and non-inline definitions here would break the one-definition
// rule (multiple-definition link errors) once two translation units include
// this file.
// NOTE(review): this isinf() is also true for NaN (_finite(NaN) == 0),
// preserving the original shim's behavior -- confirm callers check isnan
// first where the distinction matters.
namespace std {
inline bool isnan(double f)
{
  return ::_isnan(f) != 0;
}
inline bool isinf(double f)
{
  return ::_finite(f) == 0;
}
}
#endif
+
+#include "bitutils.h"
+
+namespace spvutils {
+
// A 16-bit IEEE-754 (binary16) value stored as its raw bit pattern.
class Float16 {
 public:
  Float16(uint16_t v) : val(v) {}
  // Like the built-in floats, default construction leaves the value
  // uninitialized on purpose.
  Float16() {}

  // NaN: exponent field all ones (0x7C00) with a non-zero fraction.
  static bool isNan(const Float16& h) {
    const uint16_t exponent_mask = 0x7C00;
    const uint16_t fraction_mask = 0x03FF;
    return (h.val & exponent_mask) == exponent_mask &&
           (h.val & fraction_mask) != 0;
  }

  // Returns true if the given value is any kind of infinity:
  // exponent field all ones with a zero fraction.
  static bool isInfinity(const Float16& h) {
    const uint16_t exponent_mask = 0x7C00;
    const uint16_t fraction_mask = 0x03FF;
    return (h.val & exponent_mask) == exponent_mask &&
           (h.val & fraction_mask) == 0;
  }

  Float16(const Float16& other) { val = other.val; }

  // Raw bit pattern of this half-precision value.
  uint16_t get_value() const { return val; }

  // Returns the maximum normal value.
  static Float16 max() { return Float16(0x7bff); }
  // Returns the lowest normal value.
  static Float16 lowest() { return Float16(0xfbff); }

 private:
  uint16_t val;
};
+
// To specialize this type, you must override uint_type to define
// an unsigned integer that can fit your floating point type.
// You must also add a isNan function that returns true if
// a value is Nan.
// The unspecialized primary template deliberately uses 'void' so any
// unsupported T fails to compile at the point of use.
template <typename T>
struct FloatProxyTraits {
  typedef void uint_type;
};

template <>
struct FloatProxyTraits<float> {
  typedef uint32_t uint_type;
  static bool isNan(float f) { return std::isnan(f); }
  // Returns true if the given value is any kind of infinity.
  static bool isInfinity(float f) { return std::isinf(f); }
  // Returns the maximum normal value.
  static float max() { return std::numeric_limits<float>::max(); }
  // Returns the lowest normal value.
  static float lowest() { return std::numeric_limits<float>::lowest(); }
};

template <>
struct FloatProxyTraits<double> {
  typedef uint64_t uint_type;
  static bool isNan(double f) { return std::isnan(f); }
  // Returns true if the given value is any kind of infinity.
  static bool isInfinity(double f) { return std::isinf(f); }
  // Returns the maximum normal value.
  static double max() { return std::numeric_limits<double>::max(); }
  // Returns the lowest normal value.
  static double lowest() { return std::numeric_limits<double>::lowest(); }
};

// Half precision delegates to the Float16 helper class above.
template <>
struct FloatProxyTraits<Float16> {
  typedef uint16_t uint_type;
  static bool isNan(Float16 f) { return Float16::isNan(f); }
  // Returns true if the given value is any kind of infinity.
  static bool isInfinity(Float16 f) { return Float16::isInfinity(f); }
  // Returns the maximum normal value.
  static Float16 max() { return Float16::max(); }
  // Returns the lowest normal value.
  static Float16 lowest() { return Float16::lowest(); }
};
+
+// Since copying a floating point number (especially if it is NaN)
+// does not guarantee that bits are preserved, this class lets us
+// store the type and use it as a float when necessary.
+template <typename T>
+class FloatProxy {
+ public:
+  typedef typename FloatProxyTraits<T>::uint_type uint_type;
+
+  // Since this is to act similar to the normal floats,
+  // do not initialize the data by default.
+  FloatProxy() {}
+
+  // Intentionally non-explicit. This is a proxy type so
+  // implicit conversions allow us to use it more transparently.
+  FloatProxy(T val) { data_ = BitwiseCast<uint_type>(val); }
+
+  // Intentionally non-explicit. This is a proxy type so
+  // implicit conversions allow us to use it more transparently.
+  FloatProxy(uint_type val) { data_ = val; }
+
+  // This is helpful to have and is guaranteed not to stomp bits.
+  FloatProxy<T> operator-() const {
+    return static_cast<uint_type>(data_ ^
+                                  (uint_type(0x1) << (sizeof(T) * 8 - 1)));
+  }
+
+  // Returns the data as a floating point value.
+  T getAsFloat() const { return BitwiseCast<T>(data_); }
+
+  // Returns the raw data.
+  uint_type data() const { return data_; }
+
+  // Returns true if the value represents any type of NaN.
+  bool isNan() { return FloatProxyTraits<T>::isNan(getAsFloat()); }
+  // Returns true if the value represents any type of infinity.
+  bool isInfinity() { return FloatProxyTraits<T>::isInfinity(getAsFloat()); }
+
+  // Returns the maximum normal value.
+  static FloatProxy<T> max() {
+    return FloatProxy<T>(FloatProxyTraits<T>::max());
+  }
+  // Returns the lowest normal value.
+  static FloatProxy<T> lowest() {
+    return FloatProxy<T>(FloatProxyTraits<T>::lowest());
+  }
+
+ private:
+  uint_type data_;
+};
+
+template <typename T>
+bool operator==(const FloatProxy<T>& first, const FloatProxy<T>& second) {
+  return first.data() == second.data();
+}
+
+// Reads a FloatProxy value as a normal float from a stream.
+template <typename T>
+std::istream& operator>>(std::istream& is, FloatProxy<T>& value) {
+  T float_val;
+  is >> float_val;
+  value = FloatProxy<T>(float_val);
+  return is;
+}
+
// This is an example traits. It is not meant to be used in practice, but will
// be the default for any non-specialized type.
// The 'void' typedefs make instantiation with an unsupported type fail
// to compile at the point of use.
template <typename T>
struct HexFloatTraits {
  // Integer type that can store this hex-float.
  typedef void uint_type;
  // Signed integer type that can store this hex-float.
  typedef void int_type;
  // The numerical type that this HexFloat represents.
  typedef void underlying_type;
  // The type needed to construct the underlying type.
  typedef void native_type;
  // The number of bits that are actually relevant in the uint_type.
  // This allows us to deal with, for example, 24-bit values in a 32-bit
  // integer.
  static const uint32_t num_used_bits = 0;
  // Number of bits that represent the exponent.
  static const uint32_t num_exponent_bits = 0;
  // Number of bits that represent the fractional part.
  static const uint32_t num_fraction_bits = 0;
  // The bias of the exponent. (How much we need to subtract from the stored
  // value to get the correct value.)
  static const uint32_t exponent_bias = 0;
};
+
// Traits for IEEE float.
// 1 sign bit, 8 exponent bits, 23 fractional bits.
template <>
struct HexFloatTraits<FloatProxy<float>> {
  typedef uint32_t uint_type;
  typedef int32_t int_type;
  typedef FloatProxy<float> underlying_type;
  typedef float native_type;
  static const uint_type num_used_bits = 32;
  static const uint_type num_exponent_bits = 8;
  static const uint_type num_fraction_bits = 23;
  static const uint_type exponent_bias = 127;
};

// Traits for IEEE double.
// 1 sign bit, 11 exponent bits, 52 fractional bits.
template <>
struct HexFloatTraits<FloatProxy<double>> {
  typedef uint64_t uint_type;
  typedef int64_t int_type;
  typedef FloatProxy<double> underlying_type;
  typedef double native_type;
  static const uint_type num_used_bits = 64;
  static const uint_type num_exponent_bits = 11;
  static const uint_type num_fraction_bits = 52;
  static const uint_type exponent_bias = 1023;
};

// Traits for IEEE half.
// 1 sign bit, 5 exponent bits, 10 fractional bits.
// NOTE(review): unlike the float/double specializations above,
// underlying_type and native_type here are the raw uint16_t rather than
// FloatProxy<Float16> / Float16 -- confirm this asymmetry is intended by
// the HexFloat code that consumes these traits.
template <>
struct HexFloatTraits<FloatProxy<Float16>> {
  typedef uint16_t uint_type;
  typedef int16_t int_type;
  typedef uint16_t underlying_type;
  typedef uint16_t native_type;
  static const uint_type num_used_bits = 16;
  static const uint_type num_exponent_bits = 5;
  static const uint_type num_fraction_bits = 10;
  static const uint_type exponent_bias = 15;
};
+
// Rounding directions used when a value cannot be represented exactly
// in the destination format.
enum round_direction {
  kRoundToZero,              // truncate toward zero
  kRoundToNearestEven,       // ties round to the even significand
  kRoundToPositiveInfinity,  // round up
  kRoundToNegativeInfinity   // round down
};
+
+// Template class that houses a floating pointer number.
+// It exposes a number of constants based on the provided traits to
+// assist in interpreting the bits of the value.
+template <typename T, typename Traits = HexFloatTraits<T>>
+class HexFloat {
+ public:
+  typedef typename Traits::uint_type uint_type;
+  typedef typename Traits::int_type int_type;
+  typedef typename Traits::underlying_type underlying_type;
+  typedef typename Traits::native_type native_type;
+
+  explicit HexFloat(T f) : value_(f) {}
+
+  T value() const { return value_; }
+  void set_value(T f) { value_ = f; }
+
+  // These are all written like this because it is convenient to have
+  // compile-time constants for all of these values.
+
+  // Pass-through values to save typing.
+  static const uint32_t num_used_bits = Traits::num_used_bits;
+  static const uint32_t exponent_bias = Traits::exponent_bias;
+  static const uint32_t num_exponent_bits = Traits::num_exponent_bits;
+  static const uint32_t num_fraction_bits = Traits::num_fraction_bits;
+
+  // Number of bits to shift left to set the highest relevant bit.
+  static const uint32_t top_bit_left_shift = num_used_bits - 1;
+  // How many nibbles (hex characters) the fractional part takes up.
+  static const uint32_t fraction_nibbles = (num_fraction_bits + 3) / 4;
+  // If the fractional part does not fit evenly into a hex character (4-bits)
+  // then we have to left-shift to get rid of leading 0s. This is the amount
+  // we have to shift (might be 0).
+  static const uint32_t num_overflow_bits =
+      fraction_nibbles * 4 - num_fraction_bits;
+
+  // The representation of the fraction, not the actual bits. This
+  // includes the leading bit that is usually implicit.
+  static const uint_type fraction_represent_mask =
+      spvutils::SetBits<uint_type, 0,
+                        num_fraction_bits + num_overflow_bits>::get;
+
+  // The topmost bit in the nibble-aligned fraction.
+  static const uint_type fraction_top_bit =
+      uint_type(1) << (num_fraction_bits + num_overflow_bits - 1);
+
+  // The least significant bit in the exponent, which is also the bit
+  // immediately to the left of the significand.
+  static const uint_type first_exponent_bit = uint_type(1)
+                                              << (num_fraction_bits);
+
+  // The mask for the encoded fraction. It does not include the
+  // implicit bit.
+  static const uint_type fraction_encode_mask =
+      spvutils::SetBits<uint_type, 0, num_fraction_bits>::get;
+
+  // The bit that is used as a sign.
+  static const uint_type sign_mask = uint_type(1) << top_bit_left_shift;
+
+  // The bits that represent the exponent.
+  static const uint_type exponent_mask =
+      spvutils::SetBits<uint_type, num_fraction_bits, num_exponent_bits>::get;
+
+  // How far left the exponent is shifted.
+  static const uint32_t exponent_left_shift = num_fraction_bits;
+
+  // How far from the right edge the fraction is shifted.
+  static const uint32_t fraction_right_shift =
+      static_cast<uint32_t>(sizeof(uint_type) * 8) - num_fraction_bits;
+
+  // The maximum representable unbiased exponent.
+  static const int_type max_exponent =
+      (exponent_mask >> num_fraction_bits) - exponent_bias;
+  // The minimum representable exponent for normalized numbers.
+  static const int_type min_exponent = -static_cast<int_type>(exponent_bias);
+
+  // Returns the bits associated with the value.
+  uint_type getBits() const { return spvutils::BitwiseCast<uint_type>(value_); }
+
+  // Returns the bits associated with the value, without the leading sign bit.
+  uint_type getUnsignedBits() const {
+    return static_cast<uint_type>(spvutils::BitwiseCast<uint_type>(value_) &
+                                  ~sign_mask);
+  }
+
+  // Returns the bits associated with the exponent, shifted to start at the
+  // lsb of the type.
+  const uint_type getExponentBits() const {
+    return static_cast<uint_type>((getBits() & exponent_mask) >>
+                                  num_fraction_bits);
+  }
+
+  // Returns the exponent in unbiased form. This is the exponent in the
+  // human-friendly form.
+  const int_type getUnbiasedExponent() const {
+    return static_cast<int_type>(getExponentBits() - exponent_bias);
+  }
+
+  // Returns just the significand bits from the value.
+  const uint_type getSignificandBits() const {
+    return getBits() & fraction_encode_mask;
+  }
+
+  // If the number was normalized, returns the unbiased exponent.
+  // If the number was denormal, normalize the exponent first.
+  const int_type getUnbiasedNormalizedExponent() const {
+    if ((getBits() & ~sign_mask) == 0) {  // special case if everything is 0
+      return 0;
+    }
+    int_type exp = getUnbiasedExponent();
+    if (exp == min_exponent) {  // We are in denorm land.
+      uint_type significand_bits = getSignificandBits();
+      while ((significand_bits & (first_exponent_bit >> 1)) == 0) {
+        significand_bits = static_cast<uint_type>(significand_bits << 1);
+        exp = static_cast<int_type>(exp - 1);
+      }
+      significand_bits &= fraction_encode_mask;
+    }
+    return exp;
+  }
+
+  // Returns the signficand after it has been normalized.
+  const uint_type getNormalizedSignificand() const {
+    int_type unbiased_exponent = getUnbiasedNormalizedExponent();
+    uint_type significand = getSignificandBits();
+    for (int_type i = unbiased_exponent; i <= min_exponent; ++i) {
+      significand = static_cast<uint_type>(significand << 1);
+    }
+    significand &= fraction_encode_mask;
+    return significand;
+  }
+
+  // Returns true if this number represents a negative value.
+  bool isNegative() const { return (getBits() & sign_mask) != 0; }
+
+  // Sets this HexFloat from the individual components.
+  // Note this assumes EVERY significand is normalized, and has an implicit
+  // leading one. This means that the only way that this method will set 0,
+  // is if you set a number so denormalized that it underflows.
+  // Do not use this method with raw bits extracted from a subnormal number,
+  // since subnormals do not have an implicit leading 1 in the significand.
+  // The significand is also expected to be in the
+  // lowest-most num_fraction_bits of the uint_type.
+  // The exponent is expected to be unbiased, meaning an exponent of
+  // 0 actually means 0.
+  // If underflow_round_up is set, then on underflow, if a number is non-0
+  // and would underflow, we round up to the smallest denorm.
+  void setFromSignUnbiasedExponentAndNormalizedSignificand(
+      bool negative, int_type exponent, uint_type significand,
+      bool round_denorm_up) {
+    bool significand_is_zero = significand == 0;
+
+    if (exponent <= min_exponent) {
+      // If this was denormalized, then we have to shift the bit on, meaning
+      // the significand is not zero.
+      significand_is_zero = false;
+      significand |= first_exponent_bit;
+      significand = static_cast<uint_type>(significand >> 1);
+    }
+
+    while (exponent < min_exponent) {
+      significand = static_cast<uint_type>(significand >> 1);
+      ++exponent;
+    }
+
+    if (exponent == min_exponent) {
+      if (significand == 0 && !significand_is_zero && round_denorm_up) {
+        significand = static_cast<uint_type>(0x1);
+      }
+    }
+
+    uint_type new_value = 0;
+    if (negative) {
+      new_value = static_cast<uint_type>(new_value | sign_mask);
+    }
+    exponent = static_cast<int_type>(exponent + exponent_bias);
+    assert(exponent >= 0);
+
+    // put it all together
+    exponent = static_cast<uint_type>((exponent << exponent_left_shift) &
+                                      exponent_mask);
+    significand = static_cast<uint_type>(significand & fraction_encode_mask);
+    new_value = static_cast<uint_type>(new_value | (exponent | significand));
+    value_ = BitwiseCast<T>(new_value);
+  }
+
+  // Adds |to_increment| to |significand|.  If the sum spills into the
+  // implicit leading 1-bit position, *carry is set to true and the result
+  // is renormalized (top bit cleared, shifted right one place); otherwise
+  // *carry is false.  Both inputs must be valid in-range significands.
+  static uint_type incrementSignificand(uint_type significand,
+                                        uint_type to_increment, bool* carry) {
+    const uint_type sum = static_cast<uint_type>(significand + to_increment);
+    const bool overflowed = (sum & first_exponent_bit) != 0;
+    *carry = overflowed;
+    if (!overflowed) {
+      return sum;
+    }
+    // The addition carried into the implicit 1-bit: drop that bit and
+    // shift the significand back into its valid range.
+    uint_type renormalized = static_cast<uint_type>(sum & ~first_exponent_bit);
+    renormalized = static_cast<uint_type>(renormalized >> 1);
+    return renormalized;
+  }
+
+  // These exist because MSVC throws warnings on negative right-shifts
+  // even if they are not going to be executed. Eg:
+  // constant_number < 0? 0: constant_number
+  // A negative shift count is translated into a shift in the opposite
+  // direction instead.
+  // They are static: they use no instance state, and are invoked while
+  // initializing function-local static constants.
+  // NOTE: the template parameter is named shift_int_type so that it does
+  // not shadow the class-level int_type typedef.
+
+  // Shifts |val| left by |N| bits; a negative |N| shifts right by -N.
+  template <typename shift_int_type>
+  static uint_type negatable_left_shift(shift_int_type N, uint_type val)
+  {
+    if(N >= 0)
+      return val << N;
+
+    return val >> -N;
+  }
+
+  // Shifts |val| right by |N| bits; a negative |N| shifts left by -N.
+  template <typename shift_int_type>
+  static uint_type negatable_right_shift(shift_int_type N, uint_type val)
+  {
+    if(N >= 0)
+      return val >> N;
+
+    return val << -N;
+  }
+
+  // Returns the significand, rounded to fit in a significand in
+  // other_T. This is shifted so that the most significant
+  // bit of the rounded number lines up with the most significant bit
+  // of the returned significand.
+  // On return, *carry_bit says whether rounding carried into the implicit
+  // leading 1-bit (the caller must then adjust the exponent).
+  template <typename other_T>
+  typename other_T::uint_type getRoundedNormalizedSignificand(
+      round_direction dir, bool* carry_bit) {
+    typedef typename other_T::uint_type other_uint_type;
+    // Number of low bits that do not fit in the destination significand;
+    // negative when the destination is wider (an up-cast).
+    static const int_type num_throwaway_bits =
+        static_cast<int_type>(num_fraction_bits) -
+        static_cast<int_type>(other_T::num_fraction_bits);
+
+    // Bit positions used for the round-to-nearest-even decision.
+    static const uint_type last_significant_bit =
+        (num_throwaway_bits < 0)
+            ? 0
+            : negatable_left_shift(num_throwaway_bits, 1u);
+    static const uint_type first_rounded_bit =
+        (num_throwaway_bits < 1)
+            ? 0
+            : negatable_left_shift(num_throwaway_bits - 1, 1u);
+
+    static const uint_type throwaway_mask_bits =
+        num_throwaway_bits > 0 ? num_throwaway_bits : 0;
+    static const uint_type throwaway_mask =
+        spvutils::SetBits<uint_type, 0, throwaway_mask_bits>::get;
+
+    *carry_bit = false;
+    other_uint_type out_val = 0;
+    uint_type significand = getNormalizedSignificand();
+    // If we are up-casting, then we just have to shift to the right location.
+    if (num_throwaway_bits <= 0) {
+      out_val = static_cast<other_uint_type>(significand);
+      uint_type shift_amount = static_cast<uint_type>(-num_throwaway_bits);
+      out_val = static_cast<other_uint_type>(out_val << shift_amount);
+      return out_val;
+    }
+
+    // If every non-representable bit is 0, then we don't have any casting to
+    // do.
+    if ((significand & throwaway_mask) == 0) {
+      return static_cast<other_uint_type>(
+          negatable_right_shift(num_throwaway_bits, significand));
+    }
+
+    bool round_away_from_zero = false;
+    // We actually have to narrow the significand here, so we have to follow the
+    // rounding rules.
+    switch (dir) {
+      case kRoundToZero:
+        break;
+      case kRoundToPositiveInfinity:
+        round_away_from_zero = !isNegative();
+        break;
+      case kRoundToNegativeInfinity:
+        round_away_from_zero = isNegative();
+        break;
+      case kRoundToNearestEven:
+        // Have to round down, round bit is 0
+        if ((first_rounded_bit & significand) == 0) {
+          break;
+        }
+        if (((significand & throwaway_mask) & ~first_rounded_bit) != 0) {
+          // If any subsequent bit of the rounded portion is non-0 then we round
+          // up.
+          round_away_from_zero = true;
+          break;
+        }
+        // We are exactly half-way between 2 numbers, pick even.
+        if ((significand & last_significant_bit) != 0) {
+          // 1 for our last bit, round up.
+          round_away_from_zero = true;
+          break;
+        }
+        break;
+    }
+
+    if (round_away_from_zero) {
+      return static_cast<other_uint_type>(
+          negatable_right_shift(num_throwaway_bits, incrementSignificand(
+              significand, last_significant_bit, carry_bit)));
+    } else {
+      return static_cast<other_uint_type>(
+          negatable_right_shift(num_throwaway_bits, significand));
+    }
+  }
+
+  // Casts this value to another HexFloat. If the cast is widening,
+  // then round_dir is ignored. If the cast is narrowing, then
+  // the result is rounded in the direction specified.
+  // This number will retain Nan and Inf values.
+  // It will also saturate to Inf if the number overflows, and
+  // underflow to (0 or min depending on rounding) if the number underflows.
+  template <typename other_T>
+  void castTo(other_T& other, round_direction round_dir) {
+    other = other_T(static_cast<typename other_T::native_type>(0));
+    bool negate = isNegative();
+    // +0 / -0 short-circuit: only the sign needs to be applied.
+    if (getUnsignedBits() == 0) {
+      if (negate) {
+        other.set_value(-other.value());
+      }
+      return;
+    }
+    uint_type significand = getSignificandBits();
+    bool carried = false;
+    typename other_T::uint_type rounded_significand =
+        getRoundedNormalizedSignificand<other_T>(round_dir, &carried);
+
+    int_type exponent = getUnbiasedExponent();
+    if (exponent == min_exponent) {
+      // If we are denormal, normalize the exponent, so that we can encode
+      // easily.
+      exponent = static_cast<int_type>(exponent + 1);
+      for (uint_type check_bit = first_exponent_bit >> 1; check_bit != 0;
+           check_bit = static_cast<uint_type>(check_bit >> 1)) {
+        exponent = static_cast<int_type>(exponent - 1);
+        if (check_bit & significand) break;
+      }
+      // exponent now reflects the position of the leading 1-bit of the
+      // denormal significand.
+    }
+
+    bool is_nan =
+        (getBits() & exponent_mask) == exponent_mask && significand != 0;
+    // NOTE(review): '(exponent + carried)' adds a bool to an int, i.e. the
+    // rounding carry bumps the exponent by one before the overflow check.
+    bool is_inf =
+        !is_nan &&
+        ((exponent + carried) > static_cast<int_type>(other_T::exponent_bias) ||
+         (significand == 0 && (getBits() & exponent_mask) == exponent_mask));
+
+    // If we are Nan or Inf we should pass that through.
+    if (is_inf) {
+      other.set_value(BitwiseCast<typename other_T::underlying_type>(
+          static_cast<typename other_T::uint_type>(
+              (negate ? other_T::sign_mask : 0) | other_T::exponent_mask)));
+      return;
+    }
+    if (is_nan) {
+      typename other_T::uint_type shifted_significand;
+      shifted_significand = static_cast<typename other_T::uint_type>(
+          negatable_left_shift(
+              static_cast<int_type>(other_T::num_fraction_bits) -
+              static_cast<int_type>(num_fraction_bits), significand));
+
+      // We are some sort of Nan. We try to keep the bit-pattern of the Nan
+      // as close as possible. If we had to shift off bits so we are 0, then we
+      // just set the last bit.
+      other.set_value(BitwiseCast<typename other_T::underlying_type>(
+          static_cast<typename other_T::uint_type>(
+              (negate ? other_T::sign_mask : 0) | other_T::exponent_mask |
+              (shifted_significand == 0 ? 0x1 : shifted_significand))));
+      return;
+    }
+
+    // Round-toward-the-matching-infinity must round an underflow up to the
+    // smallest denormal instead of flushing to zero.
+    bool round_underflow_up =
+        isNegative() ? round_dir == kRoundToNegativeInfinity
+                     : round_dir == kRoundToPositiveInfinity;
+    typedef typename other_T::int_type other_int_type;
+    // setFromSignUnbiasedExponentAndNormalizedSignificand will
+    // zero out any underflowing value (but retain the sign).
+    other.setFromSignUnbiasedExponentAndNormalizedSignificand(
+        negate, static_cast<other_int_type>(exponent), rounded_significand,
+        round_underflow_up);
+    return;
+  }
+
+ private:
+  // Raw storage for the wrapped value; reinterpreted as uint_type bits
+  // via BitwiseCast where bit-level access is required.
+  T value_;
+
+  // Sanity checks: the traits must describe exactly the bits of T.
+  static_assert(num_used_bits ==
+                    Traits::num_exponent_bits + Traits::num_fraction_bits + 1,
+                "The number of bits do not fit");
+  static_assert(sizeof(T) == sizeof(uint_type), "The type sizes do not match");
+};
+
+// Returns the 4-bit value represented by the given hex character
+// ('0'-'9', 'a'-'f', 'A'-'F').  Asserts on any other input.
+// NOTE: explicit range checks are used instead of strchr() because
+// strchr(s, '\0') matches the string terminator; with the previous
+// strchr-based lookup a NUL character would silently yield a bogus
+// nibble value instead of triggering the assertion.
+inline uint8_t get_nibble_from_character(int character) {
+  if (character >= '0' && character <= '9') {
+    return static_cast<uint8_t>(character - '0');
+  } else if (character >= 'a' && character <= 'f') {
+    return static_cast<uint8_t>(character - 'a' + 0xa);
+  } else if (character >= 'A' && character <= 'F') {
+    return static_cast<uint8_t>(character - 'A' + 0xa);
+  }
+
+  assert(false && "This was called with a non-hex character");
+  return 0;
+}
+
+// Outputs the given HexFloat to the stream in hex-float notation
+// (e.g. "-0x1.8p+1").  Stream flags and fill character are restored
+// before returning.
+template <typename T, typename Traits>
+std::ostream& operator<<(std::ostream& os, const HexFloat<T, Traits>& value) {
+  typedef HexFloat<T, Traits> HF;
+  typedef typename HF::uint_type uint_type;
+  typedef typename HF::int_type int_type;
+
+  static_assert(HF::num_used_bits != 0,
+                "num_used_bits must be non-zero for a valid float");
+  static_assert(HF::num_exponent_bits != 0,
+                "num_exponent_bits must be non-zero for a valid float");
+  // Fixed diagnostic typo: "num_fractin_bits" -> "num_fraction_bits".
+  static_assert(HF::num_fraction_bits != 0,
+                "num_fraction_bits must be non-zero for a valid float");
+
+  const uint_type bits = spvutils::BitwiseCast<uint_type>(value.value());
+  const char* const sign = (bits & HF::sign_mask) ? "-" : "";
+  const uint_type exponent = static_cast<uint_type>(
+      (bits & HF::exponent_mask) >> HF::num_fraction_bits);
+
+  uint_type fraction = static_cast<uint_type>((bits & HF::fraction_encode_mask)
+                                              << HF::num_overflow_bits);
+
+  const bool is_zero = exponent == 0 && fraction == 0;
+  const bool is_denorm = exponent == 0 && !is_zero;
+
+  // exponent contains the biased exponent we have to convert it back into
+  // the normal range.
+  int_type int_exponent = static_cast<int_type>(exponent - HF::exponent_bias);
+  // If the number is all zeros, then we actually have to NOT shift the
+  // exponent.
+  int_exponent = is_zero ? 0 : int_exponent;
+
+  // If we are denorm, then start shifting, and decreasing the exponent until
+  // our leading bit is 1.
+
+  if (is_denorm) {
+    while ((fraction & HF::fraction_top_bit) == 0) {
+      fraction = static_cast<uint_type>(fraction << 1);
+      int_exponent = static_cast<int_type>(int_exponent - 1);
+    }
+    // Since this is denormalized, we have to consume the leading 1 since it
+    // will end up being implicit.
+    fraction = static_cast<uint_type>(fraction << 1);  // eat the leading 1
+    fraction &= HF::fraction_represent_mask;
+  }
+
+  uint_type fraction_nibbles = HF::fraction_nibbles;
+  // We do not have to display any trailing 0s, since this represents the
+  // fractional part.
+  while (fraction_nibbles > 0 && (fraction & 0xF) == 0) {
+    // Shift off any trailing values;
+    fraction = static_cast<uint_type>(fraction >> 4);
+    --fraction_nibbles;
+  }
+
+  // Save stream state so the hex/width/fill manipulators below do not leak
+  // into the caller's stream.
+  const auto saved_flags = os.flags();
+  const auto saved_fill = os.fill();
+
+  os << sign << "0x" << (is_zero ? '0' : '1');
+  if (fraction_nibbles) {
+    // Make sure to keep the leading 0s in place, since this is the fractional
+    // part.
+    os << "." << std::setw(static_cast<int>(fraction_nibbles))
+       << std::setfill('0') << std::hex << fraction;
+  }
+  os << "p" << std::dec << (int_exponent >= 0 ? "+" : "") << int_exponent;
+
+  os.flags(saved_flags);
+  os.fill(saved_fill);
+
+  return os;
+}
+
+// Returns true if negate_value is true and the next character on the
+// input stream is a plus or minus sign.  In that case the stream's fail
+// bit is set and |value| is reset to the zero value for its type,
+// emulating standard parse-failure behaviour.
+template <typename T, typename Traits>
+inline bool RejectParseDueToLeadingSign(std::istream& is, bool negate_value,
+                                        HexFloat<T, Traits>& value) {
+  if (!negate_value) {
+    return false;
+  }
+  const auto peeked = is.peek();
+  if (peeked != '-' && peeked != '+') {
+    return false;
+  }
+  // Fail the parse: zero the value and mark the stream as failed.
+  value = HexFloat<T, Traits>(typename HexFloat<T, Traits>::uint_type(0));
+  is.setstate(std::ios_base::failbit);
+  return true;
+}
+
+// Parses a floating point number from the given stream and stores it into the
+// value parameter.
+// If negate_value is true then the number may not have a leading minus or
+// plus, and if it successfully parses, then the number is negated before
+// being stored into the value parameter.
+// If the value cannot be correctly parsed or overflows the target floating
+// point type, then set the fail bit on the stream.
+// TODO(dneto): Promise C++11 standard behavior in how the value is set in
+// the error case, but only after all target platforms implement it correctly.
+// In particular, the Microsoft C++ runtime appears to be out of spec.
+template <typename T, typename Traits>
+inline std::istream& ParseNormalFloat(std::istream& is, bool negate_value,
+                                      HexFloat<T, Traits>& value) {
+  if (RejectParseDueToLeadingSign(is, negate_value, value)) {
+    return is;
+  }
+  T val;
+  is >> val;
+  if (negate_value) {
+    val = -val;
+  }
+  value.set_value(val);
+  // In the failure case, map -0.0 to 0.0.
+  if (is.fail() && value.getUnsignedBits() == 0u) {
+    value = HexFloat<T, Traits>(typename HexFloat<T, Traits>::uint_type(0));
+  }
+  if (val.isInfinity()) {
+    // Fail the parse.  Emulate standard behaviour by setting the value to
+    // the closest normal value, and set the fail bit on the stream.
+    // Use logical || (not bitwise |) to combine the two boolean conditions.
+    value.set_value((value.isNegative() || negate_value) ? T::lowest()
+                                                         : T::max());
+    is.setstate(std::ios_base::failbit);
+  }
+  return is;
+}
+
+// Specialization of ParseNormalFloat for FloatProxy<Float16> values.
+// This will parse the float as it were a 32-bit floating point number,
+// and then round it down to fit into a Float16 value.
+// The number is rounded towards zero.
+// If negate_value is true then the number may not have a leading minus or
+// plus, and if it successfully parses, then the number is negated before
+// being stored into the value parameter.
+// If the value cannot be correctly parsed or overflows the target floating
+// point type, then set the fail bit on the stream.
+// TODO(dneto): Promise C++11 standard behavior in how the value is set in
+// the error case, but only after all target platforms implement it correctly.
+// In particular, the Microsoft C++ runtime appears to be out of spec.
+template <>
+inline std::istream&
+ParseNormalFloat<FloatProxy<Float16>, HexFloatTraits<FloatProxy<Float16>>>(
+    std::istream& is, bool negate_value,
+    HexFloat<FloatProxy<Float16>, HexFloatTraits<FloatProxy<Float16>>>& value) {
+  // First parse as a 32-bit float.
+  HexFloat<FloatProxy<float>> float_val(0.0f);
+  ParseNormalFloat(is, negate_value, float_val);
+  // NOTE(review): if the 32-bit parse failed, the failbit is already set
+  // and float_val holds whatever the 32-bit overload stored (presumably
+  // zero or the saturated value) -- the cast below still runs; confirm
+  // this is intended.
+
+  // Then convert to 16-bit float, saturating at infinities, and
+  // rounding toward zero.
+  float_val.castTo(value, kRoundToZero);
+
+  // Overflow on 16-bit behaves the same as for 32- and 64-bit: set the
+  // fail bit and set the lowest or highest value.
+  if (Float16::isInfinity(value.value().getAsFloat())) {
+    value.set_value(value.isNegative() ? Float16::lowest() : Float16::max());
+    is.setstate(std::ios_base::failbit);
+  }
+  return is;
+}
+
+// Reads a HexFloat from the given stream.
+// If the float is not encoded as a hex-float then it will be parsed
+// as a regular float.
+// This may fail if your stream does not support at least one unget.
+// Nan values can be encoded with "0x1.<not zero>p+exponent_bias".
+// This would normally overflow a float and round to
+// infinity but this special pattern is the exact representation for a NaN,
+// and therefore is actually encoded as the correct NaN. To encode inf,
+// either 0x0p+exponent_bias can be specified or any exponent greater than
+// exponent_bias.
+// Examples using IEEE 32-bit float encoding.
+//    0x1.0p+128 (+inf)
+//    -0x1.0p-128 (-inf)
+//
+//    0x1.1p+128 (+Nan)
+//    -0x1.1p+128 (-Nan)
+//
+//    0x1p+129 (+inf)
+//    -0x1p+129 (-inf)
+template <typename T, typename Traits>
+std::istream& operator>>(std::istream& is, HexFloat<T, Traits>& value) {
+  using HF = HexFloat<T, Traits>;
+  using uint_type = typename HF::uint_type;
+  using int_type = typename HF::int_type;
+
+  // Start from +0 so an early failure path leaves a well-defined value.
+  value.set_value(static_cast<typename HF::native_type>(0.f));
+
+  if (is.flags() & std::ios::skipws) {
+    // If the user wants to skip whitespace , then we should obey that.
+    while (std::isspace(is.peek())) {
+      is.get();
+    }
+  }
+
+  auto next_char = is.peek();
+  bool negate_value = false;
+
+  if (next_char != '-' && next_char != '0') {
+    return ParseNormalFloat(is, negate_value, value);
+  }
+
+  if (next_char == '-') {
+    negate_value = true;
+    is.get();
+    next_char = is.peek();
+  }
+
+  if (next_char == '0') {
+    is.get();  // We may have to unget this.
+    auto maybe_hex_start = is.peek();
+    if (maybe_hex_start != 'x' && maybe_hex_start != 'X') {
+      is.unget();
+      return ParseNormalFloat(is, negate_value, value);
+    } else {
+      is.get();  // Throw away the 'x';
+    }
+  } else {
+    return ParseNormalFloat(is, negate_value, value);
+  }
+
+  // This "looks" like a hex-float so treat it as one.
+  bool seen_p = false;
+  bool seen_dot = false;
+  uint_type fraction_index = 0;
+
+  uint_type fraction = 0;
+  // The exponent is tracked in biased form: it starts at the bias and is
+  // adjusted as integer/fraction bits are consumed, so values <= 0 below
+  // indicate denormal territory.
+  int_type exponent = HF::exponent_bias;
+
+  // Strip off leading zeros so we don't have to special-case them later.
+  while ((next_char = is.peek()) == '0') {
+    is.get();
+  }
+
+  bool is_denorm =
+      true;  // Assume denorm "representation" until we hear otherwise.
+             // NB: This does not mean the value is actually denorm,
+             // it just means that it was written 0.
+  bool bits_written = false;  // Stays false until we write a bit.
+  while (!seen_p && !seen_dot) {
+    // Handle characters that are left of the fractional part.
+    if (next_char == '.') {
+      seen_dot = true;
+    } else if (next_char == 'p') {
+      seen_p = true;
+    } else if (::isxdigit(next_char)) {
+      // We know this is not denormalized since we have stripped all leading
+      // zeroes and we are not a ".".
+      is_denorm = false;
+      int number = get_nibble_from_character(next_char);
+      for (int i = 0; i < 4; ++i, number <<= 1) {
+        uint_type write_bit = (number & 0x8) ? 0x1 : 0x0;
+        if (bits_written) {
+          // If we are here the bits represented belong in the fractional
+          // part of the float, and we have to adjust the exponent accordingly.
+          fraction = static_cast<uint_type>(
+              fraction |
+              static_cast<uint_type>(
+                  write_bit << (HF::top_bit_left_shift - fraction_index++)));
+          exponent = static_cast<int_type>(exponent + 1);
+        }
+        bits_written |= write_bit != 0;
+      }
+    } else {
+      // We have not found our exponent yet, so we have to fail.
+      is.setstate(std::ios::failbit);
+      return is;
+    }
+    is.get();
+    next_char = is.peek();
+  }
+  // Reset for the fractional digits: while the value still "looks" denormal
+  // (written as 0.xxx), leading zero fraction bits adjust the exponent
+  // instead of writing bits.
+  bits_written = false;
+  while (seen_dot && !seen_p) {
+    // Handle only fractional parts now.
+    if (next_char == 'p') {
+      seen_p = true;
+    } else if (::isxdigit(next_char)) {
+      int number = get_nibble_from_character(next_char);
+      for (int i = 0; i < 4; ++i, number <<= 1) {
+        uint_type write_bit = (number & 0x8) ? 0x01 : 0x00;
+        bits_written |= write_bit != 0;
+        if (is_denorm && !bits_written) {
+          // Handle modifying the exponent here this way we can handle
+          // an arbitrary number of hex values without overflowing our
+          // integer.
+          exponent = static_cast<int_type>(exponent - 1);
+        } else {
+          fraction = static_cast<uint_type>(
+              fraction |
+              static_cast<uint_type>(
+                  write_bit << (HF::top_bit_left_shift - fraction_index++)));
+        }
+      }
+    } else {
+      // We still have not found our 'p' exponent yet, so this is not a valid
+      // hex-float.
+      is.setstate(std::ios::failbit);
+      return is;
+    }
+    is.get();
+    next_char = is.peek();
+  }
+
+  // Parse the decimal exponent written after 'p' (at most one sign allowed).
+  bool seen_sign = false;
+  int8_t exponent_sign = 1;
+  int_type written_exponent = 0;
+  while (true) {
+    if ((next_char == '-' || next_char == '+')) {
+      if (seen_sign) {
+        is.setstate(std::ios::failbit);
+        return is;
+      }
+      seen_sign = true;
+      exponent_sign = (next_char == '-') ? -1 : 1;
+    } else if (::isdigit(next_char)) {
+      // Hex-floats express their exponent as decimal.
+      written_exponent = static_cast<int_type>(written_exponent * 10);
+      written_exponent =
+          static_cast<int_type>(written_exponent + (next_char - '0'));
+    } else {
+      break;
+    }
+    is.get();
+    next_char = is.peek();
+  }
+
+  written_exponent = static_cast<int_type>(written_exponent * exponent_sign);
+  exponent = static_cast<int_type>(exponent + written_exponent);
+
+  bool is_zero = is_denorm && (fraction == 0);
+  if (is_denorm && !is_zero) {
+    fraction = static_cast<uint_type>(fraction << 1);
+    exponent = static_cast<int_type>(exponent - 1);
+  } else if (is_zero) {
+    exponent = 0;
+  }
+
+  // Re-introduce the implicit leading 1 for values that will be encoded as
+  // denormals (biased exponent <= 0).
+  if (exponent <= 0 && !is_zero) {
+    fraction = static_cast<uint_type>(fraction >> 1);
+    fraction |= static_cast<uint_type>(1) << HF::top_bit_left_shift;
+  }
+
+  fraction = (fraction >> HF::fraction_right_shift) & HF::fraction_encode_mask;
+
+  const int_type max_exponent =
+      SetBits<uint_type, 0, HF::num_exponent_bits>::get;
+
+  // Handle actual denorm numbers
+  while (exponent < 0 && !is_zero) {
+    fraction = static_cast<uint_type>(fraction >> 1);
+    exponent = static_cast<int_type>(exponent + 1);
+
+    fraction &= HF::fraction_encode_mask;
+    if (fraction == 0) {
+      // We have underflowed our fraction. We should clamp to zero.
+      is_zero = true;
+      exponent = 0;
+    }
+  }
+
+  // We have overflowed so we should be inf/-inf.
+  if (exponent > max_exponent) {
+    exponent = max_exponent;
+    fraction = 0;
+  }
+
+  // Assemble sign | exponent | fraction into the final bit pattern.
+  uint_type output_bits = static_cast<uint_type>(
+      static_cast<uint_type>(negate_value ? 1 : 0) << HF::top_bit_left_shift);
+  output_bits |= fraction;
+
+  uint_type shifted_exponent = static_cast<uint_type>(
+      static_cast<uint_type>(exponent << HF::exponent_left_shift) &
+      HF::exponent_mask);
+  output_bits |= shifted_exponent;
+
+  T output_float = spvutils::BitwiseCast<T>(output_bits);
+  value.set_value(output_float);
+
+  return is;
+}
+
+// Writes a FloatProxy value to a stream.
+// Zero and normal numbers are printed in the usual notation, but with
+// enough digits to fully reproduce the value.  Other values (subnormal,
+// NaN, and infinity) are printed as a hex float.
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const FloatProxy<T>& value) {
+  const auto float_val = value.getAsFloat();
+  const int classification = std::fpclassify(float_val);
+  if (classification == FP_ZERO || classification == FP_NORMAL) {
+    // Decimal output with full round-trip precision; restore the stream's
+    // precision afterwards.
+    const auto saved_precision = os.precision();
+    os.precision(std::numeric_limits<T>::digits10);
+    os << float_val;
+    os.precision(saved_precision);
+  } else {
+    // Subnormal, NaN, and infinite values are rendered as hex floats.
+    os << HexFloat<FloatProxy<T>>(value);
+  }
+  return os;
+}
+
+// Half-precision values are always printed in hex-float form.
+template <>
+inline std::ostream& operator<<<Float16>(std::ostream& os,
+                                         const FloatProxy<Float16>& value) {
+  return os << HexFloat<FloatProxy<Float16>>(value);
+}
+}
+
+#endif  // LIBSPIRV_UTIL_HEX_FLOAT_H_

+ 925 - 0
3rdparty/glslang/SPIRV/spirv.hpp

@@ -0,0 +1,925 @@
+// Copyright (c) 2014-2016 The Khronos Group Inc.
+// 
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and/or associated documentation files (the "Materials"),
+// to deal in the Materials without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Materials, and to permit persons to whom the
+// Materials are furnished to do so, subject to the following conditions:
+// 
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Materials.
+// 
+// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+// STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+// HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ 
+// 
+// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+// IN THE MATERIALS.
+
+// This header is automatically generated by the same tool that creates
+// the Binary Section of the SPIR-V specification.
+
+// Enumeration tokens for SPIR-V, in various styles:
+//   C, C++, C++11, JSON, Lua, Python
+// 
+// - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
+// - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
+// - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
+// - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
+// - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
+// 
+// Some tokens act like mask values, which can be OR'd together,
+// while others are mutually exclusive.  The mask-like ones have
+// "Mask" in their name, and a parallel enum that has the shift
+// amount (1 << x) for each corresponding enumerant.
+
+#ifndef spirv_HPP
+#define spirv_HPP
+
+namespace spv {
+
+typedef unsigned int Id;
+
+#define SPV_VERSION 0x10000
+#define SPV_REVISION 8
+
+static const unsigned int MagicNumber = 0x07230203;
+static const unsigned int Version = 0x00010000;
+static const unsigned int Revision = 8;
+static const unsigned int OpCodeMask = 0xffff;
+static const unsigned int WordCountShift = 16;
+
+enum SourceLanguage {
+    SourceLanguageUnknown = 0,
+    SourceLanguageESSL = 1,
+    SourceLanguageGLSL = 2,
+    SourceLanguageOpenCL_C = 3,
+    SourceLanguageOpenCL_CPP = 4,
+    SourceLanguageHLSL = 5,
+    SourceLanguageMax = 0x7fffffff,
+};
+
+enum ExecutionModel {
+    ExecutionModelVertex = 0,
+    ExecutionModelTessellationControl = 1,
+    ExecutionModelTessellationEvaluation = 2,
+    ExecutionModelGeometry = 3,
+    ExecutionModelFragment = 4,
+    ExecutionModelGLCompute = 5,
+    ExecutionModelKernel = 6,
+    ExecutionModelMax = 0x7fffffff,
+};
+
+enum AddressingModel {
+    AddressingModelLogical = 0,
+    AddressingModelPhysical32 = 1,
+    AddressingModelPhysical64 = 2,
+    AddressingModelMax = 0x7fffffff,
+};
+
+enum MemoryModel {
+    MemoryModelSimple = 0,
+    MemoryModelGLSL450 = 1,
+    MemoryModelOpenCL = 2,
+    MemoryModelMax = 0x7fffffff,
+};
+
+enum ExecutionMode {
+    ExecutionModeInvocations = 0,
+    ExecutionModeSpacingEqual = 1,
+    ExecutionModeSpacingFractionalEven = 2,
+    ExecutionModeSpacingFractionalOdd = 3,
+    ExecutionModeVertexOrderCw = 4,
+    ExecutionModeVertexOrderCcw = 5,
+    ExecutionModePixelCenterInteger = 6,
+    ExecutionModeOriginUpperLeft = 7,
+    ExecutionModeOriginLowerLeft = 8,
+    ExecutionModeEarlyFragmentTests = 9,
+    ExecutionModePointMode = 10,
+    ExecutionModeXfb = 11,
+    ExecutionModeDepthReplacing = 12,
+    ExecutionModeDepthGreater = 14,
+    ExecutionModeDepthLess = 15,
+    ExecutionModeDepthUnchanged = 16,
+    ExecutionModeLocalSize = 17,
+    ExecutionModeLocalSizeHint = 18,
+    ExecutionModeInputPoints = 19,
+    ExecutionModeInputLines = 20,
+    ExecutionModeInputLinesAdjacency = 21,
+    ExecutionModeTriangles = 22,
+    ExecutionModeInputTrianglesAdjacency = 23,
+    ExecutionModeQuads = 24,
+    ExecutionModeIsolines = 25,
+    ExecutionModeOutputVertices = 26,
+    ExecutionModeOutputPoints = 27,
+    ExecutionModeOutputLineStrip = 28,
+    ExecutionModeOutputTriangleStrip = 29,
+    ExecutionModeVecTypeHint = 30,
+    ExecutionModeContractionOff = 31,
+    ExecutionModeMax = 0x7fffffff,
+};
+
+enum StorageClass {
+    StorageClassUniformConstant = 0,
+    StorageClassInput = 1,
+    StorageClassUniform = 2,
+    StorageClassOutput = 3,
+    StorageClassWorkgroup = 4,
+    StorageClassCrossWorkgroup = 5,
+    StorageClassPrivate = 6,
+    StorageClassFunction = 7,
+    StorageClassGeneric = 8,
+    StorageClassPushConstant = 9,
+    StorageClassAtomicCounter = 10,
+    StorageClassImage = 11,
+    StorageClassMax = 0x7fffffff,
+};
+
+enum Dim {
+    Dim1D = 0,
+    Dim2D = 1,
+    Dim3D = 2,
+    DimCube = 3,
+    DimRect = 4,
+    DimBuffer = 5,
+    DimSubpassData = 6,
+    DimMax = 0x7fffffff,
+};
+
+enum SamplerAddressingMode {
+    SamplerAddressingModeNone = 0,
+    SamplerAddressingModeClampToEdge = 1,
+    SamplerAddressingModeClamp = 2,
+    SamplerAddressingModeRepeat = 3,
+    SamplerAddressingModeRepeatMirrored = 4,
+    SamplerAddressingModeMax = 0x7fffffff,
+};
+
+enum SamplerFilterMode {
+    SamplerFilterModeNearest = 0,
+    SamplerFilterModeLinear = 1,
+    SamplerFilterModeMax = 0x7fffffff,
+};
+
+// Image texel formats. Enumerant values are fixed binary-format constants;
+// the Max = 0x7fffffff sentinel forces a full 32-bit underlying type.
+enum ImageFormat {
+    ImageFormatUnknown = 0,
+    ImageFormatRgba32f = 1,
+    ImageFormatRgba16f = 2,
+    ImageFormatR32f = 3,
+    ImageFormatRgba8 = 4,
+    ImageFormatRgba8Snorm = 5,
+    ImageFormatRg32f = 6,
+    ImageFormatRg16f = 7,
+    ImageFormatR11fG11fB10f = 8,
+    ImageFormatR16f = 9,
+    ImageFormatRgba16 = 10,
+    ImageFormatRgb10A2 = 11,
+    ImageFormatRg16 = 12,
+    ImageFormatRg8 = 13,
+    ImageFormatR16 = 14,
+    ImageFormatR8 = 15,
+    ImageFormatRgba16Snorm = 16,
+    ImageFormatRg16Snorm = 17,
+    ImageFormatRg8Snorm = 18,
+    ImageFormatR16Snorm = 19,
+    ImageFormatR8Snorm = 20,
+    ImageFormatRgba32i = 21,
+    ImageFormatRgba16i = 22,
+    ImageFormatRgba8i = 23,
+    ImageFormatR32i = 24,
+    ImageFormatRg32i = 25,
+    ImageFormatRg16i = 26,
+    ImageFormatRg8i = 27,
+    ImageFormatR16i = 28,
+    ImageFormatR8i = 29,
+    ImageFormatRgba32ui = 30,
+    ImageFormatRgba16ui = 31,
+    ImageFormatRgba8ui = 32,
+    ImageFormatR32ui = 33,
+    ImageFormatRgb10a2ui = 34,
+    ImageFormatRg32ui = 35,
+    ImageFormatRg16ui = 36,
+    ImageFormatRg8ui = 37,
+    ImageFormatR16ui = 38,
+    ImageFormatR8ui = 39,
+    ImageFormatMax = 0x7fffffff,
+};
+
+// Image channel orderings (cf. OpImageQueryOrder below).
+enum ImageChannelOrder {
+    ImageChannelOrderR = 0,
+    ImageChannelOrderA = 1,
+    ImageChannelOrderRG = 2,
+    ImageChannelOrderRA = 3,
+    ImageChannelOrderRGB = 4,
+    ImageChannelOrderRGBA = 5,
+    ImageChannelOrderBGRA = 6,
+    ImageChannelOrderARGB = 7,
+    ImageChannelOrderIntensity = 8,
+    ImageChannelOrderLuminance = 9,
+    ImageChannelOrderRx = 10,
+    ImageChannelOrderRGx = 11,
+    ImageChannelOrderRGBx = 12,
+    ImageChannelOrderDepth = 13,
+    ImageChannelOrderDepthStencil = 14,
+    ImageChannelOrdersRGB = 15,
+    ImageChannelOrdersRGBx = 16,
+    ImageChannelOrdersRGBA = 17,
+    ImageChannelOrdersBGRA = 18,
+    ImageChannelOrderABGR = 19,
+    ImageChannelOrderMax = 0x7fffffff,
+};
+
+// Per-channel data types for image formats.
+enum ImageChannelDataType {
+    ImageChannelDataTypeSnormInt8 = 0,
+    ImageChannelDataTypeSnormInt16 = 1,
+    ImageChannelDataTypeUnormInt8 = 2,
+    ImageChannelDataTypeUnormInt16 = 3,
+    ImageChannelDataTypeUnormShort565 = 4,
+    ImageChannelDataTypeUnormShort555 = 5,
+    ImageChannelDataTypeUnormInt101010 = 6,
+    ImageChannelDataTypeSignedInt8 = 7,
+    ImageChannelDataTypeSignedInt16 = 8,
+    ImageChannelDataTypeSignedInt32 = 9,
+    ImageChannelDataTypeUnsignedInt8 = 10,
+    ImageChannelDataTypeUnsignedInt16 = 11,
+    ImageChannelDataTypeUnsignedInt32 = 12,
+    ImageChannelDataTypeHalfFloat = 13,
+    ImageChannelDataTypeFloat = 14,
+    ImageChannelDataTypeUnormInt24 = 15,
+    ImageChannelDataTypeUnormInt101010_2 = 16,
+    ImageChannelDataTypeMax = 0x7fffffff,
+};
+
+// Bit positions of the optional image-operand flags; each XMask below
+// is exactly (1 << XShift).
+enum ImageOperandsShift {
+    ImageOperandsBiasShift = 0,
+    ImageOperandsLodShift = 1,
+    ImageOperandsGradShift = 2,
+    ImageOperandsConstOffsetShift = 3,
+    ImageOperandsOffsetShift = 4,
+    ImageOperandsConstOffsetsShift = 5,
+    ImageOperandsSampleShift = 6,
+    ImageOperandsMinLodShift = 7,
+    ImageOperandsMax = 0x7fffffff,
+};
+
+enum ImageOperandsMask {
+    ImageOperandsMaskNone = 0,
+    ImageOperandsBiasMask = 0x00000001,
+    ImageOperandsLodMask = 0x00000002,
+    ImageOperandsGradMask = 0x00000004,
+    ImageOperandsConstOffsetMask = 0x00000008,
+    ImageOperandsOffsetMask = 0x00000010,
+    ImageOperandsConstOffsetsMask = 0x00000020,
+    ImageOperandsSampleMask = 0x00000040,
+    ImageOperandsMinLodMask = 0x00000080,
+};
+
+// Fast floating-point math relaxation flags (shift positions + bit masks).
+enum FPFastMathModeShift {
+    FPFastMathModeNotNaNShift = 0,
+    FPFastMathModeNotInfShift = 1,
+    FPFastMathModeNSZShift = 2,
+    FPFastMathModeAllowRecipShift = 3,
+    FPFastMathModeFastShift = 4,
+    FPFastMathModeMax = 0x7fffffff,
+};
+
+enum FPFastMathModeMask {
+    FPFastMathModeMaskNone = 0,
+    FPFastMathModeNotNaNMask = 0x00000001,
+    FPFastMathModeNotInfMask = 0x00000002,
+    FPFastMathModeNSZMask = 0x00000004,
+    FPFastMathModeAllowRecipMask = 0x00000008,
+    FPFastMathModeFastMask = 0x00000010,
+};
+
+// Floating-point rounding: to-nearest-even, to-zero, to-+inf, to--inf.
+enum FPRoundingMode {
+    FPRoundingModeRTE = 0,
+    FPRoundingModeRTZ = 1,
+    FPRoundingModeRTP = 2,
+    FPRoundingModeRTN = 3,
+    FPRoundingModeMax = 0x7fffffff,
+};
+
+enum LinkageType {
+    LinkageTypeExport = 0,
+    LinkageTypeImport = 1,
+    LinkageTypeMax = 0x7fffffff,
+};
+
+enum AccessQualifier {
+    AccessQualifierReadOnly = 0,
+    AccessQualifierWriteOnly = 1,
+    AccessQualifierReadWrite = 2,
+    AccessQualifierMax = 0x7fffffff,
+};
+
+enum FunctionParameterAttribute {
+    FunctionParameterAttributeZext = 0,
+    FunctionParameterAttributeSext = 1,
+    FunctionParameterAttributeByVal = 2,
+    FunctionParameterAttributeSret = 3,
+    FunctionParameterAttributeNoAlias = 4,
+    FunctionParameterAttributeNoCapture = 5,
+    FunctionParameterAttributeNoWrite = 6,
+    FunctionParameterAttributeNoReadWrite = 7,
+    FunctionParameterAttributeMax = 0x7fffffff,
+};
+
+// Decorations attached via OpDecorate / OpMemberDecorate. Gaps in the
+// numbering (e.g. 12, 27) are intentional; values are part of the binary
+// format and must never be renumbered.
+enum Decoration {
+    DecorationRelaxedPrecision = 0,
+    DecorationSpecId = 1,
+    DecorationBlock = 2,
+    DecorationBufferBlock = 3,
+    DecorationRowMajor = 4,
+    DecorationColMajor = 5,
+    DecorationArrayStride = 6,
+    DecorationMatrixStride = 7,
+    DecorationGLSLShared = 8,
+    DecorationGLSLPacked = 9,
+    DecorationCPacked = 10,
+    DecorationBuiltIn = 11,
+    DecorationNoPerspective = 13,
+    DecorationFlat = 14,
+    DecorationPatch = 15,
+    DecorationCentroid = 16,
+    DecorationSample = 17,
+    DecorationInvariant = 18,
+    DecorationRestrict = 19,
+    DecorationAliased = 20,
+    DecorationVolatile = 21,
+    DecorationConstant = 22,
+    DecorationCoherent = 23,
+    DecorationNonWritable = 24,
+    DecorationNonReadable = 25,
+    DecorationUniform = 26,
+    DecorationSaturatedConversion = 28,
+    DecorationStream = 29,
+    DecorationLocation = 30,
+    DecorationComponent = 31,
+    DecorationIndex = 32,
+    DecorationBinding = 33,
+    DecorationDescriptorSet = 34,
+    DecorationOffset = 35,
+    DecorationXfbBuffer = 36,
+    DecorationXfbStride = 37,
+    DecorationFuncParamAttr = 38,
+    DecorationFPRoundingMode = 39,
+    DecorationFPFastMathMode = 40,
+    DecorationLinkageAttributes = 41,
+    DecorationNoContraction = 42,
+    DecorationInputAttachmentIndex = 43,
+    DecorationAlignment = 44,
+    DecorationMax = 0x7fffffff,
+};
+
+// Built-in variable identifiers (operand of DecorationBuiltIn). The 44xx
+// range holds KHR extension values.
+enum BuiltIn {
+    BuiltInPosition = 0,
+    BuiltInPointSize = 1,
+    BuiltInClipDistance = 3,
+    BuiltInCullDistance = 4,
+    BuiltInVertexId = 5,
+    BuiltInInstanceId = 6,
+    BuiltInPrimitiveId = 7,
+    BuiltInInvocationId = 8,
+    BuiltInLayer = 9,
+    BuiltInViewportIndex = 10,
+    BuiltInTessLevelOuter = 11,
+    BuiltInTessLevelInner = 12,
+    BuiltInTessCoord = 13,
+    BuiltInPatchVertices = 14,
+    BuiltInFragCoord = 15,
+    BuiltInPointCoord = 16,
+    BuiltInFrontFacing = 17,
+    BuiltInSampleId = 18,
+    BuiltInSamplePosition = 19,
+    BuiltInSampleMask = 20,
+    BuiltInFragDepth = 22,
+    BuiltInHelperInvocation = 23,
+    BuiltInNumWorkgroups = 24,
+    BuiltInWorkgroupSize = 25,
+    BuiltInWorkgroupId = 26,
+    BuiltInLocalInvocationId = 27,
+    BuiltInGlobalInvocationId = 28,
+    BuiltInLocalInvocationIndex = 29,
+    BuiltInWorkDim = 30,
+    BuiltInGlobalSize = 31,
+    BuiltInEnqueuedWorkgroupSize = 32,
+    BuiltInGlobalOffset = 33,
+    BuiltInGlobalLinearId = 34,
+    BuiltInSubgroupSize = 36,
+    BuiltInSubgroupMaxSize = 37,
+    BuiltInNumSubgroups = 38,
+    BuiltInNumEnqueuedSubgroups = 39,
+    BuiltInSubgroupId = 40,
+    BuiltInSubgroupLocalInvocationId = 41,
+    BuiltInVertexIndex = 42,
+    BuiltInInstanceIndex = 43,
+    BuiltInSubgroupEqMaskKHR = 4416,
+    BuiltInSubgroupGeMaskKHR = 4417,
+    BuiltInSubgroupGtMaskKHR = 4418,
+    BuiltInSubgroupLeMaskKHR = 4419,
+    BuiltInSubgroupLtMaskKHR = 4420,
+    BuiltInBaseVertex = 4424,
+    BuiltInBaseInstance = 4425,
+    BuiltInDrawIndex = 4426,
+    BuiltInMax = 0x7fffffff,
+};
+
+// Control / semantics flag enums. Each XShift enum gives bit positions and
+// the matching XMask enum gives (1 << XShift) values for combining with the
+// operator| overloads at the end of this file.
+enum SelectionControlShift {
+    SelectionControlFlattenShift = 0,
+    SelectionControlDontFlattenShift = 1,
+    SelectionControlMax = 0x7fffffff,
+};
+
+enum SelectionControlMask {
+    SelectionControlMaskNone = 0,
+    SelectionControlFlattenMask = 0x00000001,
+    SelectionControlDontFlattenMask = 0x00000002,
+};
+
+enum LoopControlShift {
+    LoopControlUnrollShift = 0,
+    LoopControlDontUnrollShift = 1,
+    LoopControlMax = 0x7fffffff,
+};
+
+enum LoopControlMask {
+    LoopControlMaskNone = 0,
+    LoopControlUnrollMask = 0x00000001,
+    LoopControlDontUnrollMask = 0x00000002,
+};
+
+enum FunctionControlShift {
+    FunctionControlInlineShift = 0,
+    FunctionControlDontInlineShift = 1,
+    FunctionControlPureShift = 2,
+    FunctionControlConstShift = 3,
+    FunctionControlMax = 0x7fffffff,
+};
+
+enum FunctionControlMask {
+    FunctionControlMaskNone = 0,
+    FunctionControlInlineMask = 0x00000001,
+    FunctionControlDontInlineMask = 0x00000002,
+    FunctionControlPureMask = 0x00000004,
+    FunctionControlConstMask = 0x00000008,
+};
+
+// Note: memory-semantics bits start at shift 1; bit 0 is unused.
+enum MemorySemanticsShift {
+    MemorySemanticsAcquireShift = 1,
+    MemorySemanticsReleaseShift = 2,
+    MemorySemanticsAcquireReleaseShift = 3,
+    MemorySemanticsSequentiallyConsistentShift = 4,
+    MemorySemanticsUniformMemoryShift = 6,
+    MemorySemanticsSubgroupMemoryShift = 7,
+    MemorySemanticsWorkgroupMemoryShift = 8,
+    MemorySemanticsCrossWorkgroupMemoryShift = 9,
+    MemorySemanticsAtomicCounterMemoryShift = 10,
+    MemorySemanticsImageMemoryShift = 11,
+    MemorySemanticsMax = 0x7fffffff,
+};
+
+enum MemorySemanticsMask {
+    MemorySemanticsMaskNone = 0,
+    MemorySemanticsAcquireMask = 0x00000002,
+    MemorySemanticsReleaseMask = 0x00000004,
+    MemorySemanticsAcquireReleaseMask = 0x00000008,
+    MemorySemanticsSequentiallyConsistentMask = 0x00000010,
+    MemorySemanticsUniformMemoryMask = 0x00000040,
+    MemorySemanticsSubgroupMemoryMask = 0x00000080,
+    MemorySemanticsWorkgroupMemoryMask = 0x00000100,
+    MemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,
+    MemorySemanticsAtomicCounterMemoryMask = 0x00000400,
+    MemorySemanticsImageMemoryMask = 0x00000800,
+};
+
+enum MemoryAccessShift {
+    MemoryAccessVolatileShift = 0,
+    MemoryAccessAlignedShift = 1,
+    MemoryAccessNontemporalShift = 2,
+    MemoryAccessMax = 0x7fffffff,
+};
+
+enum MemoryAccessMask {
+    MemoryAccessMaskNone = 0,
+    MemoryAccessVolatileMask = 0x00000001,
+    MemoryAccessAlignedMask = 0x00000002,
+    MemoryAccessNontemporalMask = 0x00000004,
+};
+
+// Execution scope for barriers, atomics and group operations.
+enum Scope {
+    ScopeCrossDevice = 0,
+    ScopeDevice = 1,
+    ScopeWorkgroup = 2,
+    ScopeSubgroup = 3,
+    ScopeInvocation = 4,
+    ScopeMax = 0x7fffffff,
+};
+
+enum GroupOperation {
+    GroupOperationReduce = 0,
+    GroupOperationInclusiveScan = 1,
+    GroupOperationExclusiveScan = 2,
+    GroupOperationMax = 0x7fffffff,
+};
+
+enum KernelEnqueueFlags {
+    KernelEnqueueFlagsNoWait = 0,
+    KernelEnqueueFlagsWaitKernel = 1,
+    KernelEnqueueFlagsWaitWorkGroup = 2,
+    KernelEnqueueFlagsMax = 0x7fffffff,
+};
+
+enum KernelProfilingInfoShift {
+    KernelProfilingInfoCmdExecTimeShift = 0,
+    KernelProfilingInfoMax = 0x7fffffff,
+};
+
+enum KernelProfilingInfoMask {
+    KernelProfilingInfoMaskNone = 0,
+    KernelProfilingInfoCmdExecTimeMask = 0x00000001,
+};
+
+// Capabilities a module may declare with OpCapability. Gaps (16, 26) are
+// intentional; the 44xx range holds KHR extension capabilities.
+enum Capability {
+    CapabilityMatrix = 0,
+    CapabilityShader = 1,
+    CapabilityGeometry = 2,
+    CapabilityTessellation = 3,
+    CapabilityAddresses = 4,
+    CapabilityLinkage = 5,
+    CapabilityKernel = 6,
+    CapabilityVector16 = 7,
+    CapabilityFloat16Buffer = 8,
+    CapabilityFloat16 = 9,
+    CapabilityFloat64 = 10,
+    CapabilityInt64 = 11,
+    CapabilityInt64Atomics = 12,
+    CapabilityImageBasic = 13,
+    CapabilityImageReadWrite = 14,
+    CapabilityImageMipmap = 15,
+    CapabilityPipes = 17,
+    CapabilityGroups = 18,
+    CapabilityDeviceEnqueue = 19,
+    CapabilityLiteralSampler = 20,
+    CapabilityAtomicStorage = 21,
+    CapabilityInt16 = 22,
+    CapabilityTessellationPointSize = 23,
+    CapabilityGeometryPointSize = 24,
+    CapabilityImageGatherExtended = 25,
+    CapabilityStorageImageMultisample = 27,
+    CapabilityUniformBufferArrayDynamicIndexing = 28,
+    CapabilitySampledImageArrayDynamicIndexing = 29,
+    CapabilityStorageBufferArrayDynamicIndexing = 30,
+    CapabilityStorageImageArrayDynamicIndexing = 31,
+    CapabilityClipDistance = 32,
+    CapabilityCullDistance = 33,
+    CapabilityImageCubeArray = 34,
+    CapabilitySampleRateShading = 35,
+    CapabilityImageRect = 36,
+    CapabilitySampledRect = 37,
+    CapabilityGenericPointer = 38,
+    CapabilityInt8 = 39,
+    CapabilityInputAttachment = 40,
+    CapabilitySparseResidency = 41,
+    CapabilityMinLod = 42,
+    CapabilitySampled1D = 43,
+    CapabilityImage1D = 44,
+    CapabilitySampledCubeArray = 45,
+    CapabilitySampledBuffer = 46,
+    CapabilityImageBuffer = 47,
+    CapabilityImageMSArray = 48,
+    CapabilityStorageImageExtendedFormats = 49,
+    CapabilityImageQuery = 50,
+    CapabilityDerivativeControl = 51,
+    CapabilityInterpolationFunction = 52,
+    CapabilityTransformFeedback = 53,
+    CapabilityGeometryStreams = 54,
+    CapabilityStorageImageReadWithoutFormat = 55,
+    CapabilityStorageImageWriteWithoutFormat = 56,
+    CapabilityMultiViewport = 57,
+    CapabilitySubgroupBallotKHR = 4423,
+    CapabilityDrawParameters = 4427,
+    CapabilityMax = 0x7fffffff,
+};
+
+// Instruction opcodes. Numbering gaps are intentional (reserved by the
+// binary format); the 44xx range holds KHR extension opcodes. Do not
+// renumber or reorder.
+enum Op {
+    OpNop = 0,
+    OpUndef = 1,
+    OpSourceContinued = 2,
+    OpSource = 3,
+    OpSourceExtension = 4,
+    OpName = 5,
+    OpMemberName = 6,
+    OpString = 7,
+    OpLine = 8,
+    OpExtension = 10,
+    OpExtInstImport = 11,
+    OpExtInst = 12,
+    OpMemoryModel = 14,
+    OpEntryPoint = 15,
+    OpExecutionMode = 16,
+    OpCapability = 17,
+    OpTypeVoid = 19,
+    OpTypeBool = 20,
+    OpTypeInt = 21,
+    OpTypeFloat = 22,
+    OpTypeVector = 23,
+    OpTypeMatrix = 24,
+    OpTypeImage = 25,
+    OpTypeSampler = 26,
+    OpTypeSampledImage = 27,
+    OpTypeArray = 28,
+    OpTypeRuntimeArray = 29,
+    OpTypeStruct = 30,
+    OpTypeOpaque = 31,
+    OpTypePointer = 32,
+    OpTypeFunction = 33,
+    OpTypeEvent = 34,
+    OpTypeDeviceEvent = 35,
+    OpTypeReserveId = 36,
+    OpTypeQueue = 37,
+    OpTypePipe = 38,
+    OpTypeForwardPointer = 39,
+    OpConstantTrue = 41,
+    OpConstantFalse = 42,
+    OpConstant = 43,
+    OpConstantComposite = 44,
+    OpConstantSampler = 45,
+    OpConstantNull = 46,
+    OpSpecConstantTrue = 48,
+    OpSpecConstantFalse = 49,
+    OpSpecConstant = 50,
+    OpSpecConstantComposite = 51,
+    OpSpecConstantOp = 52,
+    OpFunction = 54,
+    OpFunctionParameter = 55,
+    OpFunctionEnd = 56,
+    OpFunctionCall = 57,
+    OpVariable = 59,
+    OpImageTexelPointer = 60,
+    OpLoad = 61,
+    OpStore = 62,
+    OpCopyMemory = 63,
+    OpCopyMemorySized = 64,
+    OpAccessChain = 65,
+    OpInBoundsAccessChain = 66,
+    OpPtrAccessChain = 67,
+    OpArrayLength = 68,
+    OpGenericPtrMemSemantics = 69,
+    OpInBoundsPtrAccessChain = 70,
+    OpDecorate = 71,
+    OpMemberDecorate = 72,
+    OpDecorationGroup = 73,
+    OpGroupDecorate = 74,
+    OpGroupMemberDecorate = 75,
+    OpVectorExtractDynamic = 77,
+    OpVectorInsertDynamic = 78,
+    OpVectorShuffle = 79,
+    OpCompositeConstruct = 80,
+    OpCompositeExtract = 81,
+    OpCompositeInsert = 82,
+    OpCopyObject = 83,
+    OpTranspose = 84,
+    OpSampledImage = 86,
+    OpImageSampleImplicitLod = 87,
+    OpImageSampleExplicitLod = 88,
+    OpImageSampleDrefImplicitLod = 89,
+    OpImageSampleDrefExplicitLod = 90,
+    OpImageSampleProjImplicitLod = 91,
+    OpImageSampleProjExplicitLod = 92,
+    OpImageSampleProjDrefImplicitLod = 93,
+    OpImageSampleProjDrefExplicitLod = 94,
+    OpImageFetch = 95,
+    OpImageGather = 96,
+    OpImageDrefGather = 97,
+    OpImageRead = 98,
+    OpImageWrite = 99,
+    OpImage = 100,
+    OpImageQueryFormat = 101,
+    OpImageQueryOrder = 102,
+    OpImageQuerySizeLod = 103,
+    OpImageQuerySize = 104,
+    OpImageQueryLod = 105,
+    OpImageQueryLevels = 106,
+    OpImageQuerySamples = 107,
+    OpConvertFToU = 109,
+    OpConvertFToS = 110,
+    OpConvertSToF = 111,
+    OpConvertUToF = 112,
+    OpUConvert = 113,
+    OpSConvert = 114,
+    OpFConvert = 115,
+    OpQuantizeToF16 = 116,
+    OpConvertPtrToU = 117,
+    OpSatConvertSToU = 118,
+    OpSatConvertUToS = 119,
+    OpConvertUToPtr = 120,
+    OpPtrCastToGeneric = 121,
+    OpGenericCastToPtr = 122,
+    OpGenericCastToPtrExplicit = 123,
+    OpBitcast = 124,
+    OpSNegate = 126,
+    OpFNegate = 127,
+    OpIAdd = 128,
+    OpFAdd = 129,
+    OpISub = 130,
+    OpFSub = 131,
+    OpIMul = 132,
+    OpFMul = 133,
+    OpUDiv = 134,
+    OpSDiv = 135,
+    OpFDiv = 136,
+    OpUMod = 137,
+    OpSRem = 138,
+    OpSMod = 139,
+    OpFRem = 140,
+    OpFMod = 141,
+    OpVectorTimesScalar = 142,
+    OpMatrixTimesScalar = 143,
+    OpVectorTimesMatrix = 144,
+    OpMatrixTimesVector = 145,
+    OpMatrixTimesMatrix = 146,
+    OpOuterProduct = 147,
+    OpDot = 148,
+    OpIAddCarry = 149,
+    OpISubBorrow = 150,
+    OpUMulExtended = 151,
+    OpSMulExtended = 152,
+    OpAny = 154,
+    OpAll = 155,
+    OpIsNan = 156,
+    OpIsInf = 157,
+    OpIsFinite = 158,
+    OpIsNormal = 159,
+    OpSignBitSet = 160,
+    OpLessOrGreater = 161,
+    OpOrdered = 162,
+    OpUnordered = 163,
+    OpLogicalEqual = 164,
+    OpLogicalNotEqual = 165,
+    OpLogicalOr = 166,
+    OpLogicalAnd = 167,
+    OpLogicalNot = 168,
+    OpSelect = 169,
+    OpIEqual = 170,
+    OpINotEqual = 171,
+    OpUGreaterThan = 172,
+    OpSGreaterThan = 173,
+    OpUGreaterThanEqual = 174,
+    OpSGreaterThanEqual = 175,
+    OpULessThan = 176,
+    OpSLessThan = 177,
+    OpULessThanEqual = 178,
+    OpSLessThanEqual = 179,
+    OpFOrdEqual = 180,
+    OpFUnordEqual = 181,
+    OpFOrdNotEqual = 182,
+    OpFUnordNotEqual = 183,
+    OpFOrdLessThan = 184,
+    OpFUnordLessThan = 185,
+    OpFOrdGreaterThan = 186,
+    OpFUnordGreaterThan = 187,
+    OpFOrdLessThanEqual = 188,
+    OpFUnordLessThanEqual = 189,
+    OpFOrdGreaterThanEqual = 190,
+    OpFUnordGreaterThanEqual = 191,
+    OpShiftRightLogical = 194,
+    OpShiftRightArithmetic = 195,
+    OpShiftLeftLogical = 196,
+    OpBitwiseOr = 197,
+    OpBitwiseXor = 198,
+    OpBitwiseAnd = 199,
+    OpNot = 200,
+    OpBitFieldInsert = 201,
+    OpBitFieldSExtract = 202,
+    OpBitFieldUExtract = 203,
+    OpBitReverse = 204,
+    OpBitCount = 205,
+    OpDPdx = 207,
+    OpDPdy = 208,
+    OpFwidth = 209,
+    OpDPdxFine = 210,
+    OpDPdyFine = 211,
+    OpFwidthFine = 212,
+    OpDPdxCoarse = 213,
+    OpDPdyCoarse = 214,
+    OpFwidthCoarse = 215,
+    OpEmitVertex = 218,
+    OpEndPrimitive = 219,
+    OpEmitStreamVertex = 220,
+    OpEndStreamPrimitive = 221,
+    OpControlBarrier = 224,
+    OpMemoryBarrier = 225,
+    OpAtomicLoad = 227,
+    OpAtomicStore = 228,
+    OpAtomicExchange = 229,
+    OpAtomicCompareExchange = 230,
+    OpAtomicCompareExchangeWeak = 231,
+    OpAtomicIIncrement = 232,
+    OpAtomicIDecrement = 233,
+    OpAtomicIAdd = 234,
+    OpAtomicISub = 235,
+    OpAtomicSMin = 236,
+    OpAtomicUMin = 237,
+    OpAtomicSMax = 238,
+    OpAtomicUMax = 239,
+    OpAtomicAnd = 240,
+    OpAtomicOr = 241,
+    OpAtomicXor = 242,
+    OpPhi = 245,
+    OpLoopMerge = 246,
+    OpSelectionMerge = 247,
+    OpLabel = 248,
+    OpBranch = 249,
+    OpBranchConditional = 250,
+    OpSwitch = 251,
+    OpKill = 252,
+    OpReturn = 253,
+    OpReturnValue = 254,
+    OpUnreachable = 255,
+    OpLifetimeStart = 256,
+    OpLifetimeStop = 257,
+    OpGroupAsyncCopy = 259,
+    OpGroupWaitEvents = 260,
+    OpGroupAll = 261,
+    OpGroupAny = 262,
+    OpGroupBroadcast = 263,
+    OpGroupIAdd = 264,
+    OpGroupFAdd = 265,
+    OpGroupFMin = 266,
+    OpGroupUMin = 267,
+    OpGroupSMin = 268,
+    OpGroupFMax = 269,
+    OpGroupUMax = 270,
+    OpGroupSMax = 271,
+    OpReadPipe = 274,
+    OpWritePipe = 275,
+    OpReservedReadPipe = 276,
+    OpReservedWritePipe = 277,
+    OpReserveReadPipePackets = 278,
+    OpReserveWritePipePackets = 279,
+    OpCommitReadPipe = 280,
+    OpCommitWritePipe = 281,
+    OpIsValidReserveId = 282,
+    OpGetNumPipePackets = 283,
+    OpGetMaxPipePackets = 284,
+    OpGroupReserveReadPipePackets = 285,
+    OpGroupReserveWritePipePackets = 286,
+    OpGroupCommitReadPipe = 287,
+    OpGroupCommitWritePipe = 288,
+    OpEnqueueMarker = 291,
+    OpEnqueueKernel = 292,
+    OpGetKernelNDrangeSubGroupCount = 293,
+    OpGetKernelNDrangeMaxSubGroupSize = 294,
+    OpGetKernelWorkGroupSize = 295,
+    OpGetKernelPreferredWorkGroupSizeMultiple = 296,
+    OpRetainEvent = 297,
+    OpReleaseEvent = 298,
+    OpCreateUserEvent = 299,
+    OpIsValidEvent = 300,
+    OpSetUserEventStatus = 301,
+    OpCaptureEventProfilingInfo = 302,
+    OpGetDefaultQueue = 303,
+    OpBuildNDRange = 304,
+    OpImageSparseSampleImplicitLod = 305,
+    OpImageSparseSampleExplicitLod = 306,
+    OpImageSparseSampleDrefImplicitLod = 307,
+    OpImageSparseSampleDrefExplicitLod = 308,
+    OpImageSparseSampleProjImplicitLod = 309,
+    OpImageSparseSampleProjExplicitLod = 310,
+    OpImageSparseSampleProjDrefImplicitLod = 311,
+    OpImageSparseSampleProjDrefExplicitLod = 312,
+    OpImageSparseFetch = 313,
+    OpImageSparseGather = 314,
+    OpImageSparseDrefGather = 315,
+    OpImageSparseTexelsResident = 316,
+    OpNoLine = 317,
+    OpAtomicFlagTestAndSet = 318,
+    OpAtomicFlagClear = 319,
+    OpImageSparseRead = 320,
+    OpSubgroupBallotKHR = 4421,
+    OpSubgroupFirstInvocationKHR = 4422,
+    OpMax = 0x7fffffff,
+};
+
+// Overload operator| for mask bit combining: without these, OR-ing two
+// enumerators yields an int rather than the mask enum type.
+
+inline ImageOperandsMask operator|(ImageOperandsMask a, ImageOperandsMask b) { return ImageOperandsMask(unsigned(a) | unsigned(b)); }
+inline FPFastMathModeMask operator|(FPFastMathModeMask a, FPFastMathModeMask b) { return FPFastMathModeMask(unsigned(a) | unsigned(b)); }
+inline SelectionControlMask operator|(SelectionControlMask a, SelectionControlMask b) { return SelectionControlMask(unsigned(a) | unsigned(b)); }
+inline LoopControlMask operator|(LoopControlMask a, LoopControlMask b) { return LoopControlMask(unsigned(a) | unsigned(b)); }
+inline FunctionControlMask operator|(FunctionControlMask a, FunctionControlMask b) { return FunctionControlMask(unsigned(a) | unsigned(b)); }
+inline MemorySemanticsMask operator|(MemorySemanticsMask a, MemorySemanticsMask b) { return MemorySemanticsMask(unsigned(a) | unsigned(b)); }
+inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) { return MemoryAccessMask(unsigned(a) | unsigned(b)); }
+inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfilingInfoMask b) { return KernelProfilingInfoMask(unsigned(a) | unsigned(b)); }
+
+}  // end namespace spv
+
+#endif  // #ifndef spirv_HPP
+

+ 400 - 0
3rdparty/glslang/SPIRV/spvIR.h

@@ -0,0 +1,400 @@
+//
+//Copyright (C) 2014 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+
+// SPIRV-IR
+//
+// Simple in-memory representation (IR) of SPIR-V. Just holds
+// each function's CFG of blocks. Has this hierarchy:
+//  - Module, which is a list of 
+//    - Function, which is a list of 
+//      - Block, which is a list of 
+//        - Instruction
+//
+
+#pragma once
+#ifndef spvIR_H
+#define spvIR_H
+
+#include "spirv.hpp"
+
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <iostream>
+#include <memory>
+#include <vector>
+
+namespace spv {
+
+class Block;
+class Function;
+class Module;
+
+// Sentinel ids: 0 is never a valid SPIR-V id.
+const Id NoResult = 0;
+const Id NoType = 0;
+
+// Sentinel meaning "no precision decoration requested".
+const Decoration NoPrecision = DecorationMax;
+// Union of every memory-semantics storage-class bit plus sequential
+// consistency; used when a barrier must cover all memory.
+const MemorySemanticsMask MemorySemanticsAllMemory = 
+                (MemorySemanticsMask)(MemorySemanticsSequentiallyConsistentMask |
+                                      MemorySemanticsUniformMemoryMask |
+                                      MemorySemanticsSubgroupMemoryMask |
+                                      MemorySemanticsWorkgroupMemoryMask |
+                                      MemorySemanticsCrossWorkgroupMemoryMask |
+                                      MemorySemanticsAtomicCounterMemoryMask |
+                                      MemorySemanticsImageMemoryMask);
+
+//
+// SPIR-V IR instruction.
+//
+
+// One SPIR-V instruction: opcode, optional result/type ids, and a flat list
+// of 32-bit operand words. Knows how to serialize itself to the binary form.
+class Instruction {
+public:
+    // An instruction that produces a result: resultId = opCode typeId operands...
+    Instruction(Id resultId, Id typeId, Op opCode) : resultId(resultId), typeId(typeId), opCode(opCode), block(nullptr) { }
+    // An instruction with neither result nor type (resultId/typeId stay 0).
+    explicit Instruction(Op opCode) : resultId(NoResult), typeId(NoType), opCode(opCode), block(nullptr) { }
+    virtual ~Instruction() {}
+    void addIdOperand(Id id) { operands.push_back(id); }
+    void addImmediateOperand(unsigned int immediate) { operands.push_back(immediate); }
+    // Append a nul-terminated string as literal-string operands: 4 chars are
+    // packed per 32-bit word, the terminating 0 is included, and a partial
+    // last word is zero-padded.
+    void addStringOperand(const char* str)
+    {
+        originalString = str;
+        unsigned int word;
+        char* wordString = (char*)&word;
+        char* wordPtr = wordString;
+        int charCount = 0;
+        char c;
+        do {
+            c = *(str++);
+            *(wordPtr++) = c;
+            ++charCount;
+            if (charCount == 4) {
+                addImmediateOperand(word);
+                wordPtr = wordString;
+                charCount = 0;
+            }
+        } while (c != 0);
+
+        // deal with partial last word
+        if (charCount > 0) {
+            // pad with 0s
+            for (; charCount < 4; ++charCount)
+                *(wordPtr++) = 0;
+            addImmediateOperand(word);
+        }
+    }
+    void setBlock(Block* b) { block = b; }
+    Block* getBlock() const { return block; }
+    Op getOpCode() const { return opCode; }
+    int getNumOperands() const { return (int)operands.size(); }
+    Id getResultId() const { return resultId; }
+    Id getTypeId() const { return typeId; }
+    Id getIdOperand(int op) const { return operands[op]; }
+    unsigned int getImmediateOperand(int op) const { return operands[op]; }
+    // Returns the string passed to addStringOperand (kept for convenience).
+    const char* getStringOperand() const { return originalString.c_str(); }
+
+    // Write out the binary form.
+    void dump(std::vector<unsigned int>& out) const
+    {
+        // Compute the wordCount: opcode word, plus type/result words only
+        // when present (id 0 means absent), plus one word per operand.
+        unsigned int wordCount = 1;
+        if (typeId)
+            ++wordCount;
+        if (resultId)
+            ++wordCount;
+        wordCount += (unsigned int)operands.size();
+
+        // Write out the beginning of the instruction: word count packed in
+        // the high half of the first word, opcode in the low half.
+        out.push_back(((wordCount) << WordCountShift) | opCode);
+        if (typeId)
+            out.push_back(typeId);
+        if (resultId)
+            out.push_back(resultId);
+
+        // Write out the operands
+        for (int op = 0; op < (int)operands.size(); ++op)
+            out.push_back(operands[op]);
+    }
+
+protected:
+    Instruction(const Instruction&);   // declared but not defined: non-copyable
+    Id resultId;
+    Id typeId;
+    Op opCode;
+    std::vector<Id> operands;
+    std::string originalString;        // could be optimized away; convenience for getting string operand
+    Block* block;
+};
+
+//
+// SPIR-V IR block.
+//
+
+// A basic block: a leading OpLabel, a list of local OpVariables (emitted
+// right after the label), and the remaining instructions. Owns its
+// instructions; predecessor/successor edges are non-owning.
+class Block {
+public:
+    Block(Id id, Function& parent);
+    virtual ~Block()
+    {
+    }
+
+    // The block's id is the result id of its leading OpLabel.
+    Id getId() { return instructions.front()->getResultId(); }
+
+    Function& getParent() const { return parent; }
+    void addInstruction(std::unique_ptr<Instruction> inst);
+    // Records the edge in both directions: 'pred' becomes a predecessor of
+    // this block, and this block a successor of 'pred'.
+    void addPredecessor(Block* pred) { predecessors.push_back(pred); pred->successors.push_back(this);}
+    void addLocalVariable(std::unique_ptr<Instruction> inst) { localVariables.push_back(std::move(inst)); }
+    const std::vector<Block*>& getPredecessors() const { return predecessors; }
+    const std::vector<Block*>& getSuccessors() const { return successors; }
+    const std::vector<std::unique_ptr<Instruction> >& getInstructions() const {
+        return instructions;
+    }
+    void setUnreachable() { unreachable = true; }
+    bool isUnreachable() const { return unreachable; }
+    // Returns the block's merge instruction, if one exists (otherwise null).
+    // A merge instruction, when present, is the next-to-last instruction.
+    const Instruction* getMergeInstruction() const {
+        if (instructions.size() < 2) return nullptr;
+        const Instruction* nextToLast = (instructions.cend() - 2)->get();
+        switch (nextToLast->getOpCode()) {
+            case OpSelectionMerge:
+            case OpLoopMerge:
+                return nextToLast;
+            default:
+                return nullptr;
+        }
+        return nullptr;
+    }
+
+    // True when the last instruction ends control flow (branch/return/etc.),
+    // meaning nothing more may be appended to this block.
+    bool isTerminated() const
+    {
+        switch (instructions.back()->getOpCode()) {
+        case OpBranch:
+        case OpBranchConditional:
+        case OpSwitch:
+        case OpKill:
+        case OpReturn:
+        case OpReturnValue:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    // Emission order: OpLabel first, then the local variables, then the body.
+    void dump(std::vector<unsigned int>& out) const
+    {
+        instructions[0]->dump(out);
+        for (int i = 0; i < (int)localVariables.size(); ++i)
+            localVariables[i]->dump(out);
+        for (int i = 1; i < (int)instructions.size(); ++i)
+            instructions[i]->dump(out);
+    }
+
+protected:
+    Block(const Block&);               // declared but not defined: non-copyable
+    Block& operator=(Block&);
+
+    // To enforce keeping parent and ownership in sync:
+    friend Function;
+
+    std::vector<std::unique_ptr<Instruction> > instructions;
+    std::vector<Block*> predecessors, successors;
+    std::vector<std::unique_ptr<Instruction> > localVariables;
+    Function& parent;
+
+    // track whether this block is known to be unreachable (not necessarily
+    // true for all unreachable blocks, but should be set at least
+    // for the extraneous ones introduced by the builder).
+    bool unreachable;
+};
+
+// Traverses the control-flow graph rooted at root in an order suited for
+// readable code generation.  Invokes callback at every node in the traversal
+// order.  (Implemented in InReadableOrder.cpp.)
+void inReadableOrder(Block* root, std::function<void(Block*)> callback);
+
+//
+// SPIR-V IR Function.
+//
+
+// A function: the OpFunction instruction, its OpFunctionParameter
+// instructions, and an owning list of Blocks (blocks[0] is the entry block).
+class Function {
+public:
+    Function(Id id, Id resultType, Id functionType, Id firstParam, Module& parent);
+    // Owns its parameter instructions and blocks.
+    virtual ~Function()
+    {
+        for (int i = 0; i < (int)parameterInstructions.size(); ++i)
+            delete parameterInstructions[i];
+
+        for (int i = 0; i < (int)blocks.size(); ++i)
+            delete blocks[i];
+    }
+    Id getId() const { return functionInstruction.getResultId(); }
+    Id getParamId(int p) { return parameterInstructions[p]->getResultId(); }
+
+    // Takes ownership of 'block'.
+    void addBlock(Block* block) { blocks.push_back(block); }
+    // Removes and frees 'block'; asserts that it actually belongs here.
+    void removeBlock(Block* block)
+    {
+        auto found = find(blocks.begin(), blocks.end(), block);
+        assert(found != blocks.end());
+        blocks.erase(found);
+        delete block;
+    }
+
+    Module& getParent() const { return parent; }
+    Block* getEntryBlock() const { return blocks.front(); }
+    Block* getLastBlock() const { return blocks.back(); }
+    const std::vector<Block*>& getBlocks() const { return blocks; }
+    void addLocalVariable(std::unique_ptr<Instruction> inst);
+    Id getReturnType() const { return functionInstruction.getTypeId(); }
+    // Emits OpFunction, the parameters, the blocks (in readable CFG order),
+    // then a synthesized OpFunctionEnd.
+    void dump(std::vector<unsigned int>& out) const
+    {
+        // OpFunction
+        functionInstruction.dump(out);
+
+        // OpFunctionParameter
+        for (int p = 0; p < (int)parameterInstructions.size(); ++p)
+            parameterInstructions[p]->dump(out);
+
+        // Blocks
+        inReadableOrder(blocks[0], [&out](const Block* b) { b->dump(out); });
+        Instruction end(0, 0, OpFunctionEnd);
+        end.dump(out);
+    }
+
+protected:
+    Function(const Function&);         // declared but not defined: non-copyable
+    Function& operator=(Function&);
+
+    Module& parent;
+    Instruction functionInstruction;
+    std::vector<Instruction*> parameterInstructions;
+    std::vector<Block*> blocks;
+};
+
+//
+// SPIR-V IR Module.
+//
+
+// The whole SPIR-V module: an owning list of Functions plus a table mapping
+// each result id to the instruction that produced it.
+class Module {
+public:
+    Module() {}
+    // Owns the functions added via addFunction(). The original left this as
+    // "// TODO delete things", leaking every Function (and, through its
+    // destructor, its parameters and blocks).
+    virtual ~Module()
+    {
+        for (int f = 0; f < (int)functions.size(); ++f)
+            delete functions[f];
+    }
+
+    // Takes ownership of 'fun'; freed in ~Module.
+    void addFunction(Function *fun) { functions.push_back(fun); }
+
+    // Record 'instruction' as the producer of its result id, growing the
+    // id -> instruction table on demand (with some slack to limit resizes).
+    // Does not take ownership.
+    void mapInstruction(Instruction *instruction)
+    {
+        spv::Id resultId = instruction->getResultId();
+        // map the instruction's result id
+        if (resultId >= idToInstruction.size())
+            idToInstruction.resize(resultId + 16);
+        idToInstruction[resultId] = instruction;
+    }
+
+    Instruction* getInstruction(Id id) const { return idToInstruction[id]; }
+    const std::vector<Function*>& getFunctions() const { return functions; }
+    spv::Id getTypeId(Id resultId) const { return idToInstruction[resultId]->getTypeId(); }
+    // 'typeId' must name an OpTypePointer; its first operand is the
+    // storage class.
+    StorageClass getStorageClass(Id typeId) const
+    {
+        assert(idToInstruction[typeId]->getOpCode() == spv::OpTypePointer);
+        return (StorageClass)idToInstruction[typeId]->getImmediateOperand(0);
+    }
+
+    // Append the binary form of every function, in insertion order.
+    void dump(std::vector<unsigned int>& out) const
+    {
+        for (int f = 0; f < (int)functions.size(); ++f)
+            functions[f]->dump(out);
+    }
+
+protected:
+    Module(const Module&);             // declared but not defined: non-copyable
+    std::vector<Function*> functions;
+
+    // map from result id to instruction having that result id
+    std::vector<Instruction*> idToInstruction;
+
+    // map from a result id to its type id
+};
+
+//
+// Implementation (it's here due to circular type definitions).
+//
+
+// Add both
+// - the OpFunction instruction
+// - all the OpFunctionParameter instructions
+// Also registers itself and its parameters with the parent Module, so the
+// Module must outlive the Function. (__inline is used project-wide in this
+// header for the out-of-class definitions.)
+__inline Function::Function(Id id, Id resultType, Id functionType, Id firstParamId, Module& parent)
+    : parent(parent), functionInstruction(id, resultType, OpFunction)
+{
+    // OpFunction
+    functionInstruction.addImmediateOperand(FunctionControlMaskNone);
+    functionInstruction.addIdOperand(functionType);
+    parent.mapInstruction(&functionInstruction);
+    parent.addFunction(this);
+
+    // OpFunctionParameter: the function-type instruction's operands are
+    // the return type followed by one type id per parameter.
+    Instruction* typeInst = parent.getInstruction(functionType);
+    int numParams = typeInst->getNumOperands() - 1;
+    for (int p = 0; p < numParams; ++p) {
+        Instruction* param = new Instruction(firstParamId + p, typeInst->getIdOperand(p + 1), OpFunctionParameter);
+        parent.mapInstruction(param);
+        parameterInstructions.push_back(param);
+    }
+}
+
+// Local variables live in the entry block (blocks[0]) and are registered
+// with the module's id table.
+__inline void Function::addLocalVariable(std::unique_ptr<Instruction> inst)
+{
+    Instruction* raw_instruction = inst.get();
+    blocks[0]->addLocalVariable(std::move(inst));
+    parent.mapInstruction(raw_instruction);
+}
+
+// Every block starts life with its OpLabel, which is also registered with
+// the module's id table. Note: does NOT add itself to the parent Function;
+// callers do that via Function::addBlock.
+__inline Block::Block(Id id, Function& parent) : parent(parent), unreachable(false)
+{
+    instructions.push_back(std::unique_ptr<Instruction>(new Instruction(id, NoType, OpLabel)));
+    instructions.back()->setBlock(this);
+    parent.getParent().mapInstruction(instructions.back().get());
+}
+
+// Appends and takes ownership; result-producing instructions are also
+// registered with the module's id table.
+__inline void Block::addInstruction(std::unique_ptr<Instruction> inst)
+{
+    Instruction* raw_instruction = inst.get();
+    instructions.push_back(std::move(inst));
+    raw_instruction->setBlock(this);
+    if (raw_instruction->getResultId())
+        parent.getParent().mapInstruction(raw_instruction);
+}
+
+};  // end spv namespace
+
+#endif // spvIR_H

+ 49 - 0
3rdparty/glslang/StandAlone/CMakeLists.txt

@@ -0,0 +1,49 @@
+# Library exposing the default TBuiltInResource limits (ResourceLimits.cpp) so
+# the standalone tools and embedders share one definition instead of each
+# hard-coding the table.
+add_library(glslang-default-resource-limits
+    ${CMAKE_CURRENT_SOURCE_DIR}/ResourceLimits.cpp
+)
+set_property(TARGET glslang-default-resource-limits PROPERTY FOLDER glslang)
+
+target_include_directories(glslang-default-resource-limits
+    PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}
+    PUBLIC ${PROJECT_SOURCE_DIR}
+)
+
+set(SOURCES StandAlone.cpp)
+set(REMAPPER_SOURCES spirv-remap.cpp)
+
+# Command-line tools: the reference compiler/validator and the SPIR-V remapper.
+add_executable(glslangValidator ${SOURCES})
+add_executable(spirv-remap ${REMAPPER_SOURCES})
+set_property(TARGET glslangValidator PROPERTY FOLDER tools)
+set_property(TARGET spirv-remap PROPERTY FOLDER tools)
+glslang_set_link_args(glslangValidator)
+glslang_set_link_args(spirv-remap)
+
+set(LIBRARIES
+    glslang
+    OGLCompiler
+    OSDependent
+    HLSL
+    SPIRV
+    SPVRemapper
+    glslang-default-resource-limits)
+
+# Platform link dependencies: psapi on Windows; pthread on UNIX except Android
+# (Android's libc provides the threading primitives without a separate library).
+if(WIN32)
+    set(LIBRARIES ${LIBRARIES} psapi)
+elseif(UNIX)
+    if(NOT ANDROID)
+        set(LIBRARIES ${LIBRARIES} pthread)
+    endif()
+endif(WIN32)
+
+target_link_libraries(glslangValidator ${LIBRARIES})
+target_link_libraries(spirv-remap ${LIBRARIES})
+
+if(WIN32)
+    source_group("Source" FILES ${SOURCES})
+endif(WIN32)
+
+install(TARGETS glslangValidator
+        RUNTIME DESTINATION bin)
+
+install(TARGETS spirv-remap
+        RUNTIME DESTINATION bin)

+ 458 - 0
3rdparty/glslang/StandAlone/ResourceLimits.cpp

@@ -0,0 +1,458 @@
+//
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of Google Inc. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#include <cstdlib>
+#include <cstring>
+#include <sstream>
+#include <cctype>
+
+#include "ResourceLimits.h"
+
+namespace glslang {
+
+// Default resource limits used when no .conf file is supplied. This is a
+// positional aggregate initializer, so the value order must exactly match the
+// field order of TBuiltInResource (glslang/Include/ResourceLimits.h); the
+// /* .Name = */ comments record which field each value initializes.
+const TBuiltInResource DefaultTBuiltInResource = {
+    /* .MaxLights = */ 32,
+    /* .MaxClipPlanes = */ 6,
+    /* .MaxTextureUnits = */ 32,
+    /* .MaxTextureCoords = */ 32,
+    /* .MaxVertexAttribs = */ 64,
+    /* .MaxVertexUniformComponents = */ 4096,
+    /* .MaxVaryingFloats = */ 64,
+    /* .MaxVertexTextureImageUnits = */ 32,
+    /* .MaxCombinedTextureImageUnits = */ 80,
+    /* .MaxTextureImageUnits = */ 32,
+    /* .MaxFragmentUniformComponents = */ 4096,
+    /* .MaxDrawBuffers = */ 32,
+    /* .MaxVertexUniformVectors = */ 128,
+    /* .MaxVaryingVectors = */ 8,
+    /* .MaxFragmentUniformVectors = */ 16,
+    /* .MaxVertexOutputVectors = */ 16,
+    /* .MaxFragmentInputVectors = */ 15,
+    /* .MinProgramTexelOffset = */ -8,
+    /* .MaxProgramTexelOffset = */ 7,
+    /* .MaxClipDistances = */ 8,
+    /* .MaxComputeWorkGroupCountX = */ 65535,
+    /* .MaxComputeWorkGroupCountY = */ 65535,
+    /* .MaxComputeWorkGroupCountZ = */ 65535,
+    /* .MaxComputeWorkGroupSizeX = */ 1024,
+    /* .MaxComputeWorkGroupSizeY = */ 1024,
+    /* .MaxComputeWorkGroupSizeZ = */ 64,
+    /* .MaxComputeUniformComponents = */ 1024,
+    /* .MaxComputeTextureImageUnits = */ 16,
+    /* .MaxComputeImageUniforms = */ 8,
+    /* .MaxComputeAtomicCounters = */ 8,
+    /* .MaxComputeAtomicCounterBuffers = */ 1,
+    /* .MaxVaryingComponents = */ 60,
+    /* .MaxVertexOutputComponents = */ 64,
+    /* .MaxGeometryInputComponents = */ 64,
+    /* .MaxGeometryOutputComponents = */ 128,
+    /* .MaxFragmentInputComponents = */ 128,
+    /* .MaxImageUnits = */ 8,
+    /* .MaxCombinedImageUnitsAndFragmentOutputs = */ 8,
+    /* .MaxCombinedShaderOutputResources = */ 8,
+    /* .MaxImageSamples = */ 0,
+    /* .MaxVertexImageUniforms = */ 0,
+    /* .MaxTessControlImageUniforms = */ 0,
+    /* .MaxTessEvaluationImageUniforms = */ 0,
+    /* .MaxGeometryImageUniforms = */ 0,
+    /* .MaxFragmentImageUniforms = */ 8,
+    /* .MaxCombinedImageUniforms = */ 8,
+    /* .MaxGeometryTextureImageUnits = */ 16,
+    /* .MaxGeometryOutputVertices = */ 256,
+    /* .MaxGeometryTotalOutputComponents = */ 1024,
+    /* .MaxGeometryUniformComponents = */ 1024,
+    /* .MaxGeometryVaryingComponents = */ 64,
+    /* .MaxTessControlInputComponents = */ 128,
+    /* .MaxTessControlOutputComponents = */ 128,
+    /* .MaxTessControlTextureImageUnits = */ 16,
+    /* .MaxTessControlUniformComponents = */ 1024,
+    /* .MaxTessControlTotalOutputComponents = */ 4096,
+    /* .MaxTessEvaluationInputComponents = */ 128,
+    /* .MaxTessEvaluationOutputComponents = */ 128,
+    /* .MaxTessEvaluationTextureImageUnits = */ 16,
+    /* .MaxTessEvaluationUniformComponents = */ 1024,
+    /* .MaxTessPatchComponents = */ 120,
+    /* .MaxPatchVertices = */ 32,
+    /* .MaxTessGenLevel = */ 64,
+    /* .MaxViewports = */ 16,
+    /* .MaxVertexAtomicCounters = */ 0,
+    /* .MaxTessControlAtomicCounters = */ 0,
+    /* .MaxTessEvaluationAtomicCounters = */ 0,
+    /* .MaxGeometryAtomicCounters = */ 0,
+    /* .MaxFragmentAtomicCounters = */ 8,
+    /* .MaxCombinedAtomicCounters = */ 8,
+    /* .MaxAtomicCounterBindings = */ 1,
+    /* .MaxVertexAtomicCounterBuffers = */ 0,
+    /* .MaxTessControlAtomicCounterBuffers = */ 0,
+    /* .MaxTessEvaluationAtomicCounterBuffers = */ 0,
+    /* .MaxGeometryAtomicCounterBuffers = */ 0,
+    /* .MaxFragmentAtomicCounterBuffers = */ 1,
+    /* .MaxCombinedAtomicCounterBuffers = */ 1,
+    /* .MaxAtomicCounterBufferSize = */ 16384,
+    /* .MaxTransformFeedbackBuffers = */ 4,
+    /* .MaxTransformFeedbackInterleavedComponents = */ 64,
+    /* .MaxCullDistances = */ 8,
+    /* .MaxCombinedClipAndCullDistances = */ 8,
+    /* .MaxSamples = */ 4,
+    /* .limits = */ {
+        /* .nonInductiveForLoops = */ 1,
+        /* .whileLoops = */ 1,
+        /* .doWhileLoops = */ 1,
+        /* .generalUniformIndexing = */ 1,
+        /* .generalAttributeMatrixVectorIndexing = */ 1,
+        /* .generalVaryingIndexing = */ 1,
+        /* .generalSamplerIndexing = */ 1,
+        /* .generalVariableIndexing = */ 1,
+        /* .generalConstantMatrixVectorIndexing = */ 1,
+    }};
+
+// Serializes DefaultTBuiltInResource in the .conf file format: one "Name value"
+// pair per line, using the same token names that DecodeResourceLimits() parses.
+// Useful both as documentation and as a template for a user-edited config file.
+std::string GetDefaultTBuiltInResourceString()
+{
+    std::ostringstream ostream;
+
+    ostream << "MaxLights "                                 << DefaultTBuiltInResource.maxLights << "\n"
+            << "MaxClipPlanes "                             << DefaultTBuiltInResource.maxClipPlanes << "\n"
+            << "MaxTextureUnits "                           << DefaultTBuiltInResource.maxTextureUnits << "\n"
+            << "MaxTextureCoords "                          << DefaultTBuiltInResource.maxTextureCoords << "\n"
+            << "MaxVertexAttribs "                          << DefaultTBuiltInResource.maxVertexAttribs << "\n"
+            << "MaxVertexUniformComponents "                << DefaultTBuiltInResource.maxVertexUniformComponents << "\n"
+            << "MaxVaryingFloats "                          << DefaultTBuiltInResource.maxVaryingFloats << "\n"
+            << "MaxVertexTextureImageUnits "                << DefaultTBuiltInResource.maxVertexTextureImageUnits << "\n"
+            << "MaxCombinedTextureImageUnits "              << DefaultTBuiltInResource.maxCombinedTextureImageUnits << "\n"
+            << "MaxTextureImageUnits "                      << DefaultTBuiltInResource.maxTextureImageUnits << "\n"
+            << "MaxFragmentUniformComponents "              << DefaultTBuiltInResource.maxFragmentUniformComponents << "\n"
+            << "MaxDrawBuffers "                            << DefaultTBuiltInResource.maxDrawBuffers << "\n"
+            << "MaxVertexUniformVectors "                   << DefaultTBuiltInResource.maxVertexUniformVectors << "\n"
+            << "MaxVaryingVectors "                         << DefaultTBuiltInResource.maxVaryingVectors << "\n"
+            << "MaxFragmentUniformVectors "                 << DefaultTBuiltInResource.maxFragmentUniformVectors << "\n"
+            << "MaxVertexOutputVectors "                    << DefaultTBuiltInResource.maxVertexOutputVectors << "\n"
+            << "MaxFragmentInputVectors "                   << DefaultTBuiltInResource.maxFragmentInputVectors << "\n"
+            << "MinProgramTexelOffset "                     << DefaultTBuiltInResource.minProgramTexelOffset << "\n"
+            << "MaxProgramTexelOffset "                     << DefaultTBuiltInResource.maxProgramTexelOffset << "\n"
+            << "MaxClipDistances "                          << DefaultTBuiltInResource.maxClipDistances << "\n"
+            << "MaxComputeWorkGroupCountX "                 << DefaultTBuiltInResource.maxComputeWorkGroupCountX << "\n"
+            << "MaxComputeWorkGroupCountY "                 << DefaultTBuiltInResource.maxComputeWorkGroupCountY << "\n"
+            << "MaxComputeWorkGroupCountZ "                 << DefaultTBuiltInResource.maxComputeWorkGroupCountZ << "\n"
+            << "MaxComputeWorkGroupSizeX "                  << DefaultTBuiltInResource.maxComputeWorkGroupSizeX << "\n"
+            << "MaxComputeWorkGroupSizeY "                  << DefaultTBuiltInResource.maxComputeWorkGroupSizeY << "\n"
+            << "MaxComputeWorkGroupSizeZ "                  << DefaultTBuiltInResource.maxComputeWorkGroupSizeZ << "\n"
+            << "MaxComputeUniformComponents "               << DefaultTBuiltInResource.maxComputeUniformComponents << "\n"
+            << "MaxComputeTextureImageUnits "               << DefaultTBuiltInResource.maxComputeTextureImageUnits << "\n"
+            << "MaxComputeImageUniforms "                   << DefaultTBuiltInResource.maxComputeImageUniforms << "\n"
+            << "MaxComputeAtomicCounters "                  << DefaultTBuiltInResource.maxComputeAtomicCounters << "\n"
+            << "MaxComputeAtomicCounterBuffers "            << DefaultTBuiltInResource.maxComputeAtomicCounterBuffers << "\n"
+            << "MaxVaryingComponents "                      << DefaultTBuiltInResource.maxVaryingComponents << "\n"
+            << "MaxVertexOutputComponents "                 << DefaultTBuiltInResource.maxVertexOutputComponents << "\n"
+            << "MaxGeometryInputComponents "                << DefaultTBuiltInResource.maxGeometryInputComponents << "\n"
+            << "MaxGeometryOutputComponents "               << DefaultTBuiltInResource.maxGeometryOutputComponents << "\n"
+            << "MaxFragmentInputComponents "                << DefaultTBuiltInResource.maxFragmentInputComponents << "\n"
+            << "MaxImageUnits "                             << DefaultTBuiltInResource.maxImageUnits << "\n"
+            << "MaxCombinedImageUnitsAndFragmentOutputs "   << DefaultTBuiltInResource.maxCombinedImageUnitsAndFragmentOutputs << "\n"
+            << "MaxCombinedShaderOutputResources "          << DefaultTBuiltInResource.maxCombinedShaderOutputResources << "\n"
+            << "MaxImageSamples "                           << DefaultTBuiltInResource.maxImageSamples << "\n"
+            << "MaxVertexImageUniforms "                    << DefaultTBuiltInResource.maxVertexImageUniforms << "\n"
+            << "MaxTessControlImageUniforms "               << DefaultTBuiltInResource.maxTessControlImageUniforms << "\n"
+            << "MaxTessEvaluationImageUniforms "            << DefaultTBuiltInResource.maxTessEvaluationImageUniforms << "\n"
+            << "MaxGeometryImageUniforms "                  << DefaultTBuiltInResource.maxGeometryImageUniforms << "\n"
+            << "MaxFragmentImageUniforms "                  << DefaultTBuiltInResource.maxFragmentImageUniforms << "\n"
+            << "MaxCombinedImageUniforms "                  << DefaultTBuiltInResource.maxCombinedImageUniforms << "\n"
+            << "MaxGeometryTextureImageUnits "              << DefaultTBuiltInResource.maxGeometryTextureImageUnits << "\n"
+            << "MaxGeometryOutputVertices "                 << DefaultTBuiltInResource.maxGeometryOutputVertices << "\n"
+            << "MaxGeometryTotalOutputComponents "          << DefaultTBuiltInResource.maxGeometryTotalOutputComponents << "\n"
+            << "MaxGeometryUniformComponents "              << DefaultTBuiltInResource.maxGeometryUniformComponents << "\n"
+            << "MaxGeometryVaryingComponents "              << DefaultTBuiltInResource.maxGeometryVaryingComponents << "\n"
+            << "MaxTessControlInputComponents "             << DefaultTBuiltInResource.maxTessControlInputComponents << "\n"
+            << "MaxTessControlOutputComponents "            << DefaultTBuiltInResource.maxTessControlOutputComponents << "\n"
+            << "MaxTessControlTextureImageUnits "           << DefaultTBuiltInResource.maxTessControlTextureImageUnits << "\n"
+            << "MaxTessControlUniformComponents "           << DefaultTBuiltInResource.maxTessControlUniformComponents << "\n"
+            << "MaxTessControlTotalOutputComponents "       << DefaultTBuiltInResource.maxTessControlTotalOutputComponents << "\n"
+            << "MaxTessEvaluationInputComponents "          << DefaultTBuiltInResource.maxTessEvaluationInputComponents << "\n"
+            << "MaxTessEvaluationOutputComponents "         << DefaultTBuiltInResource.maxTessEvaluationOutputComponents << "\n"
+            << "MaxTessEvaluationTextureImageUnits "        << DefaultTBuiltInResource.maxTessEvaluationTextureImageUnits << "\n"
+            << "MaxTessEvaluationUniformComponents "        << DefaultTBuiltInResource.maxTessEvaluationUniformComponents << "\n"
+            << "MaxTessPatchComponents "                    << DefaultTBuiltInResource.maxTessPatchComponents << "\n"
+            << "MaxPatchVertices "                          << DefaultTBuiltInResource.maxPatchVertices << "\n"
+            << "MaxTessGenLevel "                           << DefaultTBuiltInResource.maxTessGenLevel << "\n"
+            << "MaxViewports "                              << DefaultTBuiltInResource.maxViewports << "\n"
+            << "MaxVertexAtomicCounters "                   << DefaultTBuiltInResource.maxVertexAtomicCounters << "\n"
+            << "MaxTessControlAtomicCounters "              << DefaultTBuiltInResource.maxTessControlAtomicCounters << "\n"
+            << "MaxTessEvaluationAtomicCounters "           << DefaultTBuiltInResource.maxTessEvaluationAtomicCounters << "\n"
+            << "MaxGeometryAtomicCounters "                 << DefaultTBuiltInResource.maxGeometryAtomicCounters << "\n"
+            << "MaxFragmentAtomicCounters "                 << DefaultTBuiltInResource.maxFragmentAtomicCounters << "\n"
+            << "MaxCombinedAtomicCounters "                 << DefaultTBuiltInResource.maxCombinedAtomicCounters << "\n"
+            << "MaxAtomicCounterBindings "                  << DefaultTBuiltInResource.maxAtomicCounterBindings << "\n"
+            << "MaxVertexAtomicCounterBuffers "             << DefaultTBuiltInResource.maxVertexAtomicCounterBuffers << "\n"
+            << "MaxTessControlAtomicCounterBuffers "        << DefaultTBuiltInResource.maxTessControlAtomicCounterBuffers << "\n"
+            << "MaxTessEvaluationAtomicCounterBuffers "     << DefaultTBuiltInResource.maxTessEvaluationAtomicCounterBuffers << "\n"
+            << "MaxGeometryAtomicCounterBuffers "           << DefaultTBuiltInResource.maxGeometryAtomicCounterBuffers << "\n"
+            << "MaxFragmentAtomicCounterBuffers "           << DefaultTBuiltInResource.maxFragmentAtomicCounterBuffers << "\n"
+            << "MaxCombinedAtomicCounterBuffers "           << DefaultTBuiltInResource.maxCombinedAtomicCounterBuffers << "\n"
+            << "MaxAtomicCounterBufferSize "                << DefaultTBuiltInResource.maxAtomicCounterBufferSize << "\n"
+            << "MaxTransformFeedbackBuffers "               << DefaultTBuiltInResource.maxTransformFeedbackBuffers << "\n"
+            << "MaxTransformFeedbackInterleavedComponents " << DefaultTBuiltInResource.maxTransformFeedbackInterleavedComponents << "\n"
+            << "MaxCullDistances "                          << DefaultTBuiltInResource.maxCullDistances << "\n"
+            << "MaxCombinedClipAndCullDistances "           << DefaultTBuiltInResource.maxCombinedClipAndCullDistances << "\n"
+            << "MaxSamples "                                << DefaultTBuiltInResource.maxSamples << "\n"
+
+            << "nonInductiveForLoops "                      << DefaultTBuiltInResource.limits.nonInductiveForLoops << "\n"
+            << "whileLoops "                                << DefaultTBuiltInResource.limits.whileLoops << "\n"
+            << "doWhileLoops "                              << DefaultTBuiltInResource.limits.doWhileLoops << "\n"
+            << "generalUniformIndexing "                    << DefaultTBuiltInResource.limits.generalUniformIndexing << "\n"
+            << "generalAttributeMatrixVectorIndexing "      << DefaultTBuiltInResource.limits.generalAttributeMatrixVectorIndexing << "\n"
+            << "generalVaryingIndexing "                    << DefaultTBuiltInResource.limits.generalVaryingIndexing << "\n"
+            << "generalSamplerIndexing "                    << DefaultTBuiltInResource.limits.generalSamplerIndexing << "\n"
+            << "generalVariableIndexing "                   << DefaultTBuiltInResource.limits.generalVariableIndexing << "\n"
+            << "generalConstantMatrixVectorIndexing "       << DefaultTBuiltInResource.limits.generalConstantMatrixVectorIndexing << "\n"
+      ;
+
+    return ostream.str();
+}
+
+// Parses a whitespace-separated "Name value" configuration string (the .conf
+// file format produced by GetDefaultTBuiltInResourceString) and writes each
+// recognized limit into 'resources'. Stops at the first malformed pair;
+// unknown limit names are warned about and skipped. 'config' is read-only
+// despite the non-const signature (kept for source compatibility).
+void DecodeResourceLimits(TBuiltInResource* resources, char* config)
+{
+    static const char* delims = " \t\n\r";
+
+    size_t pos     = 0;
+    std::string configStr(config);
+
+    while ((pos = configStr.find_first_not_of(delims, pos)) != std::string::npos) {
+        const size_t token_s = pos;
+        const size_t token_e = configStr.find_first_of(delims, token_s);
+        const size_t value_s = configStr.find_first_not_of(delims, token_e);
+        const size_t value_e = configStr.find_first_of(delims, value_s);
+        pos = value_e;
+
+        // Faster to use compare(), but preferring readability.
+        const std::string tokenStr = configStr.substr(token_s, token_e-token_s);
+
+        // A trailing name with no value leaves value_s == npos; this must be
+        // checked before calling substr(), which throws std::out_of_range when
+        // its position argument is past the end of the string.
+        if (value_s == std::string::npos) {
+            printf("Error: '%s' bad .conf file.  Each name must be followed by one number.\n",
+                   tokenStr.c_str());
+            return;
+        }
+
+        const std::string valueStr = configStr.substr(value_s, value_e-value_s);
+
+        // Cast to unsigned char: passing a negative plain char to isdigit() is
+        // undefined behavior.
+        if (! (valueStr[0] == '-' || isdigit(static_cast<unsigned char>(valueStr[0])))) {
+            printf("Error: '%s' bad .conf file.  Each name must be followed by one number.\n",
+                   valueStr.c_str());
+            return;
+        }
+
+        const int value = std::atoi(valueStr.c_str());
+
+        if (tokenStr == "MaxLights")
+            resources->maxLights = value;
+        else if (tokenStr == "MaxClipPlanes")
+            resources->maxClipPlanes = value;
+        else if (tokenStr == "MaxTextureUnits")
+            resources->maxTextureUnits = value;
+        else if (tokenStr == "MaxTextureCoords")
+            resources->maxTextureCoords = value;
+        else if (tokenStr == "MaxVertexAttribs")
+            resources->maxVertexAttribs = value;
+        else if (tokenStr == "MaxVertexUniformComponents")
+            resources->maxVertexUniformComponents = value;
+        else if (tokenStr == "MaxVaryingFloats")
+            resources->maxVaryingFloats = value;
+        else if (tokenStr == "MaxVertexTextureImageUnits")
+            resources->maxVertexTextureImageUnits = value;
+        else if (tokenStr == "MaxCombinedTextureImageUnits")
+            resources->maxCombinedTextureImageUnits = value;
+        else if (tokenStr == "MaxTextureImageUnits")
+            resources->maxTextureImageUnits = value;
+        else if (tokenStr == "MaxFragmentUniformComponents")
+            resources->maxFragmentUniformComponents = value;
+        else if (tokenStr == "MaxDrawBuffers")
+            resources->maxDrawBuffers = value;
+        else if (tokenStr == "MaxVertexUniformVectors")
+            resources->maxVertexUniformVectors = value;
+        else if (tokenStr == "MaxVaryingVectors")
+            resources->maxVaryingVectors = value;
+        else if (tokenStr == "MaxFragmentUniformVectors")
+            resources->maxFragmentUniformVectors = value;
+        else if (tokenStr == "MaxVertexOutputVectors")
+            resources->maxVertexOutputVectors = value;
+        else if (tokenStr == "MaxFragmentInputVectors")
+            resources->maxFragmentInputVectors = value;
+        else if (tokenStr == "MinProgramTexelOffset")
+            resources->minProgramTexelOffset = value;
+        else if (tokenStr == "MaxProgramTexelOffset")
+            resources->maxProgramTexelOffset = value;
+        else if (tokenStr == "MaxClipDistances")
+            resources->maxClipDistances = value;
+        else if (tokenStr == "MaxComputeWorkGroupCountX")
+            resources->maxComputeWorkGroupCountX = value;
+        else if (tokenStr == "MaxComputeWorkGroupCountY")
+            resources->maxComputeWorkGroupCountY = value;
+        else if (tokenStr == "MaxComputeWorkGroupCountZ")
+            resources->maxComputeWorkGroupCountZ = value;
+        else if (tokenStr == "MaxComputeWorkGroupSizeX")
+            resources->maxComputeWorkGroupSizeX = value;
+        else if (tokenStr == "MaxComputeWorkGroupSizeY")
+            resources->maxComputeWorkGroupSizeY = value;
+        else if (tokenStr == "MaxComputeWorkGroupSizeZ")
+            resources->maxComputeWorkGroupSizeZ = value;
+        else if (tokenStr == "MaxComputeUniformComponents")
+            resources->maxComputeUniformComponents = value;
+        else if (tokenStr == "MaxComputeTextureImageUnits")
+            resources->maxComputeTextureImageUnits = value;
+        else if (tokenStr == "MaxComputeImageUniforms")
+            resources->maxComputeImageUniforms = value;
+        else if (tokenStr == "MaxComputeAtomicCounters")
+            resources->maxComputeAtomicCounters = value;
+        else if (tokenStr == "MaxComputeAtomicCounterBuffers")
+            resources->maxComputeAtomicCounterBuffers = value;
+        else if (tokenStr == "MaxVaryingComponents")
+            resources->maxVaryingComponents = value;
+        else if (tokenStr == "MaxVertexOutputComponents")
+            resources->maxVertexOutputComponents = value;
+        else if (tokenStr == "MaxGeometryInputComponents")
+            resources->maxGeometryInputComponents = value;
+        else if (tokenStr == "MaxGeometryOutputComponents")
+            resources->maxGeometryOutputComponents = value;
+        else if (tokenStr == "MaxFragmentInputComponents")
+            resources->maxFragmentInputComponents = value;
+        else if (tokenStr == "MaxImageUnits")
+            resources->maxImageUnits = value;
+        else if (tokenStr == "MaxCombinedImageUnitsAndFragmentOutputs")
+            resources->maxCombinedImageUnitsAndFragmentOutputs = value;
+        else if (tokenStr == "MaxCombinedShaderOutputResources")
+            resources->maxCombinedShaderOutputResources = value;
+        else if (tokenStr == "MaxImageSamples")
+            resources->maxImageSamples = value;
+        else if (tokenStr == "MaxVertexImageUniforms")
+            resources->maxVertexImageUniforms = value;
+        else if (tokenStr == "MaxTessControlImageUniforms")
+            resources->maxTessControlImageUniforms = value;
+        else if (tokenStr == "MaxTessEvaluationImageUniforms")
+            resources->maxTessEvaluationImageUniforms = value;
+        else if (tokenStr == "MaxGeometryImageUniforms")
+            resources->maxGeometryImageUniforms = value;
+        else if (tokenStr == "MaxFragmentImageUniforms")
+            resources->maxFragmentImageUniforms = value;
+        else if (tokenStr == "MaxCombinedImageUniforms")
+            resources->maxCombinedImageUniforms = value;
+        else if (tokenStr == "MaxGeometryTextureImageUnits")
+            resources->maxGeometryTextureImageUnits = value;
+        else if (tokenStr == "MaxGeometryOutputVertices")
+            resources->maxGeometryOutputVertices = value;
+        else if (tokenStr == "MaxGeometryTotalOutputComponents")
+            resources->maxGeometryTotalOutputComponents = value;
+        else if (tokenStr == "MaxGeometryUniformComponents")
+            resources->maxGeometryUniformComponents = value;
+        else if (tokenStr == "MaxGeometryVaryingComponents")
+            resources->maxGeometryVaryingComponents = value;
+        else if (tokenStr == "MaxTessControlInputComponents")
+            resources->maxTessControlInputComponents = value;
+        else if (tokenStr == "MaxTessControlOutputComponents")
+            resources->maxTessControlOutputComponents = value;
+        else if (tokenStr == "MaxTessControlTextureImageUnits")
+            resources->maxTessControlTextureImageUnits = value;
+        else if (tokenStr == "MaxTessControlUniformComponents")
+            resources->maxTessControlUniformComponents = value;
+        else if (tokenStr == "MaxTessControlTotalOutputComponents")
+            resources->maxTessControlTotalOutputComponents = value;
+        else if (tokenStr == "MaxTessEvaluationInputComponents")
+            resources->maxTessEvaluationInputComponents = value;
+        else if (tokenStr == "MaxTessEvaluationOutputComponents")
+            resources->maxTessEvaluationOutputComponents = value;
+        else if (tokenStr == "MaxTessEvaluationTextureImageUnits")
+            resources->maxTessEvaluationTextureImageUnits = value;
+        else if (tokenStr == "MaxTessEvaluationUniformComponents")
+            resources->maxTessEvaluationUniformComponents = value;
+        else if (tokenStr == "MaxTessPatchComponents")
+            resources->maxTessPatchComponents = value;
+        else if (tokenStr == "MaxPatchVertices")
+            resources->maxPatchVertices = value;
+        else if (tokenStr == "MaxTessGenLevel")
+            resources->maxTessGenLevel = value;
+        else if (tokenStr == "MaxViewports")
+            resources->maxViewports = value;
+        else if (tokenStr == "MaxVertexAtomicCounters")
+            resources->maxVertexAtomicCounters = value;
+        else if (tokenStr == "MaxTessControlAtomicCounters")
+            resources->maxTessControlAtomicCounters = value;
+        else if (tokenStr == "MaxTessEvaluationAtomicCounters")
+            resources->maxTessEvaluationAtomicCounters = value;
+        else if (tokenStr == "MaxGeometryAtomicCounters")
+            resources->maxGeometryAtomicCounters = value;
+        else if (tokenStr == "MaxFragmentAtomicCounters")
+            resources->maxFragmentAtomicCounters = value;
+        else if (tokenStr == "MaxCombinedAtomicCounters")
+            resources->maxCombinedAtomicCounters = value;
+        else if (tokenStr == "MaxAtomicCounterBindings")
+            resources->maxAtomicCounterBindings = value;
+        else if (tokenStr == "MaxVertexAtomicCounterBuffers")
+            resources->maxVertexAtomicCounterBuffers = value;
+        else if (tokenStr == "MaxTessControlAtomicCounterBuffers")
+            resources->maxTessControlAtomicCounterBuffers = value;
+        else if (tokenStr == "MaxTessEvaluationAtomicCounterBuffers")
+            resources->maxTessEvaluationAtomicCounterBuffers = value;
+        else if (tokenStr == "MaxGeometryAtomicCounterBuffers")
+            resources->maxGeometryAtomicCounterBuffers = value;
+        else if (tokenStr == "MaxFragmentAtomicCounterBuffers")
+            resources->maxFragmentAtomicCounterBuffers = value;
+        else if (tokenStr == "MaxCombinedAtomicCounterBuffers")
+            resources->maxCombinedAtomicCounterBuffers = value;
+        else if (tokenStr == "MaxAtomicCounterBufferSize")
+            resources->maxAtomicCounterBufferSize = value;
+        else if (tokenStr == "MaxTransformFeedbackBuffers")
+            resources->maxTransformFeedbackBuffers = value;
+        else if (tokenStr == "MaxTransformFeedbackInterleavedComponents")
+            resources->maxTransformFeedbackInterleavedComponents = value;
+        else if (tokenStr == "MaxCullDistances")
+            resources->maxCullDistances = value;
+        else if (tokenStr == "MaxCombinedClipAndCullDistances")
+            resources->maxCombinedClipAndCullDistances = value;
+        else if (tokenStr == "MaxSamples")
+            resources->maxSamples = value;
+        else if (tokenStr == "nonInductiveForLoops")
+            resources->limits.nonInductiveForLoops = (value != 0);
+        else if (tokenStr == "whileLoops")
+            resources->limits.whileLoops = (value != 0);
+        else if (tokenStr == "doWhileLoops")
+            resources->limits.doWhileLoops = (value != 0);
+        else if (tokenStr == "generalUniformIndexing")
+            resources->limits.generalUniformIndexing = (value != 0);
+        else if (tokenStr == "generalAttributeMatrixVectorIndexing")
+            resources->limits.generalAttributeMatrixVectorIndexing = (value != 0);
+        else if (tokenStr == "generalVaryingIndexing")
+            resources->limits.generalVaryingIndexing = (value != 0);
+        else if (tokenStr == "generalSamplerIndexing")
+            resources->limits.generalSamplerIndexing = (value != 0);
+        else if (tokenStr == "generalVariableIndexing")
+            resources->limits.generalVariableIndexing = (value != 0);
+        else if (tokenStr == "generalConstantMatrixVectorIndexing")
+            resources->limits.generalConstantMatrixVectorIndexing = (value != 0);
+        else
+            printf("Warning: unrecognized limit (%s) in configuration file.\n", tokenStr.c_str());
+
+    }
+}
+
+}  // end namespace glslang

+ 57 - 0
3rdparty/glslang/StandAlone/ResourceLimits.h

@@ -0,0 +1,57 @@
+//
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of Google Inc. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+// NOTE(review): identifiers beginning with an underscore followed by an
+// uppercase letter are reserved for the implementation; a rename of this
+// guard would be safer, but is kept as-is to preserve the file byte-for-byte.
+#ifndef _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_
+#define _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_
+
+#include <string>
+
+#include "glslang/Include/ResourceLimits.h"
+
+namespace glslang {
+
+// These are the default resources for TBuiltInResources, used for both
+//  - parsing this string for the case where the user didn't supply one,
+//  - dumping out a template for user construction of a config file.
+extern const TBuiltInResource DefaultTBuiltInResource;
+
+// Returns the DefaultTBuiltInResource as a human-readable string.
+std::string GetDefaultTBuiltInResourceString();
+
+// Decodes the resource limits from |config| to |resources|.
+// |config| holds whitespace-separated name/value pairs (the format emitted by
+// GetDefaultTBuiltInResourceString()); unrecognized names produce a warning.
+// NOTE(review): the parser appears to tokenize |config| in place — treat the
+// buffer as mutable; confirm against the implementation.
+void DecodeResourceLimits(TBuiltInResource* resources, char* config);
+
+}  // end namespace glslang
+
+#endif  // _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_

+ 1082 - 0
3rdparty/glslang/StandAlone/StandAlone.cpp

@@ -0,0 +1,1082 @@
+//
+//Copyright (C) 2002-2005  3Dlabs Inc. Ltd.
+//Copyright (C) 2013-2016 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+// this only applies to the standalone wrapper, not the front end in general
+#define _CRT_SECURE_NO_WARNINGS
+
+#include "ResourceLimits.h"
+#include "Worklist.h"
+#include "./../glslang/Include/ShHandle.h"
+#include "./../glslang/Include/revision.h"
+#include "./../glslang/Public/ShaderLang.h"
+#include "../SPIRV/GlslangToSpv.h"
+#include "../SPIRV/GLSL.std.450.h"
+#include "../SPIRV/doc.h"
+#include "../SPIRV/disassemble.h"
+#include <cstring>
+#include <cstdlib>
+#include <cctype>
+#include <cmath>
+#include <array>
+
+#include "../glslang/OSDependent/osinclude.h"
+
+extern "C" {
+    SH_IMPORT_EXPORT void ShOutputHtml();
+}
+
+// Command-line options
+// Bit flags: argument parsing ORs these into the global 'Options', and the
+// rest of the tool tests them with (Options & EOption...).
+enum TOptions {
+    EOptionNone               = 0,
+    EOptionIntermediate       = (1 <<  0),
+    EOptionSuppressInfolog    = (1 <<  1),
+    EOptionMemoryLeakMode     = (1 <<  2),
+    EOptionRelaxedErrors      = (1 <<  3),
+    EOptionGiveWarnings       = (1 <<  4),
+    EOptionLinkProgram        = (1 <<  5),
+    EOptionMultiThreaded      = (1 <<  6),
+    EOptionDumpConfig         = (1 <<  7),
+    EOptionDumpReflection     = (1 <<  8),
+    EOptionSuppressWarnings   = (1 <<  9),
+    EOptionDumpVersions       = (1 << 10),
+    EOptionSpv                = (1 << 11),
+    EOptionHumanReadableSpv   = (1 << 12),
+    EOptionVulkanRules        = (1 << 13),
+    EOptionDefaultDesktop     = (1 << 14),
+    EOptionOutputPreprocessed = (1 << 15),
+    EOptionOutputHexadecimal  = (1 << 16),
+    EOptionReadHlsl           = (1 << 17),
+    EOptionCascadingErrors    = (1 << 18),
+    EOptionAutoMapBindings    = (1 << 19),
+    EOptionFlattenUniformArrays = (1 << 20),
+    EOptionNoStorageFormat    = (1 << 21),
+};
+
+//
+// Return codes from main/exit().
+// ESuccess must stay 0; the others are distinct non-zero process exit codes.
+//
+enum TFailCode {
+    ESuccess = 0,
+    EFailUsage,
+    EFailCompile,
+    EFailLink,
+    EFailCompilerCreate,
+    EFailThreadCreate,
+    EFailLinkerCreate
+};
+
+//
+// Forward declarations.
+//
+EShLanguage FindLanguage(const std::string& name, bool parseSuffix=true);
+void CompileFile(const char* fileName, ShHandle);
+void usage();
+void FreeFileData(char** data);
+char** ReadFileData(const char* fileName);
+void InfoLogMsg(const char* msg, const char* name, const int num);
+
+// Globally track if any compile or link failure.
+bool CompileFailed = false;
+bool LinkFailed = false;
+
+// Use to test breaking up a single shader file into multiple strings.
+// Set in ReadFileData().
+int NumShaderStrings;
+
+// Active resource limits, filled in by ProcessConfigFile().
+TBuiltInResource Resources;
+// Path of a user-supplied .conf limits file; empty means "use defaults".
+std::string ConfigFile;
+
+//
+// Parse either a .conf file provided by the user or the default from glslang::DefaultTBuiltInResource
+//
+// Reads the global ConfigFile (if non-empty) via ReadFileData(); on read
+// failure it warns and prints usage.  The decoded limits land in the global
+// Resources; when no configuration is available, the built-in defaults are
+// copied in instead.
+//
+void ProcessConfigFile()
+{
+    char** configStrings = 0;
+    char* config = 0;
+    if (ConfigFile.size() > 0) {
+        configStrings = ReadFileData(ConfigFile.c_str());
+        if (configStrings)
+            config = *configStrings;
+        else {
+            printf("Error opening configuration file; will instead use the default configuration\n");
+            usage();
+        }
+    }
+
+    // No user-supplied configuration: take the built-in defaults.
+    if (config == 0) {
+        Resources = glslang::DefaultTBuiltInResource;
+        return;
+    }
+
+    glslang::DecodeResourceLimits(&Resources, config);
+
+    // Reaching here implies configStrings is non-null (config came from
+    // *configStrings; the other path returned above), so the former
+    // "else delete[] config;" branch was unreachable and has been removed.
+    FreeFileData(configStrings);
+}
+
+// thread-safe list of shaders to asynchronously grab and compile
+glslang::TWorklist Worklist;
+
+// array of unique places to leave the shader names and infologs for the asynchronous compiles
+glslang::TWorkItem** Work = 0;
+int NumWorkItems = 0;
+
+// Bitwise OR of TOptions values, accumulated by ProcessArguments().
+int Options = 0;
+const char* ExecutableName = nullptr;   // argv[0], for error messages
+const char* binaryFileName = nullptr;   // -o <file>
+const char* entryPointName = nullptr;   // -e <entry-point>
+const char* shaderStageName = nullptr;  // -S <stage>, overrides filename suffix
+
+// Per-stage binding-number offsets, set by the --shift-*-binding options.
+std::array<unsigned int, EShLangCount> baseSamplerBinding;
+std::array<unsigned int, EShLangCount> baseTextureBinding;
+std::array<unsigned int, EShLangCount> baseImageBinding;
+std::array<unsigned int, EShLangCount> baseUboBinding;
+
+//
+// Create the default name for saving a binary if -o is not provided.
+//
+// Returns the user's -o name when one was given; otherwise a fixed,
+// stage-specific default such as "vert.spv".
+//
+const char* GetBinaryName(EShLanguage stage)
+{
+    // An explicit -o name always wins.
+    if (binaryFileName != nullptr)
+        return binaryFileName;
+
+    // No -o: derive the conventional per-stage default.
+    switch (stage) {
+    case EShLangVertex:          return "vert.spv";
+    case EShLangTessControl:     return "tesc.spv";
+    case EShLangTessEvaluation:  return "tese.spv";
+    case EShLangGeometry:        return "geom.spv";
+    case EShLangFragment:        return "frag.spv";
+    case EShLangCompute:         return "comp.spv";
+    default:                     return "unknown";
+    }
+}
+
+//
+// *.conf => this is a config file that can set limits/resources
+//
+// When |name| ends in ".conf", records it in the global ConfigFile and
+// returns true; otherwise leaves ConfigFile untouched and returns false.
+//
+bool SetConfigFile(const std::string& name)
+{
+    static const std::string confSuffix = ".conf";
+
+    if (name.size() < confSuffix.size())
+        return false;
+    if (name.compare(name.size() - confSuffix.size(), confSuffix.size(), confSuffix) != 0)
+        return false;
+
+    ConfigFile = name;
+    return true;
+}
+
+//
+// Give error and exit with failure code.
+//
+// Prints to stdout (matching the rest of this tool's diagnostics) and then
+// terminates the process with EFailUsage; this function never returns.
+//
+void Error(const char* message)
+{
+    printf("%s: Error %s (use -h for usage)\n", ExecutableName, message);
+    exit(EFailUsage);
+}
+
+//
+// Process an optional binding base of the form:
+//   --argname [stage] base
+// Where stage is one of the forms accepted by FindLanguage, and base is an integer
+//
+// On success, consumes the parsed value(s) from argc/argv and stores the base
+// either for the named stage or, when no stage is given, for every stage.
+// Fatally flawed input goes through usage().
+//
+void ProcessBindingBase(int& argc, char**& argv, std::array<unsigned int, EShLangCount>& base)
+{
+    if (argc < 2)
+        usage();
+
+    // Cast to unsigned char before isdigit(): passing a plain char that may
+    // hold a negative value is undefined behavior (CERT STR37-C).
+    if (!isdigit((unsigned char)argv[1][0])) {
+        if (argc < 3) // this form needs one more argument
+            usage();
+
+        // Parse form: --argname stage base
+        const EShLanguage lang = FindLanguage(argv[1], false);
+        base[lang] = atoi(argv[2]);
+        argc -= 2;
+        argv += 2;
+    } else {
+        // Parse form: --argname base (applies to all stages)
+        for (int lang = 0; lang < EShLangCount; ++lang)
+            base[lang] = atoi(argv[1]);
+
+        argc--;
+        argv++;
+    }
+}
+
+//
+// Do all command-line argument parsing.  This includes building up the work-items
+// to be processed later, and saving all the command-line options.
+//
+// Does not return (it exits) if command-line is fatally flawed.
+//
+void ProcessArguments(int argc, char* argv[])
+{
+    baseSamplerBinding.fill(0);
+    baseTextureBinding.fill(0);
+    baseImageBinding.fill(0);
+    baseUboBinding.fill(0);
+
+    ExecutableName = argv[0];
+    NumWorkItems = argc;  // will include some empties where the '-' options were, but it doesn't matter, they'll be 0
+    Work = new glslang::TWorkItem*[NumWorkItems];
+    for (int w = 0; w < NumWorkItems; ++w)
+        Work[w] = 0;
+
+    argc--;
+    argv++;
+    for (; argc >= 1; argc--, argv++) {
+        if (argv[0][0] == '-') {
+            switch (argv[0][1]) {
+            case '-':
+                {
+                    std::string lowerword(argv[0]+2);
+                    std::transform(lowerword.begin(), lowerword.end(), lowerword.begin(), ::tolower);
+
+                    // handle --word style options
+                    if (lowerword == "shift-sampler-bindings" || // synonyms
+                        lowerword == "shift-sampler-binding"  ||
+                        lowerword == "ssb") {
+                        ProcessBindingBase(argc, argv, baseSamplerBinding);
+                    } else if (lowerword == "shift-texture-bindings" ||  // synonyms
+                               lowerword == "shift-texture-binding"  ||
+                               lowerword == "stb") {
+                        ProcessBindingBase(argc, argv, baseTextureBinding);
+                    } else if (lowerword == "shift-image-bindings" ||  // synonyms
+                               lowerword == "shift-image-binding"  ||
+                               lowerword == "sib") {
+                        ProcessBindingBase(argc, argv, baseImageBinding);
+                    } else if (lowerword == "shift-ubo-bindings" ||  // synonyms
+                               lowerword == "shift-ubo-binding"  ||
+                               lowerword == "sub") {
+                        ProcessBindingBase(argc, argv, baseUboBinding);
+                    } else if (lowerword == "auto-map-bindings" ||  // synonyms
+                               lowerword == "auto-map-binding"  ||
+                               lowerword == "amb") {
+                        Options |= EOptionAutoMapBindings;
+                    } else if (lowerword == "flatten-uniform-arrays" || // synonyms
+                               lowerword == "flatten-uniform-array"  ||
+                               lowerword == "fua") {
+                        Options |= EOptionFlattenUniformArrays;
+                    } else if (lowerword == "no-storage-format" || // synonyms
+                               lowerword == "nsf") {
+                        Options |= EOptionNoStorageFormat;
+                    } else {
+                        usage();
+                    }
+                }
+                break;
+            case 'H':
+                Options |= EOptionHumanReadableSpv;
+                if ((Options & EOptionSpv) == 0) {
+                    // default to Vulkan
+                    Options |= EOptionSpv;
+                    Options |= EOptionVulkanRules;
+                    Options |= EOptionLinkProgram;
+                }
+                break;
+            case 'V':
+                Options |= EOptionSpv;
+                Options |= EOptionVulkanRules;
+                Options |= EOptionLinkProgram;
+                break;
+            case 'S':
+                // Require a following argument before touching argv[1]: inside
+                // this loop argc >= 1 always holds, so the previous "argc > 0"
+                // test could never fail, making its error path dead code and
+                // letting a trailing "-S" silently store a null stage name.
+                if (argc <= 1)
+                    Error("no <stage> specified for -S");
+                shaderStageName = argv[1];
+                argc--;
+                argv++;
+                break;
+            case 'G':
+                Options |= EOptionSpv;
+                Options |= EOptionLinkProgram;
+                // undo a -H default to Vulkan
+                Options &= ~EOptionVulkanRules;
+                break;
+            case 'E':
+                Options |= EOptionOutputPreprocessed;
+                break;
+            case 'c':
+                Options |= EOptionDumpConfig;
+                break;
+            case 'C':
+                Options |= EOptionCascadingErrors;
+                break;
+            case 'd':
+                Options |= EOptionDefaultDesktop;
+                break;
+            case 'D':
+                Options |= EOptionReadHlsl;
+                break;
+            case 'e':
+                // HLSL todo: entry point handle needs much more sophistication.
+                // This is okay for one compilation unit with one entry point.
+                // Same argument-availability check as -S (see comment there).
+                if (argc <= 1)
+                    Error("no <entry-point> provided for -e");
+                entryPointName = argv[1];
+                argc--;
+                argv++;
+                break;
+            case 'h':
+                usage();
+                break;
+            case 'i':
+                Options |= EOptionIntermediate;
+                break;
+            case 'l':
+                Options |= EOptionLinkProgram;
+                break;
+            case 'm':
+                Options |= EOptionMemoryLeakMode;
+                break;
+            case 'o':
+                // Same argument-availability check as -S (see comment there).
+                if (argc <= 1)
+                    Error("no <file> provided for -o");
+                binaryFileName = argv[1];
+                argc--;
+                argv++;
+                break;
+            case 'q':
+                Options |= EOptionDumpReflection;
+                break;
+            case 'r':
+                Options |= EOptionRelaxedErrors;
+                break;
+            case 's':
+                Options |= EOptionSuppressInfolog;
+                break;
+            case 't':
+                #ifdef _WIN32
+                    Options |= EOptionMultiThreaded;
+                #endif
+                break;
+            case 'v':
+                Options |= EOptionDumpVersions;
+                break;
+            case 'w':
+                Options |= EOptionSuppressWarnings;
+                break;
+            case 'x':
+                Options |= EOptionOutputHexadecimal;
+                break;
+            default:
+                usage();
+                break;
+            }
+        } else {
+            // Not an option: either a .conf limits file or a shader to compile.
+            std::string name(argv[0]);
+            if (! SetConfigFile(name)) {
+                Work[argc] = new glslang::TWorkItem(name);
+                Worklist.add(Work[argc]);
+            }
+        }
+    }
+
+    // Make sure that -E is not specified alongside linking (which includes SPV generation)
+    if ((Options & EOptionOutputPreprocessed) && (Options & EOptionLinkProgram))
+        Error("can't use -E when linking is selected");
+
+    // -o or -x makes no sense if there is no target binary
+    if (binaryFileName && (Options & EOptionSpv) == 0)
+        Error("no binary generation requested (e.g., -V)");
+
+    if ((Options & EOptionFlattenUniformArrays) != 0 &&
+        (Options & EOptionReadHlsl) == 0)
+        Error("uniform array flattening only valid when compiling HLSL source.");
+}
+
+//
+// Translate the meaningful subset of command-line options to parser-behavior options.
+//
+// ORs the corresponding EShMsg* bit into |messages| for each EOption* bit set
+// in the global Options; unrelated Options bits are ignored.
+//
+void SetMessageOptions(EShMessages& messages)
+{
+    // Option-bit -> message-bit translation table.  Ordering is irrelevant
+    // because each entry only ORs an independent bit into |messages|.
+    static const struct {
+        int         option;
+        EShMessages message;
+    } translations[] = {
+        { EOptionRelaxedErrors,      EShMsgRelaxedErrors },
+        { EOptionIntermediate,       EShMsgAST },
+        { EOptionSuppressWarnings,   EShMsgSuppressWarnings },
+        { EOptionSpv,                EShMsgSpvRules },
+        { EOptionVulkanRules,        EShMsgVulkanRules },
+        { EOptionOutputPreprocessed, EShMsgOnlyPreprocessor },
+        { EOptionReadHlsl,           EShMsgReadHlsl },
+        { EOptionCascadingErrors,    EShMsgCascadingErrors },
+    };
+
+    for (const auto& entry : translations) {
+        if (Options & entry.option)
+            messages = (EShMessages)(messages | entry.message);
+    }
+}
+
+//
+// Thread entry point, for non-linking asynchronous mode.
+//
+// Drains the global Worklist, compiling each item with the old handle-based
+// interface and (unless suppressed) stashing its infolog in the work item.
+//
+// Returns 0 when compiler construction fails; also returns 0 after the
+// worklist is drained.  NOTE(review): despite the original "1 for success"
+// intent, no path returns 1, so callers cannot distinguish the two outcomes
+// from the return value alone.
+//
+unsigned int CompileShaders(void*)
+{
+    glslang::TWorkItem* workItem;
+    while (Worklist.remove(workItem)) {
+        ShHandle compiler = ShConstructCompiler(FindLanguage(workItem->name), Options);
+        if (compiler == 0)
+            return 0;
+
+        CompileFile(workItem->name.c_str(), compiler);
+
+        if (! (Options & EOptionSuppressInfolog))
+            workItem->results = ShGetInfoLog(compiler);
+
+        ShDestruct(compiler);
+    }
+
+    return 0;
+}
+
+// Outputs the given string, but only if it is non-null and non-empty.
+// This prevents erroneous newlines from appearing.
+void PutsIfNonEmpty(const char* str)
+{
+    const bool hasContent = (str != nullptr) && (str[0] != '\0');
+    if (hasContent)
+        puts(str);
+}
+
+// Outputs the given string to stderr, but only if it is non-null and non-empty.
+// This prevents erroneous newlines from appearing.
+void StderrIfNonEmpty(const char* str)
+{
+    if (str == nullptr || str[0] == '\0')
+        return;
+
+    fprintf(stderr, "%s\n", str);
+}
+
+// Simple bundling of what makes a compilation unit for ease in passing around,
+// and separation of handling file IO versus API (programmatic) compilation.
+struct ShaderCompUnit {
+    EShLanguage stage;
+    std::string fileName;
+    char** text;             // memory owned/managed externally
+    const char*  fileNameList[1];
+
+    // Need to have a special constructors to adjust the fileNameList, since back end needs a list of ptrs
+    ShaderCompUnit(EShLanguage istage, std::string &ifileName, char** itext)
+    {
+        stage = istage;
+        fileName = ifileName;
+        text    = itext;
+        fileNameList[0] = fileName.c_str();
+    }
+
+    // Copy constructor must re-point fileNameList at THIS object's fileName;
+    // a default memberwise copy would leave it pointing into the source
+    // object's string buffer (dangling once the source is destroyed, e.g.
+    // when these units are copied into a std::vector).
+    ShaderCompUnit(const ShaderCompUnit &rhs)
+    {
+        stage = rhs.stage;
+        fileName = rhs.fileName;
+        text = rhs.text;
+        fileNameList[0] = fileName.c_str();
+    }
+
+};
+
+//
+// For linking mode: Will independently parse each compilation unit, but then put them
+// in the same program and link them together, making at most one linked module per
+// pipeline stage.
+//
+// Uses the new C++ interface instead of the old handle-based interface.
+//
+// Sets the global CompileFailed/LinkFailed flags on failure; emits SPIR-V
+// (binary, hex, and/or disassembly per Options) for each linked stage.
+//
+void CompileAndLinkShaderUnits(std::vector<ShaderCompUnit> compUnits)
+{
+    // keep track of what to free
+    std::list<glslang::TShader*> shaders;
+
+    EShMessages messages = EShMsgDefault;
+    SetMessageOptions(messages);
+
+    //
+    // Per-shader processing...
+    //
+
+    glslang::TProgram& program = *new glslang::TProgram;
+    for (auto it = compUnits.cbegin(); it != compUnits.cend(); ++it) {
+        const auto &compUnit = *it;
+        glslang::TShader* shader = new glslang::TShader(compUnit.stage);
+        shader->setStringsWithLengthsAndNames(compUnit.text, NULL, compUnit.fileNameList, 1);
+        if (entryPointName) // HLSL todo: this needs to be tracked per compUnits
+            shader->setEntryPoint(entryPointName);
+
+        // Apply the per-stage binding shifts and HLSL-related knobs gathered
+        // during argument parsing.
+        shader->setShiftSamplerBinding(baseSamplerBinding[compUnit.stage]);
+        shader->setShiftTextureBinding(baseTextureBinding[compUnit.stage]);
+        shader->setShiftImageBinding(baseImageBinding[compUnit.stage]);
+        shader->setShiftUboBinding(baseUboBinding[compUnit.stage]);
+        shader->setFlattenUniformArrays((Options & EOptionFlattenUniformArrays) != 0);
+        shader->setNoStorageFormat((Options & EOptionNoStorageFormat) != 0);
+
+        if (Options & EOptionAutoMapBindings)
+            shader->setAutoMapBindings(true);
+
+        shaders.push_back(shader);
+
+        const int defaultVersion = Options & EOptionDefaultDesktop? 110: 100;
+
+        // -E mode: preprocess only, print the result, and skip parse/link for
+        // this unit.
+        if (Options & EOptionOutputPreprocessed) {
+            std::string str;
+            glslang::TShader::ForbidInclude includer;
+            if (shader->preprocess(&Resources, defaultVersion, ENoProfile, false, false,
+                                   messages, &str, includer)) {
+                PutsIfNonEmpty(str.c_str());
+            } else {
+                CompileFailed = true;
+            }
+            StderrIfNonEmpty(shader->getInfoLog());
+            StderrIfNonEmpty(shader->getInfoDebugLog());
+            continue;
+        }
+        if (! shader->parse(&Resources, defaultVersion, false, messages))
+            CompileFailed = true;
+
+        program.addShader(shader);
+
+        if (! (Options & EOptionSuppressInfolog) &&
+            ! (Options & EOptionMemoryLeakMode)) {
+            PutsIfNonEmpty(compUnit.fileName.c_str());
+            PutsIfNonEmpty(shader->getInfoLog());
+            PutsIfNonEmpty(shader->getInfoDebugLog());
+        }
+    }
+
+    //
+    // Program-level processing...
+    //
+
+    // Link
+    if (! (Options & EOptionOutputPreprocessed) && ! program.link(messages))
+        LinkFailed = true;
+
+    // Map IO
+    if (Options & EOptionSpv) {
+        if (!program.mapIO())
+            LinkFailed = true;
+    }
+
+    // Report
+    if (! (Options & EOptionSuppressInfolog) &&
+        ! (Options & EOptionMemoryLeakMode)) {
+        PutsIfNonEmpty(program.getInfoLog());
+        PutsIfNonEmpty(program.getInfoDebugLog());
+    }
+
+    // Reflect
+    if (Options & EOptionDumpReflection) {
+        program.buildReflection();
+        program.dumpReflection();
+    }
+
+    // Dump SPIR-V
+    if (Options & EOptionSpv) {
+        if (CompileFailed || LinkFailed)
+            printf("SPIR-V is not generated for failed compile or link\n");
+        else {
+            for (int stage = 0; stage < EShLangCount; ++stage) {
+                if (program.getIntermediate((EShLanguage)stage)) {
+                    std::vector<unsigned int> spirv;
+                    std::string warningsErrors;
+                    spv::SpvBuildLogger logger;
+                    glslang::GlslangToSpv(*program.getIntermediate((EShLanguage)stage), spirv, &logger);
+
+                    // Dump the spv to a file or stdout, etc., but only if not doing
+                    // memory/perf testing, as it's not internal to programmatic use.
+                    if (! (Options & EOptionMemoryLeakMode)) {
+                        printf("%s", logger.getAllMessages().c_str());
+                        if (Options & EOptionOutputHexadecimal) {
+                            glslang::OutputSpvHex(spirv, GetBinaryName((EShLanguage)stage));
+                        } else {
+                            glslang::OutputSpvBin(spirv, GetBinaryName((EShLanguage)stage));
+                        }
+                        if (Options & EOptionHumanReadableSpv) {
+                            spv::Disassemble(std::cout, spirv);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    // Free everything up, program has to go before the shaders
+    // because it might have merged stuff from the shaders, and
+    // the stuff from the shaders has to have its destructors called
+    // before the pools holding the memory in the shaders is freed.
+    delete &program;
+    while (shaders.size() > 0) {
+        delete shaders.back();
+        shaders.pop_back();
+    }
+}
+
+//
+// Do file IO part of compile and link, handing off the pure
+// API/programmatic mode to CompileAndLinkShaderUnits(), which can
+// be put in a loop for testing memory footprint and performance.
+//
+// This is just for linking mode: meaning all the shaders will be put into the
+// the same program linked together.
+//
+// This means there are a limited number of work items (not multi-threading mode)
+// and that the point is testing at the linking level. Hence, to enable
+// performance and memory testing, the actual compile/link can be put in
+// a loop, independent of processing the work items and file IO.
+//
+void CompileAndLinkShaderFiles()
+{
+    std::vector<ShaderCompUnit> compUnits;
+
+    // Transfer all the work items from the worklist to a simple list of
+    // compilation units.  (We don't care about the thread
+    // work-item distribution properties in this path, which
+    // is okay due to the limited number of shaders, know since
+    // they are all getting linked together.)
+    glslang::TWorkItem* workItem;
+    while (Worklist.remove(workItem)) {
+        ShaderCompUnit compUnit(
+            FindLanguage(workItem->name),
+            workItem->name,
+            ReadFileData(workItem->name.c_str())
+        );
+
+        // Unreadable file: report usage and stop collecting units.
+        if (! compUnit.text) {
+            usage();
+            return;
+        }
+
+        compUnits.push_back(compUnit);
+    }
+
+    // Actual call to programmatic processing of compile and link,
+    // in a loop for testing memory and performance.  This part contains
+    // all the perf/memory that a programmatic consumer will care about.
+    // (100x100 iterations in memory-leak mode, otherwise once.)
+    for (int i = 0; i < ((Options & EOptionMemoryLeakMode) ? 100 : 1); ++i) {
+        for (int j = 0; j < ((Options & EOptionMemoryLeakMode) ? 100 : 1); ++j)
+           CompileAndLinkShaderUnits(compUnits);
+
+        if (Options & EOptionMemoryLeakMode)
+            glslang::OS_DumpMemoryCounters();
+    }
+
+    // Shader text buffers were allocated by ReadFileData() above.
+    for (auto it = compUnits.begin(); it != compUnits.end(); ++it)
+        FreeFileData(it->text);
+}
+
+// Program entry point: parses arguments, optionally dumps config/version
+// info, then runs one of two modes (see the comment block below).  Exit code
+// is a TFailCode value.
+int C_DECL main(int argc, char* argv[])
+{
+    ProcessArguments(argc, argv);
+
+    // -c: dump the default limits template; exit unless shaders were also given.
+    if (Options & EOptionDumpConfig) {
+        printf("%s", glslang::GetDefaultTBuiltInResourceString().c_str());
+        if (Worklist.empty())
+            return ESuccess;
+    }
+
+    // -v: dump version information; exit unless shaders were also given.
+    if (Options & EOptionDumpVersions) {
+        printf("Glslang Version: %s %s\n", GLSLANG_REVISION, GLSLANG_DATE);
+        printf("ESSL Version: %s\n", glslang::GetEsslVersionString());
+        printf("GLSL Version: %s\n", glslang::GetGlslVersionString());
+        std::string spirvVersion;
+        glslang::GetSpirvVersion(spirvVersion);
+        printf("SPIR-V Version %s\n", spirvVersion.c_str());
+        printf("GLSL.std.450 Version %d, Revision %d\n", GLSLstd450Version, GLSLstd450Revision);
+        printf("Khronos Tool ID %d\n", glslang::GetKhronosToolId());
+        printf("GL_KHR_vulkan_glsl version %d\n", 100);
+        printf("ARB_GL_gl_spirv version %d\n", 100);
+        if (Worklist.empty())
+            return ESuccess;
+    }
+
+    if (Worklist.empty()) {
+        usage();
+    }
+
+    ProcessConfigFile();
+
+    //
+    // Two modes:
+    // 1) linking all arguments together, single-threaded, new C++ interface
+    // 2) independent arguments, can be tackled by multiple asynchronous threads, for testing thread safety, using the old handle interface
+    //
+    if (Options & EOptionLinkProgram ||
+        Options & EOptionOutputPreprocessed) {
+        glslang::InitializeProcess();
+        CompileAndLinkShaderFiles();
+        glslang::FinalizeProcess();
+        for (int w = 0; w < NumWorkItems; ++w) {
+          if (Work[w]) {
+            delete Work[w];
+          }
+        }
+    } else {
+        ShInitialize();
+
+        // Only prefix output with the shader's name when compiling several.
+        bool printShaderNames = Worklist.size() > 1;
+
+        if (Options & EOptionMultiThreaded) {
+            const int NumThreads = 16;
+            void* threads[NumThreads];
+            for (int t = 0; t < NumThreads; ++t) {
+                threads[t] = glslang::OS_CreateThread(&CompileShaders);
+                if (! threads[t]) {
+                    printf("Failed to create thread\n");
+                    return EFailThreadCreate;
+                }
+            }
+            glslang::OS_WaitForAllThreads(threads, NumThreads);
+        } else
+            CompileShaders(0);
+
+        // Print out all the resulting infologs
+        for (int w = 0; w < NumWorkItems; ++w) {
+            if (Work[w]) {
+                if (printShaderNames || Work[w]->results.size() > 0)
+                    PutsIfNonEmpty(Work[w]->name.c_str());
+                PutsIfNonEmpty(Work[w]->results.c_str());
+                delete Work[w];
+            }
+        }
+
+        ShFinalize();
+    }
+
+    delete[] Work;
+
+    if (CompileFailed)
+        return EFailCompile;
+    if (LinkFailed)
+        return EFailLink;
+
+    return 0;
+}
+
+//
+//   Deduce the language from the filename.  Files must end in one of the
+//   following extensions:
+//
+//   .vert = vertex
+//   .tesc = tessellation control
+//   .tese = tessellation evaluation
+//   .geom = geometry
+//   .frag = fragment
+//   .comp = compute
+//
+EShLanguage FindLanguage(const std::string& name, bool parseSuffix)
+{
+    size_t ext = 0;
+
+    // Search for a suffix on a filename: e.g, "myfile.frag".  If given
+    // the suffix directly, we skip looking the '.'
+    if (parseSuffix) {
+        ext = name.rfind('.');
+        if (ext == std::string::npos) {
+            usage();
+            return EShLangVertex;
+        }
+        ++ext;
+    }
+
+    // A -S override, when present, beats whatever the filename says.
+    const std::string suffix = shaderStageName ? shaderStageName : name.substr(ext);
+
+    // Suffix -> stage lookup table.
+    struct StageMapping { const char* suffix; EShLanguage stage; };
+    static const StageMapping stageTable[] = {
+        { "vert", EShLangVertex },
+        { "tesc", EShLangTessControl },
+        { "tese", EShLangTessEvaluation },
+        { "geom", EShLangGeometry },
+        { "frag", EShLangFragment },
+        { "comp", EShLangCompute },
+    };
+    for (const auto& entry : stageTable) {
+        if (suffix == entry.suffix)
+            return entry.stage;
+    }
+
+    // Unrecognized suffix: report usage; vertex is the fallback return.
+    usage();
+    return EShLangVertex;
+}
+
+//
+// Read a file's data into a string, and compile it using the old interface ShCompile,
+// for non-linkable results.
+//
+// Sets the global CompileFailed on any failure; shader text is read via
+// ReadFileData() (which also sets NumShaderStrings) and freed before return.
+//
+void CompileFile(const char* fileName, ShHandle compiler)
+{
+    int ret = 0;
+    char** shaderStrings = ReadFileData(fileName);
+    // Check BEFORE dereferencing: the original only re-checked shaderStrings
+    // after the strlen() loop below had already dereferenced it, and that
+    // late-return path also leaked 'lengths'.  usage() normally exits, but
+    // guard the fall-through case as well.
+    if (! shaderStrings) {
+        usage();
+        CompileFailed = true;
+        return;
+    }
+
+    int* lengths = new int[NumShaderStrings];
+
+    // move to length-based strings, rather than null-terminated strings
+    for (int s = 0; s < NumShaderStrings; ++s)
+        lengths[s] = (int)strlen(shaderStrings[s]);
+
+    EShMessages messages = EShMsgDefault;
+    SetMessageOptions(messages);
+
+    // In memory-leak mode the compile is repeated 100x100 times so leaks show
+    // up in the counters; otherwise both loops run exactly once.
+    for (int i = 0; i < ((Options & EOptionMemoryLeakMode) ? 100 : 1); ++i) {
+        for (int j = 0; j < ((Options & EOptionMemoryLeakMode) ? 100 : 1); ++j) {
+            // Passing nullptr for lengths keeps the strings null-terminated
+            // from ShCompile's point of view; 'lengths' is retained for the
+            // length-based variant of the call.
+            ret = ShCompile(compiler, shaderStrings, NumShaderStrings, nullptr, EShOptNone, &Resources, Options, (Options & EOptionDefaultDesktop) ? 110 : 100, false, messages);
+        }
+
+        if (Options & EOptionMemoryLeakMode)
+            glslang::OS_DumpMemoryCounters();
+    }
+
+    delete [] lengths;
+    FreeFileData(shaderStrings);
+
+    if (ret == 0)
+        CompileFailed = true;
+}
+
+//
+//   Print the full command-line usage text to stdout, then exit the
+//   process with EFailUsage.  Called for -h and whenever an option or
+//   input file cannot be interpreted; it never returns.
+//
+void usage()
+{
+    printf("Usage: glslangValidator [option]... [file]...\n"
+           "\n"
+           "Where: each 'file' ends in .<stage>, where <stage> is one of\n"
+           "    .conf   to provide an optional config file that replaces the default configuration\n"
+           "            (see -c option below for generating a template)\n"
+           "    .vert   for a vertex shader\n"
+           "    .tesc   for a tessellation control shader\n"
+           "    .tese   for a tessellation evaluation shader\n"
+           "    .geom   for a geometry shader\n"
+           "    .frag   for a fragment shader\n"
+           "    .comp   for a compute shader\n"
+           "\n"
+           "Compilation warnings and errors will be printed to stdout.\n"
+           "\n"
+           "To get other information, use one of the following options:\n"
+           "Each option must be specified separately.\n"
+           "  -V          create SPIR-V binary, under Vulkan semantics; turns on -l;\n"
+           "              default file name is <stage>.spv (-o overrides this)\n"
+           "  -G          create SPIR-V binary, under OpenGL semantics; turns on -l;\n"
+           "              default file name is <stage>.spv (-o overrides this)\n"
+           "  -H          print human readable form of SPIR-V; turns on -V\n"
+           "  -E          print pre-processed GLSL; cannot be used with -l;\n"
+           "              errors will appear on stderr.\n"
+           "  -S <stage>  uses explicit stage specified, rather then the file extension.\n"
+           "              valid choices are vert, tesc, tese, geom, frag, or comp\n"
+           "  -c          configuration dump;\n"
+           "              creates the default configuration file (redirect to a .conf file)\n"
+           "  -C          cascading errors; risks crashes from accumulation of error recoveries\n"
+           "  -d          default to desktop (#version 110) when there is no shader #version\n"
+           "              (default is ES version 100)\n"
+           "  -D          input is HLSL\n"
+           "  -e          specify entry-point name\n"
+           "  -h          print this usage message\n"
+           "  -i          intermediate tree (glslang AST) is printed out\n"
+           "  -l          link all input files together to form a single module\n"
+           "  -m          memory leak mode\n"
+           "  -o  <file>  save binary to <file>, requires a binary option (e.g., -V)\n"
+           "  -q          dump reflection query database\n"
+           "  -r          relaxed semantic error-checking mode\n"
+           "  -s          silent mode\n"
+           "  -t          multi-threaded mode\n"
+           "  -v          print version strings\n"
+           "  -w          suppress warnings (except as required by #extension : warn)\n"
+           "  -x          save 32-bit hexadecimal numbers as text, requires a binary option (e.g., -V)\n"
+           "\n"
+           "  --shift-sampler-binding [stage] num     set base binding number for samplers\n"
+           "  --ssb [stage] num                       synonym for --shift-sampler-binding\n"
+           "\n"
+           "  --shift-texture-binding [stage] num     set base binding number for textures\n"
+           "  --stb [stage] num                       synonym for --shift-texture-binding\n"
+           "\n"
+           "  --shift-image-binding [stage] num       set base binding number for images (uav)\n"
+           "  --sib [stage] num                       synonym for --shift-image-binding\n"
+           "\n"
+           "  --shift-UBO-binding [stage] num         set base binding number for UBOs\n"
+           "  --sub [stage] num                       synonym for --shift-UBO-binding\n"
+           "\n"
+           "  --auto-map-bindings                     automatically bind uniform variables without\n"
+           "                                          explicit bindings.\n"
+           "  --amb                                   synonym for --auto-map-bindings\n"
+           "\n"
+           "  --flatten-uniform-arrays                flatten uniform texture & sampler arrays to scalars\n"
+           "  --fua                                   synonym for --flatten-uniform-arrays\n"
+           "\n"
+           "  --no-storage-format                     use Unknown image format\n"
+           "  --nsf                                   synonym for --no-storage-format\n"
+           );
+
+    exit(EFailUsage);  // never returns
+}
+
+#if !defined _MSC_VER && !defined MINGW_HAS_SECURE_API
+
+#include <errno.h>
+
+// Minimal fopen_s shim for toolchains without the secure CRT.
+// Returns 0 on success (storing the stream in *pFile), EINVAL for any
+// null argument, and errno (or ENOENT when errno is unset) when the
+// open itself fails.
+int fopen_s(
+   FILE** pFile,
+   const char* filename,
+   const char* mode
+)
+{
+   if (pFile == NULL || filename == NULL || mode == NULL)
+      return EINVAL;
+
+   FILE* f = fopen(filename, mode);
+   if (f == NULL)
+      return (errno != 0) ? errno : ENOENT;
+
+   *pFile = f;
+   return 0;
+}
+
+#endif
+
+//
+//   Read a file into a set of malloc'ed strings, splitting it into
+//   NumShaderStrings pieces (the splitting exists to exercise the
+//   compiler's multi-string input path).  Exits via Error() on failure.
+//   Caller frees the result with FreeFileData().
+//
+char** ReadFileData(const char* fileName) 
+{
+    // NOTE(review): opened in text mode ("r"); on Windows, CRLF translation
+    // could make fread() return fewer bytes than the fgetc() count below —
+    // presumably only LF input is exercised; confirm.
+    FILE *in = nullptr;
+    int errorCode = fopen_s(&in, fileName, "r");
+
+    int count = 0;
+    const int maxSourceStrings = 5;  // for testing splitting shader/tokens across multiple strings
+    char** return_data = (char**)malloc(sizeof(char *) * (maxSourceStrings+1)); // freed in FreeFileData()
+
+    if (errorCode || in == nullptr)
+        Error("unable to open input file");
+    
+    // Size the file by scanning it once, byte by byte (no fstat dependency).
+    while (fgetc(in) != EOF)
+        count++;
+
+    fseek(in, 0, SEEK_SET);
+
+    char *fdata = (char*)malloc(count+2); // freed before return of this function
+    if (! fdata)
+        Error("can't allocate memory");
+
+    if ((int)fread(fdata, 1, count, in) != count) {
+        free(fdata);
+        Error("can't read input file");
+    }
+
+    fdata[count] = '\0';
+    fclose(in);
+
+    if (count == 0) {
+        // recover from empty file
+        // NOTE(review): this string is allocated while NumShaderStrings is set
+        // to 0 — confirm FreeFileData() (which frees NumShaderStrings entries)
+        // actually releases it.
+        return_data[0] = (char*)malloc(count+2);  // freed in FreeFileData()
+        return_data[0][0]='\0';
+        NumShaderStrings = 0;
+        free(fdata);
+
+        return return_data;
+    } else
+        NumShaderStrings = 1;  // Set to larger than 1 for testing multiple strings
+
+    // compute how to split up the file into multiple strings, for testing multiple strings
+    int len = (int)(ceil)((float)count/(float)NumShaderStrings);
+    int ptr_len = 0;
+    int i = 0;
+    while (count > 0) {
+        return_data[i] = (char*)malloc(len + 2);  // freed in FreeFileData()
+        memcpy(return_data[i], fdata + ptr_len, len);
+        return_data[i][len] = '\0';
+        count -= len;
+        ptr_len += len;
+        // The final chunk may be shorter; when the remainder hits zero
+        // exactly, record how many strings were produced and stop.
+        if (count < len) {
+            if (count == 0) {
+               NumShaderStrings = i + 1;
+               break;
+            }
+            len = count;
+        }  
+        ++i;
+    }
+
+    free(fdata);
+
+    return return_data;
+}
+
+// Free the string array returned by ReadFileData(), including the array itself.
+void FreeFileData(char** data)
+{
+    // BUG FIX: an empty input file leaves NumShaderStrings == 0 but still
+    // allocates one empty string (see ReadFileData); the loop below would
+    // never free it, leaking it on every empty-file compile.
+    if (NumShaderStrings == 0)
+        free(data[0]);
+
+    for(int i = 0; i < NumShaderStrings; i++)
+        free(data[i]);
+
+    free(data);
+}
+
+//
+// Emit a banner line introducing an info-log section; 'num' is included
+// in the banner only when it is non-negative.
+//
+void InfoLogMsg(const char* msg, const char* name, const int num)
+{
+    if (num < 0)
+        printf("#### %s %s INFO LOG ####\n", msg, name);
+    else
+        printf("#### %s %s %d INFO LOG ####\n", msg, name, num);
+}

+ 98 - 0
3rdparty/glslang/StandAlone/Worklist.h

@@ -0,0 +1,98 @@
+//
+//Copyright (C) 2013 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+#ifndef WORKLIST_H_INCLUDED
+#define WORKLIST_H_INCLUDED
+
+#include "../glslang/OSDependent/osinclude.h"
+#include <string>
+#include <list>
+
+namespace glslang {
+
+    // A single unit of work: one input shader file plus the output text
+    // produced while processing it (printed by the driver after all
+    // compilation finishes).
+    class TWorkItem {
+    public:
+        TWorkItem() { }
+        explicit TWorkItem(const std::string& s) :
+            name(s) { }
+        std::string name;          // input file name
+        std::string results;       // accumulated info-log text for this item
+        std::string resultsIndex;  // presumably an index/key for results — TODO confirm; unused in visible code
+    };
+
+    // Thread-safe FIFO of work items, guarded by the process-wide lock
+    // from osinclude.h.  Items are owned by the caller, not the list.
+    class TWorklist {
+    public:
+        TWorklist() { }
+        virtual ~TWorklist() { }
+
+        // Append an item to the back of the list.
+        void add(TWorkItem* item)
+        {
+            GetGlobalLock();
+            
+            worklist.push_back(item);
+            
+            ReleaseGlobalLock();
+        }
+    
+        // Pop the front item into 'item'; returns false if the list was empty.
+        bool remove(TWorkItem*& item)
+        {
+            GetGlobalLock();
+            
+            // BUG FIX: the original returned early here while still holding
+            // the global lock, deadlocking every subsequent add()/remove().
+            const bool haveItem = ! worklist.empty();
+            if (haveItem) {
+                item = worklist.front();
+                worklist.pop_front();
+            }
+            
+            ReleaseGlobalLock();
+
+            return haveItem;
+        }
+
+        // NOTE: size() and empty() read the list without taking the lock,
+        // so their results are advisory when other threads are active.
+        int size()
+        {
+            return (int)worklist.size();
+        }
+
+        bool empty()
+        {
+            return worklist.empty();
+        }
+
+    protected:
+        std::list<TWorkItem*> worklist;
+    };
+
+} // end namespace glslang
+
+#endif // WORKLIST_H_INCLUDED

+ 344 - 0
3rdparty/glslang/StandAlone/spirv-remap.cpp

@@ -0,0 +1,344 @@
+//
+//Copyright (C) 2015 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include <iostream>
+#include <fstream>
+#include <cstring>
+#include <stdexcept>
+
+#include "../SPIRV/SPVRemapper.h"
+
+namespace {
+
+    typedef unsigned int SpvWord;
+
+    // Poor man's basename: given a complete path, return the file portion.
+    // E.g:
+    //      Linux:  /foo/bar/test  -> test
+    //      Win:   c:\foo\bar\test -> test
+    // It's not very efficient, but that doesn't matter for our minimal-duty use.
+    // Using boost::filesystem would be better in many ways, but want to avoid that dependency.
+
+    // OS dependent path separator (avoiding boost::filesystem dependency)
+#if defined(_WIN32)
+    char path_sep_char() { return '\\'; }
+#else
+    char path_sep_char() { return '/';  }
+#endif
+
+    // Takes the path by const reference (the original copied the std::string
+    // by value on every call for no benefit).
+    std::string basename(const std::string& filename)
+    {
+        const size_t sepLoc = filename.find_last_of(path_sep_char());
+
+        return (sepLoc == std::string::npos) ? filename : filename.substr(sepLoc+1);
+    }
+
+    // Error callback: print the message to stdout and terminate with the
+    // tool's generic failure status (5 — the same value usage() exits with).
+    void errHandler(const std::string& str) {
+        std::cout << str << std::endl;
+        exit(5);
+    }
+
+    // Log callback: print the message to stdout (endl also flushes).
+    void logHandler(const std::string& str) {
+        std::cout << str << std::endl;
+    }
+
+    // Read a SPIR-V word stream from disk into 'spv'.
+    // Any I/O failure is reported through errHandler(), which exits.
+    void read(std::vector<SpvWord>& spv, const std::string& inFilename, int verbosity)
+    {
+        std::ifstream fp;
+
+        if (verbosity > 0)
+            logHandler(std::string("  reading: ") + inFilename);
+
+        spv.clear();
+        fp.open(inFilename, std::fstream::in | std::fstream::binary);
+
+        if (fp.fail())
+            errHandler(std::string("error opening file for read: ") + inFilename); // BUG FIX: filename was missing from the message
+
+        // Reserve space (for efficiency, not for correctness)
+        fp.seekg(0, fp.end);
+        spv.reserve(size_t(fp.tellg()) / sizeof(SpvWord));
+        fp.seekg(0, fp.beg);
+
+        // Idiomatic read loop: the stream converts to false at EOF or on
+        // error, so a partial trailing word is never pushed.  (The original
+        // 'while (!fp.eof())' pattern tested eof before the read result.)
+        SpvWord inWord;
+        while (fp.read((char *)&inWord, sizeof(inWord)))
+            spv.push_back(inWord);
+
+        // A loop that ended for any reason other than end-of-file is an error.
+        if (!fp.eof())
+            errHandler(std::string("error reading file: ") + inFilename);
+    }
+
+    // Write the word stream in 'spv' to 'outFile'; any failure is reported
+    // through errHandler(), which exits.
+    void write(std::vector<SpvWord>& spv, const std::string& outFile, int verbosity)
+    {
+        if (outFile.empty())
+            errHandler("missing output filename.");
+
+        std::ofstream fp;
+
+        if (verbosity > 0)
+            logHandler(std::string("  writing: ") + outFile);
+
+        fp.open(outFile, std::fstream::out | std::fstream::binary);
+
+        if (fp.fail())
+            errHandler(std::string("error opening file for write: ") + outFile);
+
+        for (SpvWord word : spv) {
+            fp.write((char *)&word, sizeof(word));
+            if (fp.fail())
+                errHandler(std::string("error writing file: ") + outFile);
+        }
+
+        // file is closed by destructor
+    }
+
+    // Print helpful usage message to stdout, optionally preceded by 'msg'
+    // (used to explain which option was malformed), then exit with the
+    // tool's generic failure status (5).  Never returns.
+    void usage(const char* const name, const char* const msg = 0)
+    {
+        if (msg)
+            std::cout << msg << std::endl << std::endl;
+
+        std::cout << "Usage: " << std::endl;
+
+        std::cout << "  " << basename(name)
+            << " [-v[v[...]] | --verbose [int]]"
+            << " [--map (all|types|names|funcs)]"
+            << " [--dce (all|types|funcs)]"
+            << " [--opt (all|loadstore)]"
+            << " [--strip-all | --strip all | -s]" 
+            << " [--do-everything]" 
+            << " --input | -i file1 [file2...] --output|-o DESTDIR"
+            << std::endl;
+
+        std::cout << "  " << basename(name) << " [--version | -V]" << std::endl;
+        std::cout << "  " << basename(name) << " [--help | -?]" << std::endl;
+
+        exit(5);
+    }
+
+    // Grind through each input SPIR-V file in turn: read it, remap it with
+    // the requested options, and write the result into outputDir under the
+    // same base filename.
+    void execute(const std::vector<std::string>& inputFile, const std::string& outputDir,
+        int opts, int verbosity)
+    {
+        for (const std::string& filename : inputFile) {
+            std::vector<SpvWord> spv;
+            read(spv, filename, verbosity);
+            spv::spirvbin_t(verbosity).remap(spv, opts);
+
+            const std::string outfile = outputDir + path_sep_char() + basename(filename);
+            write(spv, outfile, verbosity);
+        }
+
+        if (verbosity > 0)
+            std::cout << "Done: " << inputFile.size() << " file(s) processed" << std::endl;
+    }
+
+    // Parse command line options into inputFile/outputDir/options/verbosity.
+    // Malformed input is reported through usage(), which exits.
+    void parseCmdLine(int argc, char** argv, std::vector<std::string>& inputFile,
+        std::string& outputDir,
+        int& options,
+        int& verbosity)
+    {
+        if (argc < 2)
+            usage(argv[0]);
+
+        verbosity  = 0;
+        options    = spv::spirvbin_t::NONE;
+
+        // Parse command line.
+        // boost::program_options would be quite a bit nicer, but we don't want to
+        // introduce a dependency on boost.
+        for (int a=1; a<argc; ) {
+            const std::string arg = argv[a];
+
+            if (arg == "--output" || arg == "-o") {
+                // Output directory
+                if (++a >= argc)
+                    usage(argv[0], "--output requires an argument");
+                if (!outputDir.empty())
+                    usage(argv[0], "--output can be provided only once");
+
+                outputDir = argv[a++];
+
+                // Remove trailing directory separator characters
+                while (!outputDir.empty() && outputDir.back() == path_sep_char())
+                    outputDir.pop_back();
+
+            }
+            else if (arg == "-vv")     { verbosity = 2; ++a; } // verbosity shortcuts
+            else if (arg == "-vvv")    { verbosity = 3; ++a; } // ...
+            else if (arg == "-vvvv")   { verbosity = 4; ++a; } // ...
+            else if (arg == "-vvvvv")  { verbosity = 5; ++a; } // ...
+
+            else if (arg == "--verbose" || arg == "-v") {
+                ++a;
+                verbosity = 1;
+
+                // Accept an optional integer level, e.g. "--verbose 3".
+                if (a < argc) {
+                    char* end_ptr = 0;
+                    int verb = ::strtol(argv[a], &end_ptr, 10);
+                    // Only consume the next token if the whole thing parsed
+                    // as a number.
+                    if (*end_ptr == '\0' && end_ptr != argv[a]) {
+                        verbosity = verb;
+                        ++a;
+                    }
+                }
+            }
+            else if (arg == "--version" || arg == "-V") {
+                std::cout << basename(argv[0]) << " version 0.97 " << __DATE__ << " " << __TIME__ << std::endl;
+                exit(0);
+            } else if (arg == "--input" || arg == "-i") {
+                // Collect input files
+                for (++a; a < argc && argv[a][0] != '-'; ++a)
+                    inputFile.push_back(argv[a]);
+            } else if (arg == "--do-everything") {
+                ++a;
+                options = options | spv::spirvbin_t::DO_EVERYTHING;
+            } else if (arg == "--strip-all" || arg == "-s") {
+                ++a;
+                options = options | spv::spirvbin_t::STRIP;
+            } else if (arg == "--strip") {
+                ++a;
+                // BUG FIX: guard a < argc; argv[argc] is NULL and was dereferenced.
+                if (a < argc && strncmp(argv[a], "all", 3) == 0) {
+                    options = options | spv::spirvbin_t::STRIP;
+                    ++a;
+                }
+            } else if (arg == "--dce") {
+                // Parse comma (or colon, etc) separated list of things to dce
+                ++a;
+                if (a >= argc)   // BUG FIX: argv[argc] is NULL and was dereferenced below
+                    usage(argv[0], "--dce requires an argument");
+                // BUG FIX: the original 'for (...; *c; ++c)' with 'c += N' on a
+                // match stepped one past the argument's NUL terminator; advance
+                // only on non-matching characters instead.
+                for (const char* c = argv[a]; *c; ) {
+                    if (strncmp(c, "all", 3) == 0) {
+                        options = (options | spv::spirvbin_t::DCE_ALL);
+                        c += 3;
+                    } else if (strncmp(c, "*", 1) == 0) {
+                        options = (options | spv::spirvbin_t::DCE_ALL);
+                        c += 1;
+                    } else if (strncmp(c, "funcs", 5) == 0) {
+                        options = (options | spv::spirvbin_t::DCE_FUNCS);
+                        c += 5;
+                    } else if (strncmp(c, "types", 5) == 0) {
+                        options = (options | spv::spirvbin_t::DCE_TYPES);
+                        c += 5;
+                    } else {
+                        ++c;   // skip separators / unrecognized characters
+                    }
+                }
+                ++a;
+            } else if (arg == "--map") {
+                // Parse comma (or colon, etc) separated list of things to map
+                ++a;
+                if (a >= argc)   // BUG FIX: see --dce above
+                    usage(argv[0], "--map requires an argument");
+                for (const char* c = argv[a]; *c; ) {
+                    if (strncmp(c, "all", 3) == 0) {
+                        options = (options | spv::spirvbin_t::MAP_ALL);
+                        c += 3;
+                    } else if (strncmp(c, "*", 1) == 0) {
+                        options = (options | spv::spirvbin_t::MAP_ALL);
+                        c += 1;
+                    } else if (strncmp(c, "types", 5) == 0) {
+                        options = (options | spv::spirvbin_t::MAP_TYPES);
+                        c += 5;
+                    } else if (strncmp(c, "names", 5) == 0) {
+                        options = (options | spv::spirvbin_t::MAP_NAMES);
+                        c += 5;
+                    } else if (strncmp(c, "funcs", 5) == 0) {
+                        options = (options | spv::spirvbin_t::MAP_FUNCS);
+                        c += 5;
+                    } else {
+                        ++c;   // skip separators / unrecognized characters
+                    }
+                }
+                ++a;
+            } else if (arg == "--opt") {
+                ++a;
+                if (a >= argc)   // BUG FIX: see --dce above
+                    usage(argv[0], "--opt requires an argument");
+                for (const char* c = argv[a]; *c; ) {
+                    if (strncmp(c, "all", 3) == 0) {
+                        options = (options | spv::spirvbin_t::OPT_ALL);
+                        c += 3;
+                    } else if (strncmp(c, "*", 1) == 0) {
+                        options = (options | spv::spirvbin_t::OPT_ALL);
+                        c += 1;
+                    } else if (strncmp(c, "loadstore", 9) == 0) {
+                        options = (options | spv::spirvbin_t::OPT_LOADSTORE);
+                        c += 9;
+                    } else {
+                        ++c;   // skip separators / unrecognized characters
+                    }
+                }
+                ++a;
+            } else if (arg == "--help" || arg == "-?") {
+                usage(argv[0]);
+            } else {
+                usage(argv[0], "Unknown command line option");
+            }
+        }
+    }
+
+} // namespace
+
+
+// Entry point: parse options, then read, remap, and write each input file.
+int main(int argc, char** argv)
+{
+    std::vector<std::string> inputFile;
+    std::string              outputDir;
+    // Defensive defaults: parseCmdLine() sets both, but never pass
+    // uninitialized ints around.
+    int                      opts      = spv::spirvbin_t::NONE;
+    int                      verbosity = 0;
+
+#ifdef use_cpp11
+    // NOTE(review): 'use_cpp11' is not defined anywhere in this file, so
+    // these handlers are likely never registered — confirm intent.
+    // handle errors by exiting
+    spv::spirvbin_t::registerErrorHandler(errHandler);
+
+    // Log messages to std::cout
+    spv::spirvbin_t::registerLogHandler(logHandler);
+#endif
+
+    if (argc < 2)
+        usage(argv[0]);
+
+    parseCmdLine(argc, argv, inputFile, outputDir, opts, verbosity);
+
+    if (outputDir.empty())
+        usage(argv[0], "Output directory required");
+
+    // Main operations: read, remap, and write.
+    execute(inputFile, outputDir, opts, verbosity);
+
+    // If we get here, everything went OK!  Nothing more to be done.
+    return 0;
+}

+ 28 - 0
3rdparty/glslang/Test/100.conf

@@ -0,0 +1,28 @@
+MaxLights 32
+MaxClipPlanes 6
+MaxTextureUnits 32
+MaxTextureCoords 32
+MaxVertexAttribs 8
+MaxVertexUniformComponents 4096
+MaxVaryingFloats 64
+MaxVertexTextureImageUnits 0
+MaxCombinedTextureImageUnits 8
+MaxTextureImageUnits 8
+MaxFragmentUniformComponents 4096
+MaxDrawBuffers 1
+MaxVertexUniformVectors 16
+MaxVaryingVectors 8
+MaxFragmentUniformVectors 16
+MaxVertexOutputVectors 16
+MaxFragmentInputVectors 15
+MinProgramTexelOffset -8
+MaxProgramTexelOffset 7
+nonInductiveForLoops 0
+whileLoops 0
+doWhileLoops 0
+generalUniformIndexing 0
+generalAttributeMatrixVectorIndexing 0
+generalVaryingIndexing 0
+generalSamplerIndexing 0
+generalVariableIndexing 0
+generalConstantMatrixVectorIndexing 0

+ 227 - 0
3rdparty/glslang/Test/100.frag

@@ -0,0 +1,227 @@
+// okay
+#version 100
+int a[3] = { 2, 3, 4, };  // ERROR (lots)
+#version 100
+int uint;
+
+attribute vec4 v[3];     // ERROR
+
+float f = 2;             // ERROR
+
+uniform block {          // ERROR
+    int x;
+};
+
+void foo(float);
+
+void main()
+{
+    foo(3);              // ERROR
+    int s = 1 << 4;      // ERROR
+    s = 16 >> 2;         // ERROR
+    if (a == a);         // ERROR
+    int b, c;
+    b = c & 4;           // ERROR
+    b = c % 4;           // ERROR
+    b = c | 4;           // ERROR
+    b >>= 2;             // ERROR
+    b <<= 2;             // ERROR
+    b %= 3;              // ERROR
+
+    struct S {
+        float f;
+        float a[10];
+    } s1, s2;
+
+    s1 = s2;             // ERROR
+    if (s1 == s2);       // ERROR
+    if (s1 != s2);       // ERROR
+
+    switch(b) {          // ERROR
+    }
+}
+
+invariant gl_FragColor;
+float fa[];              // ERROR
+float f13;
+invariant f13;           // ERROR
+struct S { int a; };
+invariant S;             // ERROR, not an input or output
+invariant float fi;      // ERROR
+varying vec4 av;
+invariant av;            // okay in v100
+
+void foo10()
+{
+    invariant f;         // ERROR
+    invariant float f2;  // ERROR
+    float f3;
+    invariant f3;        // ERROR
+}
+
+uniform vec2 uv2;
+invariant uv2;              // ERROR
+invariant uniform vec3 uv3; // ERROR
+
+sampler2D glob2D;           // ERROR
+void f11(sampler2D p2d)
+{
+    sampler2D v2D;          // ERROR
+}
+varying sampler2D vary2D;   // ERROR
+
+struct sp {
+    highp float f;
+    in float g;             // ERROR
+    uniform float h;        // ERROR
+    invariant float i;      // ERROR
+};
+
+uniform sampler3D s3D;      // ERROR
+
+#extension GL_OES_texture_3D : enable
+
+precision highp sampler3D;
+uniform sampler3D s3D2;
+
+void foo234()
+{
+    texture3D(s3D2, vec3(0.2), 0.2);
+    texture3DProj(s3D2, v[1], 0.4);
+    dFdx(v[0]);    // ERROR
+    dFdy(3.2);     // ERROR
+    fwidth(f13);   // ERROR
+}
+
+#extension GL_OES_standard_derivatives : enable
+
+void foo236()
+{
+    dFdx(v[0]);
+    dFdy(3.2);
+    fwidth(f13);
+    gl_FragDepth = f13;    // ERROR
+    gl_FragDepthEXT = f13; // ERROR
+}
+
+#extension GL_EXT_frag_depth : enable
+
+void foo239()
+{
+    gl_FragDepth = f13;    // ERROR
+    gl_FragDepthEXT = f13;
+}
+
+#extension GL_OES_EGL_image_external : enable
+
+uniform samplerExternalOES sExt;
+
+void foo245()
+{
+    texture2D(sExt, vec2(0.2));
+    texture2DProj(sExt, vec3(f13));
+    texture2DProj(sExt, v[2]);
+}
+
+precision mediump samplerExternalOES;
+uniform samplerExternalOES mediumExt;
+uniform highp samplerExternalOES highExt;
+
+void foo246()
+{
+    texture2D(mediumExt, vec2(0.2));
+    texture2DProj(highExt, v[2]);
+    texture3D(sExt, vec3(f13));   // ERROR
+    texture2DProjLod(sExt, vec3(f13), f13);  // ERROR
+    int a;
+    ~a;    // ERROR
+    a | a; // ERROR
+    a & a; // ERROR
+}
+
+#extension GL_OES_EGL_image_external : disable
+uniform sampler2D s2Dg;
+
+int foo203940(int a, float b, float a)  // ERROR, a redefined
+{
+    texture2DProjGradEXT(s2Dg, vec3(f13), uv2, uv2);  // ERROR, extension not enabled
+    return a;
+}
+
+float f123 = 4.0f;   // ERROR
+float f124 = 5e10F;  // ERROR
+
+#extension GL_EXT_shader_texture_lod : enable
+
+uniform samplerCube sCube;
+
+void foo323433()
+{
+    texture2DLodEXT(s2Dg, uv2, f13);
+    texture2DProjGradEXT(s2Dg, vec3(f13), uv2, uv2);
+    texture2DGradEXT(s2Dg, uv2, uv2, uv2);
+    textureCubeGradEXT(sCube, vec3(f13), vec3(f13), vec3(f13));
+}
+
+int fgfg(float f, mediump int i);
+int fgfg(float f, highp int i) { return 2; }   // ERROR, precision qualifier difference
+
+int fffg(float f);
+int fffg(float f);  // ERROR, can't have multiple prototypes 
+
+int gggf(float f);
+int gggf(float f) { return 2; }
+
+int agggf(float f) { return 2; }
+int agggf(float f);
+int agggf(float f);  // ERROR, second prototype
+
+varying struct SSS { float f; } s; // ERROR
+
+int vf(void);
+int vf2();
+int vf3(void v);      // ERROR
+int vf4(int, void);   // ERROR
+int vf5(int, void v); // ERROR
+
+void badswizzle()
+{
+    vec3 a[5];
+    a.y;        // ERROR, no array swizzle
+    a.zy;       // ERROR, no array swizzle
+    a.nothing;  // ERROR
+    a.length(); // ERROR, not this version
+    a.method(); // ERROR
+}
+
+float fooinit();
+
+float fooinittest()
+{
+    return fooinit();
+}
+
+// Test extra-function initializers
+const float fi1 = 3.0;
+const float fi2 = 4.0;
+const float fi3 = 5.0;
+
+float fooinit()
+{
+    return fi1 + fi2 + fi3;  // should make a constant of 12.0
+}
+
+int init1 = gl_FrontFacing ? 1 : 2; // ERROR, non-const initializer
+
+#ifdef GL_EXT_shader_non_constant_global_initializers
+#extension GL_EXT_shader_non_constant_global_initializers : enable
+#endif
+
+int init2 = gl_FrontFacing ? 1 : 2;
+
+#pragma STDGL invariant(all)
+
+#line 3000
+#error line of this error should be 3000
+
+uniform samplerExternalOES badExt;  // syntax ERROR

+ 76 - 0
3rdparty/glslang/Test/100Limits.vert

@@ -0,0 +1,76 @@
+#version 100
+
+int ga, gb;
+float f;
+
+uniform sampler2D fsa[3];
+uniform float fua[10];
+attribute mat3 am3;
+attribute vec2 av2;
+varying vec4 va[4];
+
+const mat2 m2 = mat2(1.0);
+const vec3 v3 = vec3(2.0);
+
+void foo(inout float a) {}
+
+int bar()
+{
+    return 1;
+}
+
+void main()
+{
+    while (ga < gb) { }
+
+    do { } while (false);
+
+    for (           ;              ;         );           // ERROR
+    for (           ;        ga==gb;         );           // ERROR
+    for (           ;              ;      f++);           // ERROR
+    for (     ga = 0;              ;         );           // ERROR
+    for ( bool a = false;          ;         );           // ERROR
+    for (float a = 0.0; a == sin(f);         );           // ERROR
+    for (  int a = 0;       a  < 10;   a *= 2);           // ERROR
+    for (  int a = 0;       a <= 20;      a++)  --a;      // ERROR
+    for (  int a = 0;       a <= 20;      a++)  { if (ga==0) a = 4; } // ERROR
+    for (float a = 0.0;   a <= 20.0; a += 2.0);
+    for (float a = 0.0;   a != 20.0; a -= 2.0)  { if (ga==0) ga = 4; }
+    for (float a = 0.0;   a == 20.0;      a--) for (float a = 0.0;   a == 20.0;      a--);  // two different 'a's, everything okay
+    for (float a = 0.0;   a <= 20.0; a += 2.0);
+    for (float a = 0.0;   a <= 20.0; a += 2.0);
+    for (float a = 0.0;   a > 2.0 * 20.0; a += v3.y);
+    for (float a = 0.0;   a >= 20.0; a += 2.0) foo(a);    // ERROR
+
+    int ia[9];
+
+    fsa[ga];  // ERROR
+    fua[ga];
+    am3[ga];  // ERROR
+    av2[ga];  // ERROR
+    va[2+ga]; // ERROR
+    m2[ga];   // ERROR
+    v3[ga/2]; // ERROR
+    ia[ga];   // ERROR
+
+    for (int a = 3; a >= 0; a--) {
+        fsa[a];
+        fua[a+2];
+        am3[3*a];
+        av2[3*a];
+        va[a-1];
+        m2[a/2];
+        v3[a];
+        ia[a];
+        ia[bar()];  // ERROR
+    }
+
+    fsa[2];
+    fua[3];
+    am3[2];
+    av2[1];
+    va[1];
+    m2[1];
+    v3[1];
+    ia[3];
+}

+ 76 - 0
3rdparty/glslang/Test/100scope.vert

@@ -0,0 +1,76 @@
+#version 100
+
+int f(int a, int b, int c)
+{
+	int a = b;  // ERROR, redefinition
+
+    {
+		float a = float(a) + 1.0;
+    }
+
+	return a;
+}
+
+int f(int a, int b, int c);  // okay to redeclare
+
+bool b;
+float b(int a);      // ERROR: redefinition
+
+float c(int a);
+bool c;              // ERROR: redefinition
+
+float f;             // ERROR: redefinition
+float tan;           // okay, built-in is in an outer scope
+float sin(float x);  // ERROR: can't redefine built-in functions
+float cos(float x)   // ERROR: can't redefine built-in functions
+{
+	return 1.0;
+}
+bool radians(bool x) // okay, can overload built-in functions
+{
+    return true;
+}
+
+invariant gl_Position;
+
+void main()
+{
+    int g();    // ERROR: no local function declarations
+	g();
+
+    float sin;  // okay
+	sin;
+    sin(0.7);  // ERROR, use of hidden function
+    f(1,2,3);
+
+    float f;    // hides f()
+    f = 3.0;
+
+    gl_Position = vec4(f);
+
+    for (int f = 0; f < 10; ++f)
+        ++f;
+
+    int x = 1;
+    { 
+        float x = 2.0, /* 2nd x visible here */ y = x; // y is initialized to 2
+        int z = z; // ERROR: z not previously defined.
+    }
+    {
+        int x = x; // x is initialized to '1'
+    }
+
+    struct S 
+    { 
+        int x; 
+    };
+    {
+        S S = S(0); // 'S' is only visible as a struct and constructor 
+        S.x;        // 'S' is now visible as a variable
+    }
+
+    int degrees;
+    degrees(3.2);  // ERROR, use of hidden built-in function
+}
+
+varying struct SSS { float f; } s; // ERROR

+ 74 - 0
3rdparty/glslang/Test/110scope.vert

@@ -0,0 +1,74 @@
+#version 110
+
+int f(int a, int b, int c)
+{
+	int a = b;  // ERROR, redefinition
+
+    {
+		float a = float(a) + 1.0; // okay
+    }
+
+	return a;
+}
+
+int f(int a, int b, int c);  // okay to redeclare
+
+bool b;
+float b(int a);      // okay, b and b() are different
+
+float c(int a);
+bool c;              // okay, c and c() are different
+
+float f;             // okay f and f() are different
+float tan;           // okay, hides built-in function
+float sin(float x);  // okay, can redefine built-in functions
+float cos(float x)   // okay, can redefine built-in functions
+{
+	return 1.0;
+}
+bool radians(bool x) // okay, can overload built-in functions
+{
+    return true;
+}
+
+int gi = f(1,2,3);  // ERROR, can't call user-defined function from global scope
+
+void main()
+{
+    int g();    // okay
+    g();
+
+    float sin; // okay
+    sin;
+    sin(0.7);  // okay
+    f(1,2,3);
+
+    float f;
+    f = 3.0;
+
+    gl_Position = vec4(f);
+
+    for (int f = 0; f < 10; ++f)
+        ++f;
+
+    int x = 1;
+    { 
+        float x = 2.0, /* 2nd x visible here */ y = x; // y is initialized to 2
+        int z = z; // ERROR: z not previously defined.
+    }
+    {
+        int x = x; // x is initialized to '1'
+    }
+
+    struct S 
+    { 
+        int x; 
+    };
+    {
+        S S = S(0); // 'S' is only visible as a struct and constructor 
+        S.x;        // 'S' is now visible as a variable
+    }
+
+    int degrees;
+    degrees(3.2);
+}

+ 238 - 0
3rdparty/glslang/Test/120.frag

@@ -0,0 +1,238 @@
+#version 120
+
+float lowp;
+float mediump;
+float highp;
+
+float precision;
+
+in vec4 i;
+out vec4 o;
+
+uniform sampler2D s2D;
+centroid varying vec2 centTexCoord;
+
+uniform mat4x2 m;
+
+struct s {
+    float f;
+};
+
+void main()
+{
+    mat2x3 m23 = mat2x3(m);
+
+    int a;
+    bool b;
+    s sv = s(a);
+    float[2] ia = float[2](3, i.y);
+    float f1 = 1;
+    float f = a;
+    f = a;
+    ivec3 iv3;
+    vec3 v3 = iv3;
+    f = f + a;
+    f = a - f;
+    f += a;
+    f = a - f;
+    v3 *= iv3;
+    v3 = iv3 / 2.0f;
+    v3 = 3.0 * iv3;
+    v3 = 2 * v3;
+    v3 = v3 - 2;
+    if (f <  a ||
+        a <= f ||
+        f >  a ||
+        f >= a ||
+        a == f ||
+        f != a);
+    f = b ? a : f;
+    f = b ? f : a;
+    f = b ? a : a;
+    s news = sv;
+    
+    i.xy + i.xyz;      // ERROR
+    m * i.xyz;         // ERROR
+    m + i;             // ERROR
+    int aoeu = 1.0;    // ERROR
+    f = b;             // ERROR
+    f = a + b;         // ERROR
+    f = b * a;         // ERROR
+    b = a;             // ERROR
+    b = b + f;         // ERROR
+    f |= b;            // ERROR
+
+    gl_FragColor = texture2D(s2D, centTexCoord);
+
+    float flat;
+    float smooth;
+    float noperspective;
+    float uvec2;
+    float uvec3;
+    float uvec4;
+    //packed;     // ERROR, reserved word
+
+    {
+        mat4 m;
+        vec4 v;
+        bool b;
+        gl_FragColor += b ? v : m;  // ERROR, types don't match around ":"
+    }
+
+    gl_FragColor.xr;    // ERROR, swizzlers not from same field space
+    gl_FragColor.xyxyx.xy; // ERROR, cannot make a vec5, even temporarily
+    centTexCoord.z;     // ERROR, swizzler out of range
+    (a,b) = true;       // ERROR, not an l-value
+}
+
+float imageBuffer;
+float uimage2DRect;
+
+int main() {}           // ERROR
+void main(int a) {}     // ERROR
+
+const int a;            // ERROR
+
+int foo(in float a);
+int foo(out float a)    // ERROR
+{
+    return 3.2;         // ERROR
+    foo(a);             // ERROR
+}
+
+bool gen(vec3 v)
+{
+    if (abs(v[0]) < 1e-4F && abs(v[1]) < 1e-4)
+        return true;
+}
+
+void v1()
+{
+}
+
+void v2()
+{
+    return v1();  // ERROR, no expression allowed, even though void
+}
+
+void atest()
+{
+    vec4 v = gl_TexCoord[1];
+    v += gl_TexCoord[3];
+}
+
+varying vec4 gl_TexCoord[6];  // okay, assigning a size
+varying vec4 gl_TexCoord[5];  // ERROR, changing size
+
+mat2x2 m22;
+mat2x3 m23;
+mat2x4 m24;
+
+mat3x2 m32;
+mat3x3 m33;
+mat3x4 m34;
+
+mat4x2 m42;
+mat4x3 m43;
+mat4x4 m44;
+
+void foo123()
+{
+    mat2 r2 = matrixCompMult(m22, m22);
+    mat3 r3 = matrixCompMult(m33, m33);
+    mat4 r4 = matrixCompMult(m44, m44);
+
+    mat2x3 r23 = matrixCompMult(m23, m23);
+    mat2x4 r24 = matrixCompMult(m24, m24);
+    mat3x2 r32 = matrixCompMult(m32, m32);
+    mat3x4 r34 = matrixCompMult(m34, m34);
+    mat4x2 r42 = matrixCompMult(m42, m42);
+    mat4x3 r43 = matrixCompMult(m43, m43);
+
+    mat3x2 rfoo1 = matrixCompMult(m23, m32);  // ERROR
+    mat3x4 rfoo2 = matrixCompMult(m34, m44);  // ERROR    
+}
+
+void matConst()
+{
+    vec2 v2;
+    vec3 v3;
+    mat4 m4b1 = mat4(v2, v3);                      // ERROR, not enough
+    mat4 m4b2 = mat4(v2, v3, v3, v3, v3, v2, v2);  // ERROR, too much
+    mat4 m4g = mat4(v2, v3, v3, v3, v3, v3);
+    mat4 m4 = mat4(v2, v3, v3, v3, v3, v2);
+    mat3 m3 = mat3(m4);
+    mat3 m3b1 = mat3(m4, v2);                      // ERROR, extra arg
+    mat3 m3b2 = mat3(m4, m4);                      // ERROR, extra arg
+    mat3x2 m32 = mat3x2(m4);
+    mat4 m4c = mat4(m32);
+    mat3 m3s = mat3(v2.x);
+
+    mat3 m3a1[2] = mat3[2](m3s, m3s);
+    mat3 m3a2[2] = mat3[2](m3s, m3s, m3s);         // ERROR, too many args
+}
+
+uniform sampler3D s3D;
+uniform sampler1D s1D;
+uniform sampler2DShadow s2DS;
+
+void foo2323()
+{
+    vec4 v;
+    vec2 v2;
+    float f;
+    v = texture2DLod(s2D, v2, f);    // ERROR
+    v = texture3DProjLod(s3D, v, f); // ERROR
+    v = texture1DProjLod(s1D, v, f); // ERROR
+    v = shadow2DProjLod(s2DS, v, f); // ERROR
+
+    v = texture1DGradARB(s1D, f, f, f);         // ERROR
+    v = texture2DProjGradARB(s2D, v, v2, v2);   // ERROR
+    v = shadow2DProjGradARB(s2DS, v, v2, v2);   // ERROR
+}
+
+#extension GL_ARB_shader_texture_lod : require
+
+void foo2324()
+{
+    vec4 v;
+    vec2 v2;
+    float f;
+    v = texture2DLod(s2D, v2, f);
+    v = texture3DProjLod(s3D, v, f);
+    v = texture1DProjLod(s1D, v, f);
+    v = shadow2DProjLod(s2DS, v, f);
+
+    v = texture1DGradARB(s1D, f, f, f);
+    v = texture2DProjGradARB(s2D, v, v2, v2);
+    v = shadow2DProjGradARB(s2DS, v, v2, v2);
+    v = shadow2DRectProjGradARB(s2DS, v, v2, v2);  // ERROR
+}
+
+uniform sampler2DRect s2DRbad;  // ERROR
+
+void foo121111()
+{
+    vec2 v2;
+    vec4 v = texture2DRect(s2DRbad, v2);
+}
+
+#extension GL_ARB_texture_rectangle : enable
+
+uniform sampler2DRect s2DR;
+uniform sampler2DRectShadow s2DRS;
+
+void foo12111()
+{
+    vec2 v2;
+    vec3 v3;
+    vec4 v4;
+    vec4 v;
+    v = texture2DRect(s2DR, v2);
+    v = texture2DRectProj(s2DR, v3);
+    v = texture2DRectProj(s2DR, v4);
+    v = shadow2DRect(s2DRS, v3);
+    v = shadow2DRectProj(s2DRS, v4);
+
+    v = shadow2DRectProjGradARB(s2DRS, v, v2, v2);
+}

+ 203 - 0
3rdparty/glslang/Test/120.vert

@@ -0,0 +1,203 @@
+#version 120
+
+in vec4 i;                // ERROR
+out vec4 o;               // ERROR
+
+attribute vec2 attv2;
+attribute vec4 attv4;
+uniform sampler2D s2D;
+invariant varying vec2 centTexCoord;
+invariant gl_Position;
+centroid gl_Position;     // ERROR
+centroid centroid foo;    // ERROR
+invariant gl_Position, gl_PointSize;
+
+void main()
+{
+    centTexCoord = attv2; 
+    gl_Position = attv4;
+
+    gl_ClipVertex = attv4;
+    gl_ClipDistance[1] = 0.2;  // ERROR
+
+    vec3[12] a;
+    vec4[a.length()] b;
+    gl_Position = b[b.length()-1];
+
+    float f[];
+    int a1 = f.length();  // ERROR
+    float f[7];
+    int aa = f.length();
+    int a2 = f.length;    // ERROR
+    int a3 = f.length(a); // ERROR
+    int a4 = f.flizbit;   // ERROR
+    int a4 = f.flizbit(); // ERROR
+    float md[2][4];       // ERROR
+    float[2] md2[4];      // ERROR
+    float[2][4] md3;      // ERROR
+    float md5, md6[2][3]; // ERROR
+    float[2] md4, md7[4]; // ERROR
+    float md9[2][3] = float[2][3](1, 2, 3, 4, 5, 6);  // ERROR
+    float md10, md11[2][3] = float[2][3](1, 2, 3, 4, 5, 6);  // ERROR
+
+    gl_PointSize = 3.8;
+}
+
+uniform float initted = 3.4;   // okay
+
+const float concall = sin(0.3);
+
+int[2][3] foo(                 // ERROR
+              float[2][3] a,   // ERROR
+              float[2] b[3],   // ERROR
+              float c[2][3]);  // ERROR
+
+int overloadA(in float f);
+int overloadA(out float f);        // ERROR, different qualifiers
+float overloadA(float);            // ERROR, different return value for same signature
+float overloadA(out float f, int);
+float overloadA(int i);
+
+void overloadB(float, const in float) { }
+
+vec2 overloadC(int, int);
+vec2 overloadC(const in int, float);
+vec2 overloadC(float, int);
+vec2 overloadC(vec2, vec2);
+
+vec3 overloadD(int, float);
+vec3 overloadD(float, in int);
+
+vec3 overloadE(float[2]);
+vec3 overloadE(mat2 m);
+vec3 overloadE(vec2 v);
+
+vec3 overloadF(int);
+vec3 overloadF(float);
+
+void foo()
+{
+    float f;
+    int i;
+
+    overloadB(f, f);
+    overloadB(f, 2);
+    overloadB(1, i);
+
+    overloadC(1);    // ERROR
+    overloadC(1, i);
+    overloadC(vec2(1), vec2(2));
+    overloadC(f, 3.0);           // ERROR, no way
+    overloadC(ivec2(1), vec2(2));
+
+    overloadD(i, f);
+    overloadD(f, i);
+    overloadD(i, i);   // ERROR, ambiguous
+
+    int overloadB;     // hiding
+    overloadB(1, i);   // ERROR
+
+    sin(1);
+    texture2D(s2D, ivec2(0));
+    clamp(attv4, 0, 1);
+    clamp(ivec4(attv4), 0, 1);
+
+    int a[2];
+    overloadC(a, 3); // ERROR
+    overloadE(a);    // ERROR
+    overloadE(3.3);  // ERROR
+    overloadE(vec2(3.3));
+    overloadE(mat2(0.5));
+    overloadE(ivec4(1)); // ERROR
+    overloadE(ivec2(1));
+
+    float b[2];
+    overloadE(b);
+    
+    overloadF(1, 1); // ERROR
+    overloadF(1);
+}
+
+varying vec4 gl_TexCoord[35]; // ERROR, size too big
+
+// tests for output conversions
+void outFun(in float, out ivec2, in int, out float);
+int outFunRet(in float, out int, const in int, out ivec4);
+ivec2 outFunRet(in float, out ivec4, in int, out ivec4);
+
+void foo2()
+{
+    vec2 v2;
+    vec4 v4;
+    float f;
+    int i;
+
+    outFun(i, v2, i, f);
+    outFunRet(i, f, i, v4);
+    float ret = outFunRet(i, f, i, v4);
+    vec2 ret2 = outFunRet(i, v4, i, v4);
+    bool b = any(lessThan(v4, attv4));  // tests aggregate arg to unary built-in 
+}
+
+void noise()
+{
+    float f1 = noise1(1.0);
+    vec2 f2 = noise2(vec2(1.0));
+    vec3 f3 = noise3(vec3(1.0));
+    vec4 f4 = noise4(vec4(1.0));
+}
+
+// version 130 features
+
+uniform int c;
+
+attribute ivec2 x;
+attribute vec2 v2a;
+attribute float c1D;
+attribute vec2  c2D;
+attribute vec3  c3D;
+
+uniform vec4 v4;
+
+void foo213()
+{
+    float f = 3;
+    switch (c) {         // ERRORs...
+    case 1:              
+        f = sin(f);
+        break;
+    case 2:
+        f = f * f;
+    default:
+        f = 3.0;
+    }
+
+    int i;          
+    i << 3 | 0x8A >> 1 & 0xFF;      // ERRORs...
+
+    vec3 modfOut, modfIn;
+    vec3 v11 = modf(modfIn, modfOut); // ERRORS...
+    float t = trunc(f);
+    vec2 v12 = round(v2a);
+    vec2 v13 = roundEven(v2a);
+    bvec2 b10 = isnan(v2a);
+    bvec4 b11 = isinf(v4);
+
+    sinh(c1D) +                      // ERRORS...
+    cosh(c1D) * tanh(c2D);
+    asinh(c4D) + acosh(c4D);
+    atanh(c3D);
+
+    int id = gl_VertexID;            // ERROR
+    gl_ClipDistance[1] = 0.3;        // ERROR
+}
+
+int gl_ModelViewMatrix[] = 0;
+
+// token pasting (ERRORS...)
+
+#define mac abc##def
+int mac;
+
+#define macr(A,B) A ## B
+int macr(qrs,tuv);

+ 169 - 0
3rdparty/glslang/Test/130.frag

@@ -0,0 +1,169 @@
+#version 130
+
+lowp vec3 a;
+mediump float b;
+highp int c;
+
+precision highp float;
+
+in vec4 i;
+out vec4 o;
+
+flat in float fflat;
+smooth in float fsmooth;
+noperspective in float fnop;
+
+void main()
+{
+    float clip = gl_ClipDistance[3];
+}
+
+uniform samplerCube sampC;
+
+void foo()
+{
+    vec4 s = textureGather(sampC, vec3(0.2));
+}
+
+#extension GL_ARB_texture_gather : enable
+
+void bar()
+{
+    vec4 s = textureGather(sampC, vec3(0.2));
+}
+
+flat in vec3 gl_Color;     // ERROR, type
+in vec4 gl_Color;
+flat in vec4 gl_Color;
+flat in vec4 gl_Color[2];  // ERROR, array 
+vec4 gl_Color;             // ERROR, storage
+
+#extension GL_ARB_texture_gather : warn
+
+void bar2()
+{
+    vec4 s = textureGather(sampC, vec3(0.2));
+
+    uvec3 uv3;
+    bvec3 b3;
+    b3 = lessThan(uv3, uv3);
+    b3 = equal(uv3, uv3);
+    const bvec2 bl1 = greaterThanEqual(uvec2(2, 3), uvec2(3,3));
+    const bvec2 bl2 = equal(uvec2(2, 3), uvec2(3,3));
+    const bvec2 bl3 = equal(bl1, bl2);  // yes, equal
+    int a1[int(bl3.x)];
+    int a2[int(bl3.y)];
+    a1[0];  // size 1
+    a2[0];  // size 1
+    const bvec4 bl4 = notEqual(greaterThan(uvec4(1,2,3,4), uvec4(0,2,0,6)), lessThanEqual(uvec4(7,8,9,10), uvec4(6, 8, 0, 11)));  // compare (t,f,t,f) with (f,t,f,t)
+    int a3[int(bl4.x)+int(bl4.y)+int(bl4.z)+int(bl4.w)];
+    a3[3];  // size 4
+    b3 != b3;
+    b3 < b3;                   // ERROR
+    uv3 > uv3;                 // ERROR
+    uvec2(2, 3) >= uvec2(3,3); // ERROR
+    int(bl4) <= int(bl4);      // true
+    int(bl4.x) > int(bl4.y);   // false
+}
+
+#extension GL_ARB_texture_gather : enable
+#extension GL_ARB_texture_rectangle : enable
+
+uniform sampler2D samp2D;
+uniform sampler2DShadow samp2DS;
+uniform sampler2DRect samp2DR;
+uniform sampler2DArray samp2DA;
+
+void bar23()
+{
+    vec4 s;
+    s = textureGatherOffset(sampC, vec3(0.3), ivec2(1));        // ERROR
+    s = textureGatherOffset(samp2DR, vec2(0.3), ivec2(1));      // ERROR
+    s = textureGatherOffset(samp2D, vec2(0.3), ivec2(1));
+    s = textureGatherOffset(samp2DA, vec3(0.3), ivec2(1));
+    s = textureGatherOffset(samp2DS, vec2(0.3), 1.3, ivec2(1)); // ERROR
+    s = textureGatherOffset(samp2D, vec2(0.3), ivec2(1), 2);    // ERROR
+}
+
+#extension GL_ARB_gpu_shader5 : enable
+
+void bar234()
+{
+    vec4 s;
+    s = textureGatherOffset(samp2D, vec2(0.3), ivec2(1));
+    s = textureGatherOffset(samp2DA, vec3(0.3), ivec2(1));
+    s = textureGatherOffset(samp2DR, vec2(0.3), ivec2(1));
+    s = textureGatherOffset(samp2DS, vec2(0.3), 1.3, ivec2(1));
+    s = textureGatherOffset(samp2D, vec2(0.3), ivec2(1), 2);
+}
+
+#extension GL_ARB_texture_cube_map_array : enable
+
+uniform  samplerCubeArray Sca;
+uniform isamplerCubeArray Isca;
+uniform usamplerCubeArray Usca;
+uniform samplerCubeArrayShadow Scas;
+
+void bar235()
+{
+    ivec3 a = textureSize(Sca, 3);
+    vec4 b = texture(Sca, i);
+    ivec4 c = texture(Isca, i, 0.7);
+    uvec4 d = texture(Usca, i);
+    
+    b = textureLod(Sca, i, 1.7);
+    a = textureSize(Scas, a.x);
+    float f = texture(Scas, i, b.y);
+    c = textureGrad(Isca, i, vec3(0.1), vec3(0.2));
+}
+
+int \
+    x;  // ERROR until 420pack is turned on
+
+#extension GL_ARB_shading_language_420pack : enable
+
+const int ai[3] = { 10, 23, 32 };
+layout(binding=0) uniform blockname { int a; } instanceName;  // ERROR
+uniform layout(binding=0) sampler2D bounds;
+
+void bar23444()
+{
+    mat4x3 m43;  \
+    float a1 = m43[3].y;
+    vec3 v3;
+    int a2 = m43.length();
+    a2 += m43[1].length();
+    a2 += v3.length();
+    const float b = 2 * a1;
+    a.x = gl_MinProgramTexelOffset + gl_MaxProgramTexelOffset;
+    bool boolb;
+    boolb.length();     // ERROR
+    m43[3][1].length(); // ERROR
+    v3.length;          // ERROR
+    v3.length(b);       // ERROR
+}
+
+in float gl_FogFragCoord;
+
+#extension GL_ARB_separate_shader_objects : enable
+
+in float gl_FogFragCoord;
+in int gl_FogFragCoord;    // ERROR
+
+layout(early_fragment_tests) in;         // ERROR
+layout(r32i) uniform iimage2D iimg2Dbad; // ERROR
+
+#extension GL_ARB_shader_image_load_store : enable
+
+layout(early_fragment_tests) in;
+
+layout(r32i) uniform iimage2D iimg2D;
+
+void qux2()
+{
+    int i;
+    imageAtomicCompSwap(iimg2D, ivec2(i,i), i, i);
+    ivec4 pos = imageLoad(iimg2D, ivec2(i,i));
+}
+
+layout(early_fragment_tests) out;         // ERROR

+ 78 - 0
3rdparty/glslang/Test/130.vert

@@ -0,0 +1,78 @@
+#version 130
+
+uniform int c;
+uniform usampler2D us2D;
+
+in ivec2 x;
+in vec2 v2a;
+in float c1D;
+in vec2  c2D;
+in vec3  c3D;
+smooth vec4  c4D;  // ??
+
+uniform vec4 v4;
+
+void main()
+{
+    float f = 3;
+    switch (c) {     // full switch testing in switch.frag
+    case 1:
+        f = sin(f);
+        break;
+    case 2:
+        f = f * f;
+    default:
+        f = 3.0;
+    }
+
+    uint i;
+    i = texture(us2D, x).w;          // full uint testing in uint.frag
+    i << 3u | 0x8Au >> 1u & 0xFFu;
+
+    vec3 modfOut, modfIn;
+    vec3 v11 = modf(modfIn, modfOut);
+    float t = trunc(f);
+    vec2 v12 = round(v2a);
+    vec2 v13 = roundEven(v2a);
+    bvec2 b10 = isnan(v2a);
+    bvec4 b11 = isinf(v4);
+
+    sinh(c1D) +
+    cosh(c1D) * tanh(c2D);
+    asinh(c4D) + acosh(c4D);
+    atanh(c3D);
+
+    int id = gl_VertexID;
+    gl_ClipDistance[1] = 0.3;
+}
+
+// version 140 features
+
+//uniform isamplerBuffer sbuf;
+
+//layout(std140) uniform blockName {
+//    int anonMem;
+//};
+
+void foo88()
+{
+    int id = gl_InstanceID;    // ERROR
+    //id += anonMem;
+    id += texelFetch(id, 8);
+
+    gl_ClipVertex;         // these are all present...
+    gl_Color;
+    gl_LightSource[0];
+    gl_DepthRange.far;
+    gl_TexCoord;
+    gl_FogFragCoord;
+    gl_FrontColor;
+}
+
+// token pasting
+
+#define mac abc##def
+int mac;
+
+#define macr(A,B) A##B
+int macr(qrs,tuv);

+ 53 - 0
3rdparty/glslang/Test/140.frag

@@ -0,0 +1,53 @@
+#version 140
+
+varying vec4 v;
+
+in vec4 i;
+out vec4 o;
+
+in float gl_ClipDistance[5];
+
+void main()
+{
+    float clip = gl_ClipDistance[2];
+}
+#ifdef GL_ES
+#error GL_ES is set
+#else
+#error GL_ES is not set
+#endif
+
+in struct S { float f; } s; // ERROR
+
+float patch = 3.1;
+
+layout(location=3) in vec4 vl;  // ERROR
+
+layout(location = 3) out vec4 factorBad;  // ERROR
+
+#extension GL_ARB_explicit_attrib_location : enable
+
+layout(location = 5) out vec4 factor;
+
+#extension GL_ARB_separate_shader_objects : enable
+
+layout(location=4) in vec4 vl2;
+
+float fooi();
+
+void foo()
+{
+    vec2 r1 = modf(v.xy, v.zw);  // ERROR, v.zw not l-value
+    vec2 r2 = modf(o.xy, o.zw);
+    o.z = fooi();
+}
+
+// Test extra-function initializers
+
+float i1 = gl_FrontFacing ? -2.0 : 2.0;
+float i2 = 102;
+
+float fooi()
+{
+    return i1 + i2;
+}

+ 59 - 0
3rdparty/glslang/Test/140.vert

@@ -0,0 +1,59 @@
+#version 140
+
+uniform isamplerBuffer sbuf;
+
+layout(std140) uniform blockName {
+    int anonMem;
+};
+
+void main()
+{
+    int id = gl_InstanceID;
+    id += anonMem;
+    id += texelFetch(sbuf, 8).w;
+    gl_ClipVertex;      // could be ERROR, but compiling under compatibility profile
+    gl_Color;           // could be ERROR, but compiling under compatibility profile
+    gl_LightSource[0];  // could be ERROR, but compiling under compatibility profile
+    gl_DepthRange.far;
+    gl_TexCoord;        // could be ERROR, but compiling under compatibility profile
+    gl_FogFragCoord;    // could be ERROR, but compiling under compatibility profile
+    gl_FrontColor;      // could be ERROR, but compiling under compatibility profile
+}
+
+out vec4 gl_Position;  // ERROR
+
+layout(location = 9) in vec4 locBad;  // ERROR
+
+#extension GL_ARB_explicit_attrib_location : enable
+
+layout(location = 9) in vec4 loc;
+
+#extension GL_ARB_separate_shader_objects : enable
+
+out vec4 gl_Position;
+in vec4 gl_Position;   // ERROR
+out vec3 gl_Position;  // ERROR
+
+out float gl_PointSize;
+out vec4 gl_ClipVertex;
+out float gl_FogFragCoord;
+
+uniform sampler2DRect s2dr;
+uniform sampler2DRectShadow s2drs;
+in ivec2 itloc2;
+in vec2 tloc2;
+in vec3 tloc3;
+in vec4 tloc4;
+
+void foo()
+{
+    vec4 v = texelFetch(s2dr, itloc2);
+    v += texelFetch(s2dr, itloc2, 0.2);     // ERROR, no lod
+    v += texture(s2dr, tloc2);
+    v += texture(s2dr, tloc2, 0.3);         // ERROR, no bias
+    v += texture(s2drs, tloc3);
+    v += textureProj(s2dr, tloc3);
+    v += textureProj(s2dr, tloc4);
+    v += textureProjGradOffset(s2dr, tloc4, ivec2(0.0), ivec2(0.0), ivec2(1,2));
+    v += textureProjGradOffset(s2drs, tloc4, ivec2(0.0), ivec2(0.0), ivec2(1,2));
+}

+ 50 - 0
3rdparty/glslang/Test/150.frag

@@ -0,0 +1,50 @@
+#version 150 core
+
+in vec4 gl_FragCoord;
+layout(origin_upper_left, pixel_center_integer) in vec4 gl_FragCoord;  // ERROR
+layout(pixel_center_integer) in vec4 gl_FragCoord;  // ERROR
+layout(origin_upper_left) in vec4 foo;  // ERROR
+layout(origin_upper_left, pixel_center_integer) in vec4 gl_FragCoord;
+
+void main()
+{
+    vec4 c = gl_FragCoord;
+}
+
+layout(origin_upper_left, pixel_center_integer) in vec4 gl_FragCoord;  // ERROR, declared after use
+
+in struct S { float f; } s;
+
+float patch = 3.1;
+
+uniform sampler2DMS sms;
+uniform isampler2DMS isms;
+uniform usampler2DMS usms;
+uniform sampler2DMSArray smsa;
+uniform isampler2DMSArray ismsa;
+uniform usampler2DMSArray usmsa;
+
+flat in ivec2 p2;
+flat in ivec3 p3;
+flat in int samp;
+
+void barWxyz()
+{
+    ivec2 t11 = textureSize( sms);
+    ivec2 t12 = textureSize(isms);
+    ivec2 t13 = textureSize(usms);
+    ivec3 t21 = textureSize( smsa);
+    ivec3 t22 = textureSize(ismsa);
+    ivec3 t23 = textureSize(usmsa);
+     vec4 t31 = texelFetch( sms, p2, samp);
+    ivec4 t32 = texelFetch(isms, p2, samp);
+    uvec4 t33 = texelFetch(usms, p2, 3);
+     vec4 t41 = texelFetch( smsa, p3, samp);
+    ivec4 t42 = texelFetch(ismsa, ivec3(2), samp);
+    uvec4 t43 = texelFetch(usmsa, p3, samp);
+}
+
+int primitiveID()
+{
+   return gl_PrimitiveID;
+}

+ 139 - 0
3rdparty/glslang/Test/150.geom

@@ -0,0 +1,139 @@
+#version 150 core
+
+in fromVertex {
+    in vec3 color;
+} fromV[];
+
+out toFragment {
+    out vec3 color;
+} toF;
+
+out fromVertex {  // okay to reuse a block name for another block name
+    vec3 color;
+};
+
+out fooB {
+    vec2 color;
+} fromVertex;     // ERROR, cannot reuse block name as block instance
+
+int fromVertex;   // ERROR, cannot reuse a block name for something else
+
+out fooC {
+    vec2 color;
+} fooC;           // ERROR, cannot have same name for block and instance name
+
+void main()
+{
+    EmitVertex();
+    EndPrimitive();
+    EmitStreamVertex(1);    // ERROR
+    EndStreamPrimitive(0);  // ERROR
+
+    color = fromV[0].color;
+    gl_ClipDistance[3] = gl_in[1].gl_ClipDistance[2];
+    gl_Position = gl_in[0].gl_Position;
+    gl_PointSize = gl_in[3].gl_PointSize;
+    gl_PrimitiveID = gl_PrimitiveIDIn;
+    gl_Layer = 2;
+}
+
+out vec4 ov0;  // stream should be 0
+layout(stream = 4) out vec4 ov4;
+out vec4 o1v0;  // stream should be 0
+
+layout(stream = 3) uniform;        // ERROR
+layout(stream = 3) in;             // ERROR
+layout(stream = 3) uniform int ua; // ERROR
+layout(stream = 3) uniform ubb { int ua; } ibb; // ERROR
+
+layout(line_strip, points, triangle_strip, stream = 3, points, triangle_strip) out;  // just means "stream = 3, triangle_strip"
+layout(stream = 3, triangle_strip) out;
+out vec4 ov3;  // stream should be 3
+
+layout(stream = 6) out ooutb { vec4 a; } ouuaa6;
+
+layout(stream = 6) out ooutb2 {
+    layout(stream = 6) vec4 a;
+} ouua6;
+
+layout(stream = 7) out ooutb3 {
+    layout(stream = 6) vec4 a;  // ERROR
+} ouua7;
+
+out vec4 ov2s3;  // stream should be 3
+
+layout(max_vertices = 200) out;
+layout(max_vertices = 300) out;   // ERROR, too big
+void foo(layout(max_vertices = 4) int a)  // ERROR
+{
+    ouuaa6.a = vec4(1.0);
+}
+
+layout(line_strip, points, triangle_strip, stream = 3, points) out;  // ERROR, changing output primitive
+layout(line_strip, points, stream = 3) out; // ERROR, changing output primitive
+layout(triangle_strip) in; // ERROR, not an input primitive
+layout(triangle_strip) uniform; // ERROR
+layout(triangle_strip) out vec4 badv4;  // ERROR, not on a variable
+layout(triangle_strip) in vec4 bad2v4[];  // ERROR, not on a variable or input
+layout(invocations = 3) out outbn { int a; }; // 2 ERROR, not on a block, not until 4.0
+out outbn2 {
+    layout(invocations = 3)  int a; // 2 ERRORs, not on a block member, not until 4.0
+    layout(max_vertices = 3) int b; // ERROR, not on a block member
+    layout(triangle_strip)   int c; // ERROR, not on a block member
+} outbi;
+
+layout(lines) out;  // ERROR, not on output
+layout(lines_adjacency) in;
+layout(triangles) in;             // ERROR, can't change it
+layout(triangles_adjacency) in;   // ERROR, can't change it
+layout(invocations = 4) in;       // ERROR, not until 4.0
+
+in inbn {
+    layout(stream = 2) int a;     // ERROR, stream on input
+} inbi[];
+
+in sameName {
+    int a15;
+} insn[];
+
+out sameName {
+    float f15;
+};
+
+uniform sameName {
+    bool b15;
+};
+
+float summ = gl_MaxVertexAttribs +
+             gl_MaxVertexUniformComponents +
+             gl_MaxVaryingFloats +
+             gl_MaxVaryingComponents +
+             gl_MaxVertexOutputComponents  +
+             gl_MaxGeometryInputComponents  +
+             gl_MaxGeometryOutputComponents  +
+             gl_MaxFragmentInputComponents  +
+             gl_MaxVertexTextureImageUnits +
+             gl_MaxCombinedTextureImageUnits +
+             gl_MaxTextureImageUnits +
+             gl_MaxFragmentUniformComponents +
+             gl_MaxDrawBuffers +
+             gl_MaxClipDistances  +
+             gl_MaxGeometryTextureImageUnits +
+             gl_MaxGeometryOutputVertices +
+             gl_MaxGeometryTotalOutputComponents  +
+             gl_MaxGeometryUniformComponents  +
+             gl_MaxGeometryVaryingComponents;
+
+void fooe1()
+{
+    gl_ViewportIndex = gl_MaxViewports - 1;
+}
+
+#extension GL_ARB_viewport_array : enable
+
+void fooe2()
+{
+    gl_ViewportIndex = gl_MaxViewports - 1;
+}
+
+out int gl_ViewportIndex;

+ 34 - 0
3rdparty/glslang/Test/150.tesc

@@ -0,0 +1,34 @@
+#version 150
+
+#extension GL_ARB_tessellation_shader : enable
+
+layout(vertices = 4) out;
+int outa[gl_out.length()];
+
+patch out vec4 patchOut;
+
+void main()
+{
+    barrier();
+
+    int a = gl_MaxTessControlInputComponents +
+            gl_MaxTessControlOutputComponents +
+            gl_MaxTessControlTextureImageUnits +
+            gl_MaxTessControlUniformComponents +
+            gl_MaxTessControlTotalOutputComponents;
+
+    vec4 p = gl_in[1].gl_Position;
+    float ps = gl_in[1].gl_PointSize;
+    float cd = gl_in[1].gl_ClipDistance[2];
+
+    int pvi = gl_PatchVerticesIn;
+    int pid = gl_PrimitiveID;
+    int iid = gl_InvocationID;
+
+    gl_out[gl_InvocationID].gl_Position = p;
+    gl_out[gl_InvocationID].gl_PointSize = ps;
+    gl_out[gl_InvocationID].gl_ClipDistance[1] = cd;
+
+    gl_TessLevelOuter[3] = 3.2;
+    gl_TessLevelInner[1] = 1.3;
+}

+ 35 - 0
3rdparty/glslang/Test/150.tese

@@ -0,0 +1,35 @@
+#version 150
+
+#extension GL_ARB_tessellation_shader : enable
+
+layout(quads, cw) in;
+layout(fractional_odd_spacing) in;    
+layout(point_mode) in;
+patch in vec4 patchIn;
+
+void main()
+{
+    barrier(); // ERROR
+
+    int a = gl_MaxTessEvaluationInputComponents +
+            gl_MaxTessEvaluationOutputComponents +
+            gl_MaxTessEvaluationTextureImageUnits +
+            gl_MaxTessEvaluationUniformComponents +
+            gl_MaxTessPatchComponents +
+            gl_MaxPatchVertices +
+            gl_MaxTessGenLevel;
+
+    vec4 p = gl_in[1].gl_Position;
+    float ps = gl_in[1].gl_PointSize;
+    float cd = gl_in[1].gl_ClipDistance[2];
+
+    int pvi = gl_PatchVerticesIn;
+    int pid = gl_PrimitiveID;
+    vec3 tc = gl_TessCoord;
+    float tlo = gl_TessLevelOuter[3];
+    float tli = gl_TessLevelInner[1];
+
+    gl_Position = p;
+    gl_PointSize = ps;
+    gl_ClipDistance[2] = cd;
+}

+ 25 - 0
3rdparty/glslang/Test/150.vert

@@ -0,0 +1,25 @@
+#version 150 core
+
+in vec4 iv4;
+
+uniform float ps;
+
+invariant gl_Position;
+
+void main()
+{
+    gl_Position = iv4;
+    gl_PointSize = ps;
+    gl_ClipDistance[2] = iv4.x;
+    gl_ClipVertex = iv4;
+}
+
+out float gl_ClipDistance[4];
+
+uniform foob {
+    int a[];
+};
+int a[5]; // ERROR, resizing user-block member
+
+#line 3000
+#error line of this error should be 3001

+ 161 - 0
3rdparty/glslang/Test/300.frag

@@ -0,0 +1,161 @@
+#version 300 es
+void nodef1(float f); // ERROR, no default precision
+uniform sampler2D s2D;
+uniform lowp sampler3D s3D;
+uniform samplerCube sCube;
+uniform lowp samplerCubeShadow sCubeShadow;
+uniform lowp sampler2DShadow s2DShadow;
+uniform lowp sampler2DArray s2DArray;
+uniform lowp sampler2DArrayShadow s2DArrayShadow;
+
+uniform lowp isampler2D is2D;
+uniform lowp isampler3D is3D;
+uniform lowp isamplerCube isCube;
+uniform lowp isampler2DArray is2DArray;
+
+uniform lowp usampler2D us2D;
+uniform lowp usampler3D us3D;
+uniform lowp usamplerCube usCube;
+uniform lowp usampler2DArray us2DArray;
+precision lowp float;
+in float c1D;
+in vec2  c2D;
+in vec3  c3D;
+smooth vec4  c4D;
+
+flat in int   ic1D;
+flat in ivec2 ic2D;
+flat in ivec3 ic3D;
+flat in ivec4 ic4D;
+noperspective in vec4 badv; // ERROR
+in sampler2D bads;          // ERROR
+precision lowp uint;        // ERROR
+
+struct s {
+    int i;
+    sampler2D s;
+};
+
+in s badout;               // ERROR, can't contain a sampler
+                           // ERROR, can't have int in struct without flat
+struct S2 {
+    vec3 c;
+    float f;
+};
+
+in S2 s2;
+
+out vec3 sc;
+out float sf;
+
+uniform sampler2D arrayedSampler[5];
+
+void main()
+{
+    float f;
+    vec4 v;
+    v = texture(s2D, c2D);
+    v = textureProj(s3D, c4D);
+    v = textureLod(s2DArray, c3D, 1.2);
+    f = textureOffset(s2DShadow, c3D, ic2D, c1D);  // ERROR, offset argument not constant
+    v = texelFetch(s3D, ic3D, ic1D);
+    v = texelFetchOffset(arrayedSampler[2], ic2D, 4, ic2D);   // ERROR, offset argument not constant
+    f = textureLodOffset(s2DShadow, c3D, c1D, ic2D);
+    v = textureProjLodOffset(s2D, c3D, c1D, ic2D);
+    v = textureGrad(sCube, c3D, c3D, c3D);
+    f = textureGradOffset(s2DArrayShadow, c4D, c2D, c2D, ic2D);
+    v = textureProjGrad(s3D, c4D, c3D, c3D);
+    v = textureProjGradOffset(s2D, c3D, c2D, c2D, ic2D);
+    v = texture(arrayedSampler[ic1D], c2D);                 // ERROR
+
+    ivec4 iv;
+    iv = texture(is2D, c2D);
+    iv = textureProjOffset(is2D, c4D, ic2D);
+    iv = textureProjLod(is2D, c3D, c1D);
+    iv = textureProjGrad(is2D, c3D, c2D, c2D);
+    iv = texture(is3D, c3D, 4.2);
+    iv = textureLod(isCube, c3D, c1D);
+    iv = texelFetch(is2DArray, ic3D, ic1D);
+
+    iv.xy = textureSize(sCubeShadow, 2);
+
+    float precise;
+    double boo;       // ERROR
+    dvec2 boo2;       // ERROR
+    dvec3 boo3;       // ERROR
+    dvec4 boo4;       // ERROR
+
+    f += gl_FragCoord.y;
+    gl_FragDepth = f;
+
+    sc = s2.c;
+    sf = s2.f;
+
+    sinh(c1D) +
+    cosh(c1D) * tanh(c2D);
+    asinh(c4D) + acosh(c4D);
+    atanh(c3D);
+}
+
+uniform multi {
+    int[2] a[3];      // ERROR
+    int[2][3] b;      // ERROR
+    int c[2][3];      // ERROR
+} multiInst[2][3];    // ERROR
+
+out vec4 colors[4];
+
+void foo()
+{
+    colors[2] = c4D;
+    colors[ic1D] = c4D;  // ERROR
+}
+
+uniform s st1;
+uniform s st2;
+
+void foo13(s inSt2)
+{
+    if (st1 == st2);  // ERROR
+    if (st1 != st2);  // ERROR
+    st1.s == st2.s;   // ERROR
+    inSt2 = st1;      // ERROR
+    inSt2 == st1;     // ERROR
+}
+
+void foo23()
+{
+    textureOffset(s2DShadow, c3D, ivec2(-8, 7), c1D);
+    textureOffset(s2DShadow, c3D, ivec2(-9, 8), c1D);
+}
+
+void foo324(void)
+{
+    float p = pow(3.2, 4.6);
+    p += sin(0.4);
+    p += distance(vec2(10.0, 11.0), vec2(13.0, 15.0)); // 5
+    p += dot(vec3(2,3,5), vec3(-2,-1,4));              // 13
+    vec3 c3 = cross(vec3(3,-3,1), vec3(4,9,2));        // (-15, -2, 39)
+    c3 += faceforward(vec3(1,2,3), vec3(2,3,5), vec3(-2,-1,4));     // (-1,-2,-3)
+    c3 += faceforward(vec3(1,2,3), vec3(-2,-3,-5), vec3(-2,-1,4));  // (1,2,3)
+    vec2 c2 = reflect(vec2(1,3), vec2(0,1));           // (1,-3)
+    c2 += refract(vec2(1,3), vec2(0,1), 1.0);          // (1,-3)
+    c2 += refract(vec2(1,3), vec2(0,1), 3.0);
+    c2 += refract(vec2(1,0.1), vec2(0,1), 5.0);        // (0,0)
+    mat3x2 m32 = outerProduct(vec2(2,3), vec3(5,7,11));// rows: (10, 14, 22), (15, 21, 33)
+}
+
+uniform mediump;       // ERROR
+
+layout(early_fragment_tests) in;  // ERROR
+
+#ifndef GL_FRAGMENT_PRECISION_HIGH
+#error missing GL_FRAGMENT_PRECISION_HIGH
+#endif
+
+invariant in;                // ERROR
+invariant in vec4;           // ERROR
+invariant in vec4 fooinv;    // ERROR
+
+float imageBuffer;    // ERROR, reserved
+float uimage2DRect;   // ERROR, reserved

+ 187 - 0
3rdparty/glslang/Test/300.vert

@@ -0,0 +1,187 @@
+#version 300 es
+
+uniform mat4x3 m43;
+uniform mat3x3 m33;
+uniform mat4x4 m44;
+
+in vec3 v3;
+varying vec2 v2;               // ERROR, varying reserved
+in vec4 bad[10];               // ERROR, no arrayed inputs
+highp in vec4 badorder;        // ERROR, incorrect qualifier order
+out invariant vec4 badorder2;  // ERROR, incorrect qualifier order
+in centroid vec4 badorder4;    // ERROR, incorrect qualifier order
+out flat vec4 badorder3;       // ERROR, incorrect qualifier order
+void bar(in const float a);    // ERROR, incorrect qualifier order
+void bar2(highp in float b);   // ERROR, incorrect qualifier order
+smooth flat out vec4 rep;      // ERROR, replicating interpolation qualification
+centroid sample out vec4 rep2; // ERROR, replicating auxiliary qualification
+in uniform vec4 rep3;          // ERROR, replicating storage qualification
+
+struct S {
+    vec3 c;
+    float f;
+};
+
+out S s;
+
+void main()
+{
+    int id = gl_VertexID + gl_InstanceID;
+
+    int c0 = gl_MaxVertexAttribs;
+    int c1 = gl_MaxVertexUniformVectors;
+    int c2 = gl_MaxVertexOutputVectors;
+    int c3 = gl_MaxFragmentInputVectors;
+    int c4 = gl_MaxVertexTextureImageUnits;
+    int c5 = gl_MaxCombinedTextureImageUnits;
+    int c6 = gl_MaxTextureImageUnits;
+    int c7 = gl_MaxFragmentUniformVectors;
+    int c8 = gl_MaxDrawBuffers;
+    int c9 = gl_MinProgramTexelOffset;
+    int c10 = gl_MaxProgramTexelOffset;
+
+    mat3x4 tm = transpose(m43);
+    highp float dm = determinant(m44);
+    mat3x3 im = inverse(m33);
+
+    mat3x2 op = outerProduct(v2, v3);
+
+    gl_Position = m44[2];
+    gl_PointSize = v2.y;
+
+     s.c = v3;
+     s.f = dm;
+
+#ifdef GL_ES
+#error GL_ES is set
+#else
+#error GL_ES is not set
+#endif
+}
+
+float badsize[];    // ERROR
+float[] badsize2;   // ERROR
+uniform ub {
+    int a[];        // ERROR
+} ubInst[];         // ERROR
+void foo(int a[]);  // ERROR
+float okayA[] = float[](3.0f, 4.0F);  // Okay
+
+out vec3 newV;
+void newVFun()
+{
+    newV = v3;
+}
+
+invariant newV;  // ERROR, variable already used
+in vec4 invIn;
+invariant invIn; // ERROR, in v300
+out S s2;
+invariant s2;
+invariant out S s3;
+flat out int;
+
+uniform ub2 {
+    float f;
+} a;
+
+uniform ub2 {  // ERROR redeclaration of block name (same instance name)
+    float g;
+} a;
+
+uniform ub2 {  // ERROR redeclaration of block name (different instance name)
+    float f;
+} c;
+
+uniform ub2 {  // ERROR redeclaration of block name (no instance name)
+    float f123;
+};
+
+uniform ub3 {
+    bool b23;
+};
+
+uniform ub3 {  // ERROR redeclaration of block name (no instance name in first or declared)
+    bool b234;
+};
+
+precision lowp sampler3D;
+precision lowp sampler2DShadow;
+precision lowp sampler2DArrayShadow;
+
+uniform sampler2D s2D;
+uniform sampler3D s3D;
+uniform sampler2DShadow s2DS;
+uniform sampler2DArrayShadow s2DAS;
+in vec2 c2D;
+
+void foo23()
+{
+    ivec2 x1 = textureSize(s2D, 2);
+    textureSize(s2D);        // ERROR, no lod
+    ivec3 x3 = textureSize(s2DAS, -1);
+    textureSize(s2DAS);      // ERROR, no lod
+    vec4 x4 = texture(s2D, c2D);
+    texture(s2D, c2D, 0.2);  // ERROR, bias
+    vec4 x5 = textureProjOffset(s3D, vec4(0.2), ivec3(1));
+    textureProjOffset(s3D, vec4(0.2), ivec3(1), .03);  // ERROR, bias
+    float x6 = textureProjGradOffset(s2DS, invIn, vec2(4.2), vec2(5.3), ivec2(1));
+}
+
+int fgfg(float f, mediump int i);
+int fgfg(float f, highp int i);   // ERROR, precision qualifier difference
+
+int fgfgh(float f, const in mediump int i);
+int fgfgh(float f, in mediump int i);   // ERROR, precision qualifier difference
+
+void foo2349()
+{
+    float[] x = float[] (1.0, 2.0, 3.0);
+	float[] y = x;
+    float[3] z = x;
+    float[3] w;
+    w = y;
+}
+
+int[] foo213234();        // ERROR
+int foo234234(float[]);   // ERROR
+int foo234235(vec2[] v);  // ERROR
+precision highp float[2]; // ERROR
+
+int fffg(float f);
+int fffg(float f);
+
+int gggf(float f);
+int gggf(float f) { return 2; }
+int gggf(float f);
+
+int agggf(float f) { return 2; }
+int agggf(float f);
+
+out struct Ssss { float f; } ssss;
+
+uniform Bblock {
+   int a;
+} Binst;
+int Bfoo;
+
+layout(std140) Binst;    // ERROR
+layout(std140) Bblock;   // ERROR
+layout(std140) Bfoo;     // ERROR
+
+layout(std430) uniform B430 { int a; } B430i;     // ERROR
+
+struct SNA {
+    int a[];             // ERROR
+};
+
+void fooDeeparray()
+{
+    float[] x = float[] (1.0, 2.0, 3.0),
+            y = float[] (1.0, 2.0, 3.0, 4.0);
+    float xp[3], yp[4];
+    xp = x;
+    yp = y;
+    xp = y; // ERROR, wrong size
+    yp = x; // ERROR, wrong size
+}

+ 76 - 0
3rdparty/glslang/Test/300BuiltIns.frag

@@ -0,0 +1,76 @@
+#version 300 es
+
+int imax, imin;
+uint umax, umin;
+
+vec3 x, y;    // ERROR, needs default precision
+bvec3 bv;
+
+uint uy;
+uvec2 uv2c;
+uvec2 uv2y;
+uvec2 uv2x;
+uvec4 uv4y;
+
+ivec3 iv3a;
+ivec3 iv3b;
+
+ivec4 iv4a;
+ivec4 iv4b;
+
+float f;
+
+vec2 v2a, v2b;
+vec4 v4;
+
+void main()
+{
+    // 1.3 int
+    vec3 v = mix(x, y, bv);
+    ivec4 iv10 = abs(iv4a);
+    ivec4 iv11 = sign(iv4a);
+    ivec4 iv12 = min(iv4a, iv4b);
+    ivec4 iv13 = min(iv4a, imin);
+    uvec2 u = min(uv2x, uv2y);
+    uvec4 uv = min(uv4y, uy);
+    ivec3 iv14 = max(iv3a, iv3b);
+    ivec4 iv15 = max(iv4a, imax);
+    uvec2 u10 = max(uv2x, uv2y);
+    uvec2 u11 = max(uv2x, uy);
+    ivec4 iv16 = clamp(iv4a, iv4a, iv4b);
+    ivec4 iv17 = clamp(iv4a, imin, imax);
+    uvec2 u12 = clamp(uv2x, uv2y, uv2c);
+    uvec4 uv10 = clamp(uv4y, umin, umax);
+
+    // 1.3 float
+    vec3 modfOut;
+    vec3 v11 = modf(x, modfOut);
+
+    float t = trunc(f);
+    vec2 v12 = round(v2a);
+    vec2 v13 = roundEven(v2a);
+    bvec2 b10 = isnan(v2a);
+    bvec4 b11 = isinf(v4);
+
+    // 3.3 float
+    int i = floatBitsToInt(f);
+    uvec4 uv11 = floatBitsToUint(v4);
+    vec4 v14 = intBitsToFloat(iv4a);
+    vec2 v15 = uintBitsToFloat(uv2c);
+
+    // 4.0  pack
+    uint u19 = packSnorm2x16(v2a);
+    vec2 v20 = unpackSnorm2x16(uy);
+    uint u15 = packUnorm2x16(v2a);
+    vec2 v16 = unpackUnorm2x16(uy);
+    uint u17 = packHalf2x16(v2b);
+    vec2 v18 = unpackHalf2x16(uy);
+
+    // not present
+    noise2(v18);  // ERROR, not present
+
+    float t__;  // ERROR, no __ until revision 310
+
+      // ERROR, no __ until revision 310
+    #define __D
+}

+ 58 - 0
3rdparty/glslang/Test/300block.frag

@@ -0,0 +1,58 @@
+#version 300 es
+
+precision mediump float;
+
+struct S {
+    vec4 u;
+    uvec4 v;
+    lowp isampler3D sampler;
+    vec3 w;
+    struct T1 {           // ERROR
+        int a;
+    } t;
+};
+
+uniform S s;
+
+uniform fooBlock {
+    uvec4 bv;
+    uniform mat2 bm2;
+    lowp isampler2D sampler;   // ERROR
+    struct T2 {                // ERROR
+        int a;
+    } t;
+    S fbs;                     // ERROR, contains a sampler
+};
+
+uniform barBlock {
+    uvec4 nbv;
+    int ni;
+} inst;
+
+uniform barBlockArray {
+    uvec4 nbv;
+    int ni;
+} insts[4];
+
+uniform unreferenced {
+    float f;
+    uint u;
+};
+
+void main()
+{
+    texture(s.sampler, vec3(inst.ni, bv.y, insts[2].nbv.z));
+    insts[s.v.x];         // ERROR
+    fooBlock;             // ERROR
+    mat4(s);              // ERROR
+    int insts;
+    float barBlock;
+    mat4(barBlock);
+    mat4(unreferenced);   // ERROR, bad type
+    ++s;                  // ERROR
+    inst - 1;             // ERROR
+    ++barBlock;
+    2 * barBlockArray;    // ERROR
+}
+
+int fooBlock;             // ERROR, redef.

+ 19 - 0
3rdparty/glslang/Test/300layout.frag

@@ -0,0 +1,19 @@
+#version 300 es
+precision mediump float;
+in vec4 pos;
+layout (location = 2) in vec4 color;  // ERROR
+
+layout(location = 1) out vec4 c;
+layout(location = 3) out vec4 p;
+layout(location = 4) out vec4 q[2];
+
+void main()
+{
+    c = color;
+    p = pos;
+    q[1] = pos;
+}
+
+layout(location = 40) out float ca[4];  // ERROR, overlap, ERROR too big
+layout(location = 41) out float cb[2];  // ERROR, overlap, ERROR too big
+layout(location = 39) out float cc[6];  // ERROR, overlap, ERROR too big

+ 57 - 0
3rdparty/glslang/Test/300layout.vert

@@ -0,0 +1,57 @@
+#version 300 es
+
+struct s { vec4 v; };
+
+layout(location = 7) in vec3 c;
+layout(LocatioN = 3) in vec4 p;
+layout(LocatioN = 9) in vec4 q[4]; // ERROR, no array
+layout(LocatioN = 10) in s r[4];   // ERROR, no struct, ERROR, location overlap
+out vec4 pos;
+out vec3 color;
+
+layout(shared, column_major) uniform mat4 badm4; // ERROR
+layout(shared, column_major, row_major) uniform; // default is now shared and row_major
+
+layout(std140) uniform Transform { // layout of this block is std140
+    mat4 M1; // row_major
+    layout(column_major) mat4 M2; // column major
+    mat3 N1; // row_major
+    centroid float badf;  // ERROR
+    in float badg;        // ERROR
+    layout(std140) float bad1;
+    layout(shared) float bad2;
+    layout(packed) float bad3;
+} tblock;
+
+uniform T2 { // layout of this block is shared
+    bool b;
+    mat4 t2m;
+};
+
+layout(column_major) uniform T3 { // shared and column_major
+    mat4 M3; // column_major
+    layout(row_major) mat4 M4; // row major
+    mat3 N2; // column_major
+    int b;  // ERROR, redefinition (needs to be last member of block for testing, following members are skipped)
+};
+
+out badout {  // ERROR
+    float f;
+};
+
+layout (location = 10) out vec4 badoutA;  // ERROR
+
+void main()
+{
+    pos = p * (tblock.M1 + tblock.M2 + M4 + M3 + t2m);
+    color = c * tblock.N1;
+}
+
+shared vec4 compute_only;  // ERROR
+
+layout(packed) uniform;
+
+layout(packed) uniform float aoeuntaoeu;  // ERROR, packed on variable
+
+layout(location = 40) in float cd;
+layout(location = 37) in mat4x3 ce; // ERROR, overlap

+ 8 - 0
3rdparty/glslang/Test/300link.frag

@@ -0,0 +1,8 @@
+#version 300 es
+
+precision highp float;
+
+out vec4 color1;
+out vec4 color2;
+
+void main() {}

+ 11 - 0
3rdparty/glslang/Test/300link2.frag

@@ -0,0 +1,11 @@
+#version 300 es
+precision mediump float;
+in vec4 pos;
+
+layout(location = 1) out vec4 c;
+layout(location = 5) out vec4 p;
+layout(location = 9) out vec4 q[2];
+
+void main()
+{
+}

+ 7 - 0
3rdparty/glslang/Test/300link3.frag

@@ -0,0 +1,7 @@
+#version 300 es
+
+precision highp float;
+
+out vec4 color1;
+
+void main() {}

+ 135 - 0
3rdparty/glslang/Test/300operations.frag

@@ -0,0 +1,135 @@
+#version 300 es
+
+uniform block {
+    mediump float f;
+} instanceName;
+
+struct S {
+    int i;
+} s;
+
+float a[5];
+
+void main()
+{
+    bool b;
+    float f;
+    int i;
+    uint u;
+    bvec3 b3;
+    vec3 v3;
+    ivec3 iv3;
+    uvec3 uv3;
+    vec4 v4;
+    ivec4 iv4;
+    uvec4 uv4;
+    mat2 m2;
+    mat4 m4;
+
+    // These are all errors:
+    instanceName + instanceName;
+    s + s;
+    i + f;
+    u + f;
+    u + i;
+    iv3 *= iv4;
+    iv4 / uv4;
+    i - v3;
+    iv3 + uv3;
+    a * a;
+    b / b;
+
+    f % f;
+    i % f;
+    f % u;
+    instanceName++;
+    ++s;
+    a--;
+    ++b3;
+
+    iv3 < uv3;
+    m2 > m2;
+    m2 != m4;
+    i >= u;
+    a <= a;
+    b > b;
+
+    b && b3;
+    b3 ^^ b3;
+    b3 || b;
+    i && i;
+    u || u;
+    m2 ^^ m2;
+
+    !u;
+    !i;
+    !m2;
+    !v3;
+    !a;
+
+    ~f;
+    ~m4;
+    ~v3;
+    ~a;
+    ~instanceName;
+
+    i << iv3;
+    u << uv3;
+    i >> f;
+    f >> i;
+    m4 >> i;
+    a >> u;
+    iv3 >> iv4;
+
+    i & u;    
+    u &= uv3;
+    i | uv3;
+    u & f;
+    m2 | m2;
+    s ^ s;
+    (f = f) = f;
+
+    // These are all okay:
+    f * v4;
+    u + u;
+    uv4 / u;
+    iv3 -= iv3;
+    
+    i %= 3;
+    uv3 % 4u;
+    --m2;
+    iv4++;
+
+    m4 != m4;
+    m2 == m2;
+    i <= i;
+    a == a;
+    s != s;
+
+    b && b;
+    b || b;
+    b ^^ b;
+
+    !b, uv3;
+
+    ~i;
+    ~u;
+    ~uv3;
+    ~iv3;
+
+    uv3 <<= i;
+    i >> i;
+    u << u;
+    iv3 >> iv3;
+
+    i & i;
+    u | u;
+    iv3 ^ iv3;
+    u & uv3;
+    uv3 | u;
+    uv3 &= u;
+    int arr[0x222 & 0xf];
+    arr[1]; // size 2
+    int arr2[(uvec2(0, 0x2) | 0x1u).y];
+    arr2[2]; // size 3
+}

+ 74 - 0
3rdparty/glslang/Test/300scope.vert

@@ -0,0 +1,74 @@
+#version 300 es
+
+int f(int a, int b, int c)
+{
+	int a = b;  // ERROR, redefinition
+
+    {
+		float a = float(a) + 1.0;
+    }
+
+	return a;
+}
+
+int f(int a, int b, int c);  // okay to redeclare
+
+bool b;
+float b(int a);      // ERROR: redefinition
+
+float c(int a);
+bool c;              // ERROR: redefinition
+
+float f;             // ERROR: redefinition
+float tan;           // ERROR: redefines built-in function
+float sin(float x);  // ERROR: can't redefine built-in functions
+float cos(float x)   // ERROR: can't redefine built-in functions
+{
+	return 1.0;
+}
+bool radians(bool x) // ERROR: can't overload built-in functions
+{
+    return true;
+}
+
+invariant gl_Position;
+
+void main()
+{
+    int g();    // ERROR: no local function declarations
+	g();
+
+    float sin;  // okay
+	sin;
+    sin(0.7);  // ERROR, use of hidden function
+    f(1,2,3);
+
+    float f;    // hides f()
+    f = 3.0;
+
+    gl_Position = vec4(f);
+
+    for (int f = 0; f < 10; ++f)
+        ++f;
+
+    int x = 1;
+    { 
+        float x = 2.0, /* 2nd x visible here */ y = x; // y is initialized to 2
+        int z = z; // ERROR: z not previously defined.
+    }
+    {
+        int x = x; // x is initialized to '1'
+    }
+
+    struct S 
+    { 
+        int x; 
+    };
+    {
+        S S = S(0); // 'S' is only visible as a struct and constructor 
+        S.x;        // 'S' is now visible as a variable
+    }
+
+    int degrees;
+    degrees(3.2);  // ERROR, use of hidden built-in function
+}

+ 240 - 0
3rdparty/glslang/Test/310.comp

@@ -0,0 +1,240 @@
+#version 310 es
+
+layout(local_size_x = 2) in;
+layout(local_size_x = 16) in;     // ERROR, changing
+layout(local_size_z = 4096) in;   // ERROR, too large
+layout(local_size_x = 2) in;
+
+const int total = gl_MaxComputeWorkGroupCount.y 
+                + gl_MaxComputeUniformComponents
+                + gl_MaxComputeTextureImageUnits
+                + gl_MaxComputeImageUniforms
+                + gl_MaxComputeAtomicCounters
+                + gl_MaxComputeAtomicCounterBuffers;
+
+buffer ShaderStorageBlock
+{
+    int value;
+    float values[];
+};
+
+buffer InvalidShaderStorageBlock
+{
+    float values[];  // ERROR
+    int value;
+} invalid;
+
+void main()
+{
+    barrier();
+    memoryBarrier();
+    memoryBarrierAtomicCounter();
+    memoryBarrierBuffer();
+    memoryBarrierShared();
+    memoryBarrierImage();
+    groupMemoryBarrier();
+    value = int(values[gl_LocalInvocationIndex]);
+}
+
+layout(location = 2) in vec3 v3;      // ERROR
+in float f;                           // ERROR
+out float fo;                         // ERROR
+
+shared vec4 s;
+layout(location = 2) shared vec4 sl;  // ERROR
+shared float fs = 4.2;                // ERROR
+
+layout(local_size_x = 2, local_size_y = 3, local_size_z = 4) out;  // ERROR
+
+int arrX[gl_WorkGroupSize.x];
+int arrY[gl_WorkGroupSize.y];
+int arrZ[gl_WorkGroupSize.z];
+
+readonly buffer roblock
+{
+    int value;
+    float values[];
+} ro;
+
+void foo()
+{
+    ro.values[2] = 4.7;        // ERROR, readonly
+    ro.values.length();
+    ++s;
+}
+
+buffer vec4 v;  // ERROR
+
+uniform usampler2D us2dbad;  // ERROR, default precision
+
+precision highp usampler2D;
+precision highp iimage2DArray;
+precision highp iimage2D;
+
+uniform usampler2D us2d;
+
+uniform iimage2DArray ii2dabad;  // ERROR, not writeonly
+uniform writeonly iimage2DArray ii2da;
+
+layout(r32i) uniform iimage2D iimg2D;
+layout(rgba32i) uniform readonly iimage2D iimg2Drgba;
+layout(rgba32f) uniform readonly image2D img2Drgba;   // ERROR, no default
+layout(r32ui) uniform uimage2D uimg2D;                // ERROR, no default
+
+void qux()
+{
+    int i = 4;
+    imageAtomicCompSwap(iimg2D, ivec2(i,i), i, i);// ERROR no longer in 310
+    imageAtomicAdd(uimg2D, ivec2(i,i), uint(i));  // ERROR no longer in 310
+    imageAtomicMin(iimg2Drgba, ivec2(i,i), i);    // ERROR no longer in 310  // ERROR iimg2Drgba does not have r32i layout
+    imageAtomicMax(img2Drgba, ivec2(i,i), i);     // ERROR no longer in 310  // ERROR img2Drgba is not integer image
+    ivec4 pos = imageLoad(iimg2D, ivec2(i,i));
+    imageStore(ii2da, ivec3(i,i,i), ivec4(0));
+    imageLoad(img2Drgba, ivec2(i,i));
+    imageLoad(ii2da, ivec3(i,i,i));       // ERROR, drops writeonly
+}
+
+volatile float vol; // ERROR, not an image
+readonly int vol2;  // ERROR, not an image
+
+void passr(coherent readonly iimage2D image)
+{
+}
+
+layout(r32i) coherent readonly uniform iimage2D qualim1;
+layout(r32i) coherent restrict readonly uniform iimage2D qualim2;
+
+void passrc()
+{
+    passr(qualim1);
+    passr(qualim2);   // ERROR, drops restrict
+    passr(iimg2D);
+}
+
+highp layout(rg8i)     uniform readonly uimage2D i1bad; // ERROR, type mismatch
+highp layout(rgba32i)  uniform readonly image2D i2bad;  // ERROR, type mismatch
+highp layout(rgba32f)  uniform readonly uimage2D i3bad; // ERROR, type mismatch
+layout(r8_snorm) uniform readonly iimage2D i4bad; // ERROR, type mismatch
+layout(rgba32ui) uniform readonly iimage2D i5bad; // ERROR, type mismatch
+layout(r8ui)     uniform readonly iimage2D i6bad; // ERROR, type mismatch
+
+layout(binding = 0) uniform atomic_uint counter;
+
+uint func(atomic_uint c)
+{
+    return atomicCounterIncrement(c);
+}
+
+uint func2(out atomic_uint c) // ERROR, output
+{
+    return counter;           // ERROR, type mismatch
+    return atomicCounter(counter);
+}
+
+void mainAC()
+{
+     atomic_uint non_uniform_counter; // ERROR
+     uint val = atomicCounter(counter);
+     atomicCounterDecrement(counter);
+}
+
+layout(binding = 1) uniform mediump atomic_uint counterBad;  // ERROR, not highp
+
+layout(binding = 2, offset = 4) uniform atomic_uint countArr[4];
+uniform int i;
+
+void opac()
+{
+    int a[3];
+    a[counter];         // ERROR, non-integer
+    countArr[2];
+    countArr[i];
+}
+
+shared int atomi;
+shared uint atomu;
+
+void atoms()
+{
+    int origi = atomicAdd(atomi, 3);
+    uint origu = atomicAnd(atomu, 7u);
+    origi = atomicExchange(atomi, 4);
+    origu = atomicCompSwap(atomu, 10u, 8u);
+}
+
+precision highp atomic_uint;
+precision lowp atomic_uint;   // ERROR
+
+precise int pfoo;  // ERROR, reserved
+
+dmat2x4 dm;                     // ERROR
+uniform samplerCubeArray sca;   // ERROR
+uniform iimage2DRect i2dr;      // ERROR
+highp uniform image2DMS i2dms;  // ERROR
+uniform uimage2DMSArray u2dmsa; // ERROR
+
+highp layout(r32f)  coherent volatile restrict readonly writeonly uniform  image2D okay1;
+      layout(r32i)  coherent volatile restrict readonly           uniform iimage2D okay2;
+highp layout(r32ui) coherent volatile restrict          writeonly uniform uimage2D okay3;
+highp layout(r32f)  coherent volatile restrict                    uniform  image2D okay4;
+ 
+highp layout(rgba32f)  coherent volatile restrict                 uniform  image2D badQ1;  // ERROR, bad qualifiers
+      layout(rgba8i)   coherent volatile restrict                 uniform iimage2D badQ2;  // ERROR, bad qualifiers
+highp layout(rgba16ui) coherent volatile restrict                 uniform uimage2D badQ3;  // ERROR, bad qualifiers
+
+writeonly buffer woblock
+{
+    int value;
+    float values[];
+} wo;
+
+void foowo()
+{
+    float g;
+    g = wo.values[2];            // ERROR, writeonly
+    float f = wo.values[2];      // ERROR, writeonly
+    ++wo.values[2];              // ERROR, writeonly
+    wo.values[2]--;              // ERROR, writeonly
+    f + wo.values[2];            // ERROR, writeonly
+    wo.values[2] - f;            // ERROR, writeonly
+    bool b;
+    b ? f : wo.values[2];        // ERROR, writeonly
+    b ? wo.values[2] : f;        // ERROR, writeonly
+    if (f == wo.values[2])       // ERROR, writeonly
+        ++f;
+    if (f >= wo.values[2])       // ERROR, writeonly
+        ++f;
+    f = vec3(wo.values[2]).x;    // ERROR, writeonly
+    ~wo.value;                   // ERROR, writeonly
+    wo.values[2] = 3.4;
+}
+
+buffer multioblock
+{
+    readonly int value;
+    writeonly float values[];
+} multio;
+
+void foomultio()
+{
+    float g;
+    g = wo.values[2];            // ERROR, writeonly
+    ~wo.value;
+    wo.values[2] = 3.4;
+    wo.value = 2;                // ERROR, readonly
+}
+
+in inb {     // ERROR
+    int a;
+} inbi;
+
+out outb {     // ERROR
+    int a;
+} outbi;
+
+float t__;  // ERROR, no __ until revision 310
+
+    // ERROR, no __ until revision 310
+#define __D
+
+shared vec4 arr[2][3][4];

+ 431 - 0
3rdparty/glslang/Test/310.frag

@@ -0,0 +1,431 @@
+#version 310 es
+highp float nodef3(float); // ERROR, no default precision
+precision mediump float;
+precision highp usampler2D;
+precision highp sampler2D;
+precision highp isampler2DArray;
+
+layout(origin_upper_left, pixel_center_integer) in vec4 gl_FragCoord;  // ERROR, not supported
+
+layout(location = 2) in vec3 v3;
+layout(location = 2) in mat4 yi;  // ERROR, locations conflict with xi
+
+uniform sampler2D arrayedSampler[5];
+uniform usampler2D usamp2d;
+uniform usampler2DRect samp2dr;      // ERROR, reserved
+uniform isampler2DArray isamp2DA;
+
+in vec2 c2D;
+uniform int i;
+
+void main()
+{
+    vec4 v = texture(arrayedSampler[i], c2D);  // ERROR
+
+    ivec2 offsets[4];
+    const ivec2 constOffsets[4] = ivec2[4](ivec2(1,2), ivec2(3,4), ivec2(15,16), ivec2(-2,0));
+    uvec4 uv4 = textureGatherOffsets(samp2dr, c2D, offsets, 2);  // ERROR, not supported
+    vec4 v4 = textureGather(arrayedSampler[0], c2D);
+    ivec4 iv4 = textureGatherOffset(isamp2DA, vec3(0.1), ivec2(1), 3);
+    iv4 = textureGatherOffset(isamp2DA, vec3(0.1), ivec2(1), i);  // ERROR, last argument not const
+    iv4 = textureGatherOffset(isamp2DA, vec3(0.1), ivec2(1), 4);  // ERROR, last argument out of range
+    iv4 = textureGatherOffset(isamp2DA, vec3(0.1), ivec2(1), 1+2);
+    iv4 = textureGatherOffset(isamp2DA, vec3(0.1), ivec2(0.5));
+    iv4 = textureGatherOffset(isamp2DA, vec3(0.1), ivec2(i));     // ERROR, offset not constant
+}
+
+out vec4 outp;
+void foo23()
+{
+    const ivec2[3] offsets = ivec2[3](ivec2(1,2), ivec2(3,4), ivec2(15,16));
+
+    textureProjGradOffset(usamp2d, outp, vec2(0.0), vec2(0.0), ivec2(c2D));     // ERROR, offset not constant
+    textureProjGradOffset(usamp2d, outp, vec2(0.0), vec2(0.0), offsets[1]);
+    textureProjGradOffset(usamp2d, outp, vec2(0.0), vec2(0.0), offsets[2]);     // ERROR, offset out of range
+    textureProjGradOffset(usamp2d, outp, vec2(0.0), vec2(0.0), ivec2(-10, 20)); // ERROR, offset out of range
+
+    if (gl_HelperInvocation)
+        ++outp;
+
+    int sum = gl_MaxVertexImageUniforms +
+              gl_MaxFragmentImageUniforms +
+              gl_MaxComputeImageUniforms +
+              gl_MaxCombinedImageUniforms +
+              gl_MaxCombinedShaderOutputResources;
+
+    bool b1, b2, b3, b;
+
+    b1 = mix(b2, b3, b);
+    uvec3 um3 = mix(uvec3(i), uvec3(i), bvec3(b));
+    ivec4 im4 = mix(ivec4(i), ivec4(i), bvec4(b));
+}
+
+layout(binding=3) uniform sampler2D s1;
+layout(binding=3) uniform sampler2D s2; // ERROR: overlapping bindings?  Don't see that in the 310 spec.
+highp layout(binding=2) uniform writeonly image2D      i2D;
+      layout(binding=4) uniform readonly  image3D      i3D;    // ERROR, no default precision
+      layout(binding=5) uniform           imageCube    iCube;  // ERROR, no default precision
+      layout(binding=6) uniform           image2DArray i2DA;   // ERROR, no default precision
+      layout(binding=6) uniform coherent volatile restrict image2D i2Dqualified;    // ERROR, no default precision
+
+layout(binding = 1) uniform bb {
+    int foo;
+    layout(binding = 2) float f;     // ERROR
+} bbi;
+
+in centroid vec4 centroidIn;
+layout(location = 200000) uniform vec4 bigl;  // ERROR, location too big
+
+layout(early_fragment_tests) in;
+
+layout(location = 40) out vec4 bigout1;  // ERROR, too big
+layout(location = 40) out vec4 bigout2;  // ERROR, overlap
+layout(location = -2) out vec4 neg;      // ERROR, negative
+
+layout(std430) buffer b430 {
+    int i;
+} b430i;
+
+layout(shared) uniform bshar {
+    int i;
+} bshari;
+
+in smooth vec4 smoothIn;
+in flat int flatIn;
+
+uniform sampler2DMS s2dms;  // ERROR, no default precision qualifier
+
+void foots()
+{
+    highp ivec2 v2 = textureSize(s1, 2);
+    highp ivec3 v3 = textureSize(isamp2DA, 3);
+    v2 = textureSize(s2dms);
+    v2 = imageSize(i2D);
+    v3 = imageSize(i3D);
+    v2 = imageSize(iCube);
+    v3 = imageSize(i2DA);
+    v2 = imageSize(i2Dqualified);
+}
+
+out bool bout;          // ERROR
+highp out image2D imageOut;   // ERROR
+out mat2x3 mout;        // ERROR
+
+in bool inb;         // ERROR
+in sampler2D ino;    // ERROR
+in float ina[4];
+in float inaa[4][2]; // ERROR
+struct S { float f; };
+in S ins;
+in S[4] inasa;       // ERROR
+in S insa[4];        // ERROR
+struct SA { float f[4]; };
+in SA inSA;          // ERROR
+struct SS { float f; S s; };
+in SS inSS;          // ERROR
+
+#ifndef GL_EXT_shader_io_blocks
+#error GL_EXT_shader_io_blocks not defined
+#endif
+
+#extension GL_EXT_shader_io_blocks : enable
+
+out outbname { int a; } outbinst;   // ERROR, not out block in fragment shader
+
+in inbname {
+    int a;
+    vec4 v;
+    struct { int b; } s;     // ERROR, nested struct definition
+} inbinst;
+
+in inbname2 {
+    layout(location = 12) int aAnon;
+    layout(location = 13) centroid in vec4 vAnon;
+};
+
+in layout(location = 13) vec4 aliased; // ERROR, aliased
+
+in inbname2 {                // ERROR, reuse of block name
+    int aAnon;
+    centroid in vec4 vAnon;
+};
+
+in badmember {               // ERROR, aAnon already in global scope
+    int aAnon;
+};
+
+int inbname;                 // ERROR, redefinition of block name
+
+vec4 vAnon;                  // ERROR, anon in global scope; redefinition
+
+in arrayed {
+    float f;
+} arrayedInst[4];
+
+void fooIO()
+{
+    vec4 v = inbinst.v + vAnon;
+    v *= arrayedInst[2].f;
+    v *= arrayedInst[i].f;
+}
+
+in vec4 gl_FragCoord;
+layout(origin_upper_left, pixel_center_integer) in vec4 gl_FragCoord;  // ERROR, non-ES
+
+layout(early_fragment_tests) in;
+out float gl_FragDepth;
+layout(depth_any) out float gl_FragDepth;  // ERROR, non-ES
+
+void foo_IO()
+{
+    gl_FragDepth = 0.2;  // ERROR, early_fragment_tests declared
+    gl_Layer;            // ERROR, not present
+    gl_PrimitiveID;      // ERROR, not present
+    bool f = gl_FrontFacing;
+}
+
+out float gl_FragDepth;
+
+#extension GL_OES_geometry_shader : enable
+
+void foo_GS()
+{
+    highp int l = gl_Layer;
+    highp int p = gl_PrimitiveID;
+}
+
+in vec2 inf, ing;
+uniform ivec2 offsets[4];
+uniform sampler2D sArray[4];
+uniform int sIndex;
+layout(binding = 0) uniform atomic_uint auArray[2];
+uniform ubName { int i; } ubInst[4];
+buffer bbName { int i; } bbInst[4];
+highp uniform writeonly image2D iArray[5];
+const ivec2 constOffsets[4] = ivec2[4](ivec2(0.1), ivec2(0.2), ivec2(0.3), ivec2(0.4));
+
+void pfooBad()
+{
+    precise vec2 h;                                            // ERROR reserved
+    h = fma(inf, ing, h);                                      // ERROR, not available
+    textureGatherOffset(sArray[0], vec2(0.1), ivec2(inf));     // ERROR, offset not constant
+    textureGatherOffsets(sArray[0], vec2(0.1), constOffsets);  // ERROR, not available
+}
+
+#extension GL_OES_gpu_shader5 : enable
+
+void pfoo()
+{
+    precise vec2 h;
+    h = fma(inf, ing, h);
+    textureGatherOffset(sArray[0], vec2(0.1), ivec2(inf));
+    textureGatherOffsets(sArray[0], vec2(0.1), constOffsets);
+    textureGatherOffsets(sArray[0], vec2(0.1), offsets);       // ERROR, offset not constant
+}
+
+#extension GL_EXT_texture_cube_map_array : enable
+
+precision highp imageCubeArray        ;
+precision highp iimageCubeArray       ;
+precision highp uimageCubeArray       ;
+
+precision highp samplerCubeArray      ;
+precision highp samplerCubeArrayShadow;
+precision highp isamplerCubeArray     ;
+precision highp usamplerCubeArray     ;
+
+uniform writeonly imageCubeArray  CA1;
+uniform writeonly iimageCubeArray CA2;
+uniform writeonly uimageCubeArray CA3;
+
+#ifdef GL_EXT_texture_cube_map_array
+uniform samplerCubeArray          CA4;
+uniform samplerCubeArrayShadow    CA5;
+uniform isamplerCubeArray         CA6;
+uniform usamplerCubeArray         CA7;
+#endif
+
+void CAT()
+{
+    highp vec4 b4 = texture(CA4, vec4(0.5), 0.24);
+    highp ivec4 b6 = texture(CA6, vec4(0.5), 0.26);
+    highp uvec4 b7 = texture(CA7, vec4(0.5), 0.27);
+}
+
+void badSample()
+{
+    lowp     int  a1 = gl_SampleID;         // ERROR, need extension
+    mediump  vec2 a2 = gl_SamplePosition;   // ERROR, need extension
+    highp    int  a3 = gl_SampleMaskIn[0];  // ERROR, need extension
+    gl_SampleMask[0] = a3;                  // ERROR, need extension
+    mediump int n = gl_NumSamples;          // ERROR, need extension
+}
+
+#ifdef GL_OES_sample_variables
+#extension GL_OES_sample_variables : enable
+#endif
+
+void goodSample()
+{
+    lowp     int  a1 = gl_SampleID;       
+    mediump  vec2 a2 = gl_SamplePosition; 
+    highp    int  a3 = gl_SampleMaskIn[0];
+    gl_SampleMask[0] = a3;
+    mediump int n1 = gl_MaxSamples;
+    mediump int n2 = gl_NumSamples;
+}
+
+uniform layout(r32f)  highp  image2D im2Df;
+uniform layout(r32ui) highp uimage2D im2Du;
+uniform layout(r32i)  highp iimage2D im2Di;
+uniform ivec2 P;
+
+void badImageAtom()
+{
+    float datf;
+    int dati;
+    uint datu;
+
+    imageAtomicAdd(     im2Di, P, dati);        // ERROR, need extension
+    imageAtomicAdd(     im2Du, P, datu);        // ERROR, need extension
+    imageAtomicMin(     im2Di, P, dati);        // ERROR, need extension
+    imageAtomicMin(     im2Du, P, datu);        // ERROR, need extension
+    imageAtomicMax(     im2Di, P, dati);        // ERROR, need extension
+    imageAtomicMax(     im2Du, P, datu);        // ERROR, need extension
+    imageAtomicAnd(     im2Di, P, dati);        // ERROR, need extension
+    imageAtomicAnd(     im2Du, P, datu);        // ERROR, need extension
+    imageAtomicOr(      im2Di, P, dati);        // ERROR, need extension
+    imageAtomicOr(      im2Du, P, datu);        // ERROR, need extension
+    imageAtomicXor(     im2Di, P, dati);        // ERROR, need extension
+    imageAtomicXor(     im2Du, P, datu);        // ERROR, need extension
+    imageAtomicExchange(im2Di, P, dati);        // ERROR, need extension
+    imageAtomicExchange(im2Du, P, datu);        // ERROR, need extension
+    imageAtomicExchange(im2Df, P, datf);        // ERROR, need extension
+    imageAtomicCompSwap(im2Di, P,  3, dati);    // ERROR, need extension
+    imageAtomicCompSwap(im2Du, P, 5u, datu);    // ERROR, need extension
+}
+
+#ifdef GL_OES_shader_image_atomic 
+#extension GL_OES_shader_image_atomic : enable
+#endif
+
+uniform layout(rgba32f)  highp  image2D badIm2Df;  // ERROR, needs readonly or writeonly
+uniform layout(rgba8ui) highp uimage2D badIm2Du;   // ERROR, needs readonly or writeonly
+uniform layout(rgba16i)  highp iimage2D badIm2Di;  // ERROR, needs readonly or writeonly
+
+void goodImageAtom()
+{
+    float datf;
+    int dati;
+    uint datu;
+
+    imageAtomicAdd(     im2Di, P, dati);
+    imageAtomicAdd(     im2Du, P, datu);
+    imageAtomicMin(     im2Di, P, dati);
+    imageAtomicMin(     im2Du, P, datu);
+    imageAtomicMax(     im2Di, P, dati);
+    imageAtomicMax(     im2Du, P, datu);
+    imageAtomicAnd(     im2Di, P, dati);
+    imageAtomicAnd(     im2Du, P, datu);
+    imageAtomicOr(      im2Di, P, dati);
+    imageAtomicOr(      im2Du, P, datu);
+    imageAtomicXor(     im2Di, P, dati);
+    imageAtomicXor(     im2Du, P, datu);
+    imageAtomicExchange(im2Di, P, dati);
+    imageAtomicExchange(im2Du, P, datu);
+    imageAtomicExchange(im2Df, P, datf);
+    imageAtomicCompSwap(im2Di, P,  3, dati);
+    imageAtomicCompSwap(im2Du, P, 5u, datu);
+
+    imageAtomicMax(badIm2Di, P, dati);      // ERROR, not an allowed layout() on the image
+    imageAtomicMax(badIm2Du, P, datu);      // ERROR, not an allowed layout() on the image
+    imageAtomicExchange(badIm2Df, P, datf); // ERROR, not an allowed layout() on the image
+}
+
+sample in vec4 colorSampInBad;       // ERROR, reserved
+centroid out vec4 colorCentroidBad;  // ERROR
+flat out vec4 colorBadFlat;          // ERROR
+smooth out vec4 colorBadSmooth;      // ERROR
+noperspective out vec4 colorBadNo;   // ERROR
+flat centroid in vec2 colorfc;
+in float scalarIn;
+
+void badInterp()
+{
+    interpolateAtCentroid(colorfc);             // ERROR, need extension
+    interpolateAtSample(colorfc, 1);            // ERROR, need extension
+    interpolateAtOffset(colorfc, vec2(0.2));    // ERROR, need extension
+}
+
+#if defined GL_OES_shader_multisample_interpolation
+#extension GL_OES_shader_multisample_interpolation : enable
+#endif
+
+sample in vec4 colorSampIn;
+sample out vec4 colorSampleBad;     // ERROR
+flat sample in vec4 colorfsi;
+sample in vec3 sampInArray[4];
+
+void interp()
+{
+    float res;
+    vec2 res2;
+    vec3 res3;
+    vec4 res4;
+
+    res2 = interpolateAtCentroid(colorfc);
+    res4 = interpolateAtCentroid(colorSampIn);
+    res4 = interpolateAtCentroid(colorfsi);
+    res  = interpolateAtCentroid(scalarIn);
+    res3 = interpolateAtCentroid(sampInArray);         // ERROR
+    res3 = interpolateAtCentroid(sampInArray[2]);
+    res2 = interpolateAtCentroid(sampInArray[2].xy);   // ERROR
+
+    res3 = interpolateAtSample(sampInArray, 1);        // ERROR
+    res3 = interpolateAtSample(sampInArray[i], 0);
+    res2 = interpolateAtSample(sampInArray[2].xy, 2);  // ERROR
+    res  = interpolateAtSample(scalarIn, 1);
+
+    res3 = interpolateAtOffset(sampInArray, vec2(0.2));         // ERROR
+    res3 = interpolateAtOffset(sampInArray[2], vec2(0.2));
+    res2 = interpolateAtOffset(sampInArray[2].xy, vec2(0.2));   // ERROR, no swizzle
+    res  = interpolateAtOffset(scalarIn + scalarIn, vec2(0.2)); // ERROR, no binary ops other than dereference
+    res  = interpolateAtOffset(scalarIn, vec2(0.2));
+
+    float f;
+    res  = interpolateAtCentroid(f);           // ERROR, not interpolant
+    res4 = interpolateAtSample(outp, 0);       // ERROR, not interpolant
+}
+
+layout(blend_support_softlight) out;           // ERROR, need extension
+
+#ifdef GL_KHR_blend_equation_advanced
+#extension GL_KHR_blend_equation_advanced : enable
+#endif
+
+layout(blend_support_multiply) out;
+layout(blend_support_screen) out;
+layout(blend_support_overlay) out;
+layout(blend_support_darken, blend_support_lighten) out;
+layout(blend_support_colordodge) layout(blend_support_colorburn) out;
+layout(blend_support_hardlight) out;
+layout(blend_support_softlight) out;
+layout(blend_support_difference) out;
+layout(blend_support_exclusion) out;
+layout(blend_support_hsl_hue) out;
+layout(blend_support_hsl_saturation) out;
+layout(blend_support_hsl_color) out;
+layout(blend_support_hsl_luminosity) out;
+layout(blend_support_all_equations) out;
+
+layout(blend_support_hsl_luminosity) out;              // okay to repeat
+
+layout(blend_support_hsl_luminosity) in;                       // ERROR, only on "out"
+layout(blend_support_hsl_luminosity) out vec4;                 // ERROR, only on standalone
+layout(blend_support_hsl_luminosity) out vec4 badout;          // ERROR, only on standalone
+layout(blend_support_hsl_luminosity) struct badS {int i;};     // ERROR, only on standalone
+layout(blend_support_hsl_luminosity) void blendFoo() { }       // ERROR, only on standalone
+void blendFoo(layout(blend_support_hsl_luminosity) vec3 v) { } // ERROR, only on standalone
+layout(blend_support_flizbit) out;                             // ERROR, no flizbit
+
+out vec4 outAA[2][2];  // ERROR

+ 152 - 0
3rdparty/glslang/Test/310.geom

@@ -0,0 +1,152 @@
+#version 310 es
+
+#ifdef GL_EXT_geometry_shader
+#extension GL_EXT_geometry_shader : enable
+#else
+#error no GL_EXT_geometry_shader 
+#endif
+
+#ifndef GL_OES_geometry_shader
+#error no GL_OES_geometry_shader
+#endif
+
+precision mediump float;
+
+in fromVertex {
+    in vec3 color;
+} fromV[];
+
+in vec4 nonBlockUnsized[];
+
+out toFragment {
+    out vec3 color;
+} toF;
+
+out fromVertex {  // okay to reuse a block name for another block name
+    vec3 color;
+};
+
+out fooB {        // ERROR, cannot reuse block name as block instance
+    vec2 color;
+} fromVertex;
+
+int fromVertex;   // ERROR, cannot reuse a block name for something else
+
+out fooC {        // ERROR, cannot have same name for block and instance name
+    vec2 color;
+} fooC;
+
+void main()
+{
+    EmitVertex();
+    EndPrimitive();
+    EmitStreamVertex(1);    // ERROR
+    EndStreamPrimitive(0);  // ERROR
+
+    color = fromV[0].color;
+    gl_ClipDistance[3] =              // ERROR, no ClipDistance
+        gl_in[1].gl_ClipDistance[2];  // ERROR, no ClipDistance
+    gl_Position = gl_in[0].gl_Position;
+
+    gl_PrimitiveID = gl_PrimitiveIDIn;
+    gl_Layer = 2;
+}
+
+layout(stream = 4) out vec4 ov4; // ERROR, no streams
+
+layout(line_strip, points, triangle_strip, points, triangle_strip) out;  // just means triangle_strip"
+
+out ooutb { vec4 a; } ouuaa6;
+
+layout(max_vertices = 200) out;
+layout(max_vertices = 300) out;   // ERROR, too big
+void foo(layout(max_vertices = 4) int a)  // ERROR
+{
+    ouuaa6.a = vec4(1.0);
+}
+
+layout(line_strip, points, triangle_strip, points) out;  // ERROR, changing output primitive
+layout(line_strip, points) out; // ERROR, changing output primitive
+layout(triangle_strip) in; // ERROR, not an input primitive
+layout(triangle_strip) uniform; // ERROR
+layout(triangle_strip) out vec4 badv4;  // ERROR, not on a variable
+layout(triangle_strip) in vec4 bad2v4[];  // ERROR, not on a variable or input
+layout(invocations = 3) out outbn { int a; }; // 2 ERROR, not on a block, not until 4.0
+out outbn2 {
+    layout(invocations = 3)  int a; // 2 ERRORs, not on a block member, not until 4.0
+    layout(max_vertices = 3) int b; // ERROR, not on a block member
+    layout(triangle_strip)   int c; // ERROR, not on a block member
+} outbi;
+
+layout(lines) out;  // ERROR, not on output
+layout(lines_adjacency) in;
+layout(triangles) in;             // ERROR, can't change it
+layout(triangles_adjacency) in;   // ERROR, can't change it
+layout(invocations = 4) in;
+
+in sameName {
+    int a15;
+} insn[];
+
+out sameName {
+    float f15;
+};
+
+uniform sameName {
+    bool b15;
+};
+
+const int summ = gl_MaxVertexAttribs +
+             gl_MaxGeometryInputComponents +
+             gl_MaxGeometryOutputComponents +
+             gl_MaxGeometryImageUniforms +
+             gl_MaxGeometryTextureImageUnits +
+             gl_MaxGeometryOutputVertices +
+             gl_MaxGeometryTotalOutputComponents +
+             gl_MaxGeometryUniformComponents +
+             gl_MaxGeometryAtomicCounters +
+             gl_MaxGeometryAtomicCounterBuffers +
+             gl_MaxVertexTextureImageUnits +
+             gl_MaxCombinedTextureImageUnits +
+             gl_MaxTextureImageUnits +
+             gl_MaxDrawBuffers;
+
+void fooe1()
+{
+    gl_ViewportIndex;  // ERROR, not in ES
+    gl_MaxViewports;   // ERROR, not in ES
+    insn.length();     // 4: lines_adjacency
+    int inv = gl_InvocationID;
+}
+
+in vec4 explArray[4];
+in vec4 explArrayBad[5];  // ERROR, wrong size
+in vec4 nonArrayed;       // ERROR, not an array
+flat out vec3 myColor1;
+centroid out vec3 myColor2;
+centroid in vec3 centr[];
+sample out vec4 perSampleColor;  // ERROR without sample extensions
+
+layout(max_vertices = 200) out;  // matching redecl
+
+layout(location = 7, component = 2) in float comp[];  // ERROR, es has no component
+
+void notHere()
+{
+    gl_MaxGeometryVaryingComponents;  // ERROR, not in ES
+    gl_VerticesIn;                    // ERROR, not in ES
+}
+
+void pointSize1()
+{
+    highp float ps = gl_in[3].gl_PointSize;  // ERROR, need point_size extension
+    gl_PointSize = ps;                       // ERROR, need point_size extension
+}
+
+#extension GL_OES_geometry_point_size : enable
+
+void pointSize2()
+{
+    highp float ps = gl_in[3].gl_PointSize;
+    gl_PointSize = ps;
+}

+ 169 - 0
3rdparty/glslang/Test/310.tesc

@@ -0,0 +1,169 @@
+#version 310 es
+
+#extension GL_OES_tessellation_shader : enable
+
+layout(vertices = 4) out;
+out int outa[gl_out.length()];
+
+layout(quads) in;                   // ERROR
+layout(ccw) out;                    // ERROR
+layout(fractional_even_spacing) in; // ERROR
+
+patch in vec4 patchIn;              // ERROR
+patch out vec4 patchOut;
+
+void main()
+{
+    barrier();
+
+    int a = gl_MaxTessControlInputComponents +
+            gl_MaxTessControlOutputComponents +
+            gl_MaxTessControlTextureImageUnits +
+            gl_MaxTessControlUniformComponents +
+            gl_MaxTessControlTotalOutputComponents;
+
+    vec4 p = gl_in[1].gl_Position;
+    float ps = gl_in[1].gl_PointSize;        // ERROR, need point_size extension
+    float cd = gl_in[1].gl_ClipDistance[2];  // ERROR, not in ES
+
+    int pvi = gl_PatchVerticesIn;
+    int pid = gl_PrimitiveID;
+    int iid = gl_InvocationID;
+
+    gl_out[gl_InvocationID].gl_Position = p;
+    gl_out[gl_InvocationID].gl_PointSize = ps;        // ERROR, need point_size extension
+    gl_out[gl_InvocationID].gl_ClipDistance[1] = cd;  // ERROR, not in ES
+
+    gl_TessLevelOuter[3] = 3.2;
+    gl_TessLevelInner[1] = 1.3;
+
+    if (a > 10)
+        barrier();           // ERROR
+    else
+        barrier();           // ERROR
+
+    barrier();
+
+    do {
+        barrier();           // ERROR
+    } while (a > 10);
+
+    switch (a) {
+    default:
+        barrier();           // ERROR
+        break;
+    }
+    a < 12 ? a : (barrier(), a); // ERROR
+    {
+        barrier();
+    }
+
+    return;
+
+    barrier();               // ERROR
+}
+
+layout(vertices = 4) in;    // ERROR, not on in
+layout(vertices = 5) out;   // ERROR, changing #
+
+void foo()
+{
+    gl_out[4].gl_Position;  // ERROR, out of range
+
+    barrier();              // ERROR, not in main
+}
+
+in vec2 ina;                // ERROR, not array
+in vec2 inb[];
+in vec2 inc[18];            // ERROR, wrong size
+in vec2 ind[gl_MaxPatchVertices];
+patch out float implA[];    // ERROR, not sized
+
+#extension GL_ARB_separate_shader_objects : enable
+
+layout(location = 3) in vec4 ivla[];
+layout(location = 4) in vec4 ivlb[];
+layout(location = 4) in vec4 ivlc[];  // ERROR, overlapping
+
+layout(location = 3) out vec4 ovla[];
+layout(location = 4) out vec4 ovlb[];
+layout(location = 4) out vec4 ovlc[];  // ERROR, overlapping
+
+void foop()
+{
+    precise float d;                  // ERROR without gpu_shader5
+    d = fma(d, d, d);                 // ERROR without gpu_shader5
+}
+
+patch out pinbn {
+    int a;
+} pinbi;
+
+centroid out vec3 myColor2[];
+centroid in vec3 centr[];
+sample out vec4 perSampleColor[];   // ERROR without sample extensions
+
+layout(vertices = 4) out float badlay[];   // ERROR, not on a variable
+out float misSized[5];              // ERROR, size doesn't match
+out float okaySize[4];
+
+#extension GL_OES_tessellation_point_size : enable
+
+void pointSize2()
+{
+    float ps = gl_in[1].gl_PointSize;
+    gl_out[gl_InvocationID].gl_PointSize = ps;
+}
+
+#extension GL_OES_gpu_shader5 : enable
+
+precise vec3 pv3;
+
+void goodfoop()
+{
+    precise float d;
+
+    pv3 *= pv3;
+    pv3 = fma(pv3, pv3, pv3);
+    d = fma(d, d, d);
+}
+
+void bbBad()
+{
+    gl_BoundingBoxOES;  // ERROR without GL_OES_primitive_bounding_box 
+}
+
+#extension GL_OES_primitive_bounding_box : enable
+
+void bb()
+{
+    gl_BoundingBoxOES[0] = vec4(0.0);
+    gl_BoundingBoxOES[1] = vec4(1.0);
+    gl_BoundingBoxOES[2] = vec4(2.0);  // ERROR, overflow
+}
+
+out patch badpatchBName {  // ERROR, array size required
+    float f;
+} badpatchIName[];
+
+out patch patchBName {
+    float f;
+} patchIName[4];
+
+void outputtingOutparam(out int a)
+{
+    a = 2;
+}
+
+void outputting()
+{
+    outa[gl_InvocationID] = 2;
+    outa[1] = 2;                         // ERROR, not gl_InvocationID
+    gl_out[0].gl_Position = vec4(1.0);   // ERROR, not gl_InvocationID
+    outa[1];
+    gl_out[0];
+    outputtingOutparam(outa[0]);         // ERROR, not gl_InvocationID
+    outputtingOutparam(outa[gl_InvocationID]);
+    patchIName[1].f = 3.14;
+    outa[(gl_InvocationID)] = 2;
+}

+ 128 - 0
3rdparty/glslang/Test/310.tese

@@ -0,0 +1,128 @@
+#version 310 es
+
+#extension GL_EXT_tessellation_shader : enable
+#extension GL_OES_tessellation_shader : enable
+#extension GL_EXT_tessellation_shader : disable
+
+layout(vertices = 4) out; // ERROR
+layout(quads, cw) in;
+layout(triangles) in;     // ERROR
+layout(isolines) in;      // ERROR
+
+layout(ccw) in;           // ERROR
+layout(cw) in;
+
+layout(fractional_odd_spacing) in;    
+layout(equal_spacing) in;              // ERROR
+layout(fractional_even_spacing) in;    // ERROR
+
+layout(point_mode) in;
+
+patch in vec4 patchIn;
+patch out vec4 patchOut;  // ERROR
+
+void main()
+{
+    barrier(); // ERROR
+
+    int a = gl_MaxTessEvaluationInputComponents +
+            gl_MaxTessEvaluationOutputComponents +
+            gl_MaxTessEvaluationTextureImageUnits +
+            gl_MaxTessEvaluationUniformComponents +
+            gl_MaxTessPatchComponents +
+            gl_MaxPatchVertices +
+            gl_MaxTessGenLevel;
+
+    vec4 p = gl_in[1].gl_Position;
+    float ps = gl_in[1].gl_PointSize;        // ERROR, need point_size extension
+    float cd = gl_in[1].gl_ClipDistance[2];  // ERROR, not in ES
+
+    int pvi = gl_PatchVerticesIn;
+    int pid = gl_PrimitiveID;
+    vec3 tc = gl_TessCoord;
+    float tlo = gl_TessLevelOuter[3];
+    float tli = gl_TessLevelInner[1];
+
+    gl_Position = p;
+    gl_PointSize = ps;             // ERROR, need point_size extension
+    gl_ClipDistance[2] = cd;       // ERROR, not in ES
+}
+
+smooth patch in vec4 badp1;         // ERROR
+flat patch in vec4 badp2;           // ERROR
+noperspective patch in vec4 badp3;  // ERROR
+patch sample in vec3 badp4;         // ERROR
+
+#extension GL_ARB_separate_shader_objects : enable
+
+in gl_PerVertex           // ERROR, no size
+{
+    vec4 gl_Position;
+} gl_in[];
+
+in gl_PerVertex           // ERROR, second redeclaration of gl_in
+{
+    vec4 gl_Position;
+} gl_in[];
+
+layout(quads, cw) out;     // ERROR
+layout(triangles) out;     // ERROR
+layout(isolines) out;      // ERROR
+layout(cw) out;            // ERROR
+layout(fractional_odd_spacing) out;    // ERROR
+layout(equal_spacing) out;             // ERROR
+layout(fractional_even_spacing) out;   // ERROR
+layout(point_mode) out;                // ERROR
+
+in vec2 ina;      // ERROR, not array
+in vec2 inb[];
+in vec2 inc[18];  // ERROR, wrong size
+in vec2 ind[gl_MaxPatchVertices];
+
+in testbla {      // ERROR, not array
+    int f;
+} bla;
+
+in testblb {
+    int f;
+} blb[];
+
+in testblc { // ERROR wrong size
+    int f;
+} blc[18];
+
+in testbld {
+    int f;
+} bld[gl_MaxPatchVertices];
+
+layout(location = 23) in vec4 ivla[];
+layout(location = 24) in vec4 ivlb[];
+layout(location = 24) in vec4 ivlc[];  // ERROR, overlap
+
+layout(location = 23) out vec4 ovla[2];
+layout(location = 24) out vec4 ovlb[2];  // ERROR, overlap
+
+in float gl_TessLevelOuter[4];           // ERROR, can't redeclare
+
+patch in pinbn {
+    int a;
+} pinbi;
+
+centroid out vec3 myColor2;
+centroid in vec3 centr[];
+sample out vec4 perSampleColor;  // ERROR without sample extensions
+
+#extension GL_OES_tessellation_point_size : enable
+
+void pointSize2()
+{
+    float ps = gl_in[1].gl_PointSize;  // ERROR, not in the redeclaration, but no error on use of gl_PointSize
+    gl_PointSize = ps;
+}
+
+#extension GL_EXT_primitive_bounding_box : enable
+
+void bbbad()
+{
+    gl_BoundingBoxOES; // ERROR, wrong stage
+}

+ 403 - 0
3rdparty/glslang/Test/310.vert

@@ -0,0 +1,403 @@
+#version 310 es
+
+shared vec4 s;   // ERROR
+layout(local_size_x = 2) out;  // ERROR
+buffer vec4 v;  // ERROR
+in int ini;
+layout(location = 2) uniform mat4 x;
+layout(location = 3) uniform mat4 y;
+layout(location = 2) out mat4 xi;
+layout(location = 3) out mat4 yi;  // ERROR, locations conflict with xi
+
+void main()
+{
+    uvec2 u2;
+    u2 = uaddCarry(u2, u2, u2);
+    uint u1;
+    u1 = usubBorrow(u1, u1, u1);
+    uvec4 u4;
+    umulExtended(u4, u4, u4, u4);
+    ivec4 i4;
+    imulExtended(i4, i4, i4, i4);
+    int i1;
+    i1 = bitfieldExtract(i1, 4, 5);
+    uvec3 u3;
+    u3 = bitfieldExtract(u3, 4, 5);
+    ivec3 i3;
+    i3 = bitfieldInsert(i3, i3, 4, 5);
+    u1 = bitfieldInsert(u1, u1, 4, 5);
+    ivec2 i2;
+    i2 = bitfieldReverse(i2);
+    u4 = bitfieldReverse(u4);
+    i1 = bitCount(i1);
+    i3 = bitCount(u3);
+    i2 = findLSB(i2);
+    i4 = findLSB(u4);
+    i1 = findMSB(i1);
+    i2 = findMSB(u2);
+
+    vec3 v3;
+    v3 = frexp(v3, i3);
+    vec2 v2;
+    v2 = ldexp(v2, i2);
+
+    mediump vec4 v4;
+    u1 = packUnorm4x8(v4);
+    u1 = packSnorm4x8(v4);
+    v4 = unpackUnorm4x8(u1);
+    v4 = unpackSnorm4x8(u1);
+}
+
+precision highp sampler2DMS;
+precision highp isampler2DMS;
+precision highp usampler2DMS;
+
+uniform sampler2DMS s2dms;
+uniform isampler2DMS is2dms;
+uniform usampler2DMS us2dms;
+uniform usampler2DMSArray us2dmsa;   // ERROR
+
+void foo()
+{
+    ivec2 v2;
+    v2 = textureSize(s2dms);
+    v2 = textureSize(us2dms);
+    vec4 v4 = texelFetch(s2dms, v2, 2);
+    ivec4 iv4 = texelFetch(is2dms, v2, 2);
+    textureSamples(s2dms);   // ERROR
+    float f;
+    frexp(f, ini);     // ERROR, i not writable
+}
+
+out bool outb;         // ERROR
+out sampler2D outo;    // ERROR
+out float outa[4];
+out float outaa[4][2]; // ERROR
+struct S { float f; };
+out S outs;
+out S[4] outasa;       // ERROR
+out S outsa[4];        // ERROR
+struct SA { float f[4]; };
+out SA outSA;          // ERROR
+struct SS { float f; S s; };
+out SS outSS;          // ERROR
+
+layout(std430) uniform U430 { int a; } U430i;    // ERROR
+layout(std430) buffer B430 { int a; } B430i;
+
+#ifndef GL_OES_shader_io_blocks
+#error GL_OES_shader_io_blocks not defined
+#endif
+
+#extension GL_OES_shader_io_blocks : enable
+
+out outbname {
+    int a;
+    out vec4 v;
+    highp sampler2D s;   // ERROR, opaque type
+} outbinst;
+
+out outbname2 {
+    layout(location = 12) int aAnon;
+    layout(location = 13) vec4 vAnon;
+};
+
+layout(location = 12) out highp int aliased;  // ERROR, aliasing location
+
+in inbname { int a; } inbinst;  // ERROR, no in block in vertex shader
+
+out gl_PerVertex {              // ERROR, has extra member
+    highp vec4 gl_Position;
+    highp vec4 t;
+};
+
+void foo_IO()
+{
+    int sum  = gl_VertexID +
+               gl_InstanceID;
+    gl_Position = vec4(1.0);
+    gl_PointSize = 2.0;         // ERROR, removed by redeclaration
+}
+
+out gl_PerVertex {              // ERROR, already used and already redeclared
+    highp vec4 gl_Position;
+    highp vec4 t;
+};
+
+smooth out smo {                // ERROR, no smooth on a block
+    int i;
+} smon;
+
+flat out fmo {                  // ERROR, no flat on a block
+    int i;
+} fmon;
+
+centroid out cmo {              // ERROR, no centroid on a block
+    int i;
+} cmon;
+
+invariant out imo {             // ERROR, no invariant on a block
+    int i;
+} imon;
+
+in vec2 inf, ing;
+uniform ivec2 offsets[4];
+uniform sampler2D sArray[4];
+uniform int sIndex;
+layout(binding = 0) uniform atomic_uint auArray[2];
+uniform ubName { int i; } ubInst[4];
+buffer bbName { int i; } bbInst[4];
+highp uniform writeonly image2D iArray[5];
+const ivec2 constOffsets[4] = ivec2[4](ivec2(0.1), ivec2(0.2), ivec2(0.3), ivec2(0.4));
+
+void pfooBad()
+{
+    precise vec2 h;          // ERROR reserved
+    h = fma(inf, ing, h);    // ERROR, not available
+    sArray[sIndex + 1];      // ERRRO, not supported
+    auArray[sIndex + 1];
+    ubInst[1];
+    bbInst[2];
+    ubInst[sIndex + 1];      // ERROR, not supported
+    bbInst[sIndex];          // ERROR, not supported
+    iArray[2];
+    iArray[sIndex * 2];      // ERROR, not supported
+    textureGatherOffset(sArray[0], vec2(0.1), ivec2(inf));     // ERROR, offset not constant
+    textureGatherOffsets(sArray[0], vec2(0.1), constOffsets);  // ERROR, not available
+}
+
+#extension GL_OES_gpu_shader5 : enable
+
+void pfoo()
+{
+    precise vec2 h;
+    h = fma(inf, ing, h);
+    sArray[sIndex + 1];
+    ubInst[sIndex + 1];
+    bbInst[sIndex - 2];      // ERROR, still not supported
+    iArray[2];
+    iArray[sIndex - 2];
+    textureGatherOffset(sArray[0], vec2(0.1), ivec2(inf));
+    textureGatherOffsets(sArray[0], vec2(0.1), constOffsets);
+    textureGatherOffsets(sArray[0], vec2(0.1), offsets);   // ERROR, offset not constant
+}
+
+uniform samplerBuffer  badSamp1;             // ERROR, reserved
+uniform isamplerBuffer badSamp2;             // ERROR, reserved
+uniform usamplerBuffer badSamp3;             // ERROR, reserved
+uniform writeonly imageBuffer    badSamp4;   // ERROR, reserved
+uniform writeonly iimageBuffer   badSamp5;   // ERROR, reserved
+uniform writeonly uimageBuffer   badSamp6;   // ERROR, reserved
+
+#extension GL_OES_texture_buffer : enable
+#extension GL_EXT_texture_buffer : enable
+
+uniform samplerBuffer  noPreSamp1;            // ERROR, no default precision
+uniform isamplerBuffer noPreSamp2;            // ERROR, no default precision
+uniform usamplerBuffer noPreSamp3;            // ERROR, no default precision
+uniform writeonly imageBuffer    noPreSamp4;  // ERROR, no default precision
+uniform writeonly iimageBuffer   noPreSamp5;  // ERROR, no default precision
+uniform writeonly uimageBuffer   noPreSamp6;  // ERROR, no default precision
+
+precision highp samplerBuffer; 
+precision highp isamplerBuffer;
+precision highp usamplerBuffer;
+precision highp imageBuffer;   
+precision highp iimageBuffer;  
+precision highp uimageBuffer;  
+
+#ifdef GL_OES_texture_buffer
+uniform samplerBuffer  bufSamp1;          
+uniform isamplerBuffer bufSamp2;          
+uniform usamplerBuffer bufSamp3;          
+#endif
+#ifdef GL_EXT_texture_buffer
+uniform writeonly imageBuffer    bufSamp4;
+uniform writeonly iimageBuffer   bufSamp5;
+uniform writeonly uimageBuffer   bufSamp6;
+#endif
+
+void bufferT()
+{
+    highp int s1 = textureSize(bufSamp1);
+    highp int s2 = textureSize(bufSamp2);
+    highp int s3 = textureSize(bufSamp3);
+
+    highp int s4 = imageSize(bufSamp4);
+    highp int s5 = imageSize(bufSamp5);
+    highp int s6 = imageSize(bufSamp6);
+    
+    highp vec4 f1 = texelFetch(bufSamp1, s1);
+    highp ivec4 f2 = texelFetch(bufSamp2, s2);
+    highp uvec4 f3 = texelFetch(bufSamp3, s3);
+}
+
+uniform writeonly imageCubeArray  badCA1;  // ERROR, reserved
+uniform writeonly iimageCubeArray badCA2;  // ERROR, reserved
+uniform writeonly uimageCubeArray badCA3;  // ERROR, reserved
+
+uniform samplerCubeArray          badCA4;  // ERROR, reserved
+uniform samplerCubeArrayShadow    badCA5;  // ERROR, reserved
+uniform isamplerCubeArray         badCA6;  // ERROR, reserved
+uniform usamplerCubeArray         badCA7;  // ERROR, reserved
+
+#extension GL_OES_texture_cube_map_array : enable
+
+uniform writeonly imageCubeArray  noPreCA1;   // ERROR, no default precision
+uniform writeonly iimageCubeArray noPreCA2;   // ERROR, no default precision
+uniform writeonly uimageCubeArray noPreCA3;   // ERROR, no default precision
+
+uniform samplerCubeArray          noPreCA4;   // ERROR, no default precision
+uniform samplerCubeArrayShadow    noPreCA5;   // ERROR, no default precision
+uniform isamplerCubeArray         noPreCA6;   // ERROR, no default precision
+uniform usamplerCubeArray         noPreCA7;   // ERROR, no default precision
+
+precision highp imageCubeArray        ;
+precision highp iimageCubeArray       ;
+precision highp uimageCubeArray       ;
+
+precision highp samplerCubeArray      ;
+precision highp samplerCubeArrayShadow;
+precision highp isamplerCubeArray     ;
+precision highp usamplerCubeArray     ;
+
+uniform writeonly imageCubeArray  CA1;
+uniform writeonly iimageCubeArray CA2;
+uniform writeonly uimageCubeArray CA3;
+
+layout(rgba16f) uniform readonly imageCubeArray  rCA1;
+layout(rgba32i) uniform readonly iimageCubeArray rCA2;
+layout(r32ui) uniform readonly uimageCubeArray rCA3;
+
+#ifdef GL_OES_texture_cube_map_array
+uniform samplerCubeArray          CA4;
+uniform samplerCubeArrayShadow    CA5;
+uniform isamplerCubeArray         CA6;
+uniform usamplerCubeArray         CA7;
+#endif
+
+void CAT()
+{
+    highp ivec3 s4 = textureSize(CA4, 1);
+    highp ivec3 s5 = textureSize(CA5, 1);
+    highp ivec3 s6 = textureSize(CA6, 1);
+    highp ivec3 s7 = textureSize(CA7, 1);
+    
+    highp vec4 t4 = texture(CA4, vec4(0.5));
+    highp float t5 = texture(CA5, vec4(0.5), 3.0);
+    highp ivec4 t6 = texture(CA6, vec4(0.5));
+    highp uvec4 t7 = texture(CA7, vec4(0.5));
+
+    highp vec4 L4 = textureLod(CA4, vec4(0.5), 0.24);
+    highp ivec4 L6 = textureLod(CA6, vec4(0.5), 0.26);
+    highp uvec4 L7 = textureLod(CA7, vec4(0.5), 0.27);
+
+    highp vec4 g4 = textureGrad(CA4, vec4(0.5), vec3(0.1), vec3(0.2));
+    highp ivec4 g6 = textureGrad(CA6, vec4(0.5), vec3(0.1), vec3(0.2));
+    highp uvec4 g7 = textureGrad(CA7, vec4(0.5), vec3(0.1), vec3(0.2));
+
+    highp vec4 gath4 = textureGather(CA4, vec4(0.5));
+    highp vec4 gathC4 = textureGather(CA4, vec4(0.5), 2);
+    highp ivec4 gath6 = textureGather(CA6, vec4(0.5));
+    highp ivec4 gathC6 = textureGather(CA6, vec4(0.5), 1);
+    highp uvec4 gath7 = textureGather(CA7, vec4(0.5));
+    highp uvec4 gathC7 = textureGather(CA7, vec4(0.5), 0);
+
+    highp vec4 gath5 = textureGather(CA5, vec4(0.5), 2.5);
+
+    highp ivec3 s1 = imageSize(CA1);
+    highp ivec3 s2 = imageSize(CA2);
+    highp ivec3 s3 = imageSize(CA3);
+
+    imageStore(CA1, s3, vec4(1));
+    imageStore(CA2, s3, ivec4(1));
+    imageStore(CA3, s3, uvec4(1));
+
+    highp vec4 cl1 = imageLoad(rCA1, s3);
+    highp ivec4 cl2 = imageLoad(rCA2, s3);
+    highp uvec4 cl3 = imageLoad(rCA3, s3);
+}
+
+uniform sampler2DMSArray  bad2DMS;    // ERROR, reserved
+uniform isampler2DMSArray bad2DMSi;   // ERROR, reserved
+uniform usampler2DMSArray bad2DMSu;   // ERROR, reserved
+
+#extension GL_OES_texture_storage_multisample_2d_array : enable
+
+#ifdef GL_OES_texture_storage_multisample_2d_array
+
+uniform sampler2DMSArray  noPrec2DMS;    // ERROR, no default
+uniform isampler2DMSArray noPrec2DMSi;   // ERROR, no default
+uniform usampler2DMSArray noPrec2DMSu;   // ERROR, no default
+
+#endif
+
+precision highp sampler2DMSArray;
+precision highp isampler2DMSArray;
+precision highp usampler2DMSArray;
+
+uniform sampler2DMSArray  samp2DMSA;
+uniform isampler2DMSArray samp2DMSAi;
+uniform usampler2DMSArray samp2DMSAu;
+
+void MSA()
+{
+    vec4 tf = texelFetch(samp2DMSA, ivec3(5), 2);
+    ivec4 tfi = texelFetch(samp2DMSAi, ivec3(5), 2);
+    uvec4 tfu = texelFetch(samp2DMSAu, ivec3(5), 2);
+    
+    ivec3 tfs = textureSize(samp2DMSA);
+    ivec3 tfsi = textureSize(samp2DMSAi);
+    ivec3 tfsb = textureSize(samp2DMSAi, 4);  // ERROR, no lod
+    ivec3 tfsu = textureSize(samp2DMSAu);
+}
+
+#ifdef GL_OES_shader_image_atomic 
+#extension GL_OES_shader_image_atomic : enable
+#endif
+
+uniform layout(r32f)  highp  image2D im2Df;
+uniform layout(r32ui) highp uimage2D im2Du;
+uniform layout(r32i)  highp iimage2D im2Di;
+uniform ivec2 P;
+
+void goodImageAtom()
+{
+    float datf;
+    int dati;
+    uint datu;
+
+    imageAtomicAdd(     im2Di, P, dati);
+    imageAtomicAdd(     im2Du, P, datu);
+    imageAtomicMin(     im2Di, P, dati);
+    imageAtomicMin(     im2Du, P, datu);
+    imageAtomicMax(     im2Di, P, dati);
+    imageAtomicMax(     im2Du, P, datu);
+    imageAtomicAnd(     im2Di, P, dati);
+    imageAtomicAnd(     im2Du, P, datu);
+    imageAtomicOr(      im2Di, P, dati);
+    imageAtomicOr(      im2Du, P, datu);
+    imageAtomicXor(     im2Di, P, dati);
+    imageAtomicXor(     im2Du, P, datu);
+    imageAtomicExchange(im2Di, P, dati);
+    imageAtomicExchange(im2Du, P, datu);
+    imageAtomicExchange(im2Df, P, datf);
+    imageAtomicCompSwap(im2Di, P,  3, dati);
+    imageAtomicCompSwap(im2Du, P, 5u, datu);
+}
+
+sample out vec4 colorSampInBad;       // ERROR, reserved
+
+#extension GL_OES_shader_multisample_interpolation : enable
+
+sample out vec4 colorSample;
+flat sample out vec4 colorfsi;
+sample out vec3 sampInArray[4];
+in vec4 inv4;
+
+void badInterp()
+{
+    interpolateAtCentroid(inv4);             // ERROR, wrong stage
+    interpolateAtSample(inv4, 1);            // ERROR, need extension
+    interpolateAtOffset(inv4, vec2(0.2));    // ERROR, need extension
+}

+ 115 - 0
3rdparty/glslang/Test/310AofA.vert

@@ -0,0 +1,115 @@
+#version 310 es
+
+// Check name mangling of functions with parameters that are multi-dimensional arrays.
+
+#define NX 2
+#define NY 3
+#define NZ 4
+void f(bool a, float b, uint[4] c, int[NY][NX] d) {
+}
+
+void main() {
+  int[NY][NX] d;
+  f(false, 12.1, uint[NZ](uint(0),uint(1),uint(1),uint(2)), d);
+}
+
+buffer b {
+    float u[];  // ERROR
+    vec4 v[];
+} name[3];
+
+uniform ub {
+    float u;
+    vec4 v[];   // ERROR
+} uname[3];
+
+buffer b2 {
+    float u;
+    vec4 v[][];  // ERROR
+} name2[3];
+
+buffer b3 {
+    float u; 
+    vec4 v[][7];
+} name3[3];
+
+// General arrays of arrays
+
+float[4][5][6] many[1][2][3];
+
+float gu[][7];     // ERROR, size required
+float g4[4][7];
+float g5[5][7];
+
+float[4][7] foo(float a[5][7])
+{
+    float r[7];
+    r = a[2];
+    float[](a[0], a[1], r, a[3]);              // ERROR, too few dims
+    float[4][7][4](a[0], a[1], r, a[3]);       // ERROR, too many dims
+    return float[4][7](a[0], a[1], r, a[3]);
+    return float[][](a[0], a[1], r, a[3]);
+    return float[][7](a[0], a[1], a[2], a[3]);
+}
+
+void bar(float[5][7]) {}
+
+void foo2()
+{
+    {
+        float gu[3][4][2];
+
+        gu[2][4][1] = 4.0;                     // ERROR, overflow
+    }
+    vec4 ca4[3][2] = vec4[][](vec4[2](vec4(0.0), vec4(1.0)),
+                              vec4[2](vec4(0.0), vec4(1.0)),
+                              vec4[2](vec4(0.0), vec4(1.0)));
+    vec4 caim[][2] = vec4[][](vec4[2](vec4(4.0), vec4(2.0)),
+                              vec4[2](vec4(4.0), vec4(2.0)),
+                              vec4[2](vec4(4.0), vec4(2.0)));
+    vec4 caim2[][] = vec4[][](vec4[2](vec4(4.0), vec4(2.0)),
+                              vec4[2](vec4(4.0), vec4(2.0)),
+                              vec4[2](vec4(4.0), vec4(2.0)));
+    vec4 caim3[3][] = vec4[][](vec4[2](vec4(4.0), vec4(2.0)),
+                               vec4[2](vec4(4.0), vec4(2.0)),
+                               vec4[2](vec4(4.0), vec4(2.0)));
+
+    g4 = foo(g5);
+    g5 = g4;           // ERROR, wrong types
+    gu = g4;           // ERROR, not yet sized
+
+    foo(gu);           // ERROR, not yet sized
+    bar(g5);
+
+    if (foo(g5) == g4)
+        ;
+    if (foo(g5) == g5)  // ERROR, different types
+        ;
+
+    float u[5][7];
+    u[5][2] = 5.0;      // ERROR
+    foo(u);
+
+    vec4 badAss[3];
+    name[1].v[-1];     // ERROR
+    name[1].v[1] = vec4(4.3);
+    name[1].v = badAss;  // ERROR, bad assignemnt
+
+    name3[0].v[1].length();  // 7
+    name3[0].v.length();     // run time
+}
+
+struct badS {
+    int sa[];     // ERROR
+    int a[][];    // ERROR
+    int b[][2];   // ERROR
+    int c[2][];   // ERROR
+    int d[][4];   // ERROR
+};
+
+in float inArray[2][3];    // ERROR
+out float outArray[2][3];  // ERROR
+
+uniform ubaa {
+    int a;
+} ubaaname[2][3];  // ERROR

+ 8 - 0
3rdparty/glslang/Test/310implicitSizeArrayError.vert

@@ -0,0 +1,8 @@
+#version 310 es
+layout (location=0) uniform Block {
+  highp int a[];
+} uni;
+layout (location=0) out highp int o;
+void main() {
+  o = uni.a[2];
+}

+ 152 - 0
3rdparty/glslang/Test/330.frag

@@ -0,0 +1,152 @@
+#version 330 compatibility
+
+in vec4 inVar;
+layout(location=0, index=0) out vec4 outVar;
+
+varying vec4 varyingVar;
+
+void main()
+{
+    gl_FragColor = varyingVar;  // link ERROR: user output was used
+    gl_FragData[1] = inVar;     // link ERROR: user output was used
+    int buffer = 4;
+}
+
+#extension GL_ARB_separate_shader_objects : enable
+
+in gl_PerFragment {
+    vec4 gl_Color;
+};
+
+void foo()
+{
+    vec4 c = gl_Color;
+    outVar = inVar;
+}
+
+in gl_block { // ERROR
+    int gl_i;
+} gl_name;
+
+in myBlock {
+    int gl_i;  // ERROR
+} gl_name;     // ERROR
+
+in gl_PerVertex {  // ERROR
+    vec4 gl_FragCoord;
+} gl_in[];
+
+in gl_PerVertex {  // ERROR
+    vec4 gl_FragCoord;
+};  // ERROR
+
+const int start = 6;
+layout(location = -2) in vec4 v1;         // ERROR
+layout(location = start + 2) in vec4 v2;  // ERROR
+layout(location = 4.7e10) in vec4 v20;    // ERROR
+layout(location = +60) in float v21;      // ERROR
+layout(location = (2)) in float v22;      // ERROR
+
+struct S {
+    float f1;
+    layout(location = 3) float f2;        // ERROR
+};
+
+layout(location = 1) in inblock {         // ERROR
+    float f1;
+    layout(location = 3) float f2;        // ERROR
+};
+
+layout(location = 1) uniform ublock {     // ERROR
+    float f1;
+    layout(location = 3) float f2;        // ERROR
+} uinst;
+
+#extension GL_ARB_enhanced_layouts : enable
+
+layout(location = start) in vec4 v3;
+layout(location = -2) in vec4 v4;         // ERROR
+layout(location = -start) in vec4 v5;     // ERROR
+layout(location = start*start - 2 - 4) in vec4 v6;
+layout(location = +61) in float v23;
+layout(location = (62)) in float v24;
+
+struct S2 {
+    float f1;
+    layout(location = 3) float f2;        // ERROR
+};
+
+layout(location = 28) in inblock2 {
+    bool b1;
+    float f1;
+    layout(location = 25) float f2;
+    vec4 f3;
+    layout(location = 21) S2 s2;
+    vec4 f4;
+    vec4 f5;
+} ininst2;
+
+layout(location = 13) uniform ublock2 {   // ERROR
+    float f1;
+    layout(location = 3) float f2;        // ERROR
+} uinst2;
+
+in inblock3 {                             // ERROR, mix of location internal with no location external
+    float f1;
+    layout(location = 40) float f2;
+} in3;
+
+in ublock4 {
+    layout(location = 50) float f1;
+    layout(location = 51) float f2;
+} in4;
+
+layout(location = 33) in struct SS {
+    vec3 a;    // gets location 33
+    mat2 b;    // gets locations 34 and 35
+    vec4 c[2]; // gets locations 36 and 37
+    layout (location = 38) vec2 A; // ERROR, can't use on struct member
+} s;
+
+layout(location = 44) in block {
+    vec4 d; // gets location 44
+    vec4 e; // gets location 45
+    layout(location = 47) vec4 f; // gets location 47
+    vec4 g; // gets location 48
+    layout (location = 41) vec4 h; // gets location 41
+    vec4 i; // gets location 42
+    vec4 j; // gets location 43
+    vec4 k; // ERROR, location 44 already used
+};
+
+layout(index=0) out vec4 outVar2; // ERROR: missing explicit location
+layout(location=0, index=1) out vec4 outVar3; // no error even though location is overlapping
+layout(location=0, index=1) out vec4 outVar4; // ERROR overlapping
+layout(location=27, index=0) in vec4 indexIn; // ERROR, not on in
+layout(location=0, index=0) in; // ERROR, not just on in
+layout(location=0, index=0) out; // ERROR, need a variable
+layout(location=26, index=0) out indexBlock { int a; } indexBlockI; // ERROR, not on a block
+
+uniform sampler1D samp1D;
+uniform sampler2DShadow samp2Ds;
+
+void qlod()
+{
+    vec2 lod;
+    float pf;
+    vec2 pf2;
+    vec3 pf3;
+
+    lod = textureQueryLod(samp1D, pf);      // ERROR, not until 400
+    lod = textureQueryLod(samp2Ds, pf2);    // ERROR, not until 400
+}
+
+int precise;                // okay, not a keyword yet
+struct SKeyMem { int precise; } KeyMem; // okay, not a keyword yet
+
+void fooKeyMem()
+{
+    KeyMem.precise;
+}
+
+layout(location=28, index=2) out vec4 outIndex2; // ERROR index out of range

+ 12 - 0
3rdparty/glslang/Test/330comp.frag

@@ -0,0 +1,12 @@
+#version 330 compatibility
+
+in vec4 inVar;
+out vec4 outVar;
+
+varying vec4 varyingVar;
+
+void main()
+{
+    gl_FragColor = varyingVar;
+    gl_FragData[1] = inVar * gl_ModelViewMatrix;
+}

+ 197 - 0
3rdparty/glslang/Test/400.frag

@@ -0,0 +1,197 @@
+#version 400 core
+
+in vec2 c2D;
+flat in int i;
+out vec4 outp;
+uniform sampler2D arrayedSampler[5];
+uniform usampler2DRect samp2dr;
+uniform isampler2DArray isamp2DA;
+
+void main()
+{
+    vec4 v;
+    v = texture(arrayedSampler[i], c2D);
+    outp.x = gl_ClipDistance[1];
+
+    ivec2 offsets[4];
+    const ivec2 constOffsets[4] = ivec2[4](ivec2(1,2), ivec2(3,4), ivec2(15,16), ivec2(-2,0));
+    uvec4 uv4 = textureGatherOffsets(samp2dr, c2D, offsets, 2);  // ERROR, offsets not constant
+    uv4 = textureGatherOffsets(samp2dr, c2D, constOffsets, 2);
+    vec4 v4 = textureGather(arrayedSampler[0], c2D);
+    ivec4 iv4 = textureGatherOffset(isamp2DA, vec3(0.1), ivec2(1), 3);
+    iv4 = textureGatherOffset(isamp2DA, vec3(0.1), ivec2(1), i);  // ERROR, last argument not const
+    iv4 = textureGatherOffset(isamp2DA, vec3(0.1), ivec2(1), 4);  // ERROR, last argument out of range
+    iv4 = textureGatherOffset(isamp2DA, vec3(0.1), ivec2(1), 1+2);
+    iv4 = textureGatherOffset(isamp2DA, vec3(0.1), ivec2(i));
+
+    vec4 c = gl_FragCoord;
+}
+
+layout(location = 4) in vec4 vl; // ERROR, not supported
+
+#ifdef GL_ARB_separate_shader_objects
+#extension GL_ARB_separate_shader_objects : enable
+#endif
+
+layout(location = 6) in vec4 vl2;
+
+layout(location = 3) uniform vec3 uv3;
+
+layout(location = 5) in vec4 gl_Color;      // ERROR, layout
+noperspective in float gl_ClipDistance[4];  // ERROR, can't change qualifier
+
+layout(origin_upper_left, pixel_center_integer) in vec4 gl_FragCoord;  // ERROR, declared after use
+
+uniform sampler2DRectShadow u2drs;
+
+void foo23()
+{
+    const ivec2[3] offsets = ivec2[3](ivec2(1,2), ivec2(3,4), ivec2(15,16));
+
+    textureProjGradOffset(u2drs, outp, vec2(0.0), vec2(0.0), ivec2(c2D));     // ERROR, offset not constant
+    textureProjGradOffset(u2drs, outp, vec2(0.0), vec2(0.0), offsets[1]);
+    textureProjGradOffset(u2drs, outp, vec2(0.0), vec2(0.0), offsets[2]);     // ERROR, offset out of range
+    textureProjGradOffset(u2drs, outp, vec2(0.0), vec2(0.0), ivec2(-10, 20)); // ERROR, offset out of range
+}
+
+patch in vec4 patchIn;              // ERROR
+patch out vec4 patchOut;            // ERROR
+
+void foo24()
+{
+    dvec3 df, di;
+    df = modf(dvec3(outp.xyz), di);
+}
+
+in float in1;
+in vec2 in2;
+in vec3 in3;
+in vec4 in4;
+
+void foodc1()
+{
+    vec2 v2 = dFdxFine(in2);           // ERROR
+    vec3 v3 = dFdyCoarse(in3);         // ERROR
+    vec4 v4 = fwidthCoarse(in4) + fwidthFine(in4);   // ERROR
+}
+
+#extension GL_ARB_derivative_control : enable
+
+void foodc2()
+{
+    vec2 v2 = dFdxFine(in2);
+    vec3 v3 = dFdyCoarse(in3);
+    vec4 v4 = fwidthCoarse(in4) + fwidthFine(in4);
+
+    uint u1;
+    ivec3 i3;
+    ivec2 i2;
+    v2 = frexp(v2, i2);
+    v3 = ldexp(v3, i3);
+
+    u1 = packUnorm4x8(v4);
+    u1 = packSnorm4x8(v4);
+    v4 = unpackUnorm4x8(u1);
+    v4 = unpackSnorm4x8(u1);
+
+    double d;
+    uvec2 u2;
+    d = packDouble2x32(u2);
+    u2 = unpackDouble2x32(d);
+}
+
+sample in vec4 colorSampIn;
+sample out vec4 colorSampleBad;     // ERROR
+noperspective in vec4 colorfsi;
+sample in vec3 sampInArray[4];
+smooth in float scalarIn;
+flat centroid in vec2 colorfc;
+
+struct S {
+    float x;
+};
+
+in S s1;
+sample S s2;
+
+void interp()
+{
+    interpolateAtCentroid(colorfc);
+    interpolateAtCentroid(colorSampIn);
+    interpolateAtCentroid(colorfsi);
+    interpolateAtCentroid(scalarIn);
+    interpolateAtCentroid(sampInArray);         // ERROR
+    interpolateAtCentroid(sampInArray[2]);
+    interpolateAtCentroid(sampInArray[2].xy);   // ERROR
+
+    interpolateAtSample(sampInArray, 1);        // ERROR
+    interpolateAtSample(sampInArray[i], 0);
+    interpolateAtSample(s1.x, 2);
+    interpolateAtSample(scalarIn, 1);
+
+    interpolateAtOffset(sampInArray, vec2(0.2));         // ERROR
+    interpolateAtOffset(sampInArray[2], vec2(0.2));
+    interpolateAtOffset(sampInArray[2].xy, vec2(0.2));   // ERROR, no swizzle
+    interpolateAtOffset(scalarIn + scalarIn, vec2(0.2)); // ERROR, no binary ops other than dereference
+    interpolateAtOffset(s2.x, vec2(0.2));      // ERROR
+
+    float f;
+    interpolateAtCentroid(f);           // ERROR, not interpolant
+    interpolateAtSample(outp, 0);       // ERROR, not interpolant
+}
+
+uniform sampler1D samp1D;
+uniform isampler2D isamp2D;
+uniform usampler3D usamp3D;
+uniform samplerCube sampCube; 
+uniform isampler1DArray isamp1DA;
+uniform usampler2DArray usamp2DA;
+uniform isamplerCubeArray isampCubeA;
+
+uniform sampler1DShadow samp1Ds;
+uniform sampler2DShadow samp2Ds;
+uniform samplerCubeShadow sampCubes;
+uniform sampler1DArrayShadow samp1DAs;
+uniform sampler2DArrayShadow samp2DAs;
+uniform samplerCubeArrayShadow sampCubeAs;
+
+uniform samplerBuffer sampBuf;
+uniform sampler2DRect sampRect;
+
+void qlod()
+{
+    vec2 lod;
+    float pf;
+    vec2 pf2;
+    vec3 pf3;
+
+    lod = textureQueryLod(samp1D, pf);
+    lod = textureQueryLod(isamp2D, pf2);
+    lod = textureQueryLod(usamp3D, pf3);
+    lod = textureQueryLod(sampCube, pf3);
+    lod = textureQueryLod(isamp1DA, pf);
+    lod = textureQueryLod(usamp2DA, pf2);
+    lod = textureQueryLod(isampCubeA, pf3);
+
+    lod = textureQueryLod(samp1Ds, pf);
+    lod = textureQueryLod(samp2Ds, pf2);
+    lod = textureQueryLod(sampCubes, pf3);
+    lod = textureQueryLod(samp1DAs, pf);
+    lod = textureQueryLod(samp2DAs, pf2);
+    lod = textureQueryLod(sampCubeAs, pf3);
+
+    lod = textureQueryLod(sampBuf, pf);     // ERROR
+    lod = textureQueryLod(sampRect, pf2);   // ERROR
+}
+
+struct SKeyMem { int precise; } KeyMem;     // ERROR, keyword can't be a member
+
+uniform uint uu;
+out int iout;
+
+void bitwiseConv()
+{
+    iout = uu & i;
+    iout += uu ^ i;
+    iout += i | uu;
+}

+ 330 - 0
3rdparty/glslang/Test/400.geom

@@ -0,0 +1,330 @@
+#version 400 core
+
+void main()
+{
+    EmitStreamVertex(1);
+    EndStreamPrimitive(0);
+    EmitVertex();
+    EndPrimitive();
+    int id = gl_InvocationID;
+}
+
+layout(invocations = 4) in outbn { int a; } bn[]; // ERROR, not on a block
+layout(max_vertices = 127) out;
+layout(invocations = 4) in;
+
+#extension GL_ARB_separate_shader_objects : enable
+
+in gl_PerVertex {      // testing input arrays with a block redeclaration, see 420.geom for without
+    vec4 gl_Position;
+    layout(std140, location = 3) patch float gl_PointSize; // ERRORs...
+} gl_in[];
+
+void foo()
+{
+    gl_in.length();  // ERROR
+    gl_in[1].gl_Position;
+}
+
+in vec4 color[];
+in vec4 color2[];
+in vec4 colorS[3];
+in vec4 colorBad[4];
+
+void foo2()
+{
+    color.length(); // ERROR
+    colorS.length();
+}
+
+layout(triangles) in;  // give ERROR just for colorBad
+
+in vec4 color[3];
+in vec4 color2[3];
+in vec4 colorbad2[2];  // ERROR
+
+void foo3()
+{
+    gl_in.length();
+    color.length();
+    color2.length();
+    colorS.length();
+}
+
+layout(location = 4) in vec4 cva[3];
+layout(location = 5) in vec4 cvb[3];
+layout(location = 2) in mat3 cmc[3];  // ERROR, collision
+
+patch in vec4 patchIn[];            // ERROR
+patch out vec4 patchOut;            // ERROR
+
+in float scalar;  // ERROR, no array
+
+layout(max_vertices = 127, invocations = 4) out;      // ERROR
+layout(invocations = 4, max_vertices = 127) in;       // ERROR
+layout(max_vertices = 127, invocations = 4) uniform;  // 2 ERRORs
+
+in inblockscalar {
+    int a;
+} inbls;  // ERROR, not an array
+
+in inblocka {
+    int a;
+} inbla[17];  // ERROR, wrong array size
+
+void bits()
+{
+    uvec2 u2;
+    u2 = uaddCarry(u2, u2, u2);
+    uint u1;
+    u1 = usubBorrow(u1, u1, u1);
+    uvec4 u4;
+    umulExtended(u4, u4, u4, u4);
+    ivec4 i4;
+    imulExtended(i4, i4, i4, i4);
+    int i1;
+    i1 = bitfieldExtract(i1, 4, 5);
+    uvec3 u3;
+    u3 = bitfieldExtract(u3, 4, 5);
+    ivec3 i3;
+    i3 = bitfieldInsert(i3, i3, 4, 5);
+    u1 = bitfieldInsert(u1, u1, 4, 5);
+    ivec2 i2;
+    i2 = bitfieldReverse(i2);
+    u4 = bitfieldReverse(u4);
+    i1 = bitCount(i1);
+    i3 = bitCount(u3);
+    i2 = findLSB(i2);
+    i4 = findLSB(u4);
+    i1 = findMSB(i1);
+    i2 = findMSB(u2);
+}
+
+layout(location = 7, index = 1) out vec4 indexedOut;
+
+uniform sampler1D samp1D;
+uniform sampler2DShadow samp2Ds;
+
+void qlod()
+{
+    vec2 lod;
+    float pf;
+    vec2 pf2;
+    vec3 pf3;
+
+    lod = textureQueryLod(samp1D, pf);      // ERROR, only in fragment
+    lod = textureQueryLod(samp2Ds, pf2);    // ERROR, only in fragment
+}
+
+void doubles()
+{
+    double doublev;
+    dvec2 dvec2v;
+    dvec3 dvec3v;
+    dvec4 dvec4v;
+
+    bool boolv;
+    bvec2 bvec2v;
+    bvec3 bvec3v;
+    bvec4 bvec4v;
+
+    doublev = sqrt(2.9);
+    dvec2v  = sqrt(dvec2(2.7));
+    dvec3v  = sqrt(dvec3(2.0));
+    dvec4v  = sqrt(dvec4(2.1));
+
+    doublev += inversesqrt(doublev);
+    dvec2v  += inversesqrt(dvec2v);
+    dvec3v  += inversesqrt(dvec3v);
+    dvec4v  += inversesqrt(dvec4v);
+
+    doublev += abs(doublev);
+    dvec2v  += abs(dvec2v);
+    dvec3v  += abs(dvec3v);
+    dvec4v  += abs(dvec4v);
+
+    doublev += sign(doublev);
+    dvec2v  += sign(dvec2v);
+    dvec3v  += sign(dvec3v);
+    dvec4v  += sign(dvec4v);
+
+    doublev += floor(doublev);
+    dvec2v  += floor(dvec2v);
+    dvec3v  += floor(dvec3v);
+    dvec4v  += floor(dvec4v);
+
+    doublev += trunc(doublev);
+    dvec2v  += trunc(dvec2v);
+    dvec3v  += trunc(dvec3v);
+    dvec4v  += trunc(dvec4v);
+
+    doublev += round(doublev);
+    dvec2v  += round(dvec2v);
+    dvec3v  += round(dvec3v);
+    dvec4v  += round(dvec4v);
+
+    doublev += roundEven(doublev);
+    dvec2v  += roundEven(dvec2v);
+    dvec3v  += roundEven(dvec3v);
+    dvec4v  += roundEven(dvec4v);
+
+    doublev += ceil(doublev);
+    dvec2v  += ceil(dvec2v);
+    dvec3v  += ceil(dvec3v);
+    dvec4v  += ceil(dvec4v);
+
+    doublev += fract(doublev);
+    dvec2v  += fract(dvec2v);
+    dvec3v  += fract(dvec3v);
+    dvec4v  += fract(dvec4v);
+
+    doublev += mod(doublev, doublev);
+    dvec2v  += mod(dvec2v, doublev);
+    dvec3v  += mod(dvec3v, doublev);
+    dvec4v  += mod(dvec4v, doublev);
+    dvec2v  += mod(dvec2v, dvec2v);
+    dvec3v  += mod(dvec3v, dvec3v);
+    dvec4v  += mod(dvec4v, dvec4v);
+
+    doublev += modf(doublev, doublev);
+    dvec2v  += modf(dvec2v,  dvec2v);
+    dvec3v  += modf(dvec3v,  dvec3v);
+    dvec4v  += modf(dvec4v,  dvec4v);
+
+    doublev += min(doublev, doublev);
+    dvec2v  += min(dvec2v, doublev);
+    dvec3v  += min(dvec3v, doublev);
+    dvec4v  += min(dvec4v, doublev);
+    dvec2v  += min(dvec2v, dvec2v);
+    dvec3v  += min(dvec3v, dvec3v);
+    dvec4v  += min(dvec4v, dvec4v);
+
+    doublev += max(doublev, doublev);
+    dvec2v  += max(dvec2v, doublev);
+    dvec3v  += max(dvec3v, doublev);
+    dvec4v  += max(dvec4v, doublev);
+    dvec2v  += max(dvec2v, dvec2v);
+    dvec3v  += max(dvec3v, dvec3v);
+    dvec4v  += max(dvec4v, dvec4v);
+
+    doublev += clamp(doublev, doublev, doublev);
+    dvec2v  += clamp(dvec2v, doublev, doublev);
+    dvec3v  += clamp(dvec3v, doublev, doublev);
+    dvec4v  += clamp(dvec4v, doublev, doublev);
+    dvec2v  += clamp(dvec2v, dvec2v, dvec2v);
+    dvec3v  += clamp(dvec3v, dvec3v, dvec3v);
+    dvec4v  += clamp(dvec4v, dvec4v, dvec4v);
+
+    doublev += mix(doublev, doublev, doublev);
+    dvec2v  += mix(dvec2v, dvec2v, doublev);
+    dvec3v  += mix(dvec3v, dvec3v, doublev);
+    dvec4v  += mix(dvec4v, dvec4v, doublev);
+    dvec2v  += mix(dvec2v, dvec2v, dvec2v);
+    dvec3v  += mix(dvec3v, dvec3v, dvec3v);
+    dvec4v  += mix(dvec4v, dvec4v, dvec4v);
+    doublev += mix(doublev, doublev, boolv);
+    dvec2v  += mix(dvec2v, dvec2v, bvec2v);
+    dvec3v  += mix(dvec3v, dvec3v, bvec3v);
+    dvec4v  += mix(dvec4v, dvec4v, bvec4v);
+
+    doublev += step(doublev, doublev);
+    dvec2v  += step(dvec2v, dvec2v);
+    dvec3v  += step(dvec3v, dvec3v);
+    dvec4v  += step(dvec4v, dvec4v);
+    dvec2v  += step(doublev, dvec2v);
+    dvec3v  += step(doublev, dvec3v);
+    dvec4v  += step(doublev, dvec4v);
+
+    doublev += smoothstep(doublev, doublev, doublev);
+    dvec2v  += smoothstep(dvec2v, dvec2v, dvec2v);
+    dvec3v  += smoothstep(dvec3v, dvec3v, dvec3v);
+    dvec4v  += smoothstep(dvec4v, dvec4v, dvec4v);
+    dvec2v  += smoothstep(doublev, doublev, dvec2v);
+    dvec3v  += smoothstep(doublev, doublev, dvec3v);
+    dvec4v  += smoothstep(doublev, doublev, dvec4v);
+
+    boolv  = isnan(doublev);
+    bvec2v = isnan(dvec2v);
+    bvec3v = isnan(dvec3v);
+    bvec4v = isnan(dvec4v);
+
+    boolv  = boolv ? isinf(doublev) : false;
+    bvec2v = boolv ? isinf(dvec2v)  : bvec2(false);
+    bvec3v = boolv ? isinf(dvec3v)  : bvec3(false);
+    bvec4v = boolv ? isinf(dvec4v)  : bvec4(false);
+
+    doublev += length(doublev);
+    doublev += length(dvec2v);
+    doublev += length(dvec3v);
+    doublev += length(dvec4v);
+
+    doublev += distance(doublev, doublev);
+    doublev += distance(dvec2v, dvec2v);
+    doublev += distance(dvec3v, dvec3v);
+    doublev += distance(dvec4v, dvec4v);
+
+    doublev += dot(doublev, doublev);
+    doublev += dot(dvec2v, dvec2v);
+    doublev += dot(dvec3v, dvec3v);
+    doublev += dot(dvec4v, dvec4v);
+
+    dvec3v += cross(dvec3v, dvec3v);
+
+    doublev += normalize(doublev);
+    dvec2v  += normalize(dvec2v);
+    dvec3v  += normalize(dvec3v);
+    dvec4v  += normalize(dvec4v);
+
+    doublev += faceforward(doublev, doublev, doublev);
+    dvec2v  += faceforward(dvec2v, dvec2v, dvec2v);
+    dvec3v  += faceforward(dvec3v, dvec3v, dvec3v);
+    dvec4v  += faceforward(dvec4v, dvec4v, dvec4v);
+
+    doublev += reflect(doublev, doublev);
+    dvec2v  += reflect(dvec2v, dvec2v);
+    dvec3v  += reflect(dvec3v, dvec3v);
+    dvec4v  += reflect(dvec4v, dvec4v);
+
+    doublev += refract(doublev, doublev, doublev);
+    dvec2v  += refract(dvec2v, dvec2v, doublev);
+    dvec3v  += refract(dvec3v, dvec3v, doublev);
+    dvec4v  += refract(dvec4v, dvec4v, doublev);
+
+    dmat2   dmat2v   = outerProduct(dvec2v, dvec2v);
+    dmat3   dmat3v   = outerProduct(dvec3v, dvec3v);
+    dmat4   dmat4v   = outerProduct(dvec4v, dvec4v);
+    dmat2x3 dmat2x3v = outerProduct(dvec3v, dvec2v);
+    dmat3x2 dmat3x2v = outerProduct(dvec2v, dvec3v);
+    dmat2x4 dmat2x4v = outerProduct(dvec4v, dvec2v);
+    dmat4x2 dmat4x2v = outerProduct(dvec2v, dvec4v);
+    dmat3x4 dmat3x4v = outerProduct(dvec4v, dvec3v);
+    dmat4x3 dmat4x3v = outerProduct(dvec3v, dvec4v);
+
+    dmat2v *= matrixCompMult(dmat2v, dmat2v);
+    dmat3v *= matrixCompMult(dmat3v, dmat3v);
+    dmat4v *= matrixCompMult(dmat4v, dmat4v);
+    dmat2x3v = matrixCompMult(dmat2x3v, dmat2x3v);
+    dmat2x4v = matrixCompMult(dmat2x4v, dmat2x4v);
+    dmat3x2v = matrixCompMult(dmat3x2v, dmat3x2v);
+    dmat3x4v = matrixCompMult(dmat3x4v, dmat3x4v);
+    dmat4x2v = matrixCompMult(dmat4x2v, dmat4x2v);
+    dmat4x3v = matrixCompMult(dmat4x3v, dmat4x3v);
+
+    dmat2v   *= transpose(dmat2v);
+    dmat3v   *= transpose(dmat3v);
+    dmat4v   *= transpose(dmat4v);
+    dmat2x3v  = transpose(dmat3x2v);
+    dmat3x2v  = transpose(dmat2x3v);
+    dmat2x4v  = transpose(dmat4x2v);
+    dmat4x2v  = transpose(dmat2x4v);
+    dmat3x4v  = transpose(dmat4x3v);
+    dmat4x3v  = transpose(dmat3x4v);
+
+    doublev += determinant(dmat2v);
+    doublev += determinant(dmat3v);
+    doublev += determinant(dmat4v);
+
+    dmat2v *= inverse(dmat2v);
+    dmat3v *= inverse(dmat3v);
+    dmat4v *= inverse(dmat4v);
+}

+ 105 - 0
3rdparty/glslang/Test/400.tesc

@@ -0,0 +1,105 @@
+#version 400 core
+
+layout(vertices = 4) out;
+int outa[gl_out.length()];
+
+layout(quads) in;                   // ERROR
+layout(ccw) out;                    // ERROR
+layout(fractional_even_spacing) in; // ERROR
+
+patch in vec4 patchIn;              // ERROR
+patch out vec4 patchOut;
+
+void main()
+{
+    barrier();
+
+    int a = gl_MaxTessControlInputComponents +
+            gl_MaxTessControlOutputComponents +
+            gl_MaxTessControlTextureImageUnits +
+            gl_MaxTessControlUniformComponents +
+            gl_MaxTessControlTotalOutputComponents;
+
+    vec4 p = gl_in[1].gl_Position;
+    float ps = gl_in[1].gl_PointSize;
+    float cd = gl_in[1].gl_ClipDistance[2];
+
+    int pvi = gl_PatchVerticesIn;
+    int pid = gl_PrimitiveID;
+    int iid = gl_InvocationID;
+
+    gl_out[gl_InvocationID].gl_Position = p;
+    gl_out[gl_InvocationID].gl_PointSize = ps;
+    gl_out[gl_InvocationID].gl_ClipDistance[1] = cd;
+
+    gl_TessLevelOuter[3] = 3.2;
+    gl_TessLevelInner[1] = 1.3;
+
+    if (a > 10)
+        barrier();           // ERROR
+    else
+        barrier();           // ERROR
+
+    barrier();
+
+    do {
+        barrier();           // ERROR
+    } while (a > 10);
+
+    switch (a) {
+    default:
+        barrier();           // ERROR
+        break;
+    }
+    a < 12 ? a : (barrier(), a); // ERROR
+    {
+        barrier();
+    }
+
+    return;
+
+    barrier();               // ERROR
+}
+
+layout(vertices = 4) in;    // ERROR
+layout(vertices = 5) out;   // ERROR
+
+void foo()
+{
+    gl_out[4].gl_PointSize;  // ERROR
+
+    barrier();                // ERROR
+}
+
+in vec2 ina;   // ERROR, not array
+in vec2 inb[];
+in vec2 inc[18];  // ERROR, wrong size
+in vec2 ind[gl_MaxPatchVertices];
+
+#extension GL_ARB_separate_shader_objects : enable
+
+layout(location = 3) in vec4 ivla[];
+layout(location = 4) in vec4 ivlb[];
+layout(location = 4) in vec4 ivlc[];  // ERROR, overlapping
+
+layout(location = 3) out vec4 ovla[];
+layout(location = 4) out vec4 ovlb[];
+layout(location = 4) out vec4 ovlc[];  // ERROR, overlapping
+
+precise vec3 pv3;
+
+void foop()
+{
+    precise double d;
+
+    pv3 *= pv3;
+    pv3 = fma(pv3, pv3, pv3);
+    d = fma(d, d, d);
+}
+
+patch out pinbn {
+    int a;
+} pinbi;
+
+invariant precise out vec4 badOrder[]; // ERROR, precise must appear first
+void badp(out precise float f);        // ERROR, precise must appear first

+ 105 - 0
3rdparty/glslang/Test/400.tese

@@ -0,0 +1,105 @@
+#version 400 core
+
+layout(vertices = 4) out; // ERROR
+layout(quads, cw) in;
+layout(triangles) in;     // ERROR
+layout(isolines) in;      // ERROR
+
+layout(ccw) in;           // ERROR
+layout(cw) in;
+
+layout(fractional_odd_spacing) in;    
+layout(equal_spacing) in;              // ERROR
+layout(fractional_even_spacing) in;    // ERROR
+
+layout(point_mode) in;
+
+patch in vec4 patchIn;
+patch out vec4 patchOut;  // ERROR
+
+void main()
+{
+    barrier(); // ERROR
+
+    int a = gl_MaxTessEvaluationInputComponents +
+            gl_MaxTessEvaluationOutputComponents +
+            gl_MaxTessEvaluationTextureImageUnits +
+            gl_MaxTessEvaluationUniformComponents +
+            gl_MaxTessPatchComponents +
+            gl_MaxPatchVertices +
+            gl_MaxTessGenLevel;
+
+    vec4 p = gl_in[1].gl_Position;
+    float ps = gl_in[1].gl_PointSize;
+    float cd = gl_in[1].gl_ClipDistance[2];
+
+    int pvi = gl_PatchVerticesIn;
+    int pid = gl_PrimitiveID;
+    vec3 tc = gl_TessCoord;
+    float tlo = gl_TessLevelOuter[3];
+    float tli = gl_TessLevelInner[1];
+
+    gl_Position = p;
+    gl_PointSize = ps;
+    gl_ClipDistance[2] = cd;
+}
+
+smooth patch in vec4 badp1;         // ERROR
+flat patch in vec4 badp2;           // ERROR
+noperspective patch in vec4 badp3;  // ERROR
+patch sample in vec3 badp4;         // ERROR
+
+#extension GL_ARB_separate_shader_objects : enable
+
+in gl_PerVertex            // ERROR, no size
+{
+    float gl_ClipDistance[1];
+} gl_in[];
+
+in gl_PerVertex            // ERROR, second redeclaration of gl_in
+{
+    float gl_ClipDistance[1];
+} gl_in[];
+
+layout(quads, cw) out;     // ERROR
+layout(triangles) out;     // ERROR
+layout(isolines) out;      // ERROR
+layout(cw) out;            // ERROR
+layout(fractional_odd_spacing) out;    // ERROR
+layout(equal_spacing) out;             // ERROR
+layout(fractional_even_spacing) out;   // ERROR
+layout(point_mode) out;                // ERROR
+
+in vec2 ina;   // ERROR, not array
+in vec2 inb[];
+in vec2 inc[18];  // ERROR, wrong size
+in vec2 ind[gl_MaxPatchVertices];
+
+in testbla {
+    int f;
+} bla;        // ERROR, not array
+
+in testblb {
+    int f;
+} blb[];
+
+in testblc {
+    int f;
+} blc[18]; // ERROR wrong size
+
+in testbld {
+    int f;
+} bld[gl_MaxPatchVertices];
+
+layout(location = 23) in vec4 ivla[];
+layout(location = 24) in vec4 ivlb[];
+layout(location = 24) in vec4 ivlc[];  // ERROR
+
+layout(location = 23) out vec4 ovla[2];
+layout(location = 24) out vec4 ovlb[2];  // ERROR
+
+in float gl_TessLevelOuter[4];           // ERROR, can't redeclare
+
+patch in pinbn {
+    int a;
+} pinbi;

+ 106 - 0
3rdparty/glslang/Test/400.vert

@@ -0,0 +1,106 @@
+#version 400 core
+
+in double d;   // ERROR, no doubles
+in dvec3 d3;   // ERROR, no doubles
+in dmat4 dm4;  // ERROR, no doubles
+
+// function selection under type conversion
+void foo1(double a, uint b)  {}
+void foo1(double a, int b)   {}
+void foo1(double a, float b) {}
+void foo1(double a, double b){}
+
+void foo2(double a, float b) {}
+void foo2(double a, double b){}
+
+void foo3(double a, float b) {}
+void foo3(float a, double b) {}
+
+void ftd(  int,  float, double) {}
+void ftd( uint,  float, double) {}
+void ftd(float, double, double) {}
+
+void main()
+{
+    double d;
+	uint u;
+	int i;
+	float f;
+
+	foo1(d, d);
+	foo1(d, u);
+	foo1(d, i);
+	foo1(d, f);
+
+	foo1(f, d);
+	foo1(f, u);
+	foo1(f, i);
+	foo1(f, f);
+
+	foo1(u, d);
+	foo1(u, u);
+	foo1(u, i);
+	foo1(u, f);
+
+	foo1(i, d);
+	foo1(i, u);
+	foo1(i, i);
+	foo1(i, f);
+
+	foo2(d, d);
+	foo2(d, u);
+	foo2(d, i);
+	foo2(d, f);
+
+	foo2(f, d);
+	foo2(f, u);
+	foo2(f, i);
+	foo2(f, f);
+
+	foo2(u, d);
+	foo2(u, u);
+	foo2(u, i);
+	foo2(u, f);
+
+	foo2(i, d);
+	foo2(i, u);
+	foo2(i, i);
+	foo2(i, f);
+
+	foo3(d, d);  // ERROR, no match
+	foo3(d, u);
+	foo3(d, i);
+	foo3(d, f);
+
+	foo3(f, d);
+	foo3(f, u); // ERROR, ambiguous
+	foo3(f, i); // ERROR, ambiguous
+	foo3(f, f); // ERROR, ambiguous
+
+	foo3(u, d);
+	foo3(u, u); // ERROR, ambiguous
+	foo3(u, i); // ERROR, ambiguous
+	foo3(u, f); // ERROR, ambiguous
+
+	foo3(i, d);
+	foo3(i, u); // ERROR, ambiguous
+	foo3(i, i); // ERROR, ambiguous
+	foo3(i, f); // ERROR, ambiguous
+
+	ftd(i, f, f);
+	ftd(u, f, f);
+}
+
+void itf(int, float, int);
+void itf(int, double, int);
+
+void tf()
+{
+    double d;
+	uint u;
+	int i;
+	float f;
+	
+	itf(i, i, i);
+	itf(i, u, i);
+}

+ 39 - 0
3rdparty/glslang/Test/410.geom

@@ -0,0 +1,39 @@
+#version 410 core
+
+void main()
+{
+    gl_ViewportIndex = 7;
+}
+
+in gl_PerVertex {
+    float gl_PointSize;
+} myIn[];  // ERROR, can't redeclare a different name
+
+in gl_PerVertex {
+    float gl_PointSize;
+} gl_myIn[];  // ERROR, can't redeclare a different name
+
+in gl_PerVertex {
+    float gl_PointSize;
+} gl_in[];
+
+in gl_PerVertex {
+    float gl_PointSize;
+} gl_in[];     // ERROR, can't do it again
+
+out gl_PerVertex {
+    float gl_PointSize;
+};
+
+void foo()
+{
+    float p = gl_in[1].gl_PointSize;  // use of redeclared
+    gl_PointSize = p;                 // use of redeclared
+    vec4 v = gl_in[1].gl_Position;    // ERROR, not included in the redeclaration
+    gl_Position = vec4(1.0);          // ERROR, not included in the redeclaration
+}
+
+float foo5()
+{
+    return 4;  // implicit conversion of return type
+}

+ 11 - 0
3rdparty/glslang/Test/410.tesc

@@ -0,0 +1,11 @@
+#version 400 core
+
+// no layout(vertices = ...) out;
+int outa[gl_out.length()];  // ERROR
+
+patch out vec4 patchOut;
+
+void main()
+{
+
+}

+ 9 - 0
3rdparty/glslang/Test/410.vert

@@ -0,0 +1,9 @@
+#version 410 core
+
+in double d;
+in dvec3 d3;
+in dmat4 dm4;
+
+void main()
+{
+}

+ 30 - 0
3rdparty/glslang/Test/420.comp

@@ -0,0 +1,30 @@
+#version 420
+
+layout(local_size_x = 2) in;  // ERROR, no compute
+
+#extension GL_ARB_compute_shader : enable
+
+layout(local_size_x = 2, local_size_y = 4, local_size_z = 6) in;
+
+shared vec3 sfoo;
+
+void main()
+{
+    sfoo = vec3(gl_WorkGroupSize.x, gl_WorkGroupSize.y, gl_WorkGroupSize.z);
+    sfoo += gl_WorkGroupSize + gl_NumWorkGroups + gl_WorkGroupID + gl_LocalInvocationID + gl_GlobalInvocationID;
+    sfoo *= gl_LocalInvocationIndex;
+    sfoo += gl_MaxComputeWorkGroupCount + gl_MaxComputeWorkGroupSize;
+    sfoo *= gl_MaxComputeUniformComponents +
+            gl_MaxComputeTextureImageUnits +
+            gl_MaxComputeImageUniforms +
+            gl_MaxComputeAtomicCounters +
+            gl_MaxComputeAtomicCounterBuffers;
+
+    barrier();
+    memoryBarrier();
+    memoryBarrierAtomicCounter();
+    memoryBarrierBuffer();
+    memoryBarrierImage();
+    memoryBarrierShared();
+    groupMemoryBarrier();
+}

+ 14 - 0
3rdparty/glslang/Test/420.frag

@@ -0,0 +1,14 @@
+#version 420 core
+
+layout(depth_any) out float gl_FragDepth;
+layout(depth_greater) out float gl_FragDepth; // ERROR: redeclaration with different qualifier
+
+void main()
+{
+    gl_FragDepth = 0.3;
+}
+
+layout(depth_less) in float depth; // ERROR: depth_less only applies to gl_FragDepth
+layout(depth_any) out float gl_FragDepth;  // ERROR, done after use
+
+layout(binding=0) uniform atomic_uint a[];

+ 55 - 0
3rdparty/glslang/Test/420.geom

@@ -0,0 +1,55 @@
+#version 420 core
+
+// testing input arrays without a gl_in[] block redeclaration, see 400.geom for with
+
+int i;
+
+void foo()
+{
+    gl_in.length();  // ERROR
+    gl_in[1].gl_Position;
+    gl_in[i].gl_Position;  // ERROR
+}
+
+layout(triangles) in;
+
+in vec4 color3[3];
+
+void foo3()
+{
+    gl_in.length();
+    gl_in[i].gl_Position;
+    color3.length();
+}
+
+uniform sampler2D s2D;
+in vec2 coord[];
+uniform vec4 v4;
+
+void foo4()
+{
+    const ivec2 offsets[5] =
+    {
+        ivec2(0,1),
+        ivec2(1,-2),
+        ivec2(0,3),
+        ivec2(-3,0),
+        ivec2(2,1)
+    };
+
+    vec4 v = textureGatherOffset(s2D, coord[0], offsets[i].xy);
+
+    offsets[i].xy = ivec2(3);  // ERROR
+    v4.x = 3.2;                // ERROR
+    v4.xy;   // should have non-uniform type
+}
+
+out gl_PerVertex {
+    float gl_PointSize[1];  // ERROR, adding array
+    float gl_ClipDistance;  // ERROR, removing array
+};
+
+float foo5()
+{
+    return i;  // implicit conversion of return type
+}

+ 43 - 0
3rdparty/glslang/Test/420.tesc

@@ -0,0 +1,43 @@
+#version 420 core
+
+#extension GL_ARB_separate_shader_objects : enable
+
+layout(vertices = 4) out;
+
+out gl_PerVertex {
+    vec4 gl_Position;
+} gl_out[3];                 // ERROR, wrong size
+
+out int a[gl_out.length()];
+out int outb[5];             // ERROR, wrong size
+out int outc[];
+
+void main()
+{
+    vec4 p = gl_in[1].gl_Position;
+    float ps = gl_in[1].gl_PointSize;
+    float cd = gl_in[1].gl_ClipDistance[2];
+
+    int pvi = gl_PatchVerticesIn;
+    int pid = gl_PrimitiveID;
+    int iid = gl_InvocationID;
+
+    gl_out[gl_InvocationID].gl_Position = p;
+    gl_out[gl_InvocationID].gl_PointSize = ps;        // ERROR
+}
+
+out float outf;  // ERROR, no array
+
+layout (location = 0) in dmat2x4 vs_tcs_first[];
+layout (location = 12) in dmat2x4 vs_tcs_last[];
+
+void foo()
+{
+ if ((dmat2x4(dvec4(-0.625, -0.5, -0.375lf, -0.25), dvec4(-0.375, -0.25, -0.125, 0)) != vs_tcs_first[0]) ||
+        (dmat2x4(dvec4(0.375, 0.5, 0.625, 0.75), dvec4(0.625, 0.75, 0.875, -0.625)) != vs_tcs_last[0]))
+    {
+        ;
+    }
+}
+
+layout(vertices = 0) out;  // ERROR, can't be 0

+ 90 - 0
3rdparty/glslang/Test/420.tese

@@ -0,0 +1,90 @@
+#version 420 core
+
+const mat2x2 a = mat2( vec2( 1.0, 0.0 ), vec2( 0.0, 1.0 ) );
+mat2x2 b = { vec2( 1.0, 0.0 ), vec2( 0.0, 1.0 ) };
+const mat2x2 c = { { 1.0, 0.0, }, { 0.0, 1.0 } };
+
+float a2[2] = { 3.4, 4.2, 5.0 }; // illegal
+vec2 b2 = { 1.0, 2.0, 3.0 }; // illegal
+mat3x3 c2 = { vec3(0.0), vec3(1.0), vec3(2.0), vec3(3.0) }; // illegal
+mat2x2 d = { 1.0, 0.0, 0.0, 1.0 }; // illegal, can't flatten nesting
+
+struct {
+    float a;
+    int b;
+} e = { 1.2, 2, };
+
+struct {
+    float a;
+    int b;
+} e2 = { 1, 3 }; // legal, first initializer is converted
+
+struct {
+    float a;
+    int b;
+} e3 = { 1.2, 2, 3 }; // illegal
+
+int a3 = true; // illegal
+vec4 b3[2] = { vec4(0.0), 1.0 }; // illegal
+vec4 b4[2] = vec4[2](vec4(0.0), mat2x2(1.0)); // illegal
+mat4x2 c3 = { vec3(0.0), vec3(1.0) }; // illegal
+
+struct S1 {
+    vec4 a;
+    vec4 b;
+};
+
+struct {
+    float s;
+    float t;
+} d2[] = { S1(vec4(0.0), vec4(1.1)) }; // illegal
+
+float b5[] = { 3.4, 4.2, 5.0, 5.2, 1.1 };
+
+struct S3 {
+    float f;
+    mat2x3 m23;
+};
+
+struct S4 {
+    uvec2 uv2;
+    S3 s[2];
+};
+
+struct Single1 { int f; };
+Single1 single1 = { 10 };
+
+struct Single2 { uvec2 v; };
+Single2 single2 = { { 1, 2 } };
+
+struct Single3 { Single1 s1; };
+Single3 single3 = { { 3 } };
+
+struct Single4 { Single2 s1; };
+Single4 single4 = { { { 4u, 5u } } };
+
+const S4 constructed = S4(uvec2(1, 2), 
+                          S3[2](S3(3.0, mat2x3(4.0)), 
+                                S3(5.0, mat2x3(6.0))));
+
+const S4 curlybad1 = { {1, 2},
+                       { {3,   {4.0, 0, 0.0}, {0.0, 4.0, 0.0 } },       // ERROR, the mat2x3 isn't isolated
+                         {5.0, {6, 0.0, 0.0}, {0.0, 6.0, 0.0 } } } }; 
+
+const S4 curlyInit = { {1, 2},
+                       { {3,   { {4.0, 0, 0.0}, {0.0, 4.0, 0.0 } } },
+                         {5.0, { {6, 0.0, 0.0}, {0.0, 6.0, 0.0 } } } } }; 
+
+float vc1, vc2, vc3;
+vec3 av3 = vec3(vc1, vc2, vc3);
+vec3 bv3 = { vc1, vc2, vc3 };
+
+void main()
+{
+    memoryBarrier();
+
+    if (constructed == curlybad1)
+        ;
+    if (constructed == curlyInit)
+        ;
+}

+ 161 - 0
3rdparty/glslang/Test/420.vert

@@ -0,0 +1,161 @@
+#version 420 core
+#version 420 core
+varying vec2 v2;               // ERROR, varying reserved
+in vec4 bad[10];
+highp in vec4 badorder;
+out invariant vec4 badorder2;
+in centroid vec4 badorder4;    // ERROR, no centroid input to vertex stage
+out flat vec4 badorder3;
+void bar(in const float a);
+void bar2(highp in float b);
+smooth flat out vec4 rep;      // ERROR, replicating interpolation qualification
+centroid sample out vec4 rep2; // ERROR, replicating auxiliary qualification
+in uniform vec4 rep3;          // ERROR, replicating storage qualification
+
+int anonconst;
+const int aconst = 5;
+const int a = aconst;
+const int b = anonconst;       // ERROR at global scope
+
+const int foo()                // ERROR, no const functions
+{
+    const int a = aconst;
+    const int b = anonconst;
+    const int c = a;          // still compile-time const
+    const int d = b;          // not a compile-time const
+    float x[c];               // okay
+    float y[d];               // ERROR
+
+    return b;
+}
+
+void main()
+{
+    int i;
+    if (i == 3)
+        int j = i;
+    else
+        int k = j;              // ERROR, j is undeclared
+    int m = k;                  // ERROR, k is undeclared
+    int n = j;                  // ERROR, j is undeclared
+
+    while (true)
+        int jj;
+    int kk = jj;                // ERROR, jj is undeclared
+}
+
+const float cx = 4.20;
+const float dx = 4.20;
+
+void bar(in highp volatile vec4 v)
+{
+    int s;
+    s.x;       // okay
+    s.y;       // ERROR
+    if (bad[0].x == cx.x)
+        ;
+    if (cx.x == dx.x)
+        badorder3 = bad[0];
+
+    float f;
+    vec3 smeared = f.xxx;
+    f.xxxxx;   // ERROR
+    f.xxy;     // ERROR
+}
+
+layout(binding = 3) uniform;  // ERROR
+layout(binding = 3) uniform boundblock { int aoeu; } boundInst;
+layout(binding = 7) uniform anonblock { int aoeu; } ;
+layout(location = 1) in;      // ERROR
+layout(binding = 1) in inblock { int aoeua; };       // ERROR
+layout(binding = 100000) uniform anonblock2 { int aooeu; } ;
+layout(binding = 4) uniform sampler2D sampb1;
+layout(binding = 5) uniform sampler2D sampb2[10];
+layout(binding = 80) uniform sampler2D sampb3; // ERROR, binding too big
+layout(binding = 31) uniform sampler2D sampb4;
+layout(binding = 79) uniform sampler2D sampb5[2]; // ERROR, binding too big
+
+int fgfg(float f, mediump int i);
+int fgfg(float f, highp int i);
+
+out gl_PerVertex {
+    float gl_ClipDistance[4];
+};
+
+patch in vec4 patchIn;              // ERROR
+patch out vec4 patchOut;            // ERROR
+
+void bar23444()
+{
+    mat4x3 m43;  \
+    float a1 = m43[3].y;
+    vec3 v3;
+    int a2 = m43.length();
+    a2 += m43[1].length();
+    a2 += v3.length();
+    const float b = 2 * a1;
+    int a = gl_MinProgramTexelOffset + gl_MaxProgramTexelOffset;
+}
+
+const int comma0 = (2, 3);  // ERROR
+int comma1[(2, 3)];   // ERROR
+
+layout(r32i) uniform iimage2D iimg2D;
+layout(rgba32i) uniform iimage2D iimg2Drgba;
+layout(rgba32f) uniform image2D img2Drgba;
+layout(r32ui) uniform uimage2D uimg2D;
+uniform image2DMS img2DMS; // ERROR image variables not declared writeonly must have format layout qualifier
+uniform writeonly image2DMS img2DMSWO;
+void qux()
+{
+    int i = aoeu;
+    imageAtomicCompSwap(iimg2D, ivec2(i,i), i, i);
+    imageAtomicAdd(uimg2D, ivec2(i,i), uint(i));
+    imageAtomicMin(iimg2Drgba, ivec2(i,i), i); // ERROR iimg2Drgba does not have r32i layout
+    imageAtomicMax(img2Drgba, ivec2(i,i), i);  // ERROR img2Drgba is not integer image
+    ivec4 pos = imageLoad(iimg2D, ivec2(i,i));
+    vec4 col = imageLoad(img2DMS, ivec2(i,i), i);
+    imageStore(img2DMSWO, ivec2(i,i), i, vec4(0));
+    imageLoad(img2DMSWO, ivec2(i,i), i);       // ERROR, drops writeonly
+}
+
+volatile float vol; // ERROR, not an image
+readonly int vol2;  // ERROR, not an image
+
+void passr(coherent readonly iimage2D image)
+{
+}
+
+layout(r32i) coherent readonly uniform iimage2D qualim1;
+layout(r32i) coherent volatile readonly uniform iimage2D qualim2;
+
+void passrc()
+{
+    passr(qualim1);
+    passr(qualim2);   // ERROR, drops volatile
+    passr(iimg2D);
+}
+
+layout(rg8i) uniform uimage2D i1bad;     // ERROR, type mismatch
+layout(rgba32i) uniform image2D i2bad;   // ERROR, type mismatch
+layout(rgba32f) uniform uimage2D i3bad;  // ERROR, type mismatch
+layout(r8_snorm) uniform iimage2D i4bad; // ERROR, type mismatch
+layout(rgba32ui) uniform iimage2D i5bad; // ERROR, type mismatch
+layout(r8ui) uniform iimage2D i6bad;     // ERROR, type mismatch
+
+uniform offcheck {
+    layout(offset = 16) int foo;   // ERROR
+} offcheckI;
+
+uniform sampler1D samp1D;
+uniform sampler1DShadow samp1Ds;
+
+void qlod()
+{
+    int levels;
+
+    levels = textureQueryLevels(samp1D);   // ERROR, not until 430
+    levels = textureQueryLevels(samp1Ds);  // ERROR, not until 430
+}
+
+layout(binding=0) writeonly uniform image1D badArray[];

+ 21 - 0
3rdparty/glslang/Test/420_size_gl_in.geom

@@ -0,0 +1,21 @@
+#version 420 core
+
+// testing input arrays without a gl_in[] block redeclaration, see 400.geom for with
+
+int i;
+
+layout(triangles) in;
+in vec4 colorun[];
+in vec4 color3[3];
+
+void foo()
+{
+    gl_in.length();
+    gl_in[1].gl_Position;
+    gl_in.length();
+    gl_in[i].gl_Position;   // should be sized to 3 by 'triangles'
+}
+
+in gl_PerVertex {  // ERROR, already used
+    vec4 gl_Position;
+} gl_in[];

+ 87 - 0
3rdparty/glslang/Test/430.comp

@@ -0,0 +1,87 @@
+#version 430 core
+
+layout(local_size_x = 2) in;
+layout(local_size_x = 16) in;     // ERROR, changing
+layout(local_size_z = 4096) in;   // ERROR, too large
+layout(local_size_x = 2) in;
+
+const int total = gl_MaxComputeWorkGroupCount.y 
+                + gl_MaxComputeUniformComponents
+                + gl_MaxComputeTextureImageUnits
+                + gl_MaxComputeImageUniforms
+                + gl_MaxComputeAtomicCounters
+                + gl_MaxComputeAtomicCounterBuffers;
+
+buffer ShaderStorageBlock
+{
+    int value;
+    float values[];
+};
+
+buffer InvalidShaderStorageBlock
+{
+    float values[];
+    int value;
+} invalid;
+
+void main()
+{
+    barrier();
+    memoryBarrier();
+    memoryBarrierAtomicCounter();
+    memoryBarrierBuffer();
+    memoryBarrierShared();
+    memoryBarrierImage();
+    groupMemoryBarrier();
+    value = int(values[gl_LocalInvocationIndex]);
+
+    int a;
+    if (a > 10)
+        barrier();
+}
+
+layout(location = 2) in vec3 v3;      // ERROR
+in float f;                           // ERROR
+out float fo;                         // ERROR
+
+shared vec4 s;
+layout(location = 2) shared vec4 sl;  // ERROR
+shared float fs = 4.2;                // ERROR
+
+layout(local_size_x = 2, local_size_y = 3, local_size_z = 4) out;  // ERROR
+
+int arrX[gl_WorkGroupSize.x];
+int arrY[gl_WorkGroupSize.y];
+int arrZ[gl_WorkGroupSize.z];
+
+readonly buffer roblock
+{
+    int value;
+    float values[];
+} ro;
+
+void foo()
+{
+    ro.values[2] = 4.7;             // ERROR, readonly
+    ro.values.length();
+    barrier();
+}
+
+uniform double roll;
+uniform writeonly image2D destTex;
+void fooaoeu() {
+     ivec2 storePos = ivec2(gl_GlobalInvocationID.xy);
+     double localCoef = length(vec2(ivec2(gl_LocalInvocationID.xy)-8)/8.0);
+     dvec4 aa = dvec4(0.4, 0.2, 0.3, 0.4);
+     double globalCoef = 1.0;
+     int i = globalCoef;            // ERROR, can't convert from double to int
+     double di = i;
+}
+
+in inb {     // ERROR
+    int a;
+} inbi;
+
+out outb {     // ERROR
+    int a;
+} outbi;

+ 223 - 0
3rdparty/glslang/Test/430.vert

@@ -0,0 +1,223 @@
+#version 430 core
+
+layout(location = 3) vec4 v4;  // ERROR
+
+layout(location = 4) uniform vec4 uv4;
+
+layout(location = 2) in   inb1 { vec4 v; } b1;  // ERROR
+layout(location = 2) out outb1 { vec4 v; } b2;  // ERROR
+
+out gl_PerVertex {
+    float gl_ClipDistance[];
+};
+
+void foo()
+{
+    gl_ClipDistance[2] = 3.7;
+}
+
+struct sp {
+    highp float f;
+    in float g;             // ERROR
+    uniform float h;        // ERROR
+    invariant float i;      // ERROR
+    volatile float j;       // ERROR
+    layout(row_major) mat3 m3; // ERROR
+};
+
+void foo3(invariant vec4 v4,                 // ERROR
+          volatile vec3 v3,
+          layout(location = 3) vec2 v2,      // ERROR
+          centroid vec3 cv3)                 // ERROR
+{
+}
+
+struct S {
+    mat3x2 m[7];  // needs 7*3 locations
+    float f;      // needs 1 location
+};                // needs 22 locations
+
+layout(location = 10) out S cs[2];     // 10 through 10 + 2 * 22 - 1 = 53
+layout(location = 54) out float cf;
+layout(location = 53) out float cg; // ERROR, collision at 31
+
+layout(location = 10) in vec4 alias1;
+layout(location = 10) in vec4 alias2;  // okay for vertex input on desktop
+
+out float gl_ClipDistance[17];  // ERROR, size too big
+
+// enhanced_layouts (most tests are in 440.*)
+
+layout(location = start*start - 2 - 4) in vec4 v6e;    // ERROR
+
+layout(location = 28) in inblock2e {
+    layout(location = 25) float f2;                     // ERROR
+} ininst2e;
+
+in ublock4e {
+    layout(location = 50) float f1;                      // ERROR
+    layout(location = 51) float f2;                      // ERROR
+} in4e;
+
+layout(align=16, std140) uniform  ubl4e { int a; } inst4e;// ERROR
+
+layout(align=32) uniform ubl9e {                          // ERROR
+    layout(offset=12, align=4) float f;                   // ERROR
+    layout(offset=20) float g;                            // ERROR
+} inst9e;
+
+layout(std140) uniform blocke {
+                        vec4   a;
+    layout(offset = 32) vec3   b;                          // ERROR
+} spinste;
+
+int aconste[gl_MaxTransformFeedbackBuffers];               // ERROR
+int bconste[gl_MaxTransformFeedbackInterleavedComponents]; // ERROR
+
+out bblck2 {
+    layout(xfb_offset=64) vec4 bbv;                              // ERROR
+} bbinst2;
+
+layout(xfb_buffer = 3, xfb_stride = 64) out;                     // ERROR
+
+layout(xfb_buffer=2, xfb_offset=48, xfb_stride=80) out vec4 bge; // ERROR
+layout(              xfb_offset=32, xfb_stride=64) out vec4 bhe; // ERROR
+
+layout(xfb_stride=80, xfb_buffer=2, xfb_offset=16) out bblck4e { // ERROR
+    vec4 bbv1;
+    vec4 bbv2;
+} bbinst4e;
+
+out bblck5e {
+    layout(xfb_offset=0) vec4 bbv1;                               // ERROR
+    layout(xfb_stride=64, xfb_buffer=3, xfb_offset=48) vec4 bbv2; // ERROR
+} bbinst5e;
+
+#extension GL_ARB_enhanced_layouts : enable
+
+layout(align=16, std140) uniform  ubl4 { int a; } inst4;
+layout(std430) uniform;
+
+layout(align=32) uniform ubl9 {
+    layout(offset=12, align=4) float f;
+    layout(offset=20) float g;
+} inst9;
+
+layout(std140) uniform block {
+                        vec4   a;     // a takes offsets 0-15
+    layout(offset = 32) vec3   b;     // b takes offsets 32-43
+} spinst;
+
+int aconst[gl_MaxTransformFeedbackBuffers];
+int bconst[gl_MaxTransformFeedbackInterleavedComponents];
+
+const int start2 = 5;
+layout(location = start2 * start2 - 2 - 4) in vec4 v6;
+
+layout(location = 28) in inblock2 {  // ERROR, input block in vertex shader, other errors are valid checks still...
+    bool b1;
+    float f1;
+    layout(location = 25) float f2;
+} ininst2;
+
+in ublock4 {                         // ERROR, input block in vertex shader, other errors are valid checks still...
+    layout(location = 50) float f1;
+    layout(location = 51) float f2;
+} in4;
+
+out bblck2g {
+    layout(xfb_offset=64) vec4 bbv;
+} bbinst2g;
+
+layout(xfb_buffer = 1, xfb_stride = 80) out;  // default buffer is 3
+
+layout(xfb_buffer=1, xfb_offset=48, xfb_stride=80) out vec4 bg;
+layout(              xfb_offset=32, xfb_stride=80) out vec4 bh;
+
+layout(xfb_stride=80, xfb_buffer=1, xfb_offset=16) out bblck4 {
+    vec4 bbv1;
+} bbinst4;
+
+out bblck5 {
+    layout(xfb_offset=0) vec4 bbv1;
+    layout(xfb_stride=80, xfb_buffer=1, xfb_offset=64) vec4 bbv2;
+} bbinst5;
+
+shared vec4 sharedv;                // ERROR
+
+void fooBarrier()
+{
+    barrier();                       // ERROR
+    memoryBarrier();
+    memoryBarrierAtomicCounter();
+    memoryBarrierBuffer();
+    memoryBarrierShared();           // ERROR
+    memoryBarrierImage();
+    groupMemoryBarrier();            // ERROR
+}
+
+buffer vec4 v;  // ERROR
+
+uniform sampler2DMS s2dms;
+uniform usampler2DMSArray us2dmsa;
+layout(rgba32i) uniform iimage2DMS ii2dms;
+layout(rgba32f) uniform image2DMSArray i2dmsa;
+
+void fooq()
+{
+    int s = textureSamples(s2dms); // ERROR
+    s += textureSamples(us2dmsa);  // ERROR
+    s += imageSamples(ii2dms);     // ERROR
+    s += imageSamples(i2dmsa);     // ERROR
+}
+
+#extension GL_ARB_shader_texture_image_samples : enable
+
+void fooq2()
+{
+    int s = textureSamples(s2dms);
+    s += textureSamples(us2dmsa); 
+    s += imageSamples(ii2dms);    
+    s += imageSamples(i2dmsa);    
+}
+
+uniform sampler1D samp1D;
+uniform usampler2D usamp2D;
+uniform isampler3D isamp3D;
+uniform isamplerCube isampCube; 
+uniform isampler1DArray isamp1DA;
+uniform sampler2DArray samp2DA;
+uniform usamplerCubeArray usampCubeA;
+
+uniform sampler1DShadow samp1Ds;
+uniform sampler2DShadow samp2Ds;
+uniform samplerCubeShadow sampCubes;
+uniform sampler1DArrayShadow samp1DAs;
+uniform sampler2DArrayShadow samp2DAs;
+uniform samplerCubeArrayShadow sampCubeAs;
+
+uniform samplerBuffer sampBuf;
+uniform sampler2DRect sampRect;
+
+void qlod()
+{
+    int levels;
+
+    levels = textureQueryLevels(samp1D);
+    levels = textureQueryLevels(usamp2D);
+    levels = textureQueryLevels(isamp3D);
+    levels = textureQueryLevels(isampCube);
+    levels = textureQueryLevels(isamp1DA);
+    levels = textureQueryLevels(samp2DA);
+    levels = textureQueryLevels(usampCubeA);
+
+    levels = textureQueryLevels(samp1Ds);
+    levels = textureQueryLevels(samp2Ds);
+    levels = textureQueryLevels(sampCubes);
+    levels = textureQueryLevels(samp1DAs);
+    levels = textureQueryLevels(samp2DAs);
+    levels = textureQueryLevels(sampCubeAs);
+
+    levels = textureQueryLevels(sampBuf);    // ERROR
+    levels = textureQueryLevels(sampRect);   // ERROR
+}

+ 108 - 0
3rdparty/glslang/Test/430AofA.frag

@@ -0,0 +1,108 @@
+#version 430
+
+float[4][5][6] many[1][2][3];
+
+float gu[][7];
+float gimp[][];    // ERROR, implicit inner
+float g4[4][7];
+float g5[5][7];
+
+float[4][7] foo(float a[5][7])
+{
+    float r[7];
+    r = a[2];
+    float[](a[0], a[1], r, a[3]);              // ERROR, too few dims
+    float[4][7][4](a[0], a[1], r, a[3]);       // ERROR, too many dims
+    return float[4][7](a[0], a[1], r, a[3]);
+    return float[][](a[0], a[1], r, a[3]);
+    return float[][7](a[0], a[1], a[2], a[3]);
+}
+
+void bar(float[5][7]) {}
+
+void main()
+{
+    {
+        float gu[3][4][2];
+
+        gu[2][4][1] = 4.0;                     // ERROR, overflow
+    }
+    vec4 ca4[3][2] = vec4[][](vec4[2](vec4(0.0), vec4(1.0)),
+                              vec4[2](vec4(0.0), vec4(1.0)),
+                              vec4[2](vec4(0.0), vec4(1.0)));
+    vec4 caim[][2] = vec4[][](vec4[2](vec4(4.0), vec4(2.0)),
+                              vec4[2](vec4(4.0), vec4(2.0)),
+                              vec4[2](vec4(4.0), vec4(2.0)));
+    vec4 caim2[][] = vec4[][](vec4[2](vec4(4.0), vec4(2.0)),
+                              vec4[2](vec4(4.0), vec4(2.0)),
+                              vec4[2](vec4(4.0), vec4(2.0)));
+    vec4 caim3[3][] = vec4[][](vec4[2](vec4(4.0), vec4(2.0)),
+                               vec4[2](vec4(4.0), vec4(2.0)),
+                               vec4[2](vec4(4.0), vec4(2.0)));
+
+    vec4 a4[3][2] = {vec4[](vec4(0.0), vec4(1.0)),
+                     vec4[2](vec4(0.0), vec4(1.0)),
+                     vec4[2](vec4(0.0), vec4(1.0)) };
+    vec4 aim[][2] = {vec4[2](vec4(4.0), vec4(2.0)),
+                     vec4[](vec4(4.0), vec4(2.0)),
+                     vec4[2](vec4(4.0), vec4(2.0)) };
+    vec4 aim2[][] = {vec4[2](vec4(4.0), vec4(2.0)),
+                     vec4[2](vec4(4.0), vec4(2.0)),
+                     vec4[](vec4(4.0), vec4(2.0)) };
+    vec4 aim3[3][] = {vec4[2](vec4(4.0), vec4(2.0)),
+                      vec4[2](vec4(4.0), vec4(2.0)),
+                      vec4[2](vec4(4.0), vec4(2.0)) };
+
+    vec4 bad2[3][] = {vec4[2](vec4(4.0), vec4(2.0)),              // ERROR
+                      vec4[3](vec4(4.0), vec4(2.0), vec4(5.0)),
+                      vec4[2](vec4(4.0), vec4(2.0)) };
+
+    vec4 bad3[3][] = {vec4[3](vec4(4.0), vec4(2.0), vec4(5.0)),   // ERROR
+                      vec4[2](vec4(4.0), vec4(2.0)),
+                      vec4[2](vec4(4.0), vec4(2.0)) };
+
+    vec4 bad4[4][] = {vec4[2](vec4(4.0), vec4(2.0)),              // ERROR
+                      vec4[2](vec4(4.0), vec4(2.0)),
+                      vec4[2](vec4(4.0), vec4(2.0)) };
+
+
+    g4 = foo(g5);
+    g5 = g4;           // ERROR, wrong types
+    gu = g4;           // ERROR, not yet sized
+
+    foo(gu);           // ERROR, not yet sized
+    bar(g5);
+
+    if (foo(g5) == g4)
+        ;
+    if (foo(g5) == g5)  // ERROR, different types
+        ;
+
+    float u[][7];
+    u[2][2] = 3.0;
+    float u[5][7];
+    u[5][2] = 5.0;      // ERROR
+    foo(u);
+}
+
+void foo3()
+{
+    float resize1[][5][7];
+    resize1.length();           // ERROR
+    resize1[1][4][5] = 2.0;
+    resize1.length();           // ERROR
+    float resize1[3][5][7];
+    resize1.length();           // 3 in AST
+    resize1[1].length();        // 5 in AST
+    resize1[1][1].length();     // 7 in AST
+    resize1[1][1][1].length();  // ERROR
+
+    float resize2[][5][7];
+    float resize2[3][4][7];     // ERROR, inner dim change
+
+    float resize3[][5][7];
+    float resize3[3][5][9];     // ERROR, inner dim changed
+
+    float resize4[][5][7];
+    int  resize4[3][5][7];      // ERROR, element type
+}

+ 74 - 0
3rdparty/glslang/Test/430scope.vert

@@ -0,0 +1,74 @@
+#version 430 core
+
+int f(int a, int b, int c)
+{
+	int a = b;  // ERROR, redefinition
+
+    {
+		float a = float(a) + 1.0; // okay
+    }
+
+	return a;
+}
+
+int f(int a, int b, int c);  // okay to redeclare
+
+bool b;
+float b(int a);      // ERROR: redefinition
+
+float c(int a);
+bool c;              // ERROR: redefinition
+
+float f;             // ERROR: redefinition
+float tan;           // okay, hides built-in function
+float sin(float x);  // okay, can redefine built-in functions
+float cos(float x)   // okay, can redefine built-in functions
+{
+	return 1.0;
+}
+bool radians(bool x) // okay, can overload built-in functions
+{
+    return true;
+}
+
+invariant gl_Position;
+
+void main()
+{
+    int g();    // okay
+    g();
+
+    float sin; // okay
+    sin;
+    sin(0.7);  // ERROR, use of hidden function
+    f(1,2,3);
+
+    float f;    // hides f()
+    f = 3.0;
+
+    gl_Position = vec4(f);
+
+    for (int f = 0; f < 10; ++f)
+        ++f;
+
+    int x = 1;
+    { 
+        float x = 2.0, /* 2nd x visible here */ y = x; // y is initialized to 2
+        int z = z; // ERROR: z not previously defined.
+    }
+    {
+        int x = x; // x is initialized to '1'
+    }
+
+    struct S 
+    { 
+        int x; 
+    };
+    {
+        S S = S(0); // 'S' is only visible as a struct and constructor 
+        S.x;        // 'S' is now visible as a variable
+    }
+
+    int degrees;
+    degrees(3.2);  // ERROR, use of hidden built-in function
+}

+ 153 - 0
3rdparty/glslang/Test/440.frag

@@ -0,0 +1,153 @@
+#version 440
+
+// Note 'location'-only tests for enhanced layouts are in 330.frag
+// Generic 'component' tests are in 440.vert
+
+// a consumes components 2 and 3 of location 4
+layout(location = 4, component = 2) in vec2 a; 
+
+// b consumes component 1 of location 4
+layout(location = 4, component = 1) in float b; 
+layout(location = 4, component = 2) in vec2 h;  // ERROR, component overlap not okay for fragment in
+
+layout(location = 3, component = 2) in vec3 c;  // ERROR: c overflows components 2 and 3
+
+// e consumes beginning (components 0, 1 and 2) of each of 6 slots
+layout(location = 20, component = 0) in vec3 e[6];
+
+// f consumes last component of the same 6 slots 
+layout(location = 20, component = 3) in float f[6];
+
+layout(location = 30, component = 3) out int be;
+layout(location = 30, component = 0) out vec3 bf;  // ERROR, not the same basic type
+
+writeonly uniform;          // ERROR
+readonly in;                // ERROR
+flat out;                   // ERROR
+mediump uniform;
+
+layout(offset=12) uniform;  // ERROR
+layout(offset=12) in;       // ERROR
+layout(offset=12) out;      // ERROR
+
+layout(align=16) uniform;   // ERROR
+layout(align=16) in;        // ERROR
+layout(align=16) out;       // ERROR
+
+layout(offset=12) uniform  ubl1 { int a; } inst1;  // ERROR
+layout(offset=12)      in inbl2 { int a; } inst2;  // ERROR
+layout(offset=12)     out inbl3 { int a; } inst3;  // ERROR
+
+layout(align=16, std140) uniform  ubl4 { int a; } inst4;
+layout(align=16) uniform  ubl8 { int a; } inst8;  // ERROR, no packing
+layout(align=16)      in inbl5 { int a; } inst5;  // ERROR
+layout(align=16)     out inbl6 { int a; } inst6;  // ERROR
+
+layout(offset=12) uniform vec4 v1;  // ERROR
+layout(offset=12)      in vec4 v2;  // ERROR
+layout(offset=12)     out vec4 v3;  // ERROR
+
+layout(align=16) uniform vec4 v4;   // ERROR
+layout(align=16)      in vec4 v5;   // ERROR
+layout(align=16)     out vec4 v6;   // ERROR
+
+layout(std140) in;                  // ERROR
+layout(std140) uniform vec4 v7;     // ERROR
+
+layout(align=48) uniform ubl7 {          // ERROR, not power of 2
+    layout(offset=12, align=4) float f;  // ERROR, no packing
+} inst7;
+
+in ibl10 {
+    layout(offset=12) float f;  // ERROR
+    layout(align=4) float g;    // ERROR
+} inst10;
+
+layout(std430) uniform;
+
+layout(align=32) uniform ubl9 {
+    float e;
+    layout(offset=12, align=4) float f;
+    layout(offset=20) float g;
+    float h;
+} inst9;
+
+uniform ubl11 {
+    layout(offset=12, align=4) float f;
+    float g;
+} inst11;
+
+layout(std140) uniform block {
+                        vec4   a;     // a takes offsets 0-15
+    layout(offset = 32) vec3   b;     // b takes offsets 32-43
+    layout(offset = 40) vec2   c;     // ERROR, lies within previous member
+    layout(align = 6)   double g;     // ERROR, 6 is not a power of 2
+    layout(offset=68)   double h;     // ERROR, offset not aligned
+} specExampleErrors;
+
+layout(std140) uniform block2 {
+                        vec4   a;     // a takes offsets 0-15
+    layout(offset = 32) vec3   b;     // b takes offsets 32-43
+    layout(offset = 48) vec2   d;     // d takes offsets 48-55
+    layout(align = 16)  float  e;     // e takes offsets 64-67
+    layout(align = 2)   double f;     // f takes offsets 72-79
+    layout(offset = 80) float  h;     // h takes offsets 80-83
+    layout(align = 64)  dvec3  i;     // i takes offsets 128-151
+    layout(offset = 164, align = 8) float  j;     // j takes offsets 168-171
+} specExample;
+
+layout(std430) buffer block430 {
+                        vec4   a;     // a takes offsets 0-15
+    layout(offset = 32) vec3   b;     // b takes offsets 32-43
+    layout(offset = 40) vec2   c;     // ERROR, lies within previous member
+    layout(align = 6)   double g;     // ERROR, 6 is not a power of 2
+    layout(offset=68)   double h;     // ERROR, offset not aligned
+    layout(align = 0)   double i;     // ERROR, 0 not a power of 2
+} specExampleErrors430;
+
+layout(std430) buffer block2430 {
+                        vec4   a;     // a takes offsets 0-15
+    layout(offset = 32) vec3   b;     // b takes offsets 32-43
+    layout(offset = 48) vec2   d;     // d takes offsets 48-55
+    layout(align = 16)  float  e;     // e takes offsets 64-67
+    layout(align = 2)   double f;     // f takes offsets 72-79
+    layout(offset = 80) float  h;     // h takes offsets 80-83
+    layout(align = 64)  dvec3  i;     // i takes offsets 128-151
+    layout(offset = 164, align = 8) float  j;     // j takes offsets 168-171
+} specExample430;
+
+layout(std430, align = 128) buffer block24300 {
+    vec4   a;
+    vec3   b;
+    vec2   d;
+    float  e;
+    double f;
+    float  h;
+    dvec3  i;
+} specExample4300;
+
+layout(std430, align = 128) buffer block24301 {
+    vec4   a;
+    vec3   b;
+    vec2   d;
+    layout(offset=388) float  e;
+    layout(align=8) double f;
+    float  h;
+    dvec3  i;
+} specExample4301;
+
+int aconst[gl_MaxTransformFeedbackBuffers];
+int bconst[gl_MaxTransformFeedbackInterleavedComponents];
+
+sample in vec3 sampInArray[4];
+
+void interp()
+{
+    interpolateAtCentroid(sampInArray[2].xy);
+    interpolateAtSample(sampInArray[2].x.x, 2);
+}
+
+int layer()
+{
+    return gl_Layer;
+}

+ 191 - 0
3rdparty/glslang/Test/440.vert

@@ -0,0 +1,191 @@
+#version 440
+
+// Note 'location' tests for enhanced layouts are in 330.frag
+
+layout(location = 2, component = 2) in vec2 a; 
+layout(location = 2, component = 1) in float b; 
+
+layout(location = 3, component = 2) in vec3 c;      // ERROR: c overflows components 2 and 3
+
+layout(location = 0, component = 3) in float d[4]; 
+
+layout(location = 4, component = 0) in vec3 e[5];
+layout(location = 4, component = 3) in float f[5];
+
+layout(location = 9, component = 4) in float g[6];   // ERROR, component too big
+
+layout(location = 4, component = 2) in vec2 h;       // component overlap okay for vertex in
+
+layout(location = 3, component = 2) out vec2 i;
+layout(location = 3, component = 0) out vec2 j;
+
+layout(location = 4, component = 2) out vec2 k;
+layout(location = 4, component = 2) out vec2 m;      // ERROR, component overlap
+
+layout(location = 2, component = 2) out vec2 n;
+layout(location = 2, component = 0) out vec3 p;      // ERROR, component overlap
+
+layout(location = 10, component = 3) out float q[6]; 
+layout(location = 10, component = 0) out vec3 r[6];
+
+layout(location = 15, component = 3) out float s;    // ERROR, overlap
+layout(location = 10, component = 1) out float t;    // ERROR, overlap
+
+layout(location = 20, component = 2) out float u;
+layout(location = 20, component = 0) out float v;
+layout(location = 20, component = 3) out float w;
+layout(location = 20, component = 1) out vec2 x;     // ERROR, overlap
+
+layout(location = 30, component = 3) out vec2 y;     // ERROR, goes to component 4
+layout(location = 31, component = 1) out vec4 z;     // ERROR, goes to component 4
+
+layout(location = 32, component = 1) out mat4 ba;               // ERROR
+layout(location = 33, component = 1) out struct S {int a;} Ss;  // ERROR
+layout(location = 34, component = 1) out bn { int a;} bb;       // ERROR
+
+layout(component = 1) out float bc;    // ERROR, no location
+
+out blockname {
+    layout(location = 40, component = 2) out float u;
+    layout(location = 40, component = 0) out float v;
+    layout(location = 40, component = 3) out float w;
+    layout(location = 40, component = 1) out vec2 x;     // ERROR, overlap
+
+    layout(location = 41, component = 3) out vec2 y;     // ERROR, goes to component 4
+    layout(location = 42, component = 1) out vec4 z;     // ERROR, goes to component 4
+
+    layout(location = 42, component = 1) out mat4 ba;    // ERROR
+    layout(location = 43, component = 1) out S Ss;       // ERROR
+} bd;
+
+layout(location = 1, component = 1) out;                 // ERROR, no global setting
+
+layout(location = 50, component = 3) out int be;
+layout(location = 50, component = 0) out vec3 bf;
+
+layout(location = 51, component = 1) out double dfo;     // ERROR, odd component
+layout(location = 52, component = 2) out dvec2 dvo;      // ERROR, overflow
+layout(location = 53) out double dfo2;
+layout(location = 53, component = 2) out vec2 ffv2;      // okay, fits
+layout(location = 54) out dvec4 dvec4out;                // uses up location 55 too
+layout(location = 55) out float overf;                   // ERROR, collides with previous dvec4
+layout(location = 56, component = 1) out vec2 df2o;
+layout(location = 56, component = 3) out float sf2o;
+layout(location = 57, component = 2) out vec2 dv3o;
+layout(location = 57, component = 3) out float sf4o;     // ERROR, overlapping component
+layout(location=58) out flat dvec3 dv3o2;                // uses part of location 59
+layout(location=59, component=2) out flat double dfo3;   // okay, fits
+layout(location=59, component=0) out flat double dfo4;   // ERROR, overlaps the dvec3 in starting in 58
+
+out bblck1 {
+    vec4 bbv;
+} bbinst1;
+
+out bblck2 {
+    layout(xfb_offset=64) vec4 bbv;
+} bbinst2;
+
+layout(xfb_buffer = 3, xfb_stride = 64) out;  // default buffer is 3
+
+out bblck3 {
+    layout(xfb_offset=16) vec4 bbv;  // in xfb_buffer 3
+} bbinst3;
+
+uniform ubblck3 {
+    layout(xfb_offset=16) vec4 bbv;  // ERROR, not in a uniform
+} ubbinst3;
+
+layout(xfb_buffer=2, xfb_offset=48, xfb_stride=80) out vec4 bg;
+layout(              xfb_offset=32, xfb_stride=64) out vec4 bh;
+
+layout(xfb_offset=48) out; // ERROR
+
+layout(xfb_stride=80, xfb_buffer=2, xfb_offset=16) out bblck4 {
+    vec4 bbv1;
+    vec4 bbv2;
+} bbinst4;
+
+out bblck5 {
+    layout(xfb_offset=0) vec4 bbv1;
+    layout(xfb_stride=64, xfb_buffer=3, xfb_offset=48) vec4 bbv2;
+    layout(xfb_buffer=2) vec4 bbv3;                               // ERROR, wrong buffer
+} bbinst5;
+
+out layout(xfb_buffer=2) bblck6 {
+    layout(xfb_offset=0) vec4 bbv1;
+    layout(xfb_stride=64, xfb_buffer=3, xfb_offset=32) vec4 bbv2; // ERROR, overlap 32 from bh, and buffer contradiction
+    layout(xfb_buffer=2, xfb_offset=0) vec4 bbv3;                 // ERROR, overlap 0 from bbinst5
+    layout(xfb_buffer=2) vec4 bbv5;
+    layout(xfb_offset=24) float bbf6;                             // ERROR, overlap 24 from bbv1 in bbinst4
+} bbinst6;
+
+layout(xfb_stride=48) out;                   // ERROR, stride of buffer 3
+
+layout(xfb_buffer=1) out;  // default buffer is 1
+layout(xfb_offset=4) out float bj;
+layout(xfb_offset=0) out ivec2 bk;           // ERROR, overlap 4
+
+layout(xfb_buffer=3, xfb_stride=48) out;     // ERROR, stride of buffer 3 (default is now 3)
+layout(xfb_stride=48) out float bl;          // ERROR, stride of buffer 3
+
+layout(xfb_stride=48) out bblck7 {           // ERROR, stride of buffer 3
+    layout(xfb_stride=64) vec4 bbv1;
+    layout(xfb_stride=32) vec4 bbv2;         // ERROR, stride of buffer 3
+} bbinst7;
+
+struct S5 {
+    int i;    // 4 bytes plus 4 byte hole
+    double d; // 8 bytes
+    float f;  // 4 bytes
+};  // total size = 20
+
+struct T {
+    bool b;   // 4 plus 4 byte hole
+    S5 s;     // 20 
+    vec2 v2;  // 8
+};  // total size = 36
+
+out layout(xfb_buffer=0, xfb_offset=0, xfb_stride=92) bblck8 {  // ERROR, stride not multiple of 8
+    bool b;    // offset 0
+    T t;       // offset 8, size 40
+    int i;     // offset 40 + 4 = 48
+    mat3x3 m3; // offset 52
+    float f;   // offset 52 + 9*4 = 88
+    float g;   // ERROR, overflow stride
+} bbinst8;
+
+out layout(xfb_buffer=4) bblck9 {
+    layout(xfb_offset=1) bool b;     // ERROR
+    layout(xfb_offset=12) T t;       // ERROR
+    layout(xfb_offset=52) mat3x3 m3; // non-multiple of 8 okay
+    layout(xfb_offset=90) int i;     // ERROR
+    layout(xfb_offset=98) double d;  // ERROR
+    layout(xfb_offset=108) S s;      // non-multiple of 8 okay
+} bbinst9;
+
+layout(xfb_buffer=5, xfb_stride=6) out;     // link ERROR, stride not multiple of 4
+layout(xfb_offset=0) out float bm;
+
+layout(xfb_buffer=6, xfb_stride=2000) out;  // ERROR, stride too big
+
+out layout(xfb_buffer=7, xfb_offset=0) bblck10 {  // link ERROR, implicit stride too big
+    dmat4x4 m1;
+    dmat4x4 m2;
+    float f;
+} bbinst10;
+
+int drawParamsBad()
+{
+    return gl_BaseVertexARB + gl_BaseInstanceARB + gl_DrawIDARB; // ERROR, extension not requested
+}
+
+#extension GL_ARB_shader_draw_parameters: enable
+
+int drawParams()
+{
+    return gl_BaseVertexARB + gl_BaseInstanceARB + gl_DrawIDARB;
+    gl_BaseVertexARB = 3;       // ERROR, can't write to shader 'in'
+    gl_BaseInstanceARB = 3;     // ERROR, can't write to shader 'in'
+    gl_DrawIDARB = 3;           // ERROR, can't write to shader 'in'
+    glBaseInstanceARB;          // ERROR, not defined
+}

+ 1 - 0
3rdparty/glslang/Test/450.comp

@@ -0,0 +1 @@
+#version 450 core

+ 56 - 0
3rdparty/glslang/Test/450.frag

@@ -0,0 +1,56 @@
+#version 450 core
+
+in float in1;
+in vec2 in2;
+in vec3 in3;
+in vec4 in4;
+
+void main()
+{
+    vec2 v2 = dFdxFine(in2);
+    vec3 v3 = dFdyCoarse(in3);
+    vec4 v4 = fwidth(in4);
+    v4 = dFdyFine(in4);
+    v3 = dFdyFine(in3);
+    float f = dFdx(in1) + dFdxFine(in1) + dFdxCoarse(in1);
+    v4 = fwidthCoarse(in4) + fwidthFine(in4);
+
+    float cull = gl_CullDistance[2];
+    float consts = gl_MaxCullDistances + gl_MaxCombinedClipAndCullDistances + gl_MaxSamples;
+
+    if (gl_HelperInvocation)
+        ++v4;
+
+    int sum = gl_MaxVertexImageUniforms +
+              gl_MaxFragmentImageUniforms +
+              gl_MaxComputeImageUniforms +
+              gl_MaxCombinedImageUniforms +
+              gl_MaxCombinedShaderOutputResources;
+
+    bool b1, b3, b;
+    uint uin;
+    bvec2 b2 = mix(bvec2(b1), bvec2(b3), bvec2(b));
+    uint um = mix(uin, uin, b);
+    ivec3 im3 = mix(ivec3(uin), ivec3(uin), bvec3(b));
+}
+
+uniform sampler2DMS s2dms;
+uniform usampler2DMSArray us2dmsa;
+layout(rgba32i) uniform iimage2DMS ii2dms;
+layout(rgba32f) uniform image2DMSArray i2dmsa;
+
+void foo()
+{
+    int s = textureSamples(s2dms);
+    s += textureSamples(us2dmsa);
+    s += imageSamples(ii2dms);
+    s += imageSamples(i2dmsa);
+    float f = imageAtomicExchange(i2dmsa, ivec3(in3), 2, 4.5);
+}
+
+in float gl_CullDistance[6];
+
+float cull(int i)
+{
+    return (i >= 6) ? gl_CullDistance[5] : gl_CullDistance[i];
+}

Algunos archivos no se mostraron porque demasiados archivos cambiaron en este cambio