Selaa lähdekoodia

[spirv] Add support for (RW)StructuredBuffer (#593)

* Supported (RW)StructuredBuffer types
* Supported accessing elements in (RW)StructuredBuffer
* Supported the Load() method
* Supported std430 layout

Also optimized OpAccessChain CodeGen. This is necessary right
now because otherwise we will have pointers to intermediate
composite objects like structs and arrays. Structs/arrays are
different types with and without layout decorations. To have the
correct struct/array pointer type for OpAccessChain, we need to
always pass in the correct layout rules when translating the
types, which is not easy. Optimizing intermediate OpAccessChain
away solves the problem because scalar/vector/matrix types are
unique given the same parameter.
Lei Zhang 8 vuotta sitten
vanhempi
commit
92a73a28c5

+ 2 - 0
tools/clang/include/clang/SPIRV/ModuleBuilder.h

@@ -289,6 +289,8 @@ public:
                          Type::DecorationSet decorations = {});
   uint32_t getArrayType(uint32_t elemType, uint32_t count,
                         Type::DecorationSet decorations = {});
+  uint32_t getRuntimeArrayType(uint32_t elemType,
+                               Type::DecorationSet decorations = {});
   uint32_t getFunctionType(uint32_t returnType,
                            llvm::ArrayRef<uint32_t> paramTypes);
   uint32_t getImageType(uint32_t sampledType, spv::Dim, uint32_t depth,

+ 30 - 7
tools/clang/lib/SPIRV/DeclResultIdMapper.cpp

@@ -77,7 +77,9 @@ uint32_t DeclResultIdMapper::getDeclResultId(const NamedDecl *decl) {
           cast<VarDecl>(decl)->getType(),
           // We need to set decorateLayout here to avoid creating SPIR-V
           // instructions for the current type without decorations.
-          /*decorateLayout*/ true);
+          // According to the Vulkan spec, cbuffer should follow standard
+          // uniform buffer layout, which the GLSL std140 rules satisfy.
+          LayoutRule::GLSLStd140);
       return theBuilder.createAccessChain(
           theBuilder.getPointerType(varType, info->storageClass),
           info->resultId, {theBuilder.getConstantInt32(info->indexInCTBuffer)});
@@ -114,17 +116,24 @@ uint32_t DeclResultIdMapper::createFileVar(uint32_t varType, const VarDecl *var,
   return id;
 }
 
-uint32_t DeclResultIdMapper::createExternVar(uint32_t varType,
-                                             const VarDecl *var) {
+uint32_t DeclResultIdMapper::createExternVar(const VarDecl *var) {
   auto storageClass = spv::StorageClass::UniformConstant;
+  auto rule = LayoutRule::Void;
 
   // TODO: Figure out other cases where the storage class should be Uniform.
   if (auto *t = var->getType()->getAs<RecordType>()) {
     const llvm::StringRef typeName = t->getDecl()->getName();
-    if (typeName == "ByteAddressBuffer" || typeName == "RWByteAddressBuffer")
+    if (typeName == "StructuredBuffer" || typeName == "RWStructuredBuffer" ||
+        typeName == "ByteAddressBuffer" || typeName == "RWByteAddressBuffer") {
+      // These types are all translated into OpTypeStruct with BufferBlock
+      // decoration. They should follow standard storage buffer layout,
+      // which the GLSL std430 rules satisfy.
       storageClass = spv::StorageClass::Uniform;
+      rule = LayoutRule::GLSLStd430;
+    }
   }
 
+  const auto varType = typeTranslator.translateType(var->getType(), rule);
   const uint32_t id = theBuilder.addModuleVar(varType, storageClass,
                                               var->getName(), llvm::None);
   astDecls[var] = {id, storageClass, -1};
@@ -155,13 +164,18 @@ DeclResultIdMapper::createVarOfExplicitLayoutStruct(const DeclContext *decl,
     auto varType = declDecl->getType();
     varType.removeLocalConst();
 
-    fieldTypes.push_back(typeTranslator.translateType(
-        varType, true, declDecl->hasAttr<HLSLRowMajorAttr>()));
+    fieldTypes.push_back(
+        typeTranslator.translateType(varType, LayoutRule::GLSLStd140,
+                                     declDecl->hasAttr<HLSLRowMajorAttr>()));
     fieldNames.push_back(declDecl->getName());
   }
 
   // Get the type for the whole buffer
-  auto decorations = typeTranslator.getLayoutDecorations(decl);
+  // cbuffers are translated into OpTypeStruct with Block decoration. They
+  // should follow standard uniform buffer layout according to the Vulkan spec.
+  // The GLSL std140 rules satisfy this requirement.
+  auto decorations =
+      typeTranslator.getLayoutDecorations(decl, LayoutRule::GLSLStd140);
   decorations.push_back(Decoration::getBlock(*theBuilder.getSPIRVContext()));
   const uint32_t structType =
       theBuilder.getStructType(fieldTypes, typeName, fieldNames, decorations);
@@ -229,6 +243,11 @@ public:
   explicit StorageClassResolver(const DeclResultIdMapper &mapper)
       : declIdMapper(mapper), storageClass(spv::StorageClass::Max) {}
 
+  bool TraverseCXXMemberCallExpr(CXXMemberCallExpr *expr) {
+    // For method calls, the storage class should follow the object.
+    return TraverseStmt(expr->getImplicitObjectArgument());
+  }
+
   // For querying the storage class of a remapped decl
 
   // Semantics may be attached to FunctionDecl, ParmVarDecl, and FieldDecl.
@@ -246,6 +265,10 @@ public:
   }
 
   bool processDecl(NamedDecl *decl) {
+    // Calling C++ methods like operator[] is also represented as DeclRefExpr.
+    if (isa<CXXMethodDecl>(decl))
+      return true;
+
     const auto *info = declIdMapper.getDeclSpirvInfo(decl);
     assert(info);
     if (storageClass == spv::StorageClass::Max) {

+ 1 - 1
tools/clang/lib/SPIRV/DeclResultIdMapper.h

@@ -146,7 +146,7 @@ public:
                          llvm::Optional<uint32_t> init);
 
   /// \brief Creates an external-visible variable and returns its <result-id>.
-  uint32_t createExternVar(uint32_t varType, const VarDecl *var);
+  uint32_t createExternVar(const VarDecl *var);
 
   /// \brief Creates a cbuffer/tbuffer from the given decl.
   ///

+ 8 - 0
tools/clang/lib/SPIRV/ModuleBuilder.cpp

@@ -613,6 +613,14 @@ uint32_t ModuleBuilder::getArrayType(uint32_t elemType, uint32_t count,
   return typeId;
 }
 
+uint32_t ModuleBuilder::getRuntimeArrayType(uint32_t elemType,
+                                            Type::DecorationSet decorations) {
+  const Type *type = Type::getRuntimeArray(theContext, elemType, decorations);
+  const uint32_t typeId = theContext.getResultIdForType(type);
+  theModule.addType(type, typeId);
+  return typeId;
+}
+
 uint32_t ModuleBuilder::getFunctionType(uint32_t returnType,
                                         llvm::ArrayRef<uint32_t> paramTypes) {
   const Type *type = Type::getFunction(theContext, returnType, paramTypes);

+ 128 - 114
tools/clang/lib/SPIRV/SPIRVEmitter.cpp

@@ -110,6 +110,15 @@ bool isFloatOrVecMatOfFloatType(QualType type) {
           hlsl::GetHLSLMatElementType(type)->isFloatingType());
 }
 
+/// Returns true if the given type is a (RW)StructuredBuffer type.
+bool isStructuredBuffer(QualType type) {
+  const auto *recordType = type->getAs<RecordType>();
+  if (!recordType)
+    return false;
+  const auto name = recordType->getDecl()->getName();
+  return name == "StructuredBuffer" || name == "RWStructuredBuffer";
+}
+
 bool isSpirvMatrixOp(spv::Op opcode) {
   switch (opcode) {
   case spv::Op::OpMatrixTimesMatrix:
@@ -122,6 +131,32 @@ bool isSpirvMatrixOp(spv::Op opcode) {
   return false;
 }
 
+/// If expr is a (RW)StructuredBuffer.Load(), returns the object and writes
+/// the index into *index. Otherwise, returns nullptr.
+// TODO: The following doesn't handle Load(int, int) yet. And it is basically a
+// duplicate of doCXXMemberCallExpr.
+const Expr *isStructuredBufferLoad(const Expr *expr, const Expr **index) {
+  using namespace hlsl;
+
+  if (const auto *indexing = dyn_cast<CXXMemberCallExpr>(expr)) {
+    const auto *callee = indexing->getDirectCallee();
+    uint32_t opcode = static_cast<uint32_t>(IntrinsicOp::Num_Intrinsics);
+    llvm::StringRef group;
+
+    if (GetIntrinsicOp(callee, opcode, group)) {
+      if (static_cast<IntrinsicOp>(opcode) == IntrinsicOp::MOP_Load) {
+        const auto *object = indexing->getImplicitObjectArgument();
+        if (isStructuredBuffer(object->getType())) {
+          *index = indexing->getArg(0);
+          return indexing->getImplicitObjectArgument();
+        }
+      }
+    }
+  }
+
+  return nullptr;
+}
+
 /// \brief Returns the statement that is the immediate parent AST node of the
 /// given statement. Returns nullptr if there are no parents nodes.
 const Stmt *getImmediateParent(ASTContext &astContext, const Stmt *stmt) {
@@ -433,8 +468,6 @@ void SPIRVEmitter::doFunctionDecl(const FunctionDecl *decl) {
 }
 
 void SPIRVEmitter::doVarDecl(const VarDecl *decl) {
-  const uint32_t varType = typeTranslator.translateType(decl->getType());
-
   // The contents in externally visible variables can be updated via the
   // pipeline. They should be handled differently from file and function scope
   // variables.
@@ -442,6 +475,11 @@ void SPIRVEmitter::doVarDecl(const VarDecl *decl) {
   // the Private storage class, while function scope variables (normal "local"
   // variables) belongs to the Function storage class.
   if (!decl->isExternallyVisible()) {
+    // Note: cannot move varType outside of this scope because it generates
+    // SPIR-V types without decorations, while external visible variable should
+    // have SPIR-V type with decorations.
+    const uint32_t varType = typeTranslator.translateType(decl->getType());
+
     // We already know the variable is not externally visible here. If it does
     // not have local storage, it should be file scope variable.
     const bool isFileScopeVar = !decl->hasLocalStorage();
@@ -482,7 +520,7 @@ void SPIRVEmitter::doVarDecl(const VarDecl *decl) {
       }
     }
   } else {
-    (void)declIdMapper.createExternVar(varType, decl);
+    (void)declIdMapper.createExternVar(decl);
   }
 }
 
@@ -1428,6 +1466,26 @@ uint32_t SPIRVEmitter::processByteAddressBufferLoadStore(
   return resultId;
 }
 
+uint32_t
+SPIRVEmitter::processStructuredBufferLoad(const CXXMemberCallExpr *expr) {
+  if (expr->getNumArgs() == 2) {
+    emitError("Load(int, int) unimplemented for (RW)StructuredBuffer");
+    return 0;
+  }
+
+  const auto *buffer = expr->getImplicitObjectArgument();
+  const QualType structType =
+      hlsl::GetHLSLResourceResultType(buffer->getType());
+  const uint32_t ptrType = theBuilder.getPointerType(
+      typeTranslator.translateType(structType, LayoutRule::GLSLStd430),
+      declIdMapper.resolveStorageClass(buffer));
+
+  const uint32_t zero = theBuilder.getConstantInt32(0);
+  const uint32_t index = doExpr(expr->getArg(0));
+
+  return theBuilder.createAccessChain(ptrType, doExpr(buffer), {zero, index});
+}
+
 uint32_t SPIRVEmitter::doCXXMemberCallExpr(const CXXMemberCallExpr *expr) {
   using namespace hlsl;
 
@@ -1565,10 +1623,14 @@ uint32_t SPIRVEmitter::doCXXMemberCallExpr(const CXXMemberCallExpr *expr) {
 
       const auto *object = expr->getImplicitObjectArgument();
       const auto objectType = object->getType();
+
       if (typeTranslator.isRWByteAddressBuffer(objectType) ||
-          typeTranslator.isByteAddressBuffer(objectType)) {
+          typeTranslator.isByteAddressBuffer(objectType))
         return processByteAddressBufferLoadStore(expr, 1, /*doStore*/ false);
-      }
+
+      if (isStructuredBuffer(objectType))
+        return processStructuredBufferLoad(expr);
+
       if (TypeTranslator::isBuffer(objectType) ||
           TypeTranslator::isRWBuffer(objectType))
         return processBufferLoad(expr->getImplicitObjectArgument(),
@@ -1626,58 +1688,6 @@ uint32_t SPIRVEmitter::doCXXMemberCallExpr(const CXXMemberCallExpr *expr) {
 }
 
 uint32_t SPIRVEmitter::doCXXOperatorCallExpr(const CXXOperatorCallExpr *expr) {
-  { // First try to handle vector/matrix indexing
-    const Expr *baseExpr = nullptr;
-    const Expr *index0Expr = nullptr;
-    const Expr *index1Expr = nullptr;
-
-    if (isVecMatIndexing(expr, &baseExpr, &index0Expr, &index1Expr)) {
-      const auto baseType = baseExpr->getType();
-      llvm::SmallVector<uint32_t, 2> indices;
-
-      if (hlsl::IsHLSLMatType(baseType)) {
-        uint32_t rowCount = 0, colCount = 0;
-        hlsl::GetHLSLMatRowColCount(baseType, rowCount, colCount);
-
-        // Collect indices for this matrix indexing
-        if (rowCount > 1) {
-          indices.push_back(doExpr(index0Expr));
-        }
-        // Evalute index1Expr iff it is not nullptr
-        if (colCount > 1 && index1Expr) {
-          indices.push_back(doExpr(index1Expr));
-        }
-      } else { // Indexing into vector
-        if (hlsl::GetHLSLVecSize(baseType) > 1) {
-          indices.push_back(doExpr(index0Expr));
-        }
-      }
-
-      if (indices.size() == 0) {
-        return doExpr(baseExpr);
-      }
-
-      uint32_t base = doExpr(baseExpr);
-      // If we are indexing into a rvalue, to use OpAccessChain, we first need
-      // to create a local variable to hold the rvalue.
-      //
-      // TODO: We can optimize the codegen by emitting OpCompositeExtract if
-      // all indices are contant integers.
-      if (!baseExpr->isGLValue()) {
-        const uint32_t compositeType = typeTranslator.translateType(baseType);
-        const uint32_t tempVar = theBuilder.addFnVar(compositeType, "temp.var");
-        theBuilder.createStore(tempVar, base);
-        base = tempVar;
-      }
-
-      const uint32_t elemType = typeTranslator.translateType(expr->getType());
-      const uint32_t ptrType = theBuilder.getPointerType(
-          elemType, declIdMapper.resolveStorageClass(baseExpr));
-
-      return theBuilder.createAccessChain(ptrType, base, indices);
-    }
-  }
-
   { // Handle Buffer/RWBuffer indexing
     const Expr *baseExpr = nullptr;
     const Expr *indexExpr = nullptr;
@@ -1687,9 +1697,29 @@ uint32_t SPIRVEmitter::doCXXOperatorCallExpr(const CXXOperatorCallExpr *expr) {
     }
   }
 
-  emitError("unimplemented C++ operator call: %0") << expr->getOperator();
-  expr->dump();
-  return 0;
+  llvm::SmallVector<uint32_t, 4> indices;
+  const Expr *baseExpr = collectArrayStructIndices(expr, &indices);
+
+  uint32_t base = doExpr(baseExpr);
+  if (indices.empty())
+    return base; // For indexing into size-1 vectors and 1xN matrices
+  // If we are indexing into a rvalue, to use OpAccessChain, we first need
+  // to create a local variable to hold the rvalue.
+  //
+  // TODO: We can optimize the codegen by emitting OpCompositeExtract if
+  // all indices are constant integers.
+  if (!baseExpr->isGLValue()) {
+    const uint32_t baseType = typeTranslator.translateType(baseExpr->getType());
+    const uint32_t tempVar = theBuilder.addFnVar(baseType, "temp.var");
+    theBuilder.createStore(tempVar, base);
+    base = tempVar;
+  }
+
+  const uint32_t ptrType =
+      theBuilder.getPointerType(typeTranslator.translateType(expr->getType()),
+                                declIdMapper.resolveStorageClass(baseExpr));
+
+  return theBuilder.createAccessChain(ptrType, base, indices);
 }
 
 uint32_t
@@ -2210,59 +2240,6 @@ bool SPIRVEmitter::isBufferIndexing(const CXXOperatorCallExpr *indexExpr,
   return false;
 }
 
-bool SPIRVEmitter::isVecMatIndexing(const CXXOperatorCallExpr *vecIndexExpr,
-                                    const Expr **base, const Expr **index0,
-                                    const Expr **index1) {
-  // Must be operator[]
-  if (vecIndexExpr->getOperator() != OverloadedOperatorKind::OO_Subscript)
-    return false;
-
-  // Get the base of this outer operator[]
-  const Expr *vecBase = vecIndexExpr->getArg(0);
-  // If the base of the outer operator[] is a vector, try to see if we have
-  // another inner operator[] on a matrix, i.e., two levels of indexing into
-  // the matrix.
-  if (hlsl::IsHLSLVecType(vecBase->getType())) {
-    const auto *matIndexExpr = dyn_cast<CXXOperatorCallExpr>(vecBase);
-
-    if (!matIndexExpr) {
-      // No inner operator[]. So this is just indexing into a vector.
-      *base = vecBase;
-      *index0 = vecIndexExpr->getArg(1);
-      *index1 = nullptr;
-
-      return true;
-    }
-
-    // Must be operator[]
-    if (matIndexExpr->getOperator() != OverloadedOperatorKind::OO_Subscript)
-      return false;
-
-    // Get the base of this inner operator[]
-    const Expr *matBase = matIndexExpr->getArg(0);
-    if (!hlsl::IsHLSLMatType(matBase->getType()))
-      return false;
-
-    *base = matBase;
-    *index0 = matIndexExpr->getArg(1);
-    *index1 = vecIndexExpr->getArg(1);
-
-    return true;
-  }
-
-  // The base of the outside operator[] is not a vector. Try to see whether it
-  // is a matrix. If true, it means we have only one level of indexing.
-  if (hlsl::IsHLSLMatType(vecBase->getType())) {
-    *base = vecBase;
-    *index0 = vecIndexExpr->getArg(1);
-    *index1 = nullptr;
-
-    return true;
-  }
-
-  return false;
-}
-
 void SPIRVEmitter::condenseVectorElementExpr(
     const HLSLVectorElementExpr *expr, const Expr **basePtr,
     hlsl::VectorMemberAccessPositions *flattenedAccessor) {
@@ -2677,7 +2654,8 @@ uint32_t SPIRVEmitter::processMatrixBinaryOp(const Expr *lhs, const Expr *rhs,
 const Expr *SPIRVEmitter::collectArrayStructIndices(
     const Expr *expr, llvm::SmallVectorImpl<uint32_t> *indices) {
   if (const auto *indexing = dyn_cast<MemberExpr>(expr)) {
-    const Expr *base = collectArrayStructIndices(indexing->getBase(), indices);
+    const Expr *base = collectArrayStructIndices(
+        indexing->getBase()->IgnoreParenNoopCasts(astContext), indices);
 
     // Append the index of the current level
     const auto *fieldDecl = cast<FieldDecl>(indexing->getMemberDecl());
@@ -2696,6 +2674,42 @@ const Expr *SPIRVEmitter::collectArrayStructIndices(
     return base;
   }
 
+  if (const auto *indexing = dyn_cast<CXXOperatorCallExpr>(expr))
+    if (indexing->getOperator() == OverloadedOperatorKind::OO_Subscript) {
+      const Expr *thisBase =
+          indexing->getArg(0)->IgnoreParenNoopCasts(astContext);
+      const auto thisBaseType = thisBase->getType();
+      const Expr *base = collectArrayStructIndices(thisBase, indices);
+
+      // If the base is a StructuredBuffer, we need to push an additional index 0
+      // here. This is because we created an additional OpTypeRuntimeArray
+      // in the structure.
+      if (isStructuredBuffer(thisBaseType))
+        indices->push_back(theBuilder.getConstantInt32(0));
+
+      if ((hlsl::IsHLSLVecType(thisBaseType) &&
+           (hlsl::GetHLSLVecSize(thisBaseType) == 1)) ||
+          typeTranslator.is1x1Matrix(thisBaseType) ||
+          typeTranslator.is1xNMatrix(thisBaseType)) {
+        // If this is a size-1 vector or 1xN matrix, ignore the index.
+      } else {
+        indices->push_back(doExpr(indexing->getArg(1)));
+      }
+      return base;
+    }
+
+  {
+    const Expr *index = nullptr;
+    // TODO: the following is duplicating the logic in doCXXMemberCallExpr.
+    if (const auto *object = isStructuredBufferLoad(expr, &index)) {
+      // For object.Load(index), there should be no more indexing into the
+      // object.
+      indices->push_back(theBuilder.getConstantInt32(0));
+      indices->push_back(doExpr(index));
+      return object;
+    }
+  }
+
   // This the deepest we can go. No more array or struct indexing.
   return expr;
 }

+ 6 - 20
tools/clang/lib/SPIRV/SPIRVEmitter.h

@@ -135,24 +135,6 @@ private:
   ///   the original vector, no shuffling needed).
   bool isVectorShuffle(const Expr *expr);
 
-  /// Returns true if the given CXXOperatorCallExpr is indexing into a vector or
-  /// matrix using operator[].
-  /// On success, writes the base vector/matrix into *base, and the indices into
-  /// *index0 and *index1, if there are two levels of indexing. If there is only
-  /// one level of indexing, writes the index into *index0 and nullptr into
-  /// *index1.
-  ///
-  /// matrix [index0] [index1]         vector [index0]
-  /// +-------------+
-  ///  vector                     or
-  /// +----------------------+         +-------------+
-  ///         scalar                        scalar
-  ///
-  /// Assumes base, index0, and index1 are not nullptr.
-  bool isVecMatIndexing(const CXXOperatorCallExpr *vecIndexExpr,
-                        const Expr **base, const Expr **index0,
-                        const Expr **index1);
-
   /// \brief Returns true if the given CXXOperatorCallExpr is indexing into a
   /// Buffer/RWBuffer using operator[].
   /// On success, writes the base buffer into *base if base is not nullptr, and
@@ -221,8 +203,8 @@ private:
                                  const BinaryOperatorKind opcode);
 
   /// Collects all indices (SPIR-V constant values) from consecutive MemberExprs
-  /// or ArraySubscriptExprs and writes into indices. Returns the real base
-  /// (the first Expr that is not a MemberExpr or ArraySubscriptExpr).
+  /// or ArraySubscriptExprs or operator[] calls and writes into indices.
+  /// Returns the real base.
   const Expr *
   collectArrayStructIndices(const Expr *expr,
                             llvm::SmallVectorImpl<uint32_t> *indices);
@@ -438,6 +420,10 @@ private:
   /// declaration for the (RW)Buffer object.
   uint32_t processBufferLoad(const Expr *object, const Expr *address);
 
+  /// \brief Generates an OpAccessChain instruction for the given
+  /// (RW)StructuredBuffer.Load() method call.
+  uint32_t processStructuredBufferLoad(const CXXMemberCallExpr *expr);
+
 private:
   /// \brief Wrapper method to create an error message and report it
   /// in the diagnostic engine associated with this consumer.

+ 65 - 31
tools/clang/lib/SPIRV/TypeTranslator.cpp

@@ -34,7 +34,7 @@ inline void roundToPow2(uint32_t *val, uint32_t pow2) {
 }
 } // anonymous namespace
 
-uint32_t TypeTranslator::translateType(QualType type, bool decorateLayout,
+uint32_t TypeTranslator::translateType(QualType type, LayoutRule rule,
                                        bool isRowMajor) {
   // We can only apply row_major to matrices or arrays of matrices.
   if (isRowMajor)
@@ -43,7 +43,7 @@ uint32_t TypeTranslator::translateType(QualType type, bool decorateLayout,
   // Try to translate the canonical type first
   const auto canonicalType = type.getCanonicalType();
   if (canonicalType != type)
-    return translateType(canonicalType, decorateLayout, isRowMajor);
+    return translateType(canonicalType, rule, isRowMajor);
 
   // Primitive types
   {
@@ -70,7 +70,7 @@ uint32_t TypeTranslator::translateType(QualType type, bool decorateLayout,
 
   // Typedefs
   if (const auto *typedefType = type->getAs<TypedefType>())
-    return translateType(typedefType->desugar(), decorateLayout, isRowMajor);
+    return translateType(typedefType->desugar(), rule, isRowMajor);
 
   // Reference types
   if (const auto *refType = type->getAs<ReferenceType>()) {
@@ -80,7 +80,7 @@ uint32_t TypeTranslator::translateType(QualType type, bool decorateLayout,
     // We already pass function arguments via pointers to tempoary local
     // variables. So it should be fine to drop the pointer type and treat it
     // as the underlying pointee type here.
-    return translateType(refType->getPointeeType(), decorateLayout, isRowMajor);
+    return translateType(refType->getPointeeType(), rule, isRowMajor);
   }
 
   // In AST, vector/matrix types are TypedefType of TemplateSpecializationType.
@@ -126,21 +126,21 @@ uint32_t TypeTranslator::translateType(QualType type, bool decorateLayout,
     // (ClassTemplateSpecializationDecl is a subclass of CXXRecordDecl, which is
     // then a subclass of RecordDecl.) So we need to check them before checking
     // the general struct type.
-    if (const auto id = translateResourceType(type))
+    if (const auto id = translateResourceType(type, rule))
       return id;
 
     // Collect all fields' types and names.
     llvm::SmallVector<uint32_t, 4> fieldTypes;
     llvm::SmallVector<llvm::StringRef, 4> fieldNames;
     for (const auto *field : decl->fields()) {
-      fieldTypes.push_back(translateType(field->getType(), decorateLayout,
+      fieldTypes.push_back(translateType(field->getType(), rule,
                                          field->hasAttr<HLSLRowMajorAttr>()));
       fieldNames.push_back(field->getName());
     }
 
     llvm::SmallVector<const Decoration *, 4> decorations;
-    if (decorateLayout) {
-      decorations = getLayoutDecorations(decl);
+    if (rule != LayoutRule::Void) {
+      decorations = getLayoutDecorations(decl, rule);
     }
 
     return theBuilder.getStructType(fieldTypes, decl->getName(), fieldNames,
@@ -149,15 +149,15 @@ uint32_t TypeTranslator::translateType(QualType type, bool decorateLayout,
 
   if (const auto *arrayType = astContext.getAsConstantArrayType(type)) {
     const uint32_t elemType =
-        translateType(arrayType->getElementType(), decorateLayout, isRowMajor);
+        translateType(arrayType->getElementType(), rule, isRowMajor);
     // TODO: handle extra large array size?
     const auto size =
         static_cast<uint32_t>(arrayType->getSize().getZExtValue());
 
     llvm::SmallVector<const Decoration *, 4> decorations;
-    if (decorateLayout) {
+    if (rule != LayoutRule::Void) {
       uint32_t stride = 0;
-      (void)getAlignmentAndSize(type, &stride, isRowMajor);
+      (void)getAlignmentAndSize(type, rule, isRowMajor, &stride);
       decorations.push_back(
           Decoration::getArrayStride(*theBuilder.getSPIRVContext(), stride));
     }
@@ -362,7 +362,7 @@ uint32_t TypeTranslator::getComponentVectorType(QualType matrixType) {
 }
 
 llvm::SmallVector<const Decoration *, 4>
-TypeTranslator::getLayoutDecorations(const DeclContext *decl) {
+TypeTranslator::getLayoutDecorations(const DeclContext *decl, LayoutRule rule) {
   const auto spirvContext = theBuilder.getSPIRVContext();
   llvm::SmallVector<const Decoration *, 4> decorations;
   uint32_t offset = 0, index = 0;
@@ -379,7 +379,7 @@ TypeTranslator::getLayoutDecorations(const DeclContext *decl) {
 
     uint32_t memberAlignment = 0, memberSize = 0, stride = 0;
     std::tie(memberAlignment, memberSize) =
-        getAlignmentAndSize(fieldType, &stride, isRowMajor);
+        getAlignmentAndSize(fieldType, rule, isRowMajor, &stride);
 
     // Each structure-type member must have an Offset Decoration.
     roundToPow2(&offset, memberAlignment);
@@ -398,7 +398,7 @@ TypeTranslator::getLayoutDecorations(const DeclContext *decl) {
     if (isMxNMatrix(fieldType)) {
       memberAlignment = memberSize = stride = 0;
       std::tie(memberAlignment, memberSize) =
-          getAlignmentAndSize(fieldType, &stride, isRowMajor);
+          getAlignmentAndSize(fieldType, rule, isRowMajor, &stride);
 
       decorations.push_back(
           Decoration::getMatrixStride(*spirvContext, stride, index));
@@ -421,7 +421,7 @@ TypeTranslator::getLayoutDecorations(const DeclContext *decl) {
   return decorations;
 }
 
-uint32_t TypeTranslator::translateResourceType(QualType type) {
+uint32_t TypeTranslator::translateResourceType(QualType type, LayoutRule rule) {
   const auto *recordType = type->getAs<RecordType>();
   assert(recordType);
   const llvm::StringRef name = recordType->getDecl()->getName();
@@ -453,6 +453,33 @@ uint32_t TypeTranslator::translateResourceType(QualType type) {
     return theBuilder.getSamplerType();
   }
 
+  if (name == "StructuredBuffer" || name == "RWStructuredBuffer") {
+    auto &context = *theBuilder.getSPIRVContext();
+    // StructuredBuffer<S> will be translated into an OpTypeStruct with one
+    // field, which is an OpTypeRuntimeArray of OpTypeStruct (S).
+
+    const auto s = hlsl::GetHLSLResourceResultType(type);
+    const uint32_t structType = translateType(s, rule);
+    const auto structName = s->getAs<RecordType>()->getDecl()->getName();
+
+    llvm::SmallVector<const Decoration *, 4> decorations;
+    // The stride for the runtime array is the size of S.
+    uint32_t size = 0, stride = 0;
+    std::tie(std::ignore, size) =
+        getAlignmentAndSize(s, rule, /*isRowMajor*/ false, &stride);
+    decorations.push_back(Decoration::getArrayStride(context, size));
+    const uint32_t raType =
+        theBuilder.getRuntimeArrayType(structType, decorations);
+
+    decorations.clear();
+    decorations.push_back(Decoration::getOffset(context, 0, 0));
+    if (!name.startswith("RW"))
+      decorations.push_back(Decoration::getNonWritable(context, 0));
+    decorations.push_back(Decoration::getBufferBlock(context));
+    const std::string typeName = "type." + name.str() + "." + structName.str();
+    return theBuilder.getStructType(raType, typeName, {}, decorations);
+  }
+
   // ByteAddressBuffer types.
   if (name == "ByteAddressBuffer") {
     return theBuilder.getByteAddressBufferType(/*isRW*/ false);
@@ -503,8 +530,8 @@ TypeTranslator::translateSampledTypeToImageFormat(QualType sampledType) {
 }
 
 std::pair<uint32_t, uint32_t>
-TypeTranslator::getAlignmentAndSize(QualType type, uint32_t *stride,
-                                    const bool isRowMajor) {
+TypeTranslator::getAlignmentAndSize(QualType type, LayoutRule rule,
+                                    const bool isRowMajor, uint32_t *stride) {
   // std140 layout rules:
 
   // 1. If the member is a scalar consuming N basic machine units, the base
@@ -555,10 +582,11 @@ TypeTranslator::getAlignmentAndSize(QualType type, uint32_t *stride,
   //     are laid out in order, according to rule (9).
   const auto canonicalType = type.getCanonicalType();
   if (canonicalType != type)
-    return getAlignmentAndSize(canonicalType, stride, isRowMajor);
+    return getAlignmentAndSize(canonicalType, rule, isRowMajor, stride);
 
   if (const auto *typedefType = type->getAs<TypedefType>())
-    return getAlignmentAndSize(typedefType->desugar(), stride, isRowMajor);
+    return getAlignmentAndSize(typedefType->desugar(), rule, isRowMajor,
+                               stride);
 
   { // Rule 1
     QualType ty = {};
@@ -585,7 +613,7 @@ TypeTranslator::getAlignmentAndSize(QualType type, uint32_t *stride,
     if (isVectorType(type, &elemType, &elemCount)) {
       uint32_t size = 0;
       std::tie(std::ignore, size) =
-          getAlignmentAndSize(elemType, stride, isRowMajor);
+          getAlignmentAndSize(elemType, rule, isRowMajor, stride);
 
       return {(elemCount == 3 ? 4 : elemCount) * size, elemCount * size};
     }
@@ -597,7 +625,7 @@ TypeTranslator::getAlignmentAndSize(QualType type, uint32_t *stride,
     if (isMxNMatrix(type, &elemType, &rowCount, &colCount)) {
       uint32_t alignment = 0, size = 0;
       std::tie(alignment, std::ignore) =
-          getAlignmentAndSize(elemType, stride, isRowMajor);
+          getAlignmentAndSize(elemType, rule, isRowMajor, stride);
 
       // Matrices are treated as arrays of vectors:
       // The base alignment and array stride are set to match the base alignment
@@ -605,7 +633,9 @@ TypeTranslator::getAlignmentAndSize(QualType type, uint32_t *stride,
       // up to the base alignment of a vec4.
       const uint32_t vecStorageSize = isRowMajor ? colCount : rowCount;
       alignment *= (vecStorageSize == 3 ? 4 : vecStorageSize);
-      roundToPow2(&alignment, kStd140Vec4Alignment);
+      if (rule == LayoutRule::GLSLStd140) {
+        roundToPow2(&alignment, kStd140Vec4Alignment);
+      }
       *stride = alignment;
       size = (isRowMajor ? rowCount : colCount) * alignment;
 
@@ -621,7 +651,7 @@ TypeTranslator::getAlignmentAndSize(QualType type, uint32_t *stride,
     for (const auto *field : structType->getDecl()->fields()) {
       uint32_t memberAlignment = 0, memberSize = 0;
       std::tie(memberAlignment, memberSize) = getAlignmentAndSize(
-          field->getType(), stride, field->hasAttr<HLSLRowMajorAttr>());
+          field->getType(), rule, field->hasAttr<HLSLRowMajorAttr>(), stride);
 
       // The base alignment of the structure is N, where N is the largest
       // base alignment value of any of its members...
@@ -630,8 +660,10 @@ TypeTranslator::getAlignmentAndSize(QualType type, uint32_t *stride,
       structSize += memberSize;
     }
 
-    // ... and rounded up to the base alignment of a vec4.
-    roundToPow2(&maxAlignment, kStd140Vec4Alignment);
+    if (rule == LayoutRule::GLSLStd140) {
+      // ... and rounded up to the base alignment of a vec4.
+      roundToPow2(&maxAlignment, kStd140Vec4Alignment);
+    }
     // The base offset of the member following the sub-structure is rounded up
     // to the next multiple of the base alignment of the structure.
     roundToPow2(&structSize, maxAlignment);
@@ -641,13 +673,15 @@ TypeTranslator::getAlignmentAndSize(QualType type, uint32_t *stride,
   // Rule 4, 6, 8, and 10
   if (const auto *arrayType = astContext.getAsConstantArrayType(type)) {
     uint32_t alignment = 0, size = 0;
-    std::tie(alignment, size) =
-        getAlignmentAndSize(arrayType->getElementType(), stride, isRowMajor);
+    std::tie(alignment, size) = getAlignmentAndSize(arrayType->getElementType(),
+                                                    rule, isRowMajor, stride);
 
-    // The base alignment and array stride are set to match the base alignment
-    // of a single array element, according to rules 1, 2, and 3, and rounded
-    // up to the base alignment of a vec4.
-    roundToPow2(&alignment, kStd140Vec4Alignment);
+    if (rule == LayoutRule::GLSLStd140) {
+      // The base alignment and array stride are set to match the base alignment
+      // of a single array element, according to rules 1, 2, and 3, and rounded
+      // up to the base alignment of a vec4.
+      roundToPow2(&alignment, kStd140Vec4Alignment);
+    }
     // Need to round size up considering stride for scalar types
     roundToPow2(&size, alignment);
     *stride = size; // Use size instead of alignment here for Rule 10

+ 17 - 7
tools/clang/lib/SPIRV/TypeTranslator.h

@@ -19,6 +19,13 @@
 namespace clang {
 namespace spirv {
 
+/// Memory layout rules
+enum class LayoutRule {
+  Void,
+  GLSLStd140,
+  GLSLStd430,
+};
+
 /// The class responsible to translate Clang frontend types into SPIR-V type
 /// instructions.
 ///
@@ -36,14 +43,15 @@ public:
   /// frontend type and returns the type's <result-id>. On failure, reports
   /// the error and returns 0. If decorateLayout is true, layout decorations
   /// (Offset, MatrixStride, ArrayStride, RowMajor, ColMajor) will be attached
-  /// to the struct or array types. If decorateLayout is true and type is a
+  /// to the struct or array types. If layoutRule is not Void and type is a
   /// matrix or array of matrix type, isRowMajor will indicate whether it is
   /// decorated with row_major in the source code.
   ///
   /// The translation is recursive; all the types that the target type depends
   /// on will be generated and all with layout decorations (if decorateLayout
   /// is true).
-  uint32_t translateType(QualType type, bool decorateLayout = false,
+  uint32_t translateType(QualType type,
+                         LayoutRule layoutRule = LayoutRule::Void,
                          bool isRowMajor = false);
 
   /// \brief Returns true if the given type is the HLSL ByteAddressBufferType.
@@ -121,7 +129,7 @@ public:
   /// according to the spec, must be attached to the array type itself instead
   /// of a struct member.
   llvm::SmallVector<const Decoration *, 4>
-  getLayoutDecorations(const DeclContext *decl);
+  getLayoutDecorations(const DeclContext *decl, LayoutRule rule);
 
 private:
   /// \brief Wrapper method to create an error message and report it
@@ -134,10 +142,10 @@ private:
 
   /// \brief Translates the given HLSL resource type into its SPIR-V
   /// instructions and returns the <result-id>. Returns 0 on failure.
-  uint32_t translateResourceType(QualType type);
+  uint32_t translateResourceType(QualType type, LayoutRule rule);
 
   /// \brief Returns the alignment and size in bytes for the given type
-  /// according to std140.
+  /// according to the given LayoutRule.
 
   /// If the type is an array/matrix type, writes the array/matrix stride to
   /// stride. If the type is a matrix, isRowMajor will be used to indicate
@@ -147,8 +155,10 @@ private:
   /// will occupy in memory; rather it is used in conjunction with alignment
   /// to get the next available location (alignment + size), which means
   /// size contains post-paddings required by the given type.
-  std::pair<uint32_t, uint32_t>
-  getAlignmentAndSize(QualType type, uint32_t *stride, bool isRowMajor);
+  std::pair<uint32_t, uint32_t> getAlignmentAndSize(QualType type,
+                                                    LayoutRule rule,
+                                                    bool isRowMajor,
+                                                    uint32_t *stride);
 
  /// \brief For the given sampled type, returns the corresponding image format
   /// that can be used to create an image object.

+ 39 - 0
tools/clang/test/CodeGenSPIRV/method.structured-buffer.load.hlsl

@@ -0,0 +1,39 @@
+// Run: %dxc -T ps_6_0 -E main
+
+struct SBuffer {
+    float4   f1;
+    float2x3 f2[3];
+};
+
+  StructuredBuffer<SBuffer> mySBuffer1;
+RWStructuredBuffer<SBuffer> mySBuffer2;
+
+float4 main(int index: A) : SV_Target {
+    // b1 and b2's type does not need layout decorations. So it's a different
+    // SBuffer definition.
+// XXXXX-NOT:  OpMemberDecorate %SBuffer_0 0 Offset 0
+// XXXXX:      %_ptr_Function_SBuffer_0 = OpTypePointer Function %SBuffer_0
+
+// XXXXX:      %b1 = OpVariable %_ptr_Function_SBuffer_0 Function
+// XXXXX-NEXT: %b2 = OpVariable %_ptr_Function_SBuffer_0 Function
+
+// TODO: wrong codegen right now: missing load the value from sb1 & sb2
+// TODO: need to make sure we have %SBuffer (not %SBuffer_0) as the loaded type
+// XXXXX:      [[index:%\d+]] = OpLoad %int %index
+// XXXXX:      [[sb1:%\d+]] = OpAccessChain %_ptr_Uniform_SBuffer %mySBuffer1 %int_0 [[index]]
+// XXXXX:      {{%\d+}} = OpLoad %SBuffer [[sb1]]
+// XXXXX:      [[sb2:%\d+]] = OpAccessChain %_ptr_Uniform_SBuffer %mySBuffer2 %int_0 %int_0
+// XXXXX:      {{%\d+}} = OpLoad %SBuffer [[sb2]]
+    //SBuffer b1 = mySBuffer1.Load(index);
+    //SBuffer b2;
+    //b2 = mySBuffer2.Load(0);
+
+// CHECK:      [[f1:%\d+]] = OpAccessChain %_ptr_Uniform_v4float %mySBuffer1 %int_0 %int_5 %int_0
+// CHECK-NEXT: [[x:%\d+]] = OpAccessChain %_ptr_Uniform_float [[f1]] %int_0
+// CHECK-NEXT: {{%\d+}} = OpLoad %float [[x]]
+
+// CHECK:      [[index:%\d+]] = OpLoad %int %index
+// CHECK-NEXT: [[f012:%\d+]] = OpAccessChain %_ptr_Uniform_float %mySBuffer2 %int_0 [[index]] %int_1 %int_0 %uint_1 %uint_2
+// CHECK-NEXT: {{%\d+}} = OpLoad %float [[f012]]
+    return mySBuffer1.Load(5).f1.x + mySBuffer2.Load(index).f2[0][1][2];
+}

+ 2 - 4
tools/clang/test/CodeGenSPIRV/op.array.access.hlsl

@@ -35,8 +35,7 @@ float main(float val: A, uint index: B) : C {
 // CHECK-NEXT:  [[res:%\d+]] = OpVectorShuffle %v4float [[vec4]] [[vec2]] 0 1 5 4
 // CHECK-NEXT:                 OpStore [[ptr0]] [[res]]
     vecvar[3].ab = val;
-// CHECK-NEXT: [[ptr1:%\d+]] = OpAccessChain %_ptr_Function_v4float %vecvar %int_2
-// CHECK-NEXT: [[ptr2:%\d+]] = OpAccessChain %_ptr_Function_float [[ptr1]] %uint_1
+// CHECK-NEXT: [[ptr2:%\d+]] = OpAccessChain %_ptr_Function_float %vecvar %int_2 %uint_1
 // CHECK-NEXT: [[load:%\d+]] = OpLoad %float [[ptr2]]
 // CHECK-NEXT:                 OpStore %r [[load]]
     r = vecvar[2][1];
@@ -51,8 +50,7 @@ float main(float val: A, uint index: B) : C {
 // CHECK-NEXT: [[ptr2:%\d+]] = OpAccessChain %_ptr_Function_float [[ptr0]] %int_1 %int_2
 // CHECK-NEXT:                 OpStore [[ptr2]] [[val1]]
     matvar[2]._12_23 = val;
-// CHECK-NEXT: [[ptr3:%\d+]] = OpAccessChain %_ptr_Function_mat2v3float %matvar %int_0
-// CHECK-NEXT: [[ptr4:%\d+]] = OpAccessChain %_ptr_Function_float [[ptr3]] %uint_1 %uint_2
+// CHECK-NEXT: [[ptr4:%\d+]] = OpAccessChain %_ptr_Function_float %matvar %int_0 %uint_1 %uint_2
 // CHECK-NEXT: [[load:%\d+]] = OpLoad %float [[ptr4]]
 // CHECK-NEXT:                 OpStore %r [[load]]
     r = matvar[0][1][2];

+ 1 - 2
tools/clang/test/CodeGenSPIRV/op.constant-buffer.access.hlsl

@@ -23,8 +23,7 @@ float main() : A {
 // CHECK-NEXT: [[b0:%\d+]] = OpAccessChain %_ptr_Uniform_float [[b]] %int_0
 // CHECK-NEXT: {{%\d+}} = OpLoad %float [[b0]]
 
-// CHECK:      [[c:%\d+]] = OpAccessChain %_ptr_Uniform_mat3v4float %MyCbuffer %int_2
-// CHECK-NEXT: [[c12:%\d+]] = OpAccessChain %_ptr_Uniform_float [[c]] %uint_1 %uint_2
+// CHECK:      [[c12:%\d+]] = OpAccessChain %_ptr_Uniform_float %MyCbuffer %int_2 %uint_1 %uint_2
 // CHECK-NEXT: {{%\d+}} = OpLoad %float [[c12]]
 
 // CHECK:      [[s:%\d+]] = OpAccessChain %_ptr_Uniform_float %MyCbuffer %int_3 %int_0

+ 46 - 0
tools/clang/test/CodeGenSPIRV/op.rw-structured-buffer.access.hlsl

@@ -0,0 +1,46 @@
+// Run: %dxc -T ps_6_0 -E main
+
+struct S {
+    float  f;
+};
+
+struct T {
+    float    a;
+    float2   b[2];
+    float3x4 c[3];
+    S        s[2];
+    float    t[4];
+};
+
+
+RWStructuredBuffer<T> MySbuffer;
+
+void main(uint index: A) {
+// CHECK:      [[c12:%\d+]] = OpAccessChain %_ptr_Uniform_float %MySbuffer %int_0 %uint_2 %int_2 %int_2 %uint_1 %uint_2
+// CHECK-NEXT: {{%\d+}} = OpLoad %float [[c12]]
+
+// CHECK:      [[s:%\d+]] = OpAccessChain %_ptr_Uniform_float %MySbuffer %int_0 %uint_3 %int_3 %int_0 %int_0
+// CHECK-NEXT: {{%\d+}} = OpLoad %float [[s]]
+    float val = MySbuffer[2].c[2][1][2] + MySbuffer[3].s[0].f;
+
+// CHECK:       [[val:%\d+]] = OpLoad %float %val
+// CHECK-NEXT:  [[index:%\d+]] = OpLoad %uint %index
+
+// CHECK-NEXT:  [[t3:%\d+]] = OpAccessChain %_ptr_Uniform_float %MySbuffer %int_0 [[index]] %int_4 %int_3
+// CHECK-NEXT:  OpStore [[t3]] [[val]]
+
+// CHECK:       [[f:%\d+]] = OpAccessChain %_ptr_Uniform_float %MySbuffer %int_0 %uint_3 %int_3 %int_0 %int_0
+// CHECK-NEXT:  OpStore [[f]] [[val]]
+
+// CHECK-NEXT:  [[c212:%\d+]] = OpAccessChain %_ptr_Uniform_float %MySbuffer %int_0 %uint_2 %int_2 %int_2 %uint_1 %uint_2
+// CHECK-NEXT:  OpStore [[c212]] [[val]]
+
+// CHECK-NEXT:  [[b1:%\d+]] = OpAccessChain %_ptr_Uniform_v2float %MySbuffer %int_0 %uint_1 %int_1 %int_1
+// CHECK-NEXT:  [[x:%\d+]] = OpAccessChain %_ptr_Uniform_float [[b1]] %int_0
+// CHECK-NEXT:  OpStore [[x]] [[val]]
+
+// CHECK-NEXT:  [[a:%\d+]] = OpAccessChain %_ptr_Uniform_float %MySbuffer %int_0 %uint_0 %int_0
+// CHECK-NEXT:  OpStore [[a]] [[val]]
+    MySbuffer[0].a = MySbuffer[1].b[1].x = MySbuffer[2].c[2][1][2] =
+    MySbuffer[3].s[0].f = MySbuffer[index].t[3] = val;
+}

+ 4 - 8
tools/clang/test/CodeGenSPIRV/op.struct.access.hlsl

@@ -21,8 +21,7 @@ void main() {
 // CHECK-NEXT: {{%\d+}} = OpLoad %bool [[a]]
     bool v2 = t.i.a;
 
-// CHECK:      [[b:%\d+]] = OpAccessChain %_ptr_Function_v2uint %t %int_1 %int_1
-// CHECK-NEXT: [[b0:%\d+]] = OpAccessChain %_ptr_Function_uint [[b]] %uint_0
+// CHECK:      [[b0:%\d+]] = OpAccessChain %_ptr_Function_uint %t %int_1 %int_1 %uint_0
 // CHECK-NEXT: {{%\d+}} = OpLoad %uint [[b0]]
     uint v3 = t.i.b[0];
 // CHECK:      [[b:%\d+]] = OpAccessChain %_ptr_Function_v2uint %t %int_1 %int_1
@@ -36,8 +35,7 @@ void main() {
 // CHECK-NEXT: [[c11v:%\d+]] = OpLoad %float [[c11p]]
 // CHECK-NEXT: {{%\d+}} = OpCompositeConstruct %v2float [[c00v]] [[c11v]]
     float2 v5 = t.i.c._11_22;
-// CHECK:      [[c:%\d+]] = OpAccessChain %_ptr_Function_mat2v3float %t %int_1 %int_2
-// CHECK-NEXT: [[c1:%\d+]] = OpAccessChain %_ptr_Function_v3float [[c]] %uint_1
+// CHECK:      [[c1:%\d+]] = OpAccessChain %_ptr_Function_v3float %t %int_1 %int_2 %uint_1
 // CHECK-NEXT: {{%\d+}} = OpLoad %v3float [[c1]]
     float3 v6 = t.i.c[1];
 
@@ -48,8 +46,7 @@ void main() {
 // CHECK-NEXT: OpStore [[a]] {{%\d+}}
     t.i.a = v2;
 
-// CHECK:      [[b:%\d+]] = OpAccessChain %_ptr_Function_v2uint %t %int_1 %int_1
-// CHECK-NEXT: [[b1:%\d+]] = OpAccessChain %_ptr_Function_uint [[b]] %uint_1
+// CHECK:      [[b1:%\d+]] = OpAccessChain %_ptr_Function_uint %t %int_1 %int_1 %uint_1
 // CHECK-NEXT: OpStore [[b1]] {{%\d+}}
     t.i.b[1] = v3;
 // CHECK:      [[v4:%\d+]] = OpLoad %v2uint %v4
@@ -68,8 +65,7 @@ void main() {
 // CHECK-NEXT: [[c00:%\d+]] = OpAccessChain %_ptr_Function_float [[c]] %int_0 %int_0
 // CHECK-NEXT: OpStore [[c00]] [[v51]]
     t.i.c._22_11 = v5;
-// CHECK:      [[c:%\d+]] = OpAccessChain %_ptr_Function_mat2v3float %t %int_1 %int_2
-// CHECK-NEXT: [[c0:%\d+]] = OpAccessChain %_ptr_Function_v3float [[c]] %uint_0
+// CHECK:      [[c0:%\d+]] = OpAccessChain %_ptr_Function_v3float %t %int_1 %int_2 %uint_0
 // CHECK-NEXT: OpStore [[c0]] {{%\d+}}
     t.i.c[0] = v6;
 }

+ 37 - 0
tools/clang/test/CodeGenSPIRV/op.structured-buffer.access.hlsl

@@ -0,0 +1,37 @@
+// Run: %dxc -T ps_6_0 -E main
+
+struct S {
+    float  f;
+};
+
+struct T {
+    float    a;
+    float2   b[2];
+    float3x4 c[3];
+    S        s[2];
+    float    t[4];
+};
+
+
+StructuredBuffer<T> MySbuffer;
+
+float4 main(uint index: A) : SV_Target {
+// CHECK:      [[a:%\d+]] = OpAccessChain %_ptr_Uniform_float %MySbuffer %int_0 %uint_0 %int_0
+// CHECK-NEXT: {{%\d+}} = OpLoad %float [[a]]
+
+// CHECK:      [[b1:%\d+]] = OpAccessChain %_ptr_Uniform_v2float %MySbuffer %int_0 %uint_1 %int_1 %int_1
+// CHECK-NEXT: [[x:%\d+]] = OpAccessChain %_ptr_Uniform_float [[b1]] %int_0
+// CHECK-NEXT: {{%\d+}} = OpLoad %float [[x]]
+
+// CHECK:      [[c12:%\d+]] = OpAccessChain %_ptr_Uniform_float %MySbuffer %int_0 %uint_2 %int_2 %int_2 %uint_1 %uint_2
+// CHECK-NEXT: {{%\d+}} = OpLoad %float [[c12]]
+
+// CHECK:      [[s:%\d+]] = OpAccessChain %_ptr_Uniform_float %MySbuffer %int_0 %uint_3 %int_3 %int_0 %int_0
+// CHECK-NEXT: {{%\d+}} = OpLoad %float [[s]]
+
+// CHECK:      [[index:%\d+]] = OpLoad %uint %index
+// CHECK-NEXT: [[t:%\d+]] = OpAccessChain %_ptr_Uniform_float %MySbuffer %int_0 [[index]] %int_4 %int_3
+// CHECK-NEXT: {{%\d+}} = OpLoad %float [[t]]
+    return MySbuffer[0].a + MySbuffer[1].b[1].x + MySbuffer[2].c[2][1][2] +
+           MySbuffer[3].s[0].f + MySbuffer[index].t[3];
+}

+ 1 - 2
tools/clang/test/CodeGenSPIRV/spirv.storage-class.hlsl

@@ -20,8 +20,7 @@ VSOut main(float4 input: A /* Function */, uint index: B /* Function */) {
 // CHECK:      OpAccessChain %_ptr_Function_float %input
 // CHECK:      OpAccessChain %_ptr_Private_float %sgVar
 // CHECK:      OpAccessChain %_ptr_Private_float %slVar
-// CHECK:      [[lhs:%\d+]] = OpAccessChain %_ptr_Function_v4float %ret %int_0
-// CHECK-NEXT: OpAccessChain %_ptr_Function_float [[lhs]]
+// CHECK:      OpAccessChain %_ptr_Function_float %ret %int_0 {{%\d+}}
     ret.out1[index] = input[index] + sgVar[index] + slVar[index];
 
     return ret;

+ 48 - 0
tools/clang/test/CodeGenSPIRV/type.structured-buffer.hlsl

@@ -0,0 +1,48 @@
+// Run: %dxc -T ps_6_0 -E main
+
+// CHECK: OpName %type_StructuredBuffer_S "type.StructuredBuffer.S"
+// CHECK: OpName %type_StructuredBuffer_T "type.StructuredBuffer.T"
+
+// CHECK: OpName %type_RWStructuredBuffer_S "type.RWStructuredBuffer.S"
+// CHECK: OpName %type_RWStructuredBuffer_T "type.RWStructuredBuffer.T"
+
+// CHECK: %S = OpTypeStruct %float %v3float %mat2v3float
+// CHECK: %_runtimearr_S = OpTypeRuntimeArray %S
+// CHECK: %type_StructuredBuffer_S = OpTypeStruct %_runtimearr_S
+// CHECK: %_ptr_Uniform_type_StructuredBuffer_S = OpTypePointer Uniform %type_StructuredBuffer_S
+struct S {
+    float    a;
+    float3   b;
+    float2x3 c;
+};
+
+// CHECK: %T = OpTypeStruct %_arr_float_uint_3 %_arr_v3float_uint_4 %_arr_S_uint_3 %_arr_mat3v2float_uint_4
+// CHECK: %_runtimearr_T = OpTypeRuntimeArray %T
+// CHECK: %type_StructuredBuffer_T = OpTypeStruct %_runtimearr_T
+// CHECK: %_ptr_Uniform_type_StructuredBuffer_T = OpTypePointer Uniform %type_StructuredBuffer_T
+
+// CHECK: %type_RWStructuredBuffer_S = OpTypeStruct %_runtimearr_S
+// CHECK: %_ptr_Uniform_type_RWStructuredBuffer_S = OpTypePointer Uniform %type_RWStructuredBuffer_S
+
+// CHECK: %type_RWStructuredBuffer_T = OpTypeStruct %_runtimearr_T
+// CHECK: %_ptr_Uniform_type_RWStructuredBuffer_T = OpTypePointer Uniform %type_RWStructuredBuffer_T
+struct T {
+    float    a[3];
+    float3   b[4];
+    S        c[3];
+    float3x2 d[4];
+};
+
+// CHECK: %mySBuffer1 = OpVariable %_ptr_Uniform_type_StructuredBuffer_S Uniform
+StructuredBuffer<S> mySBuffer1 : register(t1);
+// CHECK: %mySBuffer2 = OpVariable %_ptr_Uniform_type_StructuredBuffer_T Uniform
+StructuredBuffer<T> mySBuffer2 : register(t2);
+
+// CHECK: %mySBuffer3 = OpVariable %_ptr_Uniform_type_RWStructuredBuffer_S Uniform
+RWStructuredBuffer<S> mySBuffer3 : register(u1);
+// CHECK: %mySBuffer4 = OpVariable %_ptr_Uniform_type_RWStructuredBuffer_T Uniform
+RWStructuredBuffer<T> mySBuffer4 : register(u2);
+
+float4 main() : SV_Target {
+    return 1.0;
+}

+ 13 - 0
tools/clang/test/CodeGenSPIRV/vk.binding.explicit.hlsl

@@ -33,6 +33,19 @@ RWBuffer<float4> myRWBuffer : register(u0, space1);
 // TODO: support [[vk::binding()]] on cbuffer
 // TODO: support [[vk::binding()]] on ConstantBuffer
 
+struct S {
+    float f;
+};
+
+// CHECK:      OpDecorate %sbuffer1 DescriptorSet 0
+// CHECK-NEXT: OpDecorate %sbuffer1 Binding 3
+[[vk::binding(3)]]
+  StructuredBuffer<S> sbuffer1 : register(t5);
+// CHECK:      OpDecorate %sbuffer2 DescriptorSet 3
+// CHECK-NEXT: OpDecorate %sbuffer2 Binding 2
+[[vk::binding(2, 3)]]
+RWStructuredBuffer<S> sbuffer2 : register(u6);
+
 float4 main() : SV_Target {
     return 1.0;
 }

+ 7 - 0
tools/clang/test/CodeGenSPIRV/vk.binding.implicit.hlsl

@@ -38,6 +38,13 @@ struct S {
 // CHECK-NEXT: OpDecorate %myCbuffer2 Binding 7
 ConstantBuffer<S> myCbuffer2;
 
+// CHECK:      OpDecorate %sbuffer1 DescriptorSet 0
+// CHECK-NEXT: OpDecorate %sbuffer1 Binding 8
+  StructuredBuffer<S> sbuffer1;
+// CHECK:      OpDecorate %sbuffer2 DescriptorSet 0
+// CHECK-NEXT: OpDecorate %sbuffer2 Binding 9
+RWStructuredBuffer<S> sbuffer2;
+
 float4 main() : SV_Target {
     return 1.0;
 }

+ 7 - 0
tools/clang/test/CodeGenSPIRV/vk.binding.register.hlsl

@@ -54,6 +54,13 @@ ConstantBuffer<S> myCbuffer2 : register(b2, space2);
 // CHECK-NEXT: OpDecorate %myCbuffer3 Binding 2
 ConstantBuffer<S> myCbuffer3 : register(b2, space3);
 
+// CHECK:      OpDecorate %sbuffer1 DescriptorSet 0
+// CHECK-NEXT: OpDecorate %sbuffer1 Binding 5
+  StructuredBuffer<S> sbuffer1 : register(t5);
+// CHECK:      OpDecorate %sbuffer2 DescriptorSet 1
+// CHECK-NEXT: OpDecorate %sbuffer2 Binding 6
+RWStructuredBuffer<S> sbuffer2 : register(u6, space1);
+
 float4 main() : SV_Target {
     return 1.0;
 }

+ 92 - 0
tools/clang/test/CodeGenSPIRV/vk.layout.sbuffer.nested.std430.hlsl

@@ -0,0 +1,92 @@
+// Run: %dxc -T ps_6_0 -E main
+
+// Deep nested array of matrices
+// Deep nested majorness
+struct R {                         // Alignment    Offset  Size                              Next
+    row_major    float2x3 rf1[3];  // 16(vec4)  -> 0     + 3(array) * stride(2 * 16(vec4)) = 96
+    column_major float2x3 rf2[4];  // 8(vec2)   -> 96    + 4(array) * stride(3 * 8(vec2))  = 192
+                 float2x3 rf3[2];  // 8(vec2)   -> 192   + 2(array) * stride(3 * 8(vec2))  = 240
+                 int      rf4;     // 4         -> 240   + 4                               = 244
+};                                 // 16(max)                                                256 (244 round up to R alignment)
+
+// Array of scalars, vectors, matrices, and structs
+struct S {                         // Alignment   Offset  Size                              Next
+    float3       sf1[3];           // 16(vec4) -> 0     + 3(array) * 16(vec4)             = 48
+    float        sf2[3];           // 4        -> 48    + 3(array) * 4                    = 60
+    R            sf3[4];           // 16       -> 64    + 4(array) * stride(256)          = 1088
+    row_major    float3x2 sf4[2];  // 8(vec2)  -> 1088  + 2(array) * stride(3 * 8(vec2))  = 1136
+    column_major float3x2 sf5[3];  // 16(vec4) -> 1136  + 3(array) * stride(2 * 16(vec4)) = 1232
+                 float3x2 sf6[4];  // 16(vec4) -> 1232  + 4(array) * stride(2 * 16(vec4)) = 1360
+                 float    sf7;     // 4        -> 1360  + 4                               = 1364
+};                                 // 16(max)                                               1376 (1364 round up to S alignment)
+
+struct T {        // Alignment    Offset  Size              Next
+    R    tf1[2];  // 16        -> 0     + 2(array) * 256  = 512
+    S    tf2[3];  // 16        -> 512   + 3(array) * 1376 = 4640
+    uint tf3;     // 4         -> 4640  + 4               = 4644
+};                // 16(max)                                4656 (4640 round up to T alignment)
+
+struct SBuffer {  // Alignment   Offset   Size                 Next
+    T    t[2];       // 16       -> 0      + 2(array) * 4656 = 9312
+    bool z;          // 4        -> 9312
+};
+
+RWStructuredBuffer<SBuffer> MySBuffer;
+
+// CHECK:      OpDecorate %_arr_mat2v3float_uint_3 ArrayStride 32
+// CHECK:      OpDecorate %_arr_mat2v3float_uint_4 ArrayStride 24
+// CHECK:      OpDecorate %_arr_mat2v3float_uint_2 ArrayStride 24
+
+// CHECK:      OpMemberDecorate %R 0 Offset 0
+// CHECK-NEXT: OpMemberDecorate %R 0 MatrixStride 16
+// CHECK-NEXT: OpMemberDecorate %R 0 ColMajor
+// CHECK-NEXT: OpMemberDecorate %R 1 Offset 96
+// CHECK-NEXT: OpMemberDecorate %R 1 MatrixStride 8
+// CHECK-NEXT: OpMemberDecorate %R 1 RowMajor
+// CHECK-NEXT: OpMemberDecorate %R 2 Offset 192
+// CHECK-NEXT: OpMemberDecorate %R 2 MatrixStride 8
+// CHECK-NEXT: OpMemberDecorate %R 2 RowMajor
+// CHECK-NEXT: OpMemberDecorate %R 3 Offset 240
+
+// CHECK:      OpDecorate %_arr_R_uint_2 ArrayStride 256
+// CHECK:      OpDecorate %_arr_v3float_uint_3 ArrayStride 16
+// CHECK:      OpDecorate %_arr_float_uint_3 ArrayStride 4
+// CHECK:      OpDecorate %_arr_R_uint_4 ArrayStride 256
+
+// CHECK:      OpDecorate %_arr_mat3v2float_uint_2 ArrayStride 24
+// CHECK:      OpDecorate %_arr_mat3v2float_uint_3 ArrayStride 32
+// CHECK:      OpDecorate %_arr_mat3v2float_uint_4 ArrayStride 32
+
+// CHECK:      OpMemberDecorate %S 0 Offset 0
+// CHECK-NEXT: OpMemberDecorate %S 1 Offset 48
+// CHECK-NEXT: OpMemberDecorate %S 2 Offset 64
+// CHECK-NEXT: OpMemberDecorate %S 3 Offset 1088
+// CHECK-NEXT: OpMemberDecorate %S 3 MatrixStride 8
+// CHECK-NEXT: OpMemberDecorate %S 3 ColMajor
+// CHECK-NEXT: OpMemberDecorate %S 4 Offset 1136
+// CHECK-NEXT: OpMemberDecorate %S 4 MatrixStride 16
+// CHECK-NEXT: OpMemberDecorate %S 4 RowMajor
+// CHECK-NEXT: OpMemberDecorate %S 5 Offset 1232
+// CHECK-NEXT: OpMemberDecorate %S 5 MatrixStride 16
+// CHECK-NEXT: OpMemberDecorate %S 5 RowMajor
+// CHECK-NEXT: OpMemberDecorate %S 6 Offset 1360
+
+// CHECK:      OpDecorate %_arr_S_uint_3 ArrayStride 1376
+
+// CHECK:      OpMemberDecorate %T 0 Offset 0
+// CHECK-NEXT: OpMemberDecorate %T 1 Offset 512
+// CHECK-NEXT: OpMemberDecorate %T 2 Offset 4640
+
+// CHECK:      OpDecorate %_arr_T_uint_2 ArrayStride 4656
+
+// CHECK-NEXT: OpMemberDecorate %SBuffer 0 Offset 0
+// CHECK-NEXT: OpMemberDecorate %SBuffer 1 Offset 9312
+
+// CHECK:      OpDecorate %_runtimearr_SBuffer ArrayStride 9328
+
+// CHECK:      OpMemberDecorate %type_RWStructuredBuffer_SBuffer 0 Offset 0
+// CHECK-NEXT: OpDecorate %type_RWStructuredBuffer_SBuffer BufferBlock
+
+float4 main() : SV_Target {
+    return 1.0;
+}

+ 85 - 0
tools/clang/test/CodeGenSPIRV/vk.layout.sbuffer.std430.hlsl

@@ -0,0 +1,85 @@
+// Run: %dxc -T ps_6_0 -E main
+
+struct R {     // Alignment       Offset     Size       Next
+    float2 rf; // 8(vec2)      -> 0        + 8(vec2)  = 8
+};             // 8               8          8
+
+struct S {      // Alignment    Offset                                Size        Next
+    R      sf1; // 8         -> 0                                   + 8         = 8
+    float  sf2; // 4         -> 8                                   + 4         = 12
+    float3 sf3; // 16(vec4)  -> 16 (12 round up to vec4 alignment)  + 12(vec3)  = 28
+    float  sf4; // 4         -> 28                                  + 4         = 32
+};              // 16(max)                                                        32
+
+struct T {           // Alignment     Offset                               Size              = Next
+    int      tf1;    // 4          -> 0                                  + 4                 = 4
+    R        tf2[3]; // 8          -> 8                                  + 3 * stride(8)     = 32
+    float3x2 tf3;    // 16(vec4)   -> 32 (32 round up to vec4 alignment) + 2 * stride(vec4)  = 64
+    S        tf4;    // 16         -> 64 (64 round up to S alignment)    + 32                = 96
+    float    tf5;    // 4          -> 96                                 + 4                 = 100
+};                   // 16(max)                                                                112(100 round up to T max alignment)
+
+struct SBuffer {              // Alignment   Offset                                 Size                     Next
+                 bool     a;     // 4        -> 0                                    +     4                  = 4
+                 uint1    b;     // 4        -> 4                                    +     4                  = 8
+                 float3   c;     // 16(vec4) -> 16 (8 round up to vec4 alignment)    + 3 * 4                  = 28
+    row_major    float2x3 d;     // 16(vec4) -> 32 (28 round up to vec4 alignment)   + 2 * stride(vec4)       = 64
+    column_major float2x3 e;     // 16(vec4) -> 64 (64 round up to vec2 alignment)   + 3 * stride(vec2)       = 88
+                 float2x1 f;     // 8(vec2)  -> 88 (88 round up to vec2 alignment)   + 2 * 4                  = 96
+    row_major    float2x3 g[3];  // 16(vec4) -> 96 (96 round up to vec4 alignment)   + 3 * 2 * stride(vec4)   = 192
+    column_major float2x2 h[4];  // 16(vec4) -> 192 (192 round up to vec2 alignment) + 4 * 2 * stride(vec2)   = 256
+                 T        t;     // 16       -> 256 (256 round up to T alignment)    + 112                    = 368
+                 float    z;     // 4        -> 368
+
+};
+
+StructuredBuffer<SBuffer> MySBuffer;
+
+// CHECK:      OpDecorate %_arr_mat2v3float_uint_3 ArrayStride 32
+// CHECK:      OpDecorate %_arr_mat2v2float_uint_4 ArrayStride 16
+
+// CHECK:      OpMemberDecorate %R 0 Offset 0
+
+// CHECK:      OpDecorate %_arr_R_uint_3 ArrayStride 8
+
+// CHECK:      OpMemberDecorate %S 0 Offset 0
+// CHECK-NEXT: OpMemberDecorate %S 1 Offset 8
+// CHECK-NEXT: OpMemberDecorate %S 2 Offset 16
+// CHECK-NEXT: OpMemberDecorate %S 3 Offset 28
+
+// CHECK:      OpMemberDecorate %T 0 Offset 0
+// CHECK-NEXT: OpMemberDecorate %T 1 Offset 8
+// CHECK-NEXT: OpMemberDecorate %T 2 Offset 32
+// CHECK-NEXT: OpMemberDecorate %T 2 MatrixStride 16
+// CHECK-NEXT: OpMemberDecorate %T 2 RowMajor
+// CHECK-NEXT: OpMemberDecorate %T 3 Offset 64
+// CHECK-NEXT: OpMemberDecorate %T 4 Offset 96
+
+// CHECK:      OpMemberDecorate %SBuffer 0 Offset 0
+// CHECK-NEXT: OpMemberDecorate %SBuffer 1 Offset 4
+// CHECK-NEXT: OpMemberDecorate %SBuffer 2 Offset 16
+// CHECK-NEXT: OpMemberDecorate %SBuffer 3 Offset 32
+// CHECK-NEXT: OpMemberDecorate %SBuffer 3 MatrixStride 16
+// CHECK-NEXT: OpMemberDecorate %SBuffer 3 ColMajor
+// CHECK-NEXT: OpMemberDecorate %SBuffer 4 Offset 64
+// CHECK-NEXT: OpMemberDecorate %SBuffer 4 MatrixStride 8
+// CHECK-NEXT: OpMemberDecorate %SBuffer 4 RowMajor
+// CHECK-NEXT: OpMemberDecorate %SBuffer 5 Offset 88
+// CHECK-NEXT: OpMemberDecorate %SBuffer 6 Offset 96
+// CHECK-NEXT: OpMemberDecorate %SBuffer 6 MatrixStride 16
+// CHECK-NEXT: OpMemberDecorate %SBuffer 6 ColMajor
+// CHECK-NEXT: OpMemberDecorate %SBuffer 7 Offset 192
+// CHECK-NEXT: OpMemberDecorate %SBuffer 7 MatrixStride 8
+// CHECK-NEXT: OpMemberDecorate %SBuffer 7 RowMajor
+// CHECK-NEXT: OpMemberDecorate %SBuffer 8 Offset 256
+// CHECK-NEXT: OpMemberDecorate %SBuffer 9 Offset 368
+
+// CHECK:      OpDecorate %_runtimearr_SBuffer ArrayStride 384
+
+// CHECK:      OpMemberDecorate %type_StructuredBuffer_SBuffer 0 Offset 0
+// CHECK-NEXT: OpMemberDecorate %type_StructuredBuffer_SBuffer 0 NonWritable
+// CHECK-NEXT: OpDecorate %type_StructuredBuffer_SBuffer BufferBlock
+
+float main() : SV_Target {
+    return 1.0;
+}

+ 21 - 3
tools/clang/unittests/SPIRV/CodeGenSPIRVTest.cpp

@@ -52,6 +52,9 @@ TEST_F(FileTest, CBufferType) { runFileTest("type.cbuffer.hlsl"); }
 TEST_F(FileTest, ConstantBufferType) {
   runFileTest("type.constant-buffer.hlsl");
 }
+TEST_F(FileTest, StructuredBufferType) {
+  runFileTest("type.structured-buffer.hlsl");
+}
 TEST_F(FileTest, ByteAddressBufferTypes) {
   runFileTest("type.byte-address-buffer.hlsl");
 }
@@ -198,13 +201,19 @@ TEST_F(FileTest, OpMatrixAccess1x1) {
 // For struct & array accessing operator
 TEST_F(FileTest, OpStructAccess) { runFileTest("op.struct.access.hlsl"); }
 TEST_F(FileTest, OpArrayAccess) { runFileTest("op.array.access.hlsl"); }
+
+// For buffer accessing operator
+TEST_F(FileTest, OpBufferAccess) { runFileTest("op.buffer.access.hlsl"); }
 TEST_F(FileTest, OpCBufferAccess) { runFileTest("op.cbuffer.access.hlsl"); }
 TEST_F(FileTest, OpConstantBufferAccess) {
   runFileTest("op.constant-buffer.access.hlsl");
 }
-
-// For Buffer/RWBuffer accessing operator
-TEST_F(FileTest, OpBufferAccess) { runFileTest("op.buffer.access.hlsl"); }
+TEST_F(FileTest, OpStructuredBufferAccess) {
+  runFileTest("op.structured-buffer.access.hlsl");
+}
+TEST_F(FileTest, OpRWStructuredBufferAccess) {
+  runFileTest("op.rw-structured-buffer.access.hlsl");
+}
 
 // For casting
 TEST_F(FileTest, CastNoOp) { runFileTest("cast.no-op.hlsl"); }
@@ -344,6 +353,9 @@ TEST_F(FileTest, TextureArraySampleGrad) {
   runFileTest("texture.array.sample-grad.hlsl");
 }
 
+TEST_F(FileTest, StructuredBufferLoad) {
+  runFileTest("method.structured-buffer.load.hlsl");
+}
 // For ByteAddressBuffer methods
 TEST_F(FileTest, ByteAddressBufferLoad) {
   runFileTest("method.byte-address-buffer.load.hlsl");
@@ -469,5 +481,11 @@ TEST_F(FileTest, VulkanLayoutCBufferStd140) {
 TEST_F(FileTest, VulkanLayoutCBufferNestedStd140) {
   runFileTest("vk.layout.cbuffer.nested.std140.hlsl");
 }
+TEST_F(FileTest, VulkanLayoutSBufferStd430) {
+  runFileTest("vk.layout.sbuffer.std430.hlsl");
+}
+TEST_F(FileTest, VulkanLayoutSBufferNestedStd430) {
+  runFileTest("vk.layout.sbuffer.nested.std430.hlsl");
+}
 
 } // namespace