Browse Source

Updated spirv-tools.

Бранимир Караџић 5 years ago
parent
commit
31c54015c7
31 changed files with 1393 additions and 423 deletions
  1. 1 1
      3rdparty/spirv-tools/include/generated/build-version.inc
  2. 4 0
      3rdparty/spirv-tools/source/fuzz/CMakeLists.txt
  3. 20 3
      3rdparty/spirv-tools/source/fuzz/fact_manager.cpp
  4. 4 0
      3rdparty/spirv-tools/source/fuzz/fuzzer.cpp
  5. 3 0
      3rdparty/spirv-tools/source/fuzz/fuzzer_context.cpp
  6. 16 0
      3rdparty/spirv-tools/source/fuzz/fuzzer_context.h
  7. 2 2
      3rdparty/spirv-tools/source/fuzz/fuzzer_pass.h
  8. 48 0
      3rdparty/spirv-tools/source/fuzz/fuzzer_pass_adjust_branch_weights.cpp
  9. 41 0
      3rdparty/spirv-tools/source/fuzz/fuzzer_pass_adjust_branch_weights.h
  10. 14 0
      3rdparty/spirv-tools/source/fuzz/protobufs/spvtoolsfuzz.proto
  11. 4 0
      3rdparty/spirv-tools/source/fuzz/transformation.cpp
  12. 97 0
      3rdparty/spirv-tools/source/fuzz/transformation_adjust_branch_weights.cpp
  13. 57 0
      3rdparty/spirv-tools/source/fuzz/transformation_adjust_branch_weights.h
  14. 4 0
      3rdparty/spirv-tools/source/opt/function.h
  15. 308 351
      3rdparty/spirv-tools/source/opt/inline_pass.cpp
  16. 58 4
      3rdparty/spirv-tools/source/opt/inline_pass.h
  17. 13 0
      3rdparty/spirv-tools/source/opt/instruction.cpp
  18. 14 0
      3rdparty/spirv-tools/source/opt/instruction.h
  19. 5 2
      3rdparty/spirv-tools/source/opt/merge_return_pass.cpp
  20. 4 1
      3rdparty/spirv-tools/source/opt/wrap_opkill.cpp
  21. 7 3
      3rdparty/spirv-tools/source/reduce/CMakeLists.txt
  22. 1 1
      3rdparty/spirv-tools/source/reduce/pch_source_reduce.h
  23. 8 5
      3rdparty/spirv-tools/source/reduce/reducer.cpp
  24. 12 0
      3rdparty/spirv-tools/source/reduce/remove_instruction_reduction_opportunity.cpp
  25. 208 0
      3rdparty/spirv-tools/source/reduce/remove_struct_member_reduction_opportunity.cpp
  26. 84 0
      3rdparty/spirv-tools/source/reduce/remove_struct_member_reduction_opportunity.h
  27. 59 42
      3rdparty/spirv-tools/source/reduce/remove_unused_instruction_reduction_opportunity_finder.cpp
  28. 19 6
      3rdparty/spirv-tools/source/reduce/remove_unused_instruction_reduction_opportunity_finder.h
  29. 193 0
      3rdparty/spirv-tools/source/reduce/remove_unused_struct_member_reduction_opportunity_finder.cpp
  30. 61 0
      3rdparty/spirv-tools/source/reduce/remove_unused_struct_member_reduction_opportunity_finder.h
  31. 24 2
      3rdparty/spirv-tools/source/val/validate_scopes.cpp

+ 1 - 1
3rdparty/spirv-tools/include/generated/build-version.inc

@@ -1 +1 @@
-"v2020.3-dev", "SPIRV-Tools v2020.3-dev 7f7c8a9bc032be95861952467a66f0c77560ec04"
+"v2020.3-dev", "SPIRV-Tools v2020.3-dev 8848e0dad1e2109f2c8d9e3cfb84ad016ce6f8cf"

+ 4 - 0
3rdparty/spirv-tools/source/fuzz/CMakeLists.txt

@@ -50,6 +50,7 @@ if(SPIRV_BUILD_FUZZER)
         fuzzer_pass_add_no_contraction_decorations.h
         fuzzer_pass_add_stores.h
         fuzzer_pass_add_useful_constructs.h
+        fuzzer_pass_adjust_branch_weights.h
         fuzzer_pass_adjust_function_controls.h
         fuzzer_pass_adjust_loop_controls.h
         fuzzer_pass_adjust_memory_operands_masks.h
@@ -98,6 +99,7 @@ if(SPIRV_BUILD_FUZZER)
         transformation_add_type_pointer.h
         transformation_add_type_struct.h
         transformation_add_type_vector.h
+        transformation_adjust_branch_weights.h
         transformation_composite_construct.h
         transformation_composite_extract.h
         transformation_compute_data_synonym_fact_closure.h
@@ -145,6 +147,7 @@ if(SPIRV_BUILD_FUZZER)
         fuzzer_pass_add_no_contraction_decorations.cpp
         fuzzer_pass_add_stores.cpp
         fuzzer_pass_add_useful_constructs.cpp
+        fuzzer_pass_adjust_branch_weights.cpp
         fuzzer_pass_adjust_function_controls.cpp
         fuzzer_pass_adjust_loop_controls.cpp
         fuzzer_pass_adjust_memory_operands_masks.cpp
@@ -192,6 +195,7 @@ if(SPIRV_BUILD_FUZZER)
         transformation_add_type_pointer.cpp
         transformation_add_type_struct.cpp
         transformation_add_type_vector.cpp
+        transformation_adjust_branch_weights.cpp
         transformation_composite_construct.cpp
         transformation_composite_extract.cpp
         transformation_compute_data_synonym_fact_closure.cpp

+ 20 - 3
3rdparty/spirv-tools/source/fuzz/fact_manager.cpp

@@ -159,9 +159,26 @@ uint32_t FactManager::ConstantUniformFacts::GetConstantId(
     uint32_t type_id) const {
   auto type = context->get_type_mgr()->GetType(type_id);
   assert(type != nullptr && "Unknown type id.");
-  auto constant = context->get_constant_mgr()->GetConstant(
-      type, GetConstantWords(constant_uniform_fact));
-  return context->get_constant_mgr()->FindDeclaredConstant(constant, type_id);
+  const opt::analysis::Constant* known_constant;
+  if (type->AsInteger()) {
+    opt::analysis::IntConstant candidate_constant(
+        type->AsInteger(), GetConstantWords(constant_uniform_fact));
+    known_constant =
+        context->get_constant_mgr()->FindConstant(&candidate_constant);
+  } else {
+    assert(
+        type->AsFloat() &&
+        "Uniform constant facts are only supported for int and float types.");
+    opt::analysis::FloatConstant candidate_constant(
+        type->AsFloat(), GetConstantWords(constant_uniform_fact));
+    known_constant =
+        context->get_constant_mgr()->FindConstant(&candidate_constant);
+  }
+  if (!known_constant) {
+    return 0;
+  }
+  return context->get_constant_mgr()->FindDeclaredConstant(known_constant,
+                                                           type_id);
 }
 
 std::vector<uint32_t> FactManager::ConstantUniformFacts::GetConstantWords(

+ 4 - 0
3rdparty/spirv-tools/source/fuzz/fuzzer.cpp

@@ -34,6 +34,7 @@
 #include "source/fuzz/fuzzer_pass_add_no_contraction_decorations.h"
 #include "source/fuzz/fuzzer_pass_add_stores.h"
 #include "source/fuzz/fuzzer_pass_add_useful_constructs.h"
+#include "source/fuzz/fuzzer_pass_adjust_branch_weights.h"
 #include "source/fuzz/fuzzer_pass_adjust_function_controls.h"
 #include "source/fuzz/fuzzer_pass_adjust_loop_controls.h"
 #include "source/fuzz/fuzzer_pass_adjust_selection_controls.h"
@@ -281,6 +282,9 @@ Fuzzer::FuzzerResultStatus Fuzzer::Run(
   // Now apply some passes that it does not make sense to apply repeatedly,
   // as they do not unlock other passes.
   std::vector<std::unique_ptr<FuzzerPass>> final_passes;
+  MaybeAddPass<FuzzerPassAdjustBranchWeights>(
+      &final_passes, ir_context.get(), &transformation_context, &fuzzer_context,
+      transformation_sequence_out);
   MaybeAddPass<FuzzerPassAdjustFunctionControls>(
       &final_passes, ir_context.get(), &transformation_context, &fuzzer_context,
       transformation_sequence_out);

+ 3 - 0
3rdparty/spirv-tools/source/fuzz/fuzzer_context.cpp

@@ -40,6 +40,7 @@ const std::pair<uint32_t, uint32_t> kChanceOfAddingNoContractionDecoration = {
     5, 70};
 const std::pair<uint32_t, uint32_t> kChanceOfAddingStore = {5, 50};
 const std::pair<uint32_t, uint32_t> kChanceOfAddingVectorType = {20, 70};
+const std::pair<uint32_t, uint32_t> kChanceOfAdjustingBranchWeights = {20, 90};
 const std::pair<uint32_t, uint32_t> kChanceOfAdjustingFunctionControl = {20,
                                                                          70};
 const std::pair<uint32_t, uint32_t> kChanceOfAdjustingLoopControl = {20, 90};
@@ -124,6 +125,8 @@ FuzzerContext::FuzzerContext(RandomGenerator* random_generator,
   chance_of_adding_store_ = ChooseBetweenMinAndMax(kChanceOfAddingStore);
   chance_of_adding_vector_type_ =
       ChooseBetweenMinAndMax(kChanceOfAddingVectorType);
+  chance_of_adjusting_branch_weights_ =
+      ChooseBetweenMinAndMax(kChanceOfAdjustingBranchWeights);
   chance_of_adjusting_function_control_ =
       ChooseBetweenMinAndMax(kChanceOfAdjustingFunctionControl);
   chance_of_adjusting_loop_control_ =

+ 16 - 0
3rdparty/spirv-tools/source/fuzz/fuzzer_context.h

@@ -136,6 +136,9 @@ class FuzzerContext {
   uint32_t GetChanceOfAddingVectorType() {
     return chance_of_adding_vector_type_;
   }
+  uint32_t GetChanceOfAdjustingBranchWeights() {
+    return chance_of_adjusting_branch_weights_;
+  }
   uint32_t GetChanceOfAdjustingFunctionControl() {
     return chance_of_adjusting_function_control_;
   }
@@ -201,6 +204,18 @@ class FuzzerContext {
   uint32_t GetRandomLoopLimit() {
     return random_generator_->RandomUint32(max_loop_limit_);
   }
+  std::pair<uint32_t, uint32_t> GetRandomBranchWeights() {
+    std::pair<uint32_t, uint32_t> branch_weights = {0, 0};
+
+    while (branch_weights.first == 0 && branch_weights.second == 0) {
+      // Using INT32_MAX so that the sum of the two branch weights cannot
+      // overflow UINT32_MAX when they are added together.
+      branch_weights.first = random_generator_->RandomUint32(INT32_MAX);
+      branch_weights.second = random_generator_->RandomUint32(INT32_MAX);
+    }
+
+    return branch_weights;
+  }
   uint32_t GetRandomSizeForNewArray() {
     // Ensure that the array size is non-zero.
     return random_generator_->RandomUint32(max_new_array_size_limit_ - 1) + 1;
@@ -231,6 +246,7 @@ class FuzzerContext {
   uint32_t chance_of_adding_no_contraction_decoration_;
   uint32_t chance_of_adding_store_;
   uint32_t chance_of_adding_vector_type_;
+  uint32_t chance_of_adjusting_branch_weights_;
   uint32_t chance_of_adjusting_function_control_;
   uint32_t chance_of_adjusting_loop_control_;
   uint32_t chance_of_adjusting_memory_operands_mask_;

+ 2 - 2
3rdparty/spirv-tools/source/fuzz/fuzzer_pass.h

@@ -20,6 +20,7 @@
 
 #include "source/fuzz/fuzzer_context.h"
 #include "source/fuzz/protobufs/spirvfuzz_protobufs.h"
+#include "source/fuzz/transformation.h"
 #include "source/fuzz/transformation_context.h"
 #include "source/opt/ir_context.h"
 
@@ -94,8 +95,7 @@ class FuzzerPass {
 
   // A generic helper for applying a transformation that should be applicable
   // by construction, and adding it to the sequence of applied transformations.
-  template <typename TransformationType>
-  void ApplyTransformation(const TransformationType& transformation) {
+  void ApplyTransformation(const Transformation& transformation) {
     assert(transformation.IsApplicable(GetIRContext(),
                                        *GetTransformationContext()) &&
            "Transformation should be applicable by construction.");

+ 48 - 0
3rdparty/spirv-tools/source/fuzz/fuzzer_pass_adjust_branch_weights.cpp

@@ -0,0 +1,48 @@
+// Copyright (c) 2020 André Perez Maselco
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "source/fuzz/fuzzer_pass_adjust_branch_weights.h"
+
+#include "source/fuzz/fuzzer_util.h"
+#include "source/fuzz/instruction_descriptor.h"
+#include "source/fuzz/transformation_adjust_branch_weights.h"
+
+namespace spvtools {
+namespace fuzz {
+
+FuzzerPassAdjustBranchWeights::FuzzerPassAdjustBranchWeights(
+    opt::IRContext* ir_context, TransformationContext* transformation_context,
+    FuzzerContext* fuzzer_context,
+    protobufs::TransformationSequence* transformations)
+    : FuzzerPass(ir_context, transformation_context, fuzzer_context,
+                 transformations) {}
+
+FuzzerPassAdjustBranchWeights::~FuzzerPassAdjustBranchWeights() = default;
+
+void FuzzerPassAdjustBranchWeights::Apply() {
+  // For each OpBranchConditional instruction,
+  // randomly decide whether to apply the transformation.
+  GetIRContext()->module()->ForEachInst([this](opt::Instruction* instruction) {
+    if (instruction->opcode() == SpvOpBranchConditional &&
+        GetFuzzerContext()->ChoosePercentage(
+            GetFuzzerContext()->GetChanceOfAdjustingBranchWeights())) {
+      ApplyTransformation(TransformationAdjustBranchWeights(
+          MakeInstructionDescriptor(GetIRContext(), instruction),
+          GetFuzzerContext()->GetRandomBranchWeights()));
+    }
+  });
+}
+
+}  // namespace fuzz
+}  // namespace spvtools

+ 41 - 0
3rdparty/spirv-tools/source/fuzz/fuzzer_pass_adjust_branch_weights.h

@@ -0,0 +1,41 @@
+// Copyright (c) 2020 André Perez Maselco
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SOURCE_FUZZ_FUZZER_PASS_ADJUST_BRANCH_WEIGHTS_H_
+#define SOURCE_FUZZ_FUZZER_PASS_ADJUST_BRANCH_WEIGHTS_H_
+
+#include "source/fuzz/fuzzer_pass.h"
+
+namespace spvtools {
+namespace fuzz {
+
+// This fuzzer pass searches for branch conditional instructions
+// and randomly chooses which of these instructions will have their weights
+// adjusted.
+class FuzzerPassAdjustBranchWeights : public FuzzerPass {
+ public:
+  FuzzerPassAdjustBranchWeights(
+      opt::IRContext* ir_context, TransformationContext* transformation_context,
+      FuzzerContext* fuzzer_context,
+      protobufs::TransformationSequence* transformations);
+
+  ~FuzzerPassAdjustBranchWeights();
+
+  void Apply() override;
+};
+
+}  // namespace fuzz
+}  // namespace spvtools
+
+#endif  // SOURCE_FUZZ_FUZZER_PASS_ADJUST_BRANCH_WEIGHTS_H_

+ 14 - 0
3rdparty/spirv-tools/source/fuzz/protobufs/spvtoolsfuzz.proto

@@ -374,6 +374,7 @@ message Transformation {
     TransformationToggleAccessChainInstruction toggle_access_chain_instruction = 43;
     TransformationAddConstantNull add_constant_null = 44;
     TransformationComputeDataSynonymFactClosure compute_data_synonym_fact_closure = 45;
+    TransformationAdjustBranchWeights adjust_branch_weights = 46;
     // Add additional option using the next available number.
   }
 }
@@ -742,6 +743,19 @@ message TransformationAddTypeVector {
 
 }
 
+message TransformationAdjustBranchWeights {
+
+  // A transformation that adjusts the branch weights
+  // of a branch conditional instruction.
+
+  // A descriptor for a branch conditional instruction.
+  InstructionDescriptor instruction_descriptor = 1;
+
+  // Branch weights of a branch conditional instruction.
+  UInt32Pair branch_weights = 2;
+
+}
+
 message TransformationCompositeConstruct {
 
   // A transformation that introduces an OpCompositeConstruct instruction to

+ 4 - 0
3rdparty/spirv-tools/source/fuzz/transformation.cpp

@@ -39,6 +39,7 @@
 #include "source/fuzz/transformation_add_type_pointer.h"
 #include "source/fuzz/transformation_add_type_struct.h"
 #include "source/fuzz/transformation_add_type_vector.h"
+#include "source/fuzz/transformation_adjust_branch_weights.h"
 #include "source/fuzz/transformation_composite_construct.h"
 #include "source/fuzz/transformation_composite_extract.h"
 #include "source/fuzz/transformation_compute_data_synonym_fact_closure.h"
@@ -129,6 +130,9 @@ std::unique_ptr<Transformation> Transformation::FromMessage(
       return MakeUnique<TransformationAddTypeStruct>(message.add_type_struct());
     case protobufs::Transformation::TransformationCase::kAddTypeVector:
       return MakeUnique<TransformationAddTypeVector>(message.add_type_vector());
+    case protobufs::Transformation::TransformationCase::kAdjustBranchWeights:
+      return MakeUnique<TransformationAdjustBranchWeights>(
+          message.adjust_branch_weights());
     case protobufs::Transformation::TransformationCase::kCompositeConstruct:
       return MakeUnique<TransformationCompositeConstruct>(
           message.composite_construct());

+ 97 - 0
3rdparty/spirv-tools/source/fuzz/transformation_adjust_branch_weights.cpp

@@ -0,0 +1,97 @@
+// Copyright (c) 2020 André Perez Maselco
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "source/fuzz/transformation_adjust_branch_weights.h"
+
+#include "source/fuzz/fuzzer_util.h"
+#include "source/fuzz/instruction_descriptor.h"
+
+namespace spvtools {
+namespace fuzz {
+
+namespace {
+
+const uint32_t kBranchWeightForTrueLabelIndex = 3;
+const uint32_t kBranchWeightForFalseLabelIndex = 4;
+
+}  // namespace
+
+TransformationAdjustBranchWeights::TransformationAdjustBranchWeights(
+    const spvtools::fuzz::protobufs::TransformationAdjustBranchWeights& message)
+    : message_(message) {}
+
+TransformationAdjustBranchWeights::TransformationAdjustBranchWeights(
+    const protobufs::InstructionDescriptor& instruction_descriptor,
+    const std::pair<uint32_t, uint32_t>& branch_weights) {
+  *message_.mutable_instruction_descriptor() = instruction_descriptor;
+  message_.mutable_branch_weights()->set_first(branch_weights.first);
+  message_.mutable_branch_weights()->set_second(branch_weights.second);
+}
+
+bool TransformationAdjustBranchWeights::IsApplicable(
+    opt::IRContext* ir_context, const TransformationContext& /*unused*/) const {
+  auto instruction =
+      FindInstruction(message_.instruction_descriptor(), ir_context);
+  if (instruction == nullptr) {
+    return false;
+  }
+
+  SpvOp opcode = static_cast<SpvOp>(
+      message_.instruction_descriptor().target_instruction_opcode());
+
+  assert(instruction->opcode() == opcode &&
+         "The located instruction must have the same opcode as in the "
+         "descriptor.");
+
+  // Must be an OpBranchConditional instruction.
+  if (opcode != SpvOpBranchConditional) {
+    return false;
+  }
+
+  assert((message_.branch_weights().first() != 0 ||
+          message_.branch_weights().second() != 0) &&
+         "At least one weight must be non-zero.");
+
+  assert(message_.branch_weights().first() <=
+             UINT32_MAX - message_.branch_weights().second() &&
+         "The sum of the two weights must not be greater than UINT32_MAX.");
+
+  return true;
+}
+
+void TransformationAdjustBranchWeights::Apply(
+    opt::IRContext* ir_context, TransformationContext* /*unused*/) const {
+  auto instruction =
+      FindInstruction(message_.instruction_descriptor(), ir_context);
+  if (instruction->HasBranchWeights()) {
+    instruction->SetOperand(kBranchWeightForTrueLabelIndex,
+                            {message_.branch_weights().first()});
+    instruction->SetOperand(kBranchWeightForFalseLabelIndex,
+                            {message_.branch_weights().second()});
+  } else {
+    instruction->AddOperand({SPV_OPERAND_TYPE_OPTIONAL_LITERAL_INTEGER,
+                             {message_.branch_weights().first()}});
+    instruction->AddOperand({SPV_OPERAND_TYPE_OPTIONAL_LITERAL_INTEGER,
+                             {message_.branch_weights().second()}});
+  }
+}
+
+protobufs::Transformation TransformationAdjustBranchWeights::ToMessage() const {
+  protobufs::Transformation result;
+  *result.mutable_adjust_branch_weights() = message_;
+  return result;
+}
+
+}  // namespace fuzz
+}  // namespace spvtools

+ 57 - 0
3rdparty/spirv-tools/source/fuzz/transformation_adjust_branch_weights.h

@@ -0,0 +1,57 @@
+// Copyright (c) 2020 André Perez Maselco
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SOURCE_FUZZ_TRANSFORMATION_ADJUST_BRANCH_WEIGHTS_H_
+#define SOURCE_FUZZ_TRANSFORMATION_ADJUST_BRANCH_WEIGHTS_H_
+
+#include "source/fuzz/protobufs/spirvfuzz_protobufs.h"
+#include "source/fuzz/transformation.h"
+#include "source/fuzz/transformation_context.h"
+#include "source/opt/ir_context.h"
+
+namespace spvtools {
+namespace fuzz {
+
+class TransformationAdjustBranchWeights : public Transformation {
+ public:
+  explicit TransformationAdjustBranchWeights(
+      const protobufs::TransformationAdjustBranchWeights& message);
+
+  TransformationAdjustBranchWeights(
+      const protobufs::InstructionDescriptor& instruction_descriptor,
+      const std::pair<uint32_t, uint32_t>& branch_weights);
+
+  // - |message_.instruction_descriptor| must identify an existing
+  //   branch conditional instruction
+  // - At least one of |branch_weights| must be non-zero and
+  //   the two weights must not overflow a 32-bit unsigned integer when added
+  //   together
+  bool IsApplicable(
+      opt::IRContext* ir_context,
+      const TransformationContext& transformation_context) const override;
+
+  // Adjust the branch weights of a branch conditional instruction.
+  void Apply(opt::IRContext* ir_context,
+             TransformationContext* transformation_context) const override;
+
+  protobufs::Transformation ToMessage() const override;
+
+ private:
+  protobufs::TransformationAdjustBranchWeights message_;
+};
+
+}  // namespace fuzz
+}  // namespace spvtools
+
+#endif  // SOURCE_FUZZ_TRANSFORMATION_ADJUST_BRANCH_WEIGHTS_H_

+ 4 - 0
3rdparty/spirv-tools/source/opt/function.h

@@ -88,6 +88,10 @@ class Function {
   // Returns the entry basic block for this function.
   const std::unique_ptr<BasicBlock>& entry() const { return blocks_.front(); }
 
+  // Returns the last basic block in this function.
+  BasicBlock* tail() { return blocks_.back().get(); }
+  const BasicBlock* tail() const { return blocks_.back().get(); }
+
   iterator begin() { return iterator(&blocks_, blocks_.begin()); }
   iterator end() { return iterator(&blocks_, blocks_.end()); }
   const_iterator begin() const { return cbegin(); }

+ 308 - 351
3rdparty/spirv-tools/source/opt/inline_pass.cpp

@@ -20,6 +20,7 @@
 #include <utility>
 
 #include "source/cfa.h"
+#include "source/opt/reflect.h"
 #include "source/util/make_unique.h"
 
 // Indices of operands in SPIR-V instructions
@@ -232,6 +233,220 @@ bool InlinePass::CloneSameBlockOps(
   });
 }
 
+void InlinePass::MoveInstsBeforeEntryBlock(
+    std::unordered_map<uint32_t, Instruction*>* preCallSB,
+    BasicBlock* new_blk_ptr, BasicBlock::iterator call_inst_itr,
+    UptrVectorIterator<BasicBlock> call_block_itr) {
+  for (auto cii = call_block_itr->begin(); cii != call_inst_itr;
+       cii = call_block_itr->begin()) {
+    Instruction* inst = &*cii;
+    inst->RemoveFromList();
+    std::unique_ptr<Instruction> cp_inst(inst);
+    // Remember same-block ops for possible regeneration.
+    if (IsSameBlockOp(&*cp_inst)) {
+      auto* sb_inst_ptr = cp_inst.get();
+      (*preCallSB)[cp_inst->result_id()] = sb_inst_ptr;
+    }
+    new_blk_ptr->AddInstruction(std::move(cp_inst));
+  }
+}
+
+std::unique_ptr<BasicBlock> InlinePass::AddGuardBlock(
+    std::vector<std::unique_ptr<BasicBlock>>* new_blocks,
+    std::unordered_map<uint32_t, uint32_t>* callee2caller,
+    std::unique_ptr<BasicBlock> new_blk_ptr, uint32_t entry_blk_label_id) {
+  const auto guard_block_id = context()->TakeNextId();
+  if (guard_block_id == 0) {
+    return nullptr;
+  }
+  AddBranch(guard_block_id, &new_blk_ptr);
+  new_blocks->push_back(std::move(new_blk_ptr));
+  // Start the next block.
+  new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(guard_block_id));
+  // Reset the mapping of the callee's entry block to point to
+  // the guard block.  Do this so we can fix up phis later on to
+  // satisfy dominance.
+  (*callee2caller)[entry_blk_label_id] = guard_block_id;
+  return new_blk_ptr;
+}
+
+InstructionList::iterator InlinePass::AddStoresForVariableInitializers(
+    const std::unordered_map<uint32_t, uint32_t>& callee2caller,
+    std::unique_ptr<BasicBlock>* new_blk_ptr,
+    UptrVectorIterator<BasicBlock> callee_first_block_itr) {
+  auto callee_var_itr = callee_first_block_itr->begin();
+  while (callee_var_itr->opcode() == SpvOp::SpvOpVariable) {
+    if (callee_var_itr->NumInOperands() == 2) {
+      assert(callee2caller.count(callee_var_itr->result_id()) &&
+             "Expected the variable to have already been mapped.");
+      uint32_t new_var_id = callee2caller.at(callee_var_itr->result_id());
+
+      // The initializer must be a constant or global value.  No mapped
+      // id should be used.
+      uint32_t val_id = callee_var_itr->GetSingleWordInOperand(1);
+      AddStore(new_var_id, val_id, new_blk_ptr);
+    }
+    ++callee_var_itr;
+  }
+  return callee_var_itr;
+}
+
+bool InlinePass::InlineInstructionInBB(
+    const std::unordered_map<uint32_t, uint32_t>& callee2caller,
+    BasicBlock* new_blk_ptr, const Instruction* inst) {
+  // If we have return, it must be at the end of the callee. We will handle
+  // it at the end.
+  if (inst->opcode() == SpvOpReturnValue || inst->opcode() == SpvOpReturn)
+    return true;
+
+  // Copy callee instruction and remap all input Ids.
+  std::unique_ptr<Instruction> cp_inst(inst->Clone(context()));
+  cp_inst->ForEachInId([&callee2caller](uint32_t* iid) {
+    const auto mapItr = callee2caller.find(*iid);
+    if (mapItr != callee2caller.end()) {
+      *iid = mapItr->second;
+    }
+  });
+  // If result id is non-zero, remap it.
+  const uint32_t rid = cp_inst->result_id();
+  if (rid != 0) {
+    const auto mapItr = callee2caller.find(rid);
+    if (mapItr == callee2caller.end()) return false;
+    uint32_t nid = mapItr->second;
+    cp_inst->SetResultId(nid);
+    get_decoration_mgr()->CloneDecorations(rid, nid);
+  }
+  new_blk_ptr->AddInstruction(std::move(cp_inst));
+  return true;
+}
+
+std::unique_ptr<BasicBlock> InlinePass::InlineReturn(
+    const std::unordered_map<uint32_t, uint32_t>& callee2caller,
+    std::vector<std::unique_ptr<BasicBlock>>* new_blocks,
+    std::unique_ptr<BasicBlock> new_blk_ptr, Function* calleeFn,
+    const Instruction* inst, uint32_t returnVarId) {
+  // Store return value to return variable.
+  if (inst->opcode() == SpvOpReturnValue) {
+    assert(returnVarId != 0);
+    uint32_t valId = inst->GetInOperand(kSpvReturnValueId).words[0];
+    const auto mapItr = callee2caller.find(valId);
+    if (mapItr != callee2caller.end()) {
+      valId = mapItr->second;
+    }
+    AddStore(returnVarId, valId, &new_blk_ptr);
+  }
+
+  uint32_t returnLabelId = 0;
+  for (auto callee_block_itr = calleeFn->begin();
+       callee_block_itr != calleeFn->end(); ++callee_block_itr) {
+    if (callee_block_itr->tail()->opcode() == SpvOpUnreachable ||
+        callee_block_itr->tail()->opcode() == SpvOpKill) {
+      returnLabelId = context()->TakeNextId();
+      break;
+    }
+  }
+  if (returnLabelId == 0) return new_blk_ptr;
+
+  if (inst->opcode() == SpvOpReturn || inst->opcode() == SpvOpReturnValue)
+    AddBranch(returnLabelId, &new_blk_ptr);
+  new_blocks->push_back(std::move(new_blk_ptr));
+  return MakeUnique<BasicBlock>(NewLabel(returnLabelId));
+}
+
+bool InlinePass::InlineEntryBlock(
+    const std::unordered_map<uint32_t, uint32_t>& callee2caller,
+    std::unique_ptr<BasicBlock>* new_blk_ptr,
+    UptrVectorIterator<BasicBlock> callee_first_block) {
+  auto callee_inst_itr = AddStoresForVariableInitializers(
+      callee2caller, new_blk_ptr, callee_first_block);
+
+  while (callee_inst_itr != callee_first_block->end()) {
+    if (!InlineInstructionInBB(callee2caller, new_blk_ptr->get(),
+                               &*callee_inst_itr)) {
+      return false;
+    }
+    ++callee_inst_itr;
+  }
+  return true;
+}
+
+std::unique_ptr<BasicBlock> InlinePass::InlineBasicBlocks(
+    std::vector<std::unique_ptr<BasicBlock>>* new_blocks,
+    const std::unordered_map<uint32_t, uint32_t>& callee2caller,
+    std::unique_ptr<BasicBlock> new_blk_ptr, Function* calleeFn) {
+  auto callee_block_itr = calleeFn->begin();
+  ++callee_block_itr;
+
+  while (callee_block_itr != calleeFn->end()) {
+    new_blocks->push_back(std::move(new_blk_ptr));
+    const auto mapItr =
+        callee2caller.find(callee_block_itr->GetLabelInst()->result_id());
+    if (mapItr == callee2caller.end()) return nullptr;
+    new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(mapItr->second));
+
+    auto tail_inst_itr = callee_block_itr->end();
+    for (auto inst_itr = callee_block_itr->begin(); inst_itr != tail_inst_itr;
+         ++inst_itr) {
+      if (!InlineInstructionInBB(callee2caller, new_blk_ptr.get(),
+                                 &*inst_itr)) {
+        return nullptr;
+      }
+    }
+
+    ++callee_block_itr;
+  }
+  return new_blk_ptr;
+}
+
+bool InlinePass::MoveCallerInstsAfterFunctionCall(
+    std::unordered_map<uint32_t, Instruction*>* preCallSB,
+    std::unordered_map<uint32_t, uint32_t>* postCallSB,
+    std::unique_ptr<BasicBlock>* new_blk_ptr,
+    BasicBlock::iterator call_inst_itr, bool multiBlocks) {
+  // Copy remaining instructions from caller block.
+  for (Instruction* inst = call_inst_itr->NextNode(); inst;
+       inst = call_inst_itr->NextNode()) {
+    inst->RemoveFromList();
+    std::unique_ptr<Instruction> cp_inst(inst);
+    // If multiple blocks generated, regenerate any same-block
+    // instruction that has not been seen in this last block.
+    if (multiBlocks) {
+      if (!CloneSameBlockOps(&cp_inst, postCallSB, preCallSB, new_blk_ptr)) {
+        return false;
+      }
+
+      // Remember same-block ops in this block.
+      if (IsSameBlockOp(&*cp_inst)) {
+        const uint32_t rid = cp_inst->result_id();
+        (*postCallSB)[rid] = rid;
+      }
+    }
+    new_blk_ptr->get()->AddInstruction(std::move(cp_inst));
+  }
+
+  return true;
+}
+
+void InlinePass::MoveLoopMergeInstToFirstBlock(
+    std::vector<std::unique_ptr<BasicBlock>>* new_blocks) {
+  // Move the OpLoopMerge from the last block back to the first, where
+  // it belongs.
+  auto& first = new_blocks->front();
+  auto& last = new_blocks->back();
+  assert(first != last);
+
+  // Insert a modified copy of the loop merge into the first block.
+  auto loop_merge_itr = last->tail();
+  --loop_merge_itr;
+  assert(loop_merge_itr->opcode() == SpvOpLoopMerge);
+  std::unique_ptr<Instruction> cp_inst(loop_merge_itr->Clone(context()));
+  first->tail().InsertBefore(std::move(cp_inst));
+
+  // Remove the loop merge from the last block.
+  loop_merge_itr->RemoveFromList();
+  delete &*loop_merge_itr;
+}
+
 bool InlinePass::GenInlineCode(
     std::vector<std::unique_ptr<BasicBlock>>* new_blocks,
     std::vector<std::unique_ptr<Instruction>>* new_vars,
@@ -250,13 +465,19 @@ bool InlinePass::GenInlineCode(
   // valid.  These operations can fail.
   context()->InvalidateAnalyses(IRContext::kAnalysisDefUse);
 
+  // If the caller is a loop header and the callee has multiple blocks, then the
+  // normal inlining logic will place the OpLoopMerge in the last of several
+  // blocks in the loop.  Instead, it should be placed at the end of the first
+  // block.  We'll wait to move the OpLoopMerge until the end of the regular
+  // inlining logic, and only if necessary.
+  bool caller_is_loop_header = call_block_itr->GetLoopMergeInst() != nullptr;
+
+  // Single-trip loop continue block
+  std::unique_ptr<BasicBlock> single_trip_loop_cont_blk;
+
   Function* calleeFn = id2function_[call_inst_itr->GetSingleWordOperand(
       kSpvFunctionCallFunctionId)];
 
-  // Check for multiple returns in the callee.
-  auto fi = early_return_funcs_.find(calleeFn->result_id());
-  const bool earlyReturn = fi != early_return_funcs_.end();
-
   // Map parameters to actual arguments.
   MapParams(calleeFn, call_inst_itr, &callee2caller);
 
@@ -266,6 +487,31 @@ bool InlinePass::GenInlineCode(
     return false;
   }
 
+  // First block needs to use label of original block
+  // but map callee label in case of phi reference.
+  uint32_t entry_blk_label_id = calleeFn->begin()->GetLabelInst()->result_id();
+  callee2caller[entry_blk_label_id] = call_block_itr->id();
+  std::unique_ptr<BasicBlock> new_blk_ptr =
+      MakeUnique<BasicBlock>(NewLabel(call_block_itr->id()));
+
+  // Move instructions of original caller block up to call instruction.
+  MoveInstsBeforeEntryBlock(&preCallSB, new_blk_ptr.get(), call_inst_itr,
+                            call_block_itr);
+
+  if (caller_is_loop_header &&
+      (*(calleeFn->begin())).GetMergeInst() != nullptr) {
+    // We can't place both the caller's merge instruction and
+    // another merge instruction in the same block.  So split the
+    // calling block. Insert an unconditional branch to a new guard
+    // block.  Later, once we know the ID of the last block,  we
+    // will move the caller's OpLoopMerge from the last generated
+    // block into the first block. We also wait to avoid
+    // invalidating various iterators.
+    new_blk_ptr = AddGuardBlock(new_blocks, &callee2caller,
+                                std::move(new_blk_ptr), entry_blk_label_id);
+    if (new_blk_ptr == nullptr) return false;
+  }
+
   // Create return var if needed.
   const uint32_t calleeTypeId = calleeFn->type_id();
   uint32_t returnVarId = 0;
@@ -277,340 +523,49 @@ bool InlinePass::GenInlineCode(
     }
   }
 
-  // Create set of callee result ids. Used to detect forward references
-  std::unordered_set<uint32_t> callee_result_ids;
-  calleeFn->ForEachInst([&callee_result_ids](const Instruction* cpi) {
+  calleeFn->WhileEachInst([&callee2caller, this](const Instruction* cpi) {
+    // Create set of callee result ids. Used to detect forward references
     const uint32_t rid = cpi->result_id();
-    if (rid != 0) callee_result_ids.insert(rid);
+    if (rid != 0 && callee2caller.find(rid) == callee2caller.end()) {
+      const uint32_t nid = context()->TakeNextId();
+      if (nid == 0) return false;
+      callee2caller[rid] = nid;
+    }
+    return true;
   });
 
-  // If the caller is a loop header and the callee has multiple blocks, then the
-  // normal inlining logic will place the OpLoopMerge in the last of several
-  // blocks in the loop.  Instead, it should be placed at the end of the first
-  // block.  We'll wait to move the OpLoopMerge until the end of the regular
-  // inlining logic, and only if necessary.
-  bool caller_is_loop_header = false;
-  if (call_block_itr->GetLoopMergeInst()) {
-    caller_is_loop_header = true;
+  // Inline the entry block of the callee function.
+  if (!InlineEntryBlock(callee2caller, &new_blk_ptr, calleeFn->begin())) {
+    return false;
   }
 
-  bool callee_begins_with_structured_header =
-      (*(calleeFn->begin())).GetMergeInst() != nullptr;
+  // Inline blocks of the callee function other than the entry block.
+  new_blk_ptr = InlineBasicBlocks(new_blocks, callee2caller,
+                                  std::move(new_blk_ptr), calleeFn);
+  if (new_blk_ptr == nullptr) return false;
 
-  // Clone and map callee code. Copy caller block code to beginning of
-  // first block and end of last block.
-  bool prevInstWasReturn = false;
-  uint32_t singleTripLoopHeaderId = 0;
-  uint32_t singleTripLoopContinueId = 0;
-  uint32_t returnLabelId = 0;
-  bool multiBlocks = false;
-  // new_blk_ptr is a new basic block in the caller.  New instructions are
-  // written to it.  It is created when we encounter the OpLabel
-  // of the first callee block.  It is appended to new_blocks only when
-  // it is complete.
-  std::unique_ptr<BasicBlock> new_blk_ptr;
-  bool successful = calleeFn->WhileEachInst(
-      [&new_blocks, &callee2caller, &call_block_itr, &call_inst_itr,
-       &new_blk_ptr, &prevInstWasReturn, &returnLabelId, &returnVarId,
-       caller_is_loop_header, callee_begins_with_structured_header,
-       &calleeTypeId, &multiBlocks, &postCallSB, &preCallSB, earlyReturn,
-       &singleTripLoopHeaderId, &singleTripLoopContinueId, &callee_result_ids,
-       this](const Instruction* cpi) {
-        switch (cpi->opcode()) {
-          case SpvOpFunction:
-          case SpvOpFunctionParameter:
-            // Already processed
-            break;
-          case SpvOpVariable:
-            if (cpi->NumInOperands() == 2) {
-              assert(callee2caller.count(cpi->result_id()) &&
-                     "Expected the variable to have already been mapped.");
-              uint32_t new_var_id = callee2caller.at(cpi->result_id());
-
-              // The initializer must be a constant or global value.  No mapped
-              // should be used.
-              uint32_t val_id = cpi->GetSingleWordInOperand(1);
-              AddStore(new_var_id, val_id, &new_blk_ptr);
-            }
-            break;
-          case SpvOpUnreachable:
-          case SpvOpKill: {
-            // Generate a return label so that we split the block with the
-            // function call. Copy the terminator into the new block.
-            if (returnLabelId == 0) {
-              returnLabelId = context()->TakeNextId();
-              if (returnLabelId == 0) {
-                return false;
-              }
-            }
-            std::unique_ptr<Instruction> terminator(
-                new Instruction(context(), cpi->opcode(), 0, 0, {}));
-            new_blk_ptr->AddInstruction(std::move(terminator));
-            break;
-          }
-          case SpvOpLabel: {
-            // If previous instruction was early return, insert branch
-            // instruction to return block.
-            if (prevInstWasReturn) {
-              if (returnLabelId == 0) {
-                returnLabelId = context()->TakeNextId();
-                if (returnLabelId == 0) {
-                  return false;
-                }
-              }
-              AddBranch(returnLabelId, &new_blk_ptr);
-              prevInstWasReturn = false;
-            }
-            // Finish current block (if it exists) and get label for next block.
-            uint32_t labelId;
-            bool firstBlock = false;
-            if (new_blk_ptr != nullptr) {
-              new_blocks->push_back(std::move(new_blk_ptr));
-              // If result id is already mapped, use it, otherwise get a new
-              // one.
-              const uint32_t rid = cpi->result_id();
-              const auto mapItr = callee2caller.find(rid);
-              labelId = (mapItr != callee2caller.end())
-                            ? mapItr->second
-                            : context()->TakeNextId();
-              if (labelId == 0) {
-                return false;
-              }
-            } else {
-              // First block needs to use label of original block
-              // but map callee label in case of phi reference.
-              labelId = call_block_itr->id();
-              callee2caller[cpi->result_id()] = labelId;
-              firstBlock = true;
-            }
-            // Create first/next block.
-            new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(labelId));
-            if (firstBlock) {
-              // Copy contents of original caller block up to call instruction.
-              for (auto cii = call_block_itr->begin(); cii != call_inst_itr;
-                   cii = call_block_itr->begin()) {
-                Instruction* inst = &*cii;
-                inst->RemoveFromList();
-                std::unique_ptr<Instruction> cp_inst(inst);
-                // Remember same-block ops for possible regeneration.
-                if (IsSameBlockOp(&*cp_inst)) {
-                  auto* sb_inst_ptr = cp_inst.get();
-                  preCallSB[cp_inst->result_id()] = sb_inst_ptr;
-                }
-                new_blk_ptr->AddInstruction(std::move(cp_inst));
-              }
-              if (caller_is_loop_header &&
-                  callee_begins_with_structured_header) {
-                // We can't place both the caller's merge instruction and
-                // another merge instruction in the same block.  So split the
-                // calling block. Insert an unconditional branch to a new guard
-                // block.  Later, once we know the ID of the last block,  we
-                // will move the caller's OpLoopMerge from the last generated
-                // block into the first block. We also wait to avoid
-                // invalidating various iterators.
-                const auto guard_block_id = context()->TakeNextId();
-                if (guard_block_id == 0) {
-                  return false;
-                }
-                AddBranch(guard_block_id, &new_blk_ptr);
-                new_blocks->push_back(std::move(new_blk_ptr));
-                // Start the next block.
-                new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(guard_block_id));
-                // Reset the mapping of the callee's entry block to point to
-                // the guard block.  Do this so we can fix up phis later on to
-                // satisfy dominance.
-                callee2caller[cpi->result_id()] = guard_block_id;
-              }
-              // If callee has early return, insert a header block for
-              // single-trip loop that will encompass callee code.  Start
-              // postheader block.
-              //
-              // Note: Consider the following combination:
-              //  - the caller is a single block loop
-              //  - the callee does not begin with a structure header
-              //  - the callee has multiple returns.
-              // We still need to split the caller block and insert a guard
-              // block. But we only need to do it once. We haven't done it yet,
-              // but the single-trip loop header will serve the same purpose.
-              if (earlyReturn) {
-                singleTripLoopHeaderId = context()->TakeNextId();
-                if (singleTripLoopHeaderId == 0) {
-                  return false;
-                }
-                AddBranch(singleTripLoopHeaderId, &new_blk_ptr);
-                new_blocks->push_back(std::move(new_blk_ptr));
-                new_blk_ptr =
-                    MakeUnique<BasicBlock>(NewLabel(singleTripLoopHeaderId));
-                returnLabelId = context()->TakeNextId();
-                singleTripLoopContinueId = context()->TakeNextId();
-                if (returnLabelId == 0 || singleTripLoopContinueId == 0) {
-                  return false;
-                }
-                AddLoopMerge(returnLabelId, singleTripLoopContinueId,
-                             &new_blk_ptr);
-                uint32_t postHeaderId = context()->TakeNextId();
-                if (postHeaderId == 0) {
-                  return false;
-                }
-                AddBranch(postHeaderId, &new_blk_ptr);
-                new_blocks->push_back(std::move(new_blk_ptr));
-                new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(postHeaderId));
-                multiBlocks = true;
-                // Reset the mapping of the callee's entry block to point to
-                // the post-header block.  Do this so we can fix up phis later
-                // on to satisfy dominance.
-                callee2caller[cpi->result_id()] = postHeaderId;
-              }
-            } else {
-              multiBlocks = true;
-            }
-          } break;
-          case SpvOpReturnValue: {
-            // Store return value to return variable.
-            assert(returnVarId != 0);
-            uint32_t valId = cpi->GetInOperand(kSpvReturnValueId).words[0];
-            const auto mapItr = callee2caller.find(valId);
-            if (mapItr != callee2caller.end()) {
-              valId = mapItr->second;
-            }
-            AddStore(returnVarId, valId, &new_blk_ptr);
-
-            // Remember we saw a return; if followed by a label, will need to
-            // insert branch.
-            prevInstWasReturn = true;
-          } break;
-          case SpvOpReturn: {
-            // Remember we saw a return; if followed by a label, will need to
-            // insert branch.
-            prevInstWasReturn = true;
-          } break;
-          case SpvOpFunctionEnd: {
-            // If there was an early return, we generated a return label id
-            // for it.  Now we have to generate the return block with that Id.
-            if (returnLabelId != 0) {
-              // If previous instruction was return, insert branch instruction
-              // to return block.
-              if (prevInstWasReturn) AddBranch(returnLabelId, &new_blk_ptr);
-              if (earlyReturn) {
-                // If we generated a loop header for the single-trip loop
-                // to accommodate early returns, insert the continue
-                // target block now, with a false branch back to the loop
-                // header.
-                new_blocks->push_back(std::move(new_blk_ptr));
-                new_blk_ptr =
-                    MakeUnique<BasicBlock>(NewLabel(singleTripLoopContinueId));
-                uint32_t false_id = GetFalseId();
-                if (false_id == 0) {
-                  return false;
-                }
-                AddBranchCond(false_id, singleTripLoopHeaderId, returnLabelId,
-                              &new_blk_ptr);
-              }
-              // Generate the return block.
-              new_blocks->push_back(std::move(new_blk_ptr));
-              new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(returnLabelId));
-              multiBlocks = true;
-            }
-            // Load return value into result id of call, if it exists.
-            if (returnVarId != 0) {
-              const uint32_t resId = call_inst_itr->result_id();
-              assert(resId != 0);
-              AddLoad(calleeTypeId, resId, returnVarId, &new_blk_ptr);
-            }
-            // Copy remaining instructions from caller block.
-            for (Instruction* inst = call_inst_itr->NextNode(); inst;
-                 inst = call_inst_itr->NextNode()) {
-              inst->RemoveFromList();
-              std::unique_ptr<Instruction> cp_inst(inst);
-              // If multiple blocks generated, regenerate any same-block
-              // instruction that has not been seen in this last block.
-              if (multiBlocks) {
-                if (!CloneSameBlockOps(&cp_inst, &postCallSB, &preCallSB,
-                                       &new_blk_ptr)) {
-                  return false;
-                }
-
-                // Remember same-block ops in this block.
-                if (IsSameBlockOp(&*cp_inst)) {
-                  const uint32_t rid = cp_inst->result_id();
-                  postCallSB[rid] = rid;
-                }
-              }
-              new_blk_ptr->AddInstruction(std::move(cp_inst));
-            }
-            // Finalize inline code.
-            new_blocks->push_back(std::move(new_blk_ptr));
-          } break;
-          default: {
-            // Copy callee instruction and remap all input Ids.
-            std::unique_ptr<Instruction> cp_inst(cpi->Clone(context()));
-            bool succeeded = cp_inst->WhileEachInId(
-                [&callee2caller, &callee_result_ids, this](uint32_t* iid) {
-                  const auto mapItr = callee2caller.find(*iid);
-                  if (mapItr != callee2caller.end()) {
-                    *iid = mapItr->second;
-                  } else if (callee_result_ids.find(*iid) !=
-                             callee_result_ids.end()) {
-                    // Forward reference. Allocate a new id, map it,
-                    // use it and check for it when remapping result ids
-                    const uint32_t nid = context()->TakeNextId();
-                    if (nid == 0) {
-                      return false;
-                    }
-                    callee2caller[*iid] = nid;
-                    *iid = nid;
-                  }
-                  return true;
-                });
-            if (!succeeded) {
-              return false;
-            }
-            // If result id is non-zero, remap it. If already mapped, use mapped
-            // value, else use next id.
-            const uint32_t rid = cp_inst->result_id();
-            if (rid != 0) {
-              const auto mapItr = callee2caller.find(rid);
-              uint32_t nid;
-              if (mapItr != callee2caller.end()) {
-                nid = mapItr->second;
-              } else {
-                nid = context()->TakeNextId();
-                if (nid == 0) {
-                  return false;
-                }
-                callee2caller[rid] = nid;
-              }
-              cp_inst->SetResultId(nid);
-              get_decoration_mgr()->CloneDecorations(rid, nid);
-            }
-            new_blk_ptr->AddInstruction(std::move(cp_inst));
-          } break;
-        }
-        return true;
-      });
+  new_blk_ptr =
+      InlineReturn(callee2caller, new_blocks, std::move(new_blk_ptr), calleeFn,
+                   &*(calleeFn->tail()->tail()), returnVarId);
 
-  if (!successful) {
-    return false;
+  // Load return value into result id of call, if it exists.
+  if (returnVarId != 0) {
+    const uint32_t resId = call_inst_itr->result_id();
+    assert(resId != 0);
+    AddLoad(calleeTypeId, resId, returnVarId, &new_blk_ptr);
   }
 
-  if (caller_is_loop_header && (new_blocks->size() > 1)) {
-    // Move the OpLoopMerge from the last block back to the first, where
-    // it belongs.
-    auto& first = new_blocks->front();
-    auto& last = new_blocks->back();
-    assert(first != last);
-
-    // Insert a modified copy of the loop merge into the first block.
-    auto loop_merge_itr = last->tail();
-    --loop_merge_itr;
-    assert(loop_merge_itr->opcode() == SpvOpLoopMerge);
-    std::unique_ptr<Instruction> cp_inst(loop_merge_itr->Clone(context()));
-    first->tail().InsertBefore(std::move(cp_inst));
-
-    // Remove the loop merge from the last block.
-    loop_merge_itr->RemoveFromList();
-    delete &*loop_merge_itr;
-  }
+  // Move instructions of original caller block after call instruction.
+  if (!MoveCallerInstsAfterFunctionCall(&preCallSB, &postCallSB, &new_blk_ptr,
+                                        call_inst_itr,
+                                        calleeFn->begin() != calleeFn->end()))
+    return false;
+
+  // Finalize inline code.
+  new_blocks->push_back(std::move(new_blk_ptr));
+
+  if (caller_is_loop_header && (new_blocks->size() > 1))
+    MoveLoopMergeInstToFirstBlock(new_blocks);
 
   // Update block map given replacement blocks.
   for (auto& blk : *new_blocks) {
@@ -624,7 +579,21 @@ bool InlinePass::IsInlinableFunctionCall(const Instruction* inst) {
   const uint32_t calleeFnId =
       inst->GetSingleWordOperand(kSpvFunctionCallFunctionId);
   const auto ci = inlinable_.find(calleeFnId);
-  return ci != inlinable_.cend();
+  if (ci == inlinable_.cend()) return false;
+
+  if (early_return_funcs_.find(calleeFnId) != early_return_funcs_.end()) {
+    // We rely on the merge-return pass to handle the early return case
+    // in advance.
+    std::string message =
+        "The function '" + id2function_[calleeFnId]->DefInst().PrettyPrint() +
+        "' could not be inlined because the return instruction "
+        "is not at the end of the function. This could be fixed by "
+        "running merge-return before inlining.";
+    consumer()(SPV_MSG_WARNING, "", {0, 0, 0}, message.c_str());
+    return false;
+  }
+
+  return true;
 }
 
 void InlinePass::UpdateSucceedingPhis(
@@ -645,26 +614,6 @@ void InlinePass::UpdateSucceedingPhis(
       });
 }
 
-bool InlinePass::HasNoReturnInStructuredConstruct(Function* func) {
-  // If control not structured, do not do loop/return analysis
-  // TODO: Analyze returns in non-structured control flow
-  if (!context()->get_feature_mgr()->HasCapability(SpvCapabilityShader))
-    return false;
-  const auto structured_analysis = context()->GetStructuredCFGAnalysis();
-  // Search for returns in structured construct.
-  bool return_in_construct = false;
-  for (auto& blk : *func) {
-    auto terminal_ii = blk.cend();
-    --terminal_ii;
-    if (spvOpcodeIsReturn(terminal_ii->opcode()) &&
-        structured_analysis->ContainingConstruct(blk.id()) != 0) {
-      return_in_construct = true;
-      break;
-    }
-  }
-  return !return_in_construct;
-}
-
 bool InlinePass::HasNoReturnInLoop(Function* func) {
   // If control not structured, do not do loop/return analysis
   // TODO: Analyze returns in non-structured control flow
@@ -686,10 +635,18 @@ bool InlinePass::HasNoReturnInLoop(Function* func) {
 }
 
 void InlinePass::AnalyzeReturns(Function* func) {
+  // Analyze functions without a return in loop.
   if (HasNoReturnInLoop(func)) {
     no_return_in_loop_.insert(func->result_id());
-    if (!HasNoReturnInStructuredConstruct(func))
+  }
+  // Analyze functions with a return before its tail basic block.
+  for (auto& blk : *func) {
+    auto terminal_ii = blk.cend();
+    --terminal_ii;
+    if (spvOpcodeIsReturn(terminal_ii->opcode()) && &blk != func->tail()) {
       early_return_funcs_.insert(func->result_id());
+      break;
+    }
   }
 }
 

+ 58 - 4
3rdparty/spirv-tools/source/opt/inline_pass.h

@@ -124,10 +124,6 @@ class InlinePass : public Pass {
   // Return true if |inst| is a function call that can be inlined.
   bool IsInlinableFunctionCall(const Instruction* inst);
 
-  // Return true if |func| does not have a return that is
-  // nested in a structured if, switch or loop.
-  bool HasNoReturnInStructuredConstruct(Function* func);
-
   // Return true if |func| has no return in a loop. The current analysis
   // requires structured control flow, so return false if control flow not
   // structured ie. module is not a shader.
@@ -171,6 +167,64 @@ class InlinePass : public Pass {
   // Set of functions that are originally called directly or indirectly from a
   // continue construct.
   std::unordered_set<uint32_t> funcs_called_from_continue_;
+
+ private:
+  // Moves instructions of the caller function up to the call instruction
+  // to |new_blk_ptr|.
+  void MoveInstsBeforeEntryBlock(
+      std::unordered_map<uint32_t, Instruction*>* preCallSB,
+      BasicBlock* new_blk_ptr, BasicBlock::iterator call_inst_itr,
+      UptrVectorIterator<BasicBlock> call_block_itr);
+
+  // Returns a new guard block after adding a branch to the end of
+  // |new_blocks|.
+  std::unique_ptr<BasicBlock> AddGuardBlock(
+      std::vector<std::unique_ptr<BasicBlock>>* new_blocks,
+      std::unordered_map<uint32_t, uint32_t>* callee2caller,
+      std::unique_ptr<BasicBlock> new_blk_ptr, uint32_t entry_blk_label_id);
+
+  // Add store instructions for initializers of variables.
+  InstructionList::iterator AddStoresForVariableInitializers(
+      const std::unordered_map<uint32_t, uint32_t>& callee2caller,
+      std::unique_ptr<BasicBlock>* new_blk_ptr,
+      UptrVectorIterator<BasicBlock> callee_block_itr);
+
+  // Inlines a single instruction of the callee function.
+  bool InlineInstructionInBB(
+      const std::unordered_map<uint32_t, uint32_t>& callee2caller,
+      BasicBlock* new_blk_ptr, const Instruction* inst);
+
+  // Inlines the return instruction of the callee function.
+  std::unique_ptr<BasicBlock> InlineReturn(
+      const std::unordered_map<uint32_t, uint32_t>& callee2caller,
+      std::vector<std::unique_ptr<BasicBlock>>* new_blocks,
+      std::unique_ptr<BasicBlock> new_blk_ptr, Function* calleeFn,
+      const Instruction* inst, uint32_t returnVarId);
+
+  // Inlines the entry block of the callee function.
+  bool InlineEntryBlock(
+      const std::unordered_map<uint32_t, uint32_t>& callee2caller,
+      std::unique_ptr<BasicBlock>* new_blk_ptr,
+      UptrVectorIterator<BasicBlock> callee_first_block);
+
+  // Inlines basic blocks of the callee function other than the entry basic
+  // block.
+  std::unique_ptr<BasicBlock> InlineBasicBlocks(
+      std::vector<std::unique_ptr<BasicBlock>>* new_blocks,
+      const std::unordered_map<uint32_t, uint32_t>& callee2caller,
+      std::unique_ptr<BasicBlock> new_blk_ptr, Function* calleeFn);
+
+  // Moves instructions of the caller function after the call instruction
+  // to |new_blk_ptr|.
+  bool MoveCallerInstsAfterFunctionCall(
+      std::unordered_map<uint32_t, Instruction*>* preCallSB,
+      std::unordered_map<uint32_t, uint32_t>* postCallSB,
+      std::unique_ptr<BasicBlock>* new_blk_ptr,
+      BasicBlock::iterator call_inst_itr, bool multiBlocks);
+
+  // Move the OpLoopMerge from the last block back to the first.
+  void MoveLoopMergeInstToFirstBlock(
+      std::vector<std::unique_ptr<BasicBlock>>* new_blocks);
 };
 
 }  // namespace opt

+ 13 - 0
3rdparty/spirv-tools/source/opt/instruction.cpp

@@ -38,6 +38,10 @@ const uint32_t kExtInstInstructionInIdx = 1;
 const uint32_t kDebugScopeNumWords = 7;
 const uint32_t kDebugScopeNumWordsWithoutInlinedAt = 6;
 const uint32_t kDebugNoScopeNumWords = 5;
+
+// Number of operands of an OpBranchConditional instruction
+// with weights.
+const uint32_t kOpBranchConditionalWithWeightsNumOperands = 5;
 }  // namespace
 
 Instruction::Instruction(IRContext* c)
@@ -166,6 +170,15 @@ uint32_t Instruction::NumInOperandWords() const {
   return size;
 }
 
+bool Instruction::HasBranchWeights() const {
+  if (opcode_ == SpvOpBranchConditional &&
+      NumOperands() == kOpBranchConditionalWithWeightsNumOperands) {
+    return true;
+  }
+
+  return false;
+}
+
 void Instruction::ToBinaryWithoutAttachedDebugInsts(
     std::vector<uint32_t>* binary) const {
   const uint32_t num_words = 1 + NumOperandWords();

+ 14 - 0
3rdparty/spirv-tools/source/opt/instruction.h

@@ -291,6 +291,8 @@ class Instruction : public utils::IntrusiveNodeBase<Instruction> {
   // Sets DebugScope.
   inline void SetDebugScope(const DebugScope& scope);
   inline const DebugScope& GetDebugScope() const { return dbg_scope_; }
+  // Updates OpLine and DebugScope based on the information of |from|.
+  inline void UpdateDebugInfo(const Instruction* from);
   // Remove the |index|-th operand
   void RemoveOperand(uint32_t index) {
     operands_.erase(operands_.begin() + index);
@@ -364,6 +366,10 @@ class Instruction : public utils::IntrusiveNodeBase<Instruction> {
   inline bool WhileEachInOperand(
       const std::function<bool(const uint32_t*)>& f) const;
 
+  // Returns true if it's an OpBranchConditional instruction
+  // with branch weights.
+  bool HasBranchWeights() const;
+
   // Returns true if any operands can be labels
   inline bool HasLabels() const;
 
@@ -636,6 +642,14 @@ inline void Instruction::SetDebugScope(const DebugScope& scope) {
   }
 }
 
+inline void Instruction::UpdateDebugInfo(const Instruction* from) {
+  if (from == nullptr) return;
+  clear_dbg_line_insts();
+  if (!from->dbg_line_insts().empty())
+    dbg_line_insts().push_back(from->dbg_line_insts()[0]);
+  SetDebugScope(from->GetDebugScope());
+}
+
 inline void Instruction::SetResultType(uint32_t ty_id) {
   // TODO(dsinclair): Allow setting a type id if there wasn't one
   // previously. Need to make room in the operands_ array to place the result,

+ 5 - 2
3rdparty/spirv-tools/source/opt/merge_return_pass.cpp

@@ -39,8 +39,11 @@ Pass::Status MergeReturnPass::Process() {
       if (!is_shader || return_blocks.size() == 0) {
         return false;
       }
-      if (context()->GetStructuredCFGAnalysis()->ContainingConstruct(
-              return_blocks[0]->id()) == 0) {
+      bool isInConstruct =
+          context()->GetStructuredCFGAnalysis()->ContainingConstruct(
+              return_blocks[0]->id()) != 0;
+      bool endsWithReturn = return_blocks[0] == function->tail();
+      if (!isInConstruct && endsWithReturn) {
         return false;
       }
     }

+ 4 - 1
3rdparty/spirv-tools/source/opt/wrap_opkill.cpp

@@ -59,9 +59,12 @@ bool WrapOpKill::ReplaceWithFunctionCall(Instruction* inst) {
   if (func_id == 0) {
     return false;
   }
-  if (ir_builder.AddFunctionCall(GetVoidTypeId(), func_id, {}) == nullptr) {
+  Instruction* call_inst =
+      ir_builder.AddFunctionCall(GetVoidTypeId(), func_id, {});
+  if (call_inst == nullptr) {
     return false;
   }
+  call_inst->UpdateDebugInfo(inst);
 
   Instruction* return_inst = nullptr;
   uint32_t return_type_id = GetOwningFunctionsReturnType(inst);

+ 7 - 3
3rdparty/spirv-tools/source/reduce/CMakeLists.txt

@@ -26,12 +26,14 @@ set(SPIRV_TOOLS_REDUCE_SOURCES
         reduction_util.h
         remove_block_reduction_opportunity.h
         remove_block_reduction_opportunity_finder.h
-        remove_instruction_reduction_opportunity.h
         remove_function_reduction_opportunity.h
         remove_function_reduction_opportunity_finder.h
+        remove_instruction_reduction_opportunity.h
         remove_selection_reduction_opportunity.h
         remove_selection_reduction_opportunity_finder.h
-        remove_unreferenced_instruction_reduction_opportunity_finder.h
+        remove_struct_member_reduction_opportunity.h
+        remove_unused_instruction_reduction_opportunity_finder.h
+        remove_unused_struct_member_reduction_opportunity_finder.h
         structured_loop_to_selection_reduction_opportunity.h
         structured_loop_to_selection_reduction_opportunity_finder.h
         conditional_branch_to_simple_conditional_branch_opportunity_finder.h
@@ -57,7 +59,9 @@ set(SPIRV_TOOLS_REDUCE_SOURCES
         remove_instruction_reduction_opportunity.cpp
         remove_selection_reduction_opportunity.cpp
         remove_selection_reduction_opportunity_finder.cpp
-        remove_unreferenced_instruction_reduction_opportunity_finder.cpp
+        remove_struct_member_reduction_opportunity.cpp
+        remove_unused_instruction_reduction_opportunity_finder.cpp
+        remove_unused_struct_member_reduction_opportunity_finder.cpp
         structured_loop_to_selection_reduction_opportunity.cpp
         structured_loop_to_selection_reduction_opportunity_finder.cpp
         conditional_branch_to_simple_conditional_branch_opportunity_finder.cpp

+ 1 - 1
3rdparty/spirv-tools/source/reduce/pch_source_reduce.h

@@ -20,4 +20,4 @@
 #include "source/reduce/reduction_opportunity.h"
 #include "source/reduce/reduction_pass.h"
 #include "source/reduce/remove_instruction_reduction_opportunity.h"
-#include "source/reduce/remove_unreferenced_instruction_reduction_opportunity_finder.h"
+#include "source/reduce/remove_unused_instruction_reduction_opportunity_finder.h"

+ 8 - 5
3rdparty/spirv-tools/source/reduce/reducer.cpp

@@ -25,7 +25,8 @@
 #include "source/reduce/remove_block_reduction_opportunity_finder.h"
 #include "source/reduce/remove_function_reduction_opportunity_finder.h"
 #include "source/reduce/remove_selection_reduction_opportunity_finder.h"
-#include "source/reduce/remove_unreferenced_instruction_reduction_opportunity_finder.h"
+#include "source/reduce/remove_unused_instruction_reduction_opportunity_finder.h"
+#include "source/reduce/remove_unused_struct_member_reduction_opportunity_finder.h"
 #include "source/reduce/simple_conditional_branch_to_branch_opportunity_finder.h"
 #include "source/reduce/structured_loop_to_selection_reduction_opportunity_finder.h"
 #include "source/spirv_reducer_options.h"
@@ -103,8 +104,8 @@ Reducer::ReductionResultStatus Reducer::Run(
 
 void Reducer::AddDefaultReductionPasses() {
   AddReductionPass(
-      spvtools::MakeUnique<
-          RemoveUnreferencedInstructionReductionOpportunityFinder>(false));
+      spvtools::MakeUnique<RemoveUnusedInstructionReductionOpportunityFinder>(
+          false));
   AddReductionPass(
       spvtools::MakeUnique<OperandToUndefReductionOpportunityFinder>());
   AddReductionPass(
@@ -126,12 +127,14 @@ void Reducer::AddDefaultReductionPasses() {
           ConditionalBranchToSimpleConditionalBranchOpportunityFinder>());
   AddReductionPass(
       spvtools::MakeUnique<SimpleConditionalBranchToBranchOpportunityFinder>());
+  AddReductionPass(spvtools::MakeUnique<
+                   RemoveUnusedStructMemberReductionOpportunityFinder>());
 
   // Cleanup passes.
 
   AddCleanupReductionPass(
-      spvtools::MakeUnique<
-          RemoveUnreferencedInstructionReductionOpportunityFinder>(true));
+      spvtools::MakeUnique<RemoveUnusedInstructionReductionOpportunityFinder>(
+          true));
 }
 
 void Reducer::AddReductionPass(

+ 12 - 0
3rdparty/spirv-tools/source/reduce/remove_instruction_reduction_opportunity.cpp

@@ -22,6 +22,18 @@ namespace reduce {
 bool RemoveInstructionReductionOpportunity::PreconditionHolds() { return true; }
 
 void RemoveInstructionReductionOpportunity::Apply() {
+  const uint32_t kNumEntryPointInOperandsBeforeInterfaceIds = 3;
+  for (auto& entry_point : inst_->context()->module()->entry_points()) {
+    opt::Instruction::OperandList new_entry_point_in_operands;
+    for (uint32_t index = 0; index < entry_point.NumInOperands(); index++) {
+      if (index >= kNumEntryPointInOperandsBeforeInterfaceIds &&
+          entry_point.GetSingleWordInOperand(index) == inst_->result_id()) {
+        continue;
+      }
+      new_entry_point_in_operands.push_back(entry_point.GetInOperand(index));
+    }
+    entry_point.SetInOperands(std::move(new_entry_point_in_operands));
+  }
   inst_->context()->KillInst(inst_);
 }
 

+ 208 - 0
3rdparty/spirv-tools/source/reduce/remove_struct_member_reduction_opportunity.cpp

@@ -0,0 +1,208 @@
+// Copyright (c) 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "source/reduce/remove_struct_member_reduction_opportunity.h"
+
+#include "source/opt/ir_context.h"
+
+namespace spvtools {
+namespace reduce {
+
+bool RemoveStructMemberReductionOpportunity::PreconditionHolds() {
+  return struct_type_->NumInOperands() == original_number_of_members_;
+}
+
+void RemoveStructMemberReductionOpportunity::Apply() {
+  std::set<opt::Instruction*> decorations_to_kill;
+
+  // We need to remove decorations that target the removed struct member, and
+  // adapt decorations that target later struct members by decrementing the
+  // member identifier.  We also need to adapt composite construction
+  // instructions so that no id is provided for the member being removed.
+  //
+  // To do this, we consider every use of the struct type.
+  struct_type_->context()->get_def_use_mgr()->ForEachUse(
+      struct_type_, [this, &decorations_to_kill](opt::Instruction* user,
+                                                 uint32_t /*operand_index*/) {
+        switch (user->opcode()) {
+          case SpvOpCompositeConstruct:
+          case SpvOpConstantComposite:
+            // This use is constructing a composite of the struct type, so we
+            // must remove the id that was provided for the member we are
+            // removing.
+            user->RemoveInOperand(member_index_);
+            break;
+          case SpvOpMemberDecorate:
+            // This use is decorating a member of the struct.
+            if (user->GetSingleWordInOperand(1) == member_index_) {
+              // The member we are removing is being decorated, so we record
+              // that we need to get rid of the decoration.
+              decorations_to_kill.insert(user);
+            } else if (user->GetSingleWordInOperand(1) > member_index_) {
+              // A member beyond the one we are removing is being decorated, so
+              // we adjust the index that identifies the member.
+              user->SetInOperand(1, {user->GetSingleWordInOperand(1) - 1});
+            }
+            break;
+          default:
+            break;
+        }
+      });
+
+  // Get rid of all the decorations that were found to target the member being
+  // removed.
+  for (auto decoration_to_kill : decorations_to_kill) {
+    decoration_to_kill->context()->KillInst(decoration_to_kill);
+  }
+
+  // We now look through all instructions that access composites via sequences
+  // of indices. Every time we find an index into the struct whose member is
+  // being removed, and if the member being accessed comes after the member
+  // being removed, we need to adjust the index accordingly.
+  //
+  // We go through every relevant instruction in every block of every function,
+  // and invoke a helper to adjust it.
+  auto context = struct_type_->context();
+  for (auto& function : *context->module()) {
+    for (auto& block : function) {
+      for (auto& inst : block) {
+        switch (inst.opcode()) {
+          case SpvOpAccessChain:
+          case SpvOpInBoundsAccessChain: {
+            // These access chain instructions take sequences of ids for
+            // indexing, starting from input operand 1.
+            auto composite_type_id =
+                context->get_def_use_mgr()
+                    ->GetDef(context->get_def_use_mgr()
+                                 ->GetDef(inst.GetSingleWordInOperand(0))
+                                 ->type_id())
+                    ->GetSingleWordInOperand(1);
+            AdjustAccessedIndices(composite_type_id, 1, false, context, &inst);
+          } break;
+          case SpvOpPtrAccessChain:
+          case SpvOpInBoundsPtrAccessChain: {
+            // These access chain instructions take sequences of ids for
+            // indexing, starting from input operand 2.
+            auto composite_type_id =
+                context->get_def_use_mgr()
+                    ->GetDef(context->get_def_use_mgr()
+                                 ->GetDef(inst.GetSingleWordInOperand(1))
+                                 ->type_id())
+                    ->GetSingleWordInOperand(1);
+            AdjustAccessedIndices(composite_type_id, 2, false, context, &inst);
+          } break;
+          case SpvOpCompositeExtract: {
+            // OpCompositeExtract uses literals for indexing, starting at input
+            // operand 1.
+            auto composite_type_id =
+                context->get_def_use_mgr()
+                    ->GetDef(inst.GetSingleWordInOperand(0))
+                    ->type_id();
+            AdjustAccessedIndices(composite_type_id, 1, true, context, &inst);
+          } break;
+          case SpvOpCompositeInsert: {
+            // OpCompositeInsert uses literals for indexing, starting at input
+            // operand 2.
+            auto composite_type_id =
+                context->get_def_use_mgr()
+                    ->GetDef(inst.GetSingleWordInOperand(1))
+                    ->type_id();
+            AdjustAccessedIndices(composite_type_id, 2, true, context, &inst);
+          } break;
+          default:
+            break;
+        }
+      }
+    }
+  }
+
+  // Remove the member from the struct type.
+  struct_type_->RemoveInOperand(member_index_);
+}
+
+void RemoveStructMemberReductionOpportunity::AdjustAccessedIndices(
+    uint32_t composite_type_id, uint32_t first_index_input_operand,
+    bool literal_indices, opt::IRContext* context,
+    opt::Instruction* composite_access_instruction) const {
+  // Walk the series of types that are encountered by following the
+  // instruction's sequence of indices. For all types except structs, this is
+  // routine: the type of the composite dictates what the next type will be
+  // regardless of the specific index value.
+  uint32_t next_type = composite_type_id;
+  for (uint32_t i = first_index_input_operand;
+       i < composite_access_instruction->NumInOperands(); i++) {
+    auto type_inst = context->get_def_use_mgr()->GetDef(next_type);
+    switch (type_inst->opcode()) {
+      case SpvOpTypeArray:
+      case SpvOpTypeMatrix:
+      case SpvOpTypeRuntimeArray:
+      case SpvOpTypeVector:
+        next_type = type_inst->GetSingleWordInOperand(0);
+        break;
+      case SpvOpTypeStruct: {
+        // Struct types are special because (a) we may need to adjust the index
+        // being used, if the struct type is the one from which we are removing
+        // a member, and (b) the type encountered by following the current index
+        // is dependent on the value of the index.
+
+        // Work out the member being accessed.  If literal indexing is used this
+        // is simple; otherwise we need to look up the id of the constant
+        // instruction being used as an index and get the value of the constant.
+        uint32_t index_operand =
+            composite_access_instruction->GetSingleWordInOperand(i);
+        uint32_t member = literal_indices ? index_operand
+                                          : context->get_def_use_mgr()
+                                                ->GetDef(index_operand)
+                                                ->GetSingleWordInOperand(0);
+
+        // The next type we will consider is obtained by looking up the struct
+        // type at |member|.
+        next_type = type_inst->GetSingleWordInOperand(member);
+
+        if (type_inst == struct_type_ && member > member_index_) {
+          // The struct type is the struct from which we are removing a member,
+          // and the member being accessed is beyond the member we are removing.
+          // We thus need to decrement the index by 1.
+          uint32_t new_in_operand;
+          if (literal_indices) {
+            // With literal indexing this is straightforward.
+            new_in_operand = member - 1;
+          } else {
+            // With id-based indexing this is more tricky: we need to find or
+            // create a constant instruction whose value is one less than
+            // |member|, and use the id of this constant as the replacement
+            // input operand.
+            auto constant_inst =
+                context->get_def_use_mgr()->GetDef(index_operand);
+            auto int_type = context->get_type_mgr()
+                                ->GetType(constant_inst->type_id())
+                                ->AsInteger();
+            auto new_index_constant =
+                opt::analysis::IntConstant(int_type, {member - 1});
+            new_in_operand = context->get_constant_mgr()
+                                 ->GetDefiningInstruction(&new_index_constant)
+                                 ->result_id();
+          }
+          composite_access_instruction->SetInOperand(i, {new_in_operand});
+        }
+      } break;
+      default:
+        assert(0 && "Unknown composite type.");
+        break;
+    }
+  }
+}
+
+}  // namespace reduce
+}  // namespace spvtools

+ 84 - 0
3rdparty/spirv-tools/source/reduce/remove_struct_member_reduction_opportunity.h

@@ -0,0 +1,84 @@
+// Copyright (c) 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SOURCE_REDUCE_REMOVE_STRUCT_MEMBER_REDUCTION_OPPORTUNITY_H_
+#define SOURCE_REDUCE_REMOVE_STRUCT_MEMBER_REDUCTION_OPPORTUNITY_H_
+
+#include "source/reduce/reduction_opportunity.h"
+
+#include "source/opt/instruction.h"
+
+namespace spvtools {
+namespace reduce {
+
+// An opportunity for removing a member from a struct type, adjusting all uses
+// of the struct accordingly.
+class RemoveStructMemberReductionOpportunity : public ReductionOpportunity {
+ public:
+  // Constructs a reduction opportunity from the struct type |struct_type|, for
+  // removal of member |member_index|.
+  RemoveStructMemberReductionOpportunity(opt::Instruction* struct_type,
+                                         uint32_t member_index)
+      : struct_type_(struct_type),
+        member_index_(member_index),
+        original_number_of_members_(struct_type->NumInOperands()) {}
+
+  // Opportunities to remove fields from a common struct type mutually
+  // invalidate each other.  We guard against this by requiring that the struct
+  // still has the number of members it had when the opportunity was created.
+  bool PreconditionHolds() override;
+
+ protected:
+  void Apply() override;
+
+ private:
+  // |composite_access_instruction| is an instruction that accesses a composite
+  // id using either a series of literal indices (e.g. in the case of
+  // OpCompositeInsert) or a series of index ids (e.g. in the case of
+  // OpAccessChain).
+  //
+  // This function adjusts the indices that are used by
+  // |composite_access_instruction| so that whenever an index is accessing a
+  // member of |struct_type_|, it is decremented if the member is beyond
+  // |member_index_|, to account for the removal of the |member_index_|-th
+  // member.
+  //
+  // |composite_type_id| is the id of the composite type that the series of
+  // indices is to be applied to.
+  //
+  // |first_index_input_operand| specifies the first input operand that is an
+  // index.
+  //
+  // |literal_indices| specifies whether indices are given as literals (true),
+  // or as ids (false).
+  //
+  // If id-based indexing is used, this function will add a constant for
+  // |member_index_| - 1 to the module if needed.
+  void AdjustAccessedIndices(
+      uint32_t composite_type_id, uint32_t first_index_input_operand,
+      bool literal_indices, opt::IRContext* context,
+      opt::Instruction* composite_access_instruction) const;
+
+  // The struct type from which a member is to be removed.
+  opt::Instruction* struct_type_;
+
+  uint32_t member_index_;
+
+  uint32_t original_number_of_members_;
+};
+
+}  // namespace reduce
+}  // namespace spvtools
+
+#endif  //   SOURCE_REDUCE_REMOVE_STRUCT_MEMBER_REDUCTION_OPPORTUNITY_H_

+ 59 - 42
3rdparty/spirv-tools/source/reduce/remove_unreferenced_instruction_reduction_opportunity_finder.cpp → 3rdparty/spirv-tools/source/reduce/remove_unused_instruction_reduction_opportunity_finder.cpp

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "source/reduce/remove_unreferenced_instruction_reduction_opportunity_finder.h"
+#include "source/reduce/remove_unused_instruction_reduction_opportunity_finder.h"
 
 #include "source/opcode.h"
 #include "source/opt/instruction.h"
@@ -21,14 +21,14 @@
 namespace spvtools {
 namespace reduce {
 
-RemoveUnreferencedInstructionReductionOpportunityFinder::
-    RemoveUnreferencedInstructionReductionOpportunityFinder(
+RemoveUnusedInstructionReductionOpportunityFinder::
+    RemoveUnusedInstructionReductionOpportunityFinder(
         bool remove_constants_and_undefs)
     : remove_constants_and_undefs_(remove_constants_and_undefs) {}
 
 std::vector<std::unique_ptr<ReductionOpportunity>>
-RemoveUnreferencedInstructionReductionOpportunityFinder::
-    GetAvailableOpportunities(opt::IRContext* context) const {
+RemoveUnusedInstructionReductionOpportunityFinder::GetAvailableOpportunities(
+    opt::IRContext* context) const {
   std::vector<std::unique_ptr<ReductionOpportunity>> result;
 
   for (auto& inst : context->module()->debugs1()) {
@@ -60,13 +60,14 @@ RemoveUnreferencedInstructionReductionOpportunityFinder::
   }
 
   for (auto& inst : context->module()->types_values()) {
-    if (context->get_def_use_mgr()->NumUsers(&inst) > 0) {
-      continue;
-    }
     if (!remove_constants_and_undefs_ &&
         spvOpcodeIsConstantOrUndef(inst.opcode())) {
       continue;
     }
+    if (!OnlyReferencedByIntimateDecorationOrEntryPointInterface(context,
+                                                                 inst)) {
+      continue;
+    }
     result.push_back(MakeUnique<RemoveInstructionReductionOpportunity>(&inst));
   }
 
@@ -74,38 +75,9 @@ RemoveUnreferencedInstructionReductionOpportunityFinder::
     if (context->get_def_use_mgr()->NumUsers(&inst) > 0) {
       continue;
     }
-
-    uint32_t decoration = SpvDecorationMax;
-    switch (inst.opcode()) {
-      case SpvOpDecorate:
-      case SpvOpDecorateId:
-      case SpvOpDecorateString:
-        decoration = inst.GetSingleWordInOperand(1u);
-        break;
-      case SpvOpMemberDecorate:
-      case SpvOpMemberDecorateString:
-        decoration = inst.GetSingleWordInOperand(2u);
-        break;
-      default:
-        break;
-    }
-
-    // We conservatively only remove specific decorations that we believe will
-    // not change the shader interface, will not make the shader invalid, will
-    // actually be found in practice, etc.
-
-    switch (decoration) {
-      case SpvDecorationRelaxedPrecision:
-      case SpvDecorationNoSignedWrap:
-      case SpvDecorationNoContraction:
-      case SpvDecorationNoUnsignedWrap:
-      case SpvDecorationUserSemantic:
-        break;
-      default:
-        // Give up.
-        continue;
+    if (!IsIndependentlyRemovableDecoration(inst)) {
+      continue;
     }
-
     result.push_back(MakeUnique<RemoveInstructionReductionOpportunity>(&inst));
   }
 
@@ -139,9 +111,54 @@ RemoveUnreferencedInstructionReductionOpportunityFinder::
   return result;
 }
 
-std::string RemoveUnreferencedInstructionReductionOpportunityFinder::GetName()
-    const {
-  return "RemoveUnreferencedInstructionReductionOpportunityFinder";
+std::string RemoveUnusedInstructionReductionOpportunityFinder::GetName() const {
+  return "RemoveUnusedInstructionReductionOpportunityFinder";
+}
+
+bool RemoveUnusedInstructionReductionOpportunityFinder::
+    OnlyReferencedByIntimateDecorationOrEntryPointInterface(
+        opt::IRContext* context, const opt::Instruction& inst) const {
+  return context->get_def_use_mgr()->WhileEachUse(
+      &inst, [this](opt::Instruction* user, uint32_t use_index) -> bool {
+        return (user->IsDecoration() &&
+                !IsIndependentlyRemovableDecoration(*user)) ||
+               (user->opcode() == SpvOpEntryPoint && use_index > 2);
+      });
+}
+
+bool RemoveUnusedInstructionReductionOpportunityFinder::
+    IsIndependentlyRemovableDecoration(const opt::Instruction& inst) const {
+  uint32_t decoration;
+  switch (inst.opcode()) {
+    case SpvOpDecorate:
+    case SpvOpDecorateId:
+    case SpvOpDecorateString:
+      decoration = inst.GetSingleWordInOperand(1u);
+      break;
+    case SpvOpMemberDecorate:
+    case SpvOpMemberDecorateString:
+      decoration = inst.GetSingleWordInOperand(2u);
+      break;
+    default:
+      // The instruction is not a decoration.  It is legitimate for this to be
+      // reached: it allows the method to be invoked on arbitrary instructions.
+      return false;
+  }
+
+  // We conservatively only remove specific decorations that we believe will
+  // not change the shader interface, will not make the shader invalid, will
+  // actually be found in practice, etc.
+
+  switch (decoration) {
+    case SpvDecorationRelaxedPrecision:
+    case SpvDecorationNoSignedWrap:
+    case SpvDecorationNoContraction:
+    case SpvDecorationNoUnsignedWrap:
+    case SpvDecorationUserSemantic:
+      return true;
+    default:
+      return false;
+  }
 }
 
 }  // namespace reduce

+ 19 - 6
3rdparty/spirv-tools/source/reduce/remove_unreferenced_instruction_reduction_opportunity_finder.h → 3rdparty/spirv-tools/source/reduce/remove_unused_instruction_reduction_opportunity_finder.h

@@ -21,17 +21,19 @@ namespace spvtools {
 namespace reduce {
 
 // A finder for opportunities to remove non-control-flow instructions in blocks
-// in cases where the instruction's id is not referenced.  As well as making the
-// module smaller, removing an instruction that references particular ids may
-// create opportunities for subsequently removing the instructions that
+// in cases where the instruction's id is either not referenced at all, or
+// referenced only in a trivial manner (for example, we regard a struct type as
+// unused if it is referenced only by struct layout decorations).  As well as
+// making the module smaller, removing an instruction that references particular
+// ids may create opportunities for subsequently removing the instructions that
 // generated those ids.
-class RemoveUnreferencedInstructionReductionOpportunityFinder
+class RemoveUnusedInstructionReductionOpportunityFinder
     : public ReductionOpportunityFinder {
  public:
-  explicit RemoveUnreferencedInstructionReductionOpportunityFinder(
+  explicit RemoveUnusedInstructionReductionOpportunityFinder(
       bool remove_constants_and_undefs);
 
-  ~RemoveUnreferencedInstructionReductionOpportunityFinder() override = default;
+  ~RemoveUnusedInstructionReductionOpportunityFinder() override = default;
 
   std::string GetName() const final;
 
@@ -39,6 +41,17 @@ class RemoveUnreferencedInstructionReductionOpportunityFinder
       opt::IRContext* context) const final;
 
  private:
+  // Returns true if and only if the only uses of |inst| are by decorations that
+  // relate intimately to the instruction (as opposed to decorations that could
+  // be removed independently), or by interface ids in OpEntryPoint.
+  bool OnlyReferencedByIntimateDecorationOrEntryPointInterface(
+      opt::IRContext* context, const opt::Instruction& inst) const;
+
+  // Returns true if and only if |inst| is a decoration instruction that can
+  // legitimately be removed on its own (rather than one that has to be removed
+  // simultaneously with other instructions).
+  bool IsIndependentlyRemovableDecoration(const opt::Instruction& inst) const;
+
   bool remove_constants_and_undefs_;
 };
 

+ 193 - 0
3rdparty/spirv-tools/source/reduce/remove_unused_struct_member_reduction_opportunity_finder.cpp

@@ -0,0 +1,193 @@
+// Copyright (c) 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "source/reduce/remove_unused_struct_member_reduction_opportunity_finder.h"
+
+#include <map>
+#include <set>
+
+#include "source/reduce/remove_struct_member_reduction_opportunity.h"
+
+namespace spvtools {
+namespace reduce {
+
+std::vector<std::unique_ptr<ReductionOpportunity>>
+RemoveUnusedStructMemberReductionOpportunityFinder::GetAvailableOpportunities(
+    opt::IRContext* context) const {
+  std::vector<std::unique_ptr<ReductionOpportunity>> result;
+
+  // We track those struct members that are never accessed.  We do this by
+  // associating a member index to all the structs that have this member index
+  // but do not use it.  This representation is designed to allow reduction
+  // opportunities to be provided in a useful manner, so that opportunities
+  // associated with the same struct are unlikely to be adjacent.
+  std::map<uint32_t, std::set<opt::Instruction*>> unused_member_to_structs;
+
+  // Consider every struct type in the module.
+  for (auto& type_or_value : context->types_values()) {
+    if (type_or_value.opcode() != SpvOpTypeStruct) {
+      continue;
+    }
+
+    // Initially, we assume that *every* member of the struct is unused.  We
+    // then refine this based on observed uses.
+    std::set<uint32_t> unused_members;
+    for (uint32_t i = 0; i < type_or_value.NumInOperands(); i++) {
+      unused_members.insert(i);
+    }
+
+    // A separate reduction pass deals with removal of names.  If a struct
+    // member is still named, we treat it as being used.
+    context->get_def_use_mgr()->ForEachUse(
+        &type_or_value,
+        [&unused_members](opt::Instruction* user, uint32_t /*operand_index*/) {
+          switch (user->opcode()) {
+            case SpvOpMemberName:
+              unused_members.erase(user->GetSingleWordInOperand(1));
+              break;
+            default:
+              break;
+          }
+        });
+
+    for (uint32_t member : unused_members) {
+      if (!unused_member_to_structs.count(member)) {
+        unused_member_to_structs.insert(
+            {member, std::set<opt::Instruction*>()});
+      }
+      unused_member_to_structs.at(member).insert(&type_or_value);
+    }
+  }
+
+  // We now go through every instruction that might index into a struct, and
+  // refine our tracking of which struct members are used based on the struct
+  // indexing we observe.  We cannot just go through all uses of a struct type
+  // because the type is not necessarily even referenced, e.g. when walking
+  // arrays of structs.
+  for (auto& function : *context->module()) {
+    for (auto& block : function) {
+      for (auto& inst : block) {
+        switch (inst.opcode()) {
+          // For each indexing operation we observe, we invoke a helper to
+          // remove from our map those struct indices that are found to be used.
+          // The way the helper is invoked depends on whether the instruction
+          // uses literal or id indices, and the offset into the instruction's
+          // input operands from which index operands are provided.
+          case SpvOpAccessChain:
+          case SpvOpInBoundsAccessChain: {
+            auto composite_type_id =
+                context->get_def_use_mgr()
+                    ->GetDef(context->get_def_use_mgr()
+                                 ->GetDef(inst.GetSingleWordInOperand(0))
+                                 ->type_id())
+                    ->GetSingleWordInOperand(1);
+            MarkAccessedMembersAsUsed(context, composite_type_id, 1, false,
+                                      inst, &unused_member_to_structs);
+          } break;
+          case SpvOpPtrAccessChain:
+          case SpvOpInBoundsPtrAccessChain: {
+            auto composite_type_id =
+                context->get_def_use_mgr()
+                    ->GetDef(context->get_def_use_mgr()
+                                 ->GetDef(inst.GetSingleWordInOperand(1))
+                                 ->type_id())
+                    ->GetSingleWordInOperand(1);
+            MarkAccessedMembersAsUsed(context, composite_type_id, 2, false,
+                                      inst, &unused_member_to_structs);
+          } break;
+          case SpvOpCompositeExtract: {
+            auto composite_type_id =
+                context->get_def_use_mgr()
+                    ->GetDef(inst.GetSingleWordInOperand(0))
+                    ->type_id();
+            MarkAccessedMembersAsUsed(context, composite_type_id, 1, true, inst,
+                                      &unused_member_to_structs);
+          } break;
+          case SpvOpCompositeInsert: {
+            auto composite_type_id =
+                context->get_def_use_mgr()
+                    ->GetDef(inst.GetSingleWordInOperand(1))
+                    ->type_id();
+            MarkAccessedMembersAsUsed(context, composite_type_id, 2, true, inst,
+                                      &unused_member_to_structs);
+          } break;
+          default:
+            break;
+        }
+      }
+    }
+  }
+
+  // We now know those struct indices that are unused, and we make a reduction
+  // opportunity for each of them. By mapping each relevant member index to the
+  // structs in which it is unused, we will group all opportunities to remove
+  // member k of a struct (for some k) together.  This reduces the likelihood
+  // that opportunities to remove members from the same struct will be adjacent,
+  // which is good because such opportunities mutually disable one another.
+  for (auto& entry : unused_member_to_structs) {
+    for (auto struct_type : entry.second) {
+      result.push_back(MakeUnique<RemoveStructMemberReductionOpportunity>(
+          struct_type, entry.first));
+    }
+  }
+  return result;
+}
+
+void RemoveUnusedStructMemberReductionOpportunityFinder::
+    MarkAccessedMembersAsUsed(
+        opt::IRContext* context, uint32_t composite_type_id,
+        uint32_t first_index_in_operand, bool literal_indices,
+        const opt::Instruction& composite_access_instruction,
+        std::map<uint32_t, std::set<opt::Instruction*>>*
+            unused_member_to_structs) const {
+  uint32_t next_type = composite_type_id;
+  for (uint32_t i = first_index_in_operand;
+       i < composite_access_instruction.NumInOperands(); i++) {
+    auto type_inst = context->get_def_use_mgr()->GetDef(next_type);
+    switch (type_inst->opcode()) {
+      case SpvOpTypeArray:
+      case SpvOpTypeMatrix:
+      case SpvOpTypeRuntimeArray:
+      case SpvOpTypeVector:
+        next_type = type_inst->GetSingleWordInOperand(0);
+        break;
+      case SpvOpTypeStruct: {
+        uint32_t index_operand =
+            composite_access_instruction.GetSingleWordInOperand(i);
+        uint32_t member = literal_indices ? index_operand
+                                          : context->get_def_use_mgr()
+                                                ->GetDef(index_operand)
+                                                ->GetSingleWordInOperand(0);
+        // Remove the struct type from the struct types associated with this
+        // member index, but only if a set of struct types is known to be
+        // associated with this member index.
+        if (unused_member_to_structs->count(member)) {
+          unused_member_to_structs->at(member).erase(type_inst);
+        }
+        next_type = type_inst->GetSingleWordInOperand(member);
+      } break;
+      default:
+        assert(0 && "Unknown composite type.");
+        break;
+    }
+  }
+}
+
+std::string RemoveUnusedStructMemberReductionOpportunityFinder::GetName()
+    const {
+  return "RemoveUnusedStructMemberReductionOpportunityFinder";
+}
+
+}  // namespace reduce
+}  // namespace spvtools

+ 61 - 0
3rdparty/spirv-tools/source/reduce/remove_unused_struct_member_reduction_opportunity_finder.h

@@ -0,0 +1,61 @@
+// Copyright (c) 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SOURCE_REDUCE_REMOVE_UNUSED_STRUCT_MEMBER_REDUCTION_OPPORTUNITY_FINDER_H_
+#define SOURCE_REDUCE_REMOVE_UNUSED_STRUCT_MEMBER_REDUCTION_OPPORTUNITY_FINDER_H_
+
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "source/reduce/reduction_opportunity_finder.h"
+
+namespace spvtools {
+namespace reduce {
+
+// A finder for opportunities to remove struct members that are not explicitly
+// used by extract, insert or access chain instructions.
+class RemoveUnusedStructMemberReductionOpportunityFinder
+    : public ReductionOpportunityFinder {
+ public:
+  RemoveUnusedStructMemberReductionOpportunityFinder() = default;
+
+  ~RemoveUnusedStructMemberReductionOpportunityFinder() override = default;
+
+  // Returns a human-readable name identifying this finder.
+  std::string GetName() const final;
+
+  // Returns opportunities to remove struct members that no instruction in
+  // |context| accesses.
+  std::vector<std::unique_ptr<ReductionOpportunity>> GetAvailableOpportunities(
+      opt::IRContext* context) const final;
+
+ private:
+  // A helper method to update |unused_member_to_structs| by removing from it
+  // all struct member accesses that take place in
+  // |composite_access_instruction|.
+  //
+  // |composite_type_id| is the type of the root object indexed into by the
+  // instruction.
+  //
+  // |first_index_in_operand| indicates where in the input operands the
+  // sequence of indices begins.
+  //
+  // |literal_indices| indicates whether indices are literals (true) or ids
+  // (false).
+  void MarkAccessedMembersAsUsed(
+      opt::IRContext* context, uint32_t composite_type_id,
+      uint32_t first_index_in_operand, bool literal_indices,
+      const opt::Instruction& composite_access_instruction,
+      std::map<uint32_t, std::set<opt::Instruction*>>* unused_member_to_structs)
+      const;
+};
+
+}  // namespace reduce
+}  // namespace spvtools
+
+#endif  // SOURCE_REDUCE_REMOVE_UNUSED_STRUCT_MEMBER_REDUCTION_OPPORTUNITY_FINDER_H_

+ 24 - 2
3rdparty/spirv-tools/source/val/validate_scopes.cpp

@@ -230,11 +230,33 @@ spv_result_t ValidateMemoryScope(ValidationState_t& _, const Instruction* inst,
     if ((_.context()->target_env == SPV_ENV_VULKAN_1_1 ||
          _.context()->target_env == SPV_ENV_VULKAN_1_2) &&
         value != SpvScopeDevice && value != SpvScopeWorkgroup &&
-        value != SpvScopeSubgroup && value != SpvScopeInvocation) {
+        value != SpvScopeSubgroup && value != SpvScopeInvocation &&
+        value != SpvScopeShaderCallKHR) {
       return _.diag(SPV_ERROR_INVALID_DATA, inst)
              << spvOpcodeString(opcode)
              << ": in Vulkan 1.1 and 1.2 environment Memory Scope is limited "
-             << "to Device, Workgroup and Invocation";
+             << "to Device, Workgroup, Invocation, and ShaderCall";
+    }
+
+    if (value == SpvScopeShaderCallKHR) {
+      _.function(inst->function()->id())
+          ->RegisterExecutionModelLimitation(
+              [](SpvExecutionModel model, std::string* message) {
+                if (model != SpvExecutionModelRayGenerationKHR &&
+                    model != SpvExecutionModelIntersectionKHR &&
+                    model != SpvExecutionModelAnyHitKHR &&
+                    model != SpvExecutionModelClosestHitKHR &&
+                    model != SpvExecutionModelMissKHR &&
+                    model != SpvExecutionModelCallableKHR) {
+                  if (message) {
+                    *message =
+                        "ShaderCallKHR Memory Scope requires a ray tracing "
+                        "execution model";
+                  }
+                  return false;
+                }
+                return true;
+              });
     }
   }