amd_ext_to_khr.cpp 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947
  1. // Copyright (c) 2019 Google LLC.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #include "source/opt/amd_ext_to_khr.h"
  15. #include <set>
  16. #include <string>
  17. #include "ir_builder.h"
  18. #include "source/opt/ir_context.h"
  19. #include "spv-amd-shader-ballot.insts.inc"
  20. #include "type_manager.h"
  21. namespace spvtools {
  22. namespace opt {
  23. namespace {
// Extended-instruction numbers for the SPV_AMD_shader_ballot extended
// instruction set.  The values must match the set's specification (see
// "spv-amd-shader-ballot.insts.inc").
enum AmdShaderBallotExtOpcodes {
  AmdShaderBallotSwizzleInvocationsAMD = 1,
  AmdShaderBallotSwizzleInvocationsMaskedAMD = 2,
  AmdShaderBallotWriteInvocationAMD = 3,
  AmdShaderBallotMbcntAMD = 4
};
// Extended-instruction numbers for the SPV_AMD_shader_trinary_minmax
// extended instruction set.  F/U/S prefixes select the float, unsigned, and
// signed variants of the three-operand min, max, and mid operations.
enum AmdShaderTrinaryMinMaxExtOpCodes {
  FMin3AMD = 1,
  UMin3AMD = 2,
  SMin3AMD = 3,
  FMax3AMD = 4,
  UMax3AMD = 5,
  SMax3AMD = 6,
  FMid3AMD = 7,
  UMid3AMD = 8,
  SMid3AMD = 9
};
// Extended-instruction numbers for the SPV_AMD_gcn_shader extended
// instruction set.  Note the enumerators are intentionally not in numeric
// order; the values are fixed by the extension specification.
enum AmdGcnShader { CubeFaceCoordAMD = 2, CubeFaceIndexAMD = 1, TimeAMD = 3 };
  42. analysis::Type* GetUIntType(IRContext* ctx) {
  43. analysis::Integer int_type(32, false);
  44. return ctx->get_type_mgr()->GetRegisteredType(&int_type);
  45. }
// Placeholder folding rule for extended instructions that are not handled
// yet.  Asserts in debug builds; in release builds it leaves the instruction
// untouched and reports that no folding happened.
bool NotImplementedYet(IRContext*, Instruction*,
                       const std::vector<const analysis::Constant*>&) {
  assert(false && "Not implemented.");
  return false;
}
  51. // Returns a folding rule that replaces |op(a,b,c)| by |op(op(a,b),c)|, where
  52. // |op| is either min or max. |opcode| is the binary opcode in the GLSLstd450
  53. // extended instruction set that corresponds to the trinary instruction being
  54. // replaced.
  55. template <GLSLstd450 opcode>
  56. bool ReplaceTrinaryMinMax(IRContext* ctx, Instruction* inst,
  57. const std::vector<const analysis::Constant*>&) {
  58. uint32_t glsl405_ext_inst_id =
  59. ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  60. if (glsl405_ext_inst_id == 0) {
  61. ctx->AddExtInstImport("GLSL.std.450");
  62. glsl405_ext_inst_id =
  63. ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  64. }
  65. InstructionBuilder ir_builder(
  66. ctx, inst,
  67. IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  68. uint32_t op1 = inst->GetSingleWordInOperand(2);
  69. uint32_t op2 = inst->GetSingleWordInOperand(3);
  70. uint32_t op3 = inst->GetSingleWordInOperand(4);
  71. Instruction* temp = ir_builder.AddNaryExtendedInstruction(
  72. inst->type_id(), glsl405_ext_inst_id, opcode, {op1, op2});
  73. Instruction::OperandList new_operands;
  74. new_operands.push_back({SPV_OPERAND_TYPE_ID, {glsl405_ext_inst_id}});
  75. new_operands.push_back({SPV_OPERAND_TYPE_EXTENSION_INSTRUCTION_NUMBER,
  76. {static_cast<uint32_t>(opcode)}});
  77. new_operands.push_back({SPV_OPERAND_TYPE_ID, {temp->result_id()}});
  78. new_operands.push_back({SPV_OPERAND_TYPE_ID, {op3}});
  79. inst->SetInOperands(std::move(new_operands));
  80. ctx->UpdateDefUse(inst);
  81. return true;
  82. }
  83. // Returns a folding rule that replaces |mid(a,b,c)| by |clamp(a, min(b,c),
  84. // max(b,c)|. The three parameters are the opcode that correspond to the min,
  85. // max, and clamp operations for the type of the instruction being replaced.
  86. template <GLSLstd450 min_opcode, GLSLstd450 max_opcode, GLSLstd450 clamp_opcode>
  87. bool ReplaceTrinaryMid(IRContext* ctx, Instruction* inst,
  88. const std::vector<const analysis::Constant*>&) {
  89. uint32_t glsl405_ext_inst_id =
  90. ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  91. if (glsl405_ext_inst_id == 0) {
  92. ctx->AddExtInstImport("GLSL.std.450");
  93. glsl405_ext_inst_id =
  94. ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  95. }
  96. InstructionBuilder ir_builder(
  97. ctx, inst,
  98. IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  99. uint32_t op1 = inst->GetSingleWordInOperand(2);
  100. uint32_t op2 = inst->GetSingleWordInOperand(3);
  101. uint32_t op3 = inst->GetSingleWordInOperand(4);
  102. Instruction* min = ir_builder.AddNaryExtendedInstruction(
  103. inst->type_id(), glsl405_ext_inst_id, static_cast<uint32_t>(min_opcode),
  104. {op2, op3});
  105. Instruction* max = ir_builder.AddNaryExtendedInstruction(
  106. inst->type_id(), glsl405_ext_inst_id, static_cast<uint32_t>(max_opcode),
  107. {op2, op3});
  108. Instruction::OperandList new_operands;
  109. new_operands.push_back({SPV_OPERAND_TYPE_ID, {glsl405_ext_inst_id}});
  110. new_operands.push_back({SPV_OPERAND_TYPE_EXTENSION_INSTRUCTION_NUMBER,
  111. {static_cast<uint32_t>(clamp_opcode)}});
  112. new_operands.push_back({SPV_OPERAND_TYPE_ID, {op1}});
  113. new_operands.push_back({SPV_OPERAND_TYPE_ID, {min->result_id()}});
  114. new_operands.push_back({SPV_OPERAND_TYPE_ID, {max->result_id()}});
  115. inst->SetInOperands(std::move(new_operands));
  116. ctx->UpdateDefUse(inst);
  117. return true;
  118. }
// Folding rule that replaces an OpGroup*NonUniformAMD instruction from the
// SPV_AMD_shader_ballot extension with the core SPIR-V instruction
// |new_opcode|, and adds the capability the core instruction requires.
// Only the opcode changes; the operand list is reused as-is.
template <SpvOp new_opcode>
bool ReplaceGroupNonuniformOperationOpCode(
    IRContext* ctx, Instruction* inst,
    const std::vector<const analysis::Constant*>&) {
  // Debug-build sanity check: the target opcode must be one of the core
  // group non-uniform arithmetic instructions.
  switch (new_opcode) {
    case SpvOpGroupNonUniformIAdd:
    case SpvOpGroupNonUniformFAdd:
    case SpvOpGroupNonUniformUMin:
    case SpvOpGroupNonUniformSMin:
    case SpvOpGroupNonUniformFMin:
    case SpvOpGroupNonUniformUMax:
    case SpvOpGroupNonUniformSMax:
    case SpvOpGroupNonUniformFMax:
      break;
    default:
      assert(
          false &&
          "Should be replacing with a group non uniform arithmetic operation.");
  }
  // Debug-build sanity check: the instruction being replaced must be one of
  // the AMD group non-uniform arithmetic instructions.
  switch (inst->opcode()) {
    case SpvOpGroupIAddNonUniformAMD:
    case SpvOpGroupFAddNonUniformAMD:
    case SpvOpGroupUMinNonUniformAMD:
    case SpvOpGroupSMinNonUniformAMD:
    case SpvOpGroupFMinNonUniformAMD:
    case SpvOpGroupUMaxNonUniformAMD:
    case SpvOpGroupSMaxNonUniformAMD:
    case SpvOpGroupFMaxNonUniformAMD:
      break;
    default:
      assert(false &&
             "Should be replacing a group non uniform arithmetic operation.");
  }
  // The core instruction needs the GroupNonUniformArithmetic capability.
  ctx->AddCapability(SpvCapabilityGroupNonUniformArithmetic);
  inst->SetOpcode(new_opcode);
  return true;
}
// Folding rule that replaces the SwizzleInvocationsAMD extended
// instruction in the SPV_AMD_shader_ballot extension.
//
// The instruction
//
// %offset = OpConstantComposite %v4uint %x %y %z %w
// %result = OpExtInst %type %1 SwizzleInvocationsAMD %data %offset
//
// is replaced with
//
// potentially new constants and types
//
// clang-format off
// %uint_max = OpConstant %uint 0xFFFFFFFF
// %v4uint = OpTypeVector %uint 4
// %ballot_value = OpConstantComposite %v4uint %uint_max %uint_max %uint_max %uint_max
// %null = OpConstantNull %type
// clang-format on
//
// and the following code in the function body
//
// clang-format off
// %id = OpLoad %uint %SubgroupLocalInvocationId
// %quad_idx = OpBitwiseAnd %uint %id %uint_3
// %quad_ldr = OpBitwiseXor %uint %id %quad_idx
// %my_offset = OpVectorExtractDynamic %uint %offset %quad_idx
// %target_inv = OpIAdd %uint %quad_ldr %my_offset
// %is_active = OpGroupNonUniformBallotBitExtract %bool %uint_3 %ballot_value %target_inv
// %shuffle = OpGroupNonUniformShuffle %type %uint_3 %data %target_inv
// %result = OpSelect %type %is_active %shuffle %null
// clang-format on
//
// Also adding the capabilities and builtins that are needed.
bool ReplaceSwizzleInvocations(IRContext* ctx, Instruction* inst,
                               const std::vector<const analysis::Constant*>&) {
  analysis::TypeManager* type_mgr = ctx->get_type_mgr();
  analysis::ConstantManager* const_mgr = ctx->get_constant_mgr();

  // The lowering uses ballot and shuffle group operations.
  ctx->AddExtension("SPV_KHR_shader_ballot");
  ctx->AddCapability(SpvCapabilityGroupNonUniformBallot);
  ctx->AddCapability(SpvCapabilityGroupNonUniformShuffle);

  InstructionBuilder ir_builder(
      ctx, inst,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);

  // In-operands 2 and 3 are |data| and |offset| (0 and 1 are the set id and
  // extended opcode).
  uint32_t data_id = inst->GetSingleWordInOperand(2);
  uint32_t offset_id = inst->GetSingleWordInOperand(3);

  // Get the subgroup invocation id.
  uint32_t var_id =
      ctx->GetBuiltinInputVarId(SpvBuiltInSubgroupLocalInvocationId);
  assert(var_id != 0 && "Could not get SubgroupLocalInvocationId variable.");
  Instruction* var_inst = ctx->get_def_use_mgr()->GetDef(var_id);
  Instruction* var_ptr_type =
      ctx->get_def_use_mgr()->GetDef(var_inst->type_id());
  // The pointee type (uint) is in-operand 1 of the OpTypePointer.
  uint32_t uint_type_id = var_ptr_type->GetSingleWordInOperand(1);

  Instruction* id = ir_builder.AddLoad(uint_type_id, var_id);

  uint32_t quad_mask = ir_builder.GetUintConstantId(3);

  // This gives the offset in the group of 4 of this invocation.
  Instruction* quad_idx = ir_builder.AddBinaryOp(uint_type_id, SpvOpBitwiseAnd,
                                                 id->result_id(), quad_mask);

  // Get the invocation id of the first invocation in the group of 4.
  Instruction* quad_ldr = ir_builder.AddBinaryOp(
      uint_type_id, SpvOpBitwiseXor, id->result_id(), quad_idx->result_id());

  // Get the offset of the target invocation from the offset vector.
  Instruction* my_offset =
      ir_builder.AddBinaryOp(uint_type_id, SpvOpVectorExtractDynamic, offset_id,
                             quad_idx->result_id());

  // Determine the index of the invocation to read from.
  Instruction* target_inv = ir_builder.AddBinaryOp(
      uint_type_id, SpvOpIAdd, quad_ldr->result_id(), my_offset->result_id());

  // Do the group operations.  The all-ones ballot covers every invocation,
  // so the bit-extract tests whether |target_inv| is an active invocation.
  uint32_t uint_max_id = ir_builder.GetUintConstantId(0xFFFFFFFF);
  uint32_t subgroup_scope = ir_builder.GetUintConstantId(SpvScopeSubgroup);
  const auto* ballot_value_const = const_mgr->GetConstant(
      type_mgr->GetUIntVectorType(4),
      {uint_max_id, uint_max_id, uint_max_id, uint_max_id});
  Instruction* ballot_value =
      const_mgr->GetDefiningInstruction(ballot_value_const);
  Instruction* is_active = ir_builder.AddNaryOp(
      type_mgr->GetBoolTypeId(), SpvOpGroupNonUniformBallotBitExtract,
      {subgroup_scope, ballot_value->result_id(), target_inv->result_id()});
  Instruction* shuffle =
      ir_builder.AddNaryOp(inst->type_id(), SpvOpGroupNonUniformShuffle,
                           {subgroup_scope, data_id, target_inv->result_id()});

  // Create the null constant to use in the select.
  const auto* null = const_mgr->GetConstant(type_mgr->GetType(inst->type_id()),
                                            std::vector<uint32_t>());
  Instruction* null_inst = const_mgr->GetDefiningInstruction(null);

  // Build the select: inactive target invocations yield the null value.
  inst->SetOpcode(SpvOpSelect);
  Instruction::OperandList new_operands;
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {is_active->result_id()}});
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {shuffle->result_id()}});
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {null_inst->result_id()}});
  inst->SetInOperands(std::move(new_operands));
  ctx->UpdateDefUse(inst);
  return true;
}
// Folding rule that replaces the SwizzleInvocationsMaskedAMD
// extended instruction in the SPV_AMD_shader_ballot extension.
//
// The instruction
//
// %mask = OpConstantComposite %v3uint %uint_x %uint_y %uint_z
// %result = OpExtInst %uint %1 SwizzleInvocationsMaskedAMD %data %mask
//
// is replaced with
//
// potentially new constants and types
//
// clang-format off
// %uint_mask_extend = OpConstant %uint 0xFFFFFFE0
// %uint_max = OpConstant %uint 0xFFFFFFFF
// %v4uint = OpTypeVector %uint 4
// %ballot_value = OpConstantComposite %v4uint %uint_max %uint_max %uint_max %uint_max
// clang-format on
//
// and the following code in the function body
//
// clang-format off
// %id = OpLoad %uint %SubgroupLocalInvocationId
// %and_mask = OpBitwiseOr %uint %uint_x %uint_mask_extend
// %and = OpBitwiseAnd %uint %id %and_mask
// %or = OpBitwiseOr %uint %and %uint_y
// %target_inv = OpBitwiseXor %uint %or %uint_z
// %is_active = OpGroupNonUniformBallotBitExtract %bool %uint_3 %ballot_value %target_inv
// %shuffle = OpGroupNonUniformShuffle %type %uint_3 %data %target_inv
// %result = OpSelect %type %is_active %shuffle %uint_0
// clang-format on
//
// Also adding the capabilities and builtins that are needed.
bool ReplaceSwizzleInvocationsMasked(
    IRContext* ctx, Instruction* inst,
    const std::vector<const analysis::Constant*>&) {
  analysis::TypeManager* type_mgr = ctx->get_type_mgr();
  analysis::DefUseManager* def_use_mgr = ctx->get_def_use_mgr();
  analysis::ConstantManager* const_mgr = ctx->get_constant_mgr();

  // The lowering uses ballot and shuffle group operations.
  ctx->AddCapability(SpvCapabilityGroupNonUniformBallot);
  ctx->AddCapability(SpvCapabilityGroupNonUniformShuffle);

  InstructionBuilder ir_builder(
      ctx, inst,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);

  // Get the operands to inst, and the components of the mask.  The mask must
  // be a compile-time vector constant of 3 components.
  uint32_t data_id = inst->GetSingleWordInOperand(2);

  Instruction* mask_inst = def_use_mgr->GetDef(inst->GetSingleWordInOperand(3));
  assert(mask_inst->opcode() == SpvOpConstantComposite &&
         "The mask is suppose to be a vector constant.");
  assert(mask_inst->NumInOperands() == 3 &&
         "The mask is suppose to have 3 components.");

  uint32_t uint_x = mask_inst->GetSingleWordInOperand(0);
  uint32_t uint_y = mask_inst->GetSingleWordInOperand(1);
  uint32_t uint_z = mask_inst->GetSingleWordInOperand(2);

  // Get the subgroup invocation id.
  uint32_t var_id =
      ctx->GetBuiltinInputVarId(SpvBuiltInSubgroupLocalInvocationId);
  ctx->AddExtension("SPV_KHR_shader_ballot");
  assert(var_id != 0 && "Could not get SubgroupLocalInvocationId variable.");
  Instruction* var_inst = ctx->get_def_use_mgr()->GetDef(var_id);
  Instruction* var_ptr_type =
      ctx->get_def_use_mgr()->GetDef(var_inst->type_id());
  // The pointee type (uint) is in-operand 1 of the OpTypePointer.
  uint32_t uint_type_id = var_ptr_type->GetSingleWordInOperand(1);

  Instruction* id = ir_builder.AddLoad(uint_type_id, var_id);

  // Do the bitwise operations.  The x component is extended with ones above
  // bit 5 so the AND only keeps the low 32 invocation-id bits it selects.
  uint32_t mask_extended = ir_builder.GetUintConstantId(0xFFFFFFE0);
  Instruction* and_mask = ir_builder.AddBinaryOp(uint_type_id, SpvOpBitwiseOr,
                                                 uint_x, mask_extended);
  Instruction* and_result = ir_builder.AddBinaryOp(
      uint_type_id, SpvOpBitwiseAnd, id->result_id(), and_mask->result_id());
  Instruction* or_result = ir_builder.AddBinaryOp(
      uint_type_id, SpvOpBitwiseOr, and_result->result_id(), uint_y);
  Instruction* target_inv = ir_builder.AddBinaryOp(
      uint_type_id, SpvOpBitwiseXor, or_result->result_id(), uint_z);

  // Do the group operations.  The all-ones ballot covers every invocation,
  // so the bit-extract tests whether |target_inv| is an active invocation.
  uint32_t uint_max_id = ir_builder.GetUintConstantId(0xFFFFFFFF);
  uint32_t subgroup_scope = ir_builder.GetUintConstantId(SpvScopeSubgroup);
  const auto* ballot_value_const = const_mgr->GetConstant(
      type_mgr->GetUIntVectorType(4),
      {uint_max_id, uint_max_id, uint_max_id, uint_max_id});
  Instruction* ballot_value =
      const_mgr->GetDefiningInstruction(ballot_value_const);
  Instruction* is_active = ir_builder.AddNaryOp(
      type_mgr->GetBoolTypeId(), SpvOpGroupNonUniformBallotBitExtract,
      {subgroup_scope, ballot_value->result_id(), target_inv->result_id()});
  Instruction* shuffle =
      ir_builder.AddNaryOp(inst->type_id(), SpvOpGroupNonUniformShuffle,
                           {subgroup_scope, data_id, target_inv->result_id()});

  // Create the null constant to use in the select.
  const auto* null = const_mgr->GetConstant(type_mgr->GetType(inst->type_id()),
                                            std::vector<uint32_t>());
  Instruction* null_inst = const_mgr->GetDefiningInstruction(null);

  // Build the select: inactive target invocations yield the null value.
  inst->SetOpcode(SpvOpSelect);
  Instruction::OperandList new_operands;
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {is_active->result_id()}});
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {shuffle->result_id()}});
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {null_inst->result_id()}});
  inst->SetInOperands(std::move(new_operands));
  ctx->UpdateDefUse(inst);
  return true;
}
  357. // Returns a folding rule that will replace the WriteInvocationAMD extended
  358. // instruction in the SPV_AMD_shader_ballot extension.
  359. //
  360. // The instruction
  361. //
  362. // clang-format off
  363. // %result = OpExtInst %type %1 WriteInvocationAMD %input_value %write_value %invocation_index
  364. // clang-format on
  365. //
  366. // with
  367. //
  368. // %id = OpLoad %uint %SubgroupLocalInvocationId
  369. // %cmp = OpIEqual %bool %id %invocation_index
  370. // %result = OpSelect %type %cmp %write_value %input_value
  371. //
  372. // Also adding the capabilities and builtins that are needed.
  373. bool ReplaceWriteInvocation(IRContext* ctx, Instruction* inst,
  374. const std::vector<const analysis::Constant*>&) {
  375. uint32_t var_id =
  376. ctx->GetBuiltinInputVarId(SpvBuiltInSubgroupLocalInvocationId);
  377. ctx->AddCapability(SpvCapabilitySubgroupBallotKHR);
  378. ctx->AddExtension("SPV_KHR_shader_ballot");
  379. assert(var_id != 0 && "Could not get SubgroupLocalInvocationId variable.");
  380. Instruction* var_inst = ctx->get_def_use_mgr()->GetDef(var_id);
  381. Instruction* var_ptr_type =
  382. ctx->get_def_use_mgr()->GetDef(var_inst->type_id());
  383. InstructionBuilder ir_builder(
  384. ctx, inst,
  385. IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  386. Instruction* t =
  387. ir_builder.AddLoad(var_ptr_type->GetSingleWordInOperand(1), var_id);
  388. analysis::Bool bool_type;
  389. uint32_t bool_type_id = ctx->get_type_mgr()->GetTypeInstruction(&bool_type);
  390. Instruction* cmp =
  391. ir_builder.AddBinaryOp(bool_type_id, SpvOpIEqual, t->result_id(),
  392. inst->GetSingleWordInOperand(4));
  393. // Build a select.
  394. inst->SetOpcode(SpvOpSelect);
  395. Instruction::OperandList new_operands;
  396. new_operands.push_back({SPV_OPERAND_TYPE_ID, {cmp->result_id()}});
  397. new_operands.push_back(inst->GetInOperand(3));
  398. new_operands.push_back(inst->GetInOperand(2));
  399. inst->SetInOperands(std::move(new_operands));
  400. ctx->UpdateDefUse(inst);
  401. return true;
  402. }
// Folding rule that replaces the MbcntAMD extended instruction in
// the SPV_AMD_shader_ballot extension.
//
// The instruction
//
// %result = OpExtInst %uint %1 MbcntAMD %mask
//
// is replaced with
//
// Get SubgroupLtMask and convert the first 64-bits into a uint64_t because
// AMD's shader compiler expects a 64-bit integer mask.
//
// %var = OpLoad %v4uint %SubgroupLtMaskKHR
// %shuffle = OpVectorShuffle %v2uint %var %var 0 1
// %cast = OpBitcast %ulong %shuffle
//
// Perform the mask and count the bits.
//
// %and = OpBitwiseAnd %ulong %cast %mask
// %result = OpBitCount %uint %and
//
// Also adding the capabilities and builtins that are needed.
bool ReplaceMbcnt(IRContext* context, Instruction* inst,
                  const std::vector<const analysis::Constant*>&) {
  analysis::TypeManager* type_mgr = context->get_type_mgr();
  analysis::DefUseManager* def_use_mgr = context->get_def_use_mgr();

  // Get (creating if needed) the SubgroupLtMask builtin input variable.
  uint32_t var_id = context->GetBuiltinInputVarId(SpvBuiltInSubgroupLtMask);
  assert(var_id != 0 && "Could not get SubgroupLtMask variable.");
  context->AddCapability(SpvCapabilityGroupNonUniformBallot);
  Instruction* var_inst = def_use_mgr->GetDef(var_id);
  Instruction* var_ptr_type = def_use_mgr->GetDef(var_inst->type_id());
  // The pointee type is in-operand 1 of the OpTypePointer.
  Instruction* var_type =
      def_use_mgr->GetDef(var_ptr_type->GetSingleWordInOperand(1));
  assert(var_type->opcode() == SpvOpTypeVector &&
         "Variable is suppose to be a vector of 4 ints");

  // Get the type for the shuffle: a 2-component uint vector holding the low
  // 64 bits of the ballot mask.
  analysis::Vector temp_type(GetUIntType(context), 2);
  const analysis::Type* shuffle_type =
      context->get_type_mgr()->GetRegisteredType(&temp_type);
  uint32_t shuffle_type_id = type_mgr->GetTypeInstruction(shuffle_type);

  uint32_t mask_id = inst->GetSingleWordInOperand(2);
  Instruction* mask_inst = def_use_mgr->GetDef(mask_id);

  // Testing with amd's shader compiler shows that a 64-bit mask is expected.
  assert(type_mgr->GetType(mask_inst->type_id())->AsInteger() != nullptr);
  assert(type_mgr->GetType(mask_inst->type_id())->AsInteger()->width() == 64);

  InstructionBuilder ir_builder(
      context, inst,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  Instruction* load = ir_builder.AddLoad(var_type->result_id(), var_id);
  // Keep components 0 and 1 (the low 64 bits) and bitcast them to the
  // 64-bit integer type of |mask|.
  Instruction* shuffle = ir_builder.AddVectorShuffle(
      shuffle_type_id, load->result_id(), load->result_id(), {0, 1});
  Instruction* bitcast = ir_builder.AddUnaryOp(
      mask_inst->type_id(), SpvOpBitcast, shuffle->result_id());
  Instruction* t = ir_builder.AddBinaryOp(mask_inst->type_id(), SpvOpBitwiseAnd,
                                          bitcast->result_id(), mask_id);

  // Rewrite |inst| as a bit count of the masked ballot.
  inst->SetOpcode(SpvOpBitCount);
  inst->SetInOperands({{SPV_OPERAND_TYPE_ID, {t->result_id()}}});
  context->UpdateDefUse(inst);
  return true;
}
// A folding rule that will replace the CubeFaceCoordAMD extended
// instruction in the SPV_AMD_gcn_shader extension. Returns true if the
// folding is successful.
//
// The instruction
//
// %result = OpExtInst %v2float %1 CubeFaceCoordAMD %input
//
// is replaced with
//
// %x = OpCompositeExtract %float %input 0
// %y = OpCompositeExtract %float %input 1
// %z = OpCompositeExtract %float %input 2
// %nx = OpFNegate %float %x
// %ny = OpFNegate %float %y
// %nz = OpFNegate %float %z
// %ax = OpExtInst %float %n_1 FAbs %x
// %ay = OpExtInst %float %n_1 FAbs %y
// %az = OpExtInst %float %n_1 FAbs %z
// %amax_x_y = OpExtInst %float %n_1 FMax %ay %ax
// %amax = OpExtInst %float %n_1 FMax %az %amax_x_y
// %cubema = OpFMul %float %float_2 %amax
// %is_z_max = OpFOrdGreaterThanEqual %bool %az %amax_x_y
// %not_is_z_max = OpLogicalNot %bool %is_z_max
// %y_gt_x = OpFOrdGreaterThanEqual %bool %ay %ax
// %is_y_max = OpLogicalAnd %bool %not_is_z_max %y_gt_x
// %is_z_neg = OpFOrdLessThan %bool %z %float_0
// %cubesc_case_1 = OpSelect %float %is_z_neg %nx %x
// %is_x_neg = OpFOrdLessThan %bool %x %float_0
// %cubesc_case_2 = OpSelect %float %is_x_neg %z %nz
// %sel = OpSelect %float %is_y_max %x %cubesc_case_2
// %cubesc = OpSelect %float %is_z_max %cubesc_case_1 %sel
// %is_y_neg = OpFOrdLessThan %bool %y %float_0
// %cubetc_case_1 = OpSelect %float %is_y_neg %nz %z
// %cubetc = OpSelect %float %is_y_max %cubetc_case_1 %ny
// %cube = OpCompositeConstruct %v2float %cubesc %cubetc
// %denom = OpCompositeConstruct %v2float %cubema %cubema
// %div = OpFDiv %v2float %cube %denom
// %result = OpFAdd %v2float %div %const
//
// Also adding the capabilities and builtins that are needed.
bool ReplaceCubeFaceCoord(IRContext* ctx, Instruction* inst,
                          const std::vector<const analysis::Constant*>&) {
  analysis::TypeManager* type_mgr = ctx->get_type_mgr();
  analysis::ConstantManager* const_mgr = ctx->get_constant_mgr();

  // Types used throughout: scalar float, the v2float result type, and bool.
  uint32_t float_type_id = type_mgr->GetFloatTypeId();
  const analysis::Type* v2_float_type = type_mgr->GetFloatVectorType(2);
  uint32_t v2_float_type_id = type_mgr->GetId(v2_float_type);
  uint32_t bool_id = type_mgr->GetBoolTypeId();

  InstructionBuilder ir_builder(
      ctx, inst,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  uint32_t input_id = inst->GetSingleWordInOperand(2);
  // Make sure GLSL.std.450 is imported; it provides FAbs and FMax.
  uint32_t glsl405_ext_inst_id =
      ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  if (glsl405_ext_inst_id == 0) {
    ctx->AddExtInstImport("GLSL.std.450");
    glsl405_ext_inst_id =
        ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  }

  // Get the constants that will be used.
  uint32_t f0_const_id = const_mgr->GetFloatConst(0.0);
  uint32_t f2_const_id = const_mgr->GetFloatConst(2.0);
  uint32_t f0_5_const_id = const_mgr->GetFloatConst(0.5);
  // (0.5, 0.5) vector used as the final bias term.
  const analysis::Constant* vec_const =
      const_mgr->GetConstant(v2_float_type, {f0_5_const_id, f0_5_const_id});
  uint32_t vec_const_id =
      const_mgr->GetDefiningInstruction(vec_const)->result_id();

  // Extract the input values.
  Instruction* x = ir_builder.AddCompositeExtract(float_type_id, input_id, {0});
  Instruction* y = ir_builder.AddCompositeExtract(float_type_id, input_id, {1});
  Instruction* z = ir_builder.AddCompositeExtract(float_type_id, input_id, {2});

  // Negate the input values.
  Instruction* nx =
      ir_builder.AddUnaryOp(float_type_id, SpvOpFNegate, x->result_id());
  Instruction* ny =
      ir_builder.AddUnaryOp(float_type_id, SpvOpFNegate, y->result_id());
  Instruction* nz =
      ir_builder.AddUnaryOp(float_type_id, SpvOpFNegate, z->result_id());

  // Get the absolute values of the inputs.
  Instruction* ax = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FAbs, {x->result_id()});
  Instruction* ay = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FAbs, {y->result_id()});
  Instruction* az = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FAbs, {z->result_id()});

  // Find which values are negative. Used in later computations.
  Instruction* is_z_neg = ir_builder.AddBinaryOp(bool_id, SpvOpFOrdLessThan,
                                                 z->result_id(), f0_const_id);
  Instruction* is_y_neg = ir_builder.AddBinaryOp(bool_id, SpvOpFOrdLessThan,
                                                 y->result_id(), f0_const_id);
  Instruction* is_x_neg = ir_builder.AddBinaryOp(bool_id, SpvOpFOrdLessThan,
                                                 x->result_id(), f0_const_id);

  // Compute cubema: twice the largest absolute component.
  Instruction* amax_x_y = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FMax,
      {ax->result_id(), ay->result_id()});
  Instruction* amax = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FMax,
      {az->result_id(), amax_x_y->result_id()});
  Instruction* cubema = ir_builder.AddBinaryOp(float_type_id, SpvOpFMul,
                                               f2_const_id, amax->result_id());

  // Do the comparisons needed for computing cubesc and cubetc.
  Instruction* is_z_max =
      ir_builder.AddBinaryOp(bool_id, SpvOpFOrdGreaterThanEqual,
                             az->result_id(), amax_x_y->result_id());
  Instruction* not_is_z_max =
      ir_builder.AddUnaryOp(bool_id, SpvOpLogicalNot, is_z_max->result_id());
  Instruction* y_gr_x = ir_builder.AddBinaryOp(
      bool_id, SpvOpFOrdGreaterThanEqual, ay->result_id(), ax->result_id());
  Instruction* is_y_max = ir_builder.AddBinaryOp(
      bool_id, SpvOpLogicalAnd, not_is_z_max->result_id(), y_gr_x->result_id());

  // Select the correct value for cubesc.
  Instruction* cubesc_case_1 = ir_builder.AddSelect(
      float_type_id, is_z_neg->result_id(), nx->result_id(), x->result_id());
  Instruction* cubesc_case_2 = ir_builder.AddSelect(
      float_type_id, is_x_neg->result_id(), z->result_id(), nz->result_id());
  Instruction* sel =
      ir_builder.AddSelect(float_type_id, is_y_max->result_id(), x->result_id(),
                           cubesc_case_2->result_id());
  Instruction* cubesc =
      ir_builder.AddSelect(float_type_id, is_z_max->result_id(),
                           cubesc_case_1->result_id(), sel->result_id());

  // Select the correct value for cubetc.
  Instruction* cubetc_case_1 = ir_builder.AddSelect(
      float_type_id, is_y_neg->result_id(), nz->result_id(), z->result_id());
  Instruction* cubetc =
      ir_builder.AddSelect(float_type_id, is_y_max->result_id(),
                           cubetc_case_1->result_id(), ny->result_id());

  // Do the division.
  Instruction* cube = ir_builder.AddCompositeConstruct(
      v2_float_type_id, {cubesc->result_id(), cubetc->result_id()});
  Instruction* denom = ir_builder.AddCompositeConstruct(
      v2_float_type_id, {cubema->result_id(), cubema->result_id()});
  Instruction* div = ir_builder.AddBinaryOp(
      v2_float_type_id, SpvOpFDiv, cube->result_id(), denom->result_id());

  // Get the final result by adding 0.5 to |div|.
  inst->SetOpcode(SpvOpFAdd);
  Instruction::OperandList new_operands;
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {div->result_id()}});
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {vec_const_id}});
  inst->SetInOperands(std::move(new_operands));
  ctx->UpdateDefUse(inst);
  return true;
}
// A folding rule that will replace the CubeFaceIndexAMD extended
// instruction in the SPV_AMD_gcn_shader extension.  Returns true if the
// folding is successful.
//
// The instruction
//
// %result = OpExtInst %float %1 CubeFaceIndexAMD %input
//
// is replaced with
//
// %x = OpCompositeExtract %float %input 0
// %y = OpCompositeExtract %float %input 1
// %z = OpCompositeExtract %float %input 2
// %ax = OpExtInst %float %n_1 FAbs %x
// %ay = OpExtInst %float %n_1 FAbs %y
// %az = OpExtInst %float %n_1 FAbs %z
// %is_z_neg = OpFOrdLessThan %bool %z %float_0
// %is_y_neg = OpFOrdLessThan %bool %y %float_0
// %is_x_neg = OpFOrdLessThan %bool %x %float_0
// %amax_x_y = OpExtInst %float %n_1 FMax %ay %ax
// %is_z_max = OpFOrdGreaterThanEqual %bool %az %amax_x_y
// %y_gt_x = OpFOrdGreaterThanEqual %bool %ay %ax
// %case_z = OpSelect %float %is_z_neg %float_5 %float_4
// %case_y = OpSelect %float %is_y_neg %float_3 %float_2
// %case_x = OpSelect %float %is_x_neg %float_1 %float_0
// %sel = OpSelect %float %y_gt_x %case_y %case_x
// %result = OpSelect %float %is_z_max %case_z %sel
//
// The GLSL.std.450 extended instruction import is added on demand if the
// module does not already have it.
bool ReplaceCubeFaceIndex(IRContext* ctx, Instruction* inst,
                          const std::vector<const analysis::Constant*>&) {
  analysis::TypeManager* type_mgr = ctx->get_type_mgr();
  analysis::ConstantManager* const_mgr = ctx->get_constant_mgr();
  uint32_t float_type_id = type_mgr->GetFloatTypeId();
  uint32_t bool_id = type_mgr->GetBoolTypeId();
  InstructionBuilder ir_builder(
      ctx, inst,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  // For an OpExtInst, in-operands 0 and 1 are the import set and extended
  // opcode; in-operand 2 is the first real argument (the direction vector).
  uint32_t input_id = inst->GetSingleWordInOperand(2);
  uint32_t glsl405_ext_inst_id =
      ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  if (glsl405_ext_inst_id == 0) {
    // The replacement code uses FAbs and FMax from GLSL.std.450, so make
    // sure the import exists.
    ctx->AddExtInstImport("GLSL.std.450");
    glsl405_ext_inst_id =
        ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  }
  // Get the constants that will be used.  The values 0 through 5 are the
  // face indexes that can be selected as the result.
  uint32_t f0_const_id = const_mgr->GetFloatConst(0.0);
  uint32_t f1_const_id = const_mgr->GetFloatConst(1.0);
  uint32_t f2_const_id = const_mgr->GetFloatConst(2.0);
  uint32_t f3_const_id = const_mgr->GetFloatConst(3.0);
  uint32_t f4_const_id = const_mgr->GetFloatConst(4.0);
  uint32_t f5_const_id = const_mgr->GetFloatConst(5.0);
  // Extract the input values.
  Instruction* x = ir_builder.AddCompositeExtract(float_type_id, input_id, {0});
  Instruction* y = ir_builder.AddCompositeExtract(float_type_id, input_id, {1});
  Instruction* z = ir_builder.AddCompositeExtract(float_type_id, input_id, {2});
  // Get the absolute values of the inputs.
  Instruction* ax = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FAbs, {x->result_id()});
  Instruction* ay = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FAbs, {y->result_id()});
  Instruction* az = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FAbs, {z->result_id()});
  // Find which values are negative. Used in later computations.
  Instruction* is_z_neg = ir_builder.AddBinaryOp(bool_id, SpvOpFOrdLessThan,
                                                 z->result_id(), f0_const_id);
  Instruction* is_y_neg = ir_builder.AddBinaryOp(bool_id, SpvOpFOrdLessThan,
                                                 y->result_id(), f0_const_id);
  Instruction* is_x_neg = ir_builder.AddBinaryOp(bool_id, SpvOpFOrdLessThan,
                                                 x->result_id(), f0_const_id);
  // Find the max value.
  Instruction* amax_x_y = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FMax,
      {ax->result_id(), ay->result_id()});
  Instruction* is_z_max =
      ir_builder.AddBinaryOp(bool_id, SpvOpFOrdGreaterThanEqual,
                             az->result_id(), amax_x_y->result_id());
  Instruction* y_gr_x = ir_builder.AddBinaryOp(
      bool_id, SpvOpFOrdGreaterThanEqual, ay->result_id(), ax->result_id());
  // Get the value for each case: z dominant -> 4 or 5, y dominant -> 2 or 3,
  // x dominant -> 0 or 1, with the larger index used for the negative sign.
  Instruction* case_z = ir_builder.AddSelect(
      float_type_id, is_z_neg->result_id(), f5_const_id, f4_const_id);
  Instruction* case_y = ir_builder.AddSelect(
      float_type_id, is_y_neg->result_id(), f3_const_id, f2_const_id);
  Instruction* case_x = ir_builder.AddSelect(
      float_type_id, is_x_neg->result_id(), f1_const_id, f0_const_id);
  // Select between the x and y cases, based on which magnitude is larger.
  Instruction* sel =
      ir_builder.AddSelect(float_type_id, y_gr_x->result_id(),
                           case_y->result_id(), case_x->result_id());
  // Rewrite |inst| in place into the final OpSelect, which picks the z case
  // when |z| dominates and |sel| otherwise.
  inst->SetOpcode(SpvOpSelect);
  Instruction::OperandList new_operands;
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {is_z_max->result_id()}});
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {case_z->result_id()}});
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {sel->result_id()}});
  inst->SetInOperands(std::move(new_operands));
  ctx->UpdateDefUse(inst);
  return true;
}
  709. class AmdExtFoldingRules : public FoldingRules {
  710. public:
  711. explicit AmdExtFoldingRules(IRContext* ctx) : FoldingRules(ctx) {}
  712. protected:
  713. virtual void AddFoldingRules() override {
  714. rules_[SpvOpGroupIAddNonUniformAMD].push_back(
  715. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformIAdd>);
  716. rules_[SpvOpGroupFAddNonUniformAMD].push_back(
  717. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformFAdd>);
  718. rules_[SpvOpGroupUMinNonUniformAMD].push_back(
  719. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformUMin>);
  720. rules_[SpvOpGroupSMinNonUniformAMD].push_back(
  721. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformSMin>);
  722. rules_[SpvOpGroupFMinNonUniformAMD].push_back(
  723. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformFMin>);
  724. rules_[SpvOpGroupUMaxNonUniformAMD].push_back(
  725. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformUMax>);
  726. rules_[SpvOpGroupSMaxNonUniformAMD].push_back(
  727. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformSMax>);
  728. rules_[SpvOpGroupFMaxNonUniformAMD].push_back(
  729. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformFMax>);
  730. uint32_t extension_id =
  731. context()->module()->GetExtInstImportId("SPV_AMD_shader_ballot");
  732. if (extension_id != 0) {
  733. ext_rules_[{extension_id, AmdShaderBallotSwizzleInvocationsAMD}]
  734. .push_back(ReplaceSwizzleInvocations);
  735. ext_rules_[{extension_id, AmdShaderBallotSwizzleInvocationsMaskedAMD}]
  736. .push_back(ReplaceSwizzleInvocationsMasked);
  737. ext_rules_[{extension_id, AmdShaderBallotWriteInvocationAMD}].push_back(
  738. ReplaceWriteInvocation);
  739. ext_rules_[{extension_id, AmdShaderBallotMbcntAMD}].push_back(
  740. ReplaceMbcnt);
  741. }
  742. extension_id = context()->module()->GetExtInstImportId(
  743. "SPV_AMD_shader_trinary_minmax");
  744. if (extension_id != 0) {
  745. ext_rules_[{extension_id, FMin3AMD}].push_back(
  746. ReplaceTrinaryMinMax<GLSLstd450FMin>);
  747. ext_rules_[{extension_id, UMin3AMD}].push_back(
  748. ReplaceTrinaryMinMax<GLSLstd450UMin>);
  749. ext_rules_[{extension_id, SMin3AMD}].push_back(
  750. ReplaceTrinaryMinMax<GLSLstd450SMin>);
  751. ext_rules_[{extension_id, FMax3AMD}].push_back(
  752. ReplaceTrinaryMinMax<GLSLstd450FMax>);
  753. ext_rules_[{extension_id, UMax3AMD}].push_back(
  754. ReplaceTrinaryMinMax<GLSLstd450UMax>);
  755. ext_rules_[{extension_id, SMax3AMD}].push_back(
  756. ReplaceTrinaryMinMax<GLSLstd450SMax>);
  757. ext_rules_[{extension_id, FMid3AMD}].push_back(
  758. ReplaceTrinaryMid<GLSLstd450FMin, GLSLstd450FMax, GLSLstd450FClamp>);
  759. ext_rules_[{extension_id, UMid3AMD}].push_back(
  760. ReplaceTrinaryMid<GLSLstd450UMin, GLSLstd450UMax, GLSLstd450UClamp>);
  761. ext_rules_[{extension_id, SMid3AMD}].push_back(
  762. ReplaceTrinaryMid<GLSLstd450SMin, GLSLstd450SMax, GLSLstd450SClamp>);
  763. }
  764. extension_id =
  765. context()->module()->GetExtInstImportId("SPV_AMD_gcn_shader");
  766. if (extension_id != 0) {
  767. ext_rules_[{extension_id, CubeFaceCoordAMD}].push_back(
  768. ReplaceCubeFaceCoord);
  769. ext_rules_[{extension_id, CubeFaceIndexAMD}].push_back(
  770. ReplaceCubeFaceIndex);
  771. ext_rules_[{extension_id, TimeAMD}].push_back(NotImplementedYet);
  772. }
  773. }
  774. };
  775. class AmdExtConstFoldingRules : public ConstantFoldingRules {
  776. public:
  777. AmdExtConstFoldingRules(IRContext* ctx) : ConstantFoldingRules(ctx) {}
  778. protected:
  779. virtual void AddFoldingRules() override {}
  780. };
  781. } // namespace
  782. Pass::Status AmdExtensionToKhrPass::Process() {
  783. bool changed = false;
  784. // Traverse the body of the functions to replace instructions that require
  785. // the extensions.
  786. InstructionFolder folder(
  787. context(),
  788. std::unique_ptr<AmdExtFoldingRules>(new AmdExtFoldingRules(context())),
  789. MakeUnique<AmdExtConstFoldingRules>(context()));
  790. for (Function& func : *get_module()) {
  791. func.ForEachInst([&changed, &folder](Instruction* inst) {
  792. if (folder.FoldInstruction(inst)) {
  793. changed = true;
  794. }
  795. });
  796. }
  797. // Now that instruction that require the extensions have been removed, we can
  798. // remove the extension instructions.
  799. std::set<std::string> ext_to_remove = {"SPV_AMD_shader_ballot",
  800. "SPV_AMD_shader_trinary_minmax",
  801. "SPV_AMD_gcn_shader"};
  802. std::vector<Instruction*> to_be_killed;
  803. for (Instruction& inst : context()->module()->extensions()) {
  804. if (inst.opcode() == SpvOpExtension) {
  805. if (ext_to_remove.count(reinterpret_cast<const char*>(
  806. &(inst.GetInOperand(0).words[0]))) != 0) {
  807. to_be_killed.push_back(&inst);
  808. }
  809. }
  810. }
  811. for (Instruction& inst : context()->ext_inst_imports()) {
  812. if (inst.opcode() == SpvOpExtInstImport) {
  813. if (ext_to_remove.count(reinterpret_cast<const char*>(
  814. &(inst.GetInOperand(0).words[0]))) != 0) {
  815. to_be_killed.push_back(&inst);
  816. }
  817. }
  818. }
  819. for (Instruction* inst : to_be_killed) {
  820. context()->KillInst(inst);
  821. changed = true;
  822. }
  823. // The replacements that take place use instructions that are missing before
  824. // SPIR-V 1.3. If we changed something, we will have to make sure the version
  825. // is at least SPIR-V 1.3 to make sure those instruction can be used.
  826. if (changed) {
  827. uint32_t version = get_module()->version();
  828. if (version < 0x00010300 /*1.3*/) {
  829. get_module()->set_version(0x00010300);
  830. }
  831. }
  832. return changed ? Status::SuccessWithChange : Status::SuccessWithoutChange;
  833. }
  834. } // namespace opt
  835. } // namespace spvtools