amd_ext_to_khr.cpp 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972
  1. // Copyright (c) 2019 Google LLC.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #include "source/opt/amd_ext_to_khr.h"
  15. #include <set>
  16. #include <string>
  17. #include "ir_builder.h"
  18. #include "source/opt/ir_context.h"
  19. #include "spv-amd-shader-ballot.insts.inc"
  20. #include "type_manager.h"
  21. namespace spvtools {
  22. namespace opt {
  23. namespace {
// Extended-instruction numbers for the SPV_AMD_shader_ballot extended
// instruction set (matches spv-amd-shader-ballot.insts.inc, included above).
enum AmdShaderBallotExtOpcodes {
  AmdShaderBallotSwizzleInvocationsAMD = 1,
  AmdShaderBallotSwizzleInvocationsMaskedAMD = 2,
  AmdShaderBallotWriteInvocationAMD = 3,
  AmdShaderBallotMbcntAMD = 4
};

// Extended-instruction numbers for the SPV_AMD_shader_trinary_minmax
// extended instruction set.
enum AmdShaderTrinaryMinMaxExtOpCodes {
  FMin3AMD = 1,
  UMin3AMD = 2,
  SMin3AMD = 3,
  FMax3AMD = 4,
  UMax3AMD = 5,
  SMax3AMD = 6,
  FMid3AMD = 7,
  UMid3AMD = 8,
  SMid3AMD = 9
};

// Extended-instruction numbers for the SPV_AMD_gcn_shader extended
// instruction set.  NOTE: the enumerators are deliberately listed out of
// numeric order; the assigned values are what matter.
enum AmdGcnShader { CubeFaceCoordAMD = 2, CubeFaceIndexAMD = 1, TimeAMD = 3 };
  42. analysis::Type* GetUIntType(IRContext* ctx) {
  43. analysis::Integer int_type(32, false);
  44. return ctx->get_type_mgr()->GetRegisteredType(&int_type);
  45. }
  46. // Returns a folding rule that replaces |op(a,b,c)| by |op(op(a,b),c)|, where
  47. // |op| is either min or max. |opcode| is the binary opcode in the GLSLstd450
  48. // extended instruction set that corresponds to the trinary instruction being
  49. // replaced.
  50. template <GLSLstd450 opcode>
  51. bool ReplaceTrinaryMinMax(IRContext* ctx, Instruction* inst,
  52. const std::vector<const analysis::Constant*>&) {
  53. uint32_t glsl405_ext_inst_id =
  54. ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  55. if (glsl405_ext_inst_id == 0) {
  56. ctx->AddExtInstImport("GLSL.std.450");
  57. glsl405_ext_inst_id =
  58. ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  59. }
  60. InstructionBuilder ir_builder(
  61. ctx, inst,
  62. IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  63. uint32_t op1 = inst->GetSingleWordInOperand(2);
  64. uint32_t op2 = inst->GetSingleWordInOperand(3);
  65. uint32_t op3 = inst->GetSingleWordInOperand(4);
  66. Instruction* temp = ir_builder.AddNaryExtendedInstruction(
  67. inst->type_id(), glsl405_ext_inst_id, opcode, {op1, op2});
  68. Instruction::OperandList new_operands;
  69. new_operands.push_back({SPV_OPERAND_TYPE_ID, {glsl405_ext_inst_id}});
  70. new_operands.push_back({SPV_OPERAND_TYPE_EXTENSION_INSTRUCTION_NUMBER,
  71. {static_cast<uint32_t>(opcode)}});
  72. new_operands.push_back({SPV_OPERAND_TYPE_ID, {temp->result_id()}});
  73. new_operands.push_back({SPV_OPERAND_TYPE_ID, {op3}});
  74. inst->SetInOperands(std::move(new_operands));
  75. ctx->UpdateDefUse(inst);
  76. return true;
  77. }
  78. // Returns a folding rule that replaces |mid(a,b,c)| by |clamp(a, min(b,c),
  79. // max(b,c)|. The three parameters are the opcode that correspond to the min,
  80. // max, and clamp operations for the type of the instruction being replaced.
  81. template <GLSLstd450 min_opcode, GLSLstd450 max_opcode, GLSLstd450 clamp_opcode>
  82. bool ReplaceTrinaryMid(IRContext* ctx, Instruction* inst,
  83. const std::vector<const analysis::Constant*>&) {
  84. uint32_t glsl405_ext_inst_id =
  85. ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  86. if (glsl405_ext_inst_id == 0) {
  87. ctx->AddExtInstImport("GLSL.std.450");
  88. glsl405_ext_inst_id =
  89. ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  90. }
  91. InstructionBuilder ir_builder(
  92. ctx, inst,
  93. IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  94. uint32_t op1 = inst->GetSingleWordInOperand(2);
  95. uint32_t op2 = inst->GetSingleWordInOperand(3);
  96. uint32_t op3 = inst->GetSingleWordInOperand(4);
  97. Instruction* min = ir_builder.AddNaryExtendedInstruction(
  98. inst->type_id(), glsl405_ext_inst_id, static_cast<uint32_t>(min_opcode),
  99. {op2, op3});
  100. Instruction* max = ir_builder.AddNaryExtendedInstruction(
  101. inst->type_id(), glsl405_ext_inst_id, static_cast<uint32_t>(max_opcode),
  102. {op2, op3});
  103. Instruction::OperandList new_operands;
  104. new_operands.push_back({SPV_OPERAND_TYPE_ID, {glsl405_ext_inst_id}});
  105. new_operands.push_back({SPV_OPERAND_TYPE_EXTENSION_INSTRUCTION_NUMBER,
  106. {static_cast<uint32_t>(clamp_opcode)}});
  107. new_operands.push_back({SPV_OPERAND_TYPE_ID, {op1}});
  108. new_operands.push_back({SPV_OPERAND_TYPE_ID, {min->result_id()}});
  109. new_operands.push_back({SPV_OPERAND_TYPE_ID, {max->result_id()}});
  110. inst->SetInOperands(std::move(new_operands));
  111. ctx->UpdateDefUse(inst);
  112. return true;
  113. }
  114. // Returns a folding rule that will replace the opcode with |opcode| and add
  115. // the capabilities required. The folding rule assumes it is folding an
  116. // OpGroup*NonUniformAMD instruction from the SPV_AMD_shader_ballot extension.
  117. template <SpvOp new_opcode>
  118. bool ReplaceGroupNonuniformOperationOpCode(
  119. IRContext* ctx, Instruction* inst,
  120. const std::vector<const analysis::Constant*>&) {
  121. switch (new_opcode) {
  122. case SpvOpGroupNonUniformIAdd:
  123. case SpvOpGroupNonUniformFAdd:
  124. case SpvOpGroupNonUniformUMin:
  125. case SpvOpGroupNonUniformSMin:
  126. case SpvOpGroupNonUniformFMin:
  127. case SpvOpGroupNonUniformUMax:
  128. case SpvOpGroupNonUniformSMax:
  129. case SpvOpGroupNonUniformFMax:
  130. break;
  131. default:
  132. assert(
  133. false &&
  134. "Should be replacing with a group non uniform arithmetic operation.");
  135. }
  136. switch (inst->opcode()) {
  137. case SpvOpGroupIAddNonUniformAMD:
  138. case SpvOpGroupFAddNonUniformAMD:
  139. case SpvOpGroupUMinNonUniformAMD:
  140. case SpvOpGroupSMinNonUniformAMD:
  141. case SpvOpGroupFMinNonUniformAMD:
  142. case SpvOpGroupUMaxNonUniformAMD:
  143. case SpvOpGroupSMaxNonUniformAMD:
  144. case SpvOpGroupFMaxNonUniformAMD:
  145. break;
  146. default:
  147. assert(false &&
  148. "Should be replacing a group non uniform arithmetic operation.");
  149. }
  150. ctx->AddCapability(SpvCapabilityGroupNonUniformArithmetic);
  151. inst->SetOpcode(new_opcode);
  152. return true;
  153. }
// Folding rule that replaces the SwizzleInvocationsAMD extended instruction
// from the SPV_AMD_shader_ballot extension.  Always returns true.
//
// The instruction
//
//  %offset = OpConstantComposite %v3uint %x %y %z %w
//  %result = OpExtInst %type %1 SwizzleInvocationsAMD %data %offset
//
// is replaced with
//
// potentially new constants and types
//
// clang-format off
//         %uint_max = OpConstant %uint 0xFFFFFFFF
//           %v4uint = OpTypeVector %uint 4
//     %ballot_value = OpConstantComposite %v4uint %uint_max %uint_max %uint_max %uint_max
//             %null = OpConstantNull %type
// clang-format on
//
// and the following code in the function body
//
// clang-format off
//         %id = OpLoad %uint %SubgroupLocalInvocationId
//   %quad_idx = OpBitwiseAnd %uint %id %uint_3
//   %quad_ldr = OpBitwiseXor %uint %id %quad_idx
//  %my_offset = OpVectorExtractDynamic %uint %offset %quad_idx
// %target_inv = OpIAdd %uint %quad_ldr %my_offset
//  %is_active = OpGroupNonUniformBallotBitExtract %bool %uint_3 %ballot_value %target_inv
//    %shuffle = OpGroupNonUniformShuffle %type %uint_3 %data %target_inv
//     %result = OpSelect %type %is_active %shuffle %null
// clang-format on
//
// Also adds the capabilities, extensions, and builtins that are needed.
bool ReplaceSwizzleInvocations(IRContext* ctx, Instruction* inst,
                               const std::vector<const analysis::Constant*>&) {
  analysis::TypeManager* type_mgr = ctx->get_type_mgr();
  analysis::ConstantManager* const_mgr = ctx->get_constant_mgr();

  // The replacement uses KHR ballot/shuffle group operations.
  ctx->AddExtension("SPV_KHR_shader_ballot");
  ctx->AddCapability(SpvCapabilityGroupNonUniformBallot);
  ctx->AddCapability(SpvCapabilityGroupNonUniformShuffle);

  InstructionBuilder ir_builder(
      ctx, inst,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);

  // In-operands 0 and 1 are the extended-instruction-set id and number; the
  // instruction's real arguments start at index 2.
  uint32_t data_id = inst->GetSingleWordInOperand(2);
  uint32_t offset_id = inst->GetSingleWordInOperand(3);

  // Get the subgroup invocation id.
  uint32_t var_id =
      ctx->GetBuiltinInputVarId(SpvBuiltInSubgroupLocalInvocationId);
  assert(var_id != 0 && "Could not get SubgroupLocalInvocationId variable.");
  Instruction* var_inst = ctx->get_def_use_mgr()->GetDef(var_id);
  Instruction* var_ptr_type =
      ctx->get_def_use_mgr()->GetDef(var_inst->type_id());
  // The variable's pointee type (operand 1 of the OpTypePointer) is the uint
  // type used for all of the scalar arithmetic below.
  uint32_t uint_type_id = var_ptr_type->GetSingleWordInOperand(1);

  Instruction* id = ir_builder.AddLoad(uint_type_id, var_id);

  uint32_t quad_mask = ir_builder.GetUintConstantId(3);

  // This gives the offset in the group of 4 of this invocation.
  Instruction* quad_idx = ir_builder.AddBinaryOp(uint_type_id, SpvOpBitwiseAnd,
                                                 id->result_id(), quad_mask);

  // Get the invocation id of the first invocation in the group of 4.
  Instruction* quad_ldr = ir_builder.AddBinaryOp(
      uint_type_id, SpvOpBitwiseXor, id->result_id(), quad_idx->result_id());

  // Get the offset of the target invocation from the offset vector.
  Instruction* my_offset =
      ir_builder.AddBinaryOp(uint_type_id, SpvOpVectorExtractDynamic, offset_id,
                             quad_idx->result_id());

  // Determine the index of the invocation to read from.
  Instruction* target_inv = ir_builder.AddBinaryOp(
      uint_type_id, SpvOpIAdd, quad_ldr->result_id(), my_offset->result_id());

  // Do the group operations.  An all-ones ballot is used so that the bit
  // extract answers "is the target invocation active".
  uint32_t uint_max_id = ir_builder.GetUintConstantId(0xFFFFFFFF);
  uint32_t subgroup_scope = ir_builder.GetUintConstantId(SpvScopeSubgroup);
  const auto* ballot_value_const = const_mgr->GetConstant(
      type_mgr->GetUIntVectorType(4),
      {uint_max_id, uint_max_id, uint_max_id, uint_max_id});
  Instruction* ballot_value =
      const_mgr->GetDefiningInstruction(ballot_value_const);
  Instruction* is_active = ir_builder.AddNaryOp(
      type_mgr->GetBoolTypeId(), SpvOpGroupNonUniformBallotBitExtract,
      {subgroup_scope, ballot_value->result_id(), target_inv->result_id()});
  Instruction* shuffle =
      ir_builder.AddNaryOp(inst->type_id(), SpvOpGroupNonUniformShuffle,
                           {subgroup_scope, data_id, target_inv->result_id()});

  // Create the null constant to use in the select (returned when the target
  // invocation is inactive).
  const auto* null = const_mgr->GetConstant(type_mgr->GetType(inst->type_id()),
                                            std::vector<uint32_t>());
  Instruction* null_inst = const_mgr->GetDefiningInstruction(null);

  // Rewrite |inst| in place as the final select.
  inst->SetOpcode(SpvOpSelect);
  Instruction::OperandList new_operands;
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {is_active->result_id()}});
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {shuffle->result_id()}});
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {null_inst->result_id()}});
  inst->SetInOperands(std::move(new_operands));
  ctx->UpdateDefUse(inst);
  return true;
}
// Folding rule that replaces the SwizzleInvocationsMaskedAMD extended
// instruction from the SPV_AMD_shader_ballot extension.  Always returns true.
//
// The instruction
//
//    %mask = OpConstantComposite %v3uint %uint_x %uint_y %uint_z
//  %result = OpExtInst %uint %1 SwizzleInvocationsMaskedAMD %data %mask
//
// is replaced with
//
// potentially new constants and types
//
// clang-format off
// %uint_mask_extend = OpConstant %uint 0xFFFFFFE0
//         %uint_max = OpConstant %uint 0xFFFFFFFF
//           %v4uint = OpTypeVector %uint 4
//     %ballot_value = OpConstantComposite %v4uint %uint_max %uint_max %uint_max %uint_max
// clang-format on
//
// and the following code in the function body
//
// clang-format off
//         %id = OpLoad %uint %SubgroupLocalInvocationId
//   %and_mask = OpBitwiseOr %uint %uint_x %uint_mask_extend
//        %and = OpBitwiseAnd %uint %id %and_mask
//         %or = OpBitwiseOr %uint %and %uint_y
// %target_inv = OpBitwiseXor %uint %or %uint_z
//  %is_active = OpGroupNonUniformBallotBitExtract %bool %uint_3 %ballot_value %target_inv
//    %shuffle = OpGroupNonUniformShuffle %type %uint_3 %data %target_inv
//     %result = OpSelect %type %is_active %shuffle %uint_0
// clang-format on
//
// Also adds the capabilities, extensions, and builtins that are needed.
bool ReplaceSwizzleInvocationsMasked(
    IRContext* ctx, Instruction* inst,
    const std::vector<const analysis::Constant*>&) {
  analysis::TypeManager* type_mgr = ctx->get_type_mgr();
  analysis::DefUseManager* def_use_mgr = ctx->get_def_use_mgr();
  analysis::ConstantManager* const_mgr = ctx->get_constant_mgr();

  ctx->AddCapability(SpvCapabilityGroupNonUniformBallot);
  ctx->AddCapability(SpvCapabilityGroupNonUniformShuffle);

  InstructionBuilder ir_builder(
      ctx, inst,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);

  // Get the operands to inst, and the components of the mask.  Arguments of
  // the extended instruction start at in-operand index 2.
  uint32_t data_id = inst->GetSingleWordInOperand(2);

  Instruction* mask_inst = def_use_mgr->GetDef(inst->GetSingleWordInOperand(3));
  assert(mask_inst->opcode() == SpvOpConstantComposite &&
         "The mask is suppose to be a vector constant.");
  assert(mask_inst->NumInOperands() == 3 &&
         "The mask is suppose to have 3 components.");

  // The mask components are ids of the uint constants x, y, and z.
  uint32_t uint_x = mask_inst->GetSingleWordInOperand(0);
  uint32_t uint_y = mask_inst->GetSingleWordInOperand(1);
  uint32_t uint_z = mask_inst->GetSingleWordInOperand(2);

  // Get the subgroup invocation id.
  uint32_t var_id =
      ctx->GetBuiltinInputVarId(SpvBuiltInSubgroupLocalInvocationId);
  ctx->AddExtension("SPV_KHR_shader_ballot");
  assert(var_id != 0 && "Could not get SubgroupLocalInvocationId variable.");
  Instruction* var_inst = ctx->get_def_use_mgr()->GetDef(var_id);
  Instruction* var_ptr_type =
      ctx->get_def_use_mgr()->GetDef(var_inst->type_id());
  // The variable's pointee type (operand 1 of the OpTypePointer) is the uint
  // type used for the bitwise arithmetic below.
  uint32_t uint_type_id = var_ptr_type->GetSingleWordInOperand(1);

  Instruction* id = ir_builder.AddLoad(uint_type_id, var_id);

  // Do the bitwise operations.  0xFFFFFFE0 extends the 5-bit swizzle mask so
  // that bits above the 32-invocation group are preserved from |id|.
  uint32_t mask_extended = ir_builder.GetUintConstantId(0xFFFFFFE0);
  Instruction* and_mask = ir_builder.AddBinaryOp(uint_type_id, SpvOpBitwiseOr,
                                                 uint_x, mask_extended);
  Instruction* and_result = ir_builder.AddBinaryOp(
      uint_type_id, SpvOpBitwiseAnd, id->result_id(), and_mask->result_id());
  Instruction* or_result = ir_builder.AddBinaryOp(
      uint_type_id, SpvOpBitwiseOr, and_result->result_id(), uint_y);
  Instruction* target_inv = ir_builder.AddBinaryOp(
      uint_type_id, SpvOpBitwiseXor, or_result->result_id(), uint_z);

  // Do the group operations.  An all-ones ballot is used so the bit extract
  // answers "is the target invocation active".
  uint32_t uint_max_id = ir_builder.GetUintConstantId(0xFFFFFFFF);
  uint32_t subgroup_scope = ir_builder.GetUintConstantId(SpvScopeSubgroup);
  const auto* ballot_value_const = const_mgr->GetConstant(
      type_mgr->GetUIntVectorType(4),
      {uint_max_id, uint_max_id, uint_max_id, uint_max_id});
  Instruction* ballot_value =
      const_mgr->GetDefiningInstruction(ballot_value_const);
  Instruction* is_active = ir_builder.AddNaryOp(
      type_mgr->GetBoolTypeId(), SpvOpGroupNonUniformBallotBitExtract,
      {subgroup_scope, ballot_value->result_id(), target_inv->result_id()});
  Instruction* shuffle =
      ir_builder.AddNaryOp(inst->type_id(), SpvOpGroupNonUniformShuffle,
                           {subgroup_scope, data_id, target_inv->result_id()});

  // Create the null constant to use in the select (returned when the target
  // invocation is inactive).
  const auto* null = const_mgr->GetConstant(type_mgr->GetType(inst->type_id()),
                                            std::vector<uint32_t>());
  Instruction* null_inst = const_mgr->GetDefiningInstruction(null);

  // Rewrite |inst| in place as the final select.
  inst->SetOpcode(SpvOpSelect);
  Instruction::OperandList new_operands;
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {is_active->result_id()}});
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {shuffle->result_id()}});
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {null_inst->result_id()}});
  inst->SetInOperands(std::move(new_operands));
  ctx->UpdateDefUse(inst);
  return true;
}
  352. // Returns a folding rule that will replace the WriteInvocationAMD extended
  353. // instruction in the SPV_AMD_shader_ballot extension.
  354. //
  355. // The instruction
  356. //
  357. // clang-format off
  358. // %result = OpExtInst %type %1 WriteInvocationAMD %input_value %write_value %invocation_index
  359. // clang-format on
  360. //
  361. // with
  362. //
  363. // %id = OpLoad %uint %SubgroupLocalInvocationId
  364. // %cmp = OpIEqual %bool %id %invocation_index
  365. // %result = OpSelect %type %cmp %write_value %input_value
  366. //
  367. // Also adding the capabilities and builtins that are needed.
  368. bool ReplaceWriteInvocation(IRContext* ctx, Instruction* inst,
  369. const std::vector<const analysis::Constant*>&) {
  370. uint32_t var_id =
  371. ctx->GetBuiltinInputVarId(SpvBuiltInSubgroupLocalInvocationId);
  372. ctx->AddCapability(SpvCapabilitySubgroupBallotKHR);
  373. ctx->AddExtension("SPV_KHR_shader_ballot");
  374. assert(var_id != 0 && "Could not get SubgroupLocalInvocationId variable.");
  375. Instruction* var_inst = ctx->get_def_use_mgr()->GetDef(var_id);
  376. Instruction* var_ptr_type =
  377. ctx->get_def_use_mgr()->GetDef(var_inst->type_id());
  378. InstructionBuilder ir_builder(
  379. ctx, inst,
  380. IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  381. Instruction* t =
  382. ir_builder.AddLoad(var_ptr_type->GetSingleWordInOperand(1), var_id);
  383. analysis::Bool bool_type;
  384. uint32_t bool_type_id = ctx->get_type_mgr()->GetTypeInstruction(&bool_type);
  385. Instruction* cmp =
  386. ir_builder.AddBinaryOp(bool_type_id, SpvOpIEqual, t->result_id(),
  387. inst->GetSingleWordInOperand(4));
  388. // Build a select.
  389. inst->SetOpcode(SpvOpSelect);
  390. Instruction::OperandList new_operands;
  391. new_operands.push_back({SPV_OPERAND_TYPE_ID, {cmp->result_id()}});
  392. new_operands.push_back(inst->GetInOperand(3));
  393. new_operands.push_back(inst->GetInOperand(2));
  394. inst->SetInOperands(std::move(new_operands));
  395. ctx->UpdateDefUse(inst);
  396. return true;
  397. }
  398. // Returns a folding rule that will replace the MbcntAMD extended instruction in
  399. // the SPV_AMD_shader_ballot extension.
  400. //
  401. // The instruction
  402. //
  403. // %result = OpExtInst %uint %1 MbcntAMD %mask
  404. //
  405. // with
  406. //
  407. // Get SubgroupLtMask and convert the first 64-bits into a uint64_t because
  408. // AMD's shader compiler expects a 64-bit integer mask.
  409. //
  410. // %var = OpLoad %v4uint %SubgroupLtMaskKHR
  411. // %shuffle = OpVectorShuffle %v2uint %var %var 0 1
  412. // %cast = OpBitcast %ulong %shuffle
  413. //
  414. // Perform the mask and count the bits.
  415. //
  416. // %and = OpBitwiseAnd %ulong %cast %mask
  417. // %result = OpBitCount %uint %and
  418. //
  419. // Also adding the capabilities and builtins that are needed.
  420. bool ReplaceMbcnt(IRContext* context, Instruction* inst,
  421. const std::vector<const analysis::Constant*>&) {
  422. analysis::TypeManager* type_mgr = context->get_type_mgr();
  423. analysis::DefUseManager* def_use_mgr = context->get_def_use_mgr();
  424. uint32_t var_id = context->GetBuiltinInputVarId(SpvBuiltInSubgroupLtMask);
  425. assert(var_id != 0 && "Could not get SubgroupLtMask variable.");
  426. context->AddCapability(SpvCapabilityGroupNonUniformBallot);
  427. Instruction* var_inst = def_use_mgr->GetDef(var_id);
  428. Instruction* var_ptr_type = def_use_mgr->GetDef(var_inst->type_id());
  429. Instruction* var_type =
  430. def_use_mgr->GetDef(var_ptr_type->GetSingleWordInOperand(1));
  431. assert(var_type->opcode() == SpvOpTypeVector &&
  432. "Variable is suppose to be a vector of 4 ints");
  433. // Get the type for the shuffle.
  434. analysis::Vector temp_type(GetUIntType(context), 2);
  435. const analysis::Type* shuffle_type =
  436. context->get_type_mgr()->GetRegisteredType(&temp_type);
  437. uint32_t shuffle_type_id = type_mgr->GetTypeInstruction(shuffle_type);
  438. uint32_t mask_id = inst->GetSingleWordInOperand(2);
  439. Instruction* mask_inst = def_use_mgr->GetDef(mask_id);
  440. // Testing with amd's shader compiler shows that a 64-bit mask is expected.
  441. assert(type_mgr->GetType(mask_inst->type_id())->AsInteger() != nullptr);
  442. assert(type_mgr->GetType(mask_inst->type_id())->AsInteger()->width() == 64);
  443. InstructionBuilder ir_builder(
  444. context, inst,
  445. IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  446. Instruction* load = ir_builder.AddLoad(var_type->result_id(), var_id);
  447. Instruction* shuffle = ir_builder.AddVectorShuffle(
  448. shuffle_type_id, load->result_id(), load->result_id(), {0, 1});
  449. Instruction* bitcast = ir_builder.AddUnaryOp(
  450. mask_inst->type_id(), SpvOpBitcast, shuffle->result_id());
  451. Instruction* t = ir_builder.AddBinaryOp(mask_inst->type_id(), SpvOpBitwiseAnd,
  452. bitcast->result_id(), mask_id);
  453. inst->SetOpcode(SpvOpBitCount);
  454. inst->SetInOperands({{SPV_OPERAND_TYPE_ID, {t->result_id()}}});
  455. context->UpdateDefUse(inst);
  456. return true;
  457. }
// A folding rule that replaces the CubeFaceCoordAMD extended instruction in
// the SPV_AMD_gcn_shader extended instruction set.  Returns true if the
// folding is successful.
//
// The instruction
//
//  %result = OpExtInst %v2float %1 CubeFaceCoordAMD %input
//
// is replaced with
//
//             %x = OpCompositeExtract %float %input 0
//             %y = OpCompositeExtract %float %input 1
//             %z = OpCompositeExtract %float %input 2
//            %nx = OpFNegate %float %x
//            %ny = OpFNegate %float %y
//            %nz = OpFNegate %float %z
//            %ax = OpExtInst %float %n_1 FAbs %x
//            %ay = OpExtInst %float %n_1 FAbs %y
//            %az = OpExtInst %float %n_1 FAbs %z
//      %amax_x_y = OpExtInst %float %n_1 FMax %ay %ax
//          %amax = OpExtInst %float %n_1 FMax %az %amax_x_y
//        %cubema = OpFMul %float %float_2 %amax
//      %is_z_max = OpFOrdGreaterThanEqual %bool %az %amax_x_y
//  %not_is_z_max = OpLogicalNot %bool %is_z_max
//        %y_gt_x = OpFOrdGreaterThanEqual %bool %ay %ax
//      %is_y_max = OpLogicalAnd %bool %not_is_z_max %y_gt_x
//      %is_z_neg = OpFOrdLessThan %bool %z %float_0
// %cubesc_case_1 = OpSelect %float %is_z_neg %nx %x
//      %is_x_neg = OpFOrdLessThan %bool %x %float_0
// %cubesc_case_2 = OpSelect %float %is_x_neg %z %nz
//           %sel = OpSelect %float %is_y_max %x %cubesc_case_2
//        %cubesc = OpSelect %float %is_z_max %cubesc_case_1 %sel
//      %is_y_neg = OpFOrdLessThan %bool %y %float_0
// %cubetc_case_1 = OpSelect %float %is_y_neg %nz %z
//        %cubetc = OpSelect %float %is_y_max %cubetc_case_1 %ny
//          %cube = OpCompositeConstruct %v2float %cubesc %cubetc
//         %denom = OpCompositeConstruct %v2float %cubema %cubema
//           %div = OpFDiv %v2float %cube %denom
//        %result = OpFAdd %v2float %div %const
//
// Also adds the capabilities and builtins that are needed.
bool ReplaceCubeFaceCoord(IRContext* ctx, Instruction* inst,
                          const std::vector<const analysis::Constant*>&) {
  analysis::TypeManager* type_mgr = ctx->get_type_mgr();
  analysis::ConstantManager* const_mgr = ctx->get_constant_mgr();

  uint32_t float_type_id = type_mgr->GetFloatTypeId();
  const analysis::Type* v2_float_type = type_mgr->GetFloatVectorType(2);
  uint32_t v2_float_type_id = type_mgr->GetId(v2_float_type);
  uint32_t bool_id = type_mgr->GetBoolTypeId();

  InstructionBuilder ir_builder(
      ctx, inst,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);

  // The cube direction vector is in-operand 2 (after the set id and the
  // extended-instruction number).
  uint32_t input_id = inst->GetSingleWordInOperand(2);

  // Make sure the GLSL.std.450 extended instruction set is imported; FAbs
  // and FMax come from it.
  uint32_t glsl405_ext_inst_id =
      ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  if (glsl405_ext_inst_id == 0) {
    ctx->AddExtInstImport("GLSL.std.450");
    glsl405_ext_inst_id =
        ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  }

  // Get the constants that will be used.
  uint32_t f0_const_id = const_mgr->GetFloatConst(0.0);
  uint32_t f2_const_id = const_mgr->GetFloatConst(2.0);
  uint32_t f0_5_const_id = const_mgr->GetFloatConst(0.5);
  const analysis::Constant* vec_const =
      const_mgr->GetConstant(v2_float_type, {f0_5_const_id, f0_5_const_id});
  uint32_t vec_const_id =
      const_mgr->GetDefiningInstruction(vec_const)->result_id();

  // Extract the input values.
  Instruction* x = ir_builder.AddCompositeExtract(float_type_id, input_id, {0});
  Instruction* y = ir_builder.AddCompositeExtract(float_type_id, input_id, {1});
  Instruction* z = ir_builder.AddCompositeExtract(float_type_id, input_id, {2});

  // Negate the input values.
  Instruction* nx =
      ir_builder.AddUnaryOp(float_type_id, SpvOpFNegate, x->result_id());
  Instruction* ny =
      ir_builder.AddUnaryOp(float_type_id, SpvOpFNegate, y->result_id());
  Instruction* nz =
      ir_builder.AddUnaryOp(float_type_id, SpvOpFNegate, z->result_id());

  // Get the absolute values of the inputs.
  Instruction* ax = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FAbs, {x->result_id()});
  Instruction* ay = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FAbs, {y->result_id()});
  Instruction* az = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FAbs, {z->result_id()});

  // Find which values are negative.  Used in later computations.
  Instruction* is_z_neg = ir_builder.AddBinaryOp(bool_id, SpvOpFOrdLessThan,
                                                 z->result_id(), f0_const_id);
  Instruction* is_y_neg = ir_builder.AddBinaryOp(bool_id, SpvOpFOrdLessThan,
                                                 y->result_id(), f0_const_id);
  Instruction* is_x_neg = ir_builder.AddBinaryOp(bool_id, SpvOpFOrdLessThan,
                                                 x->result_id(), f0_const_id);

  // Compute cubema: twice the magnitude of the major axis.
  Instruction* amax_x_y = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FMax,
      {ax->result_id(), ay->result_id()});
  Instruction* amax = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FMax,
      {az->result_id(), amax_x_y->result_id()});
  Instruction* cubema = ir_builder.AddBinaryOp(float_type_id, SpvOpFMul,
                                               f2_const_id, amax->result_id());

  // Do the comparisons needed for computing cubesc and cubetc.
  Instruction* is_z_max =
      ir_builder.AddBinaryOp(bool_id, SpvOpFOrdGreaterThanEqual,
                             az->result_id(), amax_x_y->result_id());
  Instruction* not_is_z_max =
      ir_builder.AddUnaryOp(bool_id, SpvOpLogicalNot, is_z_max->result_id());
  Instruction* y_gr_x = ir_builder.AddBinaryOp(
      bool_id, SpvOpFOrdGreaterThanEqual, ay->result_id(), ax->result_id());
  Instruction* is_y_max = ir_builder.AddBinaryOp(
      bool_id, SpvOpLogicalAnd, not_is_z_max->result_id(), y_gr_x->result_id());

  // Select the correct value for cubesc.
  Instruction* cubesc_case_1 = ir_builder.AddSelect(
      float_type_id, is_z_neg->result_id(), nx->result_id(), x->result_id());
  Instruction* cubesc_case_2 = ir_builder.AddSelect(
      float_type_id, is_x_neg->result_id(), z->result_id(), nz->result_id());
  Instruction* sel =
      ir_builder.AddSelect(float_type_id, is_y_max->result_id(), x->result_id(),
                           cubesc_case_2->result_id());
  Instruction* cubesc =
      ir_builder.AddSelect(float_type_id, is_z_max->result_id(),
                           cubesc_case_1->result_id(), sel->result_id());

  // Select the correct value for cubetc.
  Instruction* cubetc_case_1 = ir_builder.AddSelect(
      float_type_id, is_y_neg->result_id(), nz->result_id(), z->result_id());
  Instruction* cubetc =
      ir_builder.AddSelect(float_type_id, is_y_max->result_id(),
                           cubetc_case_1->result_id(), ny->result_id());

  // Do the division.
  Instruction* cube = ir_builder.AddCompositeConstruct(
      v2_float_type_id, {cubesc->result_id(), cubetc->result_id()});
  Instruction* denom = ir_builder.AddCompositeConstruct(
      v2_float_type_id, {cubema->result_id(), cubema->result_id()});
  Instruction* div = ir_builder.AddBinaryOp(
      v2_float_type_id, SpvOpFDiv, cube->result_id(), denom->result_id());

  // Get the final result by adding 0.5 to |div|, rewriting |inst| in place.
  inst->SetOpcode(SpvOpFAdd);
  Instruction::OperandList new_operands;
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {div->result_id()}});
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {vec_const_id}});
  inst->SetInOperands(std::move(new_operands));
  ctx->UpdateDefUse(inst);
  return true;
}
// A folding rule that will replace the CubeFaceIndexAMD extended
// instruction in SPV_AMD_gcn_shader. Returns true if the folding
// is successful.
  606. //
  607. // The instruction
  608. //
  609. // %result = OpExtInst %float %1 CubeFaceIndexAMD %input
  610. //
  611. // with
  612. //
  613. // %x = OpCompositeExtract %float %input 0
  614. // %y = OpCompositeExtract %float %input 1
  615. // %z = OpCompositeExtract %float %input 2
  616. // %ax = OpExtInst %float %n_1 FAbs %x
  617. // %ay = OpExtInst %float %n_1 FAbs %y
  618. // %az = OpExtInst %float %n_1 FAbs %z
  619. // %is_z_neg = OpFOrdLessThan %bool %z %float_0
  620. // %is_y_neg = OpFOrdLessThan %bool %y %float_0
  621. // %is_x_neg = OpFOrdLessThan %bool %x %float_0
  622. // %amax_x_y = OpExtInst %float %n_1 FMax %ax %ay
  623. // %is_z_max = OpFOrdGreaterThanEqual %bool %az %amax_x_y
  624. // %y_gt_x = OpFOrdGreaterThanEqual %bool %ay %ax
// %case_z = OpSelect %float %is_z_neg %float_5 %float_4
// %case_y = OpSelect %float %is_y_neg %float_3 %float_2
// %case_x = OpSelect %float %is_x_neg %float_1 %float_0
  628. // %sel = OpSelect %float %y_gt_x %case_y %case_x
  629. // %result = OpSelect %float %is_z_max %case_z %sel
  630. //
  631. // Also adding the capabilities and builtins that are needed.
bool ReplaceCubeFaceIndex(IRContext* ctx, Instruction* inst,
                          const std::vector<const analysis::Constant*>&) {
  analysis::TypeManager* type_mgr = ctx->get_type_mgr();
  analysis::ConstantManager* const_mgr = ctx->get_constant_mgr();

  uint32_t float_type_id = type_mgr->GetFloatTypeId();
  uint32_t bool_id = type_mgr->GetBoolTypeId();

  InstructionBuilder ir_builder(
      ctx, inst,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);

  // In-operand 2 of the OpExtInst is the input direction vector.
  uint32_t input_id = inst->GetSingleWordInOperand(2);

  // The replacement code uses GLSL.std.450 FAbs/FMax, so import that
  // extended instruction set if the module does not have it yet.
  uint32_t glsl405_ext_inst_id =
      ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  if (glsl405_ext_inst_id == 0) {
    ctx->AddExtInstImport("GLSL.std.450");
    glsl405_ext_inst_id =
        ctx->get_feature_mgr()->GetExtInstImportId_GLSLstd450();
  }

  // Get the constants that will be used.  0.0 through 5.0 are the six
  // possible face-index results.
  uint32_t f0_const_id = const_mgr->GetFloatConst(0.0);
  uint32_t f1_const_id = const_mgr->GetFloatConst(1.0);
  uint32_t f2_const_id = const_mgr->GetFloatConst(2.0);
  uint32_t f3_const_id = const_mgr->GetFloatConst(3.0);
  uint32_t f4_const_id = const_mgr->GetFloatConst(4.0);
  uint32_t f5_const_id = const_mgr->GetFloatConst(5.0);

  // Extract the x, y, and z components of the input vector.
  Instruction* x = ir_builder.AddCompositeExtract(float_type_id, input_id, {0});
  Instruction* y = ir_builder.AddCompositeExtract(float_type_id, input_id, {1});
  Instruction* z = ir_builder.AddCompositeExtract(float_type_id, input_id, {2});

  // Get the absolute values of the inputs.
  Instruction* ax = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FAbs, {x->result_id()});
  Instruction* ay = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FAbs, {y->result_id()});
  Instruction* az = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FAbs, {z->result_id()});

  // Find which values are negative.  Each comparison later picks between the
  // positive-axis and negative-axis face of a given axis.
  Instruction* is_z_neg = ir_builder.AddBinaryOp(bool_id, SpvOpFOrdLessThan,
                                                 z->result_id(), f0_const_id);
  Instruction* is_y_neg = ir_builder.AddBinaryOp(bool_id, SpvOpFOrdLessThan,
                                                 y->result_id(), f0_const_id);
  Instruction* is_x_neg = ir_builder.AddBinaryOp(bool_id, SpvOpFOrdLessThan,
                                                 x->result_id(), f0_const_id);

  // Find the max value.  The axis with the largest magnitude determines which
  // pair of faces the result comes from.
  Instruction* amax_x_y = ir_builder.AddNaryExtendedInstruction(
      float_type_id, glsl405_ext_inst_id, GLSLstd450FMax,
      {ax->result_id(), ay->result_id()});
  Instruction* is_z_max =
      ir_builder.AddBinaryOp(bool_id, SpvOpFOrdGreaterThanEqual,
                             az->result_id(), amax_x_y->result_id());
  Instruction* y_gr_x = ir_builder.AddBinaryOp(
      bool_id, SpvOpFOrdGreaterThanEqual, ay->result_id(), ax->result_id());

  // Get the value for each case: (+z,-z) -> (4,5), (+y,-y) -> (2,3),
  // (+x,-x) -> (0,1).
  Instruction* case_z = ir_builder.AddSelect(
      float_type_id, is_z_neg->result_id(), f5_const_id, f4_const_id);
  Instruction* case_y = ir_builder.AddSelect(
      float_type_id, is_y_neg->result_id(), f3_const_id, f2_const_id);
  Instruction* case_x = ir_builder.AddSelect(
      float_type_id, is_x_neg->result_id(), f1_const_id, f0_const_id);

  // Select the correct case among x and y.
  Instruction* sel =
      ir_builder.AddSelect(float_type_id, y_gr_x->result_id(),
                           case_y->result_id(), case_x->result_id());

  // Rewrite |inst| in place as the final OpSelect between the z case and the
  // x/y selection.  (Not "adding 0.5" — that applies only to the
  // CubeFaceCoord replacement above; this one ends in a select.)
  inst->SetOpcode(SpvOpSelect);
  Instruction::OperandList new_operands;
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {is_z_max->result_id()}});
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {case_z->result_id()}});
  new_operands.push_back({SPV_OPERAND_TYPE_ID, {sel->result_id()}});
  inst->SetInOperands(std::move(new_operands));
  ctx->UpdateDefUse(inst);
  return true;
}
// A folding rule that will replace the TimeAMD extended instruction in
// SPV_AMD_gcn_shader. It returns true if the folding is successful.
// It returns false, otherwise.
  707. //
  708. // The instruction
  709. //
  710. // %result = OpExtInst %uint64 %1 TimeAMD
  711. //
  712. // with
  713. //
  714. // %result = OpReadClockKHR %uint64 %uint_3
  715. //
  716. // NOTE: TimeAMD uses subgroup scope (it is not a real time clock).
  717. bool ReplaceTimeAMD(IRContext* ctx, Instruction* inst,
  718. const std::vector<const analysis::Constant*>&) {
  719. InstructionBuilder ir_builder(
  720. ctx, inst,
  721. IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  722. ctx->AddExtension("SPV_KHR_shader_clock");
  723. ctx->AddCapability(SpvCapabilityShaderClockKHR);
  724. inst->SetOpcode(SpvOpReadClockKHR);
  725. Instruction::OperandList args;
  726. uint32_t subgroup_scope_id = ir_builder.GetUintConstantId(SpvScopeSubgroup);
  727. args.push_back({SPV_OPERAND_TYPE_ID, {subgroup_scope_id}});
  728. inst->SetInOperands(std::move(args));
  729. ctx->UpdateDefUse(inst);
  730. return true;
  731. }
  732. class AmdExtFoldingRules : public FoldingRules {
  733. public:
  734. explicit AmdExtFoldingRules(IRContext* ctx) : FoldingRules(ctx) {}
  735. protected:
  736. virtual void AddFoldingRules() override {
  737. rules_[SpvOpGroupIAddNonUniformAMD].push_back(
  738. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformIAdd>);
  739. rules_[SpvOpGroupFAddNonUniformAMD].push_back(
  740. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformFAdd>);
  741. rules_[SpvOpGroupUMinNonUniformAMD].push_back(
  742. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformUMin>);
  743. rules_[SpvOpGroupSMinNonUniformAMD].push_back(
  744. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformSMin>);
  745. rules_[SpvOpGroupFMinNonUniformAMD].push_back(
  746. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformFMin>);
  747. rules_[SpvOpGroupUMaxNonUniformAMD].push_back(
  748. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformUMax>);
  749. rules_[SpvOpGroupSMaxNonUniformAMD].push_back(
  750. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformSMax>);
  751. rules_[SpvOpGroupFMaxNonUniformAMD].push_back(
  752. ReplaceGroupNonuniformOperationOpCode<SpvOpGroupNonUniformFMax>);
  753. uint32_t extension_id =
  754. context()->module()->GetExtInstImportId("SPV_AMD_shader_ballot");
  755. if (extension_id != 0) {
  756. ext_rules_[{extension_id, AmdShaderBallotSwizzleInvocationsAMD}]
  757. .push_back(ReplaceSwizzleInvocations);
  758. ext_rules_[{extension_id, AmdShaderBallotSwizzleInvocationsMaskedAMD}]
  759. .push_back(ReplaceSwizzleInvocationsMasked);
  760. ext_rules_[{extension_id, AmdShaderBallotWriteInvocationAMD}].push_back(
  761. ReplaceWriteInvocation);
  762. ext_rules_[{extension_id, AmdShaderBallotMbcntAMD}].push_back(
  763. ReplaceMbcnt);
  764. }
  765. extension_id = context()->module()->GetExtInstImportId(
  766. "SPV_AMD_shader_trinary_minmax");
  767. if (extension_id != 0) {
  768. ext_rules_[{extension_id, FMin3AMD}].push_back(
  769. ReplaceTrinaryMinMax<GLSLstd450FMin>);
  770. ext_rules_[{extension_id, UMin3AMD}].push_back(
  771. ReplaceTrinaryMinMax<GLSLstd450UMin>);
  772. ext_rules_[{extension_id, SMin3AMD}].push_back(
  773. ReplaceTrinaryMinMax<GLSLstd450SMin>);
  774. ext_rules_[{extension_id, FMax3AMD}].push_back(
  775. ReplaceTrinaryMinMax<GLSLstd450FMax>);
  776. ext_rules_[{extension_id, UMax3AMD}].push_back(
  777. ReplaceTrinaryMinMax<GLSLstd450UMax>);
  778. ext_rules_[{extension_id, SMax3AMD}].push_back(
  779. ReplaceTrinaryMinMax<GLSLstd450SMax>);
  780. ext_rules_[{extension_id, FMid3AMD}].push_back(
  781. ReplaceTrinaryMid<GLSLstd450FMin, GLSLstd450FMax, GLSLstd450FClamp>);
  782. ext_rules_[{extension_id, UMid3AMD}].push_back(
  783. ReplaceTrinaryMid<GLSLstd450UMin, GLSLstd450UMax, GLSLstd450UClamp>);
  784. ext_rules_[{extension_id, SMid3AMD}].push_back(
  785. ReplaceTrinaryMid<GLSLstd450SMin, GLSLstd450SMax, GLSLstd450SClamp>);
  786. }
  787. extension_id =
  788. context()->module()->GetExtInstImportId("SPV_AMD_gcn_shader");
  789. if (extension_id != 0) {
  790. ext_rules_[{extension_id, CubeFaceCoordAMD}].push_back(
  791. ReplaceCubeFaceCoord);
  792. ext_rules_[{extension_id, CubeFaceIndexAMD}].push_back(
  793. ReplaceCubeFaceIndex);
  794. ext_rules_[{extension_id, TimeAMD}].push_back(ReplaceTimeAMD);
  795. }
  796. }
  797. };
  798. class AmdExtConstFoldingRules : public ConstantFoldingRules {
  799. public:
  800. AmdExtConstFoldingRules(IRContext* ctx) : ConstantFoldingRules(ctx) {}
  801. protected:
  802. virtual void AddFoldingRules() override {}
  803. };
  804. } // namespace
  805. Pass::Status AmdExtensionToKhrPass::Process() {
  806. bool changed = false;
  807. // Traverse the body of the functions to replace instructions that require
  808. // the extensions.
  809. InstructionFolder folder(
  810. context(),
  811. std::unique_ptr<AmdExtFoldingRules>(new AmdExtFoldingRules(context())),
  812. MakeUnique<AmdExtConstFoldingRules>(context()));
  813. for (Function& func : *get_module()) {
  814. func.ForEachInst([&changed, &folder](Instruction* inst) {
  815. if (folder.FoldInstruction(inst)) {
  816. changed = true;
  817. }
  818. });
  819. }
  820. // Now that instruction that require the extensions have been removed, we can
  821. // remove the extension instructions.
  822. std::set<std::string> ext_to_remove = {"SPV_AMD_shader_ballot",
  823. "SPV_AMD_shader_trinary_minmax",
  824. "SPV_AMD_gcn_shader"};
  825. std::vector<Instruction*> to_be_killed;
  826. for (Instruction& inst : context()->module()->extensions()) {
  827. if (inst.opcode() == SpvOpExtension) {
  828. if (ext_to_remove.count(reinterpret_cast<const char*>(
  829. &(inst.GetInOperand(0).words[0]))) != 0) {
  830. to_be_killed.push_back(&inst);
  831. }
  832. }
  833. }
  834. for (Instruction& inst : context()->ext_inst_imports()) {
  835. if (inst.opcode() == SpvOpExtInstImport) {
  836. if (ext_to_remove.count(reinterpret_cast<const char*>(
  837. &(inst.GetInOperand(0).words[0]))) != 0) {
  838. to_be_killed.push_back(&inst);
  839. }
  840. }
  841. }
  842. for (Instruction* inst : to_be_killed) {
  843. context()->KillInst(inst);
  844. changed = true;
  845. }
  846. // The replacements that take place use instructions that are missing before
  847. // SPIR-V 1.3. If we changed something, we will have to make sure the version
  848. // is at least SPIR-V 1.3 to make sure those instruction can be used.
  849. if (changed) {
  850. uint32_t version = get_module()->version();
  851. if (version < 0x00010300 /*1.3*/) {
  852. get_module()->set_version(0x00010300);
  853. }
  854. }
  855. return changed ? Status::SuccessWithChange : Status::SuccessWithoutChange;
  856. }
  857. } // namespace opt
  858. } // namespace spvtools