// fuzzer_util.cpp
  1. // Copyright (c) 2019 Google LLC
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #include "source/fuzz/fuzzer_util.h"
  15. namespace spvtools {
  16. namespace fuzz {
  17. namespace fuzzerutil {
  18. bool IsFreshId(opt::IRContext* context, uint32_t id) {
  19. return !context->get_def_use_mgr()->GetDef(id);
  20. }
  21. void UpdateModuleIdBound(opt::IRContext* context, uint32_t id) {
  22. // TODO(https://github.com/KhronosGroup/SPIRV-Tools/issues/2541) consider the
  23. // case where the maximum id bound is reached.
  24. context->module()->SetIdBound(
  25. std::max(context->module()->id_bound(), id + 1));
  26. }
  27. opt::BasicBlock* MaybeFindBlock(opt::IRContext* context,
  28. uint32_t maybe_block_id) {
  29. auto inst = context->get_def_use_mgr()->GetDef(maybe_block_id);
  30. if (inst == nullptr) {
  31. // No instruction defining this id was found.
  32. return nullptr;
  33. }
  34. if (inst->opcode() != SpvOpLabel) {
  35. // The instruction defining the id is not a label, so it cannot be a block
  36. // id.
  37. return nullptr;
  38. }
  39. return context->cfg()->block(maybe_block_id);
  40. }
  41. bool PhiIdsOkForNewEdge(
  42. opt::IRContext* context, opt::BasicBlock* bb_from, opt::BasicBlock* bb_to,
  43. const google::protobuf::RepeatedField<google::protobuf::uint32>& phi_ids) {
  44. if (bb_from->IsSuccessor(bb_to)) {
  45. // There is already an edge from |from_block| to |to_block|, so there is
  46. // no need to extend OpPhi instructions. Do not allow phi ids to be
  47. // present. This might turn out to be too strict; perhaps it would be OK
  48. // just to ignore the ids in this case.
  49. return phi_ids.empty();
  50. }
  51. // The edge would add a previously non-existent edge from |from_block| to
  52. // |to_block|, so we go through the given phi ids and check that they exactly
  53. // match the OpPhi instructions in |to_block|.
  54. uint32_t phi_index = 0;
  55. // An explicit loop, rather than applying a lambda to each OpPhi in |bb_to|,
  56. // makes sense here because we need to increment |phi_index| for each OpPhi
  57. // instruction.
  58. for (auto& inst : *bb_to) {
  59. if (inst.opcode() != SpvOpPhi) {
  60. // The OpPhi instructions all occur at the start of the block; if we find
  61. // a non-OpPhi then we have seen them all.
  62. break;
  63. }
  64. if (phi_index == static_cast<uint32_t>(phi_ids.size())) {
  65. // Not enough phi ids have been provided to account for the OpPhi
  66. // instructions.
  67. return false;
  68. }
  69. // Look for an instruction defining the next phi id.
  70. opt::Instruction* phi_extension =
  71. context->get_def_use_mgr()->GetDef(phi_ids[phi_index]);
  72. if (!phi_extension) {
  73. // The id given to extend this OpPhi does not exist.
  74. return false;
  75. }
  76. if (phi_extension->type_id() != inst.type_id()) {
  77. // The instruction given to extend this OpPhi either does not have a type
  78. // or its type does not match that of the OpPhi.
  79. return false;
  80. }
  81. if (context->get_instr_block(phi_extension)) {
  82. // The instruction defining the phi id has an associated block (i.e., it
  83. // is not a global value). Check whether its definition dominates the
  84. // exit of |from_block|.
  85. auto dominator_analysis =
  86. context->GetDominatorAnalysis(bb_from->GetParent());
  87. if (!dominator_analysis->Dominates(phi_extension,
  88. bb_from->terminator())) {
  89. // The given id is no good as its definition does not dominate the exit
  90. // of |from_block|
  91. return false;
  92. }
  93. }
  94. phi_index++;
  95. }
  96. // Return false if not all of the ids for extending OpPhi instructions are
  97. // needed. This might turn out to be stricter than necessary; perhaps it would
  98. // be OK just to not use the ids in this case.
  99. return phi_index == static_cast<uint32_t>(phi_ids.size());
  100. }
// Turns the OpBranch terminator of |bb_from| into an OpBranchConditional
// whose condition is the boolean constant |condition_value|.  The branch
// targets are ordered so that the original successor remains the target that
// is actually taken, and |bb_to| becomes the never-executed alternative.  If
// this creates a previously non-existent edge from |bb_from| to |bb_to|, each
// OpPhi in |bb_to| is extended with one id from |phi_ids|, paired with
// |bb_from| as the incoming block.
// Preconditions: PhiIdsOkForNewEdge(context, bb_from, bb_to, phi_ids) must
// hold, and |bb_from| must end in OpBranch.  NOTE(review): the boolean
// constant is obtained via FindDeclaredConstant, so it appears to be assumed
// already present in the module - confirm with callers.
void AddUnreachableEdgeAndUpdateOpPhis(
    opt::IRContext* context, opt::BasicBlock* bb_from, opt::BasicBlock* bb_to,
    bool condition_value,
    const google::protobuf::RepeatedField<google::protobuf::uint32>& phi_ids) {
  assert(PhiIdsOkForNewEdge(context, bb_from, bb_to, phi_ids) &&
         "Precondition on phi_ids is not satisfied");
  assert(bb_from->terminator()->opcode() == SpvOpBranch &&
         "Precondition on terminator of bb_from is not satisfied");
  // Get the id of the boolean constant to be used as the condition.
  opt::analysis::Bool bool_type;
  opt::analysis::BoolConstant bool_constant(
      context->get_type_mgr()->GetRegisteredType(&bool_type)->AsBool(),
      condition_value);
  uint32_t bool_id = context->get_constant_mgr()->FindDeclaredConstant(
      &bool_constant, context->get_type_mgr()->GetId(&bool_type));
  // Record whether the edge already exists *before* mutating the terminator,
  // as IsSuccessor would answer differently afterwards.
  const bool from_to_edge_already_exists = bb_from->IsSuccessor(bb_to);
  auto successor = bb_from->terminator()->GetSingleWordInOperand(0);
  // Add the dead branch, by turning OpBranch into OpBranchConditional, and
  // ordering the targets depending on whether the given boolean corresponds to
  // true or false: the original successor sits in the slot that the constant
  // condition selects.
  bb_from->terminator()->SetOpcode(SpvOpBranchConditional);
  bb_from->terminator()->SetInOperands(
      {{SPV_OPERAND_TYPE_ID, {bool_id}},
       {SPV_OPERAND_TYPE_ID, {condition_value ? successor : bb_to->id()}},
       {SPV_OPERAND_TYPE_ID, {condition_value ? bb_to->id() : successor}}});
  // Update OpPhi instructions in the target block if this branch adds a
  // previously non-existent edge from source to target.
  if (!from_to_edge_already_exists) {
    uint32_t phi_index = 0;
    for (auto& inst : *bb_to) {
      if (inst.opcode() != SpvOpPhi) {
        // OpPhi instructions are contiguous at the start of the block.
        break;
      }
      assert(phi_index < static_cast<uint32_t>(phi_ids.size()) &&
             "There should be exactly one phi id per OpPhi instruction.");
      // Each OpPhi gains a (value, predecessor) operand pair for the new edge.
      inst.AddOperand({SPV_OPERAND_TYPE_ID, {phi_ids[phi_index]}});
      inst.AddOperand({SPV_OPERAND_TYPE_ID, {bb_from->id()}});
      phi_index++;
    }
    assert(phi_index == static_cast<uint32_t>(phi_ids.size()) &&
           "There should be exactly one phi id per OpPhi instruction.");
  }
}
  144. bool BlockIsInLoopContinueConstruct(opt::IRContext* context, uint32_t block_id,
  145. uint32_t maybe_loop_header_id) {
  146. // We deem a block to be part of a loop's continue construct if the loop's
  147. // continue target dominates the block.
  148. auto containing_construct_block = context->cfg()->block(maybe_loop_header_id);
  149. if (containing_construct_block->IsLoopHeader()) {
  150. auto continue_target = containing_construct_block->ContinueBlockId();
  151. if (context->GetDominatorAnalysis(containing_construct_block->GetParent())
  152. ->Dominates(continue_target, block_id)) {
  153. return true;
  154. }
  155. }
  156. return false;
  157. }
  158. opt::BasicBlock::iterator GetIteratorForInstruction(
  159. opt::BasicBlock* block, const opt::Instruction* inst) {
  160. for (auto inst_it = block->begin(); inst_it != block->end(); ++inst_it) {
  161. if (inst == &*inst_it) {
  162. return inst_it;
  163. }
  164. }
  165. return block->end();
  166. }
// Conservatively decides whether adding a CFG edge from |bb_from| to |bb_to|
// would break the rule that every id's definition must dominate its uses.
// Returns true if the edge is believed to respect dominance (see the TODO
// below: the check is known to be incomplete).  |bb_from| must currently end
// in OpBranch.
bool NewEdgeRespectsUseDefDominance(opt::IRContext* context,
                                    opt::BasicBlock* bb_from,
                                    opt::BasicBlock* bb_to) {
  assert(bb_from->terminator()->opcode() == SpvOpBranch);
  // If there is *already* an edge from |bb_from| to |bb_to|, then adding
  // another edge is fine from a dominance point of view.
  if (bb_from->terminator()->GetSingleWordInOperand(0) == bb_to->id()) {
    return true;
  }
  // TODO(https://github.com/KhronosGroup/SPIRV-Tools/issues/2919): the
  // solution below to determining whether a new edge respects dominance
  // rules is incomplete.  Test
  // TransformationAddDeadContinueTest::DISABLED_Miscellaneous6 exposes the
  // problem.  In practice, this limitation does not bite too often, and the
  // worst it does is leads to SPIR-V that spirv-val rejects.
  //
  // Let us assume that the module being manipulated is valid according to the
  // rules of the SPIR-V language.
  //
  // Suppose that some block Y is dominated by |bb_to| (which includes the case
  // where Y = |bb_to|).
  //
  // Suppose that Y uses an id i that is defined in some other block X.
  //
  // Because the module is valid, X must dominate Y.  We are concerned about
  // whether an edge from |bb_from| to |bb_to| could *stop* X from dominating
  // Y.
  //
  // Because |bb_to| dominates Y, a new edge from |bb_from| to |bb_to| can
  // only affect whether X dominates Y if X dominates |bb_to|.
  //
  // So let us assume that X does dominate |bb_to|, so that we have:
  //
  //   (X defines i) dominates |bb_to| dominates (Y uses i)
  //
  // The new edge from |bb_from| to |bb_to| will stop the definition of i in X
  // from dominating the use of i in Y exactly when the new edge will stop X
  // from dominating |bb_to|.
  //
  // Now, the block X that we are worried about cannot dominate |bb_from|,
  // because in that case X would still dominate |bb_to| after we add an edge
  // from |bb_from| to |bb_to|.
  //
  // Also, it cannot be that X = |bb_to|, because nothing can stop a block
  // from dominating itself.
  //
  // So we are looking for a block X such that:
  //
  // - X strictly dominates |bb_to|
  // - X does not dominate |bb_from|
  // - X defines an id i
  // - i is used in some block Y
  // - |bb_to| dominates Y
  //
  // Walk the dominator tree backwards, starting from the immediate dominator
  // of |bb_to|.  We can stop when we find a block that also dominates
  // |bb_from|.
  auto dominator_analysis = context->GetDominatorAnalysis(bb_from->GetParent());
  for (auto dominator = dominator_analysis->ImmediateDominator(bb_to);
       dominator != nullptr &&
       !dominator_analysis->Dominates(dominator, bb_from);
       dominator = dominator_analysis->ImmediateDominator(dominator)) {
    // |dominator| is a candidate for block X in the above description.
    // We now look through the instructions for a candidate instruction i.
    for (auto& inst : *dominator) {
      // Consider all the uses of this instruction.  WhileEachUse stops - and
      // returns false - as soon as the callback returns false, which here
      // signals a problematic use.
      if (!context->get_def_use_mgr()->WhileEachUse(
              &inst,
              [bb_to, context, dominator_analysis](
                  opt::Instruction* user, uint32_t operand_index) -> bool {
                // If this use is in an OpPhi, we need to check that dominance
                // of the relevant *parent* block is not spoiled.  Otherwise we
                // need to check that dominance of the block containing the use
                // is not spoiled.
                opt::BasicBlock* use_block_or_phi_parent =
                    user->opcode() == SpvOpPhi
                        ? context->cfg()->block(
                              user->GetSingleWordOperand(operand_index + 1))
                        : context->get_instr_block(user);
                // There might not be any relevant block, e.g. if the use is in
                // a decoration; in this case the new edge is unproblematic.
                if (use_block_or_phi_parent == nullptr) {
                  return true;
                }
                // With reference to the above discussion,
                // |use_block_or_phi_parent| is a candidate for the block Y.
                // If |bb_to| dominates this block, the new edge would be
                // problematic, so return false to abort the walk.
                return !dominator_analysis->Dominates(bb_to,
                                                      use_block_or_phi_parent);
              })) {
        // A problematic use was found: the new edge would break dominance.
        return false;
      }
    }
  }
  return true;
}
  262. bool BlockIsReachableInItsFunction(opt::IRContext* context,
  263. opt::BasicBlock* bb) {
  264. auto enclosing_function = bb->GetParent();
  265. return context->GetDominatorAnalysis(enclosing_function)
  266. ->Dominates(enclosing_function->entry().get(), bb);
  267. }
  268. bool CanInsertOpcodeBeforeInstruction(
  269. SpvOp opcode, const opt::BasicBlock::iterator& instruction_in_block) {
  270. if (instruction_in_block->PreviousNode() &&
  271. (instruction_in_block->PreviousNode()->opcode() == SpvOpLoopMerge ||
  272. instruction_in_block->PreviousNode()->opcode() == SpvOpSelectionMerge)) {
  273. // We cannot insert directly after a merge instruction.
  274. return false;
  275. }
  276. if (opcode != SpvOpVariable &&
  277. instruction_in_block->opcode() == SpvOpVariable) {
  278. // We cannot insert a non-OpVariable instruction directly before a
  279. // variable; variables in a function must be contiguous in the entry block.
  280. return false;
  281. }
  282. // We cannot insert a non-OpPhi instruction directly before an OpPhi, because
  283. // OpPhi instructions need to be contiguous at the start of a block.
  284. return opcode == SpvOpPhi || instruction_in_block->opcode() != SpvOpPhi;
  285. }
  286. bool CanMakeSynonymOf(opt::IRContext* ir_context, opt::Instruction* inst) {
  287. if (!inst->HasResultId()) {
  288. // We can only make a synonym of an instruction that generates an id.
  289. return false;
  290. }
  291. if (!inst->type_id()) {
  292. // We can only make a synonym of an instruction that has a type.
  293. return false;
  294. }
  295. // We do not make synonyms of objects that have decorations: if the synonym is
  296. // not decorated analogously, using the original object vs. its synonymous
  297. // form may not be equivalent.
  298. return ir_context->get_decoration_mgr()
  299. ->GetDecorationsFor(inst->result_id(), true)
  300. .empty();
  301. }
  302. bool IsCompositeType(const opt::analysis::Type* type) {
  303. return type && (type->AsArray() || type->AsMatrix() || type->AsStruct() ||
  304. type->AsVector());
  305. }
  306. std::vector<uint32_t> RepeatedFieldToVector(
  307. const google::protobuf::RepeatedField<uint32_t>& repeated_field) {
  308. std::vector<uint32_t> result;
  309. for (auto i : repeated_field) {
  310. result.push_back(i);
  311. }
  312. return result;
  313. }
  314. uint32_t WalkCompositeTypeIndices(
  315. opt::IRContext* context, uint32_t base_object_type_id,
  316. const google::protobuf::RepeatedField<google::protobuf::uint32>& indices) {
  317. uint32_t sub_object_type_id = base_object_type_id;
  318. for (auto index : indices) {
  319. auto should_be_composite_type =
  320. context->get_def_use_mgr()->GetDef(sub_object_type_id);
  321. assert(should_be_composite_type && "The type should exist.");
  322. if (SpvOpTypeArray == should_be_composite_type->opcode()) {
  323. auto array_length = GetArraySize(*should_be_composite_type, context);
  324. if (array_length == 0 || index >= array_length) {
  325. return 0;
  326. }
  327. sub_object_type_id = should_be_composite_type->GetSingleWordInOperand(0);
  328. } else if (SpvOpTypeMatrix == should_be_composite_type->opcode()) {
  329. auto matrix_column_count =
  330. should_be_composite_type->GetSingleWordInOperand(1);
  331. if (index >= matrix_column_count) {
  332. return 0;
  333. }
  334. sub_object_type_id = should_be_composite_type->GetSingleWordInOperand(0);
  335. } else if (SpvOpTypeStruct == should_be_composite_type->opcode()) {
  336. if (index >= GetNumberOfStructMembers(*should_be_composite_type)) {
  337. return 0;
  338. }
  339. sub_object_type_id =
  340. should_be_composite_type->GetSingleWordInOperand(index);
  341. } else if (SpvOpTypeVector == should_be_composite_type->opcode()) {
  342. auto vector_length = should_be_composite_type->GetSingleWordInOperand(1);
  343. if (index >= vector_length) {
  344. return 0;
  345. }
  346. sub_object_type_id = should_be_composite_type->GetSingleWordInOperand(0);
  347. } else {
  348. return 0;
  349. }
  350. }
  351. return sub_object_type_id;
  352. }
  353. uint32_t GetNumberOfStructMembers(
  354. const opt::Instruction& struct_type_instruction) {
  355. assert(struct_type_instruction.opcode() == SpvOpTypeStruct &&
  356. "An OpTypeStruct instruction is required here.");
  357. return struct_type_instruction.NumInOperands();
  358. }
  359. uint32_t GetArraySize(const opt::Instruction& array_type_instruction,
  360. opt::IRContext* context) {
  361. auto array_length_constant =
  362. context->get_constant_mgr()
  363. ->GetConstantFromInst(context->get_def_use_mgr()->GetDef(
  364. array_type_instruction.GetSingleWordInOperand(1)))
  365. ->AsIntConstant();
  366. if (array_length_constant->words().size() != 1) {
  367. return 0;
  368. }
  369. return array_length_constant->GetU32();
  370. }
  371. } // namespace fuzzerutil
  372. } // namespace fuzz
  373. } // namespace spvtools