loop_peeling.cpp

// Copyright (c) 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <algorithm>
#include <functional>
#include <limits>
#include <memory>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "source/opt/ir_builder.h"
#include "source/opt/ir_context.h"
#include "source/opt/loop_descriptor.h"
#include "source/opt/loop_peeling.h"
#include "source/opt/loop_utils.h"
#include "source/opt/scalar_analysis.h"
#include "source/opt/scalar_analysis_nodes.h"

namespace spvtools {
namespace opt {

size_t LoopPeelingPass::code_grow_threshold_ = 1000;
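
// Duplicates |loop_| and connects the two loops so that the clone runs first:
// the clone takes over the original preheader, the clone's exit branches to
// the original header, and the original header phis are patched to start from
// the clone's exit values.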
void LoopPeeling::DuplicateAndConnectLoop(
    LoopUtils::LoopCloningResult* clone_results) {
  CFG& cfg = *context_->cfg();
  analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();

  assert(CanPeelLoop() && "Cannot peel loop!");

  std::vector<BasicBlock*> ordered_loop_blocks;
  // TODO(1841): Handle failure to create pre-header.
  BasicBlock* pre_header = loop_->GetOrCreatePreHeaderBlock();

  loop_->ComputeLoopStructuredOrder(&ordered_loop_blocks);

  cloned_loop_ = loop_utils_.CloneLoop(clone_results, ordered_loop_blocks);

  // Add the basic block to the function.
  Function::iterator it =
      loop_utils_.GetFunction()->FindBlock(pre_header->id());
  assert(it != loop_utils_.GetFunction()->end() &&
         "Pre-header not found in the function.");
  loop_utils_.GetFunction()->AddBasicBlocks(
      clone_results->cloned_bb_.begin(), clone_results->cloned_bb_.end(), ++it);

  // Make |cloned_loop_| take over |loop_|'s preheader.
  BasicBlock* cloned_header = cloned_loop_->GetHeaderBlock();
  pre_header->ForEachSuccessorLabel(
      [cloned_header](uint32_t* succ) { *succ = cloned_header->id(); });

  // Update cfg.
  cfg.RemoveEdge(pre_header->id(), loop_->GetHeaderBlock()->id());
  cloned_loop_->SetPreHeaderBlock(pre_header);
  loop_->SetPreHeaderBlock(nullptr);

  // When cloning the loop, we didn't clone the merge block, so currently
  // |cloned_loop_| shares the same merge block as |loop_|.
  // We mutate all branches from |cloned_loop_|'s blocks to |loop_|'s merge
  // block into a branch to |loop_|'s header (so the header will also act as
  // the merge block of |cloned_loop_|).
  uint32_t cloned_loop_exit = 0;
  for (uint32_t pred_id : cfg.preds(loop_->GetMergeBlock()->id())) {
    if (loop_->IsInsideLoop(pred_id)) continue;
    BasicBlock* bb = cfg.block(pred_id);
    assert(cloned_loop_exit == 0 && "The loop has multiple exits.");
    cloned_loop_exit = bb->id();
    bb->ForEachSuccessorLabel([this](uint32_t* succ) {
      if (*succ == loop_->GetMergeBlock()->id())
        *succ = loop_->GetHeaderBlock()->id();
    });
  }

  // Update cfg.
  cfg.RemoveNonExistingEdges(loop_->GetMergeBlock()->id());
  cfg.AddEdge(cloned_loop_exit, loop_->GetHeaderBlock()->id());

  // Patch the phi of the original loop header:
  //  - Set the loop entry branch to come from the cloned loop exit block;
  //  - Set the initial value of the phi using the corresponding cloned loop
  //    exit values.
  //
  // We patch the iterating value initializers of the original loop using the
  // corresponding cloned loop exit values. This connects the iterating values
  // of the cloned loop to those of the original loop, making sure that the
  // second loop starts with the last value of the first loop.
  //
  // For example, loops like:
  //
  //   int z = 0;
  //   for (int i = 0; i++ < M; i += cst1) {
  //     if (cond)
  //       z += cst2;
  //   }
  //
  // Will become:
  //
  //   int z = 0;
  //   int i = 0;
  //   for (; i++ < M; i += cst1) {
  //     if (cond)
  //       z += cst2;
  //   }
  //   for (; i++ < M; i += cst1) {
  //     if (cond)
  //       z += cst2;
  //   }
  loop_->GetHeaderBlock()->ForEachPhiInst([cloned_loop_exit, def_use_mgr,
                                           clone_results,
                                           this](Instruction* phi) {
    for (uint32_t i = 0; i < phi->NumInOperands(); i += 2) {
      if (!loop_->IsInsideLoop(phi->GetSingleWordInOperand(i + 1))) {
        phi->SetInOperand(i,
                          {clone_results->value_map_.at(
                              exit_value_.at(phi->result_id())->result_id())});
        phi->SetInOperand(i + 1, {cloned_loop_exit});
        def_use_mgr->AnalyzeInstUse(phi);
        return;
      }
    }
  });

  // Force the creation of a new preheader for the original loop and set it as
  // the merge block for the cloned loop.
  // TODO(1841): Handle failure to create pre-header.
  cloned_loop_->SetMergeBlock(loop_->GetOrCreatePreHeaderBlock());
}
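
// Materializes a canonical induction variable (starting at 0, incremented by
// 1 per iteration) in the cloned loop, reusing the clone of the original
// loop's canonical induction variable when one already exists.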
void LoopPeeling::InsertCanonicalInductionVariable(
    LoopUtils::LoopCloningResult* clone_results) {
  if (original_loop_canonical_induction_variable_) {
    canonical_induction_variable_ =
        context_->get_def_use_mgr()->GetDef(clone_results->value_map_.at(
            original_loop_canonical_induction_variable_->result_id()));
    return;
  }

  BasicBlock::iterator insert_point = GetClonedLoop()->GetLatchBlock()->tail();
  if (GetClonedLoop()->GetLatchBlock()->GetMergeInst()) {
    --insert_point;
  }
  InstructionBuilder builder(
      context_, &*insert_point,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  Instruction* uint_1_cst =
      builder.GetIntConstant<uint32_t>(1, int_type_->IsSigned());
  // Create the increment.
  // Note that we do "1 + 1" here: one of the operands should be the phi
  // value, but we don't have it yet. The operand will be set later.
  Instruction* iv_inc = builder.AddIAdd(
      uint_1_cst->type_id(), uint_1_cst->result_id(), uint_1_cst->result_id());

  builder.SetInsertPoint(&*GetClonedLoop()->GetHeaderBlock()->begin());

  canonical_induction_variable_ = builder.AddPhi(
      uint_1_cst->type_id(),
      {builder.GetIntConstant<uint32_t>(0, int_type_->IsSigned())->result_id(),
       GetClonedLoop()->GetPreHeaderBlock()->id(), iv_inc->result_id(),
       GetClonedLoop()->GetLatchBlock()->id()});
  // Connect everything.
  iv_inc->SetInOperand(0, {canonical_induction_variable_->result_id()});

  // Update def/use manager.
  context_->get_def_use_mgr()->AnalyzeInstUse(iv_inc);

  // If do-while form, use the incremented value.
  if (do_while_form_) {
    canonical_induction_variable_ = iv_inc;
  }
}
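
// Recursively collects in |operations| every instruction inside |loop| that
// |iterator| transitively depends on.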
void LoopPeeling::GetIteratorUpdateOperations(
    const Loop* loop, Instruction* iterator,
    std::unordered_set<Instruction*>* operations) {
  analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();
  operations->insert(iterator);
  iterator->ForEachInId([def_use_mgr, loop, operations, this](uint32_t* id) {
    Instruction* insn = def_use_mgr->GetDef(*id);
    if (insn->opcode() == SpvOpLabel) {
      return;
    }
    if (operations->count(insn)) {
      return;
    }
    if (!loop->IsInsideLoop(insn)) {
      return;
    }
    GetIteratorUpdateOperations(loop, insn, operations);
  });
}
// Gather the set of blocks along all the paths from |block| back to |entry|.
static void GetBlocksInPath(uint32_t block, uint32_t entry,
                            std::unordered_set<uint32_t>* blocks_in_path,
                            const CFG& cfg) {
  for (uint32_t pid : cfg.preds(block)) {
    if (blocks_in_path->insert(pid).second) {
      if (pid != entry) {
        GetBlocksInPath(pid, entry, blocks_in_path, cfg);
      }
    }
  }
}
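
// Returns true when every instruction on the paths from the loop header to
// the exit-condition block is side-effect free (branches, merge instructions
// and combinators only), so the exit condition can safely be evaluated before
// the first full iteration.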
bool LoopPeeling::IsConditionCheckSideEffectFree() const {
  CFG& cfg = *context_->cfg();

  // The "do-while" form does not cause issues: the algorithm takes the first
  // iteration into account.
  if (!do_while_form_) {
    uint32_t condition_block_id = cfg.preds(loop_->GetMergeBlock()->id())[0];

    std::unordered_set<uint32_t> blocks_in_path;

    blocks_in_path.insert(condition_block_id);
    GetBlocksInPath(condition_block_id, loop_->GetHeaderBlock()->id(),
                    &blocks_in_path, cfg);

    for (uint32_t bb_id : blocks_in_path) {
      BasicBlock* bb = cfg.block(bb_id);
      if (!bb->WhileEachInst([this](Instruction* insn) {
            if (insn->IsBranch()) return true;
            switch (insn->opcode()) {
              case SpvOpLabel:
              case SpvOpSelectionMerge:
              case SpvOpLoopMerge:
                return true;
              default:
                break;
            }
            return context_->IsCombinatorInstruction(insn);
          })) {
        return false;
      }
    }
  }

  return true;
}
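
// For each phi of the loop header, records in |exit_value_| the value the phi
// takes when the exit condition is evaluated, or nullptr when it cannot be
// determined. Also detects whether the loop has a do-while form.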
void LoopPeeling::GetIteratingExitValues() {
  CFG& cfg = *context_->cfg();

  loop_->GetHeaderBlock()->ForEachPhiInst(
      [this](Instruction* phi) { exit_value_[phi->result_id()] = nullptr; });

  if (!loop_->GetMergeBlock()) {
    return;
  }
  if (cfg.preds(loop_->GetMergeBlock()->id()).size() != 1) {
    return;
  }
  analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();

  uint32_t condition_block_id = cfg.preds(loop_->GetMergeBlock()->id())[0];

  auto& header_pred = cfg.preds(loop_->GetHeaderBlock()->id());
  do_while_form_ = std::find(header_pred.begin(), header_pred.end(),
                             condition_block_id) != header_pred.end();
  if (do_while_form_) {
    loop_->GetHeaderBlock()->ForEachPhiInst(
        [condition_block_id, def_use_mgr, this](Instruction* phi) {
          std::unordered_set<Instruction*> operations;

          for (uint32_t i = 0; i < phi->NumInOperands(); i += 2) {
            if (condition_block_id == phi->GetSingleWordInOperand(i + 1)) {
              exit_value_[phi->result_id()] =
                  def_use_mgr->GetDef(phi->GetSingleWordInOperand(i));
            }
          }
        });
  } else {
    DominatorTree* dom_tree =
        &context_->GetDominatorAnalysis(loop_utils_.GetFunction())
             ->GetDomTree();
    BasicBlock* condition_block = cfg.block(condition_block_id);

    loop_->GetHeaderBlock()->ForEachPhiInst(
        [dom_tree, condition_block, this](Instruction* phi) {
          std::unordered_set<Instruction*> operations;

          // This is not the back-edge value; check if the phi instruction is
          // the only possible candidate.
          GetIteratorUpdateOperations(loop_, phi, &operations);

          for (Instruction* insn : operations) {
            if (insn == phi) {
              continue;
            }
            if (dom_tree->Dominates(context_->get_instr_block(insn),
                                    condition_block)) {
              return;
            }
          }
          exit_value_[phi->result_id()] = phi;
        });
  }
}
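
// Rebuilds the exit condition of the cloned loop: |condition_builder| emits
// the new condition value, and the conditional branch is rewired so that the
// false target is the cloned loop's merge block.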
void LoopPeeling::FixExitCondition(
    const std::function<uint32_t(Instruction*)>& condition_builder) {
  CFG& cfg = *context_->cfg();

  uint32_t condition_block_id = 0;
  for (uint32_t id : cfg.preds(GetClonedLoop()->GetMergeBlock()->id())) {
    if (GetClonedLoop()->IsInsideLoop(id)) {
      condition_block_id = id;
      break;
    }
  }
  assert(condition_block_id != 0 && "2nd loop is improperly connected");

  BasicBlock* condition_block = cfg.block(condition_block_id);
  Instruction* exit_condition = condition_block->terminator();
  assert(exit_condition->opcode() == SpvOpBranchConditional);
  BasicBlock::iterator insert_point = condition_block->tail();
  if (condition_block->GetMergeInst()) {
    --insert_point;
  }

  exit_condition->SetInOperand(0, {condition_builder(&*insert_point)});

  uint32_t to_continue_block_idx =
      GetClonedLoop()->IsInsideLoop(exit_condition->GetSingleWordInOperand(1))
          ? 1
          : 2;
  exit_condition->SetInOperand(
      1, {exit_condition->GetSingleWordInOperand(to_continue_block_idx)});
  exit_condition->SetInOperand(2, {GetClonedLoop()->GetMergeBlock()->id()});

  // Update def/use manager.
  context_->get_def_use_mgr()->AnalyzeInstUse(exit_condition);
}
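
// Creates a new block that jumps to |bb| and takes over the edge from |bb|'s
// single predecessor, keeping the CFG, def/use manager and loop descriptor up
// to date. Returns the new block.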
BasicBlock* LoopPeeling::CreateBlockBefore(BasicBlock* bb) {
  analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();
  CFG& cfg = *context_->cfg();
  assert(cfg.preds(bb->id()).size() == 1 && "More than one predecessor");

  // TODO(1841): Handle id overflow.
  std::unique_ptr<BasicBlock> new_bb =
      MakeUnique<BasicBlock>(std::unique_ptr<Instruction>(new Instruction(
          context_, SpvOpLabel, 0, context_->TakeNextId(), {})));
  new_bb->SetParent(loop_utils_.GetFunction());
  // Update the loop descriptor.
  Loop* in_loop = (*loop_utils_.GetLoopDescriptor())[bb];
  if (in_loop) {
    in_loop->AddBasicBlock(new_bb.get());
    loop_utils_.GetLoopDescriptor()->SetBasicBlockToLoop(new_bb->id(), in_loop);
  }

  context_->set_instr_block(new_bb->GetLabelInst(), new_bb.get());
  def_use_mgr->AnalyzeInstDefUse(new_bb->GetLabelInst());

  BasicBlock* bb_pred = cfg.block(cfg.preds(bb->id())[0]);
  bb_pred->tail()->ForEachInId([bb, &new_bb](uint32_t* id) {
    if (*id == bb->id()) {
      *id = new_bb->id();
    }
  });
  cfg.RemoveEdge(bb_pred->id(), bb->id());
  cfg.AddEdge(bb_pred->id(), new_bb->id());
  def_use_mgr->AnalyzeInstUse(&*bb_pred->tail());

  // Update the incoming branch.
  bb->ForEachPhiInst([&new_bb, def_use_mgr](Instruction* phi) {
    phi->SetInOperand(1, {new_bb->id()});
    def_use_mgr->AnalyzeInstUse(phi);
  });
  InstructionBuilder(
      context_, new_bb.get(),
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping)
      .AddBranch(bb->id());
  cfg.RegisterBlock(new_bb.get());

  // Add the basic block to the function.
  Function::iterator it = loop_utils_.GetFunction()->FindBlock(bb->id());
  assert(it != loop_utils_.GetFunction()->end() &&
         "Basic block not found in the function.");
  BasicBlock* ret = new_bb.get();
  loop_utils_.GetFunction()->AddBasicBlock(std::move(new_bb), it);
  return ret;
}
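
// Wraps |loop| into an if: the loop header is reached only when |condition|
// holds, otherwise control flows to |if_merge|. Returns the block that holds
// the new conditional branch.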
BasicBlock* LoopPeeling::ProtectLoop(Loop* loop, Instruction* condition,
                                     BasicBlock* if_merge) {
  // TODO(1841): Handle failure to create pre-header.
  BasicBlock* if_block = loop->GetOrCreatePreHeaderBlock();
  // Will no longer be a pre-header because of the if.
  loop->SetPreHeaderBlock(nullptr);
  // Kill the branch to the header.
  context_->KillInst(&*if_block->tail());

  InstructionBuilder builder(
      context_, if_block,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  builder.AddConditionalBranch(condition->result_id(),
                               loop->GetHeaderBlock()->id(), if_merge->id(),
                               if_merge->id());

  return if_block;
}
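
// Peels the first |peel_factor| iterations: the cloned loop runs the first
// min(peel_factor, iteration count) iterations and the original loop, guarded
// by peel_factor < iteration count, runs the remaining ones.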
void LoopPeeling::PeelBefore(uint32_t peel_factor) {
  assert(CanPeelLoop() && "Cannot peel loop");

  LoopUtils::LoopCloningResult clone_results;

  // Clone the loop and insert the cloned one before the loop.
  DuplicateAndConnectLoop(&clone_results);

  // Add a canonical induction variable "canonical_induction_variable_".
  InsertCanonicalInductionVariable(&clone_results);

  InstructionBuilder builder(
      context_, &*cloned_loop_->GetPreHeaderBlock()->tail(),
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  Instruction* factor =
      builder.GetIntConstant(peel_factor, int_type_->IsSigned());

  Instruction* has_remaining_iteration = builder.AddLessThan(
      factor->result_id(), loop_iteration_count_->result_id());
  Instruction* max_iteration = builder.AddSelect(
      factor->type_id(), has_remaining_iteration->result_id(),
      factor->result_id(), loop_iteration_count_->result_id());

  // Change the exit condition of the cloned loop to be (the loop exits when
  // it becomes false):
  //  "canonical_induction_variable_" < min("factor", "loop_iteration_count_")
  FixExitCondition([max_iteration, this](Instruction* insert_before_point) {
    return InstructionBuilder(context_, insert_before_point,
                              IRContext::kAnalysisDefUse |
                                  IRContext::kAnalysisInstrToBlockMapping)
        .AddLessThan(canonical_induction_variable_->result_id(),
                     max_iteration->result_id())
        ->result_id();
  });

  // "Protect" the second loop: the second loop can only be executed if
  // |has_remaining_iteration| is true (i.e. factor < loop_iteration_count_).
  BasicBlock* if_merge_block = loop_->GetMergeBlock();
  loop_->SetMergeBlock(CreateBlockBefore(loop_->GetMergeBlock()));
  // Prevent the second loop from being executed if we already executed all the
  // required iterations.
  BasicBlock* if_block =
      ProtectLoop(loop_, has_remaining_iteration, if_merge_block);
  // Patch the phi of the merge block.
  if_merge_block->ForEachPhiInst(
      [&clone_results, if_block, this](Instruction* phi) {
        // |if_merge_block| previously had only one predecessor.
        uint32_t incoming_value = phi->GetSingleWordInOperand(0);
        auto def_in_loop = clone_results.value_map_.find(incoming_value);
        if (def_in_loop != clone_results.value_map_.end())
          incoming_value = def_in_loop->second;
        phi->AddOperand(
            {spv_operand_type_t::SPV_OPERAND_TYPE_ID, {incoming_value}});
        phi->AddOperand(
            {spv_operand_type_t::SPV_OPERAND_TYPE_ID, {if_block->id()}});
        context_->get_def_use_mgr()->AnalyzeInstUse(phi);
      });

  context_->InvalidateAnalysesExceptFor(
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping |
      IRContext::kAnalysisLoopAnalysis | IRContext::kAnalysisCFG);
}
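
// Peels the last |peel_factor| iterations: the cloned loop runs all but the
// last |peel_factor| iterations (and is skipped entirely when peel_factor >=
// iteration count), and the original loop runs the rest.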
void LoopPeeling::PeelAfter(uint32_t peel_factor) {
  assert(CanPeelLoop() && "Cannot peel loop");

  LoopUtils::LoopCloningResult clone_results;

  // Clone the loop and insert the cloned one before the loop.
  DuplicateAndConnectLoop(&clone_results);

  // Add a canonical induction variable "canonical_induction_variable_".
  InsertCanonicalInductionVariable(&clone_results);

  InstructionBuilder builder(
      context_, &*cloned_loop_->GetPreHeaderBlock()->tail(),
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  Instruction* factor =
      builder.GetIntConstant(peel_factor, int_type_->IsSigned());

  Instruction* has_remaining_iteration = builder.AddLessThan(
      factor->result_id(), loop_iteration_count_->result_id());

  // Change the exit condition of the cloned loop to be (the loop exits when
  // it becomes false):
  //  "canonical_induction_variable_" + "factor" < "loop_iteration_count_"
  FixExitCondition([factor, this](Instruction* insert_before_point) {
    InstructionBuilder cond_builder(
        context_, insert_before_point,
        IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
    // Build the following check: canonical_induction_variable_ + factor <
    // iteration_count
    return cond_builder
        .AddLessThan(cond_builder
                         .AddIAdd(canonical_induction_variable_->type_id(),
                                  canonical_induction_variable_->result_id(),
                                  factor->result_id())
                         ->result_id(),
                     loop_iteration_count_->result_id())
        ->result_id();
  });

  // "Protect" the first loop: it can only be executed if
  // factor < loop_iteration_count_.

  // The original loop's pre-header was the cloned loop's merge block.
  GetClonedLoop()->SetMergeBlock(
      CreateBlockBefore(GetOriginalLoop()->GetPreHeaderBlock()));
  // Use the second loop's preheader as the if merge block.

  // Skip the first loop when all its iterations would be peeled anyway.
  BasicBlock* if_block = ProtectLoop(cloned_loop_, has_remaining_iteration,
                                     GetOriginalLoop()->GetPreHeaderBlock());

  // Patch the phi of the header block.
  // We added an if to enclose the first loop, and because the phi nodes are
  // connected to the exit values of the first loop, those definitions no
  // longer dominate the preheader.
  // We have to add the required phi instructions to the preheader (our if
  // merge block) and patch the header phis.
  GetOriginalLoop()->GetHeaderBlock()->ForEachPhiInst(
      [&clone_results, if_block, this](Instruction* phi) {
        analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();

        auto find_value_idx = [](Instruction* phi_inst, Loop* loop) {
          uint32_t preheader_value_idx =
              !loop->IsInsideLoop(phi_inst->GetSingleWordInOperand(1)) ? 0 : 2;
          return preheader_value_idx;
        };

        Instruction* cloned_phi =
            def_use_mgr->GetDef(clone_results.value_map_.at(phi->result_id()));
        uint32_t cloned_preheader_value = cloned_phi->GetSingleWordInOperand(
            find_value_idx(cloned_phi, GetClonedLoop()));

        Instruction* new_phi =
            InstructionBuilder(context_,
                               &*GetOriginalLoop()->GetPreHeaderBlock()->tail(),
                               IRContext::kAnalysisDefUse |
                                   IRContext::kAnalysisInstrToBlockMapping)
                .AddPhi(phi->type_id(),
                        {phi->GetSingleWordInOperand(
                             find_value_idx(phi, GetOriginalLoop())),
                         GetClonedLoop()->GetMergeBlock()->id(),
                         cloned_preheader_value, if_block->id()});

        phi->SetInOperand(find_value_idx(phi, GetOriginalLoop()),
                          {new_phi->result_id()});
        def_use_mgr->AnalyzeInstUse(phi);
      });

  context_->InvalidateAnalysesExceptFor(
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping |
      IRContext::kAnalysisLoopAnalysis | IRContext::kAnalysisCFG);
}
Pass::Status LoopPeelingPass::Process() {
  bool modified = false;
  Module* module = context()->module();

  // Process each function in the module.
  for (Function& f : *module) {
    modified |= ProcessFunction(&f);
  }

  return modified ? Status::SuccessWithChange : Status::SuccessWithoutChange;
}
bool LoopPeelingPass::ProcessFunction(Function* f) {
  bool modified = false;
  LoopDescriptor& loop_descriptor = *context()->GetLoopDescriptor(f);

  std::vector<Loop*> to_process_loop;
  to_process_loop.reserve(loop_descriptor.NumLoops());
  for (Loop& l : loop_descriptor) {
    to_process_loop.push_back(&l);
  }

  ScalarEvolutionAnalysis scev_analysis(context());

  for (Loop* loop : to_process_loop) {
    CodeMetrics loop_size;
    loop_size.Analyze(*loop);

    auto try_peel = [&loop_size, &modified, this](Loop* loop_to_peel) -> Loop* {
      if (!loop_to_peel->IsLCSSA()) {
        LoopUtils(context(), loop_to_peel).MakeLoopClosedSSA();
      }

      bool peeled_loop;
      Loop* still_peelable_loop;
      std::tie(peeled_loop, still_peelable_loop) =
          ProcessLoop(loop_to_peel, &loop_size);

      if (peeled_loop) {
        modified = true;
      }

      return still_peelable_loop;
    };

    Loop* still_peelable_loop = try_peel(loop);
    // The pass works out the maximum factor by which a loop can be peeled.
    // If the loop can potentially be peeled again, then there is only one
    // other possible direction, so only one more call is needed.
    if (still_peelable_loop) {
      try_peel(loop);
    }
  }

  return modified;
}
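
// Attempts to peel |loop| once. Returns whether the loop was peeled, plus the
// loop to reconsider when peeling in the other direction could also pay off
// (nullptr otherwise).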
std::pair<bool, Loop*> LoopPeelingPass::ProcessLoop(Loop* loop,
                                                    CodeMetrics* loop_size) {
  ScalarEvolutionAnalysis* scev_analysis =
      context()->GetScalarEvolutionAnalysis();
  // Default values for bailing out.
  std::pair<bool, Loop*> bail_out{false, nullptr};

  BasicBlock* exit_block = loop->FindConditionBlock();
  if (!exit_block) {
    return bail_out;
  }

  Instruction* exiting_iv = loop->FindConditionVariable(exit_block);
  if (!exiting_iv) {
    return bail_out;
  }
  size_t iterations = 0;
  if (!loop->FindNumberOfIterations(exiting_iv, &*exit_block->tail(),
                                    &iterations)) {
    return bail_out;
  }
  if (!iterations) {
    return bail_out;
  }

  Instruction* canonical_induction_variable = nullptr;

  loop->GetHeaderBlock()->WhileEachPhiInst([&canonical_induction_variable,
                                            scev_analysis,
                                            this](Instruction* insn) {
    if (const SERecurrentNode* iv =
            scev_analysis->AnalyzeInstruction(insn)->AsSERecurrentNode()) {
      const SEConstantNode* offset = iv->GetOffset()->AsSEConstantNode();
      const SEConstantNode* coeff = iv->GetCoefficient()->AsSEConstantNode();
      if (offset && coeff && offset->FoldToSingleValue() == 0 &&
          coeff->FoldToSingleValue() == 1) {
        if (context()->get_type_mgr()->GetType(insn->type_id())->AsInteger()) {
          canonical_induction_variable = insn;
          return false;
        }
      }
    }
    return true;
  });

  bool is_signed = canonical_induction_variable
                       ? context()
                             ->get_type_mgr()
                             ->GetType(canonical_induction_variable->type_id())
                             ->AsInteger()
                             ->IsSigned()
                       : false;

  LoopPeeling peeler(
      loop,
      InstructionBuilder(
          context(), loop->GetHeaderBlock(),
          IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping)
          .GetIntConstant<uint32_t>(static_cast<uint32_t>(iterations),
                                    is_signed),
      canonical_induction_variable);

  if (!peeler.CanPeelLoop()) {
    return bail_out;
  }

  // For each basic block in the loop, check if it can be peeled. If it
  // can, get the direction (before/after) and by which factor.
  LoopPeelingInfo peel_info(loop, iterations, scev_analysis);

  uint32_t peel_before_factor = 0;
  uint32_t peel_after_factor = 0;

  for (uint32_t block : loop->GetBlocks()) {
    if (block == exit_block->id()) {
      continue;
    }
    BasicBlock* bb = cfg()->block(block);
    PeelDirection direction;
    uint32_t factor;
    std::tie(direction, factor) = peel_info.GetPeelingInfo(bb);

    if (direction == PeelDirection::kNone) {
      continue;
    }
    if (direction == PeelDirection::kBefore) {
      peel_before_factor = std::max(peel_before_factor, factor);
    } else {
      assert(direction == PeelDirection::kAfter);
      peel_after_factor = std::max(peel_after_factor, factor);
    }
  }
  PeelDirection direction = PeelDirection::kNone;
  uint32_t factor = 0;

  // Find which direction we should peel.
  if (peel_before_factor) {
    factor = peel_before_factor;
    direction = PeelDirection::kBefore;
  }
  if (peel_after_factor) {
    if (peel_before_factor < peel_after_factor) {
      // Favor a peel after here and give the peel before another shot later.
      factor = peel_after_factor;
      direction = PeelDirection::kAfter;
    }
  }

  // Do the peel if we can.
  if (direction == PeelDirection::kNone) return bail_out;

  // This does not take into account branch elimination opportunities or
  // unrolling; it assumes the peeled loop will be unrolled as well.
  if (factor * loop_size->roi_size_ > code_grow_threshold_) {
    return bail_out;
  }
  loop_size->roi_size_ *= factor;

  // Find if a loop should be peeled again.
  Loop* extra_opportunity = nullptr;

  if (direction == PeelDirection::kBefore) {
    peeler.PeelBefore(factor);
    if (stats_) {
      stats_->peeled_loops_.emplace_back(loop, PeelDirection::kBefore, factor);
    }
    if (peel_after_factor) {
      // We could have peeled after; give it another try.
      extra_opportunity = peeler.GetOriginalLoop();
    }
  } else {
    peeler.PeelAfter(factor);
    if (stats_) {
      stats_->peeled_loops_.emplace_back(loop, PeelDirection::kAfter, factor);
    }
    if (peel_before_factor) {
      // We could have peeled before; give it another try.
      extra_opportunity = peeler.GetClonedLoop();
    }
  }

  return {true, extra_opportunity};
}
uint32_t LoopPeelingPass::LoopPeelingInfo::GetFirstLoopInvariantOperand(
    Instruction* condition) const {
  for (uint32_t i = 0; i < condition->NumInOperands(); i++) {
    BasicBlock* bb =
        context_->get_instr_block(condition->GetSingleWordInOperand(i));
    if (bb && loop_->IsInsideLoop(bb)) {
      return condition->GetSingleWordInOperand(i);
    }
  }

  return 0;
}

uint32_t LoopPeelingPass::LoopPeelingInfo::GetFirstNonLoopInvariantOperand(
    Instruction* condition) const {
  for (uint32_t i = 0; i < condition->NumInOperands(); i++) {
    BasicBlock* bb =
        context_->get_instr_block(condition->GetSingleWordInOperand(i));
    if (!bb || !loop_->IsInsideLoop(bb)) {
      return condition->GetSingleWordInOperand(i);
    }
  }

  return 0;
}
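
// Returns true if |opcode| is an integer comparison this pass knows how to
// analyze.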
static bool IsHandledCondition(SpvOp opcode) {
  switch (opcode) {
    case SpvOpIEqual:
    case SpvOpINotEqual:
    case SpvOpUGreaterThan:
    case SpvOpSGreaterThan:
    case SpvOpUGreaterThanEqual:
    case SpvOpSGreaterThanEqual:
    case SpvOpULessThan:
    case SpvOpSLessThan:
    case SpvOpULessThanEqual:
    case SpvOpSLessThanEqual:
      return true;
    default:
      return false;
  }
}
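
// Analyzes the condition of the conditional branch that terminates |bb| and
// returns the direction and factor by which the loop should be peeled to make
// that condition redundant, or kNone when nothing can be concluded.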
LoopPeelingPass::LoopPeelingInfo::Direction
LoopPeelingPass::LoopPeelingInfo::GetPeelingInfo(BasicBlock* bb) const {
  if (bb->terminator()->opcode() != SpvOpBranchConditional) {
    return GetNoneDirection();
  }

  analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();

  Instruction* condition =
      def_use_mgr->GetDef(bb->terminator()->GetSingleWordInOperand(0));

  if (!IsHandledCondition(condition->opcode())) {
    return GetNoneDirection();
  }

  if (!GetFirstLoopInvariantOperand(condition)) {
    // No loop invariant, it cannot be peeled by this pass.
    return GetNoneDirection();
  }
  if (!GetFirstNonLoopInvariantOperand(condition)) {
    // Seems to be a job for the unswitch pass.
    return GetNoneDirection();
  }

  // Left-hand side.
  SExpression lhs = scev_analysis_->AnalyzeInstruction(
      def_use_mgr->GetDef(condition->GetSingleWordInOperand(0)));
  if (lhs->GetType() == SENode::CanNotCompute) {
    // Can't make any conclusion.
    return GetNoneDirection();
  }

  // Right-hand side.
  SExpression rhs = scev_analysis_->AnalyzeInstruction(
      def_use_mgr->GetDef(condition->GetSingleWordInOperand(1)));
  if (rhs->GetType() == SENode::CanNotCompute) {
    // Can't make any conclusion.
    return GetNoneDirection();
  }

  // Only take into account recurrent expressions over the current loop.
  bool is_lhs_rec = !scev_analysis_->IsLoopInvariant(loop_, lhs);
  bool is_rhs_rec = !scev_analysis_->IsLoopInvariant(loop_, rhs);

  if ((is_lhs_rec && is_rhs_rec) || (!is_lhs_rec && !is_rhs_rec)) {
    return GetNoneDirection();
  }

  if (is_lhs_rec) {
    if (!lhs->AsSERecurrentNode() ||
        lhs->AsSERecurrentNode()->GetLoop() != loop_) {
      return GetNoneDirection();
    }
  }
  if (is_rhs_rec) {
    if (!rhs->AsSERecurrentNode() ||
        rhs->AsSERecurrentNode()->GetLoop() != loop_) {
      return GetNoneDirection();
    }
  }

  // If the opcode is ==, then we try a peel before or after.
  // If the opcode is not <, >, <= or >=, we bail out.
  //
  // For the remaining cases, we canonicalize the expression so that the
  // constant expression is on the left-hand side and the recurring expression
  // is on the right-hand side. If we swap the sides, then < becomes >, <=
  // becomes >=, etc.
  // If the opcode is <=, then we add 1 to the right-hand side and do the peel
  // check on <.
  // If the opcode is >=, then we add 1 to the left-hand side and do the peel
  // check on >.

  CmpOperator cmp_operator;
  switch (condition->opcode()) {
    default:
      return GetNoneDirection();
    case SpvOpIEqual:
    case SpvOpINotEqual:
      return HandleEquality(lhs, rhs);
    case SpvOpUGreaterThan:
    case SpvOpSGreaterThan: {
      cmp_operator = CmpOperator::kGT;
      break;
    }
    case SpvOpULessThan:
    case SpvOpSLessThan: {
      cmp_operator = CmpOperator::kLT;
      break;
    }
    // We add one to transform >= into > and <= into <.
    case SpvOpUGreaterThanEqual:
    case SpvOpSGreaterThanEqual: {
      cmp_operator = CmpOperator::kGE;
      break;
    }
    case SpvOpULessThanEqual:
    case SpvOpSLessThanEqual: {
      cmp_operator = CmpOperator::kLE;
      break;
    }
  }

  // Force the left-hand side to be the non-recurring expression.
  if (is_lhs_rec) {
    std::swap(lhs, rhs);
    switch (cmp_operator) {
      case CmpOperator::kLT: {
        cmp_operator = CmpOperator::kGT;
        break;
      }
      case CmpOperator::kGT: {
        cmp_operator = CmpOperator::kLT;
        break;
      }
      case CmpOperator::kLE: {
        cmp_operator = CmpOperator::kGE;
        break;
      }
      case CmpOperator::kGE: {
        cmp_operator = CmpOperator::kLE;
        break;
      }
    }
  }
  return HandleInequality(cmp_operator, lhs, rhs->AsSERecurrentNode());
}
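
// Returns the value of |rec| at the first iteration, i.e. its offset.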
SExpression LoopPeelingPass::LoopPeelingInfo::GetValueAtFirstIteration(
    SERecurrentNode* rec) const {
  return rec->GetOffset();
}
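
// Returns the value of |rec| at the given |iteration|:
// coefficient * iteration + offset.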
SExpression LoopPeelingPass::LoopPeelingInfo::GetValueAtIteration(
    SERecurrentNode* rec, int64_t iteration) const {
  SExpression coeff = rec->GetCoefficient();
  SExpression offset = rec->GetOffset();

  return (coeff * iteration) + offset;
}

SExpression LoopPeelingPass::LoopPeelingInfo::GetValueAtLastIteration(
    SERecurrentNode* rec) const {
  return GetValueAtIteration(rec, loop_max_iterations_ - 1);
}
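
// Evaluates |lhs| cmp_op |rhs| for loop-invariant expressions. Returns true
// and stores the verdict in |result| when the scalar-evolution analysis can
// determine the sign of the difference between the two sides.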
bool LoopPeelingPass::LoopPeelingInfo::EvalOperator(CmpOperator cmp_op,
                                                    SExpression lhs,
                                                    SExpression rhs,
                                                    bool* result) const {
  assert(scev_analysis_->IsLoopInvariant(loop_, lhs));
  assert(scev_analysis_->IsLoopInvariant(loop_, rhs));
  // We perform the test: 0 cmp_op rhs - lhs.
  // What is left is then to determine the sign of the expression.
  switch (cmp_op) {
    case CmpOperator::kLT: {
      return scev_analysis_->IsAlwaysGreaterThanZero(rhs - lhs, result);
    }
    case CmpOperator::kGT: {
      return scev_analysis_->IsAlwaysGreaterThanZero(lhs - rhs, result);
    }
    case CmpOperator::kLE: {
      return scev_analysis_->IsAlwaysGreaterOrEqualToZero(rhs - lhs, result);
    }
    case CmpOperator::kGE: {
      return scev_analysis_->IsAlwaysGreaterOrEqualToZero(lhs - rhs, result);
    }
  }
  return false;
}
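
// Handles == and != conditions: suggests peeling one iteration before when
// both sides are equal at the first iteration, one iteration after when they
// are equal at the last iteration.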
LoopPeelingPass::LoopPeelingInfo::Direction
LoopPeelingPass::LoopPeelingInfo::HandleEquality(SExpression lhs,
                                                 SExpression rhs) const {
  {
    // Try a peel before opportunity.
    SExpression lhs_cst = lhs;
    if (SERecurrentNode* rec_node = lhs->AsSERecurrentNode()) {
      lhs_cst = rec_node->GetOffset();
    }
    SExpression rhs_cst = rhs;
    if (SERecurrentNode* rec_node = rhs->AsSERecurrentNode()) {
      rhs_cst = rec_node->GetOffset();
    }

    if (lhs_cst == rhs_cst) {
      return Direction{LoopPeelingPass::PeelDirection::kBefore, 1};
    }
  }

  {
    // Try a peel after opportunity.
    SExpression lhs_cst = lhs;
    if (SERecurrentNode* rec_node = lhs->AsSERecurrentNode()) {
      // rec_node(x) = a * x + b
      // assign to lhs: a * (loop_max_iterations_ - 1) + b
      lhs_cst = GetValueAtLastIteration(rec_node);
    }
    SExpression rhs_cst = rhs;
    if (SERecurrentNode* rec_node = rhs->AsSERecurrentNode()) {
      // rec_node(x) = a * x + b
      // assign to rhs: a * (loop_max_iterations_ - 1) + b
      rhs_cst = GetValueAtLastIteration(rec_node);
    }

    if (lhs_cst == rhs_cst) {
      return Direction{LoopPeelingPass::PeelDirection::kAfter, 1};
    }
  }

  return GetNoneDirection();
}
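
// Handles <, >, <= and >= conditions against a recurrent right-hand side:
// computes the iteration at which the condition flips and derives the peel
// direction and factor from how close that iteration is to the loop bounds.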
LoopPeelingPass::LoopPeelingInfo::Direction
LoopPeelingPass::LoopPeelingInfo::HandleInequality(CmpOperator cmp_op,
                                                   SExpression lhs,
                                                   SERecurrentNode* rhs) const {
  SExpression offset = rhs->GetOffset();
  SExpression coefficient = rhs->GetCoefficient();
  // Compute (cst - B) / A.
  std::pair<SExpression, int64_t> flip_iteration = (lhs - offset) / coefficient;
  if (!flip_iteration.first->AsSEConstantNode()) {
    return GetNoneDirection();
  }

  // Note: !!flip_iteration.second normalizes to 0/1 (via bool cast).
  int64_t iteration =
      flip_iteration.first->AsSEConstantNode()->FoldToSingleValue() +
      !!flip_iteration.second;
  if (iteration <= 0 ||
      loop_max_iterations_ <= static_cast<uint64_t>(iteration)) {
    // Always true or false within the loop bounds.
    return GetNoneDirection();
  }
  // If this is a <= or >= operator, make sure |iteration| is the one flipping
  // the condition.
  // If (cst - B) is not divisible by A, this is equivalent to a < or > check,
  // so we skip this test.
  if (!flip_iteration.second &&
      (cmp_op == CmpOperator::kLE || cmp_op == CmpOperator::kGE)) {
    bool first_iteration;
    bool current_iteration;
    if (!EvalOperator(cmp_op, lhs, offset, &first_iteration) ||
        !EvalOperator(cmp_op, lhs, GetValueAtIteration(rhs, iteration),
                      &current_iteration)) {
      return GetNoneDirection();
    }
    // If the condition did not flip, the next iteration will.
    if (first_iteration == current_iteration) {
      iteration++;
    }
  }

  uint32_t cast_iteration = 0;
  // Sanity check: can we fit |iteration| in a uint32_t?
  if (static_cast<uint64_t>(iteration) < std::numeric_limits<uint32_t>::max()) {
    cast_iteration = static_cast<uint32_t>(iteration);
  }

  if (cast_iteration) {
    // Peel before if we are closer to the start, after if closer to the end.
    if (loop_max_iterations_ / 2 > cast_iteration) {
      return Direction{LoopPeelingPass::PeelDirection::kBefore, cast_iteration};
    } else {
      return Direction{
          LoopPeelingPass::PeelDirection::kAfter,
          static_cast<uint32_t>(loop_max_iterations_ - cast_iteration)};
    }
  }

  return GetNoneDirection();
}

}  // namespace opt
}  // namespace spvtools