// Copyright (c) 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <algorithm>
#include <functional>
#include <limits>
#include <memory>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "source/opt/ir_builder.h"
#include "source/opt/ir_context.h"
#include "source/opt/loop_descriptor.h"
#include "source/opt/loop_peeling.h"
#include "source/opt/loop_utils.h"
#include "source/opt/scalar_analysis.h"
#include "source/opt/scalar_analysis_nodes.h"

namespace spvtools {
namespace opt {
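
// Default code-growth threshold: ProcessLoop bails out of a peeling
// opportunity when |factor| * (size of the loop) would exceed this value.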
size_t LoopPeelingPass::code_grow_threshold_ = 1000;

void LoopPeeling::DuplicateAndConnectLoop(
    LoopUtils::LoopCloningResult* clone_results) {
  CFG& cfg = *context_->cfg();
  analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();

  assert(CanPeelLoop() && "Cannot peel loop!");

  std::vector<BasicBlock*> ordered_loop_blocks;
  BasicBlock* pre_header = loop_->GetOrCreatePreHeaderBlock();

  loop_->ComputeLoopStructuredOrder(&ordered_loop_blocks);

  cloned_loop_ = loop_utils_.CloneLoop(clone_results, ordered_loop_blocks);

  // Add the cloned basic blocks to the function.
  Function::iterator it =
      loop_utils_.GetFunction()->FindBlock(pre_header->id());
  assert(it != loop_utils_.GetFunction()->end() &&
         "Pre-header not found in the function.");
  loop_utils_.GetFunction()->AddBasicBlocks(
      clone_results->cloned_bb_.begin(), clone_results->cloned_bb_.end(), ++it);

  // Make |loop_|'s preheader the preheader of |cloned_loop_|.
  BasicBlock* cloned_header = cloned_loop_->GetHeaderBlock();
  pre_header->ForEachSuccessorLabel(
      [cloned_header](uint32_t* succ) { *succ = cloned_header->id(); });

  // Update the CFG.
  cfg.RemoveEdge(pre_header->id(), loop_->GetHeaderBlock()->id());
  cloned_loop_->SetPreHeaderBlock(pre_header);
  loop_->SetPreHeaderBlock(nullptr);

  // When cloning the loop, we did not clone the merge block, so currently
  // |cloned_loop_| shares the same merge block as |loop_|.
  // We mutate all branches from |cloned_loop_| blocks to |loop_|'s merge into
  // a branch to |loop_|'s header (so the header will also act as the merge of
  // |cloned_loop_|).
  uint32_t cloned_loop_exit = 0;
  for (uint32_t pred_id : cfg.preds(loop_->GetMergeBlock()->id())) {
    if (loop_->IsInsideLoop(pred_id)) continue;
    BasicBlock* bb = cfg.block(pred_id);
    assert(cloned_loop_exit == 0 && "The loop has multiple exits.");
    cloned_loop_exit = bb->id();
    bb->ForEachSuccessorLabel([this](uint32_t* succ) {
      if (*succ == loop_->GetMergeBlock()->id())
        *succ = loop_->GetHeaderBlock()->id();
    });
  }

  // Update the CFG.
  cfg.RemoveNonExistingEdges(loop_->GetMergeBlock()->id());
  cfg.AddEdge(cloned_loop_exit, loop_->GetHeaderBlock()->id());

  // Patch the phi of the original loop header:
  //  - Set the loop entry branch to come from the cloned loop exit block;
  //  - Set the initial value of the phi using the corresponding cloned loop
  //    exit values.
  //
  // We patch the iterating value initializers of the original loop using the
  // corresponding cloned loop exit values. This connects the cloned loop's
  // iterating values to the original loop and makes sure that the second loop
  // starts with the last value of the first loop.
  //
  // For example, loops like:
  //
  //   int z = 0;
  //   for (int i = 0; i++ < M; i += cst1) {
  //     if (cond)
  //       z += cst2;
  //   }
  //
  // Will become:
  //
  //   int z = 0;
  //   int i = 0;
  //   for (; i++ < M; i += cst1) {
  //     if (cond)
  //       z += cst2;
  //   }
  //   for (; i++ < M; i += cst1) {
  //     if (cond)
  //       z += cst2;
  //   }
  loop_->GetHeaderBlock()->ForEachPhiInst([cloned_loop_exit, def_use_mgr,
                                           clone_results,
                                           this](Instruction* phi) {
    for (uint32_t i = 0; i < phi->NumInOperands(); i += 2) {
      if (!loop_->IsInsideLoop(phi->GetSingleWordInOperand(i + 1))) {
        phi->SetInOperand(i,
                          {clone_results->value_map_.at(
                              exit_value_.at(phi->result_id())->result_id())});
        phi->SetInOperand(i + 1, {cloned_loop_exit});
        def_use_mgr->AnalyzeInstUse(phi);
        return;
      }
    }
  });

  // Force the creation of a new preheader for the original loop and set it as
  // the merge block for the cloned loop.
  cloned_loop_->SetMergeBlock(loop_->GetOrCreatePreHeaderBlock());
}

void LoopPeeling::InsertCanonicalInductionVariable(
    LoopUtils::LoopCloningResult* clone_results) {
  if (original_loop_canonical_induction_variable_) {
    canonical_induction_variable_ =
        context_->get_def_use_mgr()->GetDef(clone_results->value_map_.at(
            original_loop_canonical_induction_variable_->result_id()));
    return;
  }

  BasicBlock::iterator insert_point = GetClonedLoop()->GetLatchBlock()->tail();
  if (GetClonedLoop()->GetLatchBlock()->GetMergeInst()) {
    --insert_point;
  }
  InstructionBuilder builder(
      context_, &*insert_point,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  Instruction* uint_1_cst =
      builder.Add32BitConstantInteger<uint32_t>(1, int_type_->IsSigned());
  // Create the increment.
  // Note that we build "1 + 1" here; one of the operands should be the phi
  // value, but we do not have it yet. The operand will be set later.
  Instruction* iv_inc = builder.AddIAdd(
      uint_1_cst->type_id(), uint_1_cst->result_id(), uint_1_cst->result_id());

  builder.SetInsertPoint(&*GetClonedLoop()->GetHeaderBlock()->begin());

  canonical_induction_variable_ = builder.AddPhi(
      uint_1_cst->type_id(),
      {builder.Add32BitConstantInteger<uint32_t>(0, int_type_->IsSigned())
           ->result_id(),
       GetClonedLoop()->GetPreHeaderBlock()->id(), iv_inc->result_id(),
       GetClonedLoop()->GetLatchBlock()->id()});
  // Connect everything.
  iv_inc->SetInOperand(0, {canonical_induction_variable_->result_id()});

  // Update the def/use manager.
  context_->get_def_use_mgr()->AnalyzeInstUse(iv_inc);

  // If in do-while form, use the incremented value.
  if (do_while_form_) {
    canonical_induction_variable_ = iv_inc;
  }
}

void LoopPeeling::GetIteratorUpdateOperations(
    const Loop* loop, Instruction* iterator,
    std::unordered_set<Instruction*>* operations) {
  analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();
  operations->insert(iterator);
  iterator->ForEachInId([def_use_mgr, loop, operations, this](uint32_t* id) {
    Instruction* insn = def_use_mgr->GetDef(*id);
    if (insn->opcode() == SpvOpLabel) {
      return;
    }
    if (operations->count(insn)) {
      return;
    }
    if (!loop->IsInsideLoop(insn)) {
      return;
    }
    GetIteratorUpdateOperations(loop, insn, operations);
  });
}

// Gather the set of blocks that appear on any path from |entry| to |block|.
static void GetBlocksInPath(uint32_t block, uint32_t entry,
                            std::unordered_set<uint32_t>* blocks_in_path,
                            const CFG& cfg) {
  for (uint32_t pid : cfg.preds(block)) {
    if (blocks_in_path->insert(pid).second) {
      if (pid != entry) {
        GetBlocksInPath(pid, entry, blocks_in_path, cfg);
      }
    }
  }
}

bool LoopPeeling::IsConditionCheckSideEffectFree() const {
  CFG& cfg = *context_->cfg();

  // The "do-while" form does not cause issues: the algorithm takes the first
  // iteration into account.
  if (!do_while_form_) {
    uint32_t condition_block_id = cfg.preds(loop_->GetMergeBlock()->id())[0];

    std::unordered_set<uint32_t> blocks_in_path;

    blocks_in_path.insert(condition_block_id);
    GetBlocksInPath(condition_block_id, loop_->GetHeaderBlock()->id(),
                    &blocks_in_path, cfg);

    for (uint32_t bb_id : blocks_in_path) {
      BasicBlock* bb = cfg.block(bb_id);
      if (!bb->WhileEachInst([this](Instruction* insn) {
            if (insn->IsBranch()) return true;
            switch (insn->opcode()) {
              case SpvOpLabel:
              case SpvOpSelectionMerge:
              case SpvOpLoopMerge:
                return true;
              default:
                break;
            }
            return context_->IsCombinatorInstruction(insn);
          })) {
        return false;
      }
    }
  }

  return true;
}

void LoopPeeling::GetIteratingExitValues() {
  CFG& cfg = *context_->cfg();

  loop_->GetHeaderBlock()->ForEachPhiInst(
      [this](Instruction* phi) { exit_value_[phi->result_id()] = nullptr; });

  if (!loop_->GetMergeBlock()) {
    return;
  }
  if (cfg.preds(loop_->GetMergeBlock()->id()).size() != 1) {
    return;
  }
  analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();

  uint32_t condition_block_id = cfg.preds(loop_->GetMergeBlock()->id())[0];

  auto& header_pred = cfg.preds(loop_->GetHeaderBlock()->id());
  do_while_form_ = std::find(header_pred.begin(), header_pred.end(),
                             condition_block_id) != header_pred.end();
  if (do_while_form_) {
    loop_->GetHeaderBlock()->ForEachPhiInst(
        [condition_block_id, def_use_mgr, this](Instruction* phi) {
          std::unordered_set<Instruction*> operations;

          for (uint32_t i = 0; i < phi->NumInOperands(); i += 2) {
            if (condition_block_id == phi->GetSingleWordInOperand(i + 1)) {
              exit_value_[phi->result_id()] =
                  def_use_mgr->GetDef(phi->GetSingleWordInOperand(i));
            }
          }
        });
  } else {
    DominatorTree* dom_tree =
        &context_->GetDominatorAnalysis(loop_utils_.GetFunction())
             ->GetDomTree();
    BasicBlock* condition_block = cfg.block(condition_block_id);

    loop_->GetHeaderBlock()->ForEachPhiInst(
        [dom_tree, condition_block, this](Instruction* phi) {
          std::unordered_set<Instruction*> operations;

          // This is not the back-edge value; check whether the phi
          // instruction is the only possible candidate.
          GetIteratorUpdateOperations(loop_, phi, &operations);

          for (Instruction* insn : operations) {
            if (insn == phi) {
              continue;
            }
            if (dom_tree->Dominates(context_->get_instr_block(insn),
                                    condition_block)) {
              return;
            }
          }
          exit_value_[phi->result_id()] = phi;
        });
  }
}

void LoopPeeling::FixExitCondition(
    const std::function<uint32_t(Instruction*)>& condition_builder) {
  CFG& cfg = *context_->cfg();

  uint32_t condition_block_id = 0;
  for (uint32_t id : cfg.preds(GetClonedLoop()->GetMergeBlock()->id())) {
    if (GetClonedLoop()->IsInsideLoop(id)) {
      condition_block_id = id;
      break;
    }
  }
  assert(condition_block_id != 0 && "2nd loop is improperly connected");

  BasicBlock* condition_block = cfg.block(condition_block_id);
  Instruction* exit_condition = condition_block->terminator();
  assert(exit_condition->opcode() == SpvOpBranchConditional);
  BasicBlock::iterator insert_point = condition_block->tail();
  if (condition_block->GetMergeInst()) {
    --insert_point;
  }

  exit_condition->SetInOperand(0, {condition_builder(&*insert_point)});

  uint32_t to_continue_block_idx =
      GetClonedLoop()->IsInsideLoop(exit_condition->GetSingleWordInOperand(1))
          ? 1
          : 2;
  exit_condition->SetInOperand(
      1, {exit_condition->GetSingleWordInOperand(to_continue_block_idx)});
  exit_condition->SetInOperand(2, {GetClonedLoop()->GetMergeBlock()->id()});

  // Update the def/use manager.
  context_->get_def_use_mgr()->AnalyzeInstUse(exit_condition);
}

BasicBlock* LoopPeeling::CreateBlockBefore(BasicBlock* bb) {
  analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();
  CFG& cfg = *context_->cfg();
  assert(cfg.preds(bb->id()).size() == 1 && "More than one predecessor");

  std::unique_ptr<BasicBlock> new_bb =
      MakeUnique<BasicBlock>(std::unique_ptr<Instruction>(new Instruction(
          context_, SpvOpLabel, 0, context_->TakeNextId(), {})));
  new_bb->SetParent(loop_utils_.GetFunction());
  // Update the loop descriptor.
  Loop* in_loop = (*loop_utils_.GetLoopDescriptor())[bb];
  if (in_loop) {
    in_loop->AddBasicBlock(new_bb.get());
    loop_utils_.GetLoopDescriptor()->SetBasicBlockToLoop(new_bb->id(), in_loop);
  }

  context_->set_instr_block(new_bb->GetLabelInst(), new_bb.get());
  def_use_mgr->AnalyzeInstDefUse(new_bb->GetLabelInst());

  BasicBlock* bb_pred = cfg.block(cfg.preds(bb->id())[0]);
  bb_pred->tail()->ForEachInId([bb, &new_bb](uint32_t* id) {
    if (*id == bb->id()) {
      *id = new_bb->id();
    }
  });
  cfg.RemoveEdge(bb_pred->id(), bb->id());
  cfg.AddEdge(bb_pred->id(), new_bb->id());
  def_use_mgr->AnalyzeInstUse(&*bb_pred->tail());

  // Update the incoming branch.
  bb->ForEachPhiInst([&new_bb, def_use_mgr](Instruction* phi) {
    phi->SetInOperand(1, {new_bb->id()});
    def_use_mgr->AnalyzeInstUse(phi);
  });
  InstructionBuilder(
      context_, new_bb.get(),
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping)
      .AddBranch(bb->id());
  cfg.RegisterBlock(new_bb.get());

  // Add the basic block to the function.
  Function::iterator it = loop_utils_.GetFunction()->FindBlock(bb->id());
  assert(it != loop_utils_.GetFunction()->end() &&
         "Basic block not found in the function.");
  BasicBlock* ret = new_bb.get();
  loop_utils_.GetFunction()->AddBasicBlock(std::move(new_bb), it);
  return ret;
}
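
// A rough sketch of what ProtectLoop builds (illustration only, not emitted
// code): the loop's former pre-header loses its unconditional branch to the
// header and instead branches on |condition|:
//
//   if_block:
//     OpSelectionMerge %if_merge
//     OpBranchConditional %condition %loop_header %if_merge
//
// so the guarded loop only executes when |condition| is true.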
BasicBlock* LoopPeeling::ProtectLoop(Loop* loop, Instruction* condition,
                                     BasicBlock* if_merge) {
  BasicBlock* if_block = loop->GetOrCreatePreHeaderBlock();
  // This block will no longer be a pre-header because of the if.
  loop->SetPreHeaderBlock(nullptr);
  // Kill the branch to the header.
  context_->KillInst(&*if_block->tail());

  InstructionBuilder builder(
      context_, if_block,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  builder.AddConditionalBranch(condition->result_id(),
                               loop->GetHeaderBlock()->id(), if_merge->id(),
                               if_merge->id());

  return if_block;
}

void LoopPeeling::PeelBefore(uint32_t peel_factor) {
  assert(CanPeelLoop() && "Cannot peel loop");

  LoopUtils::LoopCloningResult clone_results;

  // Clone the loop and insert the cloned one before the loop.
  DuplicateAndConnectLoop(&clone_results);

  // Add a canonical induction variable "canonical_induction_variable_".
  InsertCanonicalInductionVariable(&clone_results);

  InstructionBuilder builder(
      context_, &*cloned_loop_->GetPreHeaderBlock()->tail(),
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  Instruction* factor =
      builder.Add32BitConstantInteger(peel_factor, int_type_->IsSigned());

  Instruction* has_remaining_iteration = builder.AddLessThan(
      factor->result_id(), loop_iteration_count_->result_id());
  Instruction* max_iteration = builder.AddSelect(
      factor->type_id(), has_remaining_iteration->result_id(),
      factor->result_id(), loop_iteration_count_->result_id());

  // Change the exit condition of the cloned loop (the loop exits when the
  // condition becomes false):
  //   "canonical_induction_variable_" < min("factor", "loop_iteration_count_")
  FixExitCondition([max_iteration, this](Instruction* insert_before_point) {
    return InstructionBuilder(context_, insert_before_point,
                              IRContext::kAnalysisDefUse |
                                  IRContext::kAnalysisInstrToBlockMapping)
        .AddLessThan(canonical_induction_variable_->result_id(),
                     max_iteration->result_id())
        ->result_id();
  });

  // "Protect" the second loop: it can only be executed if
  // |has_remaining_iteration| is true (i.e. factor < loop_iteration_count_).
  BasicBlock* if_merge_block = loop_->GetMergeBlock();
  loop_->SetMergeBlock(CreateBlockBefore(loop_->GetMergeBlock()));
  // Prevent the second loop from being executed if we already executed all the
  // required iterations.
  BasicBlock* if_block =
      ProtectLoop(loop_, has_remaining_iteration, if_merge_block);
  // Patch the phis of the merge block.
  if_merge_block->ForEachPhiInst(
      [&clone_results, if_block, this](Instruction* phi) {
        // |if_merge_block| previously had only one predecessor.
        uint32_t incoming_value = phi->GetSingleWordInOperand(0);
        auto def_in_loop = clone_results.value_map_.find(incoming_value);
        if (def_in_loop != clone_results.value_map_.end())
          incoming_value = def_in_loop->second;
        phi->AddOperand(
            {spv_operand_type_t::SPV_OPERAND_TYPE_ID, {incoming_value}});
        phi->AddOperand(
            {spv_operand_type_t::SPV_OPERAND_TYPE_ID, {if_block->id()}});
        context_->get_def_use_mgr()->AnalyzeInstUse(phi);
      });

  context_->InvalidateAnalysesExceptFor(
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping |
      IRContext::kAnalysisLoopAnalysis | IRContext::kAnalysisCFG);
}

void LoopPeeling::PeelAfter(uint32_t peel_factor) {
  assert(CanPeelLoop() && "Cannot peel loop");

  LoopUtils::LoopCloningResult clone_results;

  // Clone the loop and insert the cloned one before the loop.
  DuplicateAndConnectLoop(&clone_results);

  // Add a canonical induction variable "canonical_induction_variable_".
  InsertCanonicalInductionVariable(&clone_results);

  InstructionBuilder builder(
      context_, &*cloned_loop_->GetPreHeaderBlock()->tail(),
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  Instruction* factor =
      builder.Add32BitConstantInteger(peel_factor, int_type_->IsSigned());

  Instruction* has_remaining_iteration = builder.AddLessThan(
      factor->result_id(), loop_iteration_count_->result_id());

  // Change the exit condition of the cloned loop (the loop exits when the
  // condition becomes false):
  //   "canonical_induction_variable_" + "factor" < "loop_iteration_count_"
  FixExitCondition([factor, this](Instruction* insert_before_point) {
    InstructionBuilder cond_builder(
        context_, insert_before_point,
        IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
    // Build the following check:
    //   canonical_induction_variable_ + factor < iteration_count
    return cond_builder
        .AddLessThan(cond_builder
                         .AddIAdd(canonical_induction_variable_->type_id(),
                                  canonical_induction_variable_->result_id(),
                                  factor->result_id())
                         ->result_id(),
                     loop_iteration_count_->result_id())
        ->result_id();
  });

  // "Protect" the first loop: it can only be executed if
  // factor < loop_iteration_count_.
  // The original loop's pre-header was the cloned loop's merge block.
  GetClonedLoop()->SetMergeBlock(
      CreateBlockBefore(GetOriginalLoop()->GetPreHeaderBlock()));
  // Use the second loop's preheader as the if merge block, and skip the first
  // loop when the peeled (second) loop covers all the iterations.
  BasicBlock* if_block = ProtectLoop(cloned_loop_, has_remaining_iteration,
                                     GetOriginalLoop()->GetPreHeaderBlock());

  // Patch the phis of the header block.
  // We added an if to enclose the first loop, and because the phi nodes are
  // connected to the exit values of the first loop, the definitions no longer
  // dominate the preheader.
  // We add to the preheader (our if merge block) the required phi instructions
  // and patch the header phis.
  GetOriginalLoop()->GetHeaderBlock()->ForEachPhiInst(
      [&clone_results, if_block, this](Instruction* phi) {
        analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();

        auto find_value_idx = [](Instruction* phi_inst, Loop* loop) {
          uint32_t preheader_value_idx =
              !loop->IsInsideLoop(phi_inst->GetSingleWordInOperand(1)) ? 0 : 2;
          return preheader_value_idx;
        };

        Instruction* cloned_phi =
            def_use_mgr->GetDef(clone_results.value_map_.at(phi->result_id()));
        uint32_t cloned_preheader_value = cloned_phi->GetSingleWordInOperand(
            find_value_idx(cloned_phi, GetClonedLoop()));

        Instruction* new_phi =
            InstructionBuilder(context_,
                               &*GetOriginalLoop()->GetPreHeaderBlock()->tail(),
                               IRContext::kAnalysisDefUse |
                                   IRContext::kAnalysisInstrToBlockMapping)
                .AddPhi(phi->type_id(),
                        {phi->GetSingleWordInOperand(
                             find_value_idx(phi, GetOriginalLoop())),
                         GetClonedLoop()->GetMergeBlock()->id(),
                         cloned_preheader_value, if_block->id()});

        phi->SetInOperand(find_value_idx(phi, GetOriginalLoop()),
                          {new_phi->result_id()});
        def_use_mgr->AnalyzeInstUse(phi);
      });

  context_->InvalidateAnalysesExceptFor(
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping |
      IRContext::kAnalysisLoopAnalysis | IRContext::kAnalysisCFG);
}

Pass::Status LoopPeelingPass::Process() {
  bool modified = false;
  Module* module = context()->module();

  // Process each function in the module.
  for (Function& f : *module) {
    modified |= ProcessFunction(&f);
  }

  return modified ? Status::SuccessWithChange : Status::SuccessWithoutChange;
}

bool LoopPeelingPass::ProcessFunction(Function* f) {
  bool modified = false;
  LoopDescriptor& loop_descriptor = *context()->GetLoopDescriptor(f);

  std::vector<Loop*> to_process_loop;
  to_process_loop.reserve(loop_descriptor.NumLoops());
  for (Loop& l : loop_descriptor) {
    to_process_loop.push_back(&l);
  }

  ScalarEvolutionAnalysis scev_analysis(context());

  for (Loop* loop : to_process_loop) {
    CodeMetrics loop_size;
    loop_size.Analyze(*loop);

    auto try_peel = [&loop_size, &modified, this](Loop* loop_to_peel) -> Loop* {
      if (!loop_to_peel->IsLCSSA()) {
        LoopUtils(context(), loop_to_peel).MakeLoopClosedSSA();
      }

      bool peeled_loop;
      Loop* still_peelable_loop;
      std::tie(peeled_loop, still_peelable_loop) =
          ProcessLoop(loop_to_peel, &loop_size);

      if (peeled_loop) {
        modified = true;
      }

      return still_peelable_loop;
    };

    Loop* still_peelable_loop = try_peel(loop);
    // The pass works out the maximum factor by which a loop can be peeled.
    // If the loop can potentially be peeled again, there is only one possible
    // direction left, so a single extra call is enough.
    if (still_peelable_loop) {
      try_peel(still_peelable_loop);
    }
  }

  return modified;
}

std::pair<bool, Loop*> LoopPeelingPass::ProcessLoop(Loop* loop,
                                                    CodeMetrics* loop_size) {
  ScalarEvolutionAnalysis* scev_analysis =
      context()->GetScalarEvolutionAnalysis();
  // Default values for bailing out.
  std::pair<bool, Loop*> bail_out{false, nullptr};

  BasicBlock* exit_block = loop->FindConditionBlock();
  if (!exit_block) {
    return bail_out;
  }

  Instruction* exiting_iv = loop->FindConditionVariable(exit_block);
  if (!exiting_iv) {
    return bail_out;
  }
  size_t iterations = 0;
  if (!loop->FindNumberOfIterations(exiting_iv, &*exit_block->tail(),
                                    &iterations)) {
    return bail_out;
  }
  if (!iterations) {
    return bail_out;
  }

  Instruction* canonical_induction_variable = nullptr;

  loop->GetHeaderBlock()->WhileEachPhiInst([&canonical_induction_variable,
                                            scev_analysis,
                                            this](Instruction* insn) {
    if (const SERecurrentNode* iv =
            scev_analysis->AnalyzeInstruction(insn)->AsSERecurrentNode()) {
      const SEConstantNode* offset = iv->GetOffset()->AsSEConstantNode();
      const SEConstantNode* coeff = iv->GetCoefficient()->AsSEConstantNode();
      if (offset && coeff && offset->FoldToSingleValue() == 0 &&
          coeff->FoldToSingleValue() == 1) {
        if (context()->get_type_mgr()->GetType(insn->type_id())->AsInteger()) {
          canonical_induction_variable = insn;
          return false;
        }
      }
    }
    return true;
  });

  bool is_signed = canonical_induction_variable
                       ? context()
                             ->get_type_mgr()
                             ->GetType(canonical_induction_variable->type_id())
                             ->AsInteger()
                             ->IsSigned()
                       : false;

  LoopPeeling peeler(
      loop,
      InstructionBuilder(
          context(), loop->GetHeaderBlock(),
          IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping)
          .Add32BitConstantInteger<uint32_t>(static_cast<uint32_t>(iterations),
                                             is_signed),
      canonical_induction_variable);

  if (!peeler.CanPeelLoop()) {
    return bail_out;
  }

  // For each basic block in the loop, check if it can be peeled. If it
  // can, get the direction (before/after) and by which factor.
  LoopPeelingInfo peel_info(loop, iterations, scev_analysis);

  uint32_t peel_before_factor = 0;
  uint32_t peel_after_factor = 0;

  for (uint32_t block : loop->GetBlocks()) {
    if (block == exit_block->id()) {
      continue;
    }
    BasicBlock* bb = cfg()->block(block);
    PeelDirection direction;
    uint32_t factor;
    std::tie(direction, factor) = peel_info.GetPeelingInfo(bb);

    if (direction == PeelDirection::kNone) {
      continue;
    }
    if (direction == PeelDirection::kBefore) {
      peel_before_factor = std::max(peel_before_factor, factor);
    } else {
      assert(direction == PeelDirection::kAfter);
      peel_after_factor = std::max(peel_after_factor, factor);
    }
  }
  PeelDirection direction = PeelDirection::kNone;
  uint32_t factor = 0;

  // Find in which direction we should peel.
  if (peel_before_factor) {
    factor = peel_before_factor;
    direction = PeelDirection::kBefore;
  }
  if (peel_after_factor) {
    if (peel_before_factor < peel_after_factor) {
      // Favor a peel-after here and give the peel-before another shot later.
      factor = peel_after_factor;
      direction = PeelDirection::kAfter;
    }
  }

  // Do the peel if we can.
  if (direction == PeelDirection::kNone) return bail_out;

  // This does not take into account branch elimination opportunities or
  // unrolling. It assumes the peeled loop will be unrolled as well.
  if (factor * loop_size->roi_size_ > code_grow_threshold_) {
    return bail_out;
  }
  loop_size->roi_size_ *= factor;

  // Find out if the loop should be peeled again.
  Loop* extra_opportunity = nullptr;

  if (direction == PeelDirection::kBefore) {
    peeler.PeelBefore(factor);
    if (stats_) {
      stats_->peeled_loops_.emplace_back(loop, PeelDirection::kBefore, factor);
    }
    if (peel_after_factor) {
      // We could have peeled after; give it another try.
      extra_opportunity = peeler.GetOriginalLoop();
    }
  } else {
    peeler.PeelAfter(factor);
    if (stats_) {
      stats_->peeled_loops_.emplace_back(loop, PeelDirection::kAfter, factor);
    }
    if (peel_before_factor) {
      // We could have peeled before; give it another try.
      extra_opportunity = peeler.GetClonedLoop();
    }
  }

  return {true, extra_opportunity};
}

uint32_t LoopPeelingPass::LoopPeelingInfo::GetFirstLoopInvariantOperand(
    Instruction* condition) const {
  for (uint32_t i = 0; i < condition->NumInOperands(); i++) {
    BasicBlock* bb =
        context_->get_instr_block(condition->GetSingleWordInOperand(i));
    if (bb && loop_->IsInsideLoop(bb)) {
      return condition->GetSingleWordInOperand(i);
    }
  }

  return 0;
}

uint32_t LoopPeelingPass::LoopPeelingInfo::GetFirstNonLoopInvariantOperand(
    Instruction* condition) const {
  for (uint32_t i = 0; i < condition->NumInOperands(); i++) {
    BasicBlock* bb =
        context_->get_instr_block(condition->GetSingleWordInOperand(i));
    if (!bb || !loop_->IsInsideLoop(bb)) {
      return condition->GetSingleWordInOperand(i);
    }
  }

  return 0;
}

static bool IsHandledCondition(SpvOp opcode) {
  switch (opcode) {
    case SpvOpIEqual:
    case SpvOpINotEqual:
    case SpvOpUGreaterThan:
    case SpvOpSGreaterThan:
    case SpvOpUGreaterThanEqual:
    case SpvOpSGreaterThanEqual:
    case SpvOpULessThan:
    case SpvOpSLessThan:
    case SpvOpULessThanEqual:
    case SpvOpSLessThanEqual:
      return true;
    default:
      return false;
  }
}

LoopPeelingPass::LoopPeelingInfo::Direction
LoopPeelingPass::LoopPeelingInfo::GetPeelingInfo(BasicBlock* bb) const {
  if (bb->terminator()->opcode() != SpvOpBranchConditional) {
    return GetNoneDirection();
  }

  analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();

  Instruction* condition =
      def_use_mgr->GetDef(bb->terminator()->GetSingleWordInOperand(0));

  if (!IsHandledCondition(condition->opcode())) {
    return GetNoneDirection();
  }

  if (!GetFirstLoopInvariantOperand(condition)) {
    // No loop-invariant operand: this cannot be peeled by this pass.
    return GetNoneDirection();
  }
  if (!GetFirstNonLoopInvariantOperand(condition)) {
    // This seems to be a job for the unswitch pass.
    return GetNoneDirection();
  }

  // Left-hand side.
  SExpression lhs = scev_analysis_->AnalyzeInstruction(
      def_use_mgr->GetDef(condition->GetSingleWordInOperand(0)));
  if (lhs->GetType() == SENode::CanNotCompute) {
    // We can't make any conclusion.
    return GetNoneDirection();
  }

  // Right-hand side.
  SExpression rhs = scev_analysis_->AnalyzeInstruction(
      def_use_mgr->GetDef(condition->GetSingleWordInOperand(1)));
  if (rhs->GetType() == SENode::CanNotCompute) {
    // We can't make any conclusion.
    return GetNoneDirection();
  }

  // Only take into account recurrent expressions over the current loop.
  bool is_lhs_rec = !scev_analysis_->IsLoopInvariant(loop_, lhs);
  bool is_rhs_rec = !scev_analysis_->IsLoopInvariant(loop_, rhs);

  if ((is_lhs_rec && is_rhs_rec) || (!is_lhs_rec && !is_rhs_rec)) {
    return GetNoneDirection();
  }

  if (is_lhs_rec) {
    if (!lhs->AsSERecurrentNode() ||
        lhs->AsSERecurrentNode()->GetLoop() != loop_) {
      return GetNoneDirection();
    }
  }
  if (is_rhs_rec) {
    if (!rhs->AsSERecurrentNode() ||
        rhs->AsSERecurrentNode()->GetLoop() != loop_) {
      return GetNoneDirection();
    }
  }

  // If the opcode is == or !=, then we try a peel before or after.
  // If the opcode is not <, >, <= or >=, we bail out.
  //
  // For the remaining cases, we canonicalize the expression so that the
  // constant expression is on the left-hand side and the recurring expression
  // is on the right-hand side. If we swap the hand sides, then < becomes >,
  // <= becomes >=, etc.
  // If the opcode is <=, then we add 1 to the right-hand side and do the peel
  // check on <.
  // If the opcode is >=, then we add 1 to the left-hand side and do the peel
  // check on >.
  CmpOperator cmp_operator;
  switch (condition->opcode()) {
    default:
      return GetNoneDirection();
    case SpvOpIEqual:
    case SpvOpINotEqual:
      return HandleEquality(lhs, rhs);
    case SpvOpUGreaterThan:
    case SpvOpSGreaterThan: {
      cmp_operator = CmpOperator::kGT;
      break;
    }
    case SpvOpULessThan:
    case SpvOpSLessThan: {
      cmp_operator = CmpOperator::kLT;
      break;
    }
    // We add one to transform >= into > and <= into <.
    case SpvOpUGreaterThanEqual:
    case SpvOpSGreaterThanEqual: {
      cmp_operator = CmpOperator::kGE;
      break;
    }
    case SpvOpULessThanEqual:
    case SpvOpSLessThanEqual: {
      cmp_operator = CmpOperator::kLE;
      break;
    }
  }

  // Force the left-hand side to be the non-recurring expression.
  if (is_lhs_rec) {
    std::swap(lhs, rhs);
    switch (cmp_operator) {
      case CmpOperator::kLT: {
        cmp_operator = CmpOperator::kGT;
        break;
      }
      case CmpOperator::kGT: {
        cmp_operator = CmpOperator::kLT;
        break;
      }
      case CmpOperator::kLE: {
        cmp_operator = CmpOperator::kGE;
        break;
      }
      case CmpOperator::kGE: {
        cmp_operator = CmpOperator::kLE;
        break;
      }
    }
  }
  return HandleInequality(cmp_operator, lhs, rhs->AsSERecurrentNode());
}

SExpression LoopPeelingPass::LoopPeelingInfo::GetValueAtFirstIteration(
    SERecurrentNode* rec) const {
  return rec->GetOffset();
}

SExpression LoopPeelingPass::LoopPeelingInfo::GetValueAtIteration(
    SERecurrentNode* rec, int64_t iteration) const {
  SExpression coeff = rec->GetCoefficient();
  SExpression offset = rec->GetOffset();

  return (coeff * iteration) + offset;
}

SExpression LoopPeelingPass::LoopPeelingInfo::GetValueAtLastIteration(
    SERecurrentNode* rec) const {
  return GetValueAtIteration(rec, loop_max_iterations_ - 1);
}

bool LoopPeelingPass::LoopPeelingInfo::EvalOperator(CmpOperator cmp_op,
                                                    SExpression lhs,
                                                    SExpression rhs,
                                                    bool* result) const {
  assert(scev_analysis_->IsLoopInvariant(loop_, lhs));
  assert(scev_analysis_->IsLoopInvariant(loop_, rhs));
  // We perform the test: 0 cmp_op rhs - lhs.
  // What is left is then to determine the sign of the expression.
  switch (cmp_op) {
    case CmpOperator::kLT: {
      return scev_analysis_->IsAlwaysGreaterThanZero(rhs - lhs, result);
    }
    case CmpOperator::kGT: {
      return scev_analysis_->IsAlwaysGreaterThanZero(lhs - rhs, result);
    }
    case CmpOperator::kLE: {
      return scev_analysis_->IsAlwaysGreaterOrEqualToZero(rhs - lhs, result);
    }
    case CmpOperator::kGE: {
      return scev_analysis_->IsAlwaysGreaterOrEqualToZero(lhs - rhs, result);
    }
  }
  return false;
}

LoopPeelingPass::LoopPeelingInfo::Direction
LoopPeelingPass::LoopPeelingInfo::HandleEquality(SExpression lhs,
                                                 SExpression rhs) const {
  {
    // Try a peel-before opportunity.
    SExpression lhs_cst = lhs;
    if (SERecurrentNode* rec_node = lhs->AsSERecurrentNode()) {
      lhs_cst = rec_node->GetOffset();
    }
    SExpression rhs_cst = rhs;
    if (SERecurrentNode* rec_node = rhs->AsSERecurrentNode()) {
      rhs_cst = rec_node->GetOffset();
    }

    if (lhs_cst == rhs_cst) {
      return Direction{LoopPeelingPass::PeelDirection::kBefore, 1};
    }
  }

  {
    // Try a peel-after opportunity.
    SExpression lhs_cst = lhs;
    if (SERecurrentNode* rec_node = lhs->AsSERecurrentNode()) {
      // rec_node(x) = a * x + b
      // assign to lhs: a * (loop_max_iterations_ - 1) + b
      lhs_cst = GetValueAtLastIteration(rec_node);
    }
    SExpression rhs_cst = rhs;
    if (SERecurrentNode* rec_node = rhs->AsSERecurrentNode()) {
      // rec_node(x) = a * x + b
      // assign to rhs: a * (loop_max_iterations_ - 1) + b
      rhs_cst = GetValueAtLastIteration(rec_node);
    }

    if (lhs_cst == rhs_cst) {
      return Direction{LoopPeelingPass::PeelDirection::kAfter, 1};
    }
  }

  return GetNoneDirection();
}

LoopPeelingPass::LoopPeelingInfo::Direction
LoopPeelingPass::LoopPeelingInfo::HandleInequality(
    CmpOperator cmp_op, SExpression lhs, SERecurrentNode* rhs) const {
  SExpression offset = rhs->GetOffset();
  SExpression coefficient = rhs->GetCoefficient();
  // Compute (cst - B) / A.
  std::pair<SExpression, int64_t> flip_iteration = (lhs - offset) / coefficient;
  if (!flip_iteration.first->AsSEConstantNode()) {
    return GetNoneDirection();
  }

  // Note: !!flip_iteration.second normalizes the remainder to 0/1 (via a bool
  // cast), so a non-exact division rounds the flip iteration up.
  int64_t iteration =
      flip_iteration.first->AsSEConstantNode()->FoldToSingleValue() +
      !!flip_iteration.second;
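  // Worked example with hypothetical values: for the canonicalized condition
  // 10 > i where i = {0, +1} (B = 0, A = 1), flip_iteration = (10 - 0) / 1
  // = 10 with remainder 0, so |iteration| = 10: the condition first flips at
  // iteration 10. With loop_max_iterations_ = 100, 100 / 2 > 10 holds below,
  // so the loop would be peeled before by a factor of 10.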
  if (iteration <= 0 ||
      loop_max_iterations_ <= static_cast<uint64_t>(iteration)) {
    // The condition is always true or always false within the loop bounds.
    return GetNoneDirection();
  }
  // If this is a <= or >= operator, make sure |iteration| is the one flipping
  // the condition.
  // If (cst - B) and A are not evenly divisible, this is equivalent to a < or
  // > check, so we skip this test.
  if (!flip_iteration.second &&
      (cmp_op == CmpOperator::kLE || cmp_op == CmpOperator::kGE)) {
    bool first_iteration;
    bool current_iteration;
    if (!EvalOperator(cmp_op, lhs, offset, &first_iteration) ||
        !EvalOperator(cmp_op, lhs, GetValueAtIteration(rhs, iteration),
                      &current_iteration)) {
      return GetNoneDirection();
    }
    // If the condition has not flipped yet, the next iteration will flip it.
    if (first_iteration == current_iteration) {
      iteration++;
    }
  }

  uint32_t cast_iteration = 0;
  // Sanity check: can we fit |iteration| in a uint32_t?
  if (static_cast<uint64_t>(iteration) < std::numeric_limits<uint32_t>::max()) {
    cast_iteration = static_cast<uint32_t>(iteration);
  }

  if (cast_iteration) {
    // Peel before if we are closer to the start, after if closer to the end.
    if (loop_max_iterations_ / 2 > cast_iteration) {
      return Direction{LoopPeelingPass::PeelDirection::kBefore, cast_iteration};
    } else {
      return Direction{
          LoopPeelingPass::PeelDirection::kAfter,
          static_cast<uint32_t>(loop_max_iterations_ - cast_iteration)};
    }
  }

  return GetNoneDirection();
}

}  // namespace opt
}  // namespace spvtools