// Copyright (c) 2017 The Khronos Group Inc.
// Copyright (c) 2017 Valve Corporation
// Copyright (c) 2017 LunarG Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "source/opt/inline_pass.h"

#include <tuple>  // for std::tie (used in AddPointerToType)
#include <unordered_set>
#include <utility>

#include "source/cfa.h"
#include "source/util/make_unique.h"

// Indices of operands in SPIR-V instructions
static const int kSpvFunctionCallFunctionId = 2;
static const int kSpvFunctionCallArgumentId = 3;
static const int kSpvReturnValueId = 0;
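
// Note: kSpvFunctionCallFunctionId and kSpvFunctionCallArgumentId are passed
// to Instruction::GetSingleWordOperand, which counts the result type and
// result id as operands 0 and 1.  kSpvReturnValueId, in contrast, indexes the
// in-operands of OpReturnValue (see its use with GetInOperand below).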

namespace spvtools {
namespace opt {

uint32_t InlinePass::AddPointerToType(uint32_t type_id,
                                      SpvStorageClass storage_class) {
  uint32_t resultId = context()->TakeNextId();
  if (resultId == 0) {
    return resultId;
  }

  std::unique_ptr<Instruction> type_inst(
      new Instruction(context(), SpvOpTypePointer, 0, resultId,
                      {{spv_operand_type_t::SPV_OPERAND_TYPE_STORAGE_CLASS,
                        {uint32_t(storage_class)}},
                       {spv_operand_type_t::SPV_OPERAND_TYPE_ID, {type_id}}}));
  context()->AddType(std::move(type_inst));
  analysis::Type* pointeeTy;
  std::unique_ptr<analysis::Pointer> pointerTy;
  std::tie(pointeeTy, pointerTy) =
      context()->get_type_mgr()->GetTypeAndPointerType(type_id,
                                                       SpvStorageClassFunction);
  context()->get_type_mgr()->RegisterType(resultId, *pointerTy);
  return resultId;
}

void InlinePass::AddBranch(uint32_t label_id,
                           std::unique_ptr<BasicBlock>* block_ptr) {
  std::unique_ptr<Instruction> newBranch(
      new Instruction(context(), SpvOpBranch, 0, 0,
                      {{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {label_id}}}));
  (*block_ptr)->AddInstruction(std::move(newBranch));
}

void InlinePass::AddBranchCond(uint32_t cond_id, uint32_t true_id,
                               uint32_t false_id,
                               std::unique_ptr<BasicBlock>* block_ptr) {
  std::unique_ptr<Instruction> newBranch(
      new Instruction(context(), SpvOpBranchConditional, 0, 0,
                      {{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {cond_id}},
                       {spv_operand_type_t::SPV_OPERAND_TYPE_ID, {true_id}},
                       {spv_operand_type_t::SPV_OPERAND_TYPE_ID, {false_id}}}));
  (*block_ptr)->AddInstruction(std::move(newBranch));
}

void InlinePass::AddLoopMerge(uint32_t merge_id, uint32_t continue_id,
                              std::unique_ptr<BasicBlock>* block_ptr) {
  std::unique_ptr<Instruction> newLoopMerge(new Instruction(
      context(), SpvOpLoopMerge, 0, 0,
      {{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {merge_id}},
       {spv_operand_type_t::SPV_OPERAND_TYPE_ID, {continue_id}},
       {spv_operand_type_t::SPV_OPERAND_TYPE_LOOP_CONTROL, {0}}}));
  (*block_ptr)->AddInstruction(std::move(newLoopMerge));
}

void InlinePass::AddStore(uint32_t ptr_id, uint32_t val_id,
                          std::unique_ptr<BasicBlock>* block_ptr) {
  std::unique_ptr<Instruction> newStore(
      new Instruction(context(), SpvOpStore, 0, 0,
                      {{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {ptr_id}},
                       {spv_operand_type_t::SPV_OPERAND_TYPE_ID, {val_id}}}));
  (*block_ptr)->AddInstruction(std::move(newStore));
}

void InlinePass::AddLoad(uint32_t type_id, uint32_t resultId, uint32_t ptr_id,
                         std::unique_ptr<BasicBlock>* block_ptr) {
  std::unique_ptr<Instruction> newLoad(
      new Instruction(context(), SpvOpLoad, type_id, resultId,
                      {{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {ptr_id}}}));
  (*block_ptr)->AddInstruction(std::move(newLoad));
}

std::unique_ptr<Instruction> InlinePass::NewLabel(uint32_t label_id) {
  std::unique_ptr<Instruction> newLabel(
      new Instruction(context(), SpvOpLabel, 0, label_id, {}));
  return newLabel;
}
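
// Returns the id of an OpConstantFalse in the module, lazily creating the
// constant (and an OpTypeBool, if the module lacks one) on first use.
// Returns 0 if fresh ids run out.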
uint32_t InlinePass::GetFalseId() {
  if (false_id_ != 0) return false_id_;
  false_id_ = get_module()->GetGlobalValue(SpvOpConstantFalse);
  if (false_id_ != 0) return false_id_;
  uint32_t boolId = get_module()->GetGlobalValue(SpvOpTypeBool);
  if (boolId == 0) {
    boolId = context()->TakeNextId();
    if (boolId == 0) {
      return 0;
    }
    get_module()->AddGlobalValue(SpvOpTypeBool, boolId, 0);
  }
  false_id_ = context()->TakeNextId();
  if (false_id_ == 0) {
    return 0;
  }
  get_module()->AddGlobalValue(SpvOpConstantFalse, false_id_, boolId);
  return false_id_;
}
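
// Maps the callee's formal parameter ids to the ids of the actual arguments
// at the call site, in declaration order.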
void InlinePass::MapParams(
    Function* calleeFn, BasicBlock::iterator call_inst_itr,
    std::unordered_map<uint32_t, uint32_t>* callee2caller) {
  int param_idx = 0;
  calleeFn->ForEachParam(
      [&call_inst_itr, &param_idx, &callee2caller](const Instruction* cpi) {
        const uint32_t pid = cpi->result_id();
        (*callee2caller)[pid] = call_inst_itr->GetSingleWordOperand(
            kSpvFunctionCallArgumentId + param_idx);
        ++param_idx;
      });
}
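
// Clones the callee's function-scope variables, which all sit at the top of
// its entry block, into |new_vars| under fresh result ids, and records the
// old-to-new id mapping.  Returns false if fresh ids run out.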
bool InlinePass::CloneAndMapLocals(
    Function* calleeFn, std::vector<std::unique_ptr<Instruction>>* new_vars,
    std::unordered_map<uint32_t, uint32_t>* callee2caller) {
  auto callee_block_itr = calleeFn->begin();
  auto callee_var_itr = callee_block_itr->begin();
  while (callee_var_itr->opcode() == SpvOp::SpvOpVariable) {
    std::unique_ptr<Instruction> var_inst(callee_var_itr->Clone(context()));
    uint32_t newId = context()->TakeNextId();
    if (newId == 0) {
      return false;
    }
    get_decoration_mgr()->CloneDecorations(callee_var_itr->result_id(), newId);
    var_inst->SetResultId(newId);
    (*callee2caller)[callee_var_itr->result_id()] = newId;
    new_vars->push_back(std::move(var_inst));
    ++callee_var_itr;
  }
  return true;
}
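
// Creates a caller-scope variable to carry the callee's return value: each
// return site stores into it, and the call's result id is loaded from it
// after the inlined body.  Returns the variable's id, or 0 on failure.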
uint32_t InlinePass::CreateReturnVar(
    Function* calleeFn, std::vector<std::unique_ptr<Instruction>>* new_vars) {
  uint32_t returnVarId = 0;
  const uint32_t calleeTypeId = calleeFn->type_id();
  analysis::TypeManager* type_mgr = context()->get_type_mgr();
  assert(type_mgr->GetType(calleeTypeId)->AsVoid() == nullptr &&
         "Cannot create a return variable of type void.");
  // Find or create ptr to callee return type.
  uint32_t returnVarTypeId =
      type_mgr->FindPointerToType(calleeTypeId, SpvStorageClassFunction);
  if (returnVarTypeId == 0) {
    returnVarTypeId = AddPointerToType(calleeTypeId, SpvStorageClassFunction);
    if (returnVarTypeId == 0) {
      return 0;
    }
  }
  // Add return var to new function scope variables.
  returnVarId = context()->TakeNextId();
  if (returnVarId == 0) {
    return 0;
  }
  std::unique_ptr<Instruction> var_inst(
      new Instruction(context(), SpvOpVariable, returnVarTypeId, returnVarId,
                      {{spv_operand_type_t::SPV_OPERAND_TYPE_STORAGE_CLASS,
                        {SpvStorageClassFunction}}}));
  new_vars->push_back(std::move(var_inst));
  get_decoration_mgr()->CloneDecorations(calleeFn->result_id(), returnVarId);
  return returnVarId;
}
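
// OpSampledImage results must be consumed in the block that defines them, and
// this pass handles OpImage the same way.  When inlining splits the calling
// block, these "same-block" instructions are regenerated in each new block
// that consumes their results.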
bool InlinePass::IsSameBlockOp(const Instruction* inst) const {
  return inst->opcode() == SpvOpSampledImage || inst->opcode() == SpvOpImage;
}

bool InlinePass::CloneSameBlockOps(
    std::unique_ptr<Instruction>* inst,
    std::unordered_map<uint32_t, uint32_t>* postCallSB,
    std::unordered_map<uint32_t, Instruction*>* preCallSB,
    std::unique_ptr<BasicBlock>* block_ptr) {
  return (*inst)->WhileEachInId([&postCallSB, &preCallSB, &block_ptr,
                                 this](uint32_t* iid) {
    const auto mapItr = (*postCallSB).find(*iid);
    if (mapItr == (*postCallSB).end()) {
      const auto mapItr2 = (*preCallSB).find(*iid);
      if (mapItr2 != (*preCallSB).end()) {
        // Clone pre-call same-block ops, map result id.
        const Instruction* inInst = mapItr2->second;
        std::unique_ptr<Instruction> sb_inst(inInst->Clone(context()));
        if (!CloneSameBlockOps(&sb_inst, postCallSB, preCallSB, block_ptr)) {
          return false;
        }
        const uint32_t rid = sb_inst->result_id();
        const uint32_t nid = context()->TakeNextId();
        if (nid == 0) {
          return false;
        }
        get_decoration_mgr()->CloneDecorations(rid, nid);
        sb_inst->SetResultId(nid);
        (*postCallSB)[rid] = nid;
        *iid = nid;
        (*block_ptr)->AddInstruction(std::move(sb_inst));
      }
    } else {
      // Reset same-block op operand.
      *iid = mapItr->second;
    }
    return true;
  });
}
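
// Inlines the call at |call_inst_itr| into its caller: callee locals become
// caller locals, a return variable stands in for the call's result, and a
// callee with early returns is wrapped in a single-trip loop so that each
// return can branch to the loop's merge block.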
bool InlinePass::GenInlineCode(
    std::vector<std::unique_ptr<BasicBlock>>* new_blocks,
    std::vector<std::unique_ptr<Instruction>>* new_vars,
    BasicBlock::iterator call_inst_itr,
    UptrVectorIterator<BasicBlock> call_block_itr) {
  // Map from all ids in the callee to their equivalent id in the caller
  // as callee instructions are copied into caller.
  std::unordered_map<uint32_t, uint32_t> callee2caller;
  // Pre-call same-block insts
  std::unordered_map<uint32_t, Instruction*> preCallSB;
  // Post-call same-block op ids
  std::unordered_map<uint32_t, uint32_t> postCallSB;

  // Invalidate the def-use chains.  They are not kept up to date while
  // inlining.  However, certain calls try to keep them up-to-date if they are
  // valid.  These operations can fail.
  context()->InvalidateAnalyses(IRContext::kAnalysisDefUse);

  Function* calleeFn = id2function_[call_inst_itr->GetSingleWordOperand(
      kSpvFunctionCallFunctionId)];

  // Check for multiple returns in the callee.
  auto fi = early_return_funcs_.find(calleeFn->result_id());
  const bool earlyReturn = fi != early_return_funcs_.end();

  // Map parameters to actual arguments.
  MapParams(calleeFn, call_inst_itr, &callee2caller);

  // Define caller local variables for all callee variables and create map to
  // them.
  if (!CloneAndMapLocals(calleeFn, new_vars, &callee2caller)) {
    return false;
  }

  // Create return var if needed.
  const uint32_t calleeTypeId = calleeFn->type_id();
  uint32_t returnVarId = 0;
  analysis::Type* calleeType = context()->get_type_mgr()->GetType(calleeTypeId);
  if (calleeType->AsVoid() == nullptr) {
    returnVarId = CreateReturnVar(calleeFn, new_vars);
    if (returnVarId == 0) {
      return false;
    }
  }

  // Create set of callee result ids.  Used to detect forward references.
  std::unordered_set<uint32_t> callee_result_ids;
  calleeFn->ForEachInst([&callee_result_ids](const Instruction* cpi) {
    const uint32_t rid = cpi->result_id();
    if (rid != 0) callee_result_ids.insert(rid);
  });

  // If the caller is a loop header and the callee has multiple blocks, then
  // the normal inlining logic will place the OpLoopMerge in the last of
  // several blocks in the loop.  Instead, it should be placed at the end of
  // the first block.  We'll wait to move the OpLoopMerge until the end of the
  // regular inlining logic, and only if necessary.
  bool caller_is_loop_header = false;
  if (call_block_itr->GetLoopMergeInst()) {
    caller_is_loop_header = true;
  }

  bool callee_begins_with_structured_header =
      (*(calleeFn->begin())).GetMergeInst() != nullptr;

  // Clone and map callee code.  Copy caller block code to beginning of
  // first block and end of last block.
  bool prevInstWasReturn = false;
  uint32_t singleTripLoopHeaderId = 0;
  uint32_t singleTripLoopContinueId = 0;
  uint32_t returnLabelId = 0;
  bool multiBlocks = false;
  // new_blk_ptr is a new basic block in the caller.  New instructions are
  // written to it.  It is created when we encounter the OpLabel
  // of the first callee block.  It is appended to new_blocks only when
  // it is complete.
  std::unique_ptr<BasicBlock> new_blk_ptr;

  bool successful = calleeFn->WhileEachInst(
      [&new_blocks, &callee2caller, &call_block_itr, &call_inst_itr,
       &new_blk_ptr, &prevInstWasReturn, &returnLabelId, &returnVarId,
       caller_is_loop_header, callee_begins_with_structured_header,
       &calleeTypeId, &multiBlocks, &postCallSB, &preCallSB, earlyReturn,
       &singleTripLoopHeaderId, &singleTripLoopContinueId, &callee_result_ids,
       this](const Instruction* cpi) {
        switch (cpi->opcode()) {
          case SpvOpFunction:
          case SpvOpFunctionParameter:
            // Already processed
            break;
          case SpvOpVariable:
            if (cpi->NumInOperands() == 2) {
              assert(callee2caller.count(cpi->result_id()) &&
                     "Expected the variable to have already been mapped.");
              uint32_t new_var_id = callee2caller.at(cpi->result_id());

              // The initializer must be a constant or global value.  No
              // mapped id should be used.
              uint32_t val_id = cpi->GetSingleWordInOperand(1);
              AddStore(new_var_id, val_id, &new_blk_ptr);
            }
            break;
          case SpvOpUnreachable:
          case SpvOpKill: {
            // Generate a return label so that we split the block with the
            // function call.  Copy the terminator into the new block.
            if (returnLabelId == 0) {
              returnLabelId = context()->TakeNextId();
              if (returnLabelId == 0) {
                return false;
              }
            }
            std::unique_ptr<Instruction> terminator(
                new Instruction(context(), cpi->opcode(), 0, 0, {}));
            new_blk_ptr->AddInstruction(std::move(terminator));
            break;
          }
          case SpvOpLabel: {
            // If previous instruction was early return, insert branch
            // instruction to return block.
            if (prevInstWasReturn) {
              if (returnLabelId == 0) {
                returnLabelId = context()->TakeNextId();
                if (returnLabelId == 0) {
                  return false;
                }
              }
              AddBranch(returnLabelId, &new_blk_ptr);
              prevInstWasReturn = false;
            }
            // Finish current block (if it exists) and get label for next
            // block.
            uint32_t labelId;
            bool firstBlock = false;
            if (new_blk_ptr != nullptr) {
              new_blocks->push_back(std::move(new_blk_ptr));
              // If result id is already mapped, use it, otherwise get a new
              // one.
              const uint32_t rid = cpi->result_id();
              const auto mapItr = callee2caller.find(rid);
              labelId = (mapItr != callee2caller.end())
                            ? mapItr->second
                            : context()->TakeNextId();
              if (labelId == 0) {
                return false;
              }
            } else {
              // First block needs to use label of original block
              // but map callee label in case of phi reference.
              labelId = call_block_itr->id();
              callee2caller[cpi->result_id()] = labelId;
              firstBlock = true;
            }
            // Create first/next block.
            new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(labelId));
            if (firstBlock) {
              // Copy contents of original caller block up to call
              // instruction.
              for (auto cii = call_block_itr->begin(); cii != call_inst_itr;
                   cii = call_block_itr->begin()) {
                Instruction* inst = &*cii;
                inst->RemoveFromList();
                std::unique_ptr<Instruction> cp_inst(inst);
                // Remember same-block ops for possible regeneration.
                if (IsSameBlockOp(&*cp_inst)) {
                  auto* sb_inst_ptr = cp_inst.get();
                  preCallSB[cp_inst->result_id()] = sb_inst_ptr;
                }
                new_blk_ptr->AddInstruction(std::move(cp_inst));
              }
              if (caller_is_loop_header &&
                  callee_begins_with_structured_header) {
                // We can't place both the caller's merge instruction and
                // another merge instruction in the same block.  So split the
                // calling block.  Insert an unconditional branch to a new
                // guard block.  Later, once we know the ID of the last block,
                // we will move the caller's OpLoopMerge from the last
                // generated block into the first block.  We also wait to
                // avoid invalidating various iterators.
                const auto guard_block_id = context()->TakeNextId();
                if (guard_block_id == 0) {
                  return false;
                }
                AddBranch(guard_block_id, &new_blk_ptr);
                new_blocks->push_back(std::move(new_blk_ptr));
                // Start the next block.
                new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(guard_block_id));
                // Reset the mapping of the callee's entry block to point to
                // the guard block.  Do this so we can fix up phis later on to
                // satisfy dominance.
                callee2caller[cpi->result_id()] = guard_block_id;
              }
              // If callee has early return, insert a header block for
              // single-trip loop that will encompass callee code.  Start
              // postheader block.
              //
              // Note: Consider the following combination:
              //  - the caller is a single block loop
              //  - the callee does not begin with a structure header
              //  - the callee has multiple returns.
              // We still need to split the caller block and insert a guard
              // block.  But we only need to do it once.  We haven't done it
              // yet, but the single-trip loop header will serve the same
              // purpose.
              if (earlyReturn) {
                singleTripLoopHeaderId = context()->TakeNextId();
                if (singleTripLoopHeaderId == 0) {
                  return false;
                }
                AddBranch(singleTripLoopHeaderId, &new_blk_ptr);
                new_blocks->push_back(std::move(new_blk_ptr));
                new_blk_ptr =
                    MakeUnique<BasicBlock>(NewLabel(singleTripLoopHeaderId));
                returnLabelId = context()->TakeNextId();
                singleTripLoopContinueId = context()->TakeNextId();
                if (returnLabelId == 0 || singleTripLoopContinueId == 0) {
                  return false;
                }
                AddLoopMerge(returnLabelId, singleTripLoopContinueId,
                             &new_blk_ptr);
                uint32_t postHeaderId = context()->TakeNextId();
                if (postHeaderId == 0) {
                  return false;
                }
                AddBranch(postHeaderId, &new_blk_ptr);
                new_blocks->push_back(std::move(new_blk_ptr));
                new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(postHeaderId));
                multiBlocks = true;
                // Reset the mapping of the callee's entry block to point to
                // the post-header block.  Do this so we can fix up phis later
                // on to satisfy dominance.
                callee2caller[cpi->result_id()] = postHeaderId;
              }
            } else {
              multiBlocks = true;
            }
          } break;
          case SpvOpReturnValue: {
            // Store return value to return variable.
            assert(returnVarId != 0);
            uint32_t valId = cpi->GetInOperand(kSpvReturnValueId).words[0];
            const auto mapItr = callee2caller.find(valId);
            if (mapItr != callee2caller.end()) {
              valId = mapItr->second;
            }
            AddStore(returnVarId, valId, &new_blk_ptr);

            // Remember we saw a return; if followed by a label, will need to
            // insert branch.
            prevInstWasReturn = true;
          } break;
          case SpvOpReturn: {
            // Remember we saw a return; if followed by a label, will need to
            // insert branch.
            prevInstWasReturn = true;
          } break;
          case SpvOpFunctionEnd: {
            // If there was an early return, we generated a return label id
            // for it.  Now we have to generate the return block with that Id.
            if (returnLabelId != 0) {
              // If previous instruction was return, insert branch instruction
              // to return block.
              if (prevInstWasReturn) AddBranch(returnLabelId, &new_blk_ptr);
              if (earlyReturn) {
                // If we generated a loop header for the single-trip loop
                // to accommodate early returns, insert the continue
                // target block now, with a false branch back to the loop
                // header.
                new_blocks->push_back(std::move(new_blk_ptr));
                new_blk_ptr =
                    MakeUnique<BasicBlock>(NewLabel(singleTripLoopContinueId));
                uint32_t false_id = GetFalseId();
                if (false_id == 0) {
                  return false;
                }
                AddBranchCond(false_id, singleTripLoopHeaderId, returnLabelId,
                              &new_blk_ptr);
              }
              // Generate the return block.
              new_blocks->push_back(std::move(new_blk_ptr));
              new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(returnLabelId));
              multiBlocks = true;
            }
            // Load return value into result id of call, if it exists.
            if (returnVarId != 0) {
              const uint32_t resId = call_inst_itr->result_id();
              assert(resId != 0);
              AddLoad(calleeTypeId, resId, returnVarId, &new_blk_ptr);
            }
            // Copy remaining instructions from caller block.
            for (Instruction* inst = call_inst_itr->NextNode(); inst;
                 inst = call_inst_itr->NextNode()) {
              inst->RemoveFromList();
              std::unique_ptr<Instruction> cp_inst(inst);
              // If multiple blocks generated, regenerate any same-block
              // instruction that has not been seen in this last block.
              if (multiBlocks) {
                if (!CloneSameBlockOps(&cp_inst, &postCallSB, &preCallSB,
                                       &new_blk_ptr)) {
                  return false;
                }
                // Remember same-block ops in this block.
                if (IsSameBlockOp(&*cp_inst)) {
                  const uint32_t rid = cp_inst->result_id();
                  postCallSB[rid] = rid;
                }
              }
              new_blk_ptr->AddInstruction(std::move(cp_inst));
            }
            // Finalize inline code.
            new_blocks->push_back(std::move(new_blk_ptr));
          } break;
          default: {
            // Copy callee instruction and remap all input Ids.
            std::unique_ptr<Instruction> cp_inst(cpi->Clone(context()));
            bool succeeded = cp_inst->WhileEachInId(
                [&callee2caller, &callee_result_ids, this](uint32_t* iid) {
                  const auto mapItr = callee2caller.find(*iid);
                  if (mapItr != callee2caller.end()) {
                    *iid = mapItr->second;
                  } else if (callee_result_ids.find(*iid) !=
                             callee_result_ids.end()) {
                    // Forward reference.  Allocate a new id, map it,
                    // use it and check for it when remapping result ids.
                    const uint32_t nid = context()->TakeNextId();
                    if (nid == 0) {
                      return false;
                    }
                    callee2caller[*iid] = nid;
                    *iid = nid;
                  }
                  return true;
                });
            if (!succeeded) {
              return false;
            }
            // If result id is non-zero, remap it.  If already mapped, use
            // mapped value, else use next id.
            const uint32_t rid = cp_inst->result_id();
            if (rid != 0) {
              const auto mapItr = callee2caller.find(rid);
              uint32_t nid;
              if (mapItr != callee2caller.end()) {
                nid = mapItr->second;
              } else {
                nid = context()->TakeNextId();
                if (nid == 0) {
                  return false;
                }
                callee2caller[rid] = nid;
              }
              cp_inst->SetResultId(nid);
              get_decoration_mgr()->CloneDecorations(rid, nid);
            }
            new_blk_ptr->AddInstruction(std::move(cp_inst));
          } break;
        }
        return true;
      });

  if (!successful) {
    return false;
  }

  if (caller_is_loop_header && (new_blocks->size() > 1)) {
    // Move the OpLoopMerge from the last block back to the first, where
    // it belongs.
    auto& first = new_blocks->front();
    auto& last = new_blocks->back();
    assert(first != last);

    // Insert a modified copy of the loop merge into the first block.
    auto loop_merge_itr = last->tail();
    --loop_merge_itr;
    assert(loop_merge_itr->opcode() == SpvOpLoopMerge);
    std::unique_ptr<Instruction> cp_inst(loop_merge_itr->Clone(context()));
    first->tail().InsertBefore(std::move(cp_inst));

    // Remove the loop merge from the last block.
    loop_merge_itr->RemoveFromList();
    delete &*loop_merge_itr;
  }

  // Update block map given replacement blocks.
  for (auto& blk : *new_blocks) {
    id2block_[blk->id()] = &*blk;
  }
  return true;
}

bool InlinePass::IsInlinableFunctionCall(const Instruction* inst) {
  if (inst->opcode() != SpvOp::SpvOpFunctionCall) return false;
  const uint32_t calleeFnId =
      inst->GetSingleWordOperand(kSpvFunctionCallFunctionId);
  const auto ci = inlinable_.find(calleeFnId);
  return ci != inlinable_.cend();
}
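
// Inlining leaves the last generated block with an id different from the
// original calling block's id, so phis in successor blocks that referenced
// the calling block must be rewritten to reference the last block instead.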
void InlinePass::UpdateSucceedingPhis(
    std::vector<std::unique_ptr<BasicBlock>>& new_blocks) {
  const auto firstBlk = new_blocks.begin();
  const auto lastBlk = new_blocks.end() - 1;
  const uint32_t firstId = (*firstBlk)->id();
  const uint32_t lastId = (*lastBlk)->id();
  const BasicBlock& const_last_block = *lastBlk->get();
  const_last_block.ForEachSuccessorLabel(
      [&firstId, &lastId, this](const uint32_t succ) {
        BasicBlock* sbp = this->id2block_[succ];
        sbp->ForEachPhiInst([&firstId, &lastId](Instruction* phi) {
          phi->ForEachInId([&firstId, &lastId](uint32_t* id) {
            if (*id == firstId) *id = lastId;
          });
        });
      });
}
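
// Early returns are implemented as branches to the merge block of a
// single-trip loop wrapped around the callee body, which is only valid if
// the return was not already inside a loop; the two queries below supply
// that analysis.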
bool InlinePass::HasNoReturnInStructuredConstruct(Function* func) {
  // If control flow is not structured, do not do loop/return analysis.
  // TODO: Analyze returns in non-structured control flow.
  if (!context()->get_feature_mgr()->HasCapability(SpvCapabilityShader))
    return false;
  const auto structured_analysis = context()->GetStructuredCFGAnalysis();
  // Search for returns in structured constructs.
  bool return_in_construct = false;
  for (auto& blk : *func) {
    auto terminal_ii = blk.cend();
    --terminal_ii;
    if (spvOpcodeIsReturn(terminal_ii->opcode()) &&
        structured_analysis->ContainingConstruct(blk.id()) != 0) {
      return_in_construct = true;
      break;
    }
  }
  return !return_in_construct;
}

bool InlinePass::HasNoReturnInLoop(Function* func) {
  // If control flow is not structured, do not do loop/return analysis.
  // TODO: Analyze returns in non-structured control flow.
  if (!context()->get_feature_mgr()->HasCapability(SpvCapabilityShader))
    return false;
  const auto structured_analysis = context()->GetStructuredCFGAnalysis();
  // Search for returns in loops.
  bool return_in_loop = false;
  for (auto& blk : *func) {
    auto terminal_ii = blk.cend();
    --terminal_ii;
    if (spvOpcodeIsReturn(terminal_ii->opcode()) &&
        structured_analysis->ContainingLoop(blk.id()) != 0) {
      return_in_loop = true;
      break;
    }
  }
  return !return_in_loop;
}

void InlinePass::AnalyzeReturns(Function* func) {
  if (HasNoReturnInLoop(func)) {
    no_return_in_loop_.insert(func->result_id());
    if (!HasNoReturnInStructuredConstruct(func))
      early_return_funcs_.insert(func->result_id());
  }
}

bool InlinePass::IsInlinableFunction(Function* func) {
  // We can only inline a function if it has blocks.
  if (func->cbegin() == func->cend()) return false;

  // Do not inline functions with returns in loops.  Currently early return
  // functions are inlined by wrapping them in a one-trip loop and
  // implementing the returns as a branch to the loop's merge block.
  // However, this can only be done validly if the return was not in a loop
  // in the original function.  Also remember functions with multiple (early)
  // returns.
  AnalyzeReturns(func);
  if (no_return_in_loop_.find(func->result_id()) == no_return_in_loop_.cend()) {
    return false;
  }

  if (func->IsRecursive()) {
    return false;
  }

  // Do not inline functions with an OpKill if they are called from a continue
  // construct.  If it is inlined into a continue construct it will generate
  // invalid code.
  bool func_is_called_from_continue =
      funcs_called_from_continue_.count(func->result_id()) != 0;
  if (func_is_called_from_continue && ContainsKill(func)) {
    return false;
  }

  return true;
}

bool InlinePass::ContainsKill(Function* func) const {
  return !func->WhileEachInst(
      [](Instruction* inst) { return inst->opcode() != SpvOpKill; });
}
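
// Resets per-module state: rebuilds the id-to-function and id-to-block maps,
// recomputes the set of inlinable functions, and records which functions are
// called from continue constructs (where inlining an OpKill would be
// invalid).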
void InlinePass::InitializeInline() {
  false_id_ = 0;

  // clear collections
  id2function_.clear();
  id2block_.clear();
  inlinable_.clear();
  no_return_in_loop_.clear();
  early_return_funcs_.clear();
  funcs_called_from_continue_ =
      context()->GetStructuredCFGAnalysis()->FindFuncsCalledFromContinue();

  for (auto& fn : *get_module()) {
    // Initialize function and block maps.
    id2function_[fn.result_id()] = &fn;
    for (auto& blk : fn) {
      id2block_[blk.id()] = &blk;
    }
    // Compute inlinability
    if (IsInlinableFunction(&fn)) inlinable_.insert(fn.result_id());
  }
}

InlinePass::InlinePass() {}

}  // namespace opt
}  // namespace spvtools