//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe. This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable. We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
//
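// As an illustrative example (the identifiers below are made up, not taken
// from this file), hoisting turns a loop-invariant computation such as:
//
//    for (i = 0; i < n; ++i)
//      a[i] = x * y;            // 'x * y' does not change inside the loop
//
// into:
//
//    tmp = x * y;               // computed once, in the preheader
//    for (i = 0; i < n; ++i)
//      a[i] = tmp;
//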
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "licm"
STATISTIC(NumSunk      , "Number of instructions sunk out of loop");
STATISTIC(NumHoisted   , "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromoted  , "Number of memory locations promoted to registers");

#if 0 // HLSL Change Starts - option pending
static cl::opt<bool>
DisablePromotion("disable-licm-promotion", cl::Hidden,
                 cl::desc("Disable memory promotion in LICM pass"));
#else
static bool DisablePromotion = false;
#endif // HLSL Change Ends
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedInLoop(const Instruction &I, const Loop *CurLoop);
static bool hoist(Instruction &I, BasicBlock *Preheader);
static bool sink(Instruction &I, const LoopInfo *LI, const DominatorTree *DT,
                 const Loop *CurLoop, AliasSetTracker *CurAST);
static bool isGuaranteedToExecute(const Instruction &Inst,
                                  const DominatorTree *DT,
                                  const Loop *CurLoop,
                                  const LICMSafetyInfo *SafetyInfo);
static bool isSafeToExecuteUnconditionally(const Instruction &Inst,
                                           const DominatorTree *DT,
                                           const TargetLibraryInfo *TLI,
                                           const Loop *CurLoop,
                                           const LICMSafetyInfo *SafetyInfo,
                                           const Instruction *CtxI = nullptr);
static bool pointerInvalidatedByLoop(Value *V, uint64_t Size,
                                     const AAMDNodes &AAInfo,
                                     AliasSetTracker *CurAST);
static Instruction *CloneInstructionInExitBlock(const Instruction &I,
                                                BasicBlock &ExitBlock,
                                                PHINode &PN,
                                                const LoopInfo *LI);
static bool canSinkOrHoistInst(Instruction &I, AliasAnalysis *AA,
                               DominatorTree *DT, TargetLibraryInfo *TLI,
                               Loop *CurLoop, AliasSetTracker *CurAST,
                               LICMSafetyInfo *SafetyInfo);
namespace {
  struct LICM : public LoopPass {
    static char ID; // Pass identification, replacement for typeid
    LICM() : LoopPass(ID) {
      initializeLICMPass(*PassRegistry::getPassRegistry());
    }

    bool runOnLoop(Loop *L, LPPassManager &LPM) override;

    /// This transformation requires natural loop information & requires that
    /// loop preheaders be inserted into the CFG...
    ///
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<LoopInfoWrapperPass>();
      AU.addRequiredID(LoopSimplifyID);
      AU.addPreservedID(LoopSimplifyID);
      AU.addRequiredID(LCSSAID);
      AU.addPreservedID(LCSSAID);
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<ScalarEvolution>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
    }

    using llvm::Pass::doFinalization;

    bool doFinalization() override {
      assert(LoopToAliasSetMap.empty() && "Didn't free loop alias sets");
      return false;
    }

  private:
    AliasAnalysis *AA;       // Current AliasAnalysis information
    LoopInfo *LI;            // Current LoopInfo
    DominatorTree *DT;       // Dominator Tree for the current Loop.
    TargetLibraryInfo *TLI;  // TargetLibraryInfo for constant folding.

    // State that is updated as we process loops.
    bool Changed;            // Set to true when we change anything.
    BasicBlock *Preheader;   // The preheader block of the current loop...
    Loop *CurLoop;           // The current loop we are working on...
    AliasSetTracker *CurAST; // AliasSet information for the current loop...
    DenseMap<Loop*, AliasSetTracker*> LoopToAliasSetMap;

    /// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info.
    void cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To,
                                 Loop *L) override;

    /// deleteAnalysisValue - Simple Analysis hook. Delete value V from alias
    /// set.
    void deleteAnalysisValue(Value *V, Loop *L) override;

    /// Simple Analysis hook. Delete loop L from alias set map.
    void deleteAnalysisLoop(Loop *L) override;
  };
}
char LICM::ID = 0;
INITIALIZE_PASS_BEGIN(LICM, "licm", "Loop Invariant Code Motion", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LICM, "licm", "Loop Invariant Code Motion", false, false)

Pass *llvm::createLICMPass() { return new LICM(); }
/// Hoist expressions out of the specified loop. Note, alias info for inner
/// loop is not preserved so it is not a good idea to run LICM multiple
/// times on one loop.
///
bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
  if (skipOptnoneFunction(L))
    return false;

  Changed = false;

  // Get our Loop and Alias Analysis information...
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  AA = &getAnalysis<AliasAnalysis>();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");

  CurAST = new AliasSetTracker(*AA);
  // Collect Alias info from subloops.
  for (Loop::iterator LoopItr = L->begin(), LoopItrE = L->end();
       LoopItr != LoopItrE; ++LoopItr) {
    Loop *InnerL = *LoopItr;
    AliasSetTracker *InnerAST = LoopToAliasSetMap[InnerL];
    assert(InnerAST && "Where is my AST?");

    // What if InnerLoop was modified by other passes?
    CurAST->add(*InnerAST);

    // Once we've incorporated the inner loop's AST into ours, we don't need
    // the subloop's anymore.
    delete InnerAST;
    LoopToAliasSetMap.erase(InnerL);
  }

  CurLoop = L;

  // Get the preheader block to move instructions into...
  Preheader = L->getLoopPreheader();

  // Loop over the body of this loop, looking for calls, invokes, and stores.
  // Because subloops have already been incorporated into the AST, we skip
  // blocks in subloops.
  //
  for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
       I != E; ++I) {
    BasicBlock *BB = *I;
    if (LI->getLoopFor(BB) == L)    // Ignore blocks in subloops.
      CurAST->add(*BB);             // Incorporate the specified basic block.
  }

  // Compute loop safety information.
  LICMSafetyInfo SafetyInfo;
  computeLICMSafetyInfo(&SafetyInfo, CurLoop);

  // We want to visit all of the instructions in this loop... that are not
  // parts of our subloops (they have already had their invariants hoisted out
  // of their loop, into this loop, so there is no need to process the BODIES
  // of the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree
  // so that we are guaranteed to see definitions before we see uses. This
  // allows us to sink instructions in one pass, without iteration. After
  // sinking instructions, we perform another pass to hoist them out of the
  // loop.
  //
  if (L->hasDedicatedExits())
    Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, CurLoop,
                          CurAST, &SafetyInfo);
  if (Preheader)
    Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI,
                           CurLoop, CurAST, &SafetyInfo);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  if (!DisablePromotion && (Preheader || L->hasDedicatedExits())) {
    SmallVector<BasicBlock *, 8> ExitBlocks;
    SmallVector<Instruction *, 8> InsertPts;
    PredIteratorCache PIC;

    // Loop over all of the alias sets in the tracker object.
    for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end();
         I != E; ++I)
      Changed |= promoteLoopAccessesToScalars(*I, ExitBlocks, InsertPts,
                                              PIC, LI, DT, CurLoop,
                                              CurAST, &SafetyInfo);

    // Once we have promoted values across the loop body we have to
    // recursively reform LCSSA as any nested loop may now have values defined
    // within the loop used in the outer loop.
    // FIXME: This is really heavy handed. It would be a bit better to use an
    // SSAUpdater strategy during promotion that was LCSSA aware and reformed
    // it as it went.
    if (Changed)
      formLCSSARecursively(*L, *DT, LI,
                           getAnalysisIfAvailable<ScalarEvolution>());
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of sanity checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((!L->getParentLoop() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  // Clear out the loop's state information for the next iteration.
  CurLoop = nullptr;
  Preheader = nullptr;

  // If this loop is nested inside of another one, save the alias information
  // for when we process the outer loop.
  if (L->getParentLoop())
    LoopToAliasSetMap[L] = CurAST;
  else
    delete CurAST;
  return Changed;
}
/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree. This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
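/// For example (illustrative only, made-up IR), a value that is computed
/// inside the loop but whose only users are outside of it:
///
///   loop:
///     %v = add i32 %a, %b
///     ...
///   exit:
///     %v.lcssa = phi i32 [ %v, %loop ]
///
/// can be sunk: the add is cloned into the exit block and the LCSSA PHI is
/// replaced with the clone.
///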
bool llvm::sinkRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
                      DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop,
                      AliasSetTracker *CurAST, LICMSafetyInfo *SafetyInfo) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr &&
         DT != nullptr && CurLoop != nullptr && CurAST != nullptr &&
         SafetyInfo != nullptr && "Unexpected input to sinkRegion");
  // Set changed as false.
  bool Changed = false;
  // Get basic block.
  BasicBlock *BB = N->getBlock();
  // If this subregion is not in the top level loop at all, exit.
  if (!CurLoop->contains(BB)) return Changed;
  // We are processing blocks in reverse dfo, so process children first.
  const std::vector<DomTreeNode*> &Children = N->getChildren();
  for (unsigned i = 0, e = Children.size(); i != e; ++i)
    Changed |=
        sinkRegion(Children[i], AA, LI, DT, TLI, CurLoop, CurAST, SafetyInfo);
  // Only need to process the contents of this block if it is not part of a
  // subloop (which would already have been processed).
  if (inSubLoop(BB, CurLoop, LI)) return Changed;

  for (BasicBlock::iterator II = BB->end(); II != BB->begin(); ) {
    Instruction &I = *--II;

    // If the instruction is dead, we would otherwise try to sink it because
    // it isn't used in the loop; instead, just delete it.
    if (isInstructionTriviallyDead(&I, TLI)) {
      DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
      ++II;
      CurAST->deleteValue(&I);
      I.eraseFromParent();
      Changed = true;
      continue;
    }

    // Check to see if we can sink this instruction to the exit blocks
    // of the loop. We can do this if all the users of the instruction are
    // outside of the loop. In this case, it doesn't even matter if the
    // operands of the instruction are loop invariant.
    //
    if (isNotUsedInLoop(I, CurLoop) &&
        canSinkOrHoistInst(I, AA, DT, TLI, CurLoop, CurAST, SafetyInfo)) {
      ++II;
      Changed |= sink(I, LI, DT, CurLoop, CurAST);
    }
  }
  return Changed;
}
/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
                       DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop,
                       AliasSetTracker *CurAST, LICMSafetyInfo *SafetyInfo) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr &&
         DT != nullptr && CurLoop != nullptr && CurAST != nullptr &&
         SafetyInfo != nullptr && "Unexpected input to hoistRegion");
  // Set changed as false.
  bool Changed = false;
  // Get basic block.
  BasicBlock *BB = N->getBlock();
  // If this subregion is not in the top level loop at all, exit.
  if (!CurLoop->contains(BB)) return Changed;
  // Only need to process the contents of this block if it is not part of a
  // subloop (which would already have been processed).
  if (!inSubLoop(BB, CurLoop, LI))
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ) {
      Instruction &I = *II++;
      // Try constant folding this instruction. If all the operands are
      // constants, it is technically hoistable, but it would be better to
      // just fold it.
      if (Constant *C = ConstantFoldInstruction(
              &I, I.getModule()->getDataLayout(), TLI)) {
        DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n');
        CurAST->copyValue(&I, C);
        CurAST->deleteValue(&I);
        I.replaceAllUsesWith(C);
        I.eraseFromParent();
        continue;
      }

      // Try hoisting the instruction out to the preheader. We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction.
      //
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, TLI, CurLoop, CurAST, SafetyInfo) &&
          isSafeToExecuteUnconditionally(I, DT, TLI, CurLoop, SafetyInfo,
                                CurLoop->getLoopPreheader()->getTerminator()))
        Changed |= hoist(I, CurLoop->getLoopPreheader());
    }

  const std::vector<DomTreeNode*> &Children = N->getChildren();
  for (unsigned i = 0, e = Children.size(); i != e; ++i)
    Changed |=
        hoistRegion(Children[i], AA, LI, DT, TLI, CurLoop, CurAST, SafetyInfo);
  return Changed;
}
/// Computes loop safety information, checking the loop body and header for
/// the possibility of a may-throw exception.
///
void llvm::computeLICMSafetyInfo(LICMSafetyInfo *SafetyInfo, Loop *CurLoop) {
  assert(CurLoop != nullptr && "CurLoop can't be null");
  BasicBlock *Header = CurLoop->getHeader();
  // Setting default safety values.
  SafetyInfo->MayThrow = false;
  SafetyInfo->HeaderMayThrow = false;
  // Iterate over the header and compute safety info.
  for (BasicBlock::iterator I = Header->begin(), E = Header->end();
       (I != E) && !SafetyInfo->HeaderMayThrow; ++I)
    SafetyInfo->HeaderMayThrow |= I->mayThrow();

  SafetyInfo->MayThrow = SafetyInfo->HeaderMayThrow;
  // Iterate over loop instructions and compute safety info.
  for (Loop::block_iterator BB = CurLoop->block_begin(),
       BBE = CurLoop->block_end(); (BB != BBE) && !SafetyInfo->MayThrow; ++BB)
    for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end();
         (I != E) && !SafetyInfo->MayThrow; ++I)
      SafetyInfo->MayThrow |= I->mayThrow();
}
/// canSinkOrHoistInst - Return true if the hoister and sinker can handle this
/// instruction.
///
bool canSinkOrHoistInst(Instruction &I, AliasAnalysis *AA, DominatorTree *DT,
                        TargetLibraryInfo *TLI, Loop *CurLoop,
                        AliasSetTracker *CurAST, LICMSafetyInfo *SafetyInfo) {
  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false;       // Don't hoist volatile/atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (AA->pointsToConstantMemory(LI->getOperand(0)))
      return true;
    if (LI->getMetadata(LLVMContext::MD_invariant_load))
      return true;

    // Don't hoist loads which have may-aliased stores in the loop.
    uint64_t Size = 0;
    if (LI->getType()->isSized())
      Size = AA->getTypeStoreSize(LI->getType());

    AAMDNodes AAInfo;
    LI->getAAMetadata(AAInfo);

    return !pointerInvalidatedByLoop(LI->getOperand(0), Size, AAInfo, CurAST);
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Handle simple cases by querying alias analysis.
    AliasAnalysis::ModRefBehavior Behavior = AA->getModRefBehavior(CI);
    if (Behavior == AliasAnalysis::DoesNotAccessMemory)
      return true;
    if (AliasAnalysis::onlyReadsMemory(Behavior)) {
      // If this call only reads from memory and there are no writes to memory
      // in the loop, we can hoist or sink the call as appropriate.
      bool FoundMod = false;
      for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end();
           I != E; ++I) {
        AliasSet &AS = *I;
        if (!AS.isForwardingAliasSet() && AS.isMod()) {
          FoundMod = true;
          break;
        }
      }
      if (!FoundMod) return true;
    }

    // FIXME: This should use mod/ref information to see if we can hoist or
    // sink the call.
    return false;
  }

  // Only these instructions are hoistable/sinkable.
  if (!isa<BinaryOperator>(I) && !isa<CastInst>(I) && !isa<SelectInst>(I) &&
      !isa<GetElementPtrInst>(I) && !isa<CmpInst>(I) &&
      !isa<InsertElementInst>(I) && !isa<ExtractElementInst>(I) &&
      !isa<ShuffleVectorInst>(I) && !isa<ExtractValueInst>(I) &&
      !isa<InsertValueInst>(I))
    return false;

  // TODO: Plumb the context instruction through to make hoisting and sinking
  // more powerful. Hoisting of loads already works due to the special casing
  // above.
  return isSafeToExecuteUnconditionally(I, DT, TLI, CurLoop, SafetyInfo,
                                        nullptr);
}
/// Returns true if a PHINode is trivially replaceable with an Instruction.
/// This is true when all incoming values are that instruction.
/// This pattern occurs most often with LCSSA PHI nodes.
///
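/// For example (illustrative only, made-up IR), an LCSSA PHI for a value %v
/// defined in the loop and used in an exit block typically looks like:
///
///   exit:
///     %v.lcssa = phi i32 [ %v, %loop.latch ]
///
/// Every incoming value is %v, so the PHI can simply be replaced by %v (or by
/// a sunk clone of the instruction that defines %v).
///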
static bool isTriviallyReplacablePHI(const PHINode &PN, const Instruction &I) {
  for (const Value *IncValue : PN.incoming_values())
    if (IncValue != &I)
      return false;
  return true;
}
/// Return true if the only users of this instruction are outside of
/// the loop. If this is true, we can sink the instruction to the exit
/// blocks of the loop.
///
static bool isNotUsedInLoop(const Instruction &I, const Loop *CurLoop) {
  for (const User *U : I.users()) {
    const Instruction *UI = cast<Instruction>(U);
    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      // A PHI node where all of the incoming values are this instruction is
      // special -- it can just be RAUW'ed with the instruction and thus
      // doesn't require a use in the predecessor. This is a particularly
      // important special case because it is the pattern found in LCSSA form.
      if (isTriviallyReplacablePHI(*PN, I)) {
        if (CurLoop->contains(PN))
          return false;
        else
          continue;
      }

      // Otherwise, PHI node uses occur in the predecessor blocks of the
      // incoming values. Check for such a use being inside the loop.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        if (PN->getIncomingValue(i) == &I)
          if (CurLoop->contains(PN->getIncomingBlock(i)))
            return false;

      continue;
    }

    if (CurLoop->contains(UI))
      return false;
  }
  return true;
}
static Instruction *CloneInstructionInExitBlock(const Instruction &I,
                                                BasicBlock &ExitBlock,
                                                PHINode &PN,
                                                const LoopInfo *LI) {
  Instruction *New = I.clone();
  ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
  if (!I.getName().empty()) New->setName(I.getName() + ".le");

  // Build LCSSA PHI nodes for any in-loop operands. Note that this is
  // particularly cheap because we can rip off the PHI node that we're
  // replacing for the number and blocks of the predecessors.
  // OPT: If this shows up in a profile, we can instead finish sinking all
  // invariant instructions, and then walk their operands to re-establish
  // LCSSA. That will eliminate creating PHI nodes just to nuke them when
  // sinking bottom-up.
  for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE;
       ++OI)
    if (Instruction *OInst = dyn_cast<Instruction>(*OI))
      if (Loop *OLoop = LI->getLoopFor(OInst->getParent()))
        if (!OLoop->contains(&PN)) {
          PHINode *OpPN =
              PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
                              OInst->getName() + ".lcssa", ExitBlock.begin());
          for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
            OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
          *OI = OpPN;
        }
  return New;
}
/// When an instruction is found to only be used outside of the loop, this
/// function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
static bool sink(Instruction &I, const LoopInfo *LI, const DominatorTree *DT,
                 const Loop *CurLoop, AliasSetTracker *CurAST) {
  DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
  bool Changed = false;
  if (isa<LoadInst>(I)) ++NumMovedLoads;
  else if (isa<CallInst>(I)) ++NumMovedCalls;
  ++NumSunk;
  Changed = true;

#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif

  // Clones of this instruction. Don't create more than one per exit block!
  SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;

  // If this instruction is only used outside of the loop, then all users are
  // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
  // the instruction.
  while (!I.use_empty()) {
    Value::user_iterator UI = I.user_begin();
    auto *User = cast<Instruction>(*UI);
    if (!DT->isReachableFromEntry(User->getParent())) {
      User->replaceUsesOfWith(&I, UndefValue::get(I.getType()));
      continue;
    }
    // The user must be a PHI node.
    PHINode *PN = cast<PHINode>(User);

    // Surprisingly, instructions can be used outside of loops without any
    // exits. This can only happen in PHI nodes if the incoming block is
    // unreachable.
    Use &U = UI.getUse();
    BasicBlock *BB = PN->getIncomingBlock(U);
    if (!DT->isReachableFromEntry(BB)) {
      U = UndefValue::get(I.getType());
      continue;
    }

    BasicBlock *ExitBlock = PN->getParent();
    assert(ExitBlockSet.count(ExitBlock) &&
           "The LCSSA PHI is not in an exit block!");

    Instruction *New;
    auto It = SunkCopies.find(ExitBlock);
    if (It != SunkCopies.end())
      New = It->second;
    else
      New = SunkCopies[ExitBlock] =
          CloneInstructionInExitBlock(I, *ExitBlock, *PN, LI);

    PN->replaceAllUsesWith(New);
    PN->eraseFromParent();
  }

  CurAST->deleteValue(&I);
  I.eraseFromParent();
  return Changed;
}
/// When an instruction is found to only use loop invariant operands and it
/// is safe to hoist, this function is called to do the dirty work.
///
static bool hoist(Instruction &I, BasicBlock *Preheader) {
  DEBUG(dbgs() << "LICM hoisting to " << Preheader->getName() << ": "
               << I << "\n");
  // Move the new node to the Preheader, before its terminator.
  I.moveBefore(Preheader->getTerminator());

  if (isa<LoadInst>(I)) ++NumMovedLoads;
  else if (isa<CallInst>(I)) ++NumMovedCalls;
  ++NumHoisted;
  return true;
}
/// Only sink or hoist an instruction if it is not a trapping instruction,
/// or if the instruction is known not to trap when moved to the preheader,
/// or if it is a trapping instruction but is guaranteed to execute.
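///
/// For example (illustrative only), in:
///
///   for (i = 0; i < n; ++i)
///     if (d != 0)
///       a[i] = x / d;
///
/// the division 'x / d' is loop invariant but may trap when d == 0, so it can
/// only be hoisted if it is safe to speculate or is guaranteed to execute once
/// the loop is entered. Here it is guarded by 'd != 0', so hoisting it to the
/// preheader would be unsafe.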
static bool isSafeToExecuteUnconditionally(const Instruction &Inst,
                                           const DominatorTree *DT,
                                           const TargetLibraryInfo *TLI,
                                           const Loop *CurLoop,
                                           const LICMSafetyInfo *SafetyInfo,
                                           const Instruction *CtxI) {
  if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT, TLI))
    return true;

  return isGuaranteedToExecute(Inst, DT, CurLoop, SafetyInfo);
}

static bool isGuaranteedToExecute(const Instruction &Inst,
                                  const DominatorTree *DT,
                                  const Loop *CurLoop,
                                  const LICMSafetyInfo *SafetyInfo) {
  // We have to check to make sure that the instruction dominates all
  // of the exit blocks. If it doesn't, then there is a path out of the loop
  // which does not execute this instruction, so we can't hoist it.

  // If the instruction is in the header block for the loop (which is very
  // common), it is always guaranteed to dominate the exit blocks. Since this
  // is a common case, and can save some work, check it now.
  if (Inst.getParent() == CurLoop->getHeader())
    // If there's a throw in the header block, we can't guarantee we'll reach
    // Inst.
    return !SafetyInfo->HeaderMayThrow;

  // Somewhere in this loop there is an instruction which may throw and make
  // us exit the loop.
  if (SafetyInfo->MayThrow)
    return false;

  // Get the exit blocks for the current loop.
  SmallVector<BasicBlock*, 8> ExitBlocks;
  CurLoop->getExitBlocks(ExitBlocks);

  // Verify that the block dominates each of the exit blocks of the loop.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(Inst.getParent(), ExitBlocks[i]))
      return false;

  // As a degenerate case, if the loop is statically infinite then we haven't
  // proven anything since there are no exit blocks.
  if (ExitBlocks.empty())
    return false;

  return true;
}
namespace {
class LoopPromoter : public LoadAndStorePromoter {
  Value *SomePtr;  // Designated pointer to store to.
  SmallPtrSetImpl<Value*> &PointerMustAliases;
  SmallVectorImpl<BasicBlock*> &LoopExitBlocks;
  SmallVectorImpl<Instruction*> &LoopInsertPts;
  PredIteratorCache &PredCache;
  AliasSetTracker &AST;
  LoopInfo &LI;
  DebugLoc DL;
  int Alignment;
  AAMDNodes AATags;

  Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
    if (Instruction *I = dyn_cast<Instruction>(V))
      if (Loop *L = LI.getLoopFor(I->getParent()))
        if (!L->contains(BB)) {
          // We need to create an LCSSA PHI node for the incoming value and
          // store that.
          PHINode *PN = PHINode::Create(
              I->getType(), PredCache.size(BB),
              I->getName() + ".lcssa", BB->begin());
          for (BasicBlock *Pred : PredCache.get(BB))
            PN->addIncoming(I, Pred);
          return PN;
        }
    return V;
  }

public:
  LoopPromoter(Value *SP,
               ArrayRef<const Instruction *> Insts,
               SSAUpdater &S, SmallPtrSetImpl<Value *> &PMA,
               SmallVectorImpl<BasicBlock *> &LEB,
               SmallVectorImpl<Instruction *> &LIP, PredIteratorCache &PIC,
               AliasSetTracker &ast, LoopInfo &li, DebugLoc dl, int alignment,
               const AAMDNodes &AATags)
      : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
        LoopExitBlocks(LEB), LoopInsertPts(LIP), PredCache(PIC), AST(ast),
        LI(li), DL(dl), Alignment(alignment), AATags(AATags) {}

  bool isInstInList(Instruction *I,
                    const SmallVectorImpl<Instruction*> &) const override {
    Value *Ptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      Ptr = LI->getOperand(0);
    else
      Ptr = cast<StoreInst>(I)->getPointerOperand();
    return PointerMustAliases.count(Ptr);
  }

  void doExtraRewritesBeforeFinalDeletion() const override {
    // Insert stores in the loop exit blocks. Each exit block gets a store of
    // the live-out value that feeds it. Since we've already told the SSA
    // updater about the defs in the loop and the preheader definition, it is
    // all set and we can start using it.
    for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
      BasicBlock *ExitBlock = LoopExitBlocks[i];
      Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
      LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
      Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
      Instruction *InsertPos = LoopInsertPts[i];
      StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
      NewSI->setAlignment(Alignment);
      NewSI->setDebugLoc(DL);
      if (AATags) NewSI->setAAMetadata(AATags);
    }
  }

  void replaceLoadWithValue(LoadInst *LI, Value *V) const override {
    // Update alias analysis.
    AST.copyValue(LI, V);
  }
  void instructionDeleted(Instruction *I) const override {
    AST.deleteValue(I);
  }
};
} // end anon namespace
/// Try to promote memory values to scalars by sinking stores out of the
/// loop and moving loads to before the loop. We do this by looping over
/// the stores in the loop, looking for stores to Must pointers which are
/// loop invariant.
///
bool llvm::promoteLoopAccessesToScalars(AliasSet &AS,
                                        SmallVectorImpl<BasicBlock*> &ExitBlocks,
                                        SmallVectorImpl<Instruction*> &InsertPts,
                                        PredIteratorCache &PIC, LoopInfo *LI,
                                        DominatorTree *DT, Loop *CurLoop,
                                        AliasSetTracker *CurAST,
                                        LICMSafetyInfo *SafetyInfo) {
  // Verify inputs.
  assert(LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && CurAST != nullptr &&
         SafetyInfo != nullptr &&
         "Unexpected Input to promoteLoopAccessesToScalars");
  // Initially set Changed status to false.
  bool Changed = false;
  // We can promote this alias set if it has a store, if it is a "Must" alias
  // set, if the pointer is loop invariant, and if we are not eliminating any
  // volatile loads or stores.
  if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
      AS.isVolatile() || !CurLoop->isLoopInvariant(AS.begin()->getValue()))
    return Changed;

  assert(!AS.empty() &&
         "Must alias set should have at least one pointer element in it!");

  Value *SomePtr = AS.begin()->getValue();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  // It isn't safe to promote a load/store from the loop if the load/store is
  // conditional. For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  //
  // It is safe to promote P if all uses are direct load/stores and if at
  // least one is guaranteed to be executed.
  bool GuaranteedToExecute = false;

  SmallVector<Instruction*, 64> LoopUses;
  SmallPtrSet<Value*, 4> PointerMustAliases;

  // We start with an alignment of one and try to find instructions that allow
  // us to prove better alignment.
  unsigned Alignment = 1;
  AAMDNodes AATags;
  bool HasDedicatedExits = CurLoop->hasDedicatedExits();

  // Check that all of the pointers in the alias set have the same type. We
  // cannot (yet) promote a memory location that is loaded and stored in
  // different sizes. While we are at it, collect alignment and AA info.
  for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) {
    Value *ASIV = ASI->getValue();
    PointerMustAliases.insert(ASIV);

    // Check that all of the pointers in the alias set have the same type. We
    // cannot (yet) promote a memory location that is loaded and stored in
    // different sizes.
    if (SomePtr->getType() != ASIV->getType())
      return Changed;

    for (User *U : ASIV->users()) {
      // Ignore instructions that are outside the loop.
      Instruction *UI = dyn_cast<Instruction>(U);
      if (!UI || !CurLoop->contains(UI))
        continue;

      // If there is a non-load/store instruction in the loop, we can't
      // promote it.
      if (const LoadInst *load = dyn_cast<LoadInst>(UI)) {
        assert(!load->isVolatile() && "AST broken");
        if (!load->isSimple())
          return Changed;
      } else if (const StoreInst *store = dyn_cast<StoreInst>(UI)) {
        // Stores *of* the pointer are not interesting, only stores *to* the
        // pointer.
        if (UI->getOperand(1) != ASIV)
          continue;
        assert(!store->isVolatile() && "AST broken");
        if (!store->isSimple())
          return Changed;
        // Don't sink stores from loops without dedicated block exits. Exits
        // containing indirect branches are not transformed by loop simplify,
        // so make sure we catch that. An additional load may be generated in
        // the preheader for the SSA updater, so also avoid sinking when no
        // preheader is available.
        if (!HasDedicatedExits || !Preheader)
          return Changed;

        // Note that we only check GuaranteedToExecute inside the store case
        // so that we do not introduce stores where they did not exist before
        // (which would break the LLVM concurrency model).

        // If the alignment of this instruction allows us to specify a more
        // restrictive (and performant) alignment and if we are sure this
        // instruction will be executed, update the alignment.
        // Larger is better, with the exception of 0 being the best alignment.
        unsigned InstAlignment = store->getAlignment();
        if ((InstAlignment > Alignment || InstAlignment == 0) && Alignment != 0)
          if (isGuaranteedToExecute(*UI, DT, CurLoop, SafetyInfo)) {
            GuaranteedToExecute = true;
            Alignment = InstAlignment;
          }

        if (!GuaranteedToExecute)
          GuaranteedToExecute = isGuaranteedToExecute(*UI, DT,
                                                      CurLoop, SafetyInfo);
      } else
        return Changed; // Not a load or store.

      // Merge the AA tags.
      if (LoopUses.empty()) {
        // On the first load/store, just take its AA tags.
        UI->getAAMetadata(AATags);
      } else if (AATags) {
        UI->getAAMetadata(AATags, /* Merge = */ true);
      }

      LoopUses.push_back(UI);
    }
  }
  // If there isn't a guaranteed-to-execute instruction, we can't promote.
  if (!GuaranteedToExecute)
    return Changed;

  // Otherwise, this is safe to promote, let's do it!
  DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
               << '\n');
  Changed = true;
  ++NumPromoted;

  // Grab a debug location for the inserted loads/stores; given that the
  // inserted loads/stores have little relation to the original loads/stores,
  // this code just arbitrarily picks a location from one, since any debug
  // location is better than none.
  DebugLoc DL = LoopUses[0]->getDebugLoc();

  // Figure out the loop exits and their insertion points, if this is the
  // first promotion.
  if (ExitBlocks.empty()) {
    CurLoop->getUniqueExitBlocks(ExitBlocks);
    InsertPts.resize(ExitBlocks.size());
    for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
      InsertPts[i] = ExitBlocks[i]->getFirstInsertionPt();
  }

  // We use the SSAUpdater interface to insert phi nodes as required.
  SmallVector<PHINode*, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);
  LoopPromoter Promoter(SomePtr, LoopUses, SSA,
                        PointerMustAliases, ExitBlocks,
                        InsertPts, PIC, *CurAST, *LI, DL, Alignment, AATags);

  // Set up the preheader to have a definition of the value. It is the
  // live-out value from the preheader that uses in the loop will use.
  LoadInst *PreheaderLoad =
      new LoadInst(SomePtr, SomePtr->getName() + ".promoted",
                   Preheader->getTerminator());
  PreheaderLoad->setAlignment(Alignment);
  PreheaderLoad->setDebugLoc(DL);
  if (AATags) PreheaderLoad->setAAMetadata(AATags);
  SSA.AddAvailableValue(Preheader, PreheaderLoad);

  // Rewrite all the loads in the loop and remember all the definitions from
  // stores in the loop.
  Promoter.run(LoopUses);

  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
  if (PreheaderLoad->use_empty())
    PreheaderLoad->eraseFromParent();

  return Changed;
}
/// Simple Analysis hook. Clone alias set info.
///
void LICM::cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To, Loop *L) {
  AliasSetTracker *AST = LoopToAliasSetMap.lookup(L);
  if (!AST)
    return;

  AST->copyValue(From, To);
}

/// Simple Analysis hook. Delete value V from alias set.
///
void LICM::deleteAnalysisValue(Value *V, Loop *L) {
  AliasSetTracker *AST = LoopToAliasSetMap.lookup(L);
  if (!AST)
    return;

  AST->deleteValue(V);
}

/// Simple Analysis hook. Delete loop L from alias set map.
///
void LICM::deleteAnalysisLoop(Loop *L) {
  AliasSetTracker *AST = LoopToAliasSetMap.lookup(L);
  if (!AST)
    return;

  delete AST;
  LoopToAliasSetMap.erase(L);
}

/// Return true if the body of this loop may store into the memory
/// location pointed to by V.
///
static bool pointerInvalidatedByLoop(Value *V, uint64_t Size,
                                     const AAMDNodes &AAInfo,
                                     AliasSetTracker *CurAST) {
  // Check to see if any of the basic blocks in CurLoop invalidate *V.
  return CurAST->getAliasSetForPointer(V, Size, AAInfo).isMod();
}

/// Little predicate that returns true if the specified basic block is in
/// a subloop of the current one, not the current one itself.
///
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
  assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
  return LI->getLoopFor(BB) != CurLoop;
}