//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
using namespace llvm;
/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(). Both functions must use the same search depth;
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;
//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// isNonEscapingLocalObject - Return true if the pointer is to a function-local
/// object that never escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// isEscapeSource - Return true if the pointer is one which would have
/// been considered an escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
    return Size;
  return MemoryLocation::UnknownSize;
}
/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meanings of "object" are slightly different in the
  // following contexts:
  //    c1: llvm::getObjectSize()
  //    c2: llvm.objectsize() intrinsic
  //    c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //    char *p = (char*)malloc(100)
  //    char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". If q is passed to isObjectSmallerThan() as the 1st
  // parameter, then before llvm::getObjectSize() is called to get the size of
  // the entire object, we should:
  //  - either rewind the pointer q to the base address of the object in
  //    question (in this case rewind to p), or
  //  - just give up. It is up to the caller to make sure the pointer is
  //    pointing to the base address of the object.
  //
  // We go with the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size,
                         const DataLayout &DL, const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}
//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
  enum ExtensionKind {
    EK_NotExtended,
    EK_SignExt,
    EK_ZeroExt
  };

  struct VariableGEPIndex {
    const Value *V;
    ExtensionKind Extension;
    int64_t Scale;

    bool operator==(const VariableGEPIndex &Other) const {
      return V == Other.V && Extension == Other.Extension &&
             Scale == Other.Scale;
    }

    bool operator!=(const VariableGEPIndex &Other) const {
      return !operator==(Other);
    }
  };
}
/// GetLinearExpression - Analyze the specified value as a linear expression:
/// "A*V + B", where A and B are constant integers. Return the scale and offset
/// values as APInts and return V as a Value*, and return whether we looked
/// through any sign or zero extends. The incoming Value is known to have
/// IntegerType and it may already be sign or zero extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
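///
/// For illustration only (hypothetical IR, not taken from this file): given
///   %t = mul i32 %x, 4
///   %v = add i32 %t, 12
/// analyzing %v should yield Scale == 4 and Offset == 12, with %x returned as
/// the underlying value, i.e. %v == 4*%x + 12.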
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
                                  ExtensionKind &Extension,
                                  const DataLayout &DL, unsigned Depth,
                                  AssumptionCache *AC, DominatorTree *DT) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
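  // Once the budget is exhausted, give up and report V itself as the linear
  // expression 1*V + 0.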
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      switch (BOp->getOpcode()) {
      default: break;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          break;
        // FALL THROUGH.
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset += RHSC->getValue();
        return V;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset *= RHSC->getValue();
        Scale *= RHSC->getValue();
        return V;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset <<= RHSC->getValue().getLimitedValue();
        Scale <<= RHSC->getValue().getLimitedValue();
        return V;
      }
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) ||
      (isa<ZExtInst>(V) && Extension != EK_SignExt)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned OldWidth = Scale.getBitWidth();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    Scale = Scale.trunc(SmallWidth);
    Offset = Offset.trunc(SmallWidth);
    Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;

    Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension, DL,
                                        Depth + 1, AC, DT);
    Scale = Scale.zext(OldWidth);
    Offset = Offset.zext(OldWidth);

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}
/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
/// into a base pointer with a constant offset and a number of scaled symbolic
/// offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
/// the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As such,
/// the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth).
/// When DataLayout is not around, it just looks through pointer casts.
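///
/// For illustration only (hypothetical IR, not taken from this file): with a
/// 64-bit pointer DataLayout,
///   %a = getelementptr [100 x i32], [100 x i32]* %base, i64 0, i64 %i
/// should decompose into BaseOffs == 0 and a single VarIndices entry
/// { %i, EK_NotExtended, 4 }, with %base returned as the base pointer.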
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
                       SmallVectorImpl<VariableGEPIndex> &VarIndices,
                       bool &MaxLookupReached, const DataLayout &DL,
                       AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  MaxLookupReached = false;

  BaseOffs = 0;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->mayBeOverridden()) {
          V = GA->getAliasee();
          continue;
        }
      }
      return V;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      return V;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getOperand(0)->getType()->getPointerElementType()->isSized())
      return V;

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOffs/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin()+1,
         E = GEPOp->op_end(); I != E; ++I) {
      Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0) continue;

        BaseOffs += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero()) continue;
        BaseOffs += DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = DL.getTypeAllocSize(*GTI);
      ExtensionKind Extension = EK_NotExtended;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (DL.getPointerSizeInBits(AS) > Width)
        Extension = EK_SignExt;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension, DL,
                                  0, AC, DT);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      BaseOffs += IndexOffset.getSExtValue()*Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
        if (VarIndices[i].V == Index &&
            VarIndices[i].Extension == Extension) {
          Scale += VarIndices[i].Scale;
          VarIndices.erase(VarIndices.begin()+i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      if (unsigned ShiftBits = 64 - DL.getPointerSizeInBits(AS)) {
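        // Shifting left and then arithmetic-shifting right by the same amount
        // sign-extends the low PointerSizeInBits bits of Scale out to 64 bits.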
        Scale <<= ShiftBits;
        Scale = (int64_t)Scale >> ShiftBits;
      }

      if (Scale) {
        VariableGEPIndex Entry = {Index, Extension,
                                  static_cast<int64_t>(Scale)};
        VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  MaxLookupReached = true;
  return V;
}
//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V))
    return inst->getParent()->getParent();

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif
namespace {
/// BasicAliasAnalysis - This is the primary alias analysis implementation.
struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
  static char ID; // Class identification, replacement for typeinfo

  BasicAliasAnalysis() : ImmutablePass(ID) {
    initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }

  AliasResult alias(const MemoryLocation &LocA,
                    const MemoryLocation &LocB) override {
    assert(AliasCache.empty() && "AliasCache must be cleared after use!");
    assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
           "BasicAliasAnalysis doesn't support interprocedural queries.");
    AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags,
                                   LocB.Ptr, LocB.Size, LocB.AATags);
    // AliasCache rarely has more than 1 or 2 elements, always use
    // shrink_and_clear so it quickly returns to the inline capacity of the
    // SmallDenseMap if it ever grows larger.
    // FIXME: This should really be shrink_to_inline_capacity_and_clear().
    AliasCache.shrink_and_clear();
    VisitedPhiBBs.clear();
    return Alias;
  }

  ModRefResult getModRefInfo(ImmutableCallSite CS,
                             const MemoryLocation &Loc) override;

  ModRefResult getModRefInfo(ImmutableCallSite CS1,
                             ImmutableCallSite CS2) override;

  /// pointsToConstantMemory - Chase pointers until we find a (constant
  /// global) or not.
  bool pointsToConstantMemory(const MemoryLocation &Loc,
                              bool OrLocal) override;

  /// Get the location associated with a pointer argument of a callsite.
  ModRefResult getArgModRefInfo(ImmutableCallSite CS,
                                unsigned ArgIdx) override;

  /// getModRefBehavior - Return the behavior when calling the given
  /// call site.
  ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;

  /// getModRefBehavior - Return the behavior when calling the given function.
  /// For use when the call site is not known.
  ModRefBehavior getModRefBehavior(const Function *F) override;

  /// getAdjustedAnalysisPointer - This method is used when a pass implements
  /// an analysis interface through multiple inheritance. If needed, it
  /// should override this to adjust the this pointer as needed for the
  /// specified pass info.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &AliasAnalysis::ID)
      return (AliasAnalysis*)this;
    return this;
  }

private:
  // AliasCache - Track alias queries to guard against recursion.
  typedef std::pair<MemoryLocation, MemoryLocation> LocPair;
  typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
  AliasCacheTy AliasCache;

  /// \brief Track phi nodes we have visited. When interpreting "Value" pointer
  /// equality as value equality we need to make sure that the "Value" is not
  /// part of a cycle. Otherwise, two uses could come from different
  /// "iterations" of a cycle and see different values for the same "Value"
  /// pointer.
  /// The following example shows the problem:
  ///   %p = phi(%alloca1, %addr2)
  ///   %l = load %ptr
  ///   %addr1 = gep, %alloca2, 0, %l
  ///   %addr2 = gep %alloca2, 0, (%l + 1)
  ///   alias(%p, %addr1) -> MayAlias !
  ///   store %l, ...
  SmallPtrSet<const BasicBlock*, 8> VisitedPhiBBs;

  // Visited - Track instructions visited by pointsToConstantMemory.
  SmallPtrSet<const Value*, 16> Visited;

  /// \brief Check whether two Values can be considered equivalent.
  ///
  /// In addition to pointer equivalence of \p V1 and \p V2 this checks
  /// whether they cannot be part of a cycle in the value graph by looking at
  /// all visited phi nodes and making sure that the phis cannot reach the
  /// value. We have to do this because we are looking through phi nodes (that
  /// is, we say noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
  bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);

  /// \brief Dest and Src are the variable indices from two decomposed
  /// GetElementPtr instructions GEP1 and GEP2 which have common base
  /// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
  /// difference between the two pointers.
  void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
                          const SmallVectorImpl<VariableGEPIndex> &Src);

  // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
  // instruction against another.
  AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
                       const AAMDNodes &V1AAInfo,
                       const Value *V2, uint64_t V2Size,
                       const AAMDNodes &V2AAInfo,
                       const Value *UnderlyingV1, const Value *UnderlyingV2);

  // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI
  // instruction against another.
  AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
                       const AAMDNodes &PNAAInfo,
                       const Value *V2, uint64_t V2Size,
                       const AAMDNodes &V2AAInfo);

  /// aliasSelect - Disambiguate a Select instruction against another value.
  AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
                          const AAMDNodes &SIAAInfo,
                          const Value *V2, uint64_t V2Size,
                          const AAMDNodes &V2AAInfo);

  AliasResult aliasCheck(const Value *V1, uint64_t V1Size,
                         AAMDNodes V1AATag,
                         const Value *V2, uint64_t V2Size,
                         AAMDNodes V2AATag);
};
} // End of anonymous namespace
// Register this pass...
char BasicAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS_BEGIN(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                         "Basic Alias Analysis (stateless AA impl)",
                         false, true, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_PASS_END(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                       "Basic Alias Analysis (stateless AA impl)",
                       false, true, false)

ImmutablePass *llvm::createBasicAliasAnalysisPass() {
  return new BasicAliasAnalysis();
}
/// pointsToConstantMemory - Returns whether the given pointer value
/// points to memory that is local to the function, with global constants being
/// considered local to all functions.
bool BasicAliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
                                                bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), *DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}
// FIXME: This code is duplicated with MemoryLocation and should be hoisted to
// some common utility location.
static bool isMemsetPattern16(const Function *MS,
                              const TargetLibraryInfo &TLI) {
  if (TLI.has(LibFunc::memset_pattern16) &&
      MS->getName() == "memset_pattern16") {
    FunctionType *MemsetType = MS->getFunctionType();
    if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
        isa<PointerType>(MemsetType->getParamType(0)) &&
        isa<PointerType>(MemsetType->getParamType(1)) &&
        isa<IntegerType>(MemsetType->getParamType(2)))
      return true;
  }

  return false;
}
/// getModRefBehavior - Return the behavior when calling the given call site.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return DoesNotAccessMemory;

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = OnlyReadsMemory;

  if (CS.onlyAccessesArgMemory())
    Min = ModRefBehavior(Min & OnlyAccessesArgumentPointees);

  // The AliasAnalysis base class has some smarts; let's use them.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
}
/// getModRefBehavior - Return the behavior when calling the given function.
/// For use when the call site is not known.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return DoesNotAccessMemory;

  // For intrinsics, we can check the table.
  if (Intrinsic::ID iid = F->getIntrinsicID()) {
#define GET_INTRINSIC_MODREF_BEHAVIOR
#include "llvm/IR/Intrinsics.gen"
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = OnlyReadsMemory;

  if (F->onlyAccessesArgMemory())
    Min = ModRefBehavior(Min & OnlyAccessesArgumentPointees);

  const TargetLibraryInfo &TLI =
      getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  if (isMemsetPattern16(F, TLI))
    Min = OnlyAccessesArgumentPointees;

  // Otherwise be conservative.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction()))
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memory intrinsic");
      return ArgIdx ? Ref : Mod;
    }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  if (CS.getCalledFunction() &&
      isMemsetPattern16(CS.getCalledFunction(), *TLI)) {
    assert((ArgIdx == 0 || ArgIdx == 1) &&
           "Invalid argument index for memset_pattern16");
    return ArgIdx ? Ref : Mod;
  }
  // FIXME: Handle memset_pattern4 and memset_pattern8 also.

  return AliasAnalysis::getArgModRefInfo(CS, ArgIdx);
}
static bool isAssumeIntrinsic(ImmutableCallSite CS) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II && II->getIntrinsicID() == Intrinsic::assume)
    return true;

  return false;
}

bool BasicAliasAnalysis::doInitialization(Module &M) {
  InitializeAliasAnalysis(this, &M.getDataLayout());
  return true;
}
/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
/// simple "address taken" analysis on local objects.
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
                                  const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, *DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know that
  // the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned ArgNo = 0;
    for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
         CI != CE; ++CI, ++ArgNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      if (!isNoAlias(MemoryLocation(*CI), MemoryLocation(Object))) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return NoModRef;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS))
    return NoModRef;

  // The AliasAnalysis base class has some smarts; let's use them.
  return AliasAnalysis::getModRefInfo(CS, Loc);
}
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS1,
                                  ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS1) || isAssumeIntrinsic(CS2))
    return NoModRef;

  // The AliasAnalysis base class has some smarts; let's use them.
  return AliasAnalysis::getModRefInfo(CS1, CS2);
}
/// \brief Provide ad-hoc rules to disambiguate accesses through two GEP
/// operators, both having the exact same pointer operand.
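///
/// For illustration only (hypothetical IR, not taken from this file): given
///   %f0 = getelementptr %struct.S, %struct.S* %s, i64 %i, i32 0
///   %f1 = getelementptr %struct.S, %struct.S* %s, i64 %i, i32 1
/// the two accesses land in distinct fields of the same struct, so they
/// cannot alias as long as each access stays within its field.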
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            uint64_t V1Size,
                                            const GEPOperator *GEP2,
                                            uint64_t V2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand() == GEP2->getPointerOperand() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices aren't constants, we can't say anything.
  // If they're identical, the other indices might also be dynamically
  // equal, so the GEPs can alias.
  if (!C1 || !C2 || C1 == C2)
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  StructType *LastIndexedStruct =
      dyn_cast<StructType>(GetElementPtrInst::getIndexedType(
          GEP1->getSourceElementType(), IntermediateIndices));

  if (!LastIndexedStruct)
    return MayAlias;

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.
  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());
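  // Note: because the earlier indices walk through arrays, an access that runs
  // past the end of this struct can continue into the corresponding field of
  // the next array element. The second disjunct below therefore requires that
  // any overhang past StructSize still stops short of V1Off.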
  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}
/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, DL),
/// UnderlyingV2 is the same for V2.
///
AliasResult BasicAliasAnalysis::aliasGEP(
    const GEPOperator *GEP1, uint64_t V1Size, const AAMDNodes &V1AAInfo,
    const Value *V2, uint64_t V2Size, const AAMDNodes &V2AAInfo,
    const Value *UnderlyingV1, const Value *UnderlyingV2) {
  int64_t GEP1BaseOffset;
  bool GEP1MaxLookupReached;
  SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;

  // We have to get two AssumptionCaches here because GEP1 and V2 may be from
  // different functions.
  // FIXME: This really doesn't make any sense. We get a dominator tree below
  // that can only refer to a single function. But this function (aliasGEP) is
  // a method on an immutable pass that can be called when there *isn't*
  // a single function. The old pass management layer makes this "work", but
  // this isn't really a clean solution.
  AssumptionCacheTracker &ACT = getAnalysis<AssumptionCacheTracker>();
  AssumptionCache *AC1 = nullptr, *AC2 = nullptr;
  if (auto *GEP1I = dyn_cast<Instruction>(GEP1))
    AC1 = &ACT.getAssumptionCache(
        const_cast<Function &>(*GEP1I->getParent()->getParent()));
  if (auto *I2 = dyn_cast<Instruction>(V2))
    AC2 = &ACT.getAssumptionCache(
        const_cast<Function &>(*I2->getParent()->getParent()));

  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;

  // If we have two gep instructions with must-alias or non-aliasing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
                   UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size,
                                                V1AAInfo, UnderlyingV2,
                                                V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about the
        // relation of the resulting pointer.
        int64_t GEP2BaseOffset;
        bool GEP2MaxLookupReached;
        SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
        const Value *GEP2BasePtr =
            DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                                   GEP2MaxLookupReached, *DL, AC2, DT);
        const Value *GEP1BasePtr =
            DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                                   GEP1MaxLookupReached, *DL, AC1, DT);
        // DecomposeGEPExpression and GetUnderlyingObject should return the
        // same result except when DecomposeGEPExpression has no DataLayout.
        if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
          assert(!DL &&
                 "DecomposeGEPExpression and GetUnderlyingObject disagree!");
          return MayAlias;
        }
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            GEP1VariableIndices == GEP2VariableIndices)
          return NoAlias;
        GEP1VariableIndices.clear();
      }
    }

    // If we get a No or May, then return it immediately; no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, *DL, AC1, DT);

    int64_t GEP2BaseOffset;
    bool GEP2MaxLookupReached;
    SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
    const Value *GEP2BasePtr =
        DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                               GEP2MaxLookupReached, *DL, AC2, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
      assert(!DL &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }

    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (DL && GEP1->getPointerOperand() == GEP2->getPointerOperand()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, *DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == MemoryLocation::UnknownSize &&
        V2Size == MemoryLocation::UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                               AAMDNodes(), V2, V2Size, V2AAInfo);
    if (R != MustAlias)
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "A pointer value formed from a
      // getelementptr instruction is associated with the addresses associated
      // with the first operand of the getelementptr".
      return R;

    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, *DL, AC1, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1) {
      assert(!DL &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }
    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }
  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != MemoryLocation::UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...).
      if (V1Size != MemoryLocation::UnknownSize &&
          V2Size != MemoryLocation::UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  // Try to distinguish something like &A[i][1] against &A[42][0].
  // Grab the least significant bit set in any of the scales.
  if (!GEP1VariableIndices.empty()) {
    uint64_t Modulo = 0;
    for (unsigned i = 0, e = GEP1VariableIndices.size(); i != e; ++i)
      Modulo |= (uint64_t) GEP1VariableIndices[i].Scale;
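    // x & (x - 1) clears the lowest set bit of x, so x ^ (x & (x - 1))
    // isolates that bit: Modulo becomes the smallest power-of-two stride
    // implied by the scales.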
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
        V1Size <= Modulo - ModOffset)
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  //
  // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
  // practical effect of this is protecting TBAA in the case of dynamic
  // indices into arrays of unions or malloc'd memory.
  return PartialAlias;
}
static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}
/// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
/// instruction against another.
AliasResult BasicAliasAnalysis::aliasSelect(const SelectInst *SI,
                                            uint64_t SISize,
                                            const AAMDNodes &SIAAInfo,
                                            const Value *V2, uint64_t V2Size,
                                            const AAMDNodes &V2AAInfo) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
          aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then we return
  // NoAlias / MustAlias. Otherwise, we return MayAlias.
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(), SISize, SIAAInfo);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo);
  return MergeAliasResults(ThisAlias, Alias);
}
// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
// against another.
AliasResult BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
                                         const AAMDNodes &PNAAInfo,
                                         const Value *V2, uint64_t V2Size,
                                         const AAMDNodes &V2AAInfo) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                   MemoryLocation(V2, V2Size, V2AAInfo));
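      // Canonicalize the cache key by pointer order so that the (PN, V2) and
      // (V2, PN) queries map to the same AliasCache entry.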
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallPtrSet<Value*, 4> UniqueSrc;
  SmallVector<Value*, 4> V1Srcs;
  for (Value *PV1 : PN->incoming_values()) {
    if (isa<PHINode>(PV1))
      // If any source is itself a PHI, return MayAlias conservatively to
      // avoid compile time explosion. The worst possible case is if both
      // sides are PHI nodes, in which case this is O(m x n) time, where 'm'
      // and 'n' are the number of PHI sources.
      return MayAlias;
    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo,
                                 V1Srcs[0], PNSize, PNAAInfo);
  // Early exit if the check of the first PHI source against V2 is MayAlias;
  // merging it with any other result could not improve on that.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then the result
  // is NoAlias or MustAlias respectively. Otherwise, the result is MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo,
                                       V, PNSize, PNAAInfo);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

// aliasCheck - Provide a bunch of ad-hoc rules to disambiguate in common cases,
// such as array references.
//
AliasResult BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
                                           AAMDNodes V1AAInfo, const Value *V2,
                                           uint64_t V2Size,
                                           AAMDNodes V2AAInfo) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = GetUnderlyingObject(V1, *DL, MaxLookupSearchDepth);
  const Value *O2 = GetUnderlyingObject(V2, *DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
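    // For example (hypothetical IR, for illustration only):
    //   define void @f(i32* %arg) {
    //     %local = alloca i32
    // The address of %local did not exist in the caller, so %arg cannot
    // point at it.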
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // Most objects can't alias null.
    if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
        (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a memory location
    // if that memory location doesn't escape. Or it may pass a nocapture
    // value to other functions as long as they don't capture it.
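    // For example (hypothetical IR, for illustration only):
    //   %local = alloca i32
    //   %p = call i32* @opaque()
    // If nothing captured %local before the call, @opaque had no way to
    // obtain its address, so %p NoAlias %local.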
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
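  // For example (illustration only), an 8-byte access cannot legally overlap
  // a 4-byte global: any overlap would put part of the access outside the
  // object, which is undefined behavior.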
  if (DL)
    if ((V1Size != MemoryLocation::UnknownSize &&
         isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
        (V2Size != MemoryLocation::UnknownSize &&
         isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
      return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
               MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
      AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
  // GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
                                  V2, V2Size, V2AAInfo);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V1AAInfo,
                                     V2, V2Size, V2AAInfo);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  // If both pointers point into the same object and one of the accesses
  // covers the entire object, then the accesses must overlap in some way.
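  // For example (illustration only), a 16-byte access spanning an entire
  // 16-byte alloca must overlap any other access inside that alloca, though
  // not necessarily at the same offset, hence PartialAlias rather than
  // MustAlias.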
  if (DL && O1 == O2)
    if ((V1Size != MemoryLocation::UnknownSize &&
         isObjectSize(O1, V1Size, *DL, *TLI)) ||
        (V2Size != MemoryLocation::UnknownSize &&
         isObjectSize(O2, V2Size, *DL, *TLI)))
      return AliasCache[Locs] = PartialAlias;

  AliasResult Result =
      AliasAnalysis::alias(MemoryLocation(V1, V1Size, V1AAInfo),
                           MemoryLocation(V2, V2Size, V2AAInfo));
  return AliasCache[Locs] = Result;
}

bool BasicAliasAnalysis::isValueEqualInPotentialCycles(const Value *V,
                                                       const Value *V2) {
  if (V != V2)
    return false;

  // Non-instruction values (arguments, globals, constants) cannot be defined
  // inside a cycle, so equal pointers really are the same value.
  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  // If we have not looked through any phi nodes, there is no cycle to worry
  // about.
  if (VisitedPhiBBs.empty())
    return true;

  // Give up rather than run an expensive reachability check over too many
  // blocks.
  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Use dominance or loop info if available.
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  LoopInfo *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
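  // For example (hypothetical IR, for illustration only):
  //   loop:
  //     %p = phi i32* [ %base, %entry ], [ %q, %loop ]
  //     %q = getelementptr i32, i32* %p, i64 1
  // %q is reachable from the visited phi's block, so two occurrences of %q
  // in a query may denote values from different loop iterations.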
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(P->begin(), Inst, DT, LI))
      return false;

  return true;
}

/// GetIndexDifference - Dest and Src are the variable indices from two
/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
/// difference between the two pointers.
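/// For example (illustration only, not tied to any particular caller): with
/// Dest = { 4*%x, 2*%y } and Src = { 4*%x }, the %x terms cancel and Dest
/// becomes { 2*%y }; a Src entry with no match in Dest is appended to Dest
/// with its scale negated.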
void BasicAliasAnalysis::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    ExtensionKind Extension = Src[i].Extension;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].Extension != Extension)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = { V, Extension, -Scale };
      Dest.push_back(Entry);
    }
  }
}