//===-- Lint.cpp - Check for common errors in LLVM IR --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass statically checks for common and easily-identified constructs
// which produce undefined or likely unintended behavior in LLVM IR.
//
// It is not a guarantee of correctness, in two ways. First, it isn't
// comprehensive. There are checks which could be done statically which are
// not yet implemented. Some of these are indicated by TODO comments, but
// those aren't comprehensive either. Second, many conditions cannot be
// checked statically. This pass does no dynamic instrumentation, so it
// can't check for all possible problems.
//
// Another limitation is that it assumes all code will be executed. A store
// through a null pointer in a basic block which is never reached is harmless,
// but this pass will warn about it anyway. This is the main reason why most
// of these checks live here instead of in the Verifier pass.
//
// Optimization passes may make conditions that this pass checks for more or
// less obvious. If an optimization pass appears to be introducing a warning,
// it may be that the optimization pass is merely exposing an existing
// condition in the code.
//
// This code may be run before instcombine. In many cases, instcombine checks
// for the same kinds of things and turns instructions with undefined behavior
// into unreachable (or equivalent). Because of this, this pass makes some
// effort to look through bitcasts and so on.
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Lint.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
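
// Flags that describe how a memory reference is used; they are powers of two
// so callers can OR them together and pass the mask to visitMemoryReference
// below.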
namespace MemRef {
static const unsigned Read = 1;
static const unsigned Write = 2;
static const unsigned Callee = 4;
static const unsigned Branchee = 8;
} // end namespace MemRef

class Lint : public FunctionPass, public InstVisitor<Lint> {
  friend class InstVisitor<Lint>;

  void visitFunction(Function &F);

  void visitCallSite(CallSite CS);
  void visitMemoryReference(Instruction &I, Value *Ptr, uint64_t Size,
                            unsigned Align, Type *Ty, unsigned Flags);
  void visitEHBeginCatch(IntrinsicInst *II);
  void visitEHEndCatch(IntrinsicInst *II);
  void visitCallInst(CallInst &I);
  void visitInvokeInst(InvokeInst &I);
  void visitReturnInst(ReturnInst &I);
  void visitLoadInst(LoadInst &I);
  void visitStoreInst(StoreInst &I);
  void visitXor(BinaryOperator &I);
  void visitSub(BinaryOperator &I);
  void visitLShr(BinaryOperator &I);
  void visitAShr(BinaryOperator &I);
  void visitShl(BinaryOperator &I);
  void visitSDiv(BinaryOperator &I);
  void visitUDiv(BinaryOperator &I);
  void visitSRem(BinaryOperator &I);
  void visitURem(BinaryOperator &I);
  void visitAllocaInst(AllocaInst &I);
  void visitVAArgInst(VAArgInst &I);
  void visitIndirectBrInst(IndirectBrInst &I);
  void visitExtractElementInst(ExtractElementInst &I);
  void visitInsertElementInst(InsertElementInst &I);
  void visitUnreachableInst(UnreachableInst &I);

  Value *findValue(Value *V, const DataLayout &DL, bool OffsetOk) const;
  Value *findValueImpl(Value *V, const DataLayout &DL, bool OffsetOk,
                       SmallPtrSetImpl<Value *> &Visited) const;

public:
  Module *Mod;
  AliasAnalysis *AA;
  AssumptionCache *AC;
  DominatorTree *DT;
  TargetLibraryInfo *TLI;

  std::string Messages;
  raw_string_ostream MessagesStr;

  static char ID; // Pass identification, replacement for typeid
  Lint() : FunctionPass(ID), MessagesStr(Messages) {
    initializeLintPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
  }
  void print(raw_ostream &O, const Module *M) const override {}

  void WriteValues(ArrayRef<const Value *> Vs) {
    for (const Value *V : Vs) {
      if (!V)
        continue;
      if (isa<Instruction>(V)) {
        MessagesStr << *V << '\n';
      } else {
        V->printAsOperand(MessagesStr, true, Mod);
        MessagesStr << '\n';
      }
    }
  }

  /// \brief A check failed, so print out the condition and the message.
  ///
  /// This provides a nice place to put a breakpoint if you want to see why
  /// something is not correct.
  void CheckFailed(const Twine &Message) { MessagesStr << Message << '\n'; }

  /// \brief A check failed (with values to print).
  ///
  /// This calls the Message-only version so that the above is easier to set
  /// a breakpoint on.
  template <typename T1, typename... Ts>
  void CheckFailed(const Twine &Message, const T1 &V1, const Ts &...Vs) {
    CheckFailed(Message);
    WriteValues({V1, Vs...});
  }
};
} // end anonymous namespace

char Lint::ID = 0;
INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR",
                      false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR",
                    false, true)

// Assert - We know that cond should be true; if not, print an error message.
#define Assert(C, ...) \
  do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (0)

// Lint::runOnFunction - This is the main analysis entry point for a
// function.
//
bool Lint::runOnFunction(Function &F) {
  Mod = F.getParent();
  AA = &getAnalysis<AliasAnalysis>();
  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  visit(F);
  dbgs() << MessagesStr.str();
  Messages.clear();
  return false;
}

void Lint::visitFunction(Function &F) {
  // This isn't undefined behavior, it's just a little unusual, and it's a
  // fairly common mistake to neglect to name a function.
  Assert(F.hasName() || F.hasLocalLinkage(),
         "Unusual: Unnamed function with non-local linkage", &F);

  // TODO: Check for irreducible control flow.
}

void Lint::visitCallSite(CallSite CS) {
  Instruction &I = *CS.getInstruction();
  Value *Callee = CS.getCalledValue();
  const DataLayout &DL = CS->getModule()->getDataLayout();

  visitMemoryReference(I, Callee, MemoryLocation::UnknownSize, 0, nullptr,
                       MemRef::Callee);

  if (Function *F = dyn_cast<Function>(findValue(Callee, DL,
                                                 /*OffsetOk=*/false))) {
    Assert(CS.getCallingConv() == F->getCallingConv(),
           "Undefined behavior: Caller and callee calling convention differ",
           &I);

    FunctionType *FT = F->getFunctionType();
    unsigned NumActualArgs = CS.arg_size();

    Assert(FT->isVarArg() ? FT->getNumParams() <= NumActualArgs
                          : FT->getNumParams() == NumActualArgs,
           "Undefined behavior: Call argument count mismatches callee "
           "argument count",
           &I);

    Assert(FT->getReturnType() == I.getType(),
           "Undefined behavior: Call return type mismatches "
           "callee return type",
           &I);

    // Check argument types (in case the callee was cast) and attributes.
    // TODO: Verify that caller and callee attributes are compatible.
    Function::arg_iterator PI = F->arg_begin(), PE = F->arg_end();
    CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
    for (; AI != AE; ++AI) {
      Value *Actual = *AI;
      if (PI != PE) {
        Argument *Formal = PI++;
        Assert(Formal->getType() == Actual->getType(),
               "Undefined behavior: Call argument type mismatches "
               "callee parameter type",
               &I);

        // Check that noalias arguments don't alias other arguments. This is
        // not fully precise because we don't know the sizes of the
        // dereferenced memory regions.
        if (Formal->hasNoAliasAttr() && Actual->getType()->isPointerTy())
          for (CallSite::arg_iterator BI = CS.arg_begin(); BI != AE; ++BI)
            if (AI != BI && (*BI)->getType()->isPointerTy()) {
              AliasResult Result = AA->alias(*AI, *BI);
              Assert(Result != MustAlias && Result != PartialAlias,
                     "Unusual: noalias argument aliases another argument", &I);
            }

        // Check that an sret argument points to valid memory.
        if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
          Type *Ty =
              cast<PointerType>(Formal->getType())->getElementType();
          visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),
                               DL.getABITypeAlignment(Ty), Ty,
                               MemRef::Read | MemRef::Write);
        }
      }
    }
  }

  if (CS.isCall() && cast<CallInst>(CS.getInstruction())->isTailCall())
    for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
         AI != AE; ++AI) {
      Value *Obj = findValue(*AI, DL, /*OffsetOk=*/true);
      Assert(!isa<AllocaInst>(Obj),
             "Undefined behavior: Call with \"tail\" keyword references "
             "alloca",
             &I);
    }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I))
    switch (II->getIntrinsicID()) {
    default: break;

    // TODO: Check more intrinsics

    case Intrinsic::memcpy: {
      MemCpyInst *MCI = cast<MemCpyInst>(&I);
      // TODO: If the size is known, use it.
      visitMemoryReference(I, MCI->getDest(), MemoryLocation::UnknownSize,
                           MCI->getAlignment(), nullptr, MemRef::Write);
      visitMemoryReference(I, MCI->getSource(), MemoryLocation::UnknownSize,
                           MCI->getAlignment(), nullptr, MemRef::Read);

      // Check that the memcpy arguments don't overlap. The AliasAnalysis API
      // isn't expressive enough for what we really want to do. Known partial
      // overlap is not distinguished from the case where nothing is known.
      uint64_t Size = 0;
      if (const ConstantInt *Len =
              dyn_cast<ConstantInt>(findValue(MCI->getLength(), DL,
                                              /*OffsetOk=*/false)))
        if (Len->getValue().isIntN(32))
          Size = Len->getValue().getZExtValue();
      Assert(AA->alias(MCI->getSource(), Size, MCI->getDest(), Size) !=
                 MustAlias,
             "Undefined behavior: memcpy source and destination overlap", &I);
      break;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MMI = cast<MemMoveInst>(&I);
      // TODO: If the size is known, use it.
      visitMemoryReference(I, MMI->getDest(), MemoryLocation::UnknownSize,
                           MMI->getAlignment(), nullptr, MemRef::Write);
      visitMemoryReference(I, MMI->getSource(), MemoryLocation::UnknownSize,
                           MMI->getAlignment(), nullptr, MemRef::Read);
      break;
    }
    case Intrinsic::memset: {
      MemSetInst *MSI = cast<MemSetInst>(&I);
      // TODO: If the size is known, use it.
      visitMemoryReference(I, MSI->getDest(), MemoryLocation::UnknownSize,
                           MSI->getAlignment(), nullptr, MemRef::Write);
      break;
    }

    case Intrinsic::vastart:
      Assert(I.getParent()->getParent()->isVarArg(),
             "Undefined behavior: va_start called in a non-varargs function",
             &I);

      visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize,
                           0, nullptr, MemRef::Read | MemRef::Write);
      break;
    case Intrinsic::vacopy:
      visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize,
                           0, nullptr, MemRef::Write);
      visitMemoryReference(I, CS.getArgument(1), MemoryLocation::UnknownSize,
                           0, nullptr, MemRef::Read);
      break;
    case Intrinsic::vaend:
      visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize,
                           0, nullptr, MemRef::Read | MemRef::Write);
      break;

    case Intrinsic::stackrestore:
      // Stackrestore doesn't read or write memory, but it sets the
      // stack pointer, which the compiler may read from or write to
      // at any time, so check it for both readability and writeability.
      visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize,
                           0, nullptr, MemRef::Read | MemRef::Write);
      break;

    case Intrinsic::eh_begincatch:
      visitEHBeginCatch(II);
      break;
    case Intrinsic::eh_endcatch:
      visitEHEndCatch(II);
      break;
    }
}

void Lint::visitCallInst(CallInst &I) {
  return visitCallSite(&I);
}

void Lint::visitInvokeInst(InvokeInst &I) {
  return visitCallSite(&I);
}

void Lint::visitReturnInst(ReturnInst &I) {
  Function *F = I.getParent()->getParent();
  Assert(!F->doesNotReturn(),
         "Unusual: Return statement in function with noreturn attribute", &I);

  if (Value *V = I.getReturnValue()) {
    Value *Obj =
        findValue(V, F->getParent()->getDataLayout(), /*OffsetOk=*/true);
    Assert(!isa<AllocaInst>(Obj), "Unusual: Returning alloca value", &I);
  }
}

// TODO: Check that the reference is in bounds.
// TODO: Check readnone/readonly function attributes.
void Lint::visitMemoryReference(Instruction &I,
                                Value *Ptr, uint64_t Size, unsigned Align,
                                Type *Ty, unsigned Flags) {
  // If no memory is being referenced, it doesn't matter if the pointer
  // is valid.
  if (Size == 0)
    return;

  Value *UnderlyingObject =
      findValue(Ptr, I.getModule()->getDataLayout(), /*OffsetOk=*/true);
  Assert(!isa<ConstantPointerNull>(UnderlyingObject),
         "Undefined behavior: Null pointer dereference", &I);
  Assert(!isa<UndefValue>(UnderlyingObject),
         "Undefined behavior: Undef pointer dereference", &I);
  Assert(!isa<ConstantInt>(UnderlyingObject) ||
             !cast<ConstantInt>(UnderlyingObject)->isAllOnesValue(),
         "Unusual: All-ones pointer dereference", &I);
  Assert(!isa<ConstantInt>(UnderlyingObject) ||
             !cast<ConstantInt>(UnderlyingObject)->isOne(),
         "Unusual: Address one pointer dereference", &I);

  if (Flags & MemRef::Write) {
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(UnderlyingObject))
      Assert(!GV->isConstant(),
             "Undefined behavior: Write to read-only memory", &I);
    Assert(!isa<Function>(UnderlyingObject) &&
               !isa<BlockAddress>(UnderlyingObject),
           "Undefined behavior: Write to text section", &I);
  }

  if (Flags & MemRef::Read) {
    Assert(!isa<Function>(UnderlyingObject),
           "Unusual: Load from function body", &I);
    Assert(!isa<BlockAddress>(UnderlyingObject),
           "Undefined behavior: Load from block address", &I);
  }

  if (Flags & MemRef::Callee) {
    Assert(!isa<BlockAddress>(UnderlyingObject),
           "Undefined behavior: Call to block address", &I);
  }

  if (Flags & MemRef::Branchee) {
    Assert(!isa<Constant>(UnderlyingObject) ||
               isa<BlockAddress>(UnderlyingObject),
           "Undefined behavior: Branch to non-blockaddress", &I);
  }

  // Check for buffer overflows and misalignment.
  // Only handles memory references that read/write something simple like an
  // alloca instruction or a global variable.
  auto &DL = I.getModule()->getDataLayout();
  int64_t Offset = 0;
  if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL)) {
    // OK, so the access is to a constant offset from Ptr. Check that Ptr is
    // something we can handle and if so extract the size of this base object
    // along with its alignment.
    uint64_t BaseSize = MemoryLocation::UnknownSize;
    unsigned BaseAlign = 0;

    if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
      Type *ATy = AI->getAllocatedType();
      if (!AI->isArrayAllocation() && ATy->isSized())
        BaseSize = DL.getTypeAllocSize(ATy);
      BaseAlign = AI->getAlignment();
      if (BaseAlign == 0 && ATy->isSized())
        BaseAlign = DL.getABITypeAlignment(ATy);
    } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
      // If the global may be defined differently in another compilation unit
      // then don't warn about funky memory accesses.
      if (GV->hasDefinitiveInitializer()) {
        Type *GTy = GV->getType()->getElementType();
        if (GTy->isSized())
          BaseSize = DL.getTypeAllocSize(GTy);
        BaseAlign = GV->getAlignment();
        if (BaseAlign == 0 && GTy->isSized())
          BaseAlign = DL.getABITypeAlignment(GTy);
      }
    }

    // Accesses from before the start or after the end of the object are not
    // defined.
    Assert(Size == MemoryLocation::UnknownSize ||
               BaseSize == MemoryLocation::UnknownSize ||
               (Offset >= 0 && Offset + Size <= BaseSize),
           "Undefined behavior: Buffer overflow", &I);

    // Accesses that say that the memory is more aligned than it is are not
    // defined.
    if (Align == 0 && Ty && Ty->isSized())
      Align = DL.getABITypeAlignment(Ty);
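    // MinAlign(BaseAlign, Offset) is the largest power of two that divides
    // both, i.e. the best alignment that can be guaranteed at this offset
    // into an object aligned to BaseAlign.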
    Assert(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
           "Undefined behavior: Memory reference address is misaligned", &I);
  }
}

void Lint::visitLoadInst(LoadInst &I) {
  visitMemoryReference(I, I.getPointerOperand(),
                       AA->getTypeStoreSize(I.getType()), I.getAlignment(),
                       I.getType(), MemRef::Read);
}

void Lint::visitStoreInst(StoreInst &I) {
  visitMemoryReference(I, I.getPointerOperand(),
                       AA->getTypeStoreSize(I.getOperand(0)->getType()),
                       I.getAlignment(),
                       I.getOperand(0)->getType(), MemRef::Write);
}
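
// A programmer may expect xor(x, x) or sub(x, x) to fold to zero, but when
// both operands are undef each use may independently take a different value,
// so the result is undef rather than zero.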
void Lint::visitXor(BinaryOperator &I) {
  Assert(!isa<UndefValue>(I.getOperand(0)) || !isa<UndefValue>(I.getOperand(1)),
         "Undefined result: xor(undef, undef)", &I);
}

void Lint::visitSub(BinaryOperator &I) {
  Assert(!isa<UndefValue>(I.getOperand(0)) || !isa<UndefValue>(I.getOperand(1)),
         "Undefined result: sub(undef, undef)", &I);
}

void Lint::visitLShr(BinaryOperator &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(
          findValue(I.getOperand(1), I.getModule()->getDataLayout(),
                    /*OffsetOk=*/false)))
    Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
           "Undefined result: Shift count out of range", &I);
}

void Lint::visitAShr(BinaryOperator &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(
          I.getOperand(1), I.getModule()->getDataLayout(), /*OffsetOk=*/false)))
    Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
           "Undefined result: Shift count out of range", &I);
}

void Lint::visitShl(BinaryOperator &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(
          I.getOperand(1), I.getModule()->getDataLayout(), /*OffsetOk=*/false)))
    Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
           "Undefined result: Shift count out of range", &I);
}
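
// Walk the predecessor graph from BB, returning true only if BB is a landing
// pad or every path leading to it passes through one. VisitedBlocks guards
// against revisiting blocks on cyclic paths.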
static bool
allPredsCameFromLandingPad(BasicBlock *BB,
                           SmallSet<BasicBlock *, 4> &VisitedBlocks) {
  VisitedBlocks.insert(BB);
  if (BB->isLandingPad())
    return true;

  // If we find a block with no predecessors, the search failed.
  if (pred_empty(BB))
    return false;

  for (BasicBlock *Pred : predecessors(BB)) {
    if (VisitedBlocks.count(Pred))
      continue;
    if (!allPredsCameFromLandingPad(Pred, VisitedBlocks))
      return false;
  }
  return true;
}
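
// Scan forward from InstBegin in BB, and then through all successors,
// returning true only if every path out of BB reaches a call to
// llvm.eh.endcatch. Finding a second llvm.eh.begincatch first is an error,
// reported through SecondBeginCatch.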
static bool
allSuccessorsReachEndCatch(BasicBlock *BB, BasicBlock::iterator InstBegin,
                           IntrinsicInst **SecondBeginCatch,
                           SmallSet<BasicBlock *, 4> &VisitedBlocks) {
  VisitedBlocks.insert(BB);
  for (BasicBlock::iterator I = InstBegin, E = BB->end(); I != E; ++I) {
    IntrinsicInst *IC = dyn_cast<IntrinsicInst>(I);
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_endcatch)
      return true;
    // If we find another begincatch while looking for an endcatch,
    // that's also an error.
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_begincatch) {
      *SecondBeginCatch = IC;
      return false;
    }
  }

  // If we reach a block with no successors while searching, the
  // search has failed.
  if (succ_empty(BB))
    return false;

  // Otherwise, search all of the successors.
  for (BasicBlock *Succ : successors(BB)) {
    if (VisitedBlocks.count(Succ))
      continue;
    if (!allSuccessorsReachEndCatch(Succ, Succ->begin(), SecondBeginCatch,
                                    VisitedBlocks))
      return false;
  }
  return true;
}

void Lint::visitEHBeginCatch(IntrinsicInst *II) {
  // The checks in this function make a potentially dubious assumption about
  // the CFG, namely that any block involved in a catch is only used for the
  // catch. This will very likely be true of IR generated by a front end,
  // but it may cease to be true, for example, if the IR is run through a
  // pass which combines similar blocks.
  //
  // In general, if we encounter a block that isn't dominated by the catch
  // block while we are searching the catch block's successors for a call
  // to the end catch intrinsic, then it is possible that it will be legal for
  // a path through this block to never reach a call to llvm.eh.endcatch.
  // An analogous statement could be made about our search for a landing
  // pad among the catch block's predecessors.
  //
  // What is actually required is that no path is possible at runtime that
  // reaches a call to llvm.eh.begincatch without having previously visited
  // a landingpad instruction and that no path is possible at runtime that
  // calls llvm.eh.begincatch and does not subsequently call llvm.eh.endcatch
  // (mentally adjusting for the fact that in reality these calls will be
  // removed before code generation).
  //
  // Because this is a lint check, we take a pessimistic approach and warn if
  // the control flow is potentially incorrect.

  SmallSet<BasicBlock *, 4> VisitedBlocks;
  BasicBlock *CatchBB = II->getParent();

  // The begin catch must occur in a landing pad block or all paths
  // to it must have come from a landing pad.
  Assert(allPredsCameFromLandingPad(CatchBB, VisitedBlocks),
         "llvm.eh.begincatch may be reachable without passing a landingpad",
         II);

  // Reset the visited block list.
  VisitedBlocks.clear();

  IntrinsicInst *SecondBeginCatch = nullptr;

  // The search has to run before the asserts; otherwise SecondBeginCatch
  // would never be set and the first assert below could never fire.
  bool EndCatchFound = allSuccessorsReachEndCatch(
      CatchBB, std::next(static_cast<BasicBlock::iterator>(II)),
      &SecondBeginCatch, VisitedBlocks);
  Assert(
      SecondBeginCatch == nullptr,
      "llvm.eh.begincatch may be called a second time before llvm.eh.endcatch",
      II, SecondBeginCatch);
  Assert(EndCatchFound,
         "Some paths from llvm.eh.begincatch may not reach llvm.eh.endcatch",
         II);
}
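
// Scan backward from InstRbegin in BB, and then through all predecessors,
// returning true only if every path into BB comes from a call to
// llvm.eh.begincatch. Finding a second llvm.eh.endcatch first is an error,
// reported through SecondEndCatch; reaching a landingpad instruction or a
// block with no predecessors also fails the search.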
static bool allPredCameFromBeginCatch(
    BasicBlock *BB, BasicBlock::reverse_iterator InstRbegin,
    IntrinsicInst **SecondEndCatch, SmallSet<BasicBlock *, 4> &VisitedBlocks) {
  VisitedBlocks.insert(BB);
  // Look for a begincatch in this block.
  for (BasicBlock::reverse_iterator RI = InstRbegin, RE = BB->rend(); RI != RE;
       ++RI) {
    IntrinsicInst *IC = dyn_cast<IntrinsicInst>(&*RI);
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_begincatch)
      return true;
    // If we find another end catch before we find a begin catch, that's
    // an error.
    if (IC && IC->getIntrinsicID() == Intrinsic::eh_endcatch) {
      *SecondEndCatch = IC;
      return false;
    }
    // If we encounter a landingpad instruction, the search failed.
    if (isa<LandingPadInst>(*RI))
      return false;
  }

  // If while searching we find a block with no predecessors,
  // the search failed.
  if (pred_empty(BB))
    return false;

  // Search any predecessors we haven't seen before.
  for (BasicBlock *Pred : predecessors(BB)) {
    if (VisitedBlocks.count(Pred))
      continue;
    if (!allPredCameFromBeginCatch(Pred, Pred->rbegin(), SecondEndCatch,
                                   VisitedBlocks))
      return false;
  }
  return true;
}

void Lint::visitEHEndCatch(IntrinsicInst *II) {
  // The check in this function makes a potentially dubious assumption about
  // the CFG, namely that any block involved in a catch is only used for the
  // catch. This will very likely be true of IR generated by a front end,
  // but it may cease to be true, for example, if the IR is run through a
  // pass which combines similar blocks.
  //
  // In general, if we encounter a block that isn't post-dominated by the
  // end catch block while we are searching the end catch block's predecessors
  // for a call to the begin catch intrinsic, then it is possible that it will
  // be legal for a path to reach the end catch block without ever having
  // called llvm.eh.begincatch.
  //
  // What is actually required is that no path is possible at runtime that
  // reaches a call to llvm.eh.endcatch without having previously visited
  // a call to llvm.eh.begincatch (mentally adjusting for the fact that in
  // reality these calls will be removed before code generation).
  //
  // Because this is a lint check, we take a pessimistic approach and warn if
  // the control flow is potentially incorrect.

  BasicBlock *EndCatchBB = II->getParent();

  // All paths to the end catch call must pass through a begin catch call.
  // If llvm.eh.begincatch wasn't called in the current block, recursively
  // look for it in predecessors.
  SmallSet<BasicBlock *, 4> VisitedBlocks;
  IntrinsicInst *SecondEndCatch = nullptr;

  // The search has to run before the asserts; otherwise SecondEndCatch
  // would never be set and the first assert below could never fire.
  bool BeginCatchFound =
      allPredCameFromBeginCatch(EndCatchBB, BasicBlock::reverse_iterator(II),
                                &SecondEndCatch, VisitedBlocks);
  Assert(
      SecondEndCatch == nullptr,
      "llvm.eh.endcatch may be called a second time after llvm.eh.begincatch",
      II, SecondEndCatch);
  Assert(BeginCatchFound,
         "llvm.eh.endcatch may be reachable without passing llvm.eh.begincatch",
         II);
}
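
// Conservatively returns true if V may be zero. Undef is treated as possibly
// zero, and vector constants are checked element by element, since
// computeKnownBits on a whole vector only reports bits common to every lane.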
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
                   AssumptionCache *AC) {
  // Assume undef could be zero.
  if (isa<UndefValue>(V))
    return true;

  VectorType *VecTy = dyn_cast<VectorType>(V->getType());
  if (!VecTy) {
    unsigned BitWidth = V->getType()->getIntegerBitWidth();
    APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
    computeKnownBits(V, KnownZero, KnownOne, DL, 0, AC,
                     dyn_cast<Instruction>(V), DT);
    return KnownZero.isAllOnesValue();
  }

  // Per-component check doesn't work with zeroinitializer.
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return false;

  if (C->isZeroValue())
    return true;

  // For a vector, KnownZero will only be true if all values are zero, so check
  // this per component.
  unsigned BitWidth = VecTy->getElementType()->getIntegerBitWidth();
  for (unsigned I = 0, N = VecTy->getNumElements(); I != N; ++I) {
    Constant *Elem = C->getAggregateElement(I);
    if (isa<UndefValue>(Elem))
      return true;

    APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
    computeKnownBits(Elem, KnownZero, KnownOne, DL);
    if (KnownZero.isAllOnesValue())
      return true;
  }

  return false;
}

void Lint::visitSDiv(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitUDiv(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitSRem(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitURem(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitAllocaInst(AllocaInst &I) {
  if (isa<ConstantInt>(I.getArraySize()))
    // This isn't undefined behavior, it's just an obvious pessimization.
    Assert(&I.getParent()->getParent()->getEntryBlock() == I.getParent(),
           "Pessimization: Static alloca outside of entry block", &I);

  // TODO: Check for an unusual size (MSB set?)
}

void Lint::visitVAArgInst(VAArgInst &I) {
  visitMemoryReference(I, I.getOperand(0), MemoryLocation::UnknownSize, 0,
                       nullptr, MemRef::Read | MemRef::Write);
}

void Lint::visitIndirectBrInst(IndirectBrInst &I) {
  visitMemoryReference(I, I.getAddress(), MemoryLocation::UnknownSize, 0,
                       nullptr, MemRef::Branchee);

  Assert(I.getNumDestinations() != 0,
         "Undefined behavior: indirectbr with no destinations", &I);
}

void Lint::visitExtractElementInst(ExtractElementInst &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(
          findValue(I.getIndexOperand(), I.getModule()->getDataLayout(),
                    /*OffsetOk=*/false)))
    Assert(CI->getValue().ult(I.getVectorOperandType()->getNumElements()),
           "Undefined result: extractelement index out of range", &I);
}

void Lint::visitInsertElementInst(InsertElementInst &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(
          findValue(I.getOperand(2), I.getModule()->getDataLayout(),
                    /*OffsetOk=*/false)))
    Assert(CI->getValue().ult(I.getType()->getNumElements()),
           "Undefined result: insertelement index out of range", &I);
}

void Lint::visitUnreachableInst(UnreachableInst &I) {
  // This isn't undefined behavior, it's merely suspicious.
  Assert(&I == I.getParent()->begin() ||
             std::prev(BasicBlock::iterator(&I))->mayHaveSideEffects(),
         "Unusual: unreachable immediately preceded by instruction without "
         "side effects",
         &I);
}

/// findValue - Look through bitcasts and simple memory reference patterns
/// to identify an equivalent, but more informative, value. If OffsetOk
/// is true, look through getelementptrs with non-zero offsets too.
///
/// Most analysis passes don't require this logic, because instcombine
/// will simplify most of these kinds of things away. But it's a goal of
/// this Lint pass to be useful even on non-optimized IR.
Value *Lint::findValue(Value *V, const DataLayout &DL, bool OffsetOk) const {
  SmallPtrSet<Value *, 4> Visited;
  return findValueImpl(V, DL, OffsetOk, Visited);
}

/// findValueImpl - Implementation helper for findValue.
Value *Lint::findValueImpl(Value *V, const DataLayout &DL, bool OffsetOk,
                           SmallPtrSetImpl<Value *> &Visited) const {
  // Detect self-referential values.
  if (!Visited.insert(V).second)
    return UndefValue::get(V->getType());

  // TODO: Look through sext or zext cast, when the result is known to
  // be interpreted as signed or unsigned, respectively.
  // TODO: Look through eliminable cast pairs.
  // TODO: Look through calls with unique return values.
  // TODO: Look through vector insert/extract/shuffle.
  V = OffsetOk ? GetUnderlyingObject(V, DL) : V->stripPointerCasts();
  if (LoadInst *L = dyn_cast<LoadInst>(V)) {
    BasicBlock::iterator BBI = L;
    BasicBlock *BB = L->getParent();
    SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
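    // Walk backwards through the block, hopping to the unique predecessor
    // when we reach the top, looking for an earlier instruction (within a
    // small scan window) that makes the loaded value available.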
    for (;;) {
      if (!VisitedBlocks.insert(BB).second)
        break;
      if (Value *U = FindAvailableLoadedValue(L->getPointerOperand(),
                                              BB, BBI, 6, AA))
        return findValueImpl(U, DL, OffsetOk, Visited);
      if (BBI != BB->begin()) break;
      BB = BB->getUniquePredecessor();
      if (!BB) break;
      BBI = BB->end();
    }
  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    if (Value *W = PN->hasConstantValue())
      if (W != V)
        return findValueImpl(W, DL, OffsetOk, Visited);
  } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
    if (CI->isNoopCast(DL))
      return findValueImpl(CI->getOperand(0), DL, OffsetOk, Visited);
  } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
    if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
                                     Ex->getIndices()))
      if (W != V)
        return findValueImpl(W, DL, OffsetOk, Visited);
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // Same as above, but for ConstantExpr instead of Instruction.
    if (Instruction::isCast(CE->getOpcode())) {
      if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
                               CE->getOperand(0)->getType(), CE->getType(),
                               DL.getIntPtrType(V->getType())))
        return findValueImpl(CE->getOperand(0), DL, OffsetOk, Visited);
    } else if (CE->getOpcode() == Instruction::ExtractValue) {
      ArrayRef<unsigned> Indices = CE->getIndices();
      if (Value *W = FindInsertedValue(CE->getOperand(0), Indices))
        if (W != V)
          return findValueImpl(W, DL, OffsetOk, Visited);
    }
  }

  // As a last resort, try SimplifyInstruction or constant folding.
  if (Instruction *Inst = dyn_cast<Instruction>(V)) {
    if (Value *W = SimplifyInstruction(Inst, DL, TLI, DT, AC))
      return findValueImpl(W, DL, OffsetOk, Visited);
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (Value *W = ConstantFoldConstantExpression(CE, DL, TLI))
      if (W != V)
        return findValueImpl(W, DL, OffsetOk, Visited);
  }

  return V;
}

//===----------------------------------------------------------------------===//
//  Implement the public interfaces to this file...
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createLintPass() {
  return new Lint();
}

/// lintFunction - Check a function for errors, printing messages on stderr.
///
void llvm::lintFunction(const Function &f) {
  Function &F = const_cast<Function&>(f);
  assert(!F.isDeclaration() && "Cannot lint external functions");

  legacy::FunctionPassManager FPM(F.getParent());
  Lint *V = new Lint();
  FPM.add(V);
  FPM.run(F);
}

/// lintModule - Check a module for errors, printing messages on stderr.
///
void llvm::lintModule(const Module &M) {
  legacy::PassManager PM;
  Lint *V = new Lint();
  PM.add(V);
  PM.run(const_cast<Module&>(M));
}