//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCleanup.h"
#include "CGCUDARuntime.h"
#include "CGHLSLRuntime.h" // HLSL Change
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      CurFn(nullptr), CapturedStmtInfo(nullptr),
      SanOpts(CGM.getLangOpts().Sanitize), IsSanitizerScope(false),
      CurFuncIsThunk(false), AutoreleaseResult(false), SawAsmBlock(false),
      IsOutlinedSEHHelper(false), BlockInfo(nullptr), BlockPointer(nullptr),
      LambdaThisCaptureField(nullptr), NormalCleanupDest(nullptr),
      NextCleanupDestIndex(1), FirstBlockInfo(nullptr), EHResumeBlock(nullptr),
      ExceptionSlot(nullptr), EHSelectorSlot(nullptr),
      DebugInfo(CGM.getModuleDebugInfo()),
      DisableDebugInfo(false), DidCallStackSave(false), IndirectBranch(nullptr),
      PGO(cgm), SwitchInsn(nullptr), SwitchWeights(nullptr),
      CaseRangeBlock(nullptr), UnreachableBlock(nullptr), NumReturnExprs(0),
      NumSimpleReturnExprs(0), CXXABIThisDecl(nullptr),
      CXXABIThisValue(nullptr), CXXThisValue(nullptr),
      CXXDefaultInitExprThis(nullptr), CXXStructorImplicitParamDecl(nullptr),
      CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
      CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
      TerminateHandler(nullptr), TrapBB(nullptr) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setUnsafeAlgebra();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
    FMF.setNoNaNs();
  }
  if (CGM.getCodeGenOpts().NoSignedZeros) {
    FMF.setNoSignedZeros();
  }
  if (CGM.getCodeGenOpts().ReciprocalMath) {
    FMF.setAllowReciprocal();
  }
  Builder.SetFastMathFlags(FMF);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  // If there are any unclaimed block infos, go ahead and destroy them
  // now. This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);

#if 0 // HLSL Change - no OpenMP support
  if (getLangOpts().OpenMP) {
    CGM.getOpenMPRuntime().functionFinished(*this);
  }
#endif // HLSL Change - no OpenMP support
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  CharUnits Alignment;
  if (CGM.getCXXABI().isTypeInfoCalculable(T)) {
    Alignment = getContext().getTypeAlignInChars(T);
    unsigned MaxAlign = getContext().getLangOpts().MaxTypeAlign;
    if (MaxAlign && Alignment.getQuantity() > MaxAlign &&
        !getContext().isAlignmentRequired(T))
      Alignment = CharUnits::fromQuantity(MaxAlign);
  }
  return LValue::MakeAddr(V, T, Alignment, getContext(), CGM.getTBAAInfo(T));
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
      llvm_unreachable("undeduced auto type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      // HLSL Change Starts
      if (hlsl::IsHLSLVecType(type)) {
        // Treat hlsl vector as ext vector.
        return TEK_Scalar;
      }
      if (hlsl::IsHLSLMatType(type)) {
        // Treat hlsl matrix as scalar type too.
        return TEK_Scalar;
      }
      // HLSL Change Ends
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be used
      // later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder);

  // Reset the debug location to that of the simple 'return' expression, if any,
  // rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
           I = DeferredReplacements.begin(),
           E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  llvm::PointerType *PointerTy = Int8PtrTy;
  llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
  llvm::FunctionType *FunctionTy =
      llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
      CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
      llvm::ConstantInt::get(Int32Ty, 0),
      "callsite");

  llvm::Value *args[] = {
    llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
    CallSite
  };

  EmitNounwindRuntimeCall(F, args);
}

void CodeGenFunction::EmitMCountInstrumentation() {
  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  llvm::Constant *MCountFn =
      CGM.CreateRuntimeFunction(FTy, getTarget().getMCountName());
  EmitNounwindRuntimeCall(MCountFn);
}

// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers used.
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
                                 SmallVector<llvm::Metadata *, 5> &kernelMDArgs,
                                 CGBuilderTy &Builder, ASTContext &ASTCtx) {
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list in the form of "key", N number of values which is
  // the same number of values as there are kernel arguments.

  const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Metadata *, 8> addressQuals;
  addressQuals.push_back(llvm::MDString::get(Context, "kernel_arg_addr_space"));

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Metadata *, 8> accessQuals;
  accessQuals.push_back(llvm::MDString::get(Context, "kernel_arg_access_qual"));

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Metadata *, 8> argTypeNames;
  argTypeNames.push_back(llvm::MDString::get(Context, "kernel_arg_type"));

  // MDNode for the kernel argument base type names.
  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
  argBaseTypeNames.push_back(
      llvm::MDString::get(Context, "kernel_arg_base_type"));

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Metadata *, 8> argTypeQuals;
  argTypeQuals.push_back(llvm::MDString::get(Context, "kernel_arg_type_qual"));

  // MDNode for the kernel argument names.
  SmallVector<llvm::Metadata *, 8> argNames;
  argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *parm = FD->getParamDecl(i);
    QualType ty = parm->getType();
    std::string typeQuals;

    if (ty->isPointerType()) {
      QualType pointeeTy = ty->getPointeeType();

      // Get address qualifier.
      addressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(
          ASTCtx.getTargetAddressSpace(pointeeTy.getAddressSpace()))));

      // Get argument type name.
      std::string typeName =
          pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (pointeeTy.isCanonical() && pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      std::string baseTypeName =
          pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
              Policy) +
          "*";

      // Turn "unsigned type" to "utype"
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos+1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      // Get argument type qualifiers:
      if (ty.isRestrictQualified())
        typeQuals = "restrict";
      if (pointeeTy.isConstQualified() ||
          (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
        typeQuals += typeQuals.empty() ? "const" : " const";
      if (pointeeTy.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    } else {
      uint32_t AddrSpc = 0;
      if (ty->isImageType())
        AddrSpc =
            CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);

      addressQuals.push_back(
          llvm::ConstantAsMetadata::get(Builder.getInt32(AddrSpc)));

      // Get argument type name.
      std::string typeName = ty.getUnqualifiedType().getAsString(Policy);

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (ty.isCanonical() && pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      std::string baseTypeName =
          ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);

      // Turn "unsigned type" to "utype"
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos+1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      // Get argument type qualifiers:
      if (ty.isConstQualified())
        typeQuals = "const";
      if (ty.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    }

    argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));

    // Get image access qualifier:
    if (ty->isImageType()) {
      const OpenCLImageAccessAttr *A = parm->getAttr<OpenCLImageAccessAttr>();
      if (A && A->isWriteOnly())
        accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
      else
        accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
      // FIXME: what about read_write?
    } else
      accessQuals.push_back(llvm::MDString::get(Context, "none"));

    // Get argument name.
    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  }

  kernelMDArgs.push_back(llvm::MDNode::get(Context, addressQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, accessQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeNames));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argBaseTypeNames));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeQuals));
  if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
}

void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn)
{
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  SmallVector<llvm::Metadata *, 5> kernelMDArgs;
  kernelMDArgs.push_back(llvm::ConstantAsMetadata::get(Fn));

  GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs, Builder,
                       getContext());

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType hintQTy = A->getTypeHint();
    const ExtVectorType *hintEltQTy = hintQTy->getAs<ExtVectorType>();
    bool isSignedInteger =
        hintQTy->isSignedIntegerType() ||
        (hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *attrMDArgs[] = {
        llvm::MDString::get(Context, "vec_type_hint"),
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0))))};
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *attrMDArgs[] = {
        llvm::MDString::get(Context, "work_group_size_hint"),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *attrMDArgs[] = {
        llvm::MDString::get(Context, "reqd_work_group_size"),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  llvm::MDNode *kernelMDNode = llvm::MDNode::get(Context, kernelMDArgs);
  llvm::NamedMDNode *OpenCLKernelMetadata =
      CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
  OpenCLKernelMetadata->addOperand(kernelMDNode);
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  if (CGM.isInSanitizerBlacklist(Fn, Loc))
    SanOpts.clear();

  if (D) {
    // Apply the no_sanitize* attributes to SanOpts.
    for (auto Attr : D->specific_attrs<NoSanitizeAttr>())
      SanOpts.Mask &= ~Attr->getMask();
  }

  // Apply sanitizer attributes to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  if (SanOpts.has(SanitizerKind::Thread))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);
  if (SanOpts.has(SanitizerKind::Memory))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration. Also, in the case of -fno-inline attach NoInline
  // attribute to all functions that are not marked AlwaysInline.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (!CGM.getCodeGenOpts().NoInline) {
      for (auto RI : FD->redecls())
        if (RI->isInlineSpecified()) {
          Fn->addFnAttr(llvm::Attribute::InlineHint);
          break;
        }
    } else if (!FD->hasAttr<AlwaysInlineAttr>())
      Fn->addFnAttr(llvm::Attribute::NoInline);
  }

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrologueSig =
              CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(FD->getType(), /*ForEH=*/true);
        llvm::Constant *PrologueStructElems[] = { PrologueSig, FTRTTIConst };
        llvm::Constant *PrologueStructConst =
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
        Fn->setPrologueData(PrologueStructConst);
      }
    }
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    SmallVector<QualType, 16> ArgTypes;
    for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
         i != e; ++i) {
      ArgTypes.push_back((*i)->getType());
    }

    QualType FnType = getContext().getFunctionType(
        RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(), None); // HLSL Change - add param mods - TODO: review for inout
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = nullptr;

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = AI;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(nullptr, EI, Idx);
    ReturnValue = Builder.CreateLoad(Addr, "agg.result");
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If this lambda captures this, load it.
        LValue ThisLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
        CXXThisValue = EmitLoadOfLValue(ThisLValue,
                                        SourceLocation()).getScalarVal();
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
                                       const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().ProfileInstrGenerate) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->mayBeOverridden()) return;

  for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (llvm::BasicBlock::iterator
           BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
        if (!Call->doesNotThrow())
          return;
      } else if (isa<llvm::ResumeInst>(&*BI)) {
        return;
      }
  F->setDoesNotThrow();
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  FunctionArgList Args;
  QualType ResTy = FD->getReturnType();

  // HLSL Change Start - emit root signature associated with function
  if (HLSLRootSignatureAttr *RSA = FD->getAttr<HLSLRootSignatureAttr>()) {
    CGM.getHLSLRuntime().EmitHLSLRootSignature(*this, RSA, Fn);
  }
  // HLSL Change Ends - emit root signature associated with function

  CurGD = GD;
  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType(getContext());
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  Args.append(FD->param_begin(), FD->param_end());

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Generate the body of the function.
  PGO.checkGlobalDecl(GD);
  PGO.assignRegionCounters(GD.getDecl(), CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  else if (isa<CXXConversionDecl>(FD) &&
           cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
    // The lambda conversion to block pointer is special; the semantics can't be
    // expressed in the AST, so IRGen needs to special-case it.
    EmitLambdaToBlockPointerBody(Args);
  } else if (isa<CXXMethodDecl>(FD) &&
             cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Stmt *Body = FD->getBody()) {
    EmitFunctionBody(Args, Body);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior in
  //   a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    if (SanOpts.has(SanitizerKind::Return)) {
      SanitizerScope SanScope(this);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
                "missing_return", EmitCheckSourceLocation(FD->getLocation()),
                None);
    } else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
      EmitTrapCall(llvm::Intrinsic::trap);
    }
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it. If
/// this statement is not executed normally, it not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ... foo: bar(); } goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (const Stmt *SubStmt : S->children())
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, not a break!
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (const Stmt *SubStmt : S->children())
    if (containsBreak(SubStmt))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the boolean result in Result.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  llvm::APSInt Int;
  if (!Cond->EvaluateAsInt(Int, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks. Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock,
                                           uint64_t TrueCount) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {

    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code. "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // HLSL Change Begins.
      if (getLangOpts().HLSL) {
        // HLSL doesn't short-circuit.
        // Emit the code with the fully general case.
        llvm::Value *CondV;
        {
          ApplyDebugLocation DL(*this, Cond);
          CondV = EvaluateExprAsBool(Cond);
        }
        Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
        return;
      }
      // HLSL Change Ends.

      // Emit the LHS as a conditional. If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      // The counter tells us how often we evaluate RHS, and all of TrueCount
      // can be propagated to that branch.
      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
        EmitBlock(LHSTrue);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code. "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // HLSL Change Begins.
      if (getLangOpts().HLSL) {
        // HLSL doesn't short-circuit.
        // Emit the code with the fully general case.
        llvm::Value *CondV;
        {
          ApplyDebugLocation DL(*this, Cond);
          CondV = EvaluateExprAsBool(Cond);
        }
        Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
        return;
      }
      // HLSL Change Ends.

      // Emit the LHS as a conditional. If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up True count between the short circuit and
      // the RHS.
      uint64_t LHSCount =
          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
      uint64_t RHSCount = TrueCount - LHSCount;

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
        EmitBlock(LHSFalse);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot) {
      // Negate the count.
      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
      // Negate the condition and swap the destination blocks.
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
                                  FalseCount);
    }
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
                         getProfileCount(CondOp));

    // When computing PGO branch weights, we only know the overall count for
    // the true block. This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available. Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio =
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    incrementProfileCounter(CondOp);
    {
      ApplyDebugLocation DL(*this, Cond);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                           LHSScaledTrueCount);
    }
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
  llvm::MDNode *Weights =
      createProfileWeights(TrueCount, CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV;
  {
    ApplyDebugLocation DL(*this, Cond);
    CondV = EvaluateExprAsBool(Cond);
  }
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
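///
/// The emitted IR is, roughly, a single loop that stamps out the bit-pattern
/// one base element at a time:
///
///   vla-init.loop:
///     vla.cur  = phi i8* [ vla.begin, <origin> ], [ vla.next, vla-init.loop ]
///     memcpy(vla.cur, src, baseSizeInChars)
///     vla.next = vla.cur + baseSizeInChars
///     br (vla.next == vla.end), label %vla-init.cont, label %vla-init.loop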
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               llvm::Value *dest, llvm::Value *src,
                               llvm::Value *sizeInChars) {
  std::pair<CharUnits,CharUnits> baseSizeAndAlign
    = CGF.getContext().getTypeInfoInChars(baseType);

  CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());

  llvm::Type *i8p = Builder.getInt8PtrTy();

  llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
  llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
  cur->addIncoming(begin, originBB);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(cur, src, baseSizeInChars,
                       baseSizeAndAlign.second.getQuantity(),
                       /*volatile*/ false);

  // Go to the next element, advancing by one base element (baseSizeInChars
  // bytes) to match the memcpy above and the byte-count bound in "vla.end".
  llvm::Value *next = Builder.CreateInBoundsGEP(cur, baseSizeInChars,
                                                "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  unsigned DestAS =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
  llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP);

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
      getContext().getTypeInfoInChars(Ty);
  CharUnits Size = TypeInfo.first;
  CharUnits Align = TypeInfo.second;

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (Size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
            dyn_cast_or_null<VariableArrayType>(
                getContext().getAsArrayType(Ty))) {
      QualType eltType;
      llvm::Value *numElts;
      std::tie(numElts, eltType) = getVLASize(vlaType);

      SizeVal = numElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(Size);
    vla = nullptr;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
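  //
  // For example, under the Itanium C++ ABI a null pointer to data member is
  // represented as -1 rather than 0, so an aggregate containing one cannot
  // simply be zero-filled; instead its null constant is materialized in a
  // private global below and copied (or, for a VLA, splatted) into place.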
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
        new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                                 /*isConstant=*/true,
                                 llvm::GlobalVariable::PrivateLinkage,
                                 NullConstant, Twine());
    llvm::Value *SrcPtr =
        Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
                       Align.getQuantity(), false);
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
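///
/// For example, for a local 'int a[3][5]' this returns the constant 15, sets
/// baseType to 'int', and adjusts addr to point at the first int; for a VLA
/// 'int a[n][4]' the result is the runtime value n * 4.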
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              llvm::Value *&addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that
  // this is the size of the VLA in bytes, not its size in elements.
  llvm::Value *numVLAElements = nullptr;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
      dyn_cast<llvm::ArrayType>(
          cast<llvm::PointerType>(addr->getType())->getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
        dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out-of-synch");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    unsigned AddressSpace = addr->getType()->getPointerAddressSpace();
    llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
    addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}
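
/// Returns the number of elements in the given VLA type, folding together all
/// directly nested variable dimensions, along with the innermost non-variable
/// element type; e.g., for 'double d[n][m][4]' this is roughly
/// {n * m, double[4]}. The size expressions must already have been emitted
/// and cached in VLASizeMap (see EmitVariablyModifiedType below).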
std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return std::pair<llvm::Value*,QualType>(numElements, elementType);
}
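
/// Walks the given variably modified type and evaluates the size expression
/// of each variable array type encountered, caching the results in VLASizeMap
/// so that getVLASize can read them back later; e.g. for 'int (*p)[n][m]'
/// both 'n' and 'm' are emitted here (and checked for positivity when
/// -fsanitize=vla-bound is enabled).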
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a value
          //   greater than zero.
          if (SanOpts.has(SanitizerKind::VLABound) &&
              size->getType()->isSignedIntegerType()) {
            SanitizerScope SanScope(this);
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
              EmitCheckSourceLocation(size->getLocStart()),
              EmitCheckTypeDescriptor(size->getType())
            };
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
                                     SanitizerKind::VLABound),
                      "vla_bound_not_positive", StaticArgs, Size);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;
    }
  } while (type->isVariablyModifiedType());
}
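
/// Emit a pointer to the given va_list expression. When __builtin_va_list is
/// an array type, the expression decays to a pointer and is emitted as a
/// scalar r-value; otherwise the address of the l-value is returned.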
llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::Constant *Init) {
  assert(Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}
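
// protectFromPeepholes / unprotectFromPeepholes: wrap a scalar r-value whose
// defining instruction is a zext in a no-op bitcast so that IR-gen's own
// trunc(zext) peephole cannot fold through it; unprotectFromPeepholes erases
// the bitcast again once the value is out of the peephole's reach.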
CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}
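
// For example, a declaration such as
//   __attribute__((annotate("mydata"))) int x;
// is lowered by EmitVarAnnotations below into a call to the
// llvm.var.annotation intrinsic on the (bitcast) address of 'x', passing the
// annotation string, the source file name, and the line number assembled by
// EmitAnnotationCall above.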
void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}

llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                                   llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return V;
}

CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
    : CGF(CGF) {
  assert(!CGF->IsSanitizerScope);
  CGF->IsSanitizerScope = true;
}

CodeGenFunction::SanitizerScope::~SanitizerScope() {
  CGF->IsSanitizerScope = false;
}

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
  if (IsSanitizerScope)
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}

template <bool PreserveNames>
void CGBuilderInserter<PreserveNames>::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter<PreserveNames>::InsertHelper(I, Name, BB,
                                                              InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

#ifdef NDEBUG
#define PreserveNames false
#else
#define PreserveNames true
#endif
template void CGBuilderInserter<PreserveNames>::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const;
#undef PreserveNames