//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
class AtomicInfo {
  CodeGenFunction &CGF;
  QualType AtomicTy;
  QualType ValueTy;
  uint64_t AtomicSizeInBits;
  uint64_t ValueSizeInBits;
  CharUnits AtomicAlign;
  CharUnits ValueAlign;
  CharUnits LValueAlign;
  TypeEvaluationKind EvaluationKind;
  bool UseLibcall;
  LValue LVal;
  CGBitFieldInfo BFI;

public:
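  /// Compute the atomic layout for the given l-value, covering the simple,
  /// bit-field, vector-element and ext-vector-element forms.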
  AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
      : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
        EvaluationKind(TEK_Scalar), UseLibcall(true) {
    assert(!lvalue.isGlobalReg());
    ASTContext &C = CGF.getContext();
    if (lvalue.isSimple()) {
      AtomicTy = lvalue.getType();
      if (auto *ATy = AtomicTy->getAs<AtomicType>())
        ValueTy = ATy->getValueType();
      else
        ValueTy = AtomicTy;
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      uint64_t ValueAlignInBits;
      uint64_t AtomicAlignInBits;
      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      LVal = lvalue;
    } else if (lvalue.isBitField()) {
      ValueTy = lvalue.getType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      auto &OrigBFI = lvalue.getBitFieldInfo();
      auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
      AtomicSizeInBits = C.toBits(
          C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
              .RoundUpToAlignment(lvalue.getAlignment()));
      auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr());
      auto OffsetInChars =
          (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
          lvalue.getAlignment();
      VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
          VoidPtrAddr, OffsetInChars.getQuantity());
      auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          VoidPtrAddr,
          CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
          "atomic_bitfield_base");
      BFI = OrigBFI;
      BFI.Offset = Offset;
      BFI.StorageSize = AtomicSizeInBits;
      BFI.StorageOffset += OffsetInChars;
      LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(),
                                  lvalue.getAlignment());
      LVal.setTBAAInfo(lvalue.getTBAAInfo());
      AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
      if (AtomicTy.isNull()) {
        llvm::APInt Size(
            /*numBits=*/32,
            C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
        AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
                                          /*IndexTypeQuals=*/0);
      }
      AtomicAlign = ValueAlign = lvalue.getAlignment();
    } else if (lvalue.isVectorElt()) {
      ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicTy = lvalue.getType();
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      AtomicAlign = ValueAlign = lvalue.getAlignment();
      LVal = lvalue;
    } else {
      assert(lvalue.isExtVectorElt());
      ValueTy = lvalue.getType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
          lvalue.getType(), lvalue.getExtVectorAddr()
                                ->getType()
                                ->getPointerElementType()
                                ->getVectorNumElements());
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      AtomicAlign = ValueAlign = lvalue.getAlignment();
      LVal = lvalue;
    }
    UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
        AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
  }

  QualType getAtomicType() const { return AtomicTy; }
  QualType getValueType() const { return ValueTy; }
  CharUnits getAtomicAlignment() const { return AtomicAlign; }
  CharUnits getValueAlignment() const { return ValueAlign; }
  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
  TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
  bool shouldUseLibcall() const { return UseLibcall; }
  const LValue &getAtomicLValue() const { return LVal; }
  llvm::Value *getAtomicAddress() const {
    if (LVal.isSimple())
      return LVal.getAddress();
    else if (LVal.isBitField())
      return LVal.getBitFieldAddr();
    else if (LVal.isVectorElt())
      return LVal.getVectorAddr();
    assert(LVal.isExtVectorElt());
    return LVal.getExtVectorAddr();
  }

  /// Is the atomic size larger than the underlying value type?
  ///
  /// Note that the absence of padding does not mean that atomic
  /// objects are completely interchangeable with non-atomic
  /// objects: we might have promoted the alignment of a type
  /// without making it bigger.
  bool hasPadding() const {
    return (ValueSizeInBits != AtomicSizeInBits);
  }

  bool emitMemSetZeroIfNecessary() const;

  llvm::Value *getAtomicSizeValue() const {
    CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
    return CGF.CGM.getSize(size);
  }

  /// Cast the given pointer to an integer pointer suitable for
  /// atomic operations.
  llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

  /// Turn an atomic-layout object into an r-value.
  RValue convertTempToRValue(llvm::Value *addr, AggValueSlot resultSlot,
                             SourceLocation loc, bool AsValue) const;

  /// \brief Converts a rvalue to integer value.
  llvm::Value *convertRValueToInt(RValue RVal) const;

  RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                   AggValueSlot ResultSlot,
                                   SourceLocation Loc, bool AsValue) const;

  /// Copy an atomic r-value into atomic-layout memory.
  void emitCopyIntoMemory(RValue rvalue) const;

  /// Project an l-value down to the value field.
  LValue projectValue() const {
    assert(LVal.isSimple());
    llvm::Value *addr = getAtomicAddress();
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);

    return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(),
                            CGF.getContext(), LVal.getTBAAInfo());
  }

  /// \brief Emits atomic load.
  /// \returns Loaded value.
  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                        bool AsValue, llvm::AtomicOrdering AO,
                        bool IsVolatile);

  /// \brief Emits atomic compare-and-exchange sequence.
  /// \param Expected Expected value.
  /// \param Desired Desired value.
  /// \param Success Atomic ordering for success operation.
  /// \param Failure Atomic ordering for failed operation.
  /// \param IsWeak true if atomic operation is weak, false otherwise.
  /// \returns Pair of values: previous value from storage (value type) and
  /// boolean flag (i1 type) with true if success and false otherwise.
  std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
      RValue Expected, RValue Desired,
      llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
      llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
      bool IsWeak = false);

  /// \brief Emits atomic update.
  /// \param AO Atomic ordering.
  /// \param UpdateOp Update operation for the current lvalue.
  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                        const llvm::function_ref<RValue(RValue)> &UpdateOp,
                        bool IsVolatile);
  /// \brief Emits atomic update.
  /// \param AO Atomic ordering.
  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                        bool IsVolatile);

  /// Materialize an atomic r-value in atomic-layout memory.
  llvm::Value *materializeRValue(RValue rvalue) const;

  /// \brief Translates LLVM atomic ordering to GNU atomic ordering for
  /// libcalls.
  static AtomicExpr::AtomicOrderingKind
  translateAtomicOrdering(const llvm::AtomicOrdering AO);

private:
  bool requiresMemSetZero(llvm::Type *type) const;

  /// \brief Creates temp alloca for intermediate operations on atomic value.
  llvm::Value *CreateTempAlloca() const;

  /// \brief Emits atomic load as a libcall.
  void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                             llvm::AtomicOrdering AO, bool IsVolatile);
  /// \brief Emits atomic load as LLVM instruction.
  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
  /// \brief Emits atomic compare-and-exchange op as a libcall.
  llvm::Value *EmitAtomicCompareExchangeLibcall(
      llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
      llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
      llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent);
  /// \brief Emits atomic compare-and-exchange op as LLVM instruction.
  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
      llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
      llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
      llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
      bool IsWeak = false);
  /// \brief Emit atomic update as libcalls.
  void
  EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
  /// \brief Emit atomic update as LLVM instructions.
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
  /// \brief Emit atomic update as libcalls.
  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                               bool IsVolatile);
  /// \brief Emit atomic update as LLVM instructions.
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);
};
} // end anonymous namespace

AtomicExpr::AtomicOrderingKind
AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) {
  switch (AO) {
  case llvm::Unordered:
  case llvm::NotAtomic:
  case llvm::Monotonic:
    return AtomicExpr::AO_ABI_memory_order_relaxed;
  case llvm::Acquire:
    return AtomicExpr::AO_ABI_memory_order_acquire;
  case llvm::Release:
    return AtomicExpr::AO_ABI_memory_order_release;
  case llvm::AcquireRelease:
    return AtomicExpr::AO_ABI_memory_order_acq_rel;
  case llvm::SequentiallyConsistent:
    return AtomicExpr::AO_ABI_memory_order_seq_cst;
  }
  llvm_unreachable("Unhandled AtomicOrdering");
}

llvm::Value *AtomicInfo::CreateTempAlloca() const {
  auto *TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      "atomic-temp");
  TempAlloca->setAlignment(getAtomicAlignment().getQuantity());
  // Cast to pointer to value type for bitfields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress()->getType());
  return TempAlloca;
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
          FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern. User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  llvm::Value *addr = LVal.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getQuantity());
  return true;
}
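
/// Emit a single cmpxchg instruction (strong or weak) with the given success
/// and failure orderings. On failure the observed value is written back into
/// the 'expected' temporary (Val1); the success flag is stored to Dest.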
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics.
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}
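
/// Emit the body of a single atomic builtin for a known memory ordering.
/// Compare-exchange and load/store forms are handled specially; everything
/// else maps onto an LLVM atomicrmw instruction, with the operation re-applied
/// afterwards for the __atomic_*_fetch variants.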
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}
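
/// Add a value argument for an atomic libcall: for the optimized (sized)
/// variants the value is loaded and passed as an appropriately sized integer,
/// while the generic variants always take the value by (void *) reference.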
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
                               Align, CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
      getTarget().getMaxAtomicInlineWidth();
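  // Atomics whose size does not match their alignment, or which are wider
  // than the target's maximum inline atomic width, go through the library.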
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  auto GetDest = [&] {
    if (!RValTy->isVoidType() && !Dest) {
      Dest = CreateMemTemp(RValTy, ".atomicdst");
    }
    return Dest;
  };

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // The atomic address is the first or second parameter.
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
      }
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (HaveRetTy && !RetTy->isVoidType())
      return Res;
    // The value is returned via an explicit out param.
    if (RetTy->isVoidType())
      return RValue::get(nullptr);
    // The value is returned directly for optimized libcalls but the caller
    // expects an out-param.
    if (UseOptimizedLibcall) {
      llvm::Value *ResVal = Res.getScalarVal();
      llvm::StoreInst *StoreDest = Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
      StoreDest->setAlignment(Align);
    }
    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
  }
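
  // Inline path: the operation fits the target's native atomic width, so emit
  // it directly as LLVM atomic instructions on an integer view of the object.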
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *ITy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8);
  llvm::Value *OrigDest = GetDest();
  Ptr = Builder.CreateBitCast(
      Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
  if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
  if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
  if (Dest && !E->isCmpXChg())
    Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior.
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior.
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior.
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split.
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc, bool AsValue) const {
  if (LVal.isSimple()) {
    if (EvaluationKind == TEK_Aggregate)
      return resultSlot.asRValue();

    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);

    // Otherwise, just convert the temporary to an r-value using the
    // normal conversion routine.
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  }
  if (!AsValue)
    // Get RValue from temp memory as atomic for non-simple lvalues
    return RValue::get(
        CGF.Builder.CreateAlignedLoad(addr, AtomicAlign.getQuantity()));
  if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield(
        addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment()));
  if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(),
                                                      LVal.getType(),
                                                      LVal.getAlignment()),
                                loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment()));
}
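
/// Convert the integer produced by an atomic operation back into an r-value
/// of the original value type (or of the whole atomic type for non-simple
/// l-values), going through a temporary only when a direct cast won't do.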
RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try to avoid going through a temporary in some easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress()->getType()->getPointerElementType();
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *Temp;
  bool TempIsVolatile = false;
  CharUnits TempAlignment;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddr();
    TempAlignment = getValueAlignment();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
    TempAlignment = getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
      ->setVolatile(TempIsVolatile);

  return convertTempToRValue(Temp, ResultSlot, Loc, AsValue);
}

void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, translateAtomicOrdering(AO))),
           CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  llvm::Value *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  Load->setAlignment(getAtomicAlignment().getQuantity());
  if (IsVolatile)
    Load->setVolatile(true);
  if (LVal.getTBAAInfo())
    CGF.CGM.DecorateInstruction(Load, LVal.getTBAAInfo());
  return Load;
}

/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// the operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
}

/// A type is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* we know the access is volatile
/// and the operation can be performed without a libcall.
bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty,
                                                    bool IsVolatile) const {
  // An atomic is inline if we don't need to use a libcall (e.g. it is builtin).
  bool AtomicIsInline = getContext().getTargetInfo().hasBuiltinAtomic(
      getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty));
  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
}
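
/// Emit an atomic load using the default ordering: seq_cst for true atomic
/// types, acquire (with forced volatility) for /volatile:ms accesses.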
RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::SequentiallyConsistent;
  } else {
    AO = llvm::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}

RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    llvm::Value *TempAddr;
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddr();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr, AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding. Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(getAtomicAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || LVal.isVolatileQualified()),
                          LVal.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType(),
                                     getAtomicAlignment());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}

llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
    llvm::Value *Value = RVal.getScalarVal();
    if (isa<llvm::IntegerType>(Value->getType()))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (isa<llvm::PointerType>(Value->getType()))
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }
  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  llvm::Value *Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = emitCastToAtomicIntPointer(Addr);
  return CGF.Builder.CreateAlignedLoad(Addr,
                                       getAtomicAlignment().getQuantity());
}
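
// Illustrative sketch (not from the original source): for a scalar that is
// already integer-sized, the fast path above avoids a round trip through
// memory, e.g. an _Atomic(float) value is converted with something like
//
//   %int.val = bitcast float %val to i32
//
// while pointers go through ptrtoint; anything else (padded or non-simple
// cases) is spilled to a temporary and reloaded as the atomic integer type.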

std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic store.
  auto *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}
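
// For illustration only (not part of the original source): the cmpxchg
// instruction built here yields an { iN, i1 } pair, roughly
//
//   %pair = cmpxchg i32* %addr, i32 %expected, i32 %desired seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0   ; previously stored value
//   %ok   = extractvalue { i32, i1 } %pair, 1   ; success flag
//
// (exact textual syntax varies with the LLVM version); the two extractvalue
// results are what this function returns as a pair.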

llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  // void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, translateAtomicOrdering(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, translateAtomicOrdering(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);
  return SuccessFailureRVal.getScalarVal();
}
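
// Hypothetical illustration (not from the original source): for a 16-byte
// object on a target without a native 128-bit cmpxchg, the call emitted here
// corresponds roughly to
//
//   bool ok = __atomic_compare_exchange(16, &obj, &expected, &desired,
//                                       /*success=*/5, /*failure=*/2);
//
// where 5 and 2 are the C11 memory_order values for seq_cst and acquire; on
// failure the runtime writes the observed value back through &expected, which
// is why callers re-read ExpectedAddr afterwards.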

std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  if (Failure >= Success)
    // Don't assert on undefined behavior.
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce a source address.
    auto *ExpectedAddr = materializeRValue(Expected);
    auto *DesiredAddr = materializeRValue(Desired);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr,
                                                 Success, Failure);
    return std::make_pair(
        convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                            SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected);
  auto *DesiredVal = convertRValueToInt(Desired);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                                SourceLocation(), /*AsValue=*/false),
      Res.second);
}

static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
                      llvm::Value *DesiredAddr) {
  llvm::Value *Ptr = nullptr;
  LValue UpdateLVal;
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal =
        LValue::MakeAddr(DesiredAddr, AtomicLVal.getType(),
                         AtomicLVal.getAlignment(), CGF.CGM.getContext());
  } else {
    // Build new lvalue for temp address
    Ptr = Atomics.materializeRValue(OldRVal);
    if (AtomicLVal.isBitField()) {
      UpdateLVal =
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(), AtomicLVal.getAlignment());
      DesiredLVal =
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(), AtomicLVal.getAlignment());
    } else if (AtomicLVal.isVectorElt()) {
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
                                         AtomicLVal.getType(),
                                         AtomicLVal.getAlignment());
      DesiredLVal = LValue::MakeVectorElt(
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
          AtomicLVal.getAlignment());
    } else {
      assert(AtomicLVal.isExtVectorElt());
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
                                            AtomicLVal.getType(),
                                            AtomicLVal.getAlignment());
      DesiredLVal = LValue::MakeExtVectorElt(
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
          AtomicLVal.getAlignment());
    }
    UpdateLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
    DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
  }
  // Store new value in the corresponding memory area
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}

void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
  llvm::Value *ExpectedAddr = CreateTempAlloca();
  EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  auto *DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    auto *OldVal = CGF.Builder.CreateAlignedLoad(
        ExpectedAddr, getAtomicAlignment().getQuantity());
    CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
                                   getAtomicAlignment().getQuantity());
  }
  auto OldRVal = convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                     SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  auto *NewAtomicAddr = CreateTempAlloca();
  auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
                                   getAtomicAlignment().getQuantity());
  }
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
      NewAtomicIntAddr, getAtomicAlignment().getQuantity());
  // Try to write new value using cmpxchg operation
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
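
// Illustrative sketch (not from the original source): the control flow built
// above and in EmitAtomicUpdateLibcall is the classic compare-and-swap update
// loop, roughly
//
//   old = atomic_load(addr, AO);
// atomic_cont:
//   desired = UpdateOp(old);
//   (old, ok) = cmpxchg(addr, old, desired, AO, Failure);
//   if (!ok) goto atomic_cont;
// atomic_exit:
//
// with the PHI node (native path) or the expected-value buffer (libcall path)
// carrying the freshly observed value back into the loop, so the two variants
// differ only in how the load and the compare-exchange are emitted.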

static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, llvm::Value *DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build new lvalue for temp address
  if (AtomicLVal.isBitField()) {
    DesiredLVal =
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                             AtomicLVal.getType(), AtomicLVal.getAlignment());
  } else if (AtomicLVal.isVectorElt()) {
    DesiredLVal =
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
                              AtomicLVal.getType(), AtomicLVal.getAlignment());
  } else {
    assert(AtomicLVal.isExtVectorElt());
    DesiredLVal = LValue::MakeExtVectorElt(
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
        AtomicLVal.getAlignment());
  }
  DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
  // Store new value in the corresponding memory area
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
  llvm::Value *ExpectedAddr = CreateTempAlloca();
  EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  auto *DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    auto *OldVal = CGF.Builder.CreateAlignedLoad(
        ExpectedAddr, getAtomicAlignment().getQuantity());
    CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
                                   getAtomicAlignment().getQuantity());
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  auto *NewAtomicAddr = CreateTempAlloca();
  auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
                                   getAtomicAlignment().getQuantity());
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
      NewAtomicIntAddr, getAtomicAlignment().getQuantity());
  // Try to write new value using cmpxchg operation
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}

void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::SequentiallyConsistent;
  } else {
    AO = llvm::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // If this is an initialization, just put the value there normally.
  if (LVal.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicAddress())),
               getContext().VoidPtrTy);
      args.add(RValue::get(EmitCastToVoidPtr(srcAddr)), getContext().VoidPtrTy);
      args.add(RValue::get(llvm::ConstantInt::get(
                   IntTy, AtomicInfo::translateAtomicOrdering(AO))),
               getContext().IntTy);
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    llvm::Value *addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr->getType()->getPointerElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    store->setAlignment(dest.getAlignment().getQuantity());
    if (IsVolatile)
      store->setVolatile(true);
    if (dest.getTBAAInfo())
      CGM.DecorateInstruction(store, dest.getTBAAInfo());
    return;
  }

  // Emit simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}

/// Emit a compare-and-exchange op for atomic type.
///
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}
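
// Hypothetical illustration (not from the original source): initializations
// such as
//
//   _Atomic(float) F = 1.0f;                     // TEK_Scalar path
//   _Atomic(float _Complex) C = 1.0f;            // TEK_Complex path
//   struct S { int a, b, c; };
//   _Atomic(struct S) X = (struct S){1, 2, 3};   // TEK_Aggregate path
//
// reach EmitAtomicInit; they use ordinary (non-atomic) stores because no
// other thread can legitimately observe the object while it is being
// constructed.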