//===-- AtomicExpandPass.cpp - Expand atomic instructions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// either (intrinsic-based) load-linked/store-conditional loops or
// AtomicCmpXchg.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"
namespace {
  class AtomicExpand: public FunctionPass {
    const TargetMachine *TM;
    const TargetLowering *TLI;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit AtomicExpand(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr) {
      initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

  private:
    bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                               bool IsStore, bool IsLoad);
    bool expandAtomicLoad(LoadInst *LI);
    bool expandAtomicLoadToLL(LoadInst *LI);
    bool expandAtomicLoadToCmpXchg(LoadInst *LI);
    bool expandAtomicStore(StoreInst *SI);
    bool tryExpandAtomicRMW(AtomicRMWInst *AI);
    bool expandAtomicRMWToLLSC(AtomicRMWInst *AI);
    bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI);
    bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
    bool isIdempotentRMW(AtomicRMWInst *AI);
    bool simplifyIdempotentRMW(AtomicRMWInst *AI);
  };
}

char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
                   "Expand Atomic calls in terms of either load-linked & store-conditional or cmpxchg",
                   false, false)

FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
  return new AtomicExpand(TM);
}
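// Walk the function once, collect every atomic instruction, then bracket
// atomics with target fences where requested and expand the loads, stores,
// RMWs and cmpxchgs that the target asks to have lowered at the IR level.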
bool AtomicExpand::runOnFunction(Function &F) {
  if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM->getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather
  // a list of all atomic instructions before we start.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
    if (I->isAtomic())
      AtomicInsts.push_back(&*I);
  }

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI || isa<FenceInst>(I)) &&
           "Unknown atomic instruction");

    auto FenceOrdering = Monotonic;
    bool IsStore, IsLoad;
    if (TLI->getInsertFencesForAtomic()) {
      if (LI && isAtLeastAcquire(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(Monotonic);
        IsStore = false;
        IsLoad = true;
      } else if (SI && isAtLeastRelease(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(Monotonic);
        IsStore = true;
        IsLoad = false;
      } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
                          isAtLeastAcquire(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(Monotonic);
        IsStore = IsLoad = true;
      } else if (CASI && !TLI->hasLoadLinkedStoreConditional() &&
                 (isAtLeastRelease(CASI->getSuccessOrdering()) ||
                  isAtLeastAcquire(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(Monotonic);
        CASI->setFailureOrdering(Monotonic);
        IsStore = IsLoad = true;
      }

      if (FenceOrdering != Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
      }
    }
    if (LI && TLI->shouldExpandAtomicLoadInIR(LI)) {
      MadeChange |= expandAtomicLoad(LI);
    } else if (SI && TLI->shouldExpandAtomicStoreInIR(SI)) {
      MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      //  - into a load if it is idempotent
      //  - into a Cmpxchg/LL-SC loop otherwise
      // We try them in that order.
      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI && TLI->hasLoadLinkedStoreConditional()) {
      MadeChange |= expandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}
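/// Surround \p I with target-specific leading and trailing fences for the
/// given ordering. Returns true if any fence was actually emitted.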
bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                         bool IsStore, bool IsLoad) {
  IRBuilder<> Builder(I);
  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);
  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
  // The trailing fence is emitted before the instruction instead of after
  // because there is no easy way of setting Builder insertion point after
  // an instruction. So we must erase it from the BB, and insert it back
  // in the right place.
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence) {
    TrailingFence->removeFromParent();
    TrailingFence->insertAfter(I);
  }

  return (LeadingFence || TrailingFence);
}
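/// Expand an atomic load either via a load-linked intrinsic or, when the
/// target has no LL/SC support, via a dummy cmpxchg that simply returns the
/// current value in memory.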
bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
  if (TLI->hasLoadLinkedStoreConditional())
    return expandAtomicLoadToLL(LI);
  else
    return expandAtomicLoadToCmpXchg(LI);
}
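// A sketch of the LL-based load expansion below: an atomic
//   %val = load atomic iN* %addr <order>
// becomes a single call to the target's load-linked intrinsic at the same
// ordering, and all uses of %val are rewritten to use that result.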
bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}
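/// When no LL/SC is available, an atomic load can be emulated with a cmpxchg
/// of a dummy (zero) value against itself: memory is never modified, but the
/// "loaded" half of the result is an atomic read. Roughly:
///   %pair = cmpxchg iN* %addr, iN 0, iN 0 <order>
///   %loaded = extractvalue { iN, i1 } %pair, 0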
bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}
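// A sketch of the store expansion below, for a store too wide to be natively
// atomic:
//   store atomic iN %val, iN* %addr <order>
// becomes
//   atomicrmw xchg iN* %addr, iN %val <order>
// whose (unused) result is then lowered like any other RMW.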
bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicStoreInIR in cases where this is required and possible.
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return tryExpandAtomicRMW(AI);
}
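/// Ask the target how (or whether) to expand this RMW in IR: not at all,
/// as a load-linked/store-conditional loop, or as a cmpxchg loop.
/// Returns true if the instruction was replaced.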
bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicRMWExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicRMWExpansionKind::LLSC: {
    assert(TLI->hasLoadLinkedStoreConditional() &&
           "TargetLowering requested we expand AtomicRMW instruction into "
           "load-linked/store-conditional combos, but such instructions aren't "
           "supported");
    return expandAtomicRMWToLLSC(AI);
  }
  case TargetLoweringBase::AtomicRMWExpansionKind::CmpXChg: {
    return expandAtomicRMWToCmpXchg(AI);
  }
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
}

/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}
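/// Expand an atomicrmw into an explicit load-linked/store-conditional retry
/// loop (the IR shape is sketched in the comment inside). The value loaded on
/// the final, successful iteration replaces the original instruction.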
bool AtomicExpand::expandAtomicRMWToLLSC(AtomicRMWInst *AI) {
  AtomicOrdering MemOpOrder = AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     fence?
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp ne i32 %stored, 0
  //     br i1 %try_again, label %loop, label %atomicrmw.end
  // atomicrmw.end:
  //     fence?
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());
  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();

  return true;
}
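/// Expand an atomicrmw into a compare-and-swap retry loop: load the current
/// value once, then keep computing the new value in registers and attempting
/// a cmpxchg until one succeeds. The value returned by the successful cmpxchg
/// replaces the original instruction.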
bool AtomicExpand::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI) {
  AtomicOrdering MemOpOrder =
      AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateLoad(Addr);
  // Atomics require at least natural alignment.
  InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits());
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);
  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Value *NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
  Loaded->addIncoming(NewLoaded, LoopBB);
  Value *Success = Builder.CreateExtractValue(Pair, 1, "success");
  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(NewLoaded);
  AI->eraseFromParent();

  return true;
}
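/// Expand a cmpxchg into an explicit LL/SC control-flow diamond (sketched in
/// the comment below). When the target wants IR fences, the leading fence is
/// emitted once up front and the trailing fence separately on the success and
/// failure paths, so the failure path can use the weaker failure ordering.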
bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If getInsertFencesForAtomic() returns true, then the target does not want
  // to deal with memory orders, and emitLeading/TrailingFence should take care
  // of everything. Otherwise, emitLeading/TrailingFence are no-op and we
  // should preserve the ordering.
  AtomicOrdering MemOpOrder =
      TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder;

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     fence?
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success, label %loop/%cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI, "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, FailureBB);
  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
  auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                        /*IsLoad=*/true);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TLI->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : LoopBB);

  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(FailureBB);
  TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate PHI.

  // Setup the builder so we can create any PHIs we need.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has
    // happened, so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}
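/// Return true if the RMW leaves memory unchanged, e.g.
///   atomicrmw add iN* %addr, iN 0    or    atomicrmw and iN* %addr, iN -1.
/// Such an operation only matters for its ordering and its returned value.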
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}
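/// Ask the target to lower an idempotent RMW into a (possibly fenced) atomic
/// load; if that load itself still needs IR-level expansion, expand it too.
/// Returns true if the original RMW was replaced.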
bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    if (TLI->shouldExpandAtomicLoadInIR(ResultingLoad))
      expandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}