//===-- AtomicExpandPass.cpp - Expand atomic instructions -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// either (intrinsic-based) load-linked/store-conditional loops or AtomicCmpXchg.
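// For example, on a target with load-linked/store-conditional instructions an
// atomicrmw becomes an LL/SC retry loop, while on other targets it becomes a
// cmpxchg loop; sketches of both expansions appear as comments in the
// functions below.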
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {
  class AtomicExpand: public FunctionPass {
    const TargetMachine *TM;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit AtomicExpand(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM) {
      initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

  private:
    bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                               bool IsStore, bool IsLoad);
    bool expandAtomicLoad(LoadInst *LI);
    bool expandAtomicLoadToLL(LoadInst *LI);
    bool expandAtomicLoadToCmpXchg(LoadInst *LI);
    bool expandAtomicStore(StoreInst *SI);
    bool expandAtomicRMW(AtomicRMWInst *AI);
    bool expandAtomicRMWToLLSC(AtomicRMWInst *AI);
    bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI);
    bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
    bool isIdempotentRMW(AtomicRMWInst *AI);
    bool simplifyIdempotentRMW(AtomicRMWInst *AI);
  };
}

char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
    "Expand Atomic calls in terms of either load-linked & store-conditional or cmpxchg",
    false, false)

FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
  return new AtomicExpand(TM);
}
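
// Targets typically schedule this pass early in their codegen pipeline, e.g.
// (a sketch) from a TargetPassConfig::addIRPasses() override:
//   addPass(createAtomicExpandPass(TM));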

bool AtomicExpand::runOnFunction(Function &F) {
  if (!TM || !TM->getSubtargetImpl()->enableAtomicExpand())
    return false;
  auto TargetLowering = TM->getSubtargetImpl()->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
    if (I->isAtomic())
      AtomicInsts.push_back(&*I);
  }

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI || isa<FenceInst>(I)) &&
           "Unknown atomic instruction");

    auto FenceOrdering = Monotonic;
    bool IsStore, IsLoad;
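    // On targets that want explicit fences (getInsertFencesForAtomic()), we
    // strip the ordering down to monotonic here and let bracketInstWithFences
    // re-introduce it as target-chosen leading/trailing fences.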
    if (TargetLowering->getInsertFencesForAtomic()) {
      if (LI && isAtLeastAcquire(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(Monotonic);
        IsStore = false;
        IsLoad = true;
      } else if (SI && isAtLeastRelease(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(Monotonic);
        IsStore = true;
        IsLoad = false;
      } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
                          isAtLeastAcquire(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(Monotonic);
        IsStore = IsLoad = true;
      } else if (CASI && !TargetLowering->hasLoadLinkedStoreConditional() &&
                    (isAtLeastRelease(CASI->getSuccessOrdering()) ||
                     isAtLeastAcquire(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(Monotonic);
        CASI->setFailureOrdering(Monotonic);
        IsStore = IsLoad = true;
      }

      if (FenceOrdering != Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
      }
    }

    if (LI && TargetLowering->shouldExpandAtomicLoadInIR(LI)) {
      MadeChange |= expandAtomicLoad(LI);
    } else if (SI && TargetLowering->shouldExpandAtomicStoreInIR(SI)) {
      MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent,
      // - into a CmpXchg/LL-SC loop otherwise.
      // We try them in that order.
      MadeChange |= (isIdempotentRMW(RMWI) &&
                        simplifyIdempotentRMW(RMWI)) ||
                    (TargetLowering->shouldExpandAtomicRMWInIR(RMWI) &&
                        expandAtomicRMW(RMWI));
    } else if (CASI && TargetLowering->hasLoadLinkedStoreConditional()) {
      MadeChange |= expandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}

bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                         bool IsStore, bool IsLoad) {
  IRBuilder<> Builder(I);

  auto LeadingFence =
      TM->getSubtargetImpl()->getTargetLowering()->emitLeadingFence(
      Builder, Order, IsStore, IsLoad);

  auto TrailingFence =
      TM->getSubtargetImpl()->getTargetLowering()->emitTrailingFence(
      Builder, Order, IsStore, IsLoad);
  // The trailing fence is emitted before the instruction rather than after it
  // because there is no easy way of setting the Builder's insertion point
  // after an instruction, so we must erase the fence from the BB and insert
  // it back in the right place.
  // We guard this because not every atomic operation generates a trailing
  // fence.
  if (TrailingFence) {
    TrailingFence->removeFromParent();
    TrailingFence->insertAfter(I);
  }

  return (LeadingFence || TrailingFence);
}

bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
  if (TM->getSubtargetImpl()
          ->getTargetLowering()
          ->hasLoadLinkedStoreConditional())
    return expandAtomicLoadToLL(LI);
  else
    return expandAtomicLoadToCmpXchg(LI);
}

bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);

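  // Whether the (DummyVal, DummyVal) cmpxchg below "succeeds" (storing 0 over
  // an existing 0) or fails, memory is left unchanged, and the first element
  // of the returned pair holds the value that was in memory.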
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store, so we replace them with an
  // atomic swap, which can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only return true in
  // shouldExpandAtomicStoreInIR in cases where this is required and possible.
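  // E.g. (a sketch of the rewrite):
  //     store atomic i64 %v, i64* %p release, align 8
  // becomes:
  //     %1 = atomicrmw xchg i64* %p, i64 %v release
  // with %1 unused, and the xchg is then expanded like any other RMW.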
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now that we have an appropriate swap instruction, lower it as usual.
  return expandAtomicRMW(AI);
}

bool AtomicExpand::expandAtomicRMW(AtomicRMWInst *AI) {
  if (TM->getSubtargetImpl()
          ->getTargetLowering()
          ->hasLoadLinkedStoreConditional())
    return expandAtomicRMWToLLSC(AI);
  else
    return expandAtomicRMWToCmpXchg(AI);
}

/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
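/// E.g. Add yields "%new = add iN %Loaded, %Inc"; the Min/Max variants have
/// no single IR instruction, so they are emitted as an icmp followed by a
/// select.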
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}

bool AtomicExpand::expandAtomicRMWToLLSC(AtomicRMWInst *AI) {
  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
  AtomicOrdering MemOpOrder = AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     fence?
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp ne i32 %stored, 0
  //     br i1 %try_again, label %loop, label %atomicrmw.end
  // atomicrmw.end:
  //     fence?
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI) {
  AtomicOrdering MemOpOrder =
      AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateLoad(Addr);
  // Atomics require at least natural alignment, i.e. the type size in
  // *bytes*; getPrimitiveSizeInBits() returns bits, hence the division by 8.
  InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Value *NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
  Loaded->addIncoming(NewLoaded, LoopBB);

  Value *Success = Builder.CreateExtractValue(Pair, 1, "success");
  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(NewLoaded);
  AI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If getInsertFencesForAtomic() returns true, then the target does not want
  // to deal with memory orders, and emitLeading/TrailingFence should take care
  // of everything. Otherwise, emitLeading/TrailingFence are no-ops and we
  // should preserve the ordering.
  AtomicOrdering MemOpOrder =
      TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder;

  // Given: cmpxchg iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     fence?
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq iN %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success, label %cmpxchg.start/%cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI, "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, FailureBB);
  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
  auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI.
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                        /*IsLoad=*/true);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");
  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TLI->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : LoopBB);

  // Make sure later instructions don't get reordered with a fence if necessary.
  Builder.SetInsertPoint(SuccessBB);
  TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(FailureBB);
  TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate PHI.

  // Setup the builder so we can create any PHIs we need.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now that we're no longer iterating through
  // them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return value that we don't understand
    // remains, so reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}

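/// Return true iff the RMW leaves memory unchanged, e.g.
/// "atomicrmw or i32* %p, i32 0" or "atomicrmw and i32* %p, i32 -1"; such
/// operations can be replaced by a simple (possibly fenced) load.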
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst* RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
    case AtomicRMWInst::Add:
    case AtomicRMWInst::Sub:
    case AtomicRMWInst::Or:
    case AtomicRMWInst::Xor:
      return C->isZero();
    case AtomicRMWInst::And:
      return C->isMinusOne();
    // FIXME: we could also treat Min/Max/UMin/UMax as idempotent against the
    // corresponding INT_MAX/INT_MIN/UINT_MAX/0 constants.
    default:
      return false;
  }
}

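/// Try to lower an idempotent RMW into a (possibly fenced) load via the
/// target hook; if the target then asks for that load to be expanded too,
/// oblige it.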
bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst* RMWI) {
  auto TLI = TM->getSubtargetImpl()->getTargetLowering();

  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    if (TLI->shouldExpandAtomicLoadInIR(ResultingLoad))
      expandAtomicLoad(ResultingLoad);
    return true;
  }

  return false;
}