1 //===- AtomicExpandPass.cpp - Expand atomic instructions ------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a pass (at IR level) to replace atomic instructions with
10 // __atomic_* library calls, or target specific instruction which implement the
11 // same semantics in a way which better fits the target backend.  This can
12 // include the use of (intrinsic-based) load-linked/store-conditional loops,
13 // AtomicCmpXchg, or type coercions.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/CodeGen/AtomicExpandUtils.h"
21 #include "llvm/CodeGen/RuntimeLibcalls.h"
22 #include "llvm/CodeGen/TargetLowering.h"
23 #include "llvm/CodeGen/TargetPassConfig.h"
24 #include "llvm/CodeGen/TargetSubtargetInfo.h"
25 #include "llvm/CodeGen/ValueTypes.h"
26 #include "llvm/IR/Attributes.h"
27 #include "llvm/IR/BasicBlock.h"
28 #include "llvm/IR/Constant.h"
29 #include "llvm/IR/Constants.h"
30 #include "llvm/IR/DataLayout.h"
31 #include "llvm/IR/DerivedTypes.h"
32 #include "llvm/IR/Function.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/InstIterator.h"
35 #include "llvm/IR/Instruction.h"
36 #include "llvm/IR/Instructions.h"
37 #include "llvm/IR/Module.h"
38 #include "llvm/IR/Type.h"
39 #include "llvm/IR/User.h"
40 #include "llvm/IR/Value.h"
41 #include "llvm/InitializePasses.h"
42 #include "llvm/Pass.h"
43 #include "llvm/Support/AtomicOrdering.h"
44 #include "llvm/Support/Casting.h"
45 #include "llvm/Support/Debug.h"
46 #include "llvm/Support/ErrorHandling.h"
47 #include "llvm/Support/raw_ostream.h"
48 #include "llvm/Target/TargetMachine.h"
49 #include <cassert>
50 #include <cstdint>
51 #include <iterator>
52 
53 using namespace llvm;
54 
55 #define DEBUG_TYPE "atomic-expand"
56 
57 namespace {
58 
59   class AtomicExpand: public FunctionPass {
60     const TargetLowering *TLI = nullptr;
61 
62   public:
63     static char ID; // Pass identification, replacement for typeid
64 
65     AtomicExpand() : FunctionPass(ID) {
66       initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
67     }
68 
69     bool runOnFunction(Function &F) override;
70 
71   private:
72     bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
73     IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
74     LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
75     bool tryExpandAtomicLoad(LoadInst *LI);
76     bool expandAtomicLoadToLL(LoadInst *LI);
77     bool expandAtomicLoadToCmpXchg(LoadInst *LI);
78     StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
79     bool expandAtomicStore(StoreInst *SI);
80     bool tryExpandAtomicRMW(AtomicRMWInst *AI);
81     Value *
82     insertRMWLLSCLoop(IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
83                       AtomicOrdering MemOpOrder,
84                       function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
85     void expandAtomicOpToLLSC(
86         Instruction *I, Type *ResultTy, Value *Addr, AtomicOrdering MemOpOrder,
87         function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
88     void expandPartwordAtomicRMW(
89         AtomicRMWInst *I,
90         TargetLoweringBase::AtomicExpansionKind ExpansionKind);
91     AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI);
92     void expandPartwordCmpXchg(AtomicCmpXchgInst *I);
93     void expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI);
94     void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);
95 
96     AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
97     static Value *insertRMWCmpXchgLoop(
98         IRBuilder<> &Builder, Type *ResultType, Value *Addr,
99         AtomicOrdering MemOpOrder,
100         function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
101         CreateCmpXchgInstFun CreateCmpXchg);
102     bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);
103 
104     bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
105     bool isIdempotentRMW(AtomicRMWInst *RMWI);
106     bool simplifyIdempotentRMW(AtomicRMWInst *RMWI);
107 
108     bool expandAtomicOpToLibcall(Instruction *I, unsigned Size, unsigned Align,
109                                  Value *PointerOperand, Value *ValueOperand,
110                                  Value *CASExpected, AtomicOrdering Ordering,
111                                  AtomicOrdering Ordering2,
112                                  ArrayRef<RTLIB::Libcall> Libcalls);
113     void expandAtomicLoadToLibcall(LoadInst *LI);
114     void expandAtomicStoreToLibcall(StoreInst *LI);
115     void expandAtomicRMWToLibcall(AtomicRMWInst *I);
116     void expandAtomicCASToLibcall(AtomicCmpXchgInst *I);
117 
118     friend bool
119     llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
120                                    CreateCmpXchgInstFun CreateCmpXchg);
121   };
122 
123 } // end anonymous namespace
124 
125 char AtomicExpand::ID = 0;
126 
127 char &llvm::AtomicExpandID = AtomicExpand::ID;
128 
129 INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions",
130                 false, false)
131 
132 FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }
133 
134 // Helper functions to retrieve the size of atomic instructions.
135 static unsigned getAtomicOpSize(LoadInst *LI) {
136   const DataLayout &DL = LI->getModule()->getDataLayout();
137   return DL.getTypeStoreSize(LI->getType());
138 }
139 
140 static unsigned getAtomicOpSize(StoreInst *SI) {
141   const DataLayout &DL = SI->getModule()->getDataLayout();
142   return DL.getTypeStoreSize(SI->getValueOperand()->getType());
143 }
144 
145 static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
146   const DataLayout &DL = RMWI->getModule()->getDataLayout();
147   return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
148 }
149 
150 static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
151   const DataLayout &DL = CASI->getModule()->getDataLayout();
152   return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
153 }
154 
155 // Helper functions to retrieve the alignment of atomic instructions.
156 static unsigned getAtomicOpAlign(LoadInst *LI) {
157   unsigned Align = LI->getAlignment();
158   // In the future, if this IR restriction is relaxed, we should
159   // return DataLayout::getABITypeAlignment when there's no align
160   // value.
161   assert(Align != 0 && "An atomic LoadInst always has an explicit alignment");
162   return Align;
163 }
164 
165 static unsigned getAtomicOpAlign(StoreInst *SI) {
166   unsigned Align = SI->getAlignment();
167   // In the future, if this IR restriction is relaxed, we should
168   // return DataLayout::getABITypeAlignment when there's no align
169   // value.
170   assert(Align != 0 && "An atomic StoreInst always has an explicit alignment");
171   return Align;
172 }
173 
174 static unsigned getAtomicOpAlign(AtomicRMWInst *RMWI) {
175   // TODO(PR27168): This instruction has no alignment attribute, but unlike the
176   // default alignment for load/store, the default here is to assume
177   // it has NATURAL alignment, not DataLayout-specified alignment.
178   const DataLayout &DL = RMWI->getModule()->getDataLayout();
179   return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
180 }
181 
182 static unsigned getAtomicOpAlign(AtomicCmpXchgInst *CASI) {
183   // TODO(PR27168): same comment as above.
184   const DataLayout &DL = CASI->getModule()->getDataLayout();
185   return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
186 }
187 
188 // Determine if a particular atomic operation has a supported size,
189 // and is of appropriate alignment, to be passed through for target
190 // lowering. (Versus turning into a __atomic libcall)
191 template <typename Inst>
192 static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
193   unsigned Size = getAtomicOpSize(I);
194   unsigned Align = getAtomicOpAlign(I);
195   return Align >= Size && Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
196 }
197 
198 bool AtomicExpand::runOnFunction(Function &F) {
199   auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
200   if (!TPC)
201     return false;
202 
203   auto &TM = TPC->getTM<TargetMachine>();
204   if (!TM.getSubtargetImpl(F)->enableAtomicExpand())
205     return false;
206   TLI = TM.getSubtargetImpl(F)->getTargetLowering();
207 
208   SmallVector<Instruction *, 1> AtomicInsts;
209 
210   // Changing control-flow while iterating through it is a bad idea, so gather a
211   // list of all atomic instructions before we start.
212   for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) {
213     Instruction *I = &*II;
214     if (I->isAtomic() && !isa<FenceInst>(I))
215       AtomicInsts.push_back(I);
216   }
217 
218   bool MadeChange = false;
219   for (auto I : AtomicInsts) {
220     auto LI = dyn_cast<LoadInst>(I);
221     auto SI = dyn_cast<StoreInst>(I);
222     auto RMWI = dyn_cast<AtomicRMWInst>(I);
223     auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
224     assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");
225 
226     // If the Size/Alignment is not supported, replace with a libcall.
227     if (LI) {
228       if (!atomicSizeSupported(TLI, LI)) {
229         expandAtomicLoadToLibcall(LI);
230         MadeChange = true;
231         continue;
232       }
233     } else if (SI) {
234       if (!atomicSizeSupported(TLI, SI)) {
235         expandAtomicStoreToLibcall(SI);
236         MadeChange = true;
237         continue;
238       }
239     } else if (RMWI) {
240       if (!atomicSizeSupported(TLI, RMWI)) {
241         expandAtomicRMWToLibcall(RMWI);
242         MadeChange = true;
243         continue;
244       }
245     } else if (CASI) {
246       if (!atomicSizeSupported(TLI, CASI)) {
247         expandAtomicCASToLibcall(CASI);
248         MadeChange = true;
249         continue;
250       }
251     }
252 
253     if (TLI->shouldInsertFencesForAtomic(I)) {
254       auto FenceOrdering = AtomicOrdering::Monotonic;
255       if (LI && isAcquireOrStronger(LI->getOrdering())) {
256         FenceOrdering = LI->getOrdering();
257         LI->setOrdering(AtomicOrdering::Monotonic);
258       } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
259         FenceOrdering = SI->getOrdering();
260         SI->setOrdering(AtomicOrdering::Monotonic);
261       } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
262                           isAcquireOrStronger(RMWI->getOrdering()))) {
263         FenceOrdering = RMWI->getOrdering();
264         RMWI->setOrdering(AtomicOrdering::Monotonic);
265       } else if (CASI &&
266                  TLI->shouldExpandAtomicCmpXchgInIR(CASI) ==
267                      TargetLoweringBase::AtomicExpansionKind::None &&
268                  (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
269                   isAcquireOrStronger(CASI->getSuccessOrdering()))) {
270         // If a compare and swap is lowered to LL/SC, we can do smarter fence
271         // insertion, with a stronger one on the success path than on the
272         // failure path. As a result, fence insertion is directly done by
273         // expandAtomicCmpXchg in that case.
274         FenceOrdering = CASI->getSuccessOrdering();
275         CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
276         CASI->setFailureOrdering(AtomicOrdering::Monotonic);
277       }
278 
279       if (FenceOrdering != AtomicOrdering::Monotonic) {
280         MadeChange |= bracketInstWithFences(I, FenceOrdering);
281       }
282     }
283 
284     if (LI) {
285       if (LI->getType()->isFloatingPointTy()) {
286         // TODO: add a TLI hook to control this so that each target can
287         // convert to lowering the original type one at a time.
288         LI = convertAtomicLoadToIntegerType(LI);
289         assert(LI->getType()->isIntegerTy() && "invariant broken");
290         MadeChange = true;
291       }
292 
293       MadeChange |= tryExpandAtomicLoad(LI);
294     } else if (SI) {
295       if (SI->getValueOperand()->getType()->isFloatingPointTy()) {
296         // TODO: add a TLI hook to control this so that each target can
297         // convert to lowering the original type one at a time.
298         SI = convertAtomicStoreToIntegerType(SI);
299         assert(SI->getValueOperand()->getType()->isIntegerTy() &&
300                "invariant broken");
301         MadeChange = true;
302       }
303 
304       if (TLI->shouldExpandAtomicStoreInIR(SI))
305         MadeChange |= expandAtomicStore(SI);
306     } else if (RMWI) {
307       // There are two different ways of expanding RMW instructions:
308       // - into a load if it is idempotent
309       // - into a Cmpxchg/LL-SC loop otherwise
310       // we try them in that order.
311 
312       if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
313         MadeChange = true;
314       } else {
315         unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
316         unsigned ValueSize = getAtomicOpSize(RMWI);
317         AtomicRMWInst::BinOp Op = RMWI->getOperation();
318         if (ValueSize < MinCASSize &&
319             (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
320              Op == AtomicRMWInst::And)) {
321           RMWI = widenPartwordAtomicRMW(RMWI);
322           MadeChange = true;
323         }
324 
325         MadeChange |= tryExpandAtomicRMW(RMWI);
326       }
327     } else if (CASI) {
328       // TODO: when we're ready to make the change at the IR level, we can
329       // extend convertCmpXchgToInteger for floating point too.
330       assert(!CASI->getCompareOperand()->getType()->isFloatingPointTy() &&
331              "unimplemented - floating point not legal at IR level");
332       if (CASI->getCompareOperand()->getType()->isPointerTy() ) {
333         // TODO: add a TLI hook to control this so that each target can
334         // convert to lowering the original type one at a time.
335         CASI = convertCmpXchgToIntegerType(CASI);
336         assert(CASI->getCompareOperand()->getType()->isIntegerTy() &&
337                "invariant broken");
338         MadeChange = true;
339       }
340 
341       MadeChange |= tryExpandAtomicCmpXchg(CASI);
342     }
343   }
344   return MadeChange;
345 }
346 
347 bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
348   IRBuilder<> Builder(I);
349 
350   auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);
351 
352   auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
353   // We have a guard here because not every atomic operation generates a
354   // trailing fence.
355   if (TrailingFence)
356     TrailingFence->moveAfter(I);
357 
358   return (LeadingFence || TrailingFence);
359 }
360 
361 /// Get the iX type with the same bitwidth as T.
362 IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
363                                                        const DataLayout &DL) {
364   EVT VT = TLI->getMemValueType(DL, T);
365   unsigned BitWidth = VT.getStoreSizeInBits();
366   assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
367   return IntegerType::get(T->getContext(), BitWidth);
368 }
369 
370 /// Convert an atomic load of a non-integral type to an integer load of the
371 /// equivalent bitwidth.  See the function comment on
372 /// convertAtomicStoreToIntegerType for background.
373 LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
374   auto *M = LI->getModule();
375   Type *NewTy = getCorrespondingIntegerType(LI->getType(),
376                                             M->getDataLayout());
377 
378   IRBuilder<> Builder(LI);
379 
380   Value *Addr = LI->getPointerOperand();
381   Type *PT = PointerType::get(NewTy,
382                               Addr->getType()->getPointerAddressSpace());
383   Value *NewAddr = Builder.CreateBitCast(Addr, PT);
384 
385   auto *NewLI = Builder.CreateLoad(NewTy, NewAddr);
386   NewLI->setAlignment(MaybeAlign(LI->getAlignment()));
387   NewLI->setVolatile(LI->isVolatile());
388   NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
389   LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");
390 
391   Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
392   LI->replaceAllUsesWith(NewVal);
393   LI->eraseFromParent();
394   return NewLI;
395 }
396 
397 bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
398   switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
399   case TargetLoweringBase::AtomicExpansionKind::None:
400     return false;
401   case TargetLoweringBase::AtomicExpansionKind::LLSC:
402     expandAtomicOpToLLSC(
403         LI, LI->getType(), LI->getPointerOperand(), LI->getOrdering(),
404         [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
405     return true;
406   case TargetLoweringBase::AtomicExpansionKind::LLOnly:
407     return expandAtomicLoadToLL(LI);
408   case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
409     return expandAtomicLoadToCmpXchg(LI);
410   default:
411     llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
412   }
413 }
414 
415 bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
416   IRBuilder<> Builder(LI);
417 
418   // On some architectures, load-linked instructions are atomic for larger
419   // sizes than normal loads. For example, the only 64-bit load guaranteed
420   // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
421   Value *Val =
422       TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());
423   TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
424 
425   LI->replaceAllUsesWith(Val);
426   LI->eraseFromParent();
427 
428   return true;
429 }
430 
431 bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
432   IRBuilder<> Builder(LI);
433   AtomicOrdering Order = LI->getOrdering();
434   if (Order == AtomicOrdering::Unordered)
435     Order = AtomicOrdering::Monotonic;
436 
437   Value *Addr = LI->getPointerOperand();
438   Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
439   Constant *DummyVal = Constant::getNullValue(Ty);
440 
441   Value *Pair = Builder.CreateAtomicCmpXchg(
442       Addr, DummyVal, DummyVal, Order,
443       AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
444   Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");
445 
446   LI->replaceAllUsesWith(Loaded);
447   LI->eraseFromParent();
448 
449   return true;
450 }
451 
452 /// Convert an atomic store of a non-integral type to an integer store of the
453 /// equivalent bitwidth.  We used to not support floating point or vector
454 /// atomics in the IR at all.  The backends learned to deal with the bitcast
455 /// idiom because that was the only way of expressing the notion of a atomic
456 /// float or vector store.  The long term plan is to teach each backend to
457 /// instruction select from the original atomic store, but as a migration
458 /// mechanism, we convert back to the old format which the backends understand.
459 /// Each backend will need individual work to recognize the new format.
460 StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
461   IRBuilder<> Builder(SI);
462   auto *M = SI->getModule();
463   Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
464                                             M->getDataLayout());
465   Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);
466 
467   Value *Addr = SI->getPointerOperand();
468   Type *PT = PointerType::get(NewTy,
469                               Addr->getType()->getPointerAddressSpace());
470   Value *NewAddr = Builder.CreateBitCast(Addr, PT);
471 
472   StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
473   NewSI->setAlignment(MaybeAlign(SI->getAlignment()));
474   NewSI->setVolatile(SI->isVolatile());
475   NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
476   LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
477   SI->eraseFromParent();
478   return NewSI;
479 }
480 
481 bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
482   // This function is only called on atomic stores that are too large to be
483   // atomic if implemented as a native store. So we replace them by an
484   // atomic swap, that can be implemented for example as a ldrex/strex on ARM
485   // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
486   // It is the responsibility of the target to only signal expansion via
487   // shouldExpandAtomicRMW in cases where this is required and possible.
488   IRBuilder<> Builder(SI);
489   AtomicRMWInst *AI =
490       Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
491                               SI->getValueOperand(), SI->getOrdering());
492   SI->eraseFromParent();
493 
494   // Now we have an appropriate swap instruction, lower it as usual.
495   return tryExpandAtomicRMW(AI);
496 }
497 
498 static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
499                                  Value *Loaded, Value *NewVal,
500                                  AtomicOrdering MemOpOrder,
501                                  Value *&Success, Value *&NewLoaded) {
502   Type *OrigTy = NewVal->getType();
503 
504   // This code can go away when cmpxchg supports FP types.
505   bool NeedBitcast = OrigTy->isFloatingPointTy();
506   if (NeedBitcast) {
507     IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
508     unsigned AS = Addr->getType()->getPointerAddressSpace();
509     Addr = Builder.CreateBitCast(Addr, IntTy->getPointerTo(AS));
510     NewVal = Builder.CreateBitCast(NewVal, IntTy);
511     Loaded = Builder.CreateBitCast(Loaded, IntTy);
512   }
513 
514   Value* Pair = Builder.CreateAtomicCmpXchg(
515       Addr, Loaded, NewVal, MemOpOrder,
516       AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
517   Success = Builder.CreateExtractValue(Pair, 1, "success");
518   NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
519 
520   if (NeedBitcast)
521     NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
522 }
523 
524 /// Emit IR to implement the given atomicrmw operation on values in registers,
525 /// returning the new value.
526 static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
527                               Value *Loaded, Value *Inc) {
528   Value *NewVal;
529   switch (Op) {
530   case AtomicRMWInst::Xchg:
531     return Inc;
532   case AtomicRMWInst::Add:
533     return Builder.CreateAdd(Loaded, Inc, "new");
534   case AtomicRMWInst::Sub:
535     return Builder.CreateSub(Loaded, Inc, "new");
536   case AtomicRMWInst::And:
537     return Builder.CreateAnd(Loaded, Inc, "new");
538   case AtomicRMWInst::Nand:
539     return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
540   case AtomicRMWInst::Or:
541     return Builder.CreateOr(Loaded, Inc, "new");
542   case AtomicRMWInst::Xor:
543     return Builder.CreateXor(Loaded, Inc, "new");
544   case AtomicRMWInst::Max:
545     NewVal = Builder.CreateICmpSGT(Loaded, Inc);
546     return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
547   case AtomicRMWInst::Min:
548     NewVal = Builder.CreateICmpSLE(Loaded, Inc);
549     return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
550   case AtomicRMWInst::UMax:
551     NewVal = Builder.CreateICmpUGT(Loaded, Inc);
552     return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
553   case AtomicRMWInst::UMin:
554     NewVal = Builder.CreateICmpULE(Loaded, Inc);
555     return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
556   case AtomicRMWInst::FAdd:
557     return Builder.CreateFAdd(Loaded, Inc, "new");
558   case AtomicRMWInst::FSub:
559     return Builder.CreateFSub(Loaded, Inc, "new");
560   default:
561     llvm_unreachable("Unknown atomic op");
562   }
563 }
564 
565 bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
566   switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
567   case TargetLoweringBase::AtomicExpansionKind::None:
568     return false;
569   case TargetLoweringBase::AtomicExpansionKind::LLSC: {
570     unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
571     unsigned ValueSize = getAtomicOpSize(AI);
572     if (ValueSize < MinCASSize) {
573       llvm_unreachable(
574           "MinCmpXchgSizeInBits not yet supported for LL/SC architectures.");
575     } else {
576       auto PerformOp = [&](IRBuilder<> &Builder, Value *Loaded) {
577         return performAtomicOp(AI->getOperation(), Builder, Loaded,
578                                AI->getValOperand());
579       };
580       expandAtomicOpToLLSC(AI, AI->getType(), AI->getPointerOperand(),
581                            AI->getOrdering(), PerformOp);
582     }
583     return true;
584   }
585   case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
586     unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
587     unsigned ValueSize = getAtomicOpSize(AI);
588     if (ValueSize < MinCASSize) {
589       // TODO: Handle atomicrmw fadd/fsub
590       if (AI->getType()->isFloatingPointTy())
591         return false;
592 
593       expandPartwordAtomicRMW(AI,
594                               TargetLoweringBase::AtomicExpansionKind::CmpXChg);
595     } else {
596       expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
597     }
598     return true;
599   }
600   case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
601     expandAtomicRMWToMaskedIntrinsic(AI);
602     return true;
603   }
604   default:
605     llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
606   }
607 }
608 
609 namespace {
610 
611 /// Result values from createMaskInstrs helper.
612 struct PartwordMaskValues {
613   Type *WordType;
614   Type *ValueType;
615   Value *AlignedAddr;
616   Value *ShiftAmt;
617   Value *Mask;
618   Value *Inv_Mask;
619 };
620 
621 } // end anonymous namespace
622 
623 /// This is a helper function which builds instructions to provide
624 /// values necessary for partword atomic operations. It takes an
625 /// incoming address, Addr, and ValueType, and constructs the address,
626 /// shift-amounts and masks needed to work with a larger value of size
627 /// WordSize.
628 ///
629 /// AlignedAddr: Addr rounded down to a multiple of WordSize
630 ///
631 /// ShiftAmt: Number of bits to right-shift a WordSize value loaded
632 ///           from AlignAddr for it to have the same value as if
633 ///           ValueType was loaded from Addr.
634 ///
635 /// Mask: Value to mask with the value loaded from AlignAddr to
636 ///       include only the part that would've been loaded from Addr.
637 ///
638 /// Inv_Mask: The inverse of Mask.
639 static PartwordMaskValues createMaskInstrs(IRBuilder<> &Builder, Instruction *I,
640                                            Type *ValueType, Value *Addr,
641                                            unsigned WordSize) {
642   PartwordMaskValues Ret;
643 
644   BasicBlock *BB = I->getParent();
645   Function *F = BB->getParent();
646   Module *M = I->getModule();
647 
648   LLVMContext &Ctx = F->getContext();
649   const DataLayout &DL = M->getDataLayout();
650 
651   unsigned ValueSize = DL.getTypeStoreSize(ValueType);
652 
653   assert(ValueSize < WordSize);
654 
655   Ret.ValueType = ValueType;
656   Ret.WordType = Type::getIntNTy(Ctx, WordSize * 8);
657 
658   Type *WordPtrType =
659       Ret.WordType->getPointerTo(Addr->getType()->getPointerAddressSpace());
660 
661   Value *AddrInt = Builder.CreatePtrToInt(Addr, DL.getIntPtrType(Ctx));
662   Ret.AlignedAddr = Builder.CreateIntToPtr(
663       Builder.CreateAnd(AddrInt, ~(uint64_t)(WordSize - 1)), WordPtrType,
664       "AlignedAddr");
665 
666   Value *PtrLSB = Builder.CreateAnd(AddrInt, WordSize - 1, "PtrLSB");
667   if (DL.isLittleEndian()) {
668     // turn bytes into bits
669     Ret.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
670   } else {
671     // turn bytes into bits, and count from the other side.
672     Ret.ShiftAmt =
673         Builder.CreateShl(Builder.CreateXor(PtrLSB, WordSize - ValueSize), 3);
674   }
675 
676   Ret.ShiftAmt = Builder.CreateTrunc(Ret.ShiftAmt, Ret.WordType, "ShiftAmt");
677   Ret.Mask = Builder.CreateShl(
678       ConstantInt::get(Ret.WordType, (1 << (ValueSize * 8)) - 1), Ret.ShiftAmt,
679       "Mask");
680   Ret.Inv_Mask = Builder.CreateNot(Ret.Mask, "Inv_Mask");
681 
682   return Ret;
683 }
684 
685 /// Emit IR to implement a masked version of a given atomicrmw
686 /// operation. (That is, only the bits under the Mask should be
687 /// affected by the operation)
688 static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
689                                     IRBuilder<> &Builder, Value *Loaded,
690                                     Value *Shifted_Inc, Value *Inc,
691                                     const PartwordMaskValues &PMV) {
692   // TODO: update to use
693   // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge in order
694   // to merge bits from two values without requiring PMV.Inv_Mask.
695   switch (Op) {
696   case AtomicRMWInst::Xchg: {
697     Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
698     Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
699     return FinalVal;
700   }
701   case AtomicRMWInst::Or:
702   case AtomicRMWInst::Xor:
703   case AtomicRMWInst::And:
704     llvm_unreachable("Or/Xor/And handled by widenPartwordAtomicRMW");
705   case AtomicRMWInst::Add:
706   case AtomicRMWInst::Sub:
707   case AtomicRMWInst::Nand: {
708     // The other arithmetic ops need to be masked into place.
709     Value *NewVal = performAtomicOp(Op, Builder, Loaded, Shifted_Inc);
710     Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
711     Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
712     Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
713     return FinalVal;
714   }
715   case AtomicRMWInst::Max:
716   case AtomicRMWInst::Min:
717   case AtomicRMWInst::UMax:
718   case AtomicRMWInst::UMin: {
719     // Finally, comparison ops will operate on the full value, so
720     // truncate down to the original size, and expand out again after
721     // doing the operation.
722     Value *Loaded_Shiftdown = Builder.CreateTrunc(
723         Builder.CreateLShr(Loaded, PMV.ShiftAmt), PMV.ValueType);
724     Value *NewVal = performAtomicOp(Op, Builder, Loaded_Shiftdown, Inc);
725     Value *NewVal_Shiftup = Builder.CreateShl(
726         Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
727     Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
728     Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shiftup);
729     return FinalVal;
730   }
731   default:
732     llvm_unreachable("Unknown atomic op");
733   }
734 }
735 
736 /// Expand a sub-word atomicrmw operation into an appropriate
737 /// word-sized operation.
738 ///
739 /// It will create an LL/SC or cmpxchg loop, as appropriate, the same
740 /// way as a typical atomicrmw expansion. The only difference here is
741 /// that the operation inside of the loop must operate only upon a
742 /// part of the value.
743 void AtomicExpand::expandPartwordAtomicRMW(
744     AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
745   assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg);
746 
747   AtomicOrdering MemOpOrder = AI->getOrdering();
748 
749   IRBuilder<> Builder(AI);
750 
751   PartwordMaskValues PMV =
752       createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
753                        TLI->getMinCmpXchgSizeInBits() / 8);
754 
755   Value *ValOperand_Shifted =
756       Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
757                         PMV.ShiftAmt, "ValOperand_Shifted");
758 
759   auto PerformPartwordOp = [&](IRBuilder<> &Builder, Value *Loaded) {
760     return performMaskedAtomicOp(AI->getOperation(), Builder, Loaded,
761                                  ValOperand_Shifted, AI->getValOperand(), PMV);
762   };
763 
764   // TODO: When we're ready to support LLSC conversions too, use
765   // insertRMWLLSCLoop here for ExpansionKind==LLSC.
766   Value *OldResult =
767       insertRMWCmpXchgLoop(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder,
768                            PerformPartwordOp, createCmpXchgInstFun);
769   Value *FinalOldResult = Builder.CreateTrunc(
770       Builder.CreateLShr(OldResult, PMV.ShiftAmt), PMV.ValueType);
771   AI->replaceAllUsesWith(FinalOldResult);
772   AI->eraseFromParent();
773 }
774 
775 // Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
776 AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
777   IRBuilder<> Builder(AI);
778   AtomicRMWInst::BinOp Op = AI->getOperation();
779 
780   assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
781           Op == AtomicRMWInst::And) &&
782          "Unable to widen operation");
783 
784   PartwordMaskValues PMV =
785       createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
786                        TLI->getMinCmpXchgSizeInBits() / 8);
787 
788   Value *ValOperand_Shifted =
789       Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
790                         PMV.ShiftAmt, "ValOperand_Shifted");
791 
792   Value *NewOperand;
793 
794   if (Op == AtomicRMWInst::And)
795     NewOperand =
796         Builder.CreateOr(PMV.Inv_Mask, ValOperand_Shifted, "AndOperand");
797   else
798     NewOperand = ValOperand_Shifted;
799 
800   AtomicRMWInst *NewAI = Builder.CreateAtomicRMW(Op, PMV.AlignedAddr,
801                                                  NewOperand, AI->getOrdering());
802 
803   Value *FinalOldResult = Builder.CreateTrunc(
804       Builder.CreateLShr(NewAI, PMV.ShiftAmt), PMV.ValueType);
805   AI->replaceAllUsesWith(FinalOldResult);
806   AI->eraseFromParent();
807   return NewAI;
808 }
809 
810 void AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
811   // The basic idea here is that we're expanding a cmpxchg of a
812   // smaller memory size up to a word-sized cmpxchg. To do this, we
813   // need to add a retry-loop for strong cmpxchg, so that
814   // modifications to other parts of the word don't cause a spurious
815   // failure.
816 
817   // This generates code like the following:
818   //     [[Setup mask values PMV.*]]
819   //     %NewVal_Shifted = shl i32 %NewVal, %PMV.ShiftAmt
820   //     %Cmp_Shifted = shl i32 %Cmp, %PMV.ShiftAmt
821   //     %InitLoaded = load i32* %addr
822   //     %InitLoaded_MaskOut = and i32 %InitLoaded, %PMV.Inv_Mask
823   //     br partword.cmpxchg.loop
824   // partword.cmpxchg.loop:
825   //     %Loaded_MaskOut = phi i32 [ %InitLoaded_MaskOut, %entry ],
826   //        [ %OldVal_MaskOut, %partword.cmpxchg.failure ]
827   //     %FullWord_NewVal = or i32 %Loaded_MaskOut, %NewVal_Shifted
828   //     %FullWord_Cmp = or i32 %Loaded_MaskOut, %Cmp_Shifted
829   //     %NewCI = cmpxchg i32* %PMV.AlignedAddr, i32 %FullWord_Cmp,
830   //        i32 %FullWord_NewVal success_ordering failure_ordering
831   //     %OldVal = extractvalue { i32, i1 } %NewCI, 0
832   //     %Success = extractvalue { i32, i1 } %NewCI, 1
833   //     br i1 %Success, label %partword.cmpxchg.end,
834   //        label %partword.cmpxchg.failure
835   // partword.cmpxchg.failure:
836   //     %OldVal_MaskOut = and i32 %OldVal, %PMV.Inv_Mask
837   //     %ShouldContinue = icmp ne i32 %Loaded_MaskOut, %OldVal_MaskOut
838   //     br i1 %ShouldContinue, label %partword.cmpxchg.loop,
839   //         label %partword.cmpxchg.end
840   // partword.cmpxchg.end:
841   //    %tmp1 = lshr i32 %OldVal, %PMV.ShiftAmt
842   //    %FinalOldVal = trunc i32 %tmp1 to i8
843   //    %tmp2 = insertvalue { i8, i1 } undef, i8 %FinalOldVal, 0
844   //    %Res = insertvalue { i8, i1 } %25, i1 %Success, 1
845 
846   Value *Addr = CI->getPointerOperand();
847   Value *Cmp = CI->getCompareOperand();
848   Value *NewVal = CI->getNewValOperand();
849 
850   BasicBlock *BB = CI->getParent();
851   Function *F = BB->getParent();
852   IRBuilder<> Builder(CI);
853   LLVMContext &Ctx = Builder.getContext();
854 
855   const int WordSize = TLI->getMinCmpXchgSizeInBits() / 8;
856 
857   BasicBlock *EndBB =
858       BB->splitBasicBlock(CI->getIterator(), "partword.cmpxchg.end");
859   auto FailureBB =
860       BasicBlock::Create(Ctx, "partword.cmpxchg.failure", F, EndBB);
861   auto LoopBB = BasicBlock::Create(Ctx, "partword.cmpxchg.loop", F, FailureBB);
862 
863   // The split call above "helpfully" added a branch at the end of BB
864   // (to the wrong place).
865   std::prev(BB->end())->eraseFromParent();
866   Builder.SetInsertPoint(BB);
867 
868   PartwordMaskValues PMV = createMaskInstrs(
869       Builder, CI, CI->getCompareOperand()->getType(), Addr, WordSize);
870 
871   // Shift the incoming values over, into the right location in the word.
872   Value *NewVal_Shifted =
873       Builder.CreateShl(Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
874   Value *Cmp_Shifted =
875       Builder.CreateShl(Builder.CreateZExt(Cmp, PMV.WordType), PMV.ShiftAmt);
876 
877   // Load the entire current word, and mask into place the expected and new
878   // values
879   LoadInst *InitLoaded = Builder.CreateLoad(PMV.WordType, PMV.AlignedAddr);
880   InitLoaded->setVolatile(CI->isVolatile());
881   Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);
882   Builder.CreateBr(LoopBB);
883 
884   // partword.cmpxchg.loop:
885   Builder.SetInsertPoint(LoopBB);
886   PHINode *Loaded_MaskOut = Builder.CreatePHI(PMV.WordType, 2);
887   Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);
888 
889   // Mask/Or the expected and new values into place in the loaded word.
890   Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
891   Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
892   AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
893       PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, CI->getSuccessOrdering(),
894       CI->getFailureOrdering(), CI->getSyncScopeID());
895   NewCI->setVolatile(CI->isVolatile());
896   // When we're building a strong cmpxchg, we need a loop, so you
897   // might think we could use a weak cmpxchg inside. But, using strong
898   // allows the below comparison for ShouldContinue, and we're
899   // expecting the underlying cmpxchg to be a machine instruction,
900   // which is strong anyways.
901   NewCI->setWeak(CI->isWeak());
902 
903   Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
904   Value *Success = Builder.CreateExtractValue(NewCI, 1);
905 
906   if (CI->isWeak())
907     Builder.CreateBr(EndBB);
908   else
909     Builder.CreateCondBr(Success, EndBB, FailureBB);
910 
911   // partword.cmpxchg.failure:
912   Builder.SetInsertPoint(FailureBB);
913   // Upon failure, verify that the masked-out part of the loaded value
914   // has been modified.  If it didn't, abort the cmpxchg, since the
915   // masked-in part must've.
916   Value *OldVal_MaskOut = Builder.CreateAnd(OldVal, PMV.Inv_Mask);
917   Value *ShouldContinue = Builder.CreateICmpNE(Loaded_MaskOut, OldVal_MaskOut);
918   Builder.CreateCondBr(ShouldContinue, LoopBB, EndBB);
919 
920   // Add the second value to the phi from above
921   Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);
922 
923   // partword.cmpxchg.end:
924   Builder.SetInsertPoint(CI);
925 
926   Value *FinalOldVal = Builder.CreateTrunc(
927       Builder.CreateLShr(OldVal, PMV.ShiftAmt), PMV.ValueType);
928   Value *Res = UndefValue::get(CI->getType());
929   Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
930   Res = Builder.CreateInsertValue(Res, Success, 1);
931 
932   CI->replaceAllUsesWith(Res);
933   CI->eraseFromParent();
934 }
935 
936 void AtomicExpand::expandAtomicOpToLLSC(
937     Instruction *I, Type *ResultType, Value *Addr, AtomicOrdering MemOpOrder,
938     function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
939   IRBuilder<> Builder(I);
940   Value *Loaded =
941       insertRMWLLSCLoop(Builder, ResultType, Addr, MemOpOrder, PerformOp);
942 
943   I->replaceAllUsesWith(Loaded);
944   I->eraseFromParent();
945 }
946 
947 void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
948   IRBuilder<> Builder(AI);
949 
950   PartwordMaskValues PMV =
951       createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
952                        TLI->getMinCmpXchgSizeInBits() / 8);
953 
954   // The value operand must be sign-extended for signed min/max so that the
955   // target's signed comparison instructions can be used. Otherwise, just
956   // zero-ext.
957   Instruction::CastOps CastOp = Instruction::ZExt;
958   AtomicRMWInst::BinOp RMWOp = AI->getOperation();
959   if (RMWOp == AtomicRMWInst::Max || RMWOp == AtomicRMWInst::Min)
960     CastOp = Instruction::SExt;
961 
962   Value *ValOperand_Shifted = Builder.CreateShl(
963       Builder.CreateCast(CastOp, AI->getValOperand(), PMV.WordType),
964       PMV.ShiftAmt, "ValOperand_Shifted");
965   Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
966       Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
967       AI->getOrdering());
968   Value *FinalOldResult = Builder.CreateTrunc(
969       Builder.CreateLShr(OldResult, PMV.ShiftAmt), PMV.ValueType);
970   AI->replaceAllUsesWith(FinalOldResult);
971   AI->eraseFromParent();
972 }
973 
974 void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
975   IRBuilder<> Builder(CI);
976 
977   PartwordMaskValues PMV = createMaskInstrs(
978       Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
979       TLI->getMinCmpXchgSizeInBits() / 8);
980 
981   Value *CmpVal_Shifted = Builder.CreateShl(
982       Builder.CreateZExt(CI->getCompareOperand(), PMV.WordType), PMV.ShiftAmt,
983       "CmpVal_Shifted");
984   Value *NewVal_Shifted = Builder.CreateShl(
985       Builder.CreateZExt(CI->getNewValOperand(), PMV.WordType), PMV.ShiftAmt,
986       "NewVal_Shifted");
987   Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
988       Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
989       CI->getSuccessOrdering());
990   Value *FinalOldVal = Builder.CreateTrunc(
991       Builder.CreateLShr(OldVal, PMV.ShiftAmt), PMV.ValueType);
992 
993   Value *Res = UndefValue::get(CI->getType());
994   Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
995   Value *Success = Builder.CreateICmpEQ(
996       CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
997   Res = Builder.CreateInsertValue(Res, Success, 1);
998 
999   CI->replaceAllUsesWith(Res);
1000   CI->eraseFromParent();
1001 }
1002 
1003 Value *AtomicExpand::insertRMWLLSCLoop(
1004     IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
1005     AtomicOrdering MemOpOrder,
1006     function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
1007   LLVMContext &Ctx = Builder.getContext();
1008   BasicBlock *BB = Builder.GetInsertBlock();
1009   Function *F = BB->getParent();
1010 
1011   // Given: atomicrmw some_op iN* %addr, iN %incr ordering
1012   //
1013   // The standard expansion we produce is:
1014   //     [...]
1015   // atomicrmw.start:
1016   //     %loaded = @load.linked(%addr)
1017   //     %new = some_op iN %loaded, %incr
1018   //     %stored = @store_conditional(%new, %addr)
1019   //     %try_again = icmp i32 ne %stored, 0
1020   //     br i1 %try_again, label %loop, label %atomicrmw.end
1021   // atomicrmw.end:
1022   //     [...]
1023   BasicBlock *ExitBB =
1024       BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
1025   BasicBlock *LoopBB =  BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);
1026 
1027   // The split call above "helpfully" added a branch at the end of BB (to the
1028   // wrong place).
1029   std::prev(BB->end())->eraseFromParent();
1030   Builder.SetInsertPoint(BB);
1031   Builder.CreateBr(LoopBB);
1032 
1033   // Start the main loop block now that we've taken care of the preliminaries.
1034   Builder.SetInsertPoint(LoopBB);
1035   Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
1036 
1037   Value *NewVal = PerformOp(Builder, Loaded);
1038 
1039   Value *StoreSuccess =
1040       TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
1041   Value *TryAgain = Builder.CreateICmpNE(
1042       StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
1043   Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);
1044 
1045   Builder.SetInsertPoint(ExitBB, ExitBB->begin());
1046   return Loaded;
1047 }
1048 
1049 /// Convert an atomic cmpxchg of a non-integral type to an integer cmpxchg of
1050 /// the equivalent bitwidth.  We used to not support pointer cmpxchg in the
1051 /// IR.  As a migration step, we convert back to what use to be the standard
1052 /// way to represent a pointer cmpxchg so that we can update backends one by
1053 /// one.
1054 AtomicCmpXchgInst *AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
1055   auto *M = CI->getModule();
1056   Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
1057                                             M->getDataLayout());
1058 
1059   IRBuilder<> Builder(CI);
1060 
1061   Value *Addr = CI->getPointerOperand();
1062   Type *PT = PointerType::get(NewTy,
1063                               Addr->getType()->getPointerAddressSpace());
1064   Value *NewAddr = Builder.CreateBitCast(Addr, PT);
1065 
1066   Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
1067   Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);
1068 
1069 
1070   auto *NewCI = Builder.CreateAtomicCmpXchg(NewAddr, NewCmp, NewNewVal,
1071                                             CI->getSuccessOrdering(),
1072                                             CI->getFailureOrdering(),
1073                                             CI->getSyncScopeID());
1074   NewCI->setVolatile(CI->isVolatile());
1075   NewCI->setWeak(CI->isWeak());
1076   LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");
1077 
1078   Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
1079   Value *Succ = Builder.CreateExtractValue(NewCI, 1);
1080 
1081   OldVal = Builder.CreateIntToPtr(OldVal, CI->getCompareOperand()->getType());
1082 
1083   Value *Res = UndefValue::get(CI->getType());
1084   Res = Builder.CreateInsertValue(Res, OldVal, 0);
1085   Res = Builder.CreateInsertValue(Res, Succ, 1);
1086 
1087   CI->replaceAllUsesWith(Res);
1088   CI->eraseFromParent();
1089   return NewCI;
1090 }
1091 
1092 bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
1093   AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
1094   AtomicOrdering FailureOrder = CI->getFailureOrdering();
1095   Value *Addr = CI->getPointerOperand();
1096   BasicBlock *BB = CI->getParent();
1097   Function *F = BB->getParent();
1098   LLVMContext &Ctx = F->getContext();
1099   // If shouldInsertFencesForAtomic() returns true, then the target does not
1100   // want to deal with memory orders, and emitLeading/TrailingFence should take
1101   // care of everything. Otherwise, emitLeading/TrailingFence are no-op and we
1102   // should preserve the ordering.
1103   bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
1104   AtomicOrdering MemOpOrder =
1105       ShouldInsertFencesForAtomic ? AtomicOrdering::Monotonic : SuccessOrder;
1106 
1107   // In implementations which use a barrier to achieve release semantics, we can
1108   // delay emitting this barrier until we know a store is actually going to be
1109   // attempted. The cost of this delay is that we need 2 copies of the block
1110   // emitting the load-linked, affecting code size.
1111   //
1112   // Ideally, this logic would be unconditional except for the minsize check
1113   // since in other cases the extra blocks naturally collapse down to the
1114   // minimal loop. Unfortunately, this puts too much stress on later
1115   // optimisations so we avoid emitting the extra logic in those cases too.
1116   bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
1117                            SuccessOrder != AtomicOrdering::Monotonic &&
1118                            SuccessOrder != AtomicOrdering::Acquire &&
1119                            !F->hasMinSize();
1120 
1121   // There's no overhead for sinking the release barrier in a weak cmpxchg, so
1122   // do it even on minsize.
1123   bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak();
1124 
1125   // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
1126   //
1127   // The full expansion we produce is:
1128   //     [...]
1129   // cmpxchg.start:
1130   //     %unreleasedload = @load.linked(%addr)
1131   //     %should_store = icmp eq %unreleasedload, %desired
1132   //     br i1 %should_store, label %cmpxchg.fencedstore,
1133   //                          label %cmpxchg.nostore
1134   // cmpxchg.releasingstore:
1135   //     fence?
1136   //     br label cmpxchg.trystore
1137   // cmpxchg.trystore:
1138   //     %loaded.trystore = phi [%unreleasedload, %releasingstore],
1139   //                            [%releasedload, %cmpxchg.releasedload]
1140   //     %stored = @store_conditional(%new, %addr)
1141   //     %success = icmp eq i32 %stored, 0
1142   //     br i1 %success, label %cmpxchg.success,
1143   //                     label %cmpxchg.releasedload/%cmpxchg.failure
1144   // cmpxchg.releasedload:
1145   //     %releasedload = @load.linked(%addr)
1146   //     %should_store = icmp eq %releasedload, %desired
1147   //     br i1 %should_store, label %cmpxchg.trystore,
1148   //                          label %cmpxchg.failure
1149   // cmpxchg.success:
1150   //     fence?
1151   //     br label %cmpxchg.end
1152   // cmpxchg.nostore:
1153   //     %loaded.nostore = phi [%unreleasedload, %cmpxchg.start],
1154   //                           [%releasedload,
1155   //                               %cmpxchg.releasedload/%cmpxchg.trystore]
1156   //     @load_linked_fail_balance()?
1157   //     br label %cmpxchg.failure
1158   // cmpxchg.failure:
1159   //     fence?
1160   //     br label %cmpxchg.end
1161   // cmpxchg.end:
1162   //     %loaded = phi [%loaded.nostore, %cmpxchg.failure],
1163   //                   [%loaded.trystore, %cmpxchg.trystore]
1164   //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
1165   //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
1166   //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
1167   //     [...]
1168   BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
1169   auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
1170   auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
1171   auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
1172   auto ReleasedLoadBB =
1173       BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, SuccessBB);
1174   auto TryStoreBB =
1175       BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ReleasedLoadBB);
1176   auto ReleasingStoreBB =
1177       BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, TryStoreBB);
1178   auto StartBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, ReleasingStoreBB);
1179 
1180   // This grabs the DebugLoc from CI
1181   IRBuilder<> Builder(CI);
1182 
1183   // The split call above "helpfully" added a branch at the end of BB (to the
1184   // wrong place), but we might want a fence too. It's easiest to just remove
1185   // the branch entirely.
1186   std::prev(BB->end())->eraseFromParent();
1187   Builder.SetInsertPoint(BB);
1188   if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
1189     TLI->emitLeadingFence(Builder, CI, SuccessOrder);
1190   Builder.CreateBr(StartBB);
1191 
1192   // Start the main loop block now that we've taken care of the preliminaries.
1193   Builder.SetInsertPoint(StartBB);
1194   Value *UnreleasedLoad = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
1195   Value *ShouldStore = Builder.CreateICmpEQ(
1196       UnreleasedLoad, CI->getCompareOperand(), "should_store");
1197 
1198   // If the cmpxchg doesn't actually need any ordering when it fails, we can
1199   // jump straight past that fence instruction (if it exists).
1200   Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB);
1201 
1202   Builder.SetInsertPoint(ReleasingStoreBB);
1203   if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
1204     TLI->emitLeadingFence(Builder, CI, SuccessOrder);
1205   Builder.CreateBr(TryStoreBB);
1206 
1207   Builder.SetInsertPoint(TryStoreBB);
1208   Value *StoreSuccess = TLI->emitStoreConditional(
1209       Builder, CI->getNewValOperand(), Addr, MemOpOrder);
1210   StoreSuccess = Builder.CreateICmpEQ(
1211       StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
1212   BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
1213   Builder.CreateCondBr(StoreSuccess, SuccessBB,
1214                        CI->isWeak() ? FailureBB : RetryBB);
1215 
1216   Builder.SetInsertPoint(ReleasedLoadBB);
1217   Value *SecondLoad;
1218   if (HasReleasedLoadBB) {
1219     SecondLoad = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
1220     ShouldStore = Builder.CreateICmpEQ(SecondLoad, CI->getCompareOperand(),
1221                                        "should_store");
1222 
1223     // If the cmpxchg doesn't actually need any ordering when it fails, we can
1224     // jump straight past that fence instruction (if it exists).
1225     Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);
1226   } else
1227     Builder.CreateUnreachable();
1228 
1229   // Make sure later instructions don't get reordered with a fence if
1230   // necessary.
1231   Builder.SetInsertPoint(SuccessBB);
1232   if (ShouldInsertFencesForAtomic)
1233     TLI->emitTrailingFence(Builder, CI, SuccessOrder);
1234   Builder.CreateBr(ExitBB);
1235 
1236   Builder.SetInsertPoint(NoStoreBB);
1237   // In the failing case, where we don't execute the store-conditional, the
1238   // target might want to balance out the load-linked with a dedicated
1239   // instruction (e.g., on ARM, clearing the exclusive monitor).
1240   TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
1241   Builder.CreateBr(FailureBB);
1242 
1243   Builder.SetInsertPoint(FailureBB);
1244   if (ShouldInsertFencesForAtomic)
1245     TLI->emitTrailingFence(Builder, CI, FailureOrder);
1246   Builder.CreateBr(ExitBB);
1247 
1248   // Finally, we have control-flow based knowledge of whether the cmpxchg
1249   // succeeded or not. We expose this to later passes by converting any
1250   // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
1251   // PHI.
1252   Builder.SetInsertPoint(ExitBB, ExitBB->begin());
1253   PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
1254   Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
1255   Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);
1256 
1257   // Setup the builder so we can create any PHIs we need.
1258   Value *Loaded;
1259   if (!HasReleasedLoadBB)
1260     Loaded = UnreleasedLoad;
1261   else {
1262     Builder.SetInsertPoint(TryStoreBB, TryStoreBB->begin());
1263     PHINode *TryStoreLoaded = Builder.CreatePHI(UnreleasedLoad->getType(), 2);
1264     TryStoreLoaded->addIncoming(UnreleasedLoad, ReleasingStoreBB);
1265     TryStoreLoaded->addIncoming(SecondLoad, ReleasedLoadBB);
1266 
1267     Builder.SetInsertPoint(NoStoreBB, NoStoreBB->begin());
1268     PHINode *NoStoreLoaded = Builder.CreatePHI(UnreleasedLoad->getType(), 2);
1269     NoStoreLoaded->addIncoming(UnreleasedLoad, StartBB);
1270     NoStoreLoaded->addIncoming(SecondLoad, ReleasedLoadBB);
1271 
1272     Builder.SetInsertPoint(ExitBB, ++ExitBB->begin());
1273     PHINode *ExitLoaded = Builder.CreatePHI(UnreleasedLoad->getType(), 2);
1274     ExitLoaded->addIncoming(TryStoreLoaded, SuccessBB);
1275     ExitLoaded->addIncoming(NoStoreLoaded, FailureBB);
1276 
1277     Loaded = ExitLoaded;
1278   }
1279 
1280   // Look for any users of the cmpxchg that are just comparing the loaded value
1281   // against the desired one, and replace them with the CFG-derived version.
1282   SmallVector<ExtractValueInst *, 2> PrunedInsts;
1283   for (auto User : CI->users()) {
1284     ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
1285     if (!EV)
1286       continue;
1287 
1288     assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
1289            "weird extraction from { iN, i1 }");
1290 
1291     if (EV->getIndices()[0] == 0)
1292       EV->replaceAllUsesWith(Loaded);
1293     else
1294       EV->replaceAllUsesWith(Success);
1295 
1296     PrunedInsts.push_back(EV);
1297   }
1298 
1299   // We can remove the instructions now we're no longer iterating through them.
1300   for (auto EV : PrunedInsts)
1301     EV->eraseFromParent();
1302 
1303   if (!CI->use_empty()) {
1304     // Some use of the full struct return that we don't understand has happened,
1305     // so we've got to reconstruct it properly.
1306     Value *Res;
1307     Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
1308     Res = Builder.CreateInsertValue(Res, Success, 1);
1309 
1310     CI->replaceAllUsesWith(Res);
1311   }
1312 
1313   CI->eraseFromParent();
1314   return true;
1315 }
1316 
1317 bool AtomicExpand::isIdempotentRMW(AtomicRMWInst* RMWI) {
1318   auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
1319   if(!C)
1320     return false;
1321 
1322   AtomicRMWInst::BinOp Op = RMWI->getOperation();
1323   switch(Op) {
1324     case AtomicRMWInst::Add:
1325     case AtomicRMWInst::Sub:
1326     case AtomicRMWInst::Or:
1327     case AtomicRMWInst::Xor:
1328       return C->isZero();
1329     case AtomicRMWInst::And:
1330       return C->isMinusOne();
1331     // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
1332     default:
1333       return false;
1334   }
1335 }
1336 
1337 bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst* RMWI) {
1338   if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
1339     tryExpandAtomicLoad(ResultingLoad);
1340     return true;
1341   }
1342   return false;
1343 }
1344 
1345 Value *AtomicExpand::insertRMWCmpXchgLoop(
1346     IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
1347     AtomicOrdering MemOpOrder,
1348     function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
1349     CreateCmpXchgInstFun CreateCmpXchg) {
1350   LLVMContext &Ctx = Builder.getContext();
1351   BasicBlock *BB = Builder.GetInsertBlock();
1352   Function *F = BB->getParent();
1353 
1354   // Given: atomicrmw some_op iN* %addr, iN %incr ordering
1355   //
1356   // The standard expansion we produce is:
1357   //     [...]
1358   //     %init_loaded = load atomic iN* %addr
1359   //     br label %loop
1360   // loop:
1361   //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
1362   //     %new = some_op iN %loaded, %incr
1363   //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
1364   //     %new_loaded = extractvalue { iN, i1 } %pair, 0
1365   //     %success = extractvalue { iN, i1 } %pair, 1
1366   //     br i1 %success, label %atomicrmw.end, label %loop
1367   // atomicrmw.end:
1368   //     [...]
1369   BasicBlock *ExitBB =
1370       BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
1371   BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);
1372 
1373   // The split call above "helpfully" added a branch at the end of BB (to the
1374   // wrong place), but we want a load. It's easiest to just remove
1375   // the branch entirely.
1376   std::prev(BB->end())->eraseFromParent();
1377   Builder.SetInsertPoint(BB);
1378   LoadInst *InitLoaded = Builder.CreateLoad(ResultTy, Addr);
1379   // Atomics require at least natural alignment.
1380   InitLoaded->setAlignment(MaybeAlign(ResultTy->getPrimitiveSizeInBits() / 8));
1381   Builder.CreateBr(LoopBB);
1382 
1383   // Start the main loop block now that we've taken care of the preliminaries.
1384   Builder.SetInsertPoint(LoopBB);
1385   PHINode *Loaded = Builder.CreatePHI(ResultTy, 2, "loaded");
1386   Loaded->addIncoming(InitLoaded, BB);
1387 
1388   Value *NewVal = PerformOp(Builder, Loaded);
1389 
1390   Value *NewLoaded = nullptr;
1391   Value *Success = nullptr;
1392 
1393   CreateCmpXchg(Builder, Addr, Loaded, NewVal,
1394                 MemOpOrder == AtomicOrdering::Unordered
1395                     ? AtomicOrdering::Monotonic
1396                     : MemOpOrder,
1397                 Success, NewLoaded);
1398   assert(Success && NewLoaded);
1399 
1400   Loaded->addIncoming(NewLoaded, LoopBB);
1401 
1402   Builder.CreateCondBr(Success, ExitBB, LoopBB);
1403 
1404   Builder.SetInsertPoint(ExitBB, ExitBB->begin());
1405   return NewLoaded;
1406 }
1407 
1408 bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
1409   unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
1410   unsigned ValueSize = getAtomicOpSize(CI);
1411 
1412   switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
1413   default:
1414     llvm_unreachable("Unhandled case in tryExpandAtomicCmpXchg");
1415   case TargetLoweringBase::AtomicExpansionKind::None:
1416     if (ValueSize < MinCASSize)
1417       expandPartwordCmpXchg(CI);
1418     return false;
1419   case TargetLoweringBase::AtomicExpansionKind::LLSC: {
1420     assert(ValueSize >= MinCASSize &&
1421            "MinCmpXchgSizeInBits not yet supported for LL/SC expansions.");
1422     return expandAtomicCmpXchg(CI);
1423   }
1424   case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
1425     expandAtomicCmpXchgToMaskedIntrinsic(CI);
1426     return true;
1427   }
1428 }
1429 
1430 // Note: This function is exposed externally by AtomicExpandUtils.h
1431 bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
1432                                     CreateCmpXchgInstFun CreateCmpXchg) {
1433   IRBuilder<> Builder(AI);
1434   Value *Loaded = AtomicExpand::insertRMWCmpXchgLoop(
1435       Builder, AI->getType(), AI->getPointerOperand(), AI->getOrdering(),
1436       [&](IRBuilder<> &Builder, Value *Loaded) {
1437         return performAtomicOp(AI->getOperation(), Builder, Loaded,
1438                                AI->getValOperand());
1439       },
1440       CreateCmpXchg);
1441 
1442   AI->replaceAllUsesWith(Loaded);
1443   AI->eraseFromParent();
1444   return true;
1445 }
1446 
1447 // In order to use one of the sized library calls such as
1448 // __atomic_fetch_add_4, the alignment must be sufficient, the size
1449 // must be one of the potentially-specialized sizes, and the value
1450 // type must actually exist in C on the target (otherwise, the
1451 // function wouldn't actually be defined.)
1452 static bool canUseSizedAtomicCall(unsigned Size, unsigned Align,
1453                                   const DataLayout &DL) {
1454   // TODO: "LargestSize" is an approximation for "largest type that
1455   // you can express in C". It seems to be the case that int128 is
1456   // supported on all 64-bit platforms, otherwise only up to 64-bit
1457   // integers are supported. If we get this wrong, then we'll try to
1458   // call a sized libcall that doesn't actually exist. There should
1459   // really be some more reliable way in LLVM of determining integer
1460   // sizes which are valid in the target's C ABI...
1461   unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
1462   return Align >= Size &&
1463          (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
1464          Size <= LargestSize;
1465 }
1466 
1467 void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
1468   static const RTLIB::Libcall Libcalls[6] = {
1469       RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
1470       RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
1471   unsigned Size = getAtomicOpSize(I);
1472   unsigned Align = getAtomicOpAlign(I);
1473 
1474   bool expanded = expandAtomicOpToLibcall(
1475       I, Size, Align, I->getPointerOperand(), nullptr, nullptr,
1476       I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
1477   (void)expanded;
1478   assert(expanded && "expandAtomicOpToLibcall shouldn't fail tor Load");
1479 }
1480 
1481 void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
1482   static const RTLIB::Libcall Libcalls[6] = {
1483       RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
1484       RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
1485   unsigned Size = getAtomicOpSize(I);
1486   unsigned Align = getAtomicOpAlign(I);
1487 
1488   bool expanded = expandAtomicOpToLibcall(
1489       I, Size, Align, I->getPointerOperand(), I->getValueOperand(), nullptr,
1490       I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
1491   (void)expanded;
1492   assert(expanded && "expandAtomicOpToLibcall shouldn't fail tor Store");
1493 }
1494 
1495 void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
1496   static const RTLIB::Libcall Libcalls[6] = {
1497       RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
1498       RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
1499       RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
1500   unsigned Size = getAtomicOpSize(I);
1501   unsigned Align = getAtomicOpAlign(I);
1502 
1503   bool expanded = expandAtomicOpToLibcall(
1504       I, Size, Align, I->getPointerOperand(), I->getNewValOperand(),
1505       I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
1506       Libcalls);
1507   (void)expanded;
1508   assert(expanded && "expandAtomicOpToLibcall shouldn't fail tor CAS");
1509 }
1510 
1511 static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
1512   static const RTLIB::Libcall LibcallsXchg[6] = {
1513       RTLIB::ATOMIC_EXCHANGE,   RTLIB::ATOMIC_EXCHANGE_1,
1514       RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
1515       RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
1516   static const RTLIB::Libcall LibcallsAdd[6] = {
1517       RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_ADD_1,
1518       RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
1519       RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
1520   static const RTLIB::Libcall LibcallsSub[6] = {
1521       RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_SUB_1,
1522       RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
1523       RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
1524   static const RTLIB::Libcall LibcallsAnd[6] = {
1525       RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_AND_1,
1526       RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
1527       RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
1528   static const RTLIB::Libcall LibcallsOr[6] = {
1529       RTLIB::UNKNOWN_LIBCALL,   RTLIB::ATOMIC_FETCH_OR_1,
1530       RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
1531       RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
1532   static const RTLIB::Libcall LibcallsXor[6] = {
1533       RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_XOR_1,
1534       RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
1535       RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
1536   static const RTLIB::Libcall LibcallsNand[6] = {
1537       RTLIB::UNKNOWN_LIBCALL,     RTLIB::ATOMIC_FETCH_NAND_1,
1538       RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
1539       RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};
1540 
1541   switch (Op) {
1542   case AtomicRMWInst::BAD_BINOP:
1543     llvm_unreachable("Should not have BAD_BINOP.");
1544   case AtomicRMWInst::Xchg:
1545     return makeArrayRef(LibcallsXchg);
1546   case AtomicRMWInst::Add:
1547     return makeArrayRef(LibcallsAdd);
1548   case AtomicRMWInst::Sub:
1549     return makeArrayRef(LibcallsSub);
1550   case AtomicRMWInst::And:
1551     return makeArrayRef(LibcallsAnd);
1552   case AtomicRMWInst::Or:
1553     return makeArrayRef(LibcallsOr);
1554   case AtomicRMWInst::Xor:
1555     return makeArrayRef(LibcallsXor);
1556   case AtomicRMWInst::Nand:
1557     return makeArrayRef(LibcallsNand);
1558   case AtomicRMWInst::Max:
1559   case AtomicRMWInst::Min:
1560   case AtomicRMWInst::UMax:
1561   case AtomicRMWInst::UMin:
1562   case AtomicRMWInst::FAdd:
1563   case AtomicRMWInst::FSub:
1564     // No atomic libcalls are available for max/min/umax/umin.
1565     return {};
1566   }
1567   llvm_unreachable("Unexpected AtomicRMW operation.");
1568 }
1569 
1570 void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
1571   ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());
1572 
1573   unsigned Size = getAtomicOpSize(I);
1574   unsigned Align = getAtomicOpAlign(I);
1575 
1576   bool Success = false;
1577   if (!Libcalls.empty())
1578     Success = expandAtomicOpToLibcall(
1579         I, Size, Align, I->getPointerOperand(), I->getValOperand(), nullptr,
1580         I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
1581 
1582   // The expansion failed: either there were no libcalls at all for
1583   // the operation (min/max), or there were only size-specialized
1584   // libcalls (add/sub/etc) and we needed a generic. So, expand to a
1585   // CAS libcall, via a CAS loop, instead.
1586   if (!Success) {
1587     expandAtomicRMWToCmpXchg(I, [this](IRBuilder<> &Builder, Value *Addr,
1588                                        Value *Loaded, Value *NewVal,
1589                                        AtomicOrdering MemOpOrder,
1590                                        Value *&Success, Value *&NewLoaded) {
1591       // Create the CAS instruction normally...
1592       AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
1593           Addr, Loaded, NewVal, MemOpOrder,
1594           AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
1595       Success = Builder.CreateExtractValue(Pair, 1, "success");
1596       NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
1597 
1598       // ...and then expand the CAS into a libcall.
1599       expandAtomicCASToLibcall(Pair);
1600     });
1601   }
1602 }
1603 
1604 // A helper routine for the above expandAtomic*ToLibcall functions.
1605 //
1606 // 'Libcalls' contains an array of enum values for the particular
1607 // ATOMIC libcalls to be emitted. All of the other arguments besides
1608 // 'I' are extracted from the Instruction subclass by the
1609 // caller. Depending on the particular call, some will be null.
1610 bool AtomicExpand::expandAtomicOpToLibcall(
1611     Instruction *I, unsigned Size, unsigned Align, Value *PointerOperand,
1612     Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
1613     AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
1614   assert(Libcalls.size() == 6);
1615 
1616   LLVMContext &Ctx = I->getContext();
1617   Module *M = I->getModule();
1618   const DataLayout &DL = M->getDataLayout();
1619   IRBuilder<> Builder(I);
1620   IRBuilder<> AllocaBuilder(&I->getFunction()->getEntryBlock().front());
1621 
1622   bool UseSizedLibcall = canUseSizedAtomicCall(Size, Align, DL);
1623   Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);
1624 
1625   unsigned AllocaAlignment = DL.getPrefTypeAlignment(SizedIntTy);
1626 
1627   // TODO: the "order" argument type is "int", not int32. So
1628   // getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
1629   ConstantInt *SizeVal64 = ConstantInt::get(Type::getInt64Ty(Ctx), Size);
1630   assert(Ordering != AtomicOrdering::NotAtomic && "expect atomic MO");
1631   Constant *OrderingVal =
1632       ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering));
1633   Constant *Ordering2Val = nullptr;
1634   if (CASExpected) {
1635     assert(Ordering2 != AtomicOrdering::NotAtomic && "expect atomic MO");
1636     Ordering2Val =
1637         ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering2));
1638   }
1639   bool HasResult = I->getType() != Type::getVoidTy(Ctx);
1640 
1641   RTLIB::Libcall RTLibType;
1642   if (UseSizedLibcall) {
1643     switch (Size) {
1644     case 1: RTLibType = Libcalls[1]; break;
1645     case 2: RTLibType = Libcalls[2]; break;
1646     case 4: RTLibType = Libcalls[3]; break;
1647     case 8: RTLibType = Libcalls[4]; break;
1648     case 16: RTLibType = Libcalls[5]; break;
1649     }
1650   } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
1651     RTLibType = Libcalls[0];
1652   } else {
1653     // Can't use sized function, and there's no generic for this
1654     // operation, so give up.
1655     return false;
1656   }
1657 
1658   // Build up the function call. There's two kinds. First, the sized
1659   // variants.  These calls are going to be one of the following (with
1660   // N=1,2,4,8,16):
1661   //  iN    __atomic_load_N(iN *ptr, int ordering)
1662   //  void  __atomic_store_N(iN *ptr, iN val, int ordering)
1663   //  iN    __atomic_{exchange|fetch_*}_N(iN *ptr, iN val, int ordering)
1664   //  bool  __atomic_compare_exchange_N(iN *ptr, iN *expected, iN desired,
1665   //                                    int success_order, int failure_order)
1666   //
1667   // Note that these functions can be used for non-integer atomic
1668   // operations, the values just need to be bitcast to integers on the
1669   // way in and out.
1670   //
1671   // And, then, the generic variants. They look like the following:
1672   //  void  __atomic_load(size_t size, void *ptr, void *ret, int ordering)
1673   //  void  __atomic_store(size_t size, void *ptr, void *val, int ordering)
1674   //  void  __atomic_exchange(size_t size, void *ptr, void *val, void *ret,
1675   //                          int ordering)
1676   //  bool  __atomic_compare_exchange(size_t size, void *ptr, void *expected,
1677   //                                  void *desired, int success_order,
1678   //                                  int failure_order)
1679   //
1680   // The different signatures are built up depending on the
1681   // 'UseSizedLibcall', 'CASExpected', 'ValueOperand', and 'HasResult'
1682   // variables.
1683 
1684   AllocaInst *AllocaCASExpected = nullptr;
1685   Value *AllocaCASExpected_i8 = nullptr;
1686   AllocaInst *AllocaValue = nullptr;
1687   Value *AllocaValue_i8 = nullptr;
1688   AllocaInst *AllocaResult = nullptr;
1689   Value *AllocaResult_i8 = nullptr;
1690 
1691   Type *ResultTy;
1692   SmallVector<Value *, 6> Args;
1693   AttributeList Attr;
1694 
1695   // 'size' argument.
1696   if (!UseSizedLibcall) {
1697     // Note, getIntPtrType is assumed equivalent to size_t.
1698     Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));
1699   }
1700 
1701   // 'ptr' argument.
1702   // note: This assumes all address spaces share a common libfunc
1703   // implementation and that addresses are convertable.  For systems without
1704   // that property, we'd need to extend this mechanism to support AS-specific
1705   // families of atomic intrinsics.
1706   auto PtrTypeAS = PointerOperand->getType()->getPointerAddressSpace();
1707   Value *PtrVal = Builder.CreateBitCast(PointerOperand,
1708                                         Type::getInt8PtrTy(Ctx, PtrTypeAS));
1709   PtrVal = Builder.CreateAddrSpaceCast(PtrVal, Type::getInt8PtrTy(Ctx));
1710   Args.push_back(PtrVal);
1711 
1712   // 'expected' argument, if present.
1713   if (CASExpected) {
1714     AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
1715     AllocaCASExpected->setAlignment(MaybeAlign(AllocaAlignment));
1716     unsigned AllocaAS =  AllocaCASExpected->getType()->getPointerAddressSpace();
1717 
1718     AllocaCASExpected_i8 =
1719       Builder.CreateBitCast(AllocaCASExpected,
1720                             Type::getInt8PtrTy(Ctx, AllocaAS));
1721     Builder.CreateLifetimeStart(AllocaCASExpected_i8, SizeVal64);
1722     Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
1723     Args.push_back(AllocaCASExpected_i8);
1724   }
1725 
1726   // 'val' argument ('desired' for cas), if present.
1727   if (ValueOperand) {
1728     if (UseSizedLibcall) {
1729       Value *IntValue =
1730           Builder.CreateBitOrPointerCast(ValueOperand, SizedIntTy);
1731       Args.push_back(IntValue);
1732     } else {
1733       AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
1734       AllocaValue->setAlignment(MaybeAlign(AllocaAlignment));
1735       AllocaValue_i8 =
1736           Builder.CreateBitCast(AllocaValue, Type::getInt8PtrTy(Ctx));
1737       Builder.CreateLifetimeStart(AllocaValue_i8, SizeVal64);
1738       Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
1739       Args.push_back(AllocaValue_i8);
1740     }
1741   }
1742 
1743   // 'ret' argument.
1744   if (!CASExpected && HasResult && !UseSizedLibcall) {
1745     AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
1746     AllocaResult->setAlignment(MaybeAlign(AllocaAlignment));
1747     unsigned AllocaAS =  AllocaResult->getType()->getPointerAddressSpace();
1748     AllocaResult_i8 =
1749       Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx, AllocaAS));
1750     Builder.CreateLifetimeStart(AllocaResult_i8, SizeVal64);
1751     Args.push_back(AllocaResult_i8);
1752   }
1753 
1754   // 'ordering' ('success_order' for cas) argument.
1755   Args.push_back(OrderingVal);
1756 
1757   // 'failure_order' argument, if present.
1758   if (Ordering2Val)
1759     Args.push_back(Ordering2Val);
1760 
1761   // Now, the return type.
1762   if (CASExpected) {
1763     ResultTy = Type::getInt1Ty(Ctx);
1764     Attr = Attr.addAttribute(Ctx, AttributeList::ReturnIndex, Attribute::ZExt);
1765   } else if (HasResult && UseSizedLibcall)
1766     ResultTy = SizedIntTy;
1767   else
1768     ResultTy = Type::getVoidTy(Ctx);
1769 
1770   // Done with setting up arguments and return types, create the call:
1771   SmallVector<Type *, 6> ArgTys;
1772   for (Value *Arg : Args)
1773     ArgTys.push_back(Arg->getType());
1774   FunctionType *FnType = FunctionType::get(ResultTy, ArgTys, false);
1775   FunctionCallee LibcallFn =
1776       M->getOrInsertFunction(TLI->getLibcallName(RTLibType), FnType, Attr);
1777   CallInst *Call = Builder.CreateCall(LibcallFn, Args);
1778   Call->setAttributes(Attr);
1779   Value *Result = Call;
1780 
1781   // And then, extract the results...
1782   if (ValueOperand && !UseSizedLibcall)
1783     Builder.CreateLifetimeEnd(AllocaValue_i8, SizeVal64);
1784 
1785   if (CASExpected) {
1786     // The final result from the CAS is {load of 'expected' alloca, bool result
1787     // from call}
1788     Type *FinalResultTy = I->getType();
1789     Value *V = UndefValue::get(FinalResultTy);
1790     Value *ExpectedOut = Builder.CreateAlignedLoad(
1791         CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
1792     Builder.CreateLifetimeEnd(AllocaCASExpected_i8, SizeVal64);
1793     V = Builder.CreateInsertValue(V, ExpectedOut, 0);
1794     V = Builder.CreateInsertValue(V, Result, 1);
1795     I->replaceAllUsesWith(V);
1796   } else if (HasResult) {
1797     Value *V;
1798     if (UseSizedLibcall)
1799       V = Builder.CreateBitOrPointerCast(Result, I->getType());
1800     else {
1801       V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
1802                                     AllocaAlignment);
1803       Builder.CreateLifetimeEnd(AllocaResult_i8, SizeVal64);
1804     }
1805     I->replaceAllUsesWith(V);
1806   }
1807   I->eraseFromParent();
1808   return true;
1809 }
1810