//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t ValueAlignInBits;
      uint64_t AtomicAlignInBits;
      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// \brief Converts an r-value to an integer value.
    llvm::Value *convertRValueToInt(RValue RVal) const;

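    /// \brief Converts an integer representation of the atomic value back to
    /// an r-value of the underlying value type.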
    RValue convertIntToValue(llvm::Value *IntVal, AggValueSlot ResultSlot,
                             SourceLocation Loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

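/// Emit a call to an external atomic runtime library function with the given
/// name, result type, and argument list, using the default free-function
/// calling convention.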
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // cmpxchg takes the expected and desired values as SSA operands, so load
  // them from their temporaries first.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation, we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // If the operation succeeded, branch straight to the exit point; otherwise
  // fall through to update the memory holding Expected with the observed value.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
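  // If the failure ordering is a compile-time constant, map it straight to an
  // LLVM ordering and emit a single cmpxchg; otherwise fall through and
  // switch on the dynamic value below.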
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
        llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
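  // For the read-modify-write builtins handled below, Op selects the
  // atomicrmw opcode, and PostOp, when non-zero, re-applies the operation to
  // the returned value so that the __atomic_*_fetch forms yield the new value.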
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

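  // Everything that falls through to here is a read-modify-write operation:
  // load the operand, issue the atomicrmw, and store the (possibly
  // post-processed) result into Dest.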
  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

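/// Append a value argument for an atomic libcall: the optimized, size-suffixed
/// libcalls take the value directly (coerced to a same-sized integer), while
/// the generic libcalls take a void* pointing at it.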
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
                               Align, CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
                         CGF.getContext().VoidPtrTy);
  }
}

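/// Emit an AtomicExpr (a __c11_atomic_* or __atomic_* builtin), either inline
/// as LLVM atomic instructions or, when it cannot be lowered to inline atomics
/// of a supported width, as a call into the __atomic_* runtime library.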
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
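  // Use a runtime library call when the size and alignment of the atomic type
  // differ, or when the operation is wider than the target can handle inline.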
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

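  // Lazily create a temporary to hold the result when the expression produces
  // a value and no destination was supplied by the caller.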
  auto GetDest = [&] {
    if (!RValTy->isVoidType() && !Dest) {
      Dest = CreateMemTemp(RValTy, ".atomicdst");
    }
    return Dest;
  };

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through a parameter that precedes the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (HaveRetTy && !RetTy->isVoidType())
      return Res;
    // The value is returned via an explicit out param.
    if (RetTy->isVoidType())
      return RValue::get(nullptr);
    // The value is returned directly for optimized libcalls, but the caller
    // expects an out-param.
    if (UseOptimizedLibcall) {
      llvm::Value *ResVal = Res.getScalarVal();
      llvm::StoreInst *StoreDest = Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
      StoreDest->setAlignment(Align);
    }
    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

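  // The inline lowering operates on an integer of the full (possibly padded)
  // atomic width, so cast the pointers involved to that integer type first.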
  llvm::Type *ITy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8);
  llvm::Value *OrigDest = GetDest();
  Ptr = Builder.CreateBitCast(
      Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
  if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
  if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
  if (Dest && !E->isCmpXChg())
    Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

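  // The ordering is only known at run time, so emit one copy of the operation
  // per allowed ordering and dispatch to the right one with a switch.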
828f4a2713aSLionel Sambuc   // Create all the relevant BB's
829*0a6a1f1dSLionel Sambuc   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
830*0a6a1f1dSLionel Sambuc                    *ReleaseBB = nullptr, *AcqRelBB = nullptr,
831*0a6a1f1dSLionel Sambuc                    *SeqCstBB = nullptr;
832f4a2713aSLionel Sambuc   MonotonicBB = createBasicBlock("monotonic", CurFn);
833f4a2713aSLionel Sambuc   if (!IsStore)
834f4a2713aSLionel Sambuc     AcquireBB = createBasicBlock("acquire", CurFn);
835f4a2713aSLionel Sambuc   if (!IsLoad)
836f4a2713aSLionel Sambuc     ReleaseBB = createBasicBlock("release", CurFn);
837f4a2713aSLionel Sambuc   if (!IsLoad && !IsStore)
838f4a2713aSLionel Sambuc     AcqRelBB = createBasicBlock("acqrel", CurFn);
839f4a2713aSLionel Sambuc   SeqCstBB = createBasicBlock("seqcst", CurFn);
840f4a2713aSLionel Sambuc   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
841f4a2713aSLionel Sambuc 
842f4a2713aSLionel Sambuc   // Create the switch for the split
843f4a2713aSLionel Sambuc   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
844f4a2713aSLionel Sambuc   // doesn't matter unless someone is crazy enough to use something that
845f4a2713aSLionel Sambuc   // doesn't fold to a constant for the ordering.
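  // For illustration (a hedged example, not taken from this file): code such
  // as
  //
  //   void f(_Atomic int *p, int *out, memory_order mo) {
  //     *out = atomic_load_explicit(p, mo);
  //   }
  //
  // passes a run-time ordering, so no single LLVM ordering can be chosen
  // statically; instead we switch on the ordering value and branch to the
  // blocks created above.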
846f4a2713aSLionel Sambuc   Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
847f4a2713aSLionel Sambuc   llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
848f4a2713aSLionel Sambuc 
849f4a2713aSLionel Sambuc   // Emit all the different atomics
850f4a2713aSLionel Sambuc   Builder.SetInsertPoint(MonotonicBB);
851*0a6a1f1dSLionel Sambuc   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
852*0a6a1f1dSLionel Sambuc                Size, Align, llvm::Monotonic);
853f4a2713aSLionel Sambuc   Builder.CreateBr(ContBB);
854f4a2713aSLionel Sambuc   if (!IsStore) {
855f4a2713aSLionel Sambuc     Builder.SetInsertPoint(AcquireBB);
856*0a6a1f1dSLionel Sambuc     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
857*0a6a1f1dSLionel Sambuc                  Size, Align, llvm::Acquire);
858f4a2713aSLionel Sambuc     Builder.CreateBr(ContBB);
859*0a6a1f1dSLionel Sambuc     SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
860*0a6a1f1dSLionel Sambuc                 AcquireBB);
861*0a6a1f1dSLionel Sambuc     SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
862*0a6a1f1dSLionel Sambuc                 AcquireBB);
863f4a2713aSLionel Sambuc   }
864f4a2713aSLionel Sambuc   if (!IsLoad) {
865f4a2713aSLionel Sambuc     Builder.SetInsertPoint(ReleaseBB);
866*0a6a1f1dSLionel Sambuc     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
867*0a6a1f1dSLionel Sambuc                  Size, Align, llvm::Release);
868f4a2713aSLionel Sambuc     Builder.CreateBr(ContBB);
869*0a6a1f1dSLionel Sambuc     SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
870*0a6a1f1dSLionel Sambuc                 ReleaseBB);
871f4a2713aSLionel Sambuc   }
872f4a2713aSLionel Sambuc   if (!IsLoad && !IsStore) {
873f4a2713aSLionel Sambuc     Builder.SetInsertPoint(AcqRelBB);
874*0a6a1f1dSLionel Sambuc     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
875*0a6a1f1dSLionel Sambuc                  Size, Align, llvm::AcquireRelease);
876f4a2713aSLionel Sambuc     Builder.CreateBr(ContBB);
877*0a6a1f1dSLionel Sambuc     SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
878*0a6a1f1dSLionel Sambuc                 AcqRelBB);
879f4a2713aSLionel Sambuc   }
880f4a2713aSLionel Sambuc   Builder.SetInsertPoint(SeqCstBB);
881*0a6a1f1dSLionel Sambuc   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
882*0a6a1f1dSLionel Sambuc                Size, Align, llvm::SequentiallyConsistent);
883f4a2713aSLionel Sambuc   Builder.CreateBr(ContBB);
884*0a6a1f1dSLionel Sambuc   SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
885*0a6a1f1dSLionel Sambuc               SeqCstBB);
886f4a2713aSLionel Sambuc 
887f4a2713aSLionel Sambuc   // Cleanup and return
888f4a2713aSLionel Sambuc   Builder.SetInsertPoint(ContBB);
889*0a6a1f1dSLionel Sambuc   if (RValTy->isVoidType())
890*0a6a1f1dSLionel Sambuc     return RValue::get(nullptr);
891*0a6a1f1dSLionel Sambuc   return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
892f4a2713aSLionel Sambuc }
893f4a2713aSLionel Sambuc 
894f4a2713aSLionel Sambuc llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
895f4a2713aSLionel Sambuc   unsigned addrspace =
896f4a2713aSLionel Sambuc     cast<llvm::PointerType>(addr->getType())->getAddressSpace();
897f4a2713aSLionel Sambuc   llvm::IntegerType *ty =
898f4a2713aSLionel Sambuc     llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
899f4a2713aSLionel Sambuc   return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
900f4a2713aSLionel Sambuc }
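// Illustrative assumption (a typical case, not asserted by this file): for an
// _Atomic(float) whose atomic width is 32 bits, this returns the same address
// reinterpreted as an i32 pointer in the original address space, so callers
// can issue integer-typed atomic loads, stores, and compare-exchanges.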
901f4a2713aSLionel Sambuc 
902f4a2713aSLionel Sambuc RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
903f4a2713aSLionel Sambuc                                        AggValueSlot resultSlot,
904f4a2713aSLionel Sambuc                                        SourceLocation loc) const {
905f4a2713aSLionel Sambuc   if (EvaluationKind == TEK_Aggregate)
906f4a2713aSLionel Sambuc     return resultSlot.asRValue();
907f4a2713aSLionel Sambuc 
908f4a2713aSLionel Sambuc   // Drill into the padding structure if we have one.
909f4a2713aSLionel Sambuc   if (hasPadding())
910f4a2713aSLionel Sambuc     addr = CGF.Builder.CreateStructGEP(addr, 0);
911f4a2713aSLionel Sambuc 
912f4a2713aSLionel Sambuc   // Otherwise, just convert the temporary to an r-value using the
913f4a2713aSLionel Sambuc   // normal conversion routine.
914f4a2713aSLionel Sambuc   return CGF.convertTempToRValue(addr, getValueType(), loc);
915f4a2713aSLionel Sambuc }
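// (Note on the helper above: the aggregate case assumes the data is already
// in 'resultSlot' and merely re-wraps it; scalar and complex results with
// padding first step into the value member before the normal conversion.)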
916f4a2713aSLionel Sambuc 
917*0a6a1f1dSLionel Sambuc RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
918*0a6a1f1dSLionel Sambuc                                      AggValueSlot ResultSlot,
919*0a6a1f1dSLionel Sambuc                                      SourceLocation Loc) const {
920*0a6a1f1dSLionel Sambuc   // Try to avoid going through memory in some easy cases.
921*0a6a1f1dSLionel Sambuc   assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
922*0a6a1f1dSLionel Sambuc   if (getEvaluationKind() == TEK_Scalar && !hasPadding()) {
923*0a6a1f1dSLionel Sambuc     auto *ValTy = CGF.ConvertTypeForMem(ValueTy);
924*0a6a1f1dSLionel Sambuc     if (ValTy->isIntegerTy()) {
925*0a6a1f1dSLionel Sambuc       assert(IntVal->getType() == ValTy && "Different integer types.");
926*0a6a1f1dSLionel Sambuc       return RValue::get(IntVal);
927*0a6a1f1dSLionel Sambuc     } else if (ValTy->isPointerTy())
928*0a6a1f1dSLionel Sambuc       return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
929*0a6a1f1dSLionel Sambuc     else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
930*0a6a1f1dSLionel Sambuc       return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
931*0a6a1f1dSLionel Sambuc   }
932*0a6a1f1dSLionel Sambuc 
933*0a6a1f1dSLionel Sambuc   // Create a temporary.  This needs to be big enough to hold the
934*0a6a1f1dSLionel Sambuc   // atomic integer.
935*0a6a1f1dSLionel Sambuc   llvm::Value *Temp;
936*0a6a1f1dSLionel Sambuc   bool TempIsVolatile = false;
937*0a6a1f1dSLionel Sambuc   CharUnits TempAlignment;
938*0a6a1f1dSLionel Sambuc   if (getEvaluationKind() == TEK_Aggregate) {
939*0a6a1f1dSLionel Sambuc     assert(!ResultSlot.isIgnored());
940*0a6a1f1dSLionel Sambuc     Temp = ResultSlot.getAddr();
941*0a6a1f1dSLionel Sambuc     TempAlignment = getValueAlignment();
942*0a6a1f1dSLionel Sambuc     TempIsVolatile = ResultSlot.isVolatile();
943*0a6a1f1dSLionel Sambuc   } else {
944*0a6a1f1dSLionel Sambuc     Temp = CGF.CreateMemTemp(getAtomicType(), "atomic-temp");
945*0a6a1f1dSLionel Sambuc     TempAlignment = getAtomicAlignment();
946*0a6a1f1dSLionel Sambuc   }
947*0a6a1f1dSLionel Sambuc 
948*0a6a1f1dSLionel Sambuc   // Slam the integer into the temporary.
949*0a6a1f1dSLionel Sambuc   llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
950*0a6a1f1dSLionel Sambuc   CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
951*0a6a1f1dSLionel Sambuc       ->setVolatile(TempIsVolatile);
952*0a6a1f1dSLionel Sambuc 
953*0a6a1f1dSLionel Sambuc   return convertTempToRValue(Temp, ResultSlot, Loc);
954*0a6a1f1dSLionel Sambuc }
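// Round-trip sketch (illustrative assumptions about common targets): an
// atomic float loaded as an i32 takes the fast path above via a single
// bitcast, while a value whose atomic form carries padding is stored into a
// temporary through the cast-to-integer pointer and then read back with
// convertTempToRValue, which drills past the padding member.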
955*0a6a1f1dSLionel Sambuc 
956f4a2713aSLionel Sambuc /// Emit a load from an l-value of atomic type.  Note that the r-value
957f4a2713aSLionel Sambuc /// we produce is an r-value of the atomic *value* type.
958f4a2713aSLionel Sambuc RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
959f4a2713aSLionel Sambuc                                        AggValueSlot resultSlot) {
960f4a2713aSLionel Sambuc   AtomicInfo atomics(*this, src);
961f4a2713aSLionel Sambuc 
962f4a2713aSLionel Sambuc   // Check whether we should use a library call.
963f4a2713aSLionel Sambuc   if (atomics.shouldUseLibcall()) {
964f4a2713aSLionel Sambuc     llvm::Value *tempAddr;
965f4a2713aSLionel Sambuc     if (!resultSlot.isIgnored()) {
966f4a2713aSLionel Sambuc       assert(atomics.getEvaluationKind() == TEK_Aggregate);
967f4a2713aSLionel Sambuc       tempAddr = resultSlot.getAddr();
968f4a2713aSLionel Sambuc     } else {
969f4a2713aSLionel Sambuc       tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
970f4a2713aSLionel Sambuc     }
971f4a2713aSLionel Sambuc 
972f4a2713aSLionel Sambuc     // void __atomic_load(size_t size, void *mem, void *ret, int order);
973f4a2713aSLionel Sambuc     CallArgList args;
974f4a2713aSLionel Sambuc     args.add(RValue::get(atomics.getAtomicSizeValue()),
975f4a2713aSLionel Sambuc              getContext().getSizeType());
976f4a2713aSLionel Sambuc     args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
977f4a2713aSLionel Sambuc              getContext().VoidPtrTy);
978f4a2713aSLionel Sambuc     args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
979f4a2713aSLionel Sambuc              getContext().VoidPtrTy);
980*0a6a1f1dSLionel Sambuc     args.add(RValue::get(llvm::ConstantInt::get(
981*0a6a1f1dSLionel Sambuc                  IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
982f4a2713aSLionel Sambuc              getContext().IntTy);
983f4a2713aSLionel Sambuc     emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);
984f4a2713aSLionel Sambuc 
985f4a2713aSLionel Sambuc     // Produce the r-value.
986f4a2713aSLionel Sambuc     return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
987f4a2713aSLionel Sambuc   }
988f4a2713aSLionel Sambuc 
989f4a2713aSLionel Sambuc   // Okay, we're doing this natively.
990f4a2713aSLionel Sambuc   llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
991f4a2713aSLionel Sambuc   llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
992f4a2713aSLionel Sambuc   load->setAtomic(llvm::SequentiallyConsistent);
993f4a2713aSLionel Sambuc 
994f4a2713aSLionel Sambuc   // Other decoration.
995f4a2713aSLionel Sambuc   load->setAlignment(src.getAlignment().getQuantity());
996f4a2713aSLionel Sambuc   if (src.isVolatileQualified())
997f4a2713aSLionel Sambuc     load->setVolatile(true);
998f4a2713aSLionel Sambuc   if (src.getTBAAInfo())
999f4a2713aSLionel Sambuc     CGM.DecorateInstruction(load, src.getTBAAInfo());
1000f4a2713aSLionel Sambuc 
1001f4a2713aSLionel Sambuc   // If we're ignoring an aggregate return, don't do anything.
1002f4a2713aSLionel Sambuc   if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
1003*0a6a1f1dSLionel Sambuc     return RValue::getAggregate(nullptr, false);
1004f4a2713aSLionel Sambuc 
1005*0a6a1f1dSLionel Sambuc   // Okay, turn that back into the original value type.
1006*0a6a1f1dSLionel Sambuc   return atomics.convertIntToValue(load, resultSlot, loc);
1007f4a2713aSLionel Sambuc }
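// For illustration (hedged; the exact IR depends on the target and LLVM
// version):
//
//   _Atomic int g;
//   int get(void) { return g; }
//
// is expected to take the native path above and produce a sequentially
// consistent atomic integer load, while an atomic type that is too large or
// insufficiently aligned for a lock-free access is routed through the
// __atomic_load libcall instead.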
1008f4a2713aSLionel Sambuc 
1009f4a2713aSLionel Sambuc 
1010f4a2713aSLionel Sambuc 
1011f4a2713aSLionel Sambuc /// Copy an r-value into memory as part of storing to an atomic type.
1012f4a2713aSLionel Sambuc /// This needs to create a bit-pattern suitable for atomic operations.
1013f4a2713aSLionel Sambuc void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
1014f4a2713aSLionel Sambuc   // If we have an r-value, the rvalue should be of the atomic type,
1015f4a2713aSLionel Sambuc   // which means that the caller is responsible for having zeroed
1016f4a2713aSLionel Sambuc   // any padding.  Just do an aggregate copy of that type.
1017f4a2713aSLionel Sambuc   if (rvalue.isAggregate()) {
1018f4a2713aSLionel Sambuc     CGF.EmitAggregateCopy(dest.getAddress(),
1019f4a2713aSLionel Sambuc                           rvalue.getAggregateAddr(),
1020f4a2713aSLionel Sambuc                           getAtomicType(),
1021f4a2713aSLionel Sambuc                           (rvalue.isVolatileQualified()
1022f4a2713aSLionel Sambuc                            || dest.isVolatileQualified()),
1023f4a2713aSLionel Sambuc                           dest.getAlignment());
1024f4a2713aSLionel Sambuc     return;
1025f4a2713aSLionel Sambuc   }
1026f4a2713aSLionel Sambuc 
1027f4a2713aSLionel Sambuc   // Okay, otherwise we're copying a scalar or complex value into the buffer.
1028f4a2713aSLionel Sambuc 
1029f4a2713aSLionel Sambuc   // Zero out the buffer if necessary.
1030f4a2713aSLionel Sambuc   emitMemSetZeroIfNecessary(dest);
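  // (Why the zeroing matters, stated informally: any padding bits in the
  // atomic buffer take part in integer-wide compare-exchange comparisons, so
  // giving them a known value keeps later cmpxchg operations from failing
  // spuriously on garbage padding.)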
1031f4a2713aSLionel Sambuc 
1032f4a2713aSLionel Sambuc   // Drill past the padding if present.
1033f4a2713aSLionel Sambuc   dest = projectValue(dest);
1034f4a2713aSLionel Sambuc 
1035f4a2713aSLionel Sambuc   // Okay, store the rvalue in.
1036f4a2713aSLionel Sambuc   if (rvalue.isScalar()) {
1037f4a2713aSLionel Sambuc     CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
1038f4a2713aSLionel Sambuc   } else {
1039f4a2713aSLionel Sambuc     CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
1040f4a2713aSLionel Sambuc   }
1041f4a2713aSLionel Sambuc }
1042f4a2713aSLionel Sambuc 
1043f4a2713aSLionel Sambuc 
1044f4a2713aSLionel Sambuc /// Materialize an r-value into memory for the purposes of storing it
1045f4a2713aSLionel Sambuc /// to an atomic type.
1046f4a2713aSLionel Sambuc llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
1047f4a2713aSLionel Sambuc   // Aggregate r-values are already in memory, and EmitAtomicStore
1048f4a2713aSLionel Sambuc   // requires them to be values of the atomic type.
1049f4a2713aSLionel Sambuc   if (rvalue.isAggregate())
1050f4a2713aSLionel Sambuc     return rvalue.getAggregateAddr();
1051f4a2713aSLionel Sambuc 
1052f4a2713aSLionel Sambuc   // Otherwise, make a temporary and materialize into it.
1053f4a2713aSLionel Sambuc   llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
1054f4a2713aSLionel Sambuc   LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
1055f4a2713aSLionel Sambuc   emitCopyIntoMemory(rvalue, tempLV);
1056f4a2713aSLionel Sambuc   return temp;
1057f4a2713aSLionel Sambuc }
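// Usage sketch (hedged): the store and compare-exchange paths below use this
// to turn a bare scalar such as 1.0f into an "atomic-store-temp" alloca of
// the full atomic type, so the libcall path has a complete object to pass by
// void pointer and the native path can, if needed, reload it as a single
// integer of the atomic width.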
1058f4a2713aSLionel Sambuc 
1059*0a6a1f1dSLionel Sambuc llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1060*0a6a1f1dSLionel Sambuc   // If we've got a scalar value of the right size, try to avoid going
1061*0a6a1f1dSLionel Sambuc   // through memory.
1062*0a6a1f1dSLionel Sambuc   if (RVal.isScalar() && !hasPadding()) {
1063*0a6a1f1dSLionel Sambuc     llvm::Value *Value = RVal.getScalarVal();
1064*0a6a1f1dSLionel Sambuc     if (isa<llvm::IntegerType>(Value->getType()))
1065*0a6a1f1dSLionel Sambuc       return Value;
1066*0a6a1f1dSLionel Sambuc     else {
1067*0a6a1f1dSLionel Sambuc       llvm::IntegerType *InputIntTy =
1068*0a6a1f1dSLionel Sambuc           llvm::IntegerType::get(CGF.getLLVMContext(), getValueSizeInBits());
1069*0a6a1f1dSLionel Sambuc       if (isa<llvm::PointerType>(Value->getType()))
1070*0a6a1f1dSLionel Sambuc         return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1071*0a6a1f1dSLionel Sambuc       else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1072*0a6a1f1dSLionel Sambuc         return CGF.Builder.CreateBitCast(Value, InputIntTy);
1073*0a6a1f1dSLionel Sambuc     }
1074*0a6a1f1dSLionel Sambuc   }
1075*0a6a1f1dSLionel Sambuc   // Otherwise, we need to go through memory.
1076*0a6a1f1dSLionel Sambuc   // Put the r-value in memory.
1077*0a6a1f1dSLionel Sambuc   llvm::Value *Addr = materializeRValue(RVal);
1078*0a6a1f1dSLionel Sambuc 
1079*0a6a1f1dSLionel Sambuc   // Cast the temporary to the atomic int type and pull a value out.
1080*0a6a1f1dSLionel Sambuc   Addr = emitCastToAtomicIntPointer(Addr);
1081*0a6a1f1dSLionel Sambuc   return CGF.Builder.CreateAlignedLoad(Addr,
1082*0a6a1f1dSLionel Sambuc                                        getAtomicAlignment().getQuantity());
1083*0a6a1f1dSLionel Sambuc }
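// Fast-path examples (illustrative assumptions): without padding, a pointer
// rvalue becomes a ptrtoint of the value width and a 32-bit float becomes a
// bitcast to i32; only values that lack such a lossless cast, or that carry
// padding, are spilled to a temporary and reloaded as the atomic-sized
// integer.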
1084*0a6a1f1dSLionel Sambuc 
1085f4a2713aSLionel Sambuc /// Emit a store to an l-value of atomic type.
1086f4a2713aSLionel Sambuc ///
1087f4a2713aSLionel Sambuc /// Note that the r-value is expected to be an r-value *of the atomic
1088f4a2713aSLionel Sambuc /// type*; this means that for aggregate r-values, it should include
1089f4a2713aSLionel Sambuc /// storage for any padding that was necessary.
1090f4a2713aSLionel Sambuc void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
1091f4a2713aSLionel Sambuc   // If this is an aggregate r-value, it should agree in type except
1092f4a2713aSLionel Sambuc   // maybe for address-space qualification.
1093f4a2713aSLionel Sambuc   assert(!rvalue.isAggregate() ||
1094f4a2713aSLionel Sambuc          rvalue.getAggregateAddr()->getType()->getPointerElementType()
1095f4a2713aSLionel Sambuc            == dest.getAddress()->getType()->getPointerElementType());
1096f4a2713aSLionel Sambuc 
1097f4a2713aSLionel Sambuc   AtomicInfo atomics(*this, dest);
1098f4a2713aSLionel Sambuc 
1099f4a2713aSLionel Sambuc   // If this is an initialization, just put the value there normally.
1100f4a2713aSLionel Sambuc   if (isInit) {
1101f4a2713aSLionel Sambuc     atomics.emitCopyIntoMemory(rvalue, dest);
1102f4a2713aSLionel Sambuc     return;
1103f4a2713aSLionel Sambuc   }
1104f4a2713aSLionel Sambuc 
1105f4a2713aSLionel Sambuc   // Check whether we should use a library call.
1106f4a2713aSLionel Sambuc   if (atomics.shouldUseLibcall()) {
1107f4a2713aSLionel Sambuc     // Produce a source address.
1108f4a2713aSLionel Sambuc     llvm::Value *srcAddr = atomics.materializeRValue(rvalue);
1109f4a2713aSLionel Sambuc 
1110f4a2713aSLionel Sambuc     // void __atomic_store(size_t size, void *mem, void *val, int order)
1111f4a2713aSLionel Sambuc     CallArgList args;
1112f4a2713aSLionel Sambuc     args.add(RValue::get(atomics.getAtomicSizeValue()),
1113f4a2713aSLionel Sambuc              getContext().getSizeType());
1114f4a2713aSLionel Sambuc     args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
1115f4a2713aSLionel Sambuc              getContext().VoidPtrTy);
1116f4a2713aSLionel Sambuc     args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
1117f4a2713aSLionel Sambuc              getContext().VoidPtrTy);
1118*0a6a1f1dSLionel Sambuc     args.add(RValue::get(llvm::ConstantInt::get(
1119*0a6a1f1dSLionel Sambuc                  IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
1120f4a2713aSLionel Sambuc              getContext().IntTy);
1121f4a2713aSLionel Sambuc     emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
1122f4a2713aSLionel Sambuc     return;
1123f4a2713aSLionel Sambuc   }
1124f4a2713aSLionel Sambuc 
1125f4a2713aSLionel Sambuc   // Okay, we're doing this natively.
1126*0a6a1f1dSLionel Sambuc   llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
1127f4a2713aSLionel Sambuc 
1128f4a2713aSLionel Sambuc   // Do the atomic store.
1129f4a2713aSLionel Sambuc   llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
1130f4a2713aSLionel Sambuc   llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
1131f4a2713aSLionel Sambuc 
1132f4a2713aSLionel Sambuc   // Initializations don't need to be atomic.
1133f4a2713aSLionel Sambuc   if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);
1134f4a2713aSLionel Sambuc 
1135f4a2713aSLionel Sambuc   // Other decoration.
1136f4a2713aSLionel Sambuc   store->setAlignment(dest.getAlignment().getQuantity());
1137f4a2713aSLionel Sambuc   if (dest.isVolatileQualified())
1138f4a2713aSLionel Sambuc     store->setVolatile(true);
1139f4a2713aSLionel Sambuc   if (dest.getTBAAInfo())
1140f4a2713aSLionel Sambuc     CGM.DecorateInstruction(store, dest.getTBAAInfo());
1141f4a2713aSLionel Sambuc }
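// For illustration (an assumption about typical output, not asserted here):
//
//   _Atomic int g;
//   void set(void) { g = 42; }
//
// should reach the native path above and emit a sequentially consistent
// atomic store of the integer 42, whereas an oversized atomic aggregate is
// materialized into a temporary and handed to the __atomic_store libcall.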
1142f4a2713aSLionel Sambuc 
1143*0a6a1f1dSLionel Sambuc /// Emit a compare-and-exchange op for an atomic type.
1144*0a6a1f1dSLionel Sambuc ///
1145*0a6a1f1dSLionel Sambuc std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
1146*0a6a1f1dSLionel Sambuc     LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
1147*0a6a1f1dSLionel Sambuc     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
1148*0a6a1f1dSLionel Sambuc     AggValueSlot Slot) {
1149*0a6a1f1dSLionel Sambuc   // If this is an aggregate r-value, it should agree in type except
1150*0a6a1f1dSLionel Sambuc   // maybe for address-space qualification.
1151*0a6a1f1dSLionel Sambuc   assert(!Expected.isAggregate() ||
1152*0a6a1f1dSLionel Sambuc          Expected.getAggregateAddr()->getType()->getPointerElementType() ==
1153*0a6a1f1dSLionel Sambuc              Obj.getAddress()->getType()->getPointerElementType());
1154*0a6a1f1dSLionel Sambuc   assert(!Desired.isAggregate() ||
1155*0a6a1f1dSLionel Sambuc          Desired.getAggregateAddr()->getType()->getPointerElementType() ==
1156*0a6a1f1dSLionel Sambuc              Obj.getAddress()->getType()->getPointerElementType());
1157*0a6a1f1dSLionel Sambuc   AtomicInfo Atomics(*this, Obj);
1158*0a6a1f1dSLionel Sambuc 
1159*0a6a1f1dSLionel Sambuc   if (Failure >= Success)
1160*0a6a1f1dSLionel Sambuc     // Don't assert on undefined behavior.
1161*0a6a1f1dSLionel Sambuc     Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1162*0a6a1f1dSLionel Sambuc 
1163*0a6a1f1dSLionel Sambuc   auto Alignment = Atomics.getValueAlignment();
1164*0a6a1f1dSLionel Sambuc   // Check whether we should use a library call.
1165*0a6a1f1dSLionel Sambuc   if (Atomics.shouldUseLibcall()) {
1166*0a6a1f1dSLionel Sambuc     auto *ExpectedAddr = Atomics.materializeRValue(Expected);
1167*0a6a1f1dSLionel Sambuc     // Produce a source address for the desired value as well.
1168*0a6a1f1dSLionel Sambuc     auto *DesiredAddr = Atomics.materializeRValue(Desired);
1169*0a6a1f1dSLionel Sambuc     // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1170*0a6a1f1dSLionel Sambuc     // void *desired, int success, int failure);
1171*0a6a1f1dSLionel Sambuc     CallArgList Args;
1172*0a6a1f1dSLionel Sambuc     Args.add(RValue::get(Atomics.getAtomicSizeValue()),
1173*0a6a1f1dSLionel Sambuc              getContext().getSizeType());
1174*0a6a1f1dSLionel Sambuc     Args.add(RValue::get(EmitCastToVoidPtr(Obj.getAddress())),
1175*0a6a1f1dSLionel Sambuc              getContext().VoidPtrTy);
1176*0a6a1f1dSLionel Sambuc     Args.add(RValue::get(EmitCastToVoidPtr(ExpectedAddr)),
1177*0a6a1f1dSLionel Sambuc              getContext().VoidPtrTy);
1178*0a6a1f1dSLionel Sambuc     Args.add(RValue::get(EmitCastToVoidPtr(DesiredAddr)),
1179*0a6a1f1dSLionel Sambuc              getContext().VoidPtrTy);
1180*0a6a1f1dSLionel Sambuc     Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Success)),
1181*0a6a1f1dSLionel Sambuc              getContext().IntTy);
1182*0a6a1f1dSLionel Sambuc     Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Failure)),
1183*0a6a1f1dSLionel Sambuc              getContext().IntTy);
1184*0a6a1f1dSLionel Sambuc     auto SuccessFailureRVal = emitAtomicLibcall(
1185*0a6a1f1dSLionel Sambuc         *this, "__atomic_compare_exchange", getContext().BoolTy, Args);
1186*0a6a1f1dSLionel Sambuc     auto *PreviousVal =
1187*0a6a1f1dSLionel Sambuc         Builder.CreateAlignedLoad(ExpectedAddr, Alignment.getQuantity());
1188*0a6a1f1dSLionel Sambuc     return std::make_pair(RValue::get(PreviousVal), SuccessFailureRVal);
1189*0a6a1f1dSLionel Sambuc   }
1190*0a6a1f1dSLionel Sambuc 
1191*0a6a1f1dSLionel Sambuc   // If we've got a scalar value of the right size, try to avoid going
1192*0a6a1f1dSLionel Sambuc   // through memory.
1193*0a6a1f1dSLionel Sambuc   auto *ExpectedIntVal = Atomics.convertRValueToInt(Expected);
1194*0a6a1f1dSLionel Sambuc   auto *DesiredIntVal = Atomics.convertRValueToInt(Desired);
1195*0a6a1f1dSLionel Sambuc 
1196*0a6a1f1dSLionel Sambuc   // Do the atomic compare-and-exchange.
1197*0a6a1f1dSLionel Sambuc   auto *Addr = Atomics.emitCastToAtomicIntPointer(Obj.getAddress());
1198*0a6a1f1dSLionel Sambuc   auto *Inst = Builder.CreateAtomicCmpXchg(Addr, ExpectedIntVal, DesiredIntVal,
1199*0a6a1f1dSLionel Sambuc                                           Success, Failure);
1200*0a6a1f1dSLionel Sambuc   // Other decoration.
1201*0a6a1f1dSLionel Sambuc   Inst->setVolatile(Obj.isVolatileQualified());
1202*0a6a1f1dSLionel Sambuc   Inst->setWeak(IsWeak);
1203*0a6a1f1dSLionel Sambuc 
1204*0a6a1f1dSLionel Sambuc   // Okay, turn that back into the original value type.
1205*0a6a1f1dSLionel Sambuc   auto *PreviousVal = Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1206*0a6a1f1dSLionel Sambuc   auto *SuccessFailureVal = Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1207*0a6a1f1dSLionel Sambuc   return std::make_pair(Atomics.convertIntToValue(PreviousVal, Slot, Loc),
1208*0a6a1f1dSLionel Sambuc                         RValue::get(SuccessFailureVal));
1209*0a6a1f1dSLionel Sambuc }
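// Sketch of the two result paths above (informal): a lock-free type yields a
// cmpxchg instruction whose {old value, success flag} pair is unpacked with
// extractvalue, while the libcall reports success in its boolean return and
// leaves the old value in the 'expected' buffer, which is why that path
// reloads ExpectedAddr before returning.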
1210*0a6a1f1dSLionel Sambuc 
1211f4a2713aSLionel Sambuc void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
1212f4a2713aSLionel Sambuc   AtomicInfo atomics(*this, dest);
1213f4a2713aSLionel Sambuc 
1214f4a2713aSLionel Sambuc   switch (atomics.getEvaluationKind()) {
1215f4a2713aSLionel Sambuc   case TEK_Scalar: {
1216f4a2713aSLionel Sambuc     llvm::Value *value = EmitScalarExpr(init);
1217f4a2713aSLionel Sambuc     atomics.emitCopyIntoMemory(RValue::get(value), dest);
1218f4a2713aSLionel Sambuc     return;
1219f4a2713aSLionel Sambuc   }
1220f4a2713aSLionel Sambuc 
1221f4a2713aSLionel Sambuc   case TEK_Complex: {
1222f4a2713aSLionel Sambuc     ComplexPairTy value = EmitComplexExpr(init);
1223f4a2713aSLionel Sambuc     atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
1224f4a2713aSLionel Sambuc     return;
1225f4a2713aSLionel Sambuc   }
1226f4a2713aSLionel Sambuc 
1227f4a2713aSLionel Sambuc   case TEK_Aggregate: {
1228f4a2713aSLionel Sambuc     // Fix up the destination if the initializer isn't an expression
1229f4a2713aSLionel Sambuc     // of atomic type.
1230f4a2713aSLionel Sambuc     bool Zeroed = false;
1231f4a2713aSLionel Sambuc     if (!init->getType()->isAtomicType()) {
1232f4a2713aSLionel Sambuc       Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
1233f4a2713aSLionel Sambuc       dest = atomics.projectValue(dest);
1234f4a2713aSLionel Sambuc     }
1235f4a2713aSLionel Sambuc 
1236f4a2713aSLionel Sambuc     // Evaluate the expression directly into the destination.
1237f4a2713aSLionel Sambuc     AggValueSlot slot = AggValueSlot::forLValue(dest,
1238f4a2713aSLionel Sambuc                                         AggValueSlot::IsNotDestructed,
1239f4a2713aSLionel Sambuc                                         AggValueSlot::DoesNotNeedGCBarriers,
1240f4a2713aSLionel Sambuc                                         AggValueSlot::IsNotAliased,
1241f4a2713aSLionel Sambuc                                         Zeroed ? AggValueSlot::IsZeroed :
1242f4a2713aSLionel Sambuc                                                  AggValueSlot::IsNotZeroed);
1243f4a2713aSLionel Sambuc 
1244f4a2713aSLionel Sambuc     EmitAggExpr(init, slot);
1245f4a2713aSLionel Sambuc     return;
1246f4a2713aSLionel Sambuc   }
1247f4a2713aSLionel Sambuc   }
1248f4a2713aSLionel Sambuc   llvm_unreachable("bad evaluation kind");
1249f4a2713aSLionel Sambuc }
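// For illustration (a hedged example):
//
//   _Atomic int i = 5;   // TEK_Scalar: emitted as a plain, non-atomic store
//
// Initialization of an atomic object never needs an atomic instruction, so
// each case above simply copies the value into the (possibly padded) atomic
// buffer, zeroing any padding when necessary.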