//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
    LValue LVal;
    CGBitFieldInfo BFI;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
      assert(!lvalue.isGlobalReg());
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        AtomicTy = lvalue.getType();
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
          ValueTy = ATy->getValueType();
        else
          ValueTy = AtomicTy;
        EvaluationKind = CGF.getEvaluationKind(ValueTy);

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        if (lvalue.getAlignment().isZero())
          lvalue.setAlignment(AtomicAlign);

        LVal = lvalue;
      } else if (lvalue.isBitField()) {
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        auto &OrigBFI = lvalue.getBitFieldInfo();
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .alignTo(lvalue.getAlignment()));
        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
            CGF.Int8Ty, VoidPtrAddr, OffsetInChars.getQuantity());
        llvm::Type *IntTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            VoidPtrAddr, IntTy->getPointerTo(), "atomic_bitfield_base");
        BFI = OrigBFI;
        BFI.Offset = Offset;
        BFI.StorageSize = AtomicSizeInBits;
        BFI.StorageOffset += OffsetInChars;
        LVal = LValue::MakeBitfield(Address(Addr, IntTy, lvalue.getAlignment()),
                                    BFI, lvalue.getType(), lvalue.getBaseInfo(),
                                    lvalue.getTBAAInfo());
        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
        if (AtomicTy.isNull()) {
          llvm::APInt Size(
              /*numBits=*/32,
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
          AtomicTy =
              C.getConstantArrayType(C.CharTy, Size, nullptr, ArrayType::Normal,
                                     /*IndexTypeQuals=*/0);
        }
        AtomicAlign = ValueAlign = lvalue.getAlignment();
      } else if (lvalue.isVectorElt()) {
        ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = lvalue.getType();
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      } else {
        assert(lvalue.isExtVectorElt());
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
            lvalue.getType(), cast<llvm::FixedVectorType>(
                                  lvalue.getExtVectorAddress().getElementType())
                                  ->getNumElements());
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      }
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }
    llvm::Value *getAtomicPointer() const {
      if (LVal.isSimple())
        return LVal.getPointer(CGF);
      else if (LVal.isBitField())
        return LVal.getBitFieldPointer();
      else if (LVal.isVectorElt())
        return LVal.getVectorPointer();
      assert(LVal.isExtVectorElt());
      return LVal.getExtVectorPointer();
    }
    Address getAtomicAddress() const {
      llvm::Type *ElTy;
      if (LVal.isSimple())
        ElTy = LVal.getAddress(CGF).getElementType();
      else if (LVal.isBitField())
        ElTy = LVal.getBitFieldAddress().getElementType();
      else if (LVal.isVectorElt())
        ElTy = LVal.getVectorAddress().getElementType();
      else
        ElTy = LVal.getExtVectorAddress().getElementType();
      return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
    }

    Address getAtomicAddressAsAtomicIntPointer() const {
      return emitCastToAtomicIntPointer(getAtomicAddress());
    }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }
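    // For example, on typical targets an _Atomic version of a 3-byte struct
    // is padded out to 4 bytes, so ValueSizeInBits (24) differs from
    // AtomicSizeInBits (32) and hasPadding() is true.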

    bool emitMemSetZeroIfNecessary() const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for atomic
    /// operations on this type.
    Address emitCastToAtomicIntPointer(Address Addr) const;

    /// If Addr is compatible with the iN that will be used for an atomic
    /// operation, bitcast it. Otherwise, create a temporary that is suitable
    /// and copy the value across.
    Address convertToAtomicIntPointer(Address Addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
                                     SourceLocation loc, bool AsValue) const;

    /// Converts an rvalue to an integer value.
    llvm::Value *convertRValueToInt(RValue RVal) const;

    RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                     AggValueSlot ResultSlot,
                                     SourceLocation Loc, bool AsValue) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      assert(LVal.isSimple());
      Address addr = getAtomicAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                              LVal.getBaseInfo(), LVal.getTBAAInfo());
    }

    /// Emits atomic load.
    /// \returns Loaded value.
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                          bool AsValue, llvm::AtomicOrdering AO,
                          bool IsVolatile);

    /// Emits atomic compare-and-exchange sequence.
    /// \param Expected Expected value.
    /// \param Desired Desired value.
    /// \param Success Atomic ordering for success operation.
    /// \param Failure Atomic ordering for failed operation.
    /// \param IsWeak true if atomic operation is weak, false otherwise.
    /// \returns Pair of values: previous value from storage (value type) and
    /// boolean flag (i1 type) with true if success and false otherwise.
    std::pair<RValue, llvm::Value *>
    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                              llvm::AtomicOrdering Success =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              llvm::AtomicOrdering Failure =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              bool IsWeak = false);

    /// Emits atomic update.
    /// \param AO Atomic ordering.
    /// \param UpdateOp Update operation for the current lvalue.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
    /// Emits atomic update.
    /// \param AO Atomic ordering.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);

    /// Materialize an atomic r-value in atomic-layout memory.
    Address materializeRValue(RValue rvalue) const;

    /// Creates temp alloca for intermediate operations on atomic value.
    Address CreateTempAlloca() const;
  private:
    bool requiresMemSetZero(llvm::Type *type) const;


    /// Emits atomic load as a libcall.
    void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                               llvm::AtomicOrdering AO, bool IsVolatile);
    /// Emits atomic load as LLVM instruction.
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
    /// Emits atomic compare-and-exchange op as a libcall.
    llvm::Value *EmitAtomicCompareExchangeLibcall(
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent);
    /// Emits atomic compare-and-exchange op as LLVM instruction.
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent,
        bool IsWeak = false);
    /// Emit atomic update as libcalls.
    void
    EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// Emit atomic update as libcalls.
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                 bool IsVolatile);
    /// Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                            bool IsVolatile);
  };
}

Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(),
      "atomic-temp");
  // Cast to pointer to value type for bitfields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType(),
        getAtomicAddress().getElementType());
  return TempAlloca;
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
  fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
  fnAttrB.addAttribute(llvm::Attribute::WillReturn);
  llvm::AttributeList fnAttrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);

  llvm::FunctionCallee fn =
      CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
  auto callee = CGCallee::forDirect(fn);
  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
}
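
// emitAtomicLibcall requests libatomic's generic entry points, e.g.
// __atomic_load(size, src, dest, order) or __atomic_compare_exchange(size,
// obj, expected, desired, success_order, failure_order); the callers below
// build the exact argument list for each operation.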

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
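
// For example, x86's long double stores only 10 bytes of data inside a 12- or
// 16-byte object, so an atomic long double is zero-filled first to give its
// padding bytes a deterministic value.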

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  Address addr = LVal.getAddress(CGF);
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr.getPointer(), llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getAsAlign());
  return true;
}

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr,
                              Address Val1, Address Val2,
                              uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder,
                              llvm::SyncScope::ID Scope) {
  // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
      Scope);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation, we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  CGF.Builder.CreateStore(Old, Val1);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest, Address Ptr,
                                        Address Val1, Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder,
                                        llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      // 31.7.2.18: "The failure argument shall not be memory_order_release
      // nor memory_order_acq_rel". Fall back to monotonic.
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    // Prior to C++17, "the failure argument shall be no stronger than the
    // success argument". This condition has been lifted and the only
    // precondition is 31.7.2.18. Effectively treat this as a DR and skip
    // language version checks.
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      FailureOrder, Scope);
    return;
  }

  // Create all the relevant BB's
  auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
  auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
  // Implemented as acquire, since it's the closest in LLVM.
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Emit all the different atomics
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(AcquireBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::Acquire, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(SeqCstBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(ContBB);
}
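
// When the failure ordering is not a compile-time constant, the code above
// amounts to a switch over the runtime ordering value: consume/acquire share
// an acquire cmpxchg, seq_cst gets its own, and everything else (including
// the disallowed release orderings) falls back to a monotonic cmpxchg, with
// all paths rejoining at atomic.continue.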

/// Duplicate the atomic min/max operation in conventional IR for the builtin
/// variants that return the new rather than the original value.
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
                                         AtomicExpr::AtomicOp Op,
                                         bool IsSigned,
                                         llvm::Value *OldVal,
                                         llvm::Value *RHS) {
  llvm::CmpInst::Predicate Pred;
  switch (Op) {
  default:
    llvm_unreachable("Unexpected min/max operation");
  case AtomicExpr::AO__atomic_max_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
    break;
  }
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
}
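
// For example, __atomic_max_fetch must return the value left in memory, but
// the atomicrmw max instruction yields the value that was there before the
// operation; the icmp+select above recomputes max(old, rhs) to recover it.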

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  bool PostOpMinMax = false;
  unsigned PostOp = 0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order, Scope);
    Load->setVolatile(E->isVolatile());
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
                                                 : llvm::Instruction::Add;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
                                             : llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
                                                 : llvm::Instruction::Sub;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
                                             : llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_min_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
                                                  : llvm::AtomicRMWInst::UMin;
    break;

  case AtomicExpr::AO__atomic_max_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
                                                  : llvm::AtomicRMWInst::UMax;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOpMinMax)
    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
                                  E->getValueType()->isSignedIntegerType(),
                                  RMWI, LoadVal1);
  else if (PostOp)
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
                                     LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  CGF.Builder.CreateStore(Result, Dest);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static Address
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();

  // LLVM atomic instructions always have synch scope. If clang atomic
  // expression has no scope operand, use default LLVM synch scope.
  if (!ScopeModel) {
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
    return;
  }

  // Handle constant scope.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
        CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
        Order, CGF.CGM.getLLVMContext());
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SCID);
    return;
  }

  // Handle non-constant scope.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If unsupported synch scope is encountered at run time, assume a fallback
  // synch scope value.
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order,
                 CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
                                                         ScopeModel->map(S),
                                                         Order,
                                                         CGF.getLLVMContext()));
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}
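
// On a target such as AMDGPU, an OpenCL memory_scope_work_group operand maps
// to the "workgroup" LLVM synchronization scope through the target hooks used
// above; targets without finer-grained scopes simply map every scope to the
// default system scope.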

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *ITy = llvm::IntegerType::get(CGF.getLLVMContext(), SizeInBits);
    Address Ptr = Address(CGF.Builder.CreateBitCast(Val, ITy->getPointerTo()),
                          ITy, Align);
    Val = CGF.EmitLoadOfScalar(Ptr, false,
                               CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
                         CGF.getContext().VoidPtrTy);
  }
}
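
// Optimized libcalls such as __atomic_fetch_add_4 take the operand by value
// as a suitably sized integer, while the generic __atomic_* calls take a
// void* pointing at the operand; this helper prepares the argument either
// way.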

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  Address Ptr = EmitPointerWithAlignment(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = TInfo.Width.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();

  bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
  bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
  bool UseLibcall = Misaligned | Oversized;
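  // For example, a 16-byte _Atomic struct on a target whose maximum inline
  // atomic width is 8 bytes, or an atomic object at an underaligned address,
  // ends up going through the __atomic_* library calls rather than inline
  // instructions.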
832a9ac8606Spatrick   bool ShouldCastToIntPtrTy = true;
833e5dd7070Spatrick 
834a9ac8606Spatrick   CharUnits MaxInlineWidth =
835a9ac8606Spatrick       getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
836a9ac8606Spatrick 
837a9ac8606Spatrick   DiagnosticsEngine &Diags = CGM.getDiags();
838a9ac8606Spatrick 
839a9ac8606Spatrick   if (Misaligned) {
840a9ac8606Spatrick     Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
841a9ac8606Spatrick         << (int)TInfo.Width.getQuantity()
842a9ac8606Spatrick         << (int)Ptr.getAlignment().getQuantity();
843a9ac8606Spatrick   }
844a9ac8606Spatrick 
845a9ac8606Spatrick   if (Oversized) {
846a9ac8606Spatrick     Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
847a9ac8606Spatrick         << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
848e5dd7070Spatrick   }
849e5dd7070Spatrick 
850e5dd7070Spatrick   llvm::Value *Order = EmitScalarExpr(E->getOrder());
851e5dd7070Spatrick   llvm::Value *Scope =
852e5dd7070Spatrick       E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
853e5dd7070Spatrick 
854e5dd7070Spatrick   switch (E->getOp()) {
855e5dd7070Spatrick   case AtomicExpr::AO__c11_atomic_init:
856e5dd7070Spatrick   case AtomicExpr::AO__opencl_atomic_init:
857e5dd7070Spatrick     llvm_unreachable("Already handled above with EmitAtomicInit!");
858e5dd7070Spatrick 
859e5dd7070Spatrick   case AtomicExpr::AO__c11_atomic_load:
860e5dd7070Spatrick   case AtomicExpr::AO__opencl_atomic_load:
861*12c85518Srobert   case AtomicExpr::AO__hip_atomic_load:
862e5dd7070Spatrick   case AtomicExpr::AO__atomic_load_n:
863e5dd7070Spatrick     break;
864e5dd7070Spatrick 
865e5dd7070Spatrick   case AtomicExpr::AO__atomic_load:
866e5dd7070Spatrick     Dest = EmitPointerWithAlignment(E->getVal1());
867e5dd7070Spatrick     break;
868e5dd7070Spatrick 
869e5dd7070Spatrick   case AtomicExpr::AO__atomic_store:
870e5dd7070Spatrick     Val1 = EmitPointerWithAlignment(E->getVal1());
871e5dd7070Spatrick     break;
872e5dd7070Spatrick 
873e5dd7070Spatrick   case AtomicExpr::AO__atomic_exchange:
874e5dd7070Spatrick     Val1 = EmitPointerWithAlignment(E->getVal1());
875e5dd7070Spatrick     Dest = EmitPointerWithAlignment(E->getVal2());
876e5dd7070Spatrick     break;
877e5dd7070Spatrick 
878e5dd7070Spatrick   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
879e5dd7070Spatrick   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
880e5dd7070Spatrick   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
881*12c85518Srobert   case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
882e5dd7070Spatrick   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
883*12c85518Srobert   case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
884e5dd7070Spatrick   case AtomicExpr::AO__atomic_compare_exchange_n:
885e5dd7070Spatrick   case AtomicExpr::AO__atomic_compare_exchange:
886e5dd7070Spatrick     Val1 = EmitPointerWithAlignment(E->getVal1());
887e5dd7070Spatrick     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
888e5dd7070Spatrick       Val2 = EmitPointerWithAlignment(E->getVal2());
889e5dd7070Spatrick     else
890e5dd7070Spatrick       Val2 = EmitValToTemp(*this, E->getVal2());
891e5dd7070Spatrick     OrderFail = EmitScalarExpr(E->getOrderFail());
892e5dd7070Spatrick     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
893e5dd7070Spatrick         E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
894e5dd7070Spatrick       IsWeak = EmitScalarExpr(E->getWeak());
895e5dd7070Spatrick     break;
896e5dd7070Spatrick 
897e5dd7070Spatrick   case AtomicExpr::AO__c11_atomic_fetch_add:
898e5dd7070Spatrick   case AtomicExpr::AO__c11_atomic_fetch_sub:
899*12c85518Srobert   case AtomicExpr::AO__hip_atomic_fetch_add:
900e5dd7070Spatrick   case AtomicExpr::AO__opencl_atomic_fetch_add:
901e5dd7070Spatrick   case AtomicExpr::AO__opencl_atomic_fetch_sub:
902e5dd7070Spatrick     if (MemTy->isPointerType()) {
903e5dd7070Spatrick       // For pointer arithmetic, we're required to do a bit of math:
904e5dd7070Spatrick       // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
905e5dd7070Spatrick       // ... but only for the C11 builtins. The GNU builtins expect the
906e5dd7070Spatrick       // user to multiply by sizeof(T).
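      // Illustrative example (not part of the upstream logic): given
      //   _Atomic(int *) p;  __c11_atomic_fetch_add(&p, 1, __ATOMIC_SEQ_CST);
      // the code below scales the operand so the stored pointer advances by
      // sizeof(int) bytes, whereas the GNU form
      //   __atomic_fetch_add(&q, sizeof(int), __ATOMIC_SEQ_CST);
      // expects the caller to have done that multiplication already.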
907e5dd7070Spatrick       QualType Val1Ty = E->getVal1()->getType();
908e5dd7070Spatrick       llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
909e5dd7070Spatrick       CharUnits PointeeIncAmt =
910e5dd7070Spatrick           getContext().getTypeSizeInChars(MemTy->getPointeeType());
911e5dd7070Spatrick       Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
912e5dd7070Spatrick       auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
913e5dd7070Spatrick       Val1 = Temp;
914e5dd7070Spatrick       EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
915e5dd7070Spatrick       break;
916e5dd7070Spatrick     }
917*12c85518Srobert     [[fallthrough]];
918e5dd7070Spatrick   case AtomicExpr::AO__atomic_fetch_add:
919e5dd7070Spatrick   case AtomicExpr::AO__atomic_fetch_sub:
920e5dd7070Spatrick   case AtomicExpr::AO__atomic_add_fetch:
921e5dd7070Spatrick   case AtomicExpr::AO__atomic_sub_fetch:
922a9ac8606Spatrick     ShouldCastToIntPtrTy = !MemTy->isFloatingType();
923*12c85518Srobert     [[fallthrough]];
924a9ac8606Spatrick 
925e5dd7070Spatrick   case AtomicExpr::AO__c11_atomic_store:
926e5dd7070Spatrick   case AtomicExpr::AO__c11_atomic_exchange:
927e5dd7070Spatrick   case AtomicExpr::AO__opencl_atomic_store:
928*12c85518Srobert   case AtomicExpr::AO__hip_atomic_store:
929e5dd7070Spatrick   case AtomicExpr::AO__opencl_atomic_exchange:
930*12c85518Srobert   case AtomicExpr::AO__hip_atomic_exchange:
931e5dd7070Spatrick   case AtomicExpr::AO__atomic_store_n:
932e5dd7070Spatrick   case AtomicExpr::AO__atomic_exchange_n:
933e5dd7070Spatrick   case AtomicExpr::AO__c11_atomic_fetch_and:
934e5dd7070Spatrick   case AtomicExpr::AO__c11_atomic_fetch_or:
935e5dd7070Spatrick   case AtomicExpr::AO__c11_atomic_fetch_xor:
936*12c85518Srobert   case AtomicExpr::AO__c11_atomic_fetch_nand:
937e5dd7070Spatrick   case AtomicExpr::AO__c11_atomic_fetch_max:
938e5dd7070Spatrick   case AtomicExpr::AO__c11_atomic_fetch_min:
939e5dd7070Spatrick   case AtomicExpr::AO__opencl_atomic_fetch_and:
940e5dd7070Spatrick   case AtomicExpr::AO__opencl_atomic_fetch_or:
941e5dd7070Spatrick   case AtomicExpr::AO__opencl_atomic_fetch_xor:
942e5dd7070Spatrick   case AtomicExpr::AO__opencl_atomic_fetch_min:
943e5dd7070Spatrick   case AtomicExpr::AO__opencl_atomic_fetch_max:
944e5dd7070Spatrick   case AtomicExpr::AO__atomic_fetch_and:
945*12c85518Srobert   case AtomicExpr::AO__hip_atomic_fetch_and:
946e5dd7070Spatrick   case AtomicExpr::AO__atomic_fetch_or:
947*12c85518Srobert   case AtomicExpr::AO__hip_atomic_fetch_or:
948e5dd7070Spatrick   case AtomicExpr::AO__atomic_fetch_xor:
949*12c85518Srobert   case AtomicExpr::AO__hip_atomic_fetch_xor:
950e5dd7070Spatrick   case AtomicExpr::AO__atomic_fetch_nand:
951e5dd7070Spatrick   case AtomicExpr::AO__atomic_and_fetch:
952e5dd7070Spatrick   case AtomicExpr::AO__atomic_or_fetch:
953e5dd7070Spatrick   case AtomicExpr::AO__atomic_xor_fetch:
954e5dd7070Spatrick   case AtomicExpr::AO__atomic_nand_fetch:
955e5dd7070Spatrick   case AtomicExpr::AO__atomic_max_fetch:
956e5dd7070Spatrick   case AtomicExpr::AO__atomic_min_fetch:
957e5dd7070Spatrick   case AtomicExpr::AO__atomic_fetch_max:
958*12c85518Srobert   case AtomicExpr::AO__hip_atomic_fetch_max:
959e5dd7070Spatrick   case AtomicExpr::AO__atomic_fetch_min:
960*12c85518Srobert   case AtomicExpr::AO__hip_atomic_fetch_min:
961e5dd7070Spatrick     Val1 = EmitValToTemp(*this, E->getVal1());
962e5dd7070Spatrick     break;
963e5dd7070Spatrick   }
964e5dd7070Spatrick 
965e5dd7070Spatrick   QualType RValTy = E->getType().getUnqualifiedType();
966e5dd7070Spatrick 
967e5dd7070Spatrick   // The inlined atomics only function on iN types, where N is a power of 2. We
968e5dd7070Spatrick   // need to make sure (via temporaries if necessary) that all incoming values
969e5dd7070Spatrick   // are compatible.
970e5dd7070Spatrick   LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
971e5dd7070Spatrick   AtomicInfo Atomics(*this, AtomicVal);
972e5dd7070Spatrick 
973a9ac8606Spatrick   if (ShouldCastToIntPtrTy) {
974e5dd7070Spatrick     Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
975a9ac8606Spatrick     if (Val1.isValid())
976a9ac8606Spatrick       Val1 = Atomics.convertToAtomicIntPointer(Val1);
977a9ac8606Spatrick     if (Val2.isValid())
978a9ac8606Spatrick       Val2 = Atomics.convertToAtomicIntPointer(Val2);
979a9ac8606Spatrick   }
980a9ac8606Spatrick   if (Dest.isValid()) {
981a9ac8606Spatrick     if (ShouldCastToIntPtrTy)
982e5dd7070Spatrick       Dest = Atomics.emitCastToAtomicIntPointer(Dest);
983a9ac8606Spatrick   } else if (E->isCmpXChg())
984e5dd7070Spatrick     Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
985a9ac8606Spatrick   else if (!RValTy->isVoidType()) {
986a9ac8606Spatrick     Dest = Atomics.CreateTempAlloca();
987a9ac8606Spatrick     if (ShouldCastToIntPtrTy)
988a9ac8606Spatrick       Dest = Atomics.emitCastToAtomicIntPointer(Dest);
989a9ac8606Spatrick   }
990e5dd7070Spatrick 
991e5dd7070Spatrick   // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
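  // A rough sketch of the two libcall flavours decided below (assumed from the
  // GCC libatomic ABI referenced above): the generic entry points take an
  // explicit size and work through pointers, e.g.
  //   void __atomic_load(size_t size, void *mem, void *ret, int order);
  // while the size-suffixed forms pass and return values directly, e.g.
  //   unsigned int __atomic_load_4(void *mem, int order);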
992e5dd7070Spatrick   if (UseLibcall) {
993e5dd7070Spatrick     bool UseOptimizedLibcall = false;
994e5dd7070Spatrick     switch (E->getOp()) {
995e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_init:
996e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_init:
997e5dd7070Spatrick       llvm_unreachable("Already handled above with EmitAtomicInit!");
998e5dd7070Spatrick 
999e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_fetch_add:
1000e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_fetch_add:
1001e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_add:
1002*12c85518Srobert     case AtomicExpr::AO__hip_atomic_fetch_add:
1003e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_fetch_and:
1004e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_fetch_and:
1005*12c85518Srobert     case AtomicExpr::AO__hip_atomic_fetch_and:
1006e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_and:
1007e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_fetch_or:
1008e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_fetch_or:
1009*12c85518Srobert     case AtomicExpr::AO__hip_atomic_fetch_or:
1010e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_or:
1011*12c85518Srobert     case AtomicExpr::AO__c11_atomic_fetch_nand:
1012e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_nand:
1013e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_fetch_sub:
1014e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_fetch_sub:
1015e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_sub:
1016e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_fetch_xor:
1017e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_fetch_xor:
1018e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_fetch_min:
1019e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_fetch_max:
1020e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_xor:
1021*12c85518Srobert     case AtomicExpr::AO__hip_atomic_fetch_xor:
1022e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_fetch_max:
1023e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_fetch_min:
1024e5dd7070Spatrick     case AtomicExpr::AO__atomic_add_fetch:
1025e5dd7070Spatrick     case AtomicExpr::AO__atomic_and_fetch:
1026e5dd7070Spatrick     case AtomicExpr::AO__atomic_nand_fetch:
1027e5dd7070Spatrick     case AtomicExpr::AO__atomic_or_fetch:
1028e5dd7070Spatrick     case AtomicExpr::AO__atomic_sub_fetch:
1029e5dd7070Spatrick     case AtomicExpr::AO__atomic_xor_fetch:
1030e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_max:
1031*12c85518Srobert     case AtomicExpr::AO__hip_atomic_fetch_max:
1032e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_min:
1033*12c85518Srobert     case AtomicExpr::AO__hip_atomic_fetch_min:
1034e5dd7070Spatrick     case AtomicExpr::AO__atomic_max_fetch:
1035e5dd7070Spatrick     case AtomicExpr::AO__atomic_min_fetch:
1036e5dd7070Spatrick       // For these, only library calls for certain sizes exist.
1037e5dd7070Spatrick       UseOptimizedLibcall = true;
1038e5dd7070Spatrick       break;
1039e5dd7070Spatrick 
1040e5dd7070Spatrick     case AtomicExpr::AO__atomic_load:
1041e5dd7070Spatrick     case AtomicExpr::AO__atomic_store:
1042e5dd7070Spatrick     case AtomicExpr::AO__atomic_exchange:
1043e5dd7070Spatrick     case AtomicExpr::AO__atomic_compare_exchange:
1044e5dd7070Spatrick       // Use the generic version if we don't know that the operand will be
1045e5dd7070Spatrick       // suitably aligned for the optimized version.
1046e5dd7070Spatrick       if (Misaligned)
1047e5dd7070Spatrick         break;
1048*12c85518Srobert       [[fallthrough]];
1049e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_load:
1050e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_store:
1051e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_exchange:
1052e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1053e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1054*12c85518Srobert     case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
1055e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_load:
1056*12c85518Srobert     case AtomicExpr::AO__hip_atomic_load:
1057e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_store:
1058*12c85518Srobert     case AtomicExpr::AO__hip_atomic_store:
1059e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_exchange:
1060*12c85518Srobert     case AtomicExpr::AO__hip_atomic_exchange:
1061e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1062*12c85518Srobert     case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
1063e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1064e5dd7070Spatrick     case AtomicExpr::AO__atomic_load_n:
1065e5dd7070Spatrick     case AtomicExpr::AO__atomic_store_n:
1066e5dd7070Spatrick     case AtomicExpr::AO__atomic_exchange_n:
1067e5dd7070Spatrick     case AtomicExpr::AO__atomic_compare_exchange_n:
1068e5dd7070Spatrick       // Only use optimized library calls for sizes for which they exist.
1069e5dd7070Spatrick       // FIXME: Size == 16 optimized library functions exist too.
1070e5dd7070Spatrick       if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
1071e5dd7070Spatrick         UseOptimizedLibcall = true;
1072e5dd7070Spatrick       break;
1073e5dd7070Spatrick     }
1074e5dd7070Spatrick 
1075e5dd7070Spatrick     CallArgList Args;
1076e5dd7070Spatrick     if (!UseOptimizedLibcall) {
1077e5dd7070Spatrick       // For non-optimized library calls, the size is the first parameter
1078e5dd7070Spatrick       Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
1079e5dd7070Spatrick                getContext().getSizeType());
1080e5dd7070Spatrick     }
1081e5dd7070Spatrick     // Atomic address is the first or second parameter
1082e5dd7070Spatrick     // The OpenCL atomic library functions only accept pointer arguments to
1083e5dd7070Spatrick     // the generic address space.
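    // Illustrative example (assumption about a typical OpenCL lowering): a
    // pointer to a __global atomic_int is addrspacecast to the generic address
    // space here before being passed to an entry point such as
    // "__opencl_atomic_load", which is declared to take generic pointers.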
1084e5dd7070Spatrick     auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
1085e5dd7070Spatrick       if (!E->isOpenCL())
1086e5dd7070Spatrick         return V;
1087e5dd7070Spatrick       auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
1088e5dd7070Spatrick       if (AS == LangAS::opencl_generic)
1089e5dd7070Spatrick         return V;
1090e5dd7070Spatrick       auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
1091*12c85518Srobert       auto T = llvm::cast<llvm::PointerType>(V->getType());
1092*12c85518Srobert       auto *DestType = llvm::PointerType::getWithSamePointeeType(T, DestAS);
1093e5dd7070Spatrick 
1094e5dd7070Spatrick       return getTargetHooks().performAddrSpaceCast(
1095e5dd7070Spatrick           *this, V, AS, LangAS::opencl_generic, DestType, false);
1096e5dd7070Spatrick     };
1097e5dd7070Spatrick 
1098e5dd7070Spatrick     Args.add(RValue::get(CastToGenericAddrSpace(
1099e5dd7070Spatrick                  EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
1100e5dd7070Spatrick              getContext().VoidPtrTy);
1101e5dd7070Spatrick 
1102e5dd7070Spatrick     std::string LibCallName;
1103e5dd7070Spatrick     QualType LoweredMemTy =
1104e5dd7070Spatrick       MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
1105e5dd7070Spatrick     QualType RetTy;
1106e5dd7070Spatrick     bool HaveRetTy = false;
1107e5dd7070Spatrick     llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
1108e5dd7070Spatrick     bool PostOpMinMax = false;
1109e5dd7070Spatrick     switch (E->getOp()) {
1110e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_init:
1111e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_init:
1112e5dd7070Spatrick       llvm_unreachable("Already handled!");
1113e5dd7070Spatrick 
1114e5dd7070Spatrick     // There is only one libcall for compare and exchange, because there is no
1115e5dd7070Spatrick     // optimisation benefit possible from a libcall version of a weak compare
1116e5dd7070Spatrick     // and exchange.
1117e5dd7070Spatrick     // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1118e5dd7070Spatrick     //                                void *desired, int success, int failure)
1119e5dd7070Spatrick     // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
1120e5dd7070Spatrick     //                                  int success, int failure)
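    // Illustrative sketch (not upstream code) of the two shapes, following the
    // signatures above: a 4-byte CAS that still needs a libcall is lowered as
    //   bool ok = __atomic_compare_exchange_4(mem, &expected, desired,
    //                                         success, failure);
    // while an oversized or under-aligned object falls back to
    //   bool ok = __atomic_compare_exchange(size, mem, &expected, &desired,
    //                                       success, failure);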
1121e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1122e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1123e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1124*12c85518Srobert     case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
1125e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1126*12c85518Srobert     case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
1127e5dd7070Spatrick     case AtomicExpr::AO__atomic_compare_exchange:
1128e5dd7070Spatrick     case AtomicExpr::AO__atomic_compare_exchange_n:
1129e5dd7070Spatrick       LibCallName = "__atomic_compare_exchange";
1130e5dd7070Spatrick       RetTy = getContext().BoolTy;
1131e5dd7070Spatrick       HaveRetTy = true;
1132e5dd7070Spatrick       Args.add(
1133e5dd7070Spatrick           RValue::get(CastToGenericAddrSpace(
1134e5dd7070Spatrick               EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
1135e5dd7070Spatrick           getContext().VoidPtrTy);
1136e5dd7070Spatrick       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
1137a9ac8606Spatrick                         MemTy, E->getExprLoc(), TInfo.Width);
1138e5dd7070Spatrick       Args.add(RValue::get(Order), getContext().IntTy);
1139e5dd7070Spatrick       Order = OrderFail;
1140e5dd7070Spatrick       break;
1141e5dd7070Spatrick     // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1142e5dd7070Spatrick     //                        int order)
1143e5dd7070Spatrick     // T __atomic_exchange_N(T *mem, T val, int order)
1144e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_exchange:
1145e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_exchange:
1146e5dd7070Spatrick     case AtomicExpr::AO__atomic_exchange_n:
1147e5dd7070Spatrick     case AtomicExpr::AO__atomic_exchange:
1148*12c85518Srobert     case AtomicExpr::AO__hip_atomic_exchange:
1149e5dd7070Spatrick       LibCallName = "__atomic_exchange";
1150e5dd7070Spatrick       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1151a9ac8606Spatrick                         MemTy, E->getExprLoc(), TInfo.Width);
1152e5dd7070Spatrick       break;
1153e5dd7070Spatrick     // void __atomic_store(size_t size, void *mem, void *val, int order)
1154e5dd7070Spatrick     // void __atomic_store_N(T *mem, T val, int order)
1155e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_store:
1156e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_store:
1157*12c85518Srobert     case AtomicExpr::AO__hip_atomic_store:
1158e5dd7070Spatrick     case AtomicExpr::AO__atomic_store:
1159e5dd7070Spatrick     case AtomicExpr::AO__atomic_store_n:
1160e5dd7070Spatrick       LibCallName = "__atomic_store";
1161e5dd7070Spatrick       RetTy = getContext().VoidTy;
1162e5dd7070Spatrick       HaveRetTy = true;
1163e5dd7070Spatrick       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1164a9ac8606Spatrick                         MemTy, E->getExprLoc(), TInfo.Width);
1165e5dd7070Spatrick       break;
1166e5dd7070Spatrick     // void __atomic_load(size_t size, void *mem, void *return, int order)
1167e5dd7070Spatrick     // T __atomic_load_N(T *mem, int order)
1168e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_load:
1169e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_load:
1170*12c85518Srobert     case AtomicExpr::AO__hip_atomic_load:
1171e5dd7070Spatrick     case AtomicExpr::AO__atomic_load:
1172e5dd7070Spatrick     case AtomicExpr::AO__atomic_load_n:
1173e5dd7070Spatrick       LibCallName = "__atomic_load";
1174e5dd7070Spatrick       break;
1175e5dd7070Spatrick     // T __atomic_add_fetch_N(T *mem, T val, int order)
1176e5dd7070Spatrick     // T __atomic_fetch_add_N(T *mem, T val, int order)
1177e5dd7070Spatrick     case AtomicExpr::AO__atomic_add_fetch:
1178e5dd7070Spatrick       PostOp = llvm::Instruction::Add;
1179*12c85518Srobert       [[fallthrough]];
1180e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_fetch_add:
1181e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_fetch_add:
1182e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_add:
1183*12c85518Srobert     case AtomicExpr::AO__hip_atomic_fetch_add:
1184e5dd7070Spatrick       LibCallName = "__atomic_fetch_add";
1185e5dd7070Spatrick       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1186a9ac8606Spatrick                         LoweredMemTy, E->getExprLoc(), TInfo.Width);
1187e5dd7070Spatrick       break;
1188e5dd7070Spatrick     // T __atomic_and_fetch_N(T *mem, T val, int order)
1189e5dd7070Spatrick     // T __atomic_fetch_and_N(T *mem, T val, int order)
1190e5dd7070Spatrick     case AtomicExpr::AO__atomic_and_fetch:
1191e5dd7070Spatrick       PostOp = llvm::Instruction::And;
1192*12c85518Srobert       [[fallthrough]];
1193e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_fetch_and:
1194e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_fetch_and:
1195*12c85518Srobert     case AtomicExpr::AO__hip_atomic_fetch_and:
1196e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_and:
1197e5dd7070Spatrick       LibCallName = "__atomic_fetch_and";
1198e5dd7070Spatrick       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1199a9ac8606Spatrick                         MemTy, E->getExprLoc(), TInfo.Width);
1200e5dd7070Spatrick       break;
1201e5dd7070Spatrick     // T __atomic_or_fetch_N(T *mem, T val, int order)
1202e5dd7070Spatrick     // T __atomic_fetch_or_N(T *mem, T val, int order)
1203e5dd7070Spatrick     case AtomicExpr::AO__atomic_or_fetch:
1204e5dd7070Spatrick       PostOp = llvm::Instruction::Or;
1205*12c85518Srobert       [[fallthrough]];
1206e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_fetch_or:
1207e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_fetch_or:
1208*12c85518Srobert     case AtomicExpr::AO__hip_atomic_fetch_or:
1209e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_or:
1210e5dd7070Spatrick       LibCallName = "__atomic_fetch_or";
1211e5dd7070Spatrick       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1212a9ac8606Spatrick                         MemTy, E->getExprLoc(), TInfo.Width);
1213e5dd7070Spatrick       break;
1214e5dd7070Spatrick     // T __atomic_sub_fetch_N(T *mem, T val, int order)
1215e5dd7070Spatrick     // T __atomic_fetch_sub_N(T *mem, T val, int order)
1216e5dd7070Spatrick     case AtomicExpr::AO__atomic_sub_fetch:
1217e5dd7070Spatrick       PostOp = llvm::Instruction::Sub;
1218*12c85518Srobert       [[fallthrough]];
1219e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_fetch_sub:
1220e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_fetch_sub:
1221e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_sub:
1222e5dd7070Spatrick       LibCallName = "__atomic_fetch_sub";
1223e5dd7070Spatrick       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1224a9ac8606Spatrick                         LoweredMemTy, E->getExprLoc(), TInfo.Width);
1225e5dd7070Spatrick       break;
1226e5dd7070Spatrick     // T __atomic_xor_fetch_N(T *mem, T val, int order)
1227e5dd7070Spatrick     // T __atomic_fetch_xor_N(T *mem, T val, int order)
1228e5dd7070Spatrick     case AtomicExpr::AO__atomic_xor_fetch:
1229e5dd7070Spatrick       PostOp = llvm::Instruction::Xor;
1230*12c85518Srobert       [[fallthrough]];
1231e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_fetch_xor:
1232e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_fetch_xor:
1233*12c85518Srobert     case AtomicExpr::AO__hip_atomic_fetch_xor:
1234e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_xor:
1235e5dd7070Spatrick       LibCallName = "__atomic_fetch_xor";
1236e5dd7070Spatrick       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1237a9ac8606Spatrick                         MemTy, E->getExprLoc(), TInfo.Width);
1238e5dd7070Spatrick       break;
1239e5dd7070Spatrick     case AtomicExpr::AO__atomic_min_fetch:
1240e5dd7070Spatrick       PostOpMinMax = true;
1241*12c85518Srobert       [[fallthrough]];
1242e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_fetch_min:
1243e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_min:
1244*12c85518Srobert     case AtomicExpr::AO__hip_atomic_fetch_min:
1245e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_fetch_min:
1246e5dd7070Spatrick       LibCallName = E->getValueType()->isSignedIntegerType()
1247e5dd7070Spatrick                         ? "__atomic_fetch_min"
1248e5dd7070Spatrick                         : "__atomic_fetch_umin";
1249e5dd7070Spatrick       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1250a9ac8606Spatrick                         LoweredMemTy, E->getExprLoc(), TInfo.Width);
1251e5dd7070Spatrick       break;
1252e5dd7070Spatrick     case AtomicExpr::AO__atomic_max_fetch:
1253e5dd7070Spatrick       PostOpMinMax = true;
1254*12c85518Srobert       [[fallthrough]];
1255e5dd7070Spatrick     case AtomicExpr::AO__c11_atomic_fetch_max:
1256e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_max:
1257*12c85518Srobert     case AtomicExpr::AO__hip_atomic_fetch_max:
1258e5dd7070Spatrick     case AtomicExpr::AO__opencl_atomic_fetch_max:
1259e5dd7070Spatrick       LibCallName = E->getValueType()->isSignedIntegerType()
1260e5dd7070Spatrick                         ? "__atomic_fetch_max"
1261e5dd7070Spatrick                         : "__atomic_fetch_umax";
1262e5dd7070Spatrick       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1263a9ac8606Spatrick                         LoweredMemTy, E->getExprLoc(), TInfo.Width);
1264e5dd7070Spatrick       break;
1265e5dd7070Spatrick     // T __atomic_nand_fetch_N(T *mem, T val, int order)
1266e5dd7070Spatrick     // T __atomic_fetch_nand_N(T *mem, T val, int order)
1267e5dd7070Spatrick     case AtomicExpr::AO__atomic_nand_fetch:
1268e5dd7070Spatrick       PostOp = llvm::Instruction::And; // the NOT is special cased below
1269*12c85518Srobert       [[fallthrough]];
1270*12c85518Srobert     case AtomicExpr::AO__c11_atomic_fetch_nand:
1271e5dd7070Spatrick     case AtomicExpr::AO__atomic_fetch_nand:
1272e5dd7070Spatrick       LibCallName = "__atomic_fetch_nand";
1273e5dd7070Spatrick       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1274a9ac8606Spatrick                         MemTy, E->getExprLoc(), TInfo.Width);
1275e5dd7070Spatrick       break;
1276e5dd7070Spatrick     }
1277e5dd7070Spatrick 
1278e5dd7070Spatrick     if (E->isOpenCL()) {
1279e5dd7070Spatrick       LibCallName = std::string("__opencl") +
1280e5dd7070Spatrick           StringRef(LibCallName).drop_front(1).str();
1281e5dd7070Spatrick 
1282e5dd7070Spatrick     }
1283e5dd7070Spatrick     // Optimized functions have the size in their name.
1284e5dd7070Spatrick     if (UseOptimizedLibcall)
1285e5dd7070Spatrick       LibCallName += "_" + llvm::utostr(Size);
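    // For example (illustrative only): with Size == 4, "__atomic_fetch_add"
    // becomes "__atomic_fetch_add_4"; for an OpenCL op the rename above has
    // already produced "__opencl_atomic_fetch_add", giving
    // "__opencl_atomic_fetch_add_4".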
1286e5dd7070Spatrick     // By default, assume we return a value of the atomic type.
1287e5dd7070Spatrick     if (!HaveRetTy) {
1288e5dd7070Spatrick       if (UseOptimizedLibcall) {
1289e5dd7070Spatrick         // Value is returned directly.
1290e5dd7070Spatrick         // The function returns an appropriately sized integer type.
1291e5dd7070Spatrick         RetTy = getContext().getIntTypeForBitwidth(
1292a9ac8606Spatrick             getContext().toBits(TInfo.Width), /*Signed=*/false);
1293e5dd7070Spatrick       } else {
1294e5dd7070Spatrick         // Value is returned through parameter before the order.
1295e5dd7070Spatrick         RetTy = getContext().VoidTy;
1296e5dd7070Spatrick         Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
1297e5dd7070Spatrick                  getContext().VoidPtrTy);
1298e5dd7070Spatrick       }
1299e5dd7070Spatrick     }
1300e5dd7070Spatrick     // order is always the last parameter
1301e5dd7070Spatrick     Args.add(RValue::get(Order),
1302e5dd7070Spatrick              getContext().IntTy);
1303e5dd7070Spatrick     if (E->isOpenCL())
1304e5dd7070Spatrick       Args.add(RValue::get(Scope), getContext().IntTy);
1305e5dd7070Spatrick 
1306e5dd7070Spatrick     // PostOp is only needed for the atomic_*_fetch operations, and
1307e5dd7070Spatrick     // thus is only needed for and implemented in the
1308e5dd7070Spatrick     // UseOptimizedLibcall codepath.
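    // For instance (illustrative): "__atomic_add_fetch" is not emitted as its
    // own optimized libcall here; the code below calls "__atomic_fetch_add_N"
    // and then re-applies the addition to the returned old value (nand
    // additionally applies the final NOT).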
1309e5dd7070Spatrick     assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));
1310e5dd7070Spatrick 
1311e5dd7070Spatrick     RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1312e5dd7070Spatrick     // The value is returned directly from the libcall.
1313e5dd7070Spatrick     if (E->isCmpXChg())
1314e5dd7070Spatrick       return Res;
1315e5dd7070Spatrick 
1316e5dd7070Spatrick     // The value is returned directly for optimized libcalls but the expr
1317e5dd7070Spatrick     // provided an out-param.
1318e5dd7070Spatrick     if (UseOptimizedLibcall && Res.getScalarVal()) {
1319e5dd7070Spatrick       llvm::Value *ResVal = Res.getScalarVal();
1320e5dd7070Spatrick       if (PostOpMinMax) {
1321e5dd7070Spatrick         llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1322e5dd7070Spatrick         ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
1323e5dd7070Spatrick                                       E->getValueType()->isSignedIntegerType(),
1324e5dd7070Spatrick                                       ResVal, LoadVal1);
1325e5dd7070Spatrick       } else if (PostOp) {
1326e5dd7070Spatrick         llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1327e5dd7070Spatrick         ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1328e5dd7070Spatrick       }
1329e5dd7070Spatrick       if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1330e5dd7070Spatrick         ResVal = Builder.CreateNot(ResVal);
1331e5dd7070Spatrick 
1332e5dd7070Spatrick       Builder.CreateStore(
1333*12c85518Srobert           ResVal, Builder.CreateElementBitCast(Dest, ResVal->getType()));
1334e5dd7070Spatrick     }
1335e5dd7070Spatrick 
1336e5dd7070Spatrick     if (RValTy->isVoidType())
1337e5dd7070Spatrick       return RValue::get(nullptr);
1338e5dd7070Spatrick 
1339e5dd7070Spatrick     return convertTempToRValue(
1340*12c85518Srobert         Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
1341e5dd7070Spatrick         RValTy, E->getExprLoc());
1342e5dd7070Spatrick   }
1343e5dd7070Spatrick 
1344e5dd7070Spatrick   bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1345e5dd7070Spatrick                  E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1346*12c85518Srobert                  E->getOp() == AtomicExpr::AO__hip_atomic_store ||
1347e5dd7070Spatrick                  E->getOp() == AtomicExpr::AO__atomic_store ||
1348e5dd7070Spatrick                  E->getOp() == AtomicExpr::AO__atomic_store_n;
1349e5dd7070Spatrick   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1350e5dd7070Spatrick                 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1351*12c85518Srobert                 E->getOp() == AtomicExpr::AO__hip_atomic_load ||
1352e5dd7070Spatrick                 E->getOp() == AtomicExpr::AO__atomic_load ||
1353e5dd7070Spatrick                 E->getOp() == AtomicExpr::AO__atomic_load_n;
1354e5dd7070Spatrick 
1355e5dd7070Spatrick   if (isa<llvm::ConstantInt>(Order)) {
1356e5dd7070Spatrick     auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1357e5dd7070Spatrick     // We should not ever get to a case where the ordering isn't a valid C ABI
1358e5dd7070Spatrick     // value, but it's hard to enforce that in general.
1359e5dd7070Spatrick     if (llvm::isValidAtomicOrderingCABI(ord))
1360e5dd7070Spatrick       switch ((llvm::AtomicOrderingCABI)ord) {
1361e5dd7070Spatrick       case llvm::AtomicOrderingCABI::relaxed:
1362e5dd7070Spatrick         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1363e5dd7070Spatrick                      llvm::AtomicOrdering::Monotonic, Scope);
1364e5dd7070Spatrick         break;
1365e5dd7070Spatrick       case llvm::AtomicOrderingCABI::consume:
1366e5dd7070Spatrick       case llvm::AtomicOrderingCABI::acquire:
1367e5dd7070Spatrick         if (IsStore)
1368e5dd7070Spatrick           break; // Avoid crashing on code with undefined behavior
1369e5dd7070Spatrick         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1370e5dd7070Spatrick                      llvm::AtomicOrdering::Acquire, Scope);
1371e5dd7070Spatrick         break;
1372e5dd7070Spatrick       case llvm::AtomicOrderingCABI::release:
1373e5dd7070Spatrick         if (IsLoad)
1374e5dd7070Spatrick           break; // Avoid crashing on code with undefined behavior
1375e5dd7070Spatrick         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1376e5dd7070Spatrick                      llvm::AtomicOrdering::Release, Scope);
1377e5dd7070Spatrick         break;
1378e5dd7070Spatrick       case llvm::AtomicOrderingCABI::acq_rel:
1379e5dd7070Spatrick         if (IsLoad || IsStore)
1380e5dd7070Spatrick           break; // Avoid crashing on code with undefined behavior
1381e5dd7070Spatrick         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1382e5dd7070Spatrick                      llvm::AtomicOrdering::AcquireRelease, Scope);
1383e5dd7070Spatrick         break;
1384e5dd7070Spatrick       case llvm::AtomicOrderingCABI::seq_cst:
1385e5dd7070Spatrick         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1386e5dd7070Spatrick                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1387e5dd7070Spatrick         break;
1388e5dd7070Spatrick       }
1389e5dd7070Spatrick     if (RValTy->isVoidType())
1390e5dd7070Spatrick       return RValue::get(nullptr);
1391e5dd7070Spatrick 
1392e5dd7070Spatrick     return convertTempToRValue(
1393*12c85518Srobert         Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
1394e5dd7070Spatrick         RValTy, E->getExprLoc());
1395e5dd7070Spatrick   }
1396e5dd7070Spatrick 
1397e5dd7070Spatrick   // Long case, when Order isn't obviously constant.
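  // Rough shape of what is emitted below (illustrative, not verbatim IR): the
  // runtime ordering value is switched on and each reachable case re-emits the
  // operation with the corresponding static ordering, e.g.
  //   switch (order) {
  //   default:                 /* monotonic */ break;
  //   case __ATOMIC_CONSUME:
  //   case __ATOMIC_ACQUIRE:   /* acquire  */ break;
  //   case __ATOMIC_RELEASE:   /* release  */ break;
  //   case __ATOMIC_ACQ_REL:   /* acq_rel  */ break;
  //   case __ATOMIC_SEQ_CST:   /* seq_cst  */ break;
  //   }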
1398e5dd7070Spatrick 
1399e5dd7070Spatrick   // Create all the relevant BB's
1400e5dd7070Spatrick   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1401e5dd7070Spatrick                    *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1402e5dd7070Spatrick                    *SeqCstBB = nullptr;
1403e5dd7070Spatrick   MonotonicBB = createBasicBlock("monotonic", CurFn);
1404e5dd7070Spatrick   if (!IsStore)
1405e5dd7070Spatrick     AcquireBB = createBasicBlock("acquire", CurFn);
1406e5dd7070Spatrick   if (!IsLoad)
1407e5dd7070Spatrick     ReleaseBB = createBasicBlock("release", CurFn);
1408e5dd7070Spatrick   if (!IsLoad && !IsStore)
1409e5dd7070Spatrick     AcqRelBB = createBasicBlock("acqrel", CurFn);
1410e5dd7070Spatrick   SeqCstBB = createBasicBlock("seqcst", CurFn);
1411e5dd7070Spatrick   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1412e5dd7070Spatrick 
1413e5dd7070Spatrick   // Create the switch for the split
1414e5dd7070Spatrick   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1415e5dd7070Spatrick   // doesn't matter unless someone is crazy enough to use something that
1416e5dd7070Spatrick   // doesn't fold to a constant for the ordering.
1417e5dd7070Spatrick   Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1418e5dd7070Spatrick   llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1419e5dd7070Spatrick 
1420e5dd7070Spatrick   // Emit all the different atomics
1421e5dd7070Spatrick   Builder.SetInsertPoint(MonotonicBB);
1422e5dd7070Spatrick   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1423e5dd7070Spatrick                llvm::AtomicOrdering::Monotonic, Scope);
1424e5dd7070Spatrick   Builder.CreateBr(ContBB);
1425e5dd7070Spatrick   if (!IsStore) {
1426e5dd7070Spatrick     Builder.SetInsertPoint(AcquireBB);
1427e5dd7070Spatrick     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1428e5dd7070Spatrick                  llvm::AtomicOrdering::Acquire, Scope);
1429e5dd7070Spatrick     Builder.CreateBr(ContBB);
1430e5dd7070Spatrick     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1431e5dd7070Spatrick                 AcquireBB);
1432e5dd7070Spatrick     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1433e5dd7070Spatrick                 AcquireBB);
1434e5dd7070Spatrick   }
1435e5dd7070Spatrick   if (!IsLoad) {
1436e5dd7070Spatrick     Builder.SetInsertPoint(ReleaseBB);
1437e5dd7070Spatrick     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1438e5dd7070Spatrick                  llvm::AtomicOrdering::Release, Scope);
1439e5dd7070Spatrick     Builder.CreateBr(ContBB);
1440e5dd7070Spatrick     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1441e5dd7070Spatrick                 ReleaseBB);
1442e5dd7070Spatrick   }
1443e5dd7070Spatrick   if (!IsLoad && !IsStore) {
1444e5dd7070Spatrick     Builder.SetInsertPoint(AcqRelBB);
1445e5dd7070Spatrick     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1446e5dd7070Spatrick                  llvm::AtomicOrdering::AcquireRelease, Scope);
1447e5dd7070Spatrick     Builder.CreateBr(ContBB);
1448e5dd7070Spatrick     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1449e5dd7070Spatrick                 AcqRelBB);
1450e5dd7070Spatrick   }
1451e5dd7070Spatrick   Builder.SetInsertPoint(SeqCstBB);
1452e5dd7070Spatrick   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1453e5dd7070Spatrick                llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1454e5dd7070Spatrick   Builder.CreateBr(ContBB);
1455e5dd7070Spatrick   SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1456e5dd7070Spatrick               SeqCstBB);
1457e5dd7070Spatrick 
1458e5dd7070Spatrick   // Cleanup and return
1459e5dd7070Spatrick   Builder.SetInsertPoint(ContBB);
1460e5dd7070Spatrick   if (RValTy->isVoidType())
1461e5dd7070Spatrick     return RValue::get(nullptr);
1462e5dd7070Spatrick 
1463e5dd7070Spatrick   assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1464e5dd7070Spatrick   return convertTempToRValue(
1465*12c85518Srobert       Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
1466e5dd7070Spatrick       RValTy, E->getExprLoc());
1467e5dd7070Spatrick }
1468e5dd7070Spatrick 
1469e5dd7070Spatrick Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1470e5dd7070Spatrick   llvm::IntegerType *ty =
1471e5dd7070Spatrick     llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1472*12c85518Srobert   return CGF.Builder.CreateElementBitCast(addr, ty);
1473e5dd7070Spatrick }
1474e5dd7070Spatrick 
1475e5dd7070Spatrick Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1476e5dd7070Spatrick   llvm::Type *Ty = Addr.getElementType();
1477e5dd7070Spatrick   uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1478e5dd7070Spatrick   if (SourceSizeInBits != AtomicSizeInBits) {
1479e5dd7070Spatrick     Address Tmp = CreateTempAlloca();
1480e5dd7070Spatrick     CGF.Builder.CreateMemCpy(Tmp, Addr,
1481e5dd7070Spatrick                              std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1482e5dd7070Spatrick     Addr = Tmp;
1483e5dd7070Spatrick   }
1484e5dd7070Spatrick 
1485e5dd7070Spatrick   return emitCastToAtomicIntPointer(Addr);
1486e5dd7070Spatrick }
1487e5dd7070Spatrick 
1488e5dd7070Spatrick RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1489e5dd7070Spatrick                                              AggValueSlot resultSlot,
1490e5dd7070Spatrick                                              SourceLocation loc,
1491e5dd7070Spatrick                                              bool asValue) const {
1492e5dd7070Spatrick   if (LVal.isSimple()) {
1493e5dd7070Spatrick     if (EvaluationKind == TEK_Aggregate)
1494e5dd7070Spatrick       return resultSlot.asRValue();
1495e5dd7070Spatrick 
1496e5dd7070Spatrick     // Drill into the padding structure if we have one.
1497e5dd7070Spatrick     if (hasPadding())
1498e5dd7070Spatrick       addr = CGF.Builder.CreateStructGEP(addr, 0);
1499e5dd7070Spatrick 
1500e5dd7070Spatrick     // Otherwise, just convert the temporary to an r-value using the
1501e5dd7070Spatrick     // normal conversion routine.
1502e5dd7070Spatrick     return CGF.convertTempToRValue(addr, getValueType(), loc);
1503e5dd7070Spatrick   }
1504e5dd7070Spatrick   if (!asValue)
1505e5dd7070Spatrick     // Get RValue from temp memory as atomic for non-simple lvalues
1506e5dd7070Spatrick     return RValue::get(CGF.Builder.CreateLoad(addr));
1507e5dd7070Spatrick   if (LVal.isBitField())
1508e5dd7070Spatrick     return CGF.EmitLoadOfBitfieldLValue(
1509e5dd7070Spatrick         LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1510e5dd7070Spatrick                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1511e5dd7070Spatrick   if (LVal.isVectorElt())
1512e5dd7070Spatrick     return CGF.EmitLoadOfLValue(
1513e5dd7070Spatrick         LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1514e5dd7070Spatrick                               LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1515e5dd7070Spatrick   assert(LVal.isExtVectorElt());
1516e5dd7070Spatrick   return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1517e5dd7070Spatrick       addr, LVal.getExtVectorElts(), LVal.getType(),
1518e5dd7070Spatrick       LVal.getBaseInfo(), TBAAAccessInfo()));
1519e5dd7070Spatrick }
1520e5dd7070Spatrick 
1521e5dd7070Spatrick RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1522e5dd7070Spatrick                                              AggValueSlot ResultSlot,
1523e5dd7070Spatrick                                              SourceLocation Loc,
1524e5dd7070Spatrick                                              bool AsValue) const {
1525e5dd7070Spatrick   // Try to avoid going through a temporary in the easy cases.
1526e5dd7070Spatrick   assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1527e5dd7070Spatrick   if (getEvaluationKind() == TEK_Scalar &&
1528e5dd7070Spatrick       (((!LVal.isBitField() ||
1529e5dd7070Spatrick          LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1530e5dd7070Spatrick         !hasPadding()) ||
1531e5dd7070Spatrick        !AsValue)) {
1532e5dd7070Spatrick     auto *ValTy = AsValue
1533e5dd7070Spatrick                       ? CGF.ConvertTypeForMem(ValueTy)
1534*12c85518Srobert                       : getAtomicAddress().getElementType();
1535e5dd7070Spatrick     if (ValTy->isIntegerTy()) {
1536e5dd7070Spatrick       assert(IntVal->getType() == ValTy && "Different integer types.");
1537e5dd7070Spatrick       return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1538e5dd7070Spatrick     } else if (ValTy->isPointerTy())
1539e5dd7070Spatrick       return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1540e5dd7070Spatrick     else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1541e5dd7070Spatrick       return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1542e5dd7070Spatrick   }
1543e5dd7070Spatrick 
1544e5dd7070Spatrick   // Create a temporary.  This needs to be big enough to hold the
1545e5dd7070Spatrick   // atomic integer.
1546e5dd7070Spatrick   Address Temp = Address::invalid();
1547e5dd7070Spatrick   bool TempIsVolatile = false;
1548e5dd7070Spatrick   if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1549e5dd7070Spatrick     assert(!ResultSlot.isIgnored());
1550e5dd7070Spatrick     Temp = ResultSlot.getAddress();
1551e5dd7070Spatrick     TempIsVolatile = ResultSlot.isVolatile();
1552e5dd7070Spatrick   } else {
1553e5dd7070Spatrick     Temp = CreateTempAlloca();
1554e5dd7070Spatrick   }
1555e5dd7070Spatrick 
1556e5dd7070Spatrick   // Slam the integer into the temporary.
1557e5dd7070Spatrick   Address CastTemp = emitCastToAtomicIntPointer(Temp);
1558e5dd7070Spatrick   CGF.Builder.CreateStore(IntVal, CastTemp)
1559e5dd7070Spatrick       ->setVolatile(TempIsVolatile);
1560e5dd7070Spatrick 
1561e5dd7070Spatrick   return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1562e5dd7070Spatrick }
1563e5dd7070Spatrick 
1564e5dd7070Spatrick void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1565e5dd7070Spatrick                                        llvm::AtomicOrdering AO, bool) {
1566e5dd7070Spatrick   // void __atomic_load(size_t size, void *mem, void *return, int order);
1567e5dd7070Spatrick   CallArgList Args;
1568e5dd7070Spatrick   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1569e5dd7070Spatrick   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1570e5dd7070Spatrick            CGF.getContext().VoidPtrTy);
1571e5dd7070Spatrick   Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
1572e5dd7070Spatrick            CGF.getContext().VoidPtrTy);
1573e5dd7070Spatrick   Args.add(
1574e5dd7070Spatrick       RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1575e5dd7070Spatrick       CGF.getContext().IntTy);
1576e5dd7070Spatrick   emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1577e5dd7070Spatrick }
1578e5dd7070Spatrick 
1579e5dd7070Spatrick llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1580e5dd7070Spatrick                                           bool IsVolatile) {
1581e5dd7070Spatrick   // Okay, we're doing this natively.
1582e5dd7070Spatrick   Address Addr = getAtomicAddressAsAtomicIntPointer();
1583e5dd7070Spatrick   llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1584e5dd7070Spatrick   Load->setAtomic(AO);
1585e5dd7070Spatrick 
1586e5dd7070Spatrick   // Other decoration.
1587e5dd7070Spatrick   if (IsVolatile)
1588e5dd7070Spatrick     Load->setVolatile(true);
1589e5dd7070Spatrick   CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1590e5dd7070Spatrick   return Load;
1591e5dd7070Spatrick }
1592e5dd7070Spatrick 
1593e5dd7070Spatrick /// An LValue is a candidate for having its loads and stores be made atomic if
1594e5dd7070Spatrick /// we are operating under /volatile:ms *and* the LValue itself is volatile and
1595e5dd7070Spatrick /// such an operation can be performed without a libcall.
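/// For example (illustrative, assuming -fms-volatile): a load or store of a
/// `volatile int` no wider than a pointer then becomes an atomic acquire load
/// or release store rather than an ordinary volatile access.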
1596e5dd7070Spatrick bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1597*12c85518Srobert   if (!CGM.getLangOpts().MSVolatile) return false;
1598e5dd7070Spatrick   AtomicInfo AI(*this, LV);
1599e5dd7070Spatrick   bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1600e5dd7070Spatrick   // An atomic is inline if we don't need to use a libcall.
1601e5dd7070Spatrick   bool AtomicIsInline = !AI.shouldUseLibcall();
1602e5dd7070Spatrick   // MSVC doesn't seem to do this for types wider than a pointer.
1603e5dd7070Spatrick   if (getContext().getTypeSize(LV.getType()) >
1604e5dd7070Spatrick       getContext().getTypeSize(getContext().getIntPtrType()))
1605e5dd7070Spatrick     return false;
1606e5dd7070Spatrick   return IsVolatile && AtomicIsInline;
1607e5dd7070Spatrick }
1608e5dd7070Spatrick 
1609e5dd7070Spatrick RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1610e5dd7070Spatrick                                        AggValueSlot Slot) {
1611e5dd7070Spatrick   llvm::AtomicOrdering AO;
1612e5dd7070Spatrick   bool IsVolatile = LV.isVolatileQualified();
1613e5dd7070Spatrick   if (LV.getType()->isAtomicType()) {
1614e5dd7070Spatrick     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1615e5dd7070Spatrick   } else {
1616e5dd7070Spatrick     AO = llvm::AtomicOrdering::Acquire;
1617e5dd7070Spatrick     IsVolatile = true;
1618e5dd7070Spatrick   }
1619e5dd7070Spatrick   return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1620e5dd7070Spatrick }
1621e5dd7070Spatrick 
1622e5dd7070Spatrick RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1623e5dd7070Spatrick                                   bool AsValue, llvm::AtomicOrdering AO,
1624e5dd7070Spatrick                                   bool IsVolatile) {
1625e5dd7070Spatrick   // Check whether we should use a library call.
1626e5dd7070Spatrick   if (shouldUseLibcall()) {
1627e5dd7070Spatrick     Address TempAddr = Address::invalid();
1628e5dd7070Spatrick     if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1629e5dd7070Spatrick       assert(getEvaluationKind() == TEK_Aggregate);
1630e5dd7070Spatrick       TempAddr = ResultSlot.getAddress();
1631e5dd7070Spatrick     } else
1632e5dd7070Spatrick       TempAddr = CreateTempAlloca();
1633e5dd7070Spatrick 
1634e5dd7070Spatrick     EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1635e5dd7070Spatrick 
1636e5dd7070Spatrick     // Okay, turn that back into the original value or whole atomic (for
1637e5dd7070Spatrick     // non-simple lvalues) type.
1638e5dd7070Spatrick     return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1639e5dd7070Spatrick   }
1640e5dd7070Spatrick 
1641e5dd7070Spatrick   // Okay, we're doing this natively.
1642e5dd7070Spatrick   auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1643e5dd7070Spatrick 
1644e5dd7070Spatrick   // If we're ignoring an aggregate return, don't do anything.
1645e5dd7070Spatrick   if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1646e5dd7070Spatrick     return RValue::getAggregate(Address::invalid(), false);
1647e5dd7070Spatrick 
1648e5dd7070Spatrick   // Okay, turn that back into the original value or atomic (for non-simple
1649e5dd7070Spatrick   // lvalues) type.
1650e5dd7070Spatrick   return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1651e5dd7070Spatrick }
1652e5dd7070Spatrick 
1653e5dd7070Spatrick /// Emit a load from an l-value of atomic type.  Note that the r-value
1654e5dd7070Spatrick /// we produce is an r-value of the atomic *value* type.
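/// For instance (illustrative): loading from an l-value of type
/// `_Atomic(float)` yields an r-value of type `float`, not of the atomic
/// wrapper type.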
1655e5dd7070Spatrick RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1656e5dd7070Spatrick                                        llvm::AtomicOrdering AO, bool IsVolatile,
1657e5dd7070Spatrick                                        AggValueSlot resultSlot) {
1658e5dd7070Spatrick   AtomicInfo Atomics(*this, src);
1659e5dd7070Spatrick   return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1660e5dd7070Spatrick                                 IsVolatile);
1661e5dd7070Spatrick }
1662e5dd7070Spatrick 
1663e5dd7070Spatrick /// Copy an r-value into memory as part of storing to an atomic type.
1664e5dd7070Spatrick /// This needs to create a bit-pattern suitable for atomic operations.
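/// For example (illustrative): storing a value whose type occupies 3 bytes
/// into a 4-byte _Atomic object first zero-fills the buffer, so the trailing
/// padding byte has a deterministic value when a later compare-and-exchange
/// inspects the whole 4-byte representation.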
1665e5dd7070Spatrick void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1666e5dd7070Spatrick   assert(LVal.isSimple());
1667e5dd7070Spatrick   // If we have an r-value, the rvalue should be of the atomic type,
1668e5dd7070Spatrick   // which means that the caller is responsible for having zeroed
1669e5dd7070Spatrick   // any padding.  Just do an aggregate copy of that type.
1670e5dd7070Spatrick   if (rvalue.isAggregate()) {
1671e5dd7070Spatrick     LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1672e5dd7070Spatrick     LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1673e5dd7070Spatrick                                     getAtomicType());
1674e5dd7070Spatrick     bool IsVolatile = rvalue.isVolatileQualified() ||
1675e5dd7070Spatrick                       LVal.isVolatileQualified();
1676e5dd7070Spatrick     CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1677e5dd7070Spatrick                           AggValueSlot::DoesNotOverlap, IsVolatile);
1678e5dd7070Spatrick     return;
1679e5dd7070Spatrick   }
1680e5dd7070Spatrick 
1681e5dd7070Spatrick   // Okay, otherwise we're copying stuff.
1682e5dd7070Spatrick 
1683e5dd7070Spatrick   // Zero out the buffer if necessary.
1684e5dd7070Spatrick   emitMemSetZeroIfNecessary();
1685e5dd7070Spatrick 
1686e5dd7070Spatrick   // Drill past the padding if present.
1687e5dd7070Spatrick   LValue TempLVal = projectValue();
1688e5dd7070Spatrick 
1689e5dd7070Spatrick   // Okay, store the rvalue in.
1690e5dd7070Spatrick   if (rvalue.isScalar()) {
1691e5dd7070Spatrick     CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1692e5dd7070Spatrick   } else {
1693e5dd7070Spatrick     CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1694e5dd7070Spatrick   }
1695e5dd7070Spatrick }
1696e5dd7070Spatrick 
1697e5dd7070Spatrick 
1698e5dd7070Spatrick /// Materialize an r-value into memory for the purposes of storing it
1699e5dd7070Spatrick /// to an atomic type.
1700e5dd7070Spatrick Address AtomicInfo::materializeRValue(RValue rvalue) const {
1701e5dd7070Spatrick   // Aggregate r-values are already in memory, and EmitAtomicStore
1702e5dd7070Spatrick   // requires them to be values of the atomic type.
1703e5dd7070Spatrick   if (rvalue.isAggregate())
1704e5dd7070Spatrick     return rvalue.getAggregateAddress();
1705e5dd7070Spatrick 
1706e5dd7070Spatrick   // Otherwise, make a temporary and materialize into it.
1707e5dd7070Spatrick   LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1708e5dd7070Spatrick   AtomicInfo Atomics(CGF, TempLV);
1709e5dd7070Spatrick   Atomics.emitCopyIntoMemory(rvalue);
1710e5dd7070Spatrick   return TempLV.getAddress(CGF);
1711e5dd7070Spatrick }
1712e5dd7070Spatrick 
1713e5dd7070Spatrick llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1714e5dd7070Spatrick   // If we've got a scalar value of the right size, try to avoid going
1715e5dd7070Spatrick   // through memory.
1716e5dd7070Spatrick   if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1717e5dd7070Spatrick     llvm::Value *Value = RVal.getScalarVal();
1718e5dd7070Spatrick     if (isa<llvm::IntegerType>(Value->getType()))
1719e5dd7070Spatrick       return CGF.EmitToMemory(Value, ValueTy);
1720e5dd7070Spatrick     else {
1721e5dd7070Spatrick       llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1722e5dd7070Spatrick           CGF.getLLVMContext(),
1723e5dd7070Spatrick           LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1724e5dd7070Spatrick       if (isa<llvm::PointerType>(Value->getType()))
1725e5dd7070Spatrick         return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1726e5dd7070Spatrick       else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1727e5dd7070Spatrick         return CGF.Builder.CreateBitCast(Value, InputIntTy);
1728e5dd7070Spatrick     }
1729e5dd7070Spatrick   }
1730e5dd7070Spatrick   // Otherwise, we need to go through memory.
1731e5dd7070Spatrick   // Put the r-value in memory.
1732e5dd7070Spatrick   Address Addr = materializeRValue(RVal);
1733e5dd7070Spatrick 
1734e5dd7070Spatrick   // Cast the temporary to the atomic int type and pull a value out.
1735e5dd7070Spatrick   Addr = emitCastToAtomicIntPointer(Addr);
1736e5dd7070Spatrick   return CGF.Builder.CreateLoad(Addr);
1737e5dd7070Spatrick }
1738e5dd7070Spatrick 
1739e5dd7070Spatrick std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1740e5dd7070Spatrick     llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1741e5dd7070Spatrick     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1742e5dd7070Spatrick   // Do the atomic store.
1743e5dd7070Spatrick   Address Addr = getAtomicAddressAsAtomicIntPointer();
1744e5dd7070Spatrick   auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1745e5dd7070Spatrick                                                ExpectedVal, DesiredVal,
1746e5dd7070Spatrick                                                Success, Failure);
1747e5dd7070Spatrick   // Other decoration.
1748e5dd7070Spatrick   Inst->setVolatile(LVal.isVolatileQualified());
1749e5dd7070Spatrick   Inst->setWeak(IsWeak);
1750e5dd7070Spatrick 
1751e5dd7070Spatrick   // Okay, turn that back into the original value type.
1752e5dd7070Spatrick   auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1753e5dd7070Spatrick   auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1754e5dd7070Spatrick   return std::make_pair(PreviousVal, SuccessFailureVal);
1755e5dd7070Spatrick }
1756e5dd7070Spatrick 
1757e5dd7070Spatrick llvm::Value *
1758e5dd7070Spatrick AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1759e5dd7070Spatrick                                              llvm::Value *DesiredAddr,
1760e5dd7070Spatrick                                              llvm::AtomicOrdering Success,
1761e5dd7070Spatrick                                              llvm::AtomicOrdering Failure) {
1762e5dd7070Spatrick   // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1763e5dd7070Spatrick   // void *desired, int success, int failure);
1764e5dd7070Spatrick   CallArgList Args;
1765e5dd7070Spatrick   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1766e5dd7070Spatrick   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1767e5dd7070Spatrick            CGF.getContext().VoidPtrTy);
1768e5dd7070Spatrick   Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1769e5dd7070Spatrick            CGF.getContext().VoidPtrTy);
1770e5dd7070Spatrick   Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1771e5dd7070Spatrick            CGF.getContext().VoidPtrTy);
1772e5dd7070Spatrick   Args.add(RValue::get(
1773e5dd7070Spatrick                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1774e5dd7070Spatrick            CGF.getContext().IntTy);
1775e5dd7070Spatrick   Args.add(RValue::get(
1776e5dd7070Spatrick                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1777e5dd7070Spatrick            CGF.getContext().IntTy);
1778e5dd7070Spatrick   auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1779e5dd7070Spatrick                                               CGF.getContext().BoolTy, Args);
1780e5dd7070Spatrick 
1781e5dd7070Spatrick   return SuccessFailureRVal.getScalarVal();
1782e5dd7070Spatrick }
1783e5dd7070Spatrick 
1784e5dd7070Spatrick std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1785e5dd7070Spatrick     RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1786e5dd7070Spatrick     llvm::AtomicOrdering Failure, bool IsWeak) {
1787e5dd7070Spatrick   // Check whether we should use a library call.
1788e5dd7070Spatrick   if (shouldUseLibcall()) {
1789e5dd7070Spatrick     // Produce a source address.
1790e5dd7070Spatrick     Address ExpectedAddr = materializeRValue(Expected);
1791e5dd7070Spatrick     Address DesiredAddr = materializeRValue(Desired);
1792e5dd7070Spatrick     auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1793e5dd7070Spatrick                                                  DesiredAddr.getPointer(),
1794e5dd7070Spatrick                                                  Success, Failure);
1795e5dd7070Spatrick     return std::make_pair(
1796e5dd7070Spatrick         convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1797e5dd7070Spatrick                                   SourceLocation(), /*AsValue=*/false),
1798e5dd7070Spatrick         Res);
1799e5dd7070Spatrick   }
1800e5dd7070Spatrick 
1801e5dd7070Spatrick   // If we've got a scalar value of the right size, try to avoid going
1802e5dd7070Spatrick   // through memory.
1803e5dd7070Spatrick   auto *ExpectedVal = convertRValueToInt(Expected);
1804e5dd7070Spatrick   auto *DesiredVal = convertRValueToInt(Desired);
1805e5dd7070Spatrick   auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1806e5dd7070Spatrick                                          Failure, IsWeak);
1807e5dd7070Spatrick   return std::make_pair(
1808e5dd7070Spatrick       ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1809e5dd7070Spatrick                                 SourceLocation(), /*AsValue=*/false),
1810e5dd7070Spatrick       Res.second);
1811e5dd7070Spatrick }
1812e5dd7070Spatrick 
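// Compute the new value for an atomic read-modify-write: apply UpdateOp to the old
// value and store the result into the temporary at DesiredAddr, rebuilding
// bit-field / vector-element l-values when the atomic l-value is not simple.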
1813e5dd7070Spatrick static void
1814e5dd7070Spatrick EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1815e5dd7070Spatrick                       const llvm::function_ref<RValue(RValue)> &UpdateOp,
1816e5dd7070Spatrick                       Address DesiredAddr) {
1817e5dd7070Spatrick   RValue UpRVal;
1818e5dd7070Spatrick   LValue AtomicLVal = Atomics.getAtomicLValue();
1819e5dd7070Spatrick   LValue DesiredLVal;
1820e5dd7070Spatrick   if (AtomicLVal.isSimple()) {
1821e5dd7070Spatrick     UpRVal = OldRVal;
1822e5dd7070Spatrick     DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1823e5dd7070Spatrick   } else {
1824e5dd7070Spatrick     // Build new lvalue for temp address.
1825e5dd7070Spatrick     Address Ptr = Atomics.materializeRValue(OldRVal);
1826e5dd7070Spatrick     LValue UpdateLVal;
1827e5dd7070Spatrick     if (AtomicLVal.isBitField()) {
1828e5dd7070Spatrick       UpdateLVal =
1829e5dd7070Spatrick           LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1830e5dd7070Spatrick                                AtomicLVal.getType(),
1831e5dd7070Spatrick                                AtomicLVal.getBaseInfo(),
1832e5dd7070Spatrick                                AtomicLVal.getTBAAInfo());
1833e5dd7070Spatrick       DesiredLVal =
1834e5dd7070Spatrick           LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1835e5dd7070Spatrick                                AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1836e5dd7070Spatrick                                AtomicLVal.getTBAAInfo());
1837e5dd7070Spatrick     } else if (AtomicLVal.isVectorElt()) {
1838e5dd7070Spatrick       UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1839e5dd7070Spatrick                                          AtomicLVal.getType(),
1840e5dd7070Spatrick                                          AtomicLVal.getBaseInfo(),
1841e5dd7070Spatrick                                          AtomicLVal.getTBAAInfo());
1842e5dd7070Spatrick       DesiredLVal = LValue::MakeVectorElt(
1843e5dd7070Spatrick           DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1844e5dd7070Spatrick           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1845e5dd7070Spatrick     } else {
1846e5dd7070Spatrick       assert(AtomicLVal.isExtVectorElt());
1847e5dd7070Spatrick       UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1848e5dd7070Spatrick                                             AtomicLVal.getType(),
1849e5dd7070Spatrick                                             AtomicLVal.getBaseInfo(),
1850e5dd7070Spatrick                                             AtomicLVal.getTBAAInfo());
1851e5dd7070Spatrick       DesiredLVal = LValue::MakeExtVectorElt(
1852e5dd7070Spatrick           DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1853e5dd7070Spatrick           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1854e5dd7070Spatrick     }
1855e5dd7070Spatrick     UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1856e5dd7070Spatrick   }
1857e5dd7070Spatrick   // Store new value in the corresponding memory area.
1858e5dd7070Spatrick   RValue NewRVal = UpdateOp(UpRVal);
1859e5dd7070Spatrick   if (NewRVal.isScalar()) {
1860e5dd7070Spatrick     CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1861e5dd7070Spatrick   } else {
1862e5dd7070Spatrick     assert(NewRVal.isComplex());
1863e5dd7070Spatrick     CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1864e5dd7070Spatrick                            /*isInit=*/false);
1865e5dd7070Spatrick   }
1866e5dd7070Spatrick }
1867e5dd7070Spatrick 
1868e5dd7070Spatrick void AtomicInfo::EmitAtomicUpdateLibcall(
1869e5dd7070Spatrick     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1870e5dd7070Spatrick     bool IsVolatile) {
1871e5dd7070Spatrick   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1872e5dd7070Spatrick 
1873e5dd7070Spatrick   Address ExpectedAddr = CreateTempAlloca();
1874e5dd7070Spatrick 
1875e5dd7070Spatrick   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
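  // Libcall-based compare-and-swap loop: ExpectedAddr always holds the last value
  // observed in memory. A failed __atomic_compare_exchange refreshes it, so the
  // loop just branches back to atomic_cont and recomputes the desired value.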
1876e5dd7070Spatrick   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1877e5dd7070Spatrick   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1878e5dd7070Spatrick   CGF.EmitBlock(ContBB);
1879e5dd7070Spatrick   Address DesiredAddr = CreateTempAlloca();
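  // If the update writes only part of the storage (a narrow bit-field) or padding
  // must stay zeroed, seed DesiredAddr with the old bytes so untouched bits survive.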
1880e5dd7070Spatrick   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1881e5dd7070Spatrick       requiresMemSetZero(getAtomicAddress().getElementType())) {
1882e5dd7070Spatrick     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1883e5dd7070Spatrick     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1884e5dd7070Spatrick   }
1885e5dd7070Spatrick   auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1886e5dd7070Spatrick                                            AggValueSlot::ignored(),
1887e5dd7070Spatrick                                            SourceLocation(), /*AsValue=*/false);
1888e5dd7070Spatrick   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1889e5dd7070Spatrick   auto *Res =
1890e5dd7070Spatrick       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1891e5dd7070Spatrick                                        DesiredAddr.getPointer(),
1892e5dd7070Spatrick                                        AO, Failure);
1893e5dd7070Spatrick   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1894e5dd7070Spatrick   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1895e5dd7070Spatrick }
1896e5dd7070Spatrick 
1897e5dd7070Spatrick void AtomicInfo::EmitAtomicUpdateOp(
1898e5dd7070Spatrick     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1899e5dd7070Spatrick     bool IsVolatile) {
1900e5dd7070Spatrick   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1901e5dd7070Spatrick 
1902e5dd7070Spatrick   // Do the atomic load.
1903ec727ea7Spatrick   auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
1904e5dd7070Spatrick   // For non-simple l-values, perform a compare-and-swap procedure.
1905e5dd7070Spatrick   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1906e5dd7070Spatrick   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1907e5dd7070Spatrick   auto *CurBB = CGF.Builder.GetInsertBlock();
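  // Lock-free retry loop: the PHI in atomic_cont carries the most recently observed
  // value, initially the plain atomic load and, on the back edge, the value returned
  // by a failed cmpxchg.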
1908e5dd7070Spatrick   CGF.EmitBlock(ContBB);
1909e5dd7070Spatrick   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1910e5dd7070Spatrick                                              /*NumReservedValues=*/2);
1911e5dd7070Spatrick   PHI->addIncoming(OldVal, CurBB);
1912e5dd7070Spatrick   Address NewAtomicAddr = CreateTempAlloca();
1913e5dd7070Spatrick   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1914e5dd7070Spatrick   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1915e5dd7070Spatrick       requiresMemSetZero(getAtomicAddress().getElementType())) {
1916e5dd7070Spatrick     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1917e5dd7070Spatrick   }
1918e5dd7070Spatrick   auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1919e5dd7070Spatrick                                            SourceLocation(), /*AsValue=*/false);
1920e5dd7070Spatrick   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1921e5dd7070Spatrick   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1922e5dd7070Spatrick   // Try to write new value using cmpxchg operation.
1923e5dd7070Spatrick   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1924e5dd7070Spatrick   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1925e5dd7070Spatrick   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1926e5dd7070Spatrick   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1927e5dd7070Spatrick }
1928e5dd7070Spatrick 
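// Overload used when the new value is already an RValue (e.g. an atomic store to a
// bit-field or vector-element l-value): store UpdateRVal into the temporary through
// an l-value matching the destination's shape.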
1929e5dd7070Spatrick static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1930e5dd7070Spatrick                                   RValue UpdateRVal, Address DesiredAddr) {
1931e5dd7070Spatrick   LValue AtomicLVal = Atomics.getAtomicLValue();
1932e5dd7070Spatrick   LValue DesiredLVal;
1933e5dd7070Spatrick   // Build new lvalue for temp address.
1934e5dd7070Spatrick   if (AtomicLVal.isBitField()) {
1935e5dd7070Spatrick     DesiredLVal =
1936e5dd7070Spatrick         LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1937e5dd7070Spatrick                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1938e5dd7070Spatrick                              AtomicLVal.getTBAAInfo());
1939e5dd7070Spatrick   } else if (AtomicLVal.isVectorElt()) {
1940e5dd7070Spatrick     DesiredLVal =
1941e5dd7070Spatrick         LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1942e5dd7070Spatrick                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1943e5dd7070Spatrick                               AtomicLVal.getTBAAInfo());
1944e5dd7070Spatrick   } else {
1945e5dd7070Spatrick     assert(AtomicLVal.isExtVectorElt());
1946e5dd7070Spatrick     DesiredLVal = LValue::MakeExtVectorElt(
1947e5dd7070Spatrick         DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1948e5dd7070Spatrick         AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1949e5dd7070Spatrick   }
1950e5dd7070Spatrick   // Store new value in the corresponding memory area.
1951e5dd7070Spatrick   assert(UpdateRVal.isScalar());
1952e5dd7070Spatrick   CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1953e5dd7070Spatrick }
1954e5dd7070Spatrick 
1955e5dd7070Spatrick void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1956e5dd7070Spatrick                                          RValue UpdateRVal, bool IsVolatile) {
1957e5dd7070Spatrick   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1958e5dd7070Spatrick 
1959e5dd7070Spatrick   Address ExpectedAddr = CreateTempAlloca();
1960e5dd7070Spatrick 
1961e5dd7070Spatrick   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1962e5dd7070Spatrick   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1963e5dd7070Spatrick   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1964e5dd7070Spatrick   CGF.EmitBlock(ContBB);
1965e5dd7070Spatrick   Address DesiredAddr = CreateTempAlloca();
1966e5dd7070Spatrick   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1967e5dd7070Spatrick       requiresMemSetZero(getAtomicAddress().getElementType())) {
1968e5dd7070Spatrick     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1969e5dd7070Spatrick     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1970e5dd7070Spatrick   }
1971e5dd7070Spatrick   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1972e5dd7070Spatrick   auto *Res =
1973e5dd7070Spatrick       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1974e5dd7070Spatrick                                        DesiredAddr.getPointer(),
1975e5dd7070Spatrick                                        AO, Failure);
1976e5dd7070Spatrick   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1977e5dd7070Spatrick   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1978e5dd7070Spatrick }
1979e5dd7070Spatrick 
1980e5dd7070Spatrick void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1981e5dd7070Spatrick                                     bool IsVolatile) {
1982e5dd7070Spatrick   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1983e5dd7070Spatrick 
1984e5dd7070Spatrick   // Do the atomic load.
1985ec727ea7Spatrick   auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
1986e5dd7070Spatrick   // For non-simple l-values, perform a compare-and-swap procedure.
1987e5dd7070Spatrick   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1988e5dd7070Spatrick   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1989e5dd7070Spatrick   auto *CurBB = CGF.Builder.GetInsertBlock();
1990e5dd7070Spatrick   CGF.EmitBlock(ContBB);
1991e5dd7070Spatrick   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1992e5dd7070Spatrick                                              /*NumReservedValues=*/2);
1993e5dd7070Spatrick   PHI->addIncoming(OldVal, CurBB);
1994e5dd7070Spatrick   Address NewAtomicAddr = CreateTempAlloca();
1995e5dd7070Spatrick   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1996e5dd7070Spatrick   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1997e5dd7070Spatrick       requiresMemSetZero(getAtomicAddress().getElementType())) {
1998e5dd7070Spatrick     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1999e5dd7070Spatrick   }
2000e5dd7070Spatrick   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
2001e5dd7070Spatrick   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
2002e5dd7070Spatrick   // Try to write new value using cmpxchg operation.
2003e5dd7070Spatrick   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
2004e5dd7070Spatrick   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
2005e5dd7070Spatrick   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
2006e5dd7070Spatrick   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2007e5dd7070Spatrick }
2008e5dd7070Spatrick 
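// Dispatchers: choose the libcall-based or inline (lock-free) update path depending
// on whether this atomic access can be lowered to native instructions.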
2009e5dd7070Spatrick void AtomicInfo::EmitAtomicUpdate(
2010e5dd7070Spatrick     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
2011e5dd7070Spatrick     bool IsVolatile) {
2012e5dd7070Spatrick   if (shouldUseLibcall()) {
2013e5dd7070Spatrick     EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
2014e5dd7070Spatrick   } else {
2015e5dd7070Spatrick     EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
2016e5dd7070Spatrick   }
2017e5dd7070Spatrick }
2018e5dd7070Spatrick 
2019e5dd7070Spatrick void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
2020e5dd7070Spatrick                                   bool IsVolatile) {
2021e5dd7070Spatrick   if (shouldUseLibcall()) {
2022e5dd7070Spatrick     EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
2023e5dd7070Spatrick   } else {
2024e5dd7070Spatrick     EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
2025e5dd7070Spatrick   }
2026e5dd7070Spatrick }
2027e5dd7070Spatrick 
2028e5dd7070Spatrick void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
2029e5dd7070Spatrick                                       bool isInit) {
2030e5dd7070Spatrick   bool IsVolatile = lvalue.isVolatileQualified();
2031e5dd7070Spatrick   llvm::AtomicOrdering AO;
2032e5dd7070Spatrick   if (lvalue.getType()->isAtomicType()) {
2033e5dd7070Spatrick     AO = llvm::AtomicOrdering::SequentiallyConsistent;
2034e5dd7070Spatrick   } else {
2035e5dd7070Spatrick     AO = llvm::AtomicOrdering::Release;
2036e5dd7070Spatrick     IsVolatile = true;
2037e5dd7070Spatrick   }
2038e5dd7070Spatrick   return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
2039e5dd7070Spatrick }
2040e5dd7070Spatrick 
2041e5dd7070Spatrick /// Emit a store to an l-value of atomic type.
2042e5dd7070Spatrick ///
2043e5dd7070Spatrick /// Note that the r-value is expected to be an r-value *of the atomic
2044e5dd7070Spatrick /// type*; this means that for aggregate r-values, it should include
2045e5dd7070Spatrick /// storage for any padding that was necessary.
2046e5dd7070Spatrick void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
2047e5dd7070Spatrick                                       llvm::AtomicOrdering AO, bool IsVolatile,
2048e5dd7070Spatrick                                       bool isInit) {
2049e5dd7070Spatrick   // If this is an aggregate r-value, it should agree in type except
2050e5dd7070Spatrick   // maybe for address-space qualification.
2051e5dd7070Spatrick   assert(!rvalue.isAggregate() ||
2052e5dd7070Spatrick          rvalue.getAggregateAddress().getElementType() ==
2053e5dd7070Spatrick              dest.getAddress(*this).getElementType());
2054e5dd7070Spatrick 
2055e5dd7070Spatrick   AtomicInfo atomics(*this, dest);
2056e5dd7070Spatrick   LValue LVal = atomics.getAtomicLValue();
2057e5dd7070Spatrick 
2058e5dd7070Spatrick   // If this is an initialization, just put the value there normally.
2059e5dd7070Spatrick   if (LVal.isSimple()) {
2060e5dd7070Spatrick     if (isInit) {
2061e5dd7070Spatrick       atomics.emitCopyIntoMemory(rvalue);
2062e5dd7070Spatrick       return;
2063e5dd7070Spatrick     }
2064e5dd7070Spatrick 
2065e5dd7070Spatrick     // Check whether we should use a library call.
2066e5dd7070Spatrick     if (atomics.shouldUseLibcall()) {
2067e5dd7070Spatrick       // Produce a source address.
2068e5dd7070Spatrick       Address srcAddr = atomics.materializeRValue(rvalue);
2069e5dd7070Spatrick 
2070e5dd7070Spatrick       // void __atomic_store(size_t size, void *mem, void *val, int order)
2071e5dd7070Spatrick       CallArgList args;
2072e5dd7070Spatrick       args.add(RValue::get(atomics.getAtomicSizeValue()),
2073e5dd7070Spatrick                getContext().getSizeType());
2074e5dd7070Spatrick       args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
2075e5dd7070Spatrick                getContext().VoidPtrTy);
2076e5dd7070Spatrick       args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
2077e5dd7070Spatrick                getContext().VoidPtrTy);
2078e5dd7070Spatrick       args.add(
2079e5dd7070Spatrick           RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
2080e5dd7070Spatrick           getContext().IntTy);
2081e5dd7070Spatrick       emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
2082e5dd7070Spatrick       return;
2083e5dd7070Spatrick     }
2084e5dd7070Spatrick 
2085e5dd7070Spatrick     // Okay, we're doing this natively.
2086e5dd7070Spatrick     llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
2087e5dd7070Spatrick 
2088e5dd7070Spatrick     // Do the atomic store.
2089e5dd7070Spatrick     Address addr =
2090e5dd7070Spatrick         atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
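    // The in-memory atomic slot may be wider than the value (padding), so
    // zero-extend the integer representation up to the slot's width.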
2091e5dd7070Spatrick     intValue = Builder.CreateIntCast(
2092e5dd7070Spatrick         intValue, addr.getElementType(), /*isSigned=*/false);
2093e5dd7070Spatrick     llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
2094e5dd7070Spatrick 
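    // A store instruction cannot carry acquire semantics; strip the acquire half of
    // the ordering (acquire -> monotonic, acq_rel -> release) before tagging it.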
2095ec727ea7Spatrick     if (AO == llvm::AtomicOrdering::Acquire)
2096ec727ea7Spatrick       AO = llvm::AtomicOrdering::Monotonic;
2097ec727ea7Spatrick     else if (AO == llvm::AtomicOrdering::AcquireRelease)
2098ec727ea7Spatrick       AO = llvm::AtomicOrdering::Release;
2099e5dd7070Spatrick     // Initializations don't need to be atomic.
2100e5dd7070Spatrick     if (!isInit)
2101e5dd7070Spatrick       store->setAtomic(AO);
2102e5dd7070Spatrick 
2103e5dd7070Spatrick     // Other decoration.
2104e5dd7070Spatrick     if (IsVolatile)
2105e5dd7070Spatrick       store->setVolatile(true);
2106e5dd7070Spatrick     CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
2107e5dd7070Spatrick     return;
2108e5dd7070Spatrick   }
2109e5dd7070Spatrick 
2110e5dd7070Spatrick   // Emit simple atomic update operation.
2111e5dd7070Spatrick   atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
2112e5dd7070Spatrick }
2113e5dd7070Spatrick 
2114e5dd7070Spatrick /// Emit a compare-and-exchange op for atomic type.
2115e5dd7070Spatrick ///
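/// Returns the value previously held by \p Obj together with an i1 flag that is
/// true iff the exchange succeeded.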
2116e5dd7070Spatrick std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
2117e5dd7070Spatrick     LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
2118e5dd7070Spatrick     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
2119e5dd7070Spatrick     AggValueSlot Slot) {
2120e5dd7070Spatrick   // If this is an aggregate r-value, it should agree in type except
2121e5dd7070Spatrick   // maybe for address-space qualification.
2122e5dd7070Spatrick   assert(!Expected.isAggregate() ||
2123e5dd7070Spatrick          Expected.getAggregateAddress().getElementType() ==
2124e5dd7070Spatrick              Obj.getAddress(*this).getElementType());
2125e5dd7070Spatrick   assert(!Desired.isAggregate() ||
2126e5dd7070Spatrick          Desired.getAggregateAddress().getElementType() ==
2127e5dd7070Spatrick              Obj.getAddress(*this).getElementType());
2128e5dd7070Spatrick   AtomicInfo Atomics(*this, Obj);
2129e5dd7070Spatrick 
2130e5dd7070Spatrick   return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
2131e5dd7070Spatrick                                            IsWeak);
2132e5dd7070Spatrick }
2133e5dd7070Spatrick 
2134e5dd7070Spatrick void CodeGenFunction::EmitAtomicUpdate(
2135e5dd7070Spatrick     LValue LVal, llvm::AtomicOrdering AO,
2136e5dd7070Spatrick     const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2137e5dd7070Spatrick   AtomicInfo Atomics(*this, LVal);
2138e5dd7070Spatrick   Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2139e5dd7070Spatrick }
2140e5dd7070Spatrick 
2141e5dd7070Spatrick void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2142e5dd7070Spatrick   AtomicInfo atomics(*this, dest);
2143e5dd7070Spatrick 
2144e5dd7070Spatrick   switch (atomics.getEvaluationKind()) {
2145e5dd7070Spatrick   case TEK_Scalar: {
2146e5dd7070Spatrick     llvm::Value *value = EmitScalarExpr(init);
2147e5dd7070Spatrick     atomics.emitCopyIntoMemory(RValue::get(value));
2148e5dd7070Spatrick     return;
2149e5dd7070Spatrick   }
2150e5dd7070Spatrick 
2151e5dd7070Spatrick   case TEK_Complex: {
2152e5dd7070Spatrick     ComplexPairTy value = EmitComplexExpr(init);
2153e5dd7070Spatrick     atomics.emitCopyIntoMemory(RValue::getComplex(value));
2154e5dd7070Spatrick     return;
2155e5dd7070Spatrick   }
2156e5dd7070Spatrick 
2157e5dd7070Spatrick   case TEK_Aggregate: {
2158e5dd7070Spatrick     // Fix up the destination if the initializer isn't an expression
2159e5dd7070Spatrick     // of atomic type.
2160e5dd7070Spatrick     bool Zeroed = false;
2161e5dd7070Spatrick     if (!init->getType()->isAtomicType()) {
2162e5dd7070Spatrick       Zeroed = atomics.emitMemSetZeroIfNecessary();
2163e5dd7070Spatrick       dest = atomics.projectValue();
2164e5dd7070Spatrick     }
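    // projectValue() narrows 'dest' from the padded _Atomic storage to its value
    // sub-object; emitMemSetZeroIfNecessary() zeroed the padding when required.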
2165e5dd7070Spatrick 
2166e5dd7070Spatrick     // Evaluate the expression directly into the destination.
2167e5dd7070Spatrick     AggValueSlot slot = AggValueSlot::forLValue(
2168e5dd7070Spatrick         dest, *this, AggValueSlot::IsNotDestructed,
2169e5dd7070Spatrick         AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
2170e5dd7070Spatrick         AggValueSlot::DoesNotOverlap,
2171e5dd7070Spatrick         Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
2172e5dd7070Spatrick 
2173e5dd7070Spatrick     EmitAggExpr(init, slot);
2174e5dd7070Spatrick     return;
2175e5dd7070Spatrick   }
2176e5dd7070Spatrick   }
2177e5dd7070Spatrick   llvm_unreachable("bad evaluation kind");
2178e5dd7070Spatrick }
2179