1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the code for emitting atomic operations.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCall.h"
14 #include "CGRecordLayout.h"
15 #include "CodeGenFunction.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/ASTContext.h"
19 #include "clang/CodeGen/CGFunctionInfo.h"
20 #include "clang/Frontend/FrontendDiagnostic.h"
21 #include "llvm/ADT/DenseMap.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/Operator.h"
25 
26 using namespace clang;
27 using namespace CodeGen;
28 
29 namespace {
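  /// Gathers the layout facts (sizes, alignments, padding) for a single
  /// atomic l-value and decides whether operations on it can be lowered to
  /// inline lock-free instructions or must go through the libatomic runtime.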
30   class AtomicInfo {
31     CodeGenFunction &CGF;
32     QualType AtomicTy;
33     QualType ValueTy;
34     uint64_t AtomicSizeInBits;
35     uint64_t ValueSizeInBits;
36     CharUnits AtomicAlign;
37     CharUnits ValueAlign;
38     TypeEvaluationKind EvaluationKind;
39     bool UseLibcall;
40     LValue LVal;
41     CGBitFieldInfo BFI;
42   public:
43     AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
44         : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45           EvaluationKind(TEK_Scalar), UseLibcall(true) {
46       assert(!lvalue.isGlobalReg());
47       ASTContext &C = CGF.getContext();
48       if (lvalue.isSimple()) {
49         AtomicTy = lvalue.getType();
50         if (auto *ATy = AtomicTy->getAs<AtomicType>())
51           ValueTy = ATy->getValueType();
52         else
53           ValueTy = AtomicTy;
54         EvaluationKind = CGF.getEvaluationKind(ValueTy);
55 
56         uint64_t ValueAlignInBits;
57         uint64_t AtomicAlignInBits;
58         TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59         ValueSizeInBits = ValueTI.Width;
60         ValueAlignInBits = ValueTI.Align;
61 
62         TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63         AtomicSizeInBits = AtomicTI.Width;
64         AtomicAlignInBits = AtomicTI.Align;
65 
66         assert(ValueSizeInBits <= AtomicSizeInBits);
67         assert(ValueAlignInBits <= AtomicAlignInBits);
68 
69         AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70         ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
71         if (lvalue.getAlignment().isZero())
72           lvalue.setAlignment(AtomicAlign);
73 
74         LVal = lvalue;
75       } else if (lvalue.isBitField()) {
76         ValueTy = lvalue.getType();
77         ValueSizeInBits = C.getTypeSize(ValueTy);
78         auto &OrigBFI = lvalue.getBitFieldInfo();
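        // Round the accessed range up to a whole multiple of the l-value's
        // alignment so the atomic operation covers the bit-field's complete
        // storage chunk.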
79         auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
80         AtomicSizeInBits = C.toBits(
81             C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
82                 .alignTo(lvalue.getAlignment()));
83         auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
84         auto OffsetInChars =
85             (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
86             lvalue.getAlignment();
87         VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
88             VoidPtrAddr, OffsetInChars.getQuantity());
89         auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
90             VoidPtrAddr,
91             CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
92             "atomic_bitfield_base");
93         BFI = OrigBFI;
94         BFI.Offset = Offset;
95         BFI.StorageSize = AtomicSizeInBits;
96         BFI.StorageOffset += OffsetInChars;
97         LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
98                                     BFI, lvalue.getType(), lvalue.getBaseInfo(),
99                                     lvalue.getTBAAInfo());
100         AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
101         if (AtomicTy.isNull()) {
102           llvm::APInt Size(
103               /*numBits=*/32,
104               C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
105           AtomicTy =
106               C.getConstantArrayType(C.CharTy, Size, nullptr, ArrayType::Normal,
107                                      /*IndexTypeQuals=*/0);
108         }
109         AtomicAlign = ValueAlign = lvalue.getAlignment();
110       } else if (lvalue.isVectorElt()) {
111         ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
112         ValueSizeInBits = C.getTypeSize(ValueTy);
113         AtomicTy = lvalue.getType();
114         AtomicSizeInBits = C.getTypeSize(AtomicTy);
115         AtomicAlign = ValueAlign = lvalue.getAlignment();
116         LVal = lvalue;
117       } else {
118         assert(lvalue.isExtVectorElt());
119         ValueTy = lvalue.getType();
120         ValueSizeInBits = C.getTypeSize(ValueTy);
121         AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
122             lvalue.getType(), cast<llvm::FixedVectorType>(
123                                   lvalue.getExtVectorAddress().getElementType())
124                                   ->getNumElements());
125         AtomicSizeInBits = C.getTypeSize(AtomicTy);
126         AtomicAlign = ValueAlign = lvalue.getAlignment();
127         LVal = lvalue;
128       }
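      // Ask the target whether this size/alignment combination can be lowered
      // to inline lock-free instructions; otherwise the libatomic entry
      // points are used.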
129       UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
130           AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
131     }
132 
133     QualType getAtomicType() const { return AtomicTy; }
134     QualType getValueType() const { return ValueTy; }
135     CharUnits getAtomicAlignment() const { return AtomicAlign; }
136     uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
137     uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
138     TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
139     bool shouldUseLibcall() const { return UseLibcall; }
140     const LValue &getAtomicLValue() const { return LVal; }
141     llvm::Value *getAtomicPointer() const {
142       if (LVal.isSimple())
143         return LVal.getPointer(CGF);
144       else if (LVal.isBitField())
145         return LVal.getBitFieldPointer();
146       else if (LVal.isVectorElt())
147         return LVal.getVectorPointer();
148       assert(LVal.isExtVectorElt());
149       return LVal.getExtVectorPointer();
150     }
151     Address getAtomicAddress() const {
152       return Address(getAtomicPointer(), getAtomicAlignment());
153     }
154 
155     Address getAtomicAddressAsAtomicIntPointer() const {
156       return emitCastToAtomicIntPointer(getAtomicAddress());
157     }
158 
159     /// Is the atomic size larger than the underlying value type?
160     ///
161     /// Note that the absence of padding does not mean that atomic
162     /// objects are completely interchangeable with non-atomic
163     /// objects: we might have promoted the alignment of a type
164     /// without making it bigger.
165     bool hasPadding() const {
166       return (ValueSizeInBits != AtomicSizeInBits);
167     }
168 
169     bool emitMemSetZeroIfNecessary() const;
170 
171     llvm::Value *getAtomicSizeValue() const {
172       CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
173       return CGF.CGM.getSize(size);
174     }
175 
176     /// Cast the given pointer to an integer pointer suitable for atomic
177     /// operations.
178     Address emitCastToAtomicIntPointer(Address Addr) const;
179 
180     /// If Addr is compatible with the iN that will be used for an atomic
181     /// operation, bitcast it. Otherwise, create a temporary that is suitable
182     /// and copy the value across.
183     Address convertToAtomicIntPointer(Address Addr) const;
184 
185     /// Turn an atomic-layout object into an r-value.
186     RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
187                                      SourceLocation loc, bool AsValue) const;
188 
189     /// Converts an r-value to an integer value.
190     llvm::Value *convertRValueToInt(RValue RVal) const;
191 
192     RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
193                                      AggValueSlot ResultSlot,
194                                      SourceLocation Loc, bool AsValue) const;
195 
196     /// Copy an atomic r-value into atomic-layout memory.
197     void emitCopyIntoMemory(RValue rvalue) const;
198 
199     /// Project an l-value down to the value field.
200     LValue projectValue() const {
201       assert(LVal.isSimple());
202       Address addr = getAtomicAddress();
203       if (hasPadding())
204         addr = CGF.Builder.CreateStructGEP(addr, 0);
205 
206       return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
207                               LVal.getBaseInfo(), LVal.getTBAAInfo());
208     }
209 
210     /// Emits atomic load.
211     /// \returns Loaded value.
212     RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
213                           bool AsValue, llvm::AtomicOrdering AO,
214                           bool IsVolatile);
215 
216     /// Emits atomic compare-and-exchange sequence.
217     /// \param Expected Expected value.
218     /// \param Desired Desired value.
219     /// \param Success Atomic ordering for success operation.
220     /// \param Failure Atomic ordering for failed operation.
221     /// \param IsWeak true if atomic operation is weak, false otherwise.
222     /// \returns Pair of values: the previous value from storage (value type)
223     /// and a boolean flag (i1 type) that is true on success, false otherwise.
224     std::pair<RValue, llvm::Value *>
225     EmitAtomicCompareExchange(RValue Expected, RValue Desired,
226                               llvm::AtomicOrdering Success =
227                                   llvm::AtomicOrdering::SequentiallyConsistent,
228                               llvm::AtomicOrdering Failure =
229                                   llvm::AtomicOrdering::SequentiallyConsistent,
230                               bool IsWeak = false);
231 
232     /// Emits atomic update.
233     /// \param AO Atomic ordering.
234     /// \param UpdateOp Update operation for the current lvalue.
235     void EmitAtomicUpdate(llvm::AtomicOrdering AO,
236                           const llvm::function_ref<RValue(RValue)> &UpdateOp,
237                           bool IsVolatile);
238     /// Emits atomic update.
239     /// \param AO Atomic ordering.
240     void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
241                           bool IsVolatile);
242 
243     /// Materialize an atomic r-value in atomic-layout memory.
244     Address materializeRValue(RValue rvalue) const;
245 
246     /// Creates temp alloca for intermediate operations on atomic value.
247     Address CreateTempAlloca() const;
248   private:
249     bool requiresMemSetZero(llvm::Type *type) const;
250 
251 
252     /// Emits atomic load as a libcall.
253     void EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
254                                llvm::AtomicOrdering AO, bool IsVolatile);
255     /// Emits atomic load as LLVM instruction.
256     llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
257     /// Emits atomic compare-and-exchange op as a libcall.
258     llvm::Value *EmitAtomicCompareExchangeLibcall(
259         llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
260         llvm::AtomicOrdering Success =
261             llvm::AtomicOrdering::SequentiallyConsistent,
262         llvm::AtomicOrdering Failure =
263             llvm::AtomicOrdering::SequentiallyConsistent);
264     /// Emits atomic compare-and-exchange op as LLVM instruction.
265     std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
266         llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
267         llvm::AtomicOrdering Success =
268             llvm::AtomicOrdering::SequentiallyConsistent,
269         llvm::AtomicOrdering Failure =
270             llvm::AtomicOrdering::SequentiallyConsistent,
271         bool IsWeak = false);
272     /// Emit atomic update as libcalls.
273     void
274     EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
275                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
276                             bool IsVolatile);
277     /// Emit atomic update as LLVM instructions.
278     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
279                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
280                             bool IsVolatile);
281     /// Emit atomic update as libcalls.
282     void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
283                                  bool IsVolatile);
284     /// Emit atomic update as LLVM instructions.
285     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
286                             bool IsVolatile);
287   };
288 }
289 
290 Address AtomicInfo::CreateTempAlloca() const {
291   Address TempAlloca = CGF.CreateMemTemp(
292       (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
293                                                                 : AtomicTy,
294       getAtomicAlignment(),
295       "atomic-temp");
296   // Cast to pointer to value type for bitfields.
297   if (LVal.isBitField())
298     return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
299         TempAlloca, getAtomicAddress().getType());
300   return TempAlloca;
301 }
302 
303 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
304                                 StringRef fnName,
305                                 QualType resultType,
306                                 CallArgList &args) {
307   const CGFunctionInfo &fnInfo =
308     CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
309   llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
310   llvm::AttrBuilder fnAttrB;
311   fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
312   fnAttrB.addAttribute(llvm::Attribute::WillReturn);
313   llvm::AttributeList fnAttrs = llvm::AttributeList::get(
314       CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
315 
316   llvm::FunctionCallee fn =
317       CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
318   auto callee = CGCallee::forDirect(fn);
319   return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
320 }
321 
322 /// Does a store of the given IR type modify the full expected width?
323 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
324                            uint64_t expectedSize) {
325   return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
326 }
327 
328 /// Does the atomic type require memsetting to zero before initialization?
329 ///
330 /// The IR type is provided as a way of making certain queries faster.
331 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
332   // If the atomic type has size padding, we definitely need a memset.
333   if (hasPadding()) return true;
334 
335   // Otherwise, do some simple heuristics to try to avoid it:
336   switch (getEvaluationKind()) {
337   // For scalars and complexes, check whether the store size of the
338   // type uses the full size.
339   case TEK_Scalar:
340     return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
341   case TEK_Complex:
342     return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
343                            AtomicSizeInBits / 2);
344 
345   // Padding in structs has an undefined bit pattern.  User beware.
346   case TEK_Aggregate:
347     return false;
348   }
349   llvm_unreachable("bad evaluation kind");
350 }
351 
352 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
353   assert(LVal.isSimple());
354   llvm::Value *addr = LVal.getPointer(CGF);
355   if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
356     return false;
357 
358   CGF.Builder.CreateMemSet(
359       addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
360       CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
361       LVal.getAlignment().getAsAlign());
362   return true;
363 }
364 
365 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
366                               Address Dest, Address Ptr,
367                               Address Val1, Address Val2,
368                               uint64_t Size,
369                               llvm::AtomicOrdering SuccessOrder,
370                               llvm::AtomicOrdering FailureOrder,
371                               llvm::SyncScope::ID Scope) {
372   // Emit the compare-exchange instruction; IsWeak selects the weak form.
373   llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
374   llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
375 
376   llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
377       Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
378       Scope);
379   Pair->setVolatile(E->isVolatile());
380   Pair->setWeak(IsWeak);
381 
382   // Cmp holds the result of the compare-exchange operation: true on success,
383   // false on failure.
384   llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
385   llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
386 
387   // This basic block is used to hold the store instruction if the operation
388   // failed.
389   llvm::BasicBlock *StoreExpectedBB =
390       CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
391 
392   // This basic block is the exit point of the operation, we should end up
393   // here regardless of whether or not the operation succeeded.
394   llvm::BasicBlock *ContinueBB =
395       CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
396 
397   // Update Expected if Expected isn't equal to Old, otherwise branch to the
398   // exit point.
399   CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
400 
401   CGF.Builder.SetInsertPoint(StoreExpectedBB);
402   // Update the memory at Expected with Old's value.
403   CGF.Builder.CreateStore(Old, Val1);
404   // Finally, branch to the exit point.
405   CGF.Builder.CreateBr(ContinueBB);
406 
407   CGF.Builder.SetInsertPoint(ContinueBB);
408   // Update the memory at Dest with Cmp's value.
409   CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
410 }
411 
412 /// Given an ordering required on success, emit all possible cmpxchg
413 /// instructions to cope with the provided (but possibly only dynamically known)
414 /// FailureOrder.
415 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
416                                         bool IsWeak, Address Dest, Address Ptr,
417                                         Address Val1, Address Val2,
418                                         llvm::Value *FailureOrderVal,
419                                         uint64_t Size,
420                                         llvm::AtomicOrdering SuccessOrder,
421                                         llvm::SyncScope::ID Scope) {
422   llvm::AtomicOrdering FailureOrder;
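  // A constant failure ordering can be mapped to an LLVM ordering directly;
  // a dynamic one needs the runtime switch emitted further below.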
423   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
424     auto FOS = FO->getSExtValue();
425     if (!llvm::isValidAtomicOrderingCABI(FOS))
426       FailureOrder = llvm::AtomicOrdering::Monotonic;
427     else
428       switch ((llvm::AtomicOrderingCABI)FOS) {
429       case llvm::AtomicOrderingCABI::relaxed:
430       // 31.7.2.18: "The failure argument shall not be memory_order_release
431       // nor memory_order_acq_rel". Fall back to monotonic.
432       case llvm::AtomicOrderingCABI::release:
433       case llvm::AtomicOrderingCABI::acq_rel:
434         FailureOrder = llvm::AtomicOrdering::Monotonic;
435         break;
436       case llvm::AtomicOrderingCABI::consume:
437       case llvm::AtomicOrderingCABI::acquire:
438         FailureOrder = llvm::AtomicOrdering::Acquire;
439         break;
440       case llvm::AtomicOrderingCABI::seq_cst:
441         FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
442         break;
443       }
444     // Prior to C++17, "the failure argument shall be no stronger than the
445     // success argument". This condition has been lifted and the only
446     // precondition is 31.7.2.18. Effectively treat this as a DR and skip
447     // language version checks.
448     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
449                       FailureOrder, Scope);
450     return;
451   }
452 
453   // Create all the relevant BB's
454   auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
455   auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
456   auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
457   auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
458 
459   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
460   // doesn't matter unless someone is crazy enough to use something that
461   // doesn't fold to a constant for the ordering.
462   llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
463   // Implemented as acquire, since it's the closest in LLVM.
464   SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
465               AcquireBB);
466   SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
467               AcquireBB);
468   SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
469               SeqCstBB);
470 
471   // Emit all the different atomics
472   CGF.Builder.SetInsertPoint(MonotonicBB);
473   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
474                     Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
475   CGF.Builder.CreateBr(ContBB);
476 
477   CGF.Builder.SetInsertPoint(AcquireBB);
478   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
479                     llvm::AtomicOrdering::Acquire, Scope);
480   CGF.Builder.CreateBr(ContBB);
481 
482   CGF.Builder.SetInsertPoint(SeqCstBB);
483   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
484                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
485   CGF.Builder.CreateBr(ContBB);
486 
487   CGF.Builder.SetInsertPoint(ContBB);
488 }
489 
490 /// Duplicate the atomic min/max operation in conventional IR for the builtin
491 /// variants that return the new rather than the original value.
492 static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
493                                          AtomicExpr::AtomicOp Op,
494                                          bool IsSigned,
495                                          llvm::Value *OldVal,
496                                          llvm::Value *RHS) {
497   llvm::CmpInst::Predicate Pred;
498   switch (Op) {
499   default:
500     llvm_unreachable("Unexpected min/max operation");
501   case AtomicExpr::AO__atomic_max_fetch:
502     Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
503     break;
504   case AtomicExpr::AO__atomic_min_fetch:
505     Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
506     break;
507   }
508   llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
509   return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
510 }
511 
512 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
513                          Address Ptr, Address Val1, Address Val2,
514                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
515                          uint64_t Size, llvm::AtomicOrdering Order,
516                          llvm::SyncScope::ID Scope) {
517   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
518   bool PostOpMinMax = false;
519   unsigned PostOp = 0;
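  // Op selects the atomicrmw opcode. PostOp/PostOpMinMax mark the
  // __atomic_*_fetch forms, which replay the arithmetic on the loaded value
  // afterwards to produce the new value rather than the original one.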
520 
521   switch (E->getOp()) {
522   case AtomicExpr::AO__c11_atomic_init:
523   case AtomicExpr::AO__opencl_atomic_init:
524     llvm_unreachable("Already handled!");
525 
526   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
527   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
528     emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
529                                 FailureOrder, Size, Order, Scope);
530     return;
531   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
532   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
533     emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
534                                 FailureOrder, Size, Order, Scope);
535     return;
536   case AtomicExpr::AO__atomic_compare_exchange:
537   case AtomicExpr::AO__atomic_compare_exchange_n: {
538     if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
539       emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
540                                   Val1, Val2, FailureOrder, Size, Order, Scope);
541     } else {
542       // Create all the relevant BB's
543       llvm::BasicBlock *StrongBB =
544           CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
545       llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
546       llvm::BasicBlock *ContBB =
547           CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
548 
549       llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
550       SI->addCase(CGF.Builder.getInt1(false), StrongBB);
551 
552       CGF.Builder.SetInsertPoint(StrongBB);
553       emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
554                                   FailureOrder, Size, Order, Scope);
555       CGF.Builder.CreateBr(ContBB);
556 
557       CGF.Builder.SetInsertPoint(WeakBB);
558       emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
559                                   FailureOrder, Size, Order, Scope);
560       CGF.Builder.CreateBr(ContBB);
561 
562       CGF.Builder.SetInsertPoint(ContBB);
563     }
564     return;
565   }
566   case AtomicExpr::AO__c11_atomic_load:
567   case AtomicExpr::AO__opencl_atomic_load:
568   case AtomicExpr::AO__atomic_load_n:
569   case AtomicExpr::AO__atomic_load: {
570     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
571     Load->setAtomic(Order, Scope);
572     Load->setVolatile(E->isVolatile());
573     CGF.Builder.CreateStore(Load, Dest);
574     return;
575   }
576 
577   case AtomicExpr::AO__c11_atomic_store:
578   case AtomicExpr::AO__opencl_atomic_store:
579   case AtomicExpr::AO__atomic_store:
580   case AtomicExpr::AO__atomic_store_n: {
581     llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
582     llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
583     Store->setAtomic(Order, Scope);
584     Store->setVolatile(E->isVolatile());
585     return;
586   }
587 
588   case AtomicExpr::AO__c11_atomic_exchange:
589   case AtomicExpr::AO__opencl_atomic_exchange:
590   case AtomicExpr::AO__atomic_exchange_n:
591   case AtomicExpr::AO__atomic_exchange:
592     Op = llvm::AtomicRMWInst::Xchg;
593     break;
594 
595   case AtomicExpr::AO__atomic_add_fetch:
596     PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
597                                                  : llvm::Instruction::Add;
598     LLVM_FALLTHROUGH;
599   case AtomicExpr::AO__c11_atomic_fetch_add:
600   case AtomicExpr::AO__opencl_atomic_fetch_add:
601   case AtomicExpr::AO__atomic_fetch_add:
602     Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
603                                              : llvm::AtomicRMWInst::Add;
604     break;
605 
606   case AtomicExpr::AO__atomic_sub_fetch:
607     PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
608                                                  : llvm::Instruction::Sub;
609     LLVM_FALLTHROUGH;
610   case AtomicExpr::AO__c11_atomic_fetch_sub:
611   case AtomicExpr::AO__opencl_atomic_fetch_sub:
612   case AtomicExpr::AO__atomic_fetch_sub:
613     Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
614                                              : llvm::AtomicRMWInst::Sub;
615     break;
616 
617   case AtomicExpr::AO__atomic_min_fetch:
618     PostOpMinMax = true;
619     LLVM_FALLTHROUGH;
620   case AtomicExpr::AO__c11_atomic_fetch_min:
621   case AtomicExpr::AO__opencl_atomic_fetch_min:
622   case AtomicExpr::AO__atomic_fetch_min:
623     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
624                                                   : llvm::AtomicRMWInst::UMin;
625     break;
626 
627   case AtomicExpr::AO__atomic_max_fetch:
628     PostOpMinMax = true;
629     LLVM_FALLTHROUGH;
630   case AtomicExpr::AO__c11_atomic_fetch_max:
631   case AtomicExpr::AO__opencl_atomic_fetch_max:
632   case AtomicExpr::AO__atomic_fetch_max:
633     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
634                                                   : llvm::AtomicRMWInst::UMax;
635     break;
636 
637   case AtomicExpr::AO__atomic_and_fetch:
638     PostOp = llvm::Instruction::And;
639     LLVM_FALLTHROUGH;
640   case AtomicExpr::AO__c11_atomic_fetch_and:
641   case AtomicExpr::AO__opencl_atomic_fetch_and:
642   case AtomicExpr::AO__atomic_fetch_and:
643     Op = llvm::AtomicRMWInst::And;
644     break;
645 
646   case AtomicExpr::AO__atomic_or_fetch:
647     PostOp = llvm::Instruction::Or;
648     LLVM_FALLTHROUGH;
649   case AtomicExpr::AO__c11_atomic_fetch_or:
650   case AtomicExpr::AO__opencl_atomic_fetch_or:
651   case AtomicExpr::AO__atomic_fetch_or:
652     Op = llvm::AtomicRMWInst::Or;
653     break;
654 
655   case AtomicExpr::AO__atomic_xor_fetch:
656     PostOp = llvm::Instruction::Xor;
657     LLVM_FALLTHROUGH;
658   case AtomicExpr::AO__c11_atomic_fetch_xor:
659   case AtomicExpr::AO__opencl_atomic_fetch_xor:
660   case AtomicExpr::AO__atomic_fetch_xor:
661     Op = llvm::AtomicRMWInst::Xor;
662     break;
663 
664   case AtomicExpr::AO__atomic_nand_fetch:
665     PostOp = llvm::Instruction::And; // the NOT is special cased below
666     LLVM_FALLTHROUGH;
667   case AtomicExpr::AO__atomic_fetch_nand:
668     Op = llvm::AtomicRMWInst::Nand;
669     break;
670   }
671 
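  // Everything that reaches this point is a read-modify-write operation that
  // maps onto a single atomicrmw instruction.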
672   llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
673   llvm::AtomicRMWInst *RMWI =
674       CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
675   RMWI->setVolatile(E->isVolatile());
676 
677   // For __atomic_*_fetch operations, perform the operation again to
678   // determine the value which was written.
679   llvm::Value *Result = RMWI;
680   if (PostOpMinMax)
681     Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
682                                   E->getValueType()->isSignedIntegerType(),
683                                   RMWI, LoadVal1);
684   else if (PostOp)
685     Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
686                                      LoadVal1);
687   if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
688     Result = CGF.Builder.CreateNot(Result);
689   CGF.Builder.CreateStore(Result, Dest);
690 }
691 
692 // This function emits any expression (scalar, complex, or aggregate)
693 // into a temporary alloca.
694 static Address
695 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
696   Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
697   CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
698                        /*Init*/ true);
699   return DeclPtr;
700 }
701 
702 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
703                          Address Ptr, Address Val1, Address Val2,
704                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
705                          uint64_t Size, llvm::AtomicOrdering Order,
706                          llvm::Value *Scope) {
707   auto ScopeModel = Expr->getScopeModel();
708 
709   // LLVM atomic instructions always have a synch scope. If the clang atomic
710   // expression has no scope operand, use the default LLVM synch scope.
711   if (!ScopeModel) {
712     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
713                  Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
714     return;
715   }
716 
717   // Handle constant scope.
718   if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
719     auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
720         CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
721         Order, CGF.CGM.getLLVMContext());
722     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
723                  Order, SCID);
724     return;
725   }
726 
727   // Handle non-constant scope.
728   auto &Builder = CGF.Builder;
729   auto Scopes = ScopeModel->getRuntimeValues();
730   llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
731   for (auto S : Scopes)
732     BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
733 
734   llvm::BasicBlock *ContBB =
735       CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
736 
737   auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
738   // If an unsupported synch scope is encountered at run time, assume a
739   // fallback synch scope value.
740   auto FallBack = ScopeModel->getFallBackValue();
741   llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
742   for (auto S : Scopes) {
743     auto *B = BB[S];
744     if (S != FallBack)
745       SI->addCase(Builder.getInt32(S), B);
746 
747     Builder.SetInsertPoint(B);
748     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
749                  Order,
750                  CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
751                                                          ScopeModel->map(S),
752                                                          Order,
753                                                          CGF.getLLVMContext()));
754     Builder.CreateBr(ContBB);
755   }
756 
757   Builder.SetInsertPoint(ContBB);
758 }
759 
760 static void
761 AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
762                   bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
763                   SourceLocation Loc, CharUnits SizeInChars) {
764   if (UseOptimizedLibcall) {
765     // Load value and pass it to the function directly.
766     CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
767     int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
768     ValTy =
769         CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
770     llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
771                                                 SizeInBits)->getPointerTo();
772     Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
773     Val = CGF.EmitLoadOfScalar(Ptr, false,
774                                CGF.getContext().getPointerType(ValTy),
775                                Loc);
776     // Coerce the value into an appropriately sized integer type.
777     Args.add(RValue::get(Val), ValTy);
778   } else {
779     // Non-optimized functions always take a reference.
780     Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
781                          CGF.getContext().VoidPtrTy);
782   }
783 }
784 
785 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
786   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
787   QualType MemTy = AtomicTy;
788   if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
789     MemTy = AT->getValueType();
790   llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
791 
792   Address Val1 = Address::invalid();
793   Address Val2 = Address::invalid();
794   Address Dest = Address::invalid();
795   Address Ptr = EmitPointerWithAlignment(E->getPtr());
796 
797   if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
798       E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
799     LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
800     EmitAtomicInit(E->getVal1(), lvalue);
801     return RValue::get(nullptr);
802   }
803 
804   auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
805   uint64_t Size = TInfo.Width.getQuantity();
806   unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
807 
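  // Accesses wider than the target's maximum inline atomic width, or not
  // naturally aligned for their size, cannot be lowered inline and take the
  // libcall path instead.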
808   bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
809   bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
810   bool UseLibcall = Misaligned | Oversized;
811   bool ShouldCastToIntPtrTy = true;
812 
813   CharUnits MaxInlineWidth =
814       getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
815 
816   DiagnosticsEngine &Diags = CGM.getDiags();
817 
818   if (Misaligned) {
819     Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
820         << (int)TInfo.Width.getQuantity()
821         << (int)Ptr.getAlignment().getQuantity();
822   }
823 
824   if (Oversized) {
825     Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
826         << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
827   }
828 
829   llvm::Value *Order = EmitScalarExpr(E->getOrder());
830   llvm::Value *Scope =
831       E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
832 
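  // Evaluate the remaining operands. Which of Val1, Val2 and Dest get
  // populated depends on the form of the builtin.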
833   switch (E->getOp()) {
834   case AtomicExpr::AO__c11_atomic_init:
835   case AtomicExpr::AO__opencl_atomic_init:
836     llvm_unreachable("Already handled above with EmitAtomicInit!");
837 
838   case AtomicExpr::AO__c11_atomic_load:
839   case AtomicExpr::AO__opencl_atomic_load:
840   case AtomicExpr::AO__atomic_load_n:
841     break;
842 
843   case AtomicExpr::AO__atomic_load:
844     Dest = EmitPointerWithAlignment(E->getVal1());
845     break;
846 
847   case AtomicExpr::AO__atomic_store:
848     Val1 = EmitPointerWithAlignment(E->getVal1());
849     break;
850 
851   case AtomicExpr::AO__atomic_exchange:
852     Val1 = EmitPointerWithAlignment(E->getVal1());
853     Dest = EmitPointerWithAlignment(E->getVal2());
854     break;
855 
856   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
857   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
858   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
859   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
860   case AtomicExpr::AO__atomic_compare_exchange_n:
861   case AtomicExpr::AO__atomic_compare_exchange:
862     Val1 = EmitPointerWithAlignment(E->getVal1());
863     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
864       Val2 = EmitPointerWithAlignment(E->getVal2());
865     else
866       Val2 = EmitValToTemp(*this, E->getVal2());
867     OrderFail = EmitScalarExpr(E->getOrderFail());
868     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
869         E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
870       IsWeak = EmitScalarExpr(E->getWeak());
871     break;
872 
873   case AtomicExpr::AO__c11_atomic_fetch_add:
874   case AtomicExpr::AO__c11_atomic_fetch_sub:
875   case AtomicExpr::AO__opencl_atomic_fetch_add:
876   case AtomicExpr::AO__opencl_atomic_fetch_sub:
877     if (MemTy->isPointerType()) {
878       // For pointer arithmetic, we're required to do a bit of math:
879       // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
880       // ... but only for the C11 builtins. The GNU builtins expect the
881       // user to multiply by sizeof(T).
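      // For example, with an _Atomic(int *) object p, the C11
      // __c11_atomic_fetch_add(&p, 2, ...) advances p by 2 * sizeof(int)
      // bytes, whereas the GNU __atomic_fetch_add(&p, 2, ...) adds 2 bytes.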
882       QualType Val1Ty = E->getVal1()->getType();
883       llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
884       CharUnits PointeeIncAmt =
885           getContext().getTypeSizeInChars(MemTy->getPointeeType());
886       Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
887       auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
888       Val1 = Temp;
889       EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
890       break;
891     }
892     LLVM_FALLTHROUGH;
893   case AtomicExpr::AO__atomic_fetch_add:
894   case AtomicExpr::AO__atomic_fetch_sub:
895   case AtomicExpr::AO__atomic_add_fetch:
896   case AtomicExpr::AO__atomic_sub_fetch:
897     ShouldCastToIntPtrTy = !MemTy->isFloatingType();
898     LLVM_FALLTHROUGH;
899 
900   case AtomicExpr::AO__c11_atomic_store:
901   case AtomicExpr::AO__c11_atomic_exchange:
902   case AtomicExpr::AO__opencl_atomic_store:
903   case AtomicExpr::AO__opencl_atomic_exchange:
904   case AtomicExpr::AO__atomic_store_n:
905   case AtomicExpr::AO__atomic_exchange_n:
906   case AtomicExpr::AO__c11_atomic_fetch_and:
907   case AtomicExpr::AO__c11_atomic_fetch_or:
908   case AtomicExpr::AO__c11_atomic_fetch_xor:
909   case AtomicExpr::AO__c11_atomic_fetch_max:
910   case AtomicExpr::AO__c11_atomic_fetch_min:
911   case AtomicExpr::AO__opencl_atomic_fetch_and:
912   case AtomicExpr::AO__opencl_atomic_fetch_or:
913   case AtomicExpr::AO__opencl_atomic_fetch_xor:
914   case AtomicExpr::AO__opencl_atomic_fetch_min:
915   case AtomicExpr::AO__opencl_atomic_fetch_max:
916   case AtomicExpr::AO__atomic_fetch_and:
917   case AtomicExpr::AO__atomic_fetch_or:
918   case AtomicExpr::AO__atomic_fetch_xor:
919   case AtomicExpr::AO__atomic_fetch_nand:
920   case AtomicExpr::AO__atomic_and_fetch:
921   case AtomicExpr::AO__atomic_or_fetch:
922   case AtomicExpr::AO__atomic_xor_fetch:
923   case AtomicExpr::AO__atomic_nand_fetch:
924   case AtomicExpr::AO__atomic_max_fetch:
925   case AtomicExpr::AO__atomic_min_fetch:
926   case AtomicExpr::AO__atomic_fetch_max:
927   case AtomicExpr::AO__atomic_fetch_min:
928     Val1 = EmitValToTemp(*this, E->getVal1());
929     break;
930   }
931 
932   QualType RValTy = E->getType().getUnqualifiedType();
933 
934   // The inlined atomics only function on iN types, where N is a power of 2. We
935   // need to make sure (via temporaries if necessary) that all incoming values
936   // are compatible.
937   LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
938   AtomicInfo Atomics(*this, AtomicVal);
939 
940   if (ShouldCastToIntPtrTy) {
941     Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
942     if (Val1.isValid())
943       Val1 = Atomics.convertToAtomicIntPointer(Val1);
944     if (Val2.isValid())
945       Val2 = Atomics.convertToAtomicIntPointer(Val2);
946   }
947   if (Dest.isValid()) {
948     if (ShouldCastToIntPtrTy)
949       Dest = Atomics.emitCastToAtomicIntPointer(Dest);
950   } else if (E->isCmpXChg())
951     Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
952   else if (!RValTy->isVoidType()) {
953     Dest = Atomics.CreateTempAlloca();
954     if (ShouldCastToIntPtrTy)
955       Dest = Atomics.emitCastToAtomicIntPointer(Dest);
956   }
957 
958   // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
959   if (UseLibcall) {
960     bool UseOptimizedLibcall = false;
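    // "Optimized" libcalls (e.g. __atomic_fetch_add_4) pass and return the
    // value directly; the generic, unsuffixed forms take an explicit size_t
    // size and work through pointers.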
961     switch (E->getOp()) {
962     case AtomicExpr::AO__c11_atomic_init:
963     case AtomicExpr::AO__opencl_atomic_init:
964       llvm_unreachable("Already handled above with EmitAtomicInit!");
965 
966     case AtomicExpr::AO__c11_atomic_fetch_add:
967     case AtomicExpr::AO__opencl_atomic_fetch_add:
968     case AtomicExpr::AO__atomic_fetch_add:
969     case AtomicExpr::AO__c11_atomic_fetch_and:
970     case AtomicExpr::AO__opencl_atomic_fetch_and:
971     case AtomicExpr::AO__atomic_fetch_and:
972     case AtomicExpr::AO__c11_atomic_fetch_or:
973     case AtomicExpr::AO__opencl_atomic_fetch_or:
974     case AtomicExpr::AO__atomic_fetch_or:
975     case AtomicExpr::AO__atomic_fetch_nand:
976     case AtomicExpr::AO__c11_atomic_fetch_sub:
977     case AtomicExpr::AO__opencl_atomic_fetch_sub:
978     case AtomicExpr::AO__atomic_fetch_sub:
979     case AtomicExpr::AO__c11_atomic_fetch_xor:
980     case AtomicExpr::AO__opencl_atomic_fetch_xor:
981     case AtomicExpr::AO__opencl_atomic_fetch_min:
982     case AtomicExpr::AO__opencl_atomic_fetch_max:
983     case AtomicExpr::AO__atomic_fetch_xor:
984     case AtomicExpr::AO__c11_atomic_fetch_max:
985     case AtomicExpr::AO__c11_atomic_fetch_min:
986     case AtomicExpr::AO__atomic_add_fetch:
987     case AtomicExpr::AO__atomic_and_fetch:
988     case AtomicExpr::AO__atomic_nand_fetch:
989     case AtomicExpr::AO__atomic_or_fetch:
990     case AtomicExpr::AO__atomic_sub_fetch:
991     case AtomicExpr::AO__atomic_xor_fetch:
992     case AtomicExpr::AO__atomic_fetch_max:
993     case AtomicExpr::AO__atomic_fetch_min:
994     case AtomicExpr::AO__atomic_max_fetch:
995     case AtomicExpr::AO__atomic_min_fetch:
996       // For these, only library calls for certain sizes exist.
997       UseOptimizedLibcall = true;
998       break;
999 
1000     case AtomicExpr::AO__atomic_load:
1001     case AtomicExpr::AO__atomic_store:
1002     case AtomicExpr::AO__atomic_exchange:
1003     case AtomicExpr::AO__atomic_compare_exchange:
1004       // Use the generic version if we don't know that the operand will be
1005       // suitably aligned for the optimized version.
1006       if (Misaligned)
1007         break;
1008       LLVM_FALLTHROUGH;
1009     case AtomicExpr::AO__c11_atomic_load:
1010     case AtomicExpr::AO__c11_atomic_store:
1011     case AtomicExpr::AO__c11_atomic_exchange:
1012     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1013     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1014     case AtomicExpr::AO__opencl_atomic_load:
1015     case AtomicExpr::AO__opencl_atomic_store:
1016     case AtomicExpr::AO__opencl_atomic_exchange:
1017     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1018     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1019     case AtomicExpr::AO__atomic_load_n:
1020     case AtomicExpr::AO__atomic_store_n:
1021     case AtomicExpr::AO__atomic_exchange_n:
1022     case AtomicExpr::AO__atomic_compare_exchange_n:
1023       // Only use optimized library calls for sizes for which they exist.
1024       // FIXME: Size == 16 optimized library functions exist too.
1025       if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
1026         UseOptimizedLibcall = true;
1027       break;
1028     }
1029 
1030     CallArgList Args;
1031     if (!UseOptimizedLibcall) {
1032       // For non-optimized library calls, the size is the first parameter
1033       Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
1034                getContext().getSizeType());
1035     }
1036     // Atomic address is the first or second parameter
1037     // The OpenCL atomic library functions only accept pointer arguments to
1038     // generic address space.
1039     auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
1040       if (!E->isOpenCL())
1041         return V;
1042       auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
1043       if (AS == LangAS::opencl_generic)
1044         return V;
1045       auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
1046       auto T = V->getType();
1047       auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
1048 
1049       return getTargetHooks().performAddrSpaceCast(
1050           *this, V, AS, LangAS::opencl_generic, DestType, false);
1051     };
1052 
1053     Args.add(RValue::get(CastToGenericAddrSpace(
1054                  EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
1055              getContext().VoidPtrTy);
1056 
1057     std::string LibCallName;
1058     QualType LoweredMemTy =
1059       MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
1060     QualType RetTy;
1061     bool HaveRetTy = false;
1062     llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
1063     bool PostOpMinMax = false;
1064     switch (E->getOp()) {
1065     case AtomicExpr::AO__c11_atomic_init:
1066     case AtomicExpr::AO__opencl_atomic_init:
1067       llvm_unreachable("Already handled!");
1068 
1069     // There is only one libcall for compare and exchange, because there is no
1070     // optimisation benefit possible from a libcall version of a weak compare
1071     // and exchange.
1072     // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1073     //                                void *desired, int success, int failure)
1074     // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
1075     //                                  int success, int failure)
1076     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1077     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1078     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1079     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1080     case AtomicExpr::AO__atomic_compare_exchange:
1081     case AtomicExpr::AO__atomic_compare_exchange_n:
1082       LibCallName = "__atomic_compare_exchange";
1083       RetTy = getContext().BoolTy;
1084       HaveRetTy = true;
1085       Args.add(
1086           RValue::get(CastToGenericAddrSpace(
1087               EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
1088           getContext().VoidPtrTy);
1089       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
1090                         MemTy, E->getExprLoc(), TInfo.Width);
1091       Args.add(RValue::get(Order), getContext().IntTy);
1092       Order = OrderFail;
1093       break;
1094     // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1095     //                        int order)
1096     // T __atomic_exchange_N(T *mem, T val, int order)
1097     case AtomicExpr::AO__c11_atomic_exchange:
1098     case AtomicExpr::AO__opencl_atomic_exchange:
1099     case AtomicExpr::AO__atomic_exchange_n:
1100     case AtomicExpr::AO__atomic_exchange:
1101       LibCallName = "__atomic_exchange";
1102       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1103                         MemTy, E->getExprLoc(), TInfo.Width);
1104       break;
1105     // void __atomic_store(size_t size, void *mem, void *val, int order)
1106     // void __atomic_store_N(T *mem, T val, int order)
1107     case AtomicExpr::AO__c11_atomic_store:
1108     case AtomicExpr::AO__opencl_atomic_store:
1109     case AtomicExpr::AO__atomic_store:
1110     case AtomicExpr::AO__atomic_store_n:
1111       LibCallName = "__atomic_store";
1112       RetTy = getContext().VoidTy;
1113       HaveRetTy = true;
1114       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1115                         MemTy, E->getExprLoc(), TInfo.Width);
1116       break;
1117     // void __atomic_load(size_t size, void *mem, void *return, int order)
1118     // T __atomic_load_N(T *mem, int order)
1119     case AtomicExpr::AO__c11_atomic_load:
1120     case AtomicExpr::AO__opencl_atomic_load:
1121     case AtomicExpr::AO__atomic_load:
1122     case AtomicExpr::AO__atomic_load_n:
1123       LibCallName = "__atomic_load";
1124       break;
1125     // T __atomic_add_fetch_N(T *mem, T val, int order)
1126     // T __atomic_fetch_add_N(T *mem, T val, int order)
1127     case AtomicExpr::AO__atomic_add_fetch:
1128       PostOp = llvm::Instruction::Add;
1129       LLVM_FALLTHROUGH;
1130     case AtomicExpr::AO__c11_atomic_fetch_add:
1131     case AtomicExpr::AO__opencl_atomic_fetch_add:
1132     case AtomicExpr::AO__atomic_fetch_add:
1133       LibCallName = "__atomic_fetch_add";
1134       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1135                         LoweredMemTy, E->getExprLoc(), TInfo.Width);
1136       break;
1137     // T __atomic_and_fetch_N(T *mem, T val, int order)
1138     // T __atomic_fetch_and_N(T *mem, T val, int order)
1139     case AtomicExpr::AO__atomic_and_fetch:
1140       PostOp = llvm::Instruction::And;
1141       LLVM_FALLTHROUGH;
1142     case AtomicExpr::AO__c11_atomic_fetch_and:
1143     case AtomicExpr::AO__opencl_atomic_fetch_and:
1144     case AtomicExpr::AO__atomic_fetch_and:
1145       LibCallName = "__atomic_fetch_and";
1146       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1147                         MemTy, E->getExprLoc(), TInfo.Width);
1148       break;
1149     // T __atomic_or_fetch_N(T *mem, T val, int order)
1150     // T __atomic_fetch_or_N(T *mem, T val, int order)
1151     case AtomicExpr::AO__atomic_or_fetch:
1152       PostOp = llvm::Instruction::Or;
1153       LLVM_FALLTHROUGH;
1154     case AtomicExpr::AO__c11_atomic_fetch_or:
1155     case AtomicExpr::AO__opencl_atomic_fetch_or:
1156     case AtomicExpr::AO__atomic_fetch_or:
1157       LibCallName = "__atomic_fetch_or";
1158       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1159                         MemTy, E->getExprLoc(), TInfo.Width);
1160       break;
1161     // T __atomic_sub_fetch_N(T *mem, T val, int order)
1162     // T __atomic_fetch_sub_N(T *mem, T val, int order)
1163     case AtomicExpr::AO__atomic_sub_fetch:
1164       PostOp = llvm::Instruction::Sub;
1165       LLVM_FALLTHROUGH;
1166     case AtomicExpr::AO__c11_atomic_fetch_sub:
1167     case AtomicExpr::AO__opencl_atomic_fetch_sub:
1168     case AtomicExpr::AO__atomic_fetch_sub:
1169       LibCallName = "__atomic_fetch_sub";
1170       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1171                         LoweredMemTy, E->getExprLoc(), TInfo.Width);
1172       break;
1173     // T __atomic_xor_fetch_N(T *mem, T val, int order)
1174     // T __atomic_fetch_xor_N(T *mem, T val, int order)
1175     case AtomicExpr::AO__atomic_xor_fetch:
1176       PostOp = llvm::Instruction::Xor;
1177       LLVM_FALLTHROUGH;
1178     case AtomicExpr::AO__c11_atomic_fetch_xor:
1179     case AtomicExpr::AO__opencl_atomic_fetch_xor:
1180     case AtomicExpr::AO__atomic_fetch_xor:
1181       LibCallName = "__atomic_fetch_xor";
1182       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1183                         MemTy, E->getExprLoc(), TInfo.Width);
1184       break;
1185     case AtomicExpr::AO__atomic_min_fetch:
1186       PostOpMinMax = true;
1187       LLVM_FALLTHROUGH;
1188     case AtomicExpr::AO__c11_atomic_fetch_min:
1189     case AtomicExpr::AO__atomic_fetch_min:
1190     case AtomicExpr::AO__opencl_atomic_fetch_min:
1191       LibCallName = E->getValueType()->isSignedIntegerType()
1192                         ? "__atomic_fetch_min"
1193                         : "__atomic_fetch_umin";
1194       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1195                         LoweredMemTy, E->getExprLoc(), TInfo.Width);
1196       break;
1197     case AtomicExpr::AO__atomic_max_fetch:
1198       PostOpMinMax = true;
1199       LLVM_FALLTHROUGH;
1200     case AtomicExpr::AO__c11_atomic_fetch_max:
1201     case AtomicExpr::AO__atomic_fetch_max:
1202     case AtomicExpr::AO__opencl_atomic_fetch_max:
1203       LibCallName = E->getValueType()->isSignedIntegerType()
1204                         ? "__atomic_fetch_max"
1205                         : "__atomic_fetch_umax";
1206       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1207                         LoweredMemTy, E->getExprLoc(), TInfo.Width);
1208       break;
1209     // T __atomic_nand_fetch_N(T *mem, T val, int order)
1210     // T __atomic_fetch_nand_N(T *mem, T val, int order)
1211     case AtomicExpr::AO__atomic_nand_fetch:
1212       PostOp = llvm::Instruction::And; // the NOT is special cased below
1213       LLVM_FALLTHROUGH;
1214     case AtomicExpr::AO__atomic_fetch_nand:
1215       LibCallName = "__atomic_fetch_nand";
1216       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1217                         MemTy, E->getExprLoc(), TInfo.Width);
1218       break;
1219     }
1220 
1221     if (E->isOpenCL()) {
1222       LibCallName = std::string("__opencl") +
1223           StringRef(LibCallName).drop_front(1).str();
1224 
1225     }
1226     // Optimized functions have the size in their name.
1227     if (UseOptimizedLibcall)
1228       LibCallName += "_" + llvm::utostr(Size);
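    // Illustrative sketch (not part of the original source): the optimized
    // libcall name is formed by appending the operand size in bytes, so for a
    // 4-byte int a call such as
    //
    //   int old = __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);
    //
    // ends up targeting "__atomic_fetch_add_4".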
1229     // By default, assume we return a value of the atomic type.
1230     if (!HaveRetTy) {
1231       if (UseOptimizedLibcall) {
1232         // Value is returned directly.
1233         // The function returns an appropriately sized integer type.
1234         RetTy = getContext().getIntTypeForBitwidth(
1235             getContext().toBits(TInfo.Width), /*Signed=*/false);
1236       } else {
1237         // Value is returned through parameter before the order.
1238         RetTy = getContext().VoidTy;
1239         Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
1240                  getContext().VoidPtrTy);
1241       }
1242     }
1243     // order is always the last parameter
1244     Args.add(RValue::get(Order),
1245              getContext().IntTy);
1246     if (E->isOpenCL())
1247       Args.add(RValue::get(Scope), getContext().IntTy);
1248 
1249     // PostOp is only needed for the atomic_*_fetch operations, and is
1250     // therefore only needed for, and implemented in, the
1251     // UseOptimizedLibcall codepath.

1252     assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));
1253 
1254     RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1255     // The value is returned directly from the libcall.
1256     if (E->isCmpXChg())
1257       return Res;
1258 
1259     // The value is returned directly for optimized libcalls but the expr
1260     // provided an out-param.
1261     if (UseOptimizedLibcall && Res.getScalarVal()) {
1262       llvm::Value *ResVal = Res.getScalarVal();
1263       if (PostOpMinMax) {
1264         llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1265         ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
1266                                       E->getValueType()->isSignedIntegerType(),
1267                                       ResVal, LoadVal1);
1268       } else if (PostOp) {
1269         llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1270         ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1271       }
1272       if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1273         ResVal = Builder.CreateNot(ResVal);
1274 
1275       Builder.CreateStore(
1276           ResVal,
1277           Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
1278     }
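    // Illustrative sketch (not part of the original source): the sized
    // libcalls return the *old* value, so the __atomic_*_fetch forms are
    // reconstructed from the result here.  For a 4-byte int,
    //
    //   int r = __atomic_add_fetch(&x, 5, __ATOMIC_RELAXED);
    //
    // is emitted as a call to __atomic_fetch_add_4 followed by an LLVM 'add'
    // of the returned old value and 5; __atomic_nand_fetch additionally gets
    // the trailing 'not' applied above.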
1279 
1280     if (RValTy->isVoidType())
1281       return RValue::get(nullptr);
1282 
1283     return convertTempToRValue(
1284         Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1285         RValTy, E->getExprLoc());
1286   }
1287 
1288   bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1289                  E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1290                  E->getOp() == AtomicExpr::AO__atomic_store ||
1291                  E->getOp() == AtomicExpr::AO__atomic_store_n;
1292   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1293                 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1294                 E->getOp() == AtomicExpr::AO__atomic_load ||
1295                 E->getOp() == AtomicExpr::AO__atomic_load_n;
1296 
1297   if (isa<llvm::ConstantInt>(Order)) {
1298     auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1299     // We should not ever get to a case where the ordering isn't a valid C ABI
1300     // value, but it's hard to enforce that in general.
1301     if (llvm::isValidAtomicOrderingCABI(ord))
1302       switch ((llvm::AtomicOrderingCABI)ord) {
1303       case llvm::AtomicOrderingCABI::relaxed:
1304         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1305                      llvm::AtomicOrdering::Monotonic, Scope);
1306         break;
1307       case llvm::AtomicOrderingCABI::consume:
1308       case llvm::AtomicOrderingCABI::acquire:
1309         if (IsStore)
1310           break; // Avoid crashing on code with undefined behavior
1311         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1312                      llvm::AtomicOrdering::Acquire, Scope);
1313         break;
1314       case llvm::AtomicOrderingCABI::release:
1315         if (IsLoad)
1316           break; // Avoid crashing on code with undefined behavior
1317         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1318                      llvm::AtomicOrdering::Release, Scope);
1319         break;
1320       case llvm::AtomicOrderingCABI::acq_rel:
1321         if (IsLoad || IsStore)
1322           break; // Avoid crashing on code with undefined behavior
1323         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1324                      llvm::AtomicOrdering::AcquireRelease, Scope);
1325         break;
1326       case llvm::AtomicOrderingCABI::seq_cst:
1327         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1328                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1329         break;
1330       }
1331     if (RValTy->isVoidType())
1332       return RValue::get(nullptr);
1333 
1334     return convertTempToRValue(
1335         Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1336                                         Dest.getAddressSpace())),
1337         RValTy, E->getExprLoc());
1338   }
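  // Illustrative mapping (not part of the original source), assuming the
  // usual <stdatomic.h> encoding of the C ABI ordering values:
  //
  //   memory_order_relaxed (0) -> llvm::AtomicOrdering::Monotonic
  //   memory_order_consume (1) -> llvm::AtomicOrdering::Acquire
  //   memory_order_acquire (2) -> llvm::AtomicOrdering::Acquire
  //   memory_order_release (3) -> llvm::AtomicOrdering::Release
  //   memory_order_acq_rel (4) -> llvm::AtomicOrdering::AcquireRelease
  //   memory_order_seq_cst (5) -> llvm::AtomicOrdering::SequentiallyConsistent
  //
  // Orderings that are invalid for the operation (e.g. acquire on a store)
  // fall through without emitting anything, as handled above.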
1339 
1340   // Long case, when Order isn't obviously constant.
1341 
1342   // Create all the relevant BB's
1343   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1344                    *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1345                    *SeqCstBB = nullptr;
1346   MonotonicBB = createBasicBlock("monotonic", CurFn);
1347   if (!IsStore)
1348     AcquireBB = createBasicBlock("acquire", CurFn);
1349   if (!IsLoad)
1350     ReleaseBB = createBasicBlock("release", CurFn);
1351   if (!IsLoad && !IsStore)
1352     AcqRelBB = createBasicBlock("acqrel", CurFn);
1353   SeqCstBB = createBasicBlock("seqcst", CurFn);
1354   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1355 
1356   // Create the switch for the split
1357   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1358   // doesn't matter unless someone is crazy enough to use something that
1359   // doesn't fold to a constant for the ordering.
1360   Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1361   llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1362 
1363   // Emit all the different atomics
1364   Builder.SetInsertPoint(MonotonicBB);
1365   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1366                llvm::AtomicOrdering::Monotonic, Scope);
1367   Builder.CreateBr(ContBB);
1368   if (!IsStore) {
1369     Builder.SetInsertPoint(AcquireBB);
1370     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1371                  llvm::AtomicOrdering::Acquire, Scope);
1372     Builder.CreateBr(ContBB);
1373     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1374                 AcquireBB);
1375     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1376                 AcquireBB);
1377   }
1378   if (!IsLoad) {
1379     Builder.SetInsertPoint(ReleaseBB);
1380     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1381                  llvm::AtomicOrdering::Release, Scope);
1382     Builder.CreateBr(ContBB);
1383     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1384                 ReleaseBB);
1385   }
1386   if (!IsLoad && !IsStore) {
1387     Builder.SetInsertPoint(AcqRelBB);
1388     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1389                  llvm::AtomicOrdering::AcquireRelease, Scope);
1390     Builder.CreateBr(ContBB);
1391     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1392                 AcqRelBB);
1393   }
1394   Builder.SetInsertPoint(SeqCstBB);
1395   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1396                llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1397   Builder.CreateBr(ContBB);
1398   SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1399               SeqCstBB);
1400 
1401   // Cleanup and return
1402   Builder.SetInsertPoint(ContBB);
1403   if (RValTy->isVoidType())
1404     return RValue::get(nullptr);
1405 
1406   assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1407   return convertTempToRValue(
1408       Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1409                                       Dest.getAddressSpace())),
1410       RValTy, E->getExprLoc());
1411 }
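// Illustrative sketch (not part of the original source): when the ordering is
// not a compile-time constant, the code above emits one basic block per legal
// ordering plus a switch, roughly:
//
//   switch i32 %order, label %monotonic [ i32 1, label %acquire
//                                         i32 2, label %acquire
//                                         i32 3, label %release
//                                         i32 4, label %acqrel
//                                         i32 5, label %seqcst ]
//
// with each block performing the same atomic operation at its ordering and
// branching to a common atomic.continue block.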
1412 
1413 Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1414   unsigned addrspace =
1415     cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
1416   llvm::IntegerType *ty =
1417     llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1418   return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1419 }
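// Illustrative sketch (not part of the original source): for an _Atomic(float)
// at address %a, AtomicSizeInBits is 32, so the cast above produces
//
//   %a.int = bitcast float* %a to i32*
//
// letting the integer-based atomic instructions operate on the same storage.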
1420 
1421 Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1422   llvm::Type *Ty = Addr.getElementType();
1423   uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1424   if (SourceSizeInBits != AtomicSizeInBits) {
1425     Address Tmp = CreateTempAlloca();
1426     CGF.Builder.CreateMemCpy(Tmp, Addr,
1427                              std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1428     Addr = Tmp;
1429   }
1430 
1431   return emitCastToAtomicIntPointer(Addr);
1432 }
1433 
1434 RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1435                                              AggValueSlot resultSlot,
1436                                              SourceLocation loc,
1437                                              bool asValue) const {
1438   if (LVal.isSimple()) {
1439     if (EvaluationKind == TEK_Aggregate)
1440       return resultSlot.asRValue();
1441 
1442     // Drill into the padding structure if we have one.
1443     if (hasPadding())
1444       addr = CGF.Builder.CreateStructGEP(addr, 0);
1445 
1446     // Otherwise, just convert the temporary to an r-value using the
1447     // normal conversion routine.
1448     return CGF.convertTempToRValue(addr, getValueType(), loc);
1449   }
1450   if (!asValue)
1451     // Get RValue from temp memory as atomic for non-simple lvalues
1452     return RValue::get(CGF.Builder.CreateLoad(addr));
1453   if (LVal.isBitField())
1454     return CGF.EmitLoadOfBitfieldLValue(
1455         LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1456                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1457   if (LVal.isVectorElt())
1458     return CGF.EmitLoadOfLValue(
1459         LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1460                               LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1461   assert(LVal.isExtVectorElt());
1462   return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1463       addr, LVal.getExtVectorElts(), LVal.getType(),
1464       LVal.getBaseInfo(), TBAAAccessInfo()));
1465 }
1466 
1467 RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1468                                              AggValueSlot ResultSlot,
1469                                              SourceLocation Loc,
1470                                              bool AsValue) const {
1471   // Try to avoid going through a temporary in some easy cases.
1472   assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1473   if (getEvaluationKind() == TEK_Scalar &&
1474       (((!LVal.isBitField() ||
1475          LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1476         !hasPadding()) ||
1477        !AsValue)) {
1478     auto *ValTy = AsValue
1479                       ? CGF.ConvertTypeForMem(ValueTy)
1480                       : getAtomicAddress().getType()->getPointerElementType();
1481     if (ValTy->isIntegerTy()) {
1482       assert(IntVal->getType() == ValTy && "Different integer types.");
1483       return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1484     } else if (ValTy->isPointerTy())
1485       return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1486     else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1487       return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1488   }
1489 
1490   // Create a temporary.  This needs to be big enough to hold the
1491   // atomic integer.
1492   Address Temp = Address::invalid();
1493   bool TempIsVolatile = false;
1494   if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1495     assert(!ResultSlot.isIgnored());
1496     Temp = ResultSlot.getAddress();
1497     TempIsVolatile = ResultSlot.isVolatile();
1498   } else {
1499     Temp = CreateTempAlloca();
1500   }
1501 
1502   // Slam the integer into the temporary.
1503   Address CastTemp = emitCastToAtomicIntPointer(Temp);
1504   CGF.Builder.CreateStore(IntVal, CastTemp)
1505       ->setVolatile(TempIsVolatile);
1506 
1507   return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1508 }
1509 
1510 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1511                                        llvm::AtomicOrdering AO, bool) {
1512   // void __atomic_load(size_t size, void *mem, void *return, int order);
1513   CallArgList Args;
1514   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1515   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1516            CGF.getContext().VoidPtrTy);
1517   Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
1518            CGF.getContext().VoidPtrTy);
1519   Args.add(
1520       RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1521       CGF.getContext().IntTy);
1522   emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1523 }
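// Illustrative sketch (not part of the original source): for an _Atomic struct
// S too large to be lock-free, the call built above is equivalent to
//
//   struct S tmp;
//   __atomic_load(sizeof(struct S), &s, &tmp, __ATOMIC_SEQ_CST);
//
// i.e. the generic, size-parameterised libatomic entry point with an
// out-parameter for the loaded bytes.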
1524 
1525 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1526                                           bool IsVolatile) {
1527   // Okay, we're doing this natively.
1528   Address Addr = getAtomicAddressAsAtomicIntPointer();
1529   llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1530   Load->setAtomic(AO);
1531 
1532   // Other decoration.
1533   if (IsVolatile)
1534     Load->setVolatile(true);
1535   CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1536   return Load;
1537 }
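// Illustrative sketch (not part of the original source): for an _Atomic(int)
// load at seq_cst, the native path above produces something like
//
//   %atomic-load = load atomic i32, i32* %addr seq_cst, align 4
//
// with the volatile flag and TBAA metadata attached as appropriate.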
1538 
1539 /// An LValue is a candidate for having its loads and stores be made atomic if
1540 /// we are operating under /volatile:ms *and* the LValue itself is volatile and
1541 /// such an operation can be performed without a libcall.
1542 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1543   if (!CGM.getCodeGenOpts().MSVolatile) return false;
1544   AtomicInfo AI(*this, LV);
1545   bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1546   // An atomic is inline if we don't need to use a libcall.
1547   bool AtomicIsInline = !AI.shouldUseLibcall();
1548   // MSVC doesn't seem to do this for types wider than a pointer.
1549   if (getContext().getTypeSize(LV.getType()) >
1550       getContext().getTypeSize(getContext().getIntPtrType()))
1551     return false;
1552   return IsVolatile && AtomicIsInline;
1553 }
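// Illustrative sketch (not part of the original source): under /volatile:ms
// (CGM.getCodeGenOpts().MSVolatile), a plain volatile access such as
//
//   volatile long g;
//   long observed = g;      // treated as an acquire load
//   g = observed + 1;       // treated as a release store
//
// can be routed through the atomic load/store paths in this file, provided the
// type is no wider than a pointer and needs no libcall.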
1554 
1555 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1556                                        AggValueSlot Slot) {
1557   llvm::AtomicOrdering AO;
1558   bool IsVolatile = LV.isVolatileQualified();
1559   if (LV.getType()->isAtomicType()) {
1560     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1561   } else {
1562     AO = llvm::AtomicOrdering::Acquire;
1563     IsVolatile = true;
1564   }
1565   return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1566 }
1567 
1568 RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1569                                   bool AsValue, llvm::AtomicOrdering AO,
1570                                   bool IsVolatile) {
1571   // Check whether we should use a library call.
1572   if (shouldUseLibcall()) {
1573     Address TempAddr = Address::invalid();
1574     if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1575       assert(getEvaluationKind() == TEK_Aggregate);
1576       TempAddr = ResultSlot.getAddress();
1577     } else
1578       TempAddr = CreateTempAlloca();
1579 
1580     EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1581 
1582     // Okay, turn that back into the original value or whole atomic (for
1583     // non-simple lvalues) type.
1584     return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1585   }
1586 
1587   // Okay, we're doing this natively.
1588   auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1589 
1590   // If we're ignoring an aggregate return, don't do anything.
1591   if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1592     return RValue::getAggregate(Address::invalid(), false);
1593 
1594   // Okay, turn that back into the original value or atomic (for non-simple
1595   // lvalues) type.
1596   return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1597 }
1598 
1599 /// Emit a load from an l-value of atomic type.  Note that the r-value
1600 /// we produce is an r-value of the atomic *value* type.
1601 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1602                                        llvm::AtomicOrdering AO, bool IsVolatile,
1603                                        AggValueSlot resultSlot) {
1604   AtomicInfo Atomics(*this, src);
1605   return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1606                                 IsVolatile);
1607 }
1608 
1609 /// Copy an r-value into memory as part of storing to an atomic type.
1610 /// This needs to create a bit-pattern suitable for atomic operations.
1611 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1612   assert(LVal.isSimple());
1613   // If we have an r-value, the rvalue should be of the atomic type,
1614   // which means that the caller is responsible for having zeroed
1615   // any padding.  Just do an aggregate copy of that type.
1616   if (rvalue.isAggregate()) {
1617     LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1618     LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1619                                     getAtomicType());
1620     bool IsVolatile = rvalue.isVolatileQualified() ||
1621                       LVal.isVolatileQualified();
1622     CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1623                           AggValueSlot::DoesNotOverlap, IsVolatile);
1624     return;
1625   }
1626 
1627   // Okay, otherwise we're copying stuff.
1628 
1629   // Zero out the buffer if necessary.
1630   emitMemSetZeroIfNecessary();
1631 
1632   // Drill past the padding if present.
1633   LValue TempLVal = projectValue();
1634 
1635   // Okay, store the rvalue in.
1636   if (rvalue.isScalar()) {
1637     CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1638   } else {
1639     CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1640   }
1641 }
1642 
1643 
1644 /// Materialize an r-value into memory for the purposes of storing it
1645 /// to an atomic type.
1646 Address AtomicInfo::materializeRValue(RValue rvalue) const {
1647   // Aggregate r-values are already in memory, and EmitAtomicStore
1648   // requires them to be values of the atomic type.
1649   if (rvalue.isAggregate())
1650     return rvalue.getAggregateAddress();
1651 
1652   // Otherwise, make a temporary and materialize into it.
1653   LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1654   AtomicInfo Atomics(CGF, TempLV);
1655   Atomics.emitCopyIntoMemory(rvalue);
1656   return TempLV.getAddress(CGF);
1657 }
1658 
1659 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1660   // If we've got a scalar value of the right size, try to avoid going
1661   // through memory.
1662   if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1663     llvm::Value *Value = RVal.getScalarVal();
1664     if (isa<llvm::IntegerType>(Value->getType()))
1665       return CGF.EmitToMemory(Value, ValueTy);
1666     else {
1667       llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1668           CGF.getLLVMContext(),
1669           LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1670       if (isa<llvm::PointerType>(Value->getType()))
1671         return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1672       else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1673         return CGF.Builder.CreateBitCast(Value, InputIntTy);
1674     }
1675   }
1676   // Otherwise, we need to go through memory.
1677   // Put the r-value in memory.
1678   Address Addr = materializeRValue(RVal);
1679 
1680   // Cast the temporary to the atomic int type and pull a value out.
1681   Addr = emitCastToAtomicIntPointer(Addr);
1682   return CGF.Builder.CreateLoad(Addr);
1683 }
1684 
1685 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1686     llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1687     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1688   // Do the atomic store.
1689   Address Addr = getAtomicAddressAsAtomicIntPointer();
1690   auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1691                                                ExpectedVal, DesiredVal,
1692                                                Success, Failure);
1693   // Other decoration.
1694   Inst->setVolatile(LVal.isVolatileQualified());
1695   Inst->setWeak(IsWeak);
1696 
1697   // Okay, turn that back into the original value type.
1698   auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1699   auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1700   return std::make_pair(PreviousVal, SuccessFailureVal);
1701 }
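// Illustrative sketch (not part of the original source): the native path above
// emits a cmpxchg and splits its aggregate result, e.g.
//
//   %pair = cmpxchg i32* %addr, i32 %expected, i32 %desired seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1
//
// with the 'weak' and 'volatile' markers added when requested.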
1702 
1703 llvm::Value *
1704 AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1705                                              llvm::Value *DesiredAddr,
1706                                              llvm::AtomicOrdering Success,
1707                                              llvm::AtomicOrdering Failure) {
1708   // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1709   // void *desired, int success, int failure);
1710   CallArgList Args;
1711   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1712   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1713            CGF.getContext().VoidPtrTy);
1714   Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1715            CGF.getContext().VoidPtrTy);
1716   Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1717            CGF.getContext().VoidPtrTy);
1718   Args.add(RValue::get(
1719                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1720            CGF.getContext().IntTy);
1721   Args.add(RValue::get(
1722                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1723            CGF.getContext().IntTy);
1724   auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1725                                               CGF.getContext().BoolTy, Args);
1726 
1727   return SuccessFailureRVal.getScalarVal();
1728 }
1729 
1730 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1731     RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1732     llvm::AtomicOrdering Failure, bool IsWeak) {
1733   if (isStrongerThan(Failure, Success))
1734     // Don't assert on undefined behavior "failure argument shall be no stronger
1735     // than the success argument".
1736     Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1737 
1738   // Check whether we should use a library call.
1739   if (shouldUseLibcall()) {
1740     // Produce a source address.
1741     Address ExpectedAddr = materializeRValue(Expected);
1742     Address DesiredAddr = materializeRValue(Desired);
1743     auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1744                                                  DesiredAddr.getPointer(),
1745                                                  Success, Failure);
1746     return std::make_pair(
1747         convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1748                                   SourceLocation(), /*AsValue=*/false),
1749         Res);
1750   }
1751 
1752   // If we've got a scalar value of the right size, try to avoid going
1753   // through memory.
1754   auto *ExpectedVal = convertRValueToInt(Expected);
1755   auto *DesiredVal = convertRValueToInt(Desired);
1756   auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1757                                          Failure, IsWeak);
1758   return std::make_pair(
1759       ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1760                                 SourceLocation(), /*AsValue=*/false),
1761       Res.second);
1762 }
1763 
1764 static void
1765 EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1766                       const llvm::function_ref<RValue(RValue)> &UpdateOp,
1767                       Address DesiredAddr) {
1768   RValue UpRVal;
1769   LValue AtomicLVal = Atomics.getAtomicLValue();
1770   LValue DesiredLVal;
1771   if (AtomicLVal.isSimple()) {
1772     UpRVal = OldRVal;
1773     DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1774   } else {
1775     // Build new lvalue for temp address.
1776     Address Ptr = Atomics.materializeRValue(OldRVal);
1777     LValue UpdateLVal;
1778     if (AtomicLVal.isBitField()) {
1779       UpdateLVal =
1780           LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1781                                AtomicLVal.getType(),
1782                                AtomicLVal.getBaseInfo(),
1783                                AtomicLVal.getTBAAInfo());
1784       DesiredLVal =
1785           LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1786                                AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1787                                AtomicLVal.getTBAAInfo());
1788     } else if (AtomicLVal.isVectorElt()) {
1789       UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1790                                          AtomicLVal.getType(),
1791                                          AtomicLVal.getBaseInfo(),
1792                                          AtomicLVal.getTBAAInfo());
1793       DesiredLVal = LValue::MakeVectorElt(
1794           DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1795           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1796     } else {
1797       assert(AtomicLVal.isExtVectorElt());
1798       UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1799                                             AtomicLVal.getType(),
1800                                             AtomicLVal.getBaseInfo(),
1801                                             AtomicLVal.getTBAAInfo());
1802       DesiredLVal = LValue::MakeExtVectorElt(
1803           DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1804           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1805     }
1806     UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1807   }
1808   // Store new value in the corresponding memory area.
1809   RValue NewRVal = UpdateOp(UpRVal);
1810   if (NewRVal.isScalar()) {
1811     CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1812   } else {
1813     assert(NewRVal.isComplex());
1814     CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1815                            /*isInit=*/false);
1816   }
1817 }
1818 
1819 void AtomicInfo::EmitAtomicUpdateLibcall(
1820     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1821     bool IsVolatile) {
1822   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1823 
1824   Address ExpectedAddr = CreateTempAlloca();
1825 
1826   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1827   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1828   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1829   CGF.EmitBlock(ContBB);
1830   Address DesiredAddr = CreateTempAlloca();
1831   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1832       requiresMemSetZero(getAtomicAddress().getElementType())) {
1833     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1834     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1835   }
1836   auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1837                                            AggValueSlot::ignored(),
1838                                            SourceLocation(), /*AsValue=*/false);
1839   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1840   auto *Res =
1841       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1842                                        DesiredAddr.getPointer(),
1843                                        AO, Failure);
1844   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1845   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1846 }
1847 
1848 void AtomicInfo::EmitAtomicUpdateOp(
1849     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1850     bool IsVolatile) {
1851   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1852 
1853   // Do the atomic load.
1854   auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
1855   // For non-simple lvalues perform compare-and-swap procedure.
1856   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1857   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1858   auto *CurBB = CGF.Builder.GetInsertBlock();
1859   CGF.EmitBlock(ContBB);
1860   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1861                                              /*NumReservedValues=*/2);
1862   PHI->addIncoming(OldVal, CurBB);
1863   Address NewAtomicAddr = CreateTempAlloca();
1864   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1865   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1866       requiresMemSetZero(getAtomicAddress().getElementType())) {
1867     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1868   }
1869   auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1870                                            SourceLocation(), /*AsValue=*/false);
1871   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1872   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1873   // Try to write new value using cmpxchg operation.
1874   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1875   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1876   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1877   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1878 }
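// Illustrative sketch (not part of the original source): the update helpers
// above implement a classic compare-and-swap loop.  In pseudo-C it amounts to
//
//   T old = atomic_load(addr, failure_order);
//   T desired;
//   do {
//     desired = UpdateOp(old);            // user-supplied computation
//   } while (!atomic_compare_exchange(addr, &old, desired, AO, failure_order));
//
// where 'old' is threaded through the loop via the PHI node in the IR form.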
1879 
1880 static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1881                                   RValue UpdateRVal, Address DesiredAddr) {
1882   LValue AtomicLVal = Atomics.getAtomicLValue();
1883   LValue DesiredLVal;
1884   // Build new lvalue for temp address.
1885   if (AtomicLVal.isBitField()) {
1886     DesiredLVal =
1887         LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1888                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1889                              AtomicLVal.getTBAAInfo());
1890   } else if (AtomicLVal.isVectorElt()) {
1891     DesiredLVal =
1892         LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1893                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1894                               AtomicLVal.getTBAAInfo());
1895   } else {
1896     assert(AtomicLVal.isExtVectorElt());
1897     DesiredLVal = LValue::MakeExtVectorElt(
1898         DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1899         AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1900   }
1901   // Store new value in the corresponding memory area.
1902   assert(UpdateRVal.isScalar());
1903   CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1904 }
1905 
1906 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1907                                          RValue UpdateRVal, bool IsVolatile) {
1908   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1909 
1910   Address ExpectedAddr = CreateTempAlloca();
1911 
1912   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1913   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1914   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1915   CGF.EmitBlock(ContBB);
1916   Address DesiredAddr = CreateTempAlloca();
1917   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1918       requiresMemSetZero(getAtomicAddress().getElementType())) {
1919     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1920     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1921   }
1922   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1923   auto *Res =
1924       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1925                                        DesiredAddr.getPointer(),
1926                                        AO, Failure);
1927   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1928   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1929 }
1930 
1931 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1932                                     bool IsVolatile) {
1933   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1934 
1935   // Do the atomic load.
1936   auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
1937   // For non-simple lvalues perform compare-and-swap procedure.
1938   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1939   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1940   auto *CurBB = CGF.Builder.GetInsertBlock();
1941   CGF.EmitBlock(ContBB);
1942   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1943                                              /*NumReservedValues=*/2);
1944   PHI->addIncoming(OldVal, CurBB);
1945   Address NewAtomicAddr = CreateTempAlloca();
1946   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1947   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1948       requiresMemSetZero(getAtomicAddress().getElementType())) {
1949     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1950   }
1951   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1952   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1953   // Try to write new value using cmpxchg operation.
1954   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1955   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1956   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1957   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1958 }
1959 
1960 void AtomicInfo::EmitAtomicUpdate(
1961     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1962     bool IsVolatile) {
1963   if (shouldUseLibcall()) {
1964     EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1965   } else {
1966     EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1967   }
1968 }
1969 
1970 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1971                                   bool IsVolatile) {
1972   if (shouldUseLibcall()) {
1973     EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1974   } else {
1975     EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1976   }
1977 }
1978 
1979 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1980                                       bool isInit) {
1981   bool IsVolatile = lvalue.isVolatileQualified();
1982   llvm::AtomicOrdering AO;
1983   if (lvalue.getType()->isAtomicType()) {
1984     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1985   } else {
1986     AO = llvm::AtomicOrdering::Release;
1987     IsVolatile = true;
1988   }
1989   return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1990 }
1991 
1992 /// Emit a store to an l-value of atomic type.
1993 ///
1994 /// Note that the r-value is expected to be an r-value *of the atomic
1995 /// type*; this means that for aggregate r-values, it should include
1996 /// storage for any padding that was necessary.
1997 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1998                                       llvm::AtomicOrdering AO, bool IsVolatile,
1999                                       bool isInit) {
2000   // If this is an aggregate r-value, it should agree in type except
2001   // maybe for address-space qualification.
2002   assert(!rvalue.isAggregate() ||
2003          rvalue.getAggregateAddress().getElementType() ==
2004              dest.getAddress(*this).getElementType());
2005 
2006   AtomicInfo atomics(*this, dest);
2007   LValue LVal = atomics.getAtomicLValue();
2008 
2009   // If this is an initialization, just put the value there normally.
2010   if (LVal.isSimple()) {
2011     if (isInit) {
2012       atomics.emitCopyIntoMemory(rvalue);
2013       return;
2014     }
2015 
2016     // Check whether we should use a library call.
2017     if (atomics.shouldUseLibcall()) {
2018       // Produce a source address.
2019       Address srcAddr = atomics.materializeRValue(rvalue);
2020 
2021       // void __atomic_store(size_t size, void *mem, void *val, int order)
2022       CallArgList args;
2023       args.add(RValue::get(atomics.getAtomicSizeValue()),
2024                getContext().getSizeType());
2025       args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
2026                getContext().VoidPtrTy);
2027       args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
2028                getContext().VoidPtrTy);
2029       args.add(
2030           RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
2031           getContext().IntTy);
2032       emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
2033       return;
2034     }
2035 
2036     // Okay, we're doing this natively.
2037     llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
2038 
2039     // Do the atomic store.
2040     Address addr =
2041         atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
2042     intValue = Builder.CreateIntCast(
2043         intValue, addr.getElementType(), /*isSigned=*/false);
2044     llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
2045 
2046     if (AO == llvm::AtomicOrdering::Acquire)
2047       AO = llvm::AtomicOrdering::Monotonic;
2048     else if (AO == llvm::AtomicOrdering::AcquireRelease)
2049       AO = llvm::AtomicOrdering::Release;
2050     // Initializations don't need to be atomic.
2051     if (!isInit)
2052       store->setAtomic(AO);
2053 
2054     // Other decoration.
2055     if (IsVolatile)
2056       store->setVolatile(true);
2057     CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
2058     return;
2059   }
2060 
2061   // Emit simple atomic update operation.
2062   atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
2063 }
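// Illustrative sketch (not part of the original source): for a simple
// _Atomic(int) store at seq_cst the native path above produces
//
//   store atomic i32 %val, i32* %addr seq_cst, align 4
//
// while acquire/acq_rel orderings, which are invalid for stores, are demoted
// to monotonic/release respectively before the instruction is emitted.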
2064 
2065 /// Emit a compare-and-exchange op for atomic type.
2066 ///
2067 std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
2068     LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
2069     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
2070     AggValueSlot Slot) {
2071   // If this is an aggregate r-value, it should agree in type except
2072   // maybe for address-space qualification.
2073   assert(!Expected.isAggregate() ||
2074          Expected.getAggregateAddress().getElementType() ==
2075              Obj.getAddress(*this).getElementType());
2076   assert(!Desired.isAggregate() ||
2077          Desired.getAggregateAddress().getElementType() ==
2078              Obj.getAddress(*this).getElementType());
2079   AtomicInfo Atomics(*this, Obj);
2080 
2081   return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
2082                                            IsWeak);
2083 }
2084 
2085 void CodeGenFunction::EmitAtomicUpdate(
2086     LValue LVal, llvm::AtomicOrdering AO,
2087     const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2088   AtomicInfo Atomics(*this, LVal);
2089   Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2090 }
2091 
2092 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2093   AtomicInfo atomics(*this, dest);
2094 
2095   switch (atomics.getEvaluationKind()) {
2096   case TEK_Scalar: {
2097     llvm::Value *value = EmitScalarExpr(init);
2098     atomics.emitCopyIntoMemory(RValue::get(value));
2099     return;
2100   }
2101 
2102   case TEK_Complex: {
2103     ComplexPairTy value = EmitComplexExpr(init);
2104     atomics.emitCopyIntoMemory(RValue::getComplex(value));
2105     return;
2106   }
2107 
2108   case TEK_Aggregate: {
2109     // Fix up the destination if the initializer isn't an expression
2110     // of atomic type.
2111     bool Zeroed = false;
2112     if (!init->getType()->isAtomicType()) {
2113       Zeroed = atomics.emitMemSetZeroIfNecessary();
2114       dest = atomics.projectValue();
2115     }
2116 
2117     // Evaluate the expression directly into the destination.
2118     AggValueSlot slot = AggValueSlot::forLValue(
2119         dest, *this, AggValueSlot::IsNotDestructed,
2120         AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
2121         AggValueSlot::DoesNotOverlap,
2122         Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
2123 
2124     EmitAggExpr(init, slot);
2125     return;
2126   }
2127   }
2128   llvm_unreachable("bad evaluation kind");
2129 }
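// Illustrative sketch (not part of the original source): atomic initialization
// does not need atomic instructions, so
//
//   _Atomic(int) counter = 42;
//
// is emitted as an ordinary store of the value's bit pattern (after zeroing
// any padding for aggregate types), matching the TEK_Scalar path above.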
2130