//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
    IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  enum ExprValueKind {
    EVK_RValue,
    EVK_NonRValue
  };

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcIsRValue is true if source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         ExprValueKind SrcValueKind = EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                     QualType ArrayQTy, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  void VisitConstantExpr(ConstantExpr *E) {
    return Visit(E->getSubExpr());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);
  void VisitBinCmp(const BinaryOperator *E);
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    Visit(E->getSemanticForm());
  }

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}
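
// Illustrative sketch (not from a specific test): given
//   struct S { int a[4]; };
//   S dst = *p;          // 'p' is an 'S *'
// the dereference is emitted as an lvalue for '*p' and the result is copied
// into the destination slot by EmitFinalDestCopy; if the type were
// _Atomic-qualified, the atomic-load path above would be taken instead.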

/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}
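
// For example (illustrative, assuming Objective-C GC is enabled): a C struct
// such as
//   struct Pair { id first; id second; };
// has Objective-C object members, so copies of it must go through the
// collector-aware memmove (EmitGCMemmoveCollectable) rather than a plain
// memcpy; a C++ type with a non-trivial copy constructor is left alone here.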

void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      Dest.isIgnored() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && !Dest.getAddress().isValid());

  Address RetAddr = Address::invalid();
  Address RetAllocaAddr = Address::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::Value *LifetimeSizePtr = nullptr;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
  if (!UseTemp) {
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
    uint64_t Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
    if (LifetimeSizePtr) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused));

  if (RequiresDestruction)
    CGF.pushDestroy(RetTy.isDestructedType(), Src.getAggregateAddress(), RetTy);

  if (!UseTemp)
    return;

  assert(Dest.getPointer() != Src.getAggregatePointer());
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
  }
}
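
// Illustrative sketch: for
//   struct S makeS(void);
//   s = makeS();        // 's' may alias memory the callee can observe
// the call is given a fresh temporary as its return slot (wrapped in
// lifetime markers), and the result is then copied into 's'. When the
// destination cannot alias, as in 'S s = makeS();', UseTemp stays false and
// the call writes into 's' directly.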

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV, EVK_RValue);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
                                       ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  AggValueSlot srcAgg = AggValueSlot::forLValue(
      src, CGF, AggValueSlot::IsDestructed, needsGC(type),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  EmitCopy(type, Dest, srcAgg);
}
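
// For example (illustrative, assuming ARC C code): a struct like
//   struct Holder { __strong id obj; };
// is non-trivial to copy, so 'dst = src;' dispatches to
// callCStructCopyAssignmentOperator above instead of falling through to the
// plain aggregate copy at the bottom of this function.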

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}

/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress(CGF);

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart =
      Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd =
        Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}
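
// Illustrative sketch: for
//   std::initializer_list<int> il = {1, 2, 3};
// the elements live in a backing 'int[3]' array, and this routine fills in
// the list object itself: a begin pointer plus either an end pointer or a
// length, depending on how the library defines std::initializer_list.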

/// Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}
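
// For example (illustrative): in
//   int a[8] = {1, 2};
// the six remaining elements have an implicit zero filler, which is trivial;
// if the element type instead had a non-trivial default constructor, the
// filler would require emitting real per-element initialization.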

/// Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin =
    Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
    DestPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGF);
    LangAS AS = ArrayQTy.getAddressSpace();
    if (llvm::Constant *C = Emitter.tryEmitForInitializer(E, AS, ArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          CGM.isTypeConstant(ArrayQTy, /* ExcludeCtorDtor= */ true),
          llvm::GlobalValue::PrivateLinkage, C, "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
      GV->setAlignment(Align.getAsAlign());
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GV, ArrayQTy, Align));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV =
      CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
    EmitInitializationToLValue(E->getInit(i), elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = isTrivialFiller(filler);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                      llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV =
        CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement =
      Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}
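
// Illustrative sketch of the filler loop emitted above, roughly in LLVM IR
// terms, for something like 'S a[100] = {x};':
//
//   arrayinit.body:
//     %cur = phi S* [ %start, %entry ], [ %next, %arrayinit.body ]
//     ... initialize *%cur from the filler ...
//     %next = getelementptr inbounds S, S* %cur, i64 1
//     %done = icmp eq S* %next, %end
//     br i1 %done, label %arrayinit.end, label %arrayinit.body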

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  // If this is a unique OVE, just visit its source expression.
  if (e->isUnique())
    Visit(e->getSourceExpr());
  else
    EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind) {
  while (true) {
    op = op->IgnoreParens();
    if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
      if (castE->getCastKind() == kind)
        return castE->getSubExpr();
      // Look through no-op casts and keep searching; 'op' must advance
      // past the cast or the loop would never terminate.
      if (castE->getCastKind() == CK_NoOp) {
        op = castE->getSubExpr();
        continue;
      }
    }
    return nullptr;
  }
}
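
// Illustrative sketch: in a round trip such as
//   (_Atomic T)(T)atomicVal
// the outer CK_NonAtomicToAtomic cast searches its operand (through parens
// and no-op casts) for the reverse CK_AtomicToNonAtomic cast, and the pair
// collapses so the value is emitted directly.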

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr =
      Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_LValueToRValueBitCast: {
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
    Address SourceAddress =
        Builder.CreateElementBitCast(SourceLV.getAddress(CGF), CGF.Int8Ty);
    Address DestAddress =
        Builder.CreateElementBitCast(Dest.getAddress(), CGF.Int8Ty);
    llvm::Value *SizeVal = llvm::ConstantInt::get(
        CGF.SizeTy,
        CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
    Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
           "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::DoesNotOverlap,
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }
  case CK_AddressSpaceConversion:
    return Visit(E->getSubExpr());

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      EnsureDest(E->getType());
      return Visit(E->getSubExpr());
    }

    LLVM_FALLTHROUGH;

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:
  case CK_IntToOCLSampler:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitCallExpr(E, Slot);
  });
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

enum CompareKind {
  CK_Less,
  CK_Greater,
  CK_Equal,
};

static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                const BinaryOperator *E, llvm::Value *LHS,
                                llvm::Value *RHS, CompareKind Kind,
                                const char *NameSuffix = "") {
  QualType ArgTy = E->getLHS()->getType();
  if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
    ArgTy = CT->getElementType();

  if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
    assert(Kind == CK_Equal &&
           "member pointers may only be compared for equality");
    return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, /*IsInequality*/ false);
  }

  // Compute the comparison instructions for the specified comparison kind.
  struct CmpInstInfo {
    const char *Name;
    llvm::CmpInst::Predicate FCmp;
    llvm::CmpInst::Predicate SCmp;
    llvm::CmpInst::Predicate UCmp;
  };
  CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
    using FI = llvm::FCmpInst;
    using II = llvm::ICmpInst;
    switch (Kind) {
    case CK_Less:
      return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
    case CK_Greater:
      return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
    case CK_Equal:
      return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
    }
    llvm_unreachable("Unrecognised CompareKind enum");
  }();

  if (ArgTy->hasFloatingRepresentation())
    return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
    auto Inst =
        ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
    return Builder.CreateICmp(Inst, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  }

  llvm_unreachable("unsupported aggregate binary expression should have "
                   "already been handled");
}
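
// Illustrative mapping from the table above: CK_Less yields 'fcmp olt' for
// floating-point operands, 'icmp slt' for signed integers, and 'icmp ult'
// for unsigned ones; member pointers are handled separately and support
// only equality comparison.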

void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
  using llvm::BasicBlock;
  using llvm::PHINode;
  using llvm::Value;
  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
                                      E->getRHS()->getType()));
  const ComparisonCategoryInfo &CmpInfo =
      CGF.getContext().CompCategories.getInfoForType(E->getType());
  assert(CmpInfo.Record->isTriviallyCopyable() &&
         "cannot copy non-trivially copyable aggregate");

  QualType ArgTy = E->getLHS()->getType();

  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
    return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
  }
  bool IsComplex = ArgTy->isAnyComplexType();

  // Evaluate the operands to the expression and extract their values.
  auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
    RValue RV = CGF.EmitAnyExpr(E);
    if (RV.isScalar())
      return {RV.getScalarVal(), nullptr};
    if (RV.isAggregate())
      return {RV.getAggregatePointer(), nullptr};
    assert(RV.isComplex());
    return RV.getComplexVal();
  };
  auto LHSValues = EmitOperand(E->getLHS()),
       RHSValues = EmitOperand(E->getRHS());

  auto EmitCmp = [&](CompareKind K) {
    Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
                             K, IsComplex ? ".r" : "");
    if (!IsComplex)
      return Cmp;
    assert(K == CompareKind::CK_Equal);
    Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
                                 RHSValues.second, K, ".i");
    return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
  };
  auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
    return Builder.getInt(VInfo->getIntValue());
  };

  Value *Select;
  if (ArgTy->isNullPtrType()) {
    Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
  } else if (!CmpInfo.isPartial()) {
    Value *SelectOne =
        Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
                             EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
    Select = Builder.CreateSelect(EmitCmp(CK_Equal),
                                  EmitCmpRes(CmpInfo.getEqualOrEquiv()),
                                  SelectOne, "sel.eq");
  } else {
    Value *SelectEq = Builder.CreateSelect(
        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
        EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
    Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
                                           EmitCmpRes(CmpInfo.getGreater()),
                                           SelectEq, "sel.gt");
    Select = Builder.CreateSelect(
        EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
  }
  // Create the return value in the destination slot.
  EnsureDest(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Emit the address of the first (and only) field in the comparison category
  // type, and initialize it from the constant integer value selected above.
  LValue FieldLV = CGF.EmitLValueForFieldInitialization(
      DestLV, *CmpInfo.Record->field_begin());
  CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);

  // All done! The result is in the Dest slot.
}
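
// Illustrative sketch: for 'auto r = a <=> b;' with ints (a non-partial
// ordering), this emits roughly
//   %lt  = icmp slt i32 %a, %b
//   %sel = select i1 %lt, i32 LESS, i32 GREATER
//   %eq  = icmp eq  i32 %a, %b
//   %res = select i1 %eq, i32 EQUAL, i32 %sel
// where LESS/EQUAL/GREATER stand for the comparison category's constant
// values, and %res is stored into the category object's single field.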

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}
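
// Illustrative sketch: in
//   __block struct S s;
//   s = makeSWithSideEffects();  // RHS might copy a block referencing 's'
// the RHS can move 's' to the heap while it runs, so VisitBinAssign below
// evaluates the RHS into a temporary first and only then emits the LHS.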

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased,
                                     AggValueSlot::MayOverlap),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot = AggValueSlot::forLValue(
      LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  // A non-volatile aggregate destination might have a volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);
}
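
// Illustrative sketch: in the common case 's1 = s2;' the RHS is emitted
// directly into the storage of 's1' with no temporary; a further copy
// happens only when the value of the assignment expression itself is used,
// as in 's0 = (s1 = s2);'.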

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}
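
// Illustrative sketch: for 'S s = cond ? f() : g();' both arms are emitted
// into the same destination slot, with a branch on 'cond' selecting which
// arm runs; only the arm actually taken initializes the slot.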

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}
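
// Illustrative sketch (assuming 'S' has a non-trivial destructor): in
// 'take(S());' the CXXBindTemporaryExpr wraps the construction of the
// temporary, and the destructor pushed here runs at the end of the
// full-expression unless the slot was already externally destructed.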

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitInheritedCXXConstructorCall(
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
      E->inheritedFromVBase(), E);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> Cleanups;
  llvm::Instruction *CleanupDominator = nullptr;

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
                                               e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit initialization
    LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
      continue;
    }

    EmitInitializationToLValue(*i, LV);

    // Push a destructor if necessary.
    if (QualType::DestructionKind DtorKind =
            CurField->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(DtorKind)) {
        if (!CleanupDominator)
          CleanupDominator = CGF.Builder.CreateAlignedLoad(
              CGF.Int8Ty,
              llvm::Constant::getNullValue(CGF.Int8PtrTy),
              CharUnits::One()); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(),
                        CGF.getDestroyer(DtorKind), false);
        Cleanups.push_back(CGF.EHStack.stable_begin());
      }
    }
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = Cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator);

  // Destroy the placeholder if we made one.
  if (CleanupDominator)
    CleanupDominator->eraseFromParent();
}
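
// Illustrative sketch: the closure object is initialized field-by-field from
// the captures, e.g.:
//
//   int x = 1, y = 2;
//   auto l = [x, &y] { return x + y; };   // stores x by value and &y
//
// and an EH cleanup is pushed for each already-initialized capture with a
// non-trivial destructor, in case a later capture's initializer throws.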

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}
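
// Illustrative note (editor's sketch; 'S' is a hypothetical aggregate class):
// an ExprWithCleanups marks a full-expression, and the RunCleanupsScope above
// runs the destructors of temporaries bound while emitting the subexpression.
// E.g. the std::string temporary in
//
//   S s = S(std::string("tmp").c_str());
//
// is destroyed once 's' has been fully initialized.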

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer &&
           CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
           !E->HasSideEffects(CGF.getContext());
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}

void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zeroed memory location is a no-op.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (isa<NoInitExpr>(E)) {
    // Do nothing.
    return;
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(
        E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed,
                                   AggValueSlot::DoesNotNeedGCBarriers,
                                   AggValueSlot::IsNotAliased,
                                   AggValueSlot::MayOverlap, Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType());
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant *C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable *GV =
        new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                                 llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  if (E->isTransparent())
    return Visit(E->getInit(0));

  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
    EmitArrayInit(Dest.getAddress(), AType, E->getType(), E);
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = nullptr;
  auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) {
    cleanups.push_back(cleanup);
    if (!cleanupDominator) // create placeholder once needed
      cleanupDominator = CGF.Builder.CreateAlignedLoad(
          CGF.Int8Ty, llvm::Constant::getNullValue(CGF.Int8PtrTy),
          CharUnits::One());
  };

  unsigned curInitIndex = 0;

  // Emit initialization of base classes.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
    assert(E->getNumInits() >= CXXRD->getNumBases() &&
           "missing initializer for base class");
    for (auto &Base : CXXRD->bases()) {
      assert(!Base.isVirtual() && "should not see vbases here");
      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
      Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
          Dest.getAddress(), CXXRD, BaseRD,
          /*isBaseVirtual*/ false);
      AggValueSlot AggSlot = AggValueSlot::forAddr(
          V, Qualifiers(),
          AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers,
          AggValueSlot::IsNotAliased,
          CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
      CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot);

      if (QualType::DestructionKind dtorKind =
              Base.getType().isDestructedType()) {
        CGF.pushDestroy(dtorKind, V, Base.getType());
        addCleanup(CGF.EHStack.stable_begin());
      }
    }
  }

  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (const auto *Field : record->fields())
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (const auto *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        addCleanup(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
              dyn_cast<llvm::GetElementPtrInst>(LV.getPointer(CGF)))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  assert((cleanupDominator || cleanups.empty()) &&
         "Missing cleanupDominator before deactivating cleanup blocks");
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}
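
// Illustrative sketch of the struct path above: given
//
//   struct P { int x, y, z; };
//   P p = {1, 2};
//
// the first two fields are stored from the explicit initializers and 'z' is
// null-initialized (or skipped entirely if the slot is already zeroed and
// the remainder is zero-initializable).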

void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                                            llvm::Value *outerBegin) {
  // Emit the common subexpression.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());

  Address destPtr = EnsureSlot(E->getType()).getAddress();
  uint64_t numElements = E->getArraySize().getZExtValue();

  if (!numElements)
    return;

  // destPtr is an array*. Construct an elementType* by drilling down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = {zero, zero};
  llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getPointer(), indices,
                                                 "arrayinit.begin");

  // Prepare to special-case multidimensional array initialization: we avoid
  // emitting multiple destructor loops in that case.
  if (!outerBegin)
    outerBegin = begin;
  ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());

  QualType elementType =
      CGF.getContext().getAsArrayType(E->getType())->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);

  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

  // Jump into the body.
  CGF.EmitBlock(bodyBB);
  llvm::PHINode *index =
      Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
  index->addIncoming(zero, entryBB);
  llvm::Value *element = Builder.CreateInBoundsGEP(begin, index);

  // Prepare for a cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
    if (outerBegin->getType() != element->getType())
      outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
    CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
                                       elementAlign,
                                       CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();
  } else {
    dtorKind = QualType::DK_none;
  }

  // Emit the actual filler expression.
  {
    // Temporaries created in an array initialization loop are destroyed
    // at the end of each iteration.
    CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
    CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
    LValue elementLV =
        CGF.MakeAddrLValue(Address(element, elementAlign), elementType);

    if (InnerLoop) {
      // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
      auto elementSlot = AggValueSlot::forLValue(
          elementLV, CGF, AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
          AggValueSlot::DoesNotOverlap);
      AggExprEmitter(CGF, elementSlot, false)
          .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
    } else
      EmitInitializationToLValue(E->getSubExpr(), elementLV);
  }

  // Move on to the next element.
  llvm::Value *nextIndex = Builder.CreateNUWAdd(
      index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
  index->addIncoming(nextIndex, Builder.GetInsertBlock());

  // Leave the loop if we're done.
  llvm::Value *done = Builder.CreateICmpEQ(
      nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
      "arrayinit.done");
  llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
  Builder.CreateCondBr(done, endBB, bodyBB);

  CGF.EmitBlock(endBB);

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind)
    CGF.DeactivateCleanupBlock(cleanup, index);
}
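
// Illustrative note: ArrayInitLoopExprs arise, e.g., when copying an array
// member in a defaulted copy constructor or capturing an array by value:
//
//   int arr[8];
//   auto l = [arr] {};   // the closure's array field is filled by this loop
//
// The emitted IR is a single loop over arrayinit.index rather than eight
// unrolled element copies.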

void AggExprEmitter::VisitDesignatedInitUpdateExpr(
    DesignatedInitUpdateExpr *E) {
  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  EmitInitializationToLValue(E->getBase(), DestLV);
  VisitInitListExpr(E->getUpdater());
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  while (ILE && ILE->isTransparent())
    ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = RT->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
        while (ILEElement != CXXRD->getNumBases())
          NumNonZeroBytes +=
              GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
      for (const auto *Field : SD->fields()) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getTarget().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}
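
// Worked example (editor's illustration, assuming 4-byte ints): for
//
//   int arr[100] = {1, 2};
//
// only the two explicit elements count, so this returns 8 bytes out of a
// 400-byte object; the InitListExpr's implicit zero fill contributes
// nothing.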

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
    return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                       .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
  if (Size <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > Size)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());

  Address Loc = Slot.getAddress();
  Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}
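
// Worked example (editor's illustration, assuming 4-byte ints): with
//
//   struct S { int a[16]; };
//   struct S s = {{1, 2, 3}};
//
// Size is 64 bytes and GetNumNonZeroBytesInInit is 12, so 12*4 <= 64 holds
// and we memset the whole slot first, then store only the three non-zero
// elements into the already-zeroed memory.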

/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into the given slot.  Note that if the slot
/// is ignored, the value of the aggregate expression is not needed.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  Address Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(
                     LV, *this, AggValueSlot::IsNotDestructed,
                     AggValueSlot::DoesNotNeedGCBarriers,
                     AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap));
  return LV;
}

AggValueSlot::Overlap_t
CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
  if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
    return AggValueSlot::DoesNotOverlap;

  // If the field lies entirely within the enclosing class's nvsize, its tail
  // padding cannot overlap any already-initialized object. (The only subobjects
  // with greater addresses that might already be initialized are vbases.)
  const RecordDecl *ClassRD = FD->getParent();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
  if (Layout.getFieldOffset(FD->getFieldIndex()) +
          getContext().getTypeSize(FD->getType()) <=
      (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
    return AggValueSlot::DoesNotOverlap;

  // The tail padding may contain values we need to preserve.
  return AggValueSlot::MayOverlap;
}
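
// Illustrative note (editor's sketch, hypothetical types V and B): a
// [[no_unique_address]] member can extend past the enclosing class's nvsize
// when its tail padding is shared with a virtual base, e.g.:
//
//   struct D : virtual V { [[no_unique_address]] B b; };
//
// Virtual bases are constructed before members, so if 'b' reaches into that
// shared storage its initialization must be treated as MayOverlap.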

AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
    const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
  // If the most-derived object is a field declared with [[no_unique_address]],
  // the tail padding of any virtual base could be reused for other subobjects
  // of that field's class.
  if (IsVirtual)
    return AggValueSlot::MayOverlap;

  // If the base class is laid out entirely within the nvsize of the derived
  // class, its tail padding cannot yet be initialized, so we can issue
  // stores at the full width of the base class.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  if (Layout.getBaseClassOffset(BaseRD) +
          getContext().getASTRecordLayout(BaseRD).getSize() <=
      Layout.getNonVirtualSize())
    return AggValueSlot::DoesNotOverlap;

  // The tail padding may contain values we need to preserve.
  return AggValueSlot::MayOverlap;
}
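
// Illustrative sketch (editor's note): given
//
//   struct B { B(); int i; char c; };  // non-POD: data size 5, sizeof 8
//   struct D : B { char d; };          // 'd' may land in B's tail padding
//
// B still lies entirely within D's nvsize, and 'd' is initialized only after
// the B subobject, so stores at B's full width are safe and this returns
// DoesNotOverlap. Virtual bases, by contrast, are always MayOverlap.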

void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
                                        AggValueSlot::Overlap_t MayOverlap,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  Address DestPtr = Dest.getAddress(*this);
  Address SrcPtr = Src.getAddress(*this);

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment() ||
              Record->isUnion()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get data size info for this aggregate. Don't copy the tail padding if this
  // might be a potentially-overlapping subobject, since the tail padding might
  // be occupied by a different object. Otherwise, copying it is fine.
  std::pair<CharUnits, CharUnits> TypeInfo;
  if (MayOverlap)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  llvm::Value *SizeVal = nullptr;
  if (TypeInfo.first.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
            getContext().getAsArrayType(Ty))) {
      QualType BaseEltTy;
      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
      TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
      assert(!TypeInfo.first.isZero());
      SizeVal = Builder.CreateNUWMul(
          SizeVal,
          llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
    }
  }
  if (!SizeVal) {
    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
  }

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
  SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it into scalar memory operations.
  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (CGM.getCodeGenOpts().NewStructPathTBAA) {
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
        Dest.getTBAAInfo(), Src.getTBAAInfo());
    CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
  }
}
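
// Illustrative note (editor's sketch): a plain aggregate assignment such as
//
//   struct S { int i; float f; };
//   void copy(struct S *d, struct S *s) { *d = *s; }
//
// lowers to roughly an llvm.memcpy of 8 bytes annotated with !tbaa.struct
// metadata describing the members, so the optimizer can split the copy back
// into scalar loads and stores when profitable.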