//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Passes/OptimizationLevel.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

#include <optional>
#include <string>

using namespace clang;
using namespace CodeGen;

// Experiment to make sanitizers easier to debug.
static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
    "ubsan-unique-traps", llvm::cl::Optional,
    llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check."));

// TODO: Introduce frontend options to enable these checks per sanitizer,
// similar to `-fsanitize-trap`.
static llvm::cl::opt<bool> ClSanitizeGuardChecks(
    "ubsan-guard-checks", llvm::cl::Optional,
    llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
RawAddress
CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
                                             const Twine &Name,
                                             llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getAsAlign());
  return RawAddress(Alloca, Ty, Align, KnownNonNull);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is casted to default address space if necessary.
RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
                                             const Twine &Name,
                                             llvm::Value *ArraySize,
                                             RawAddress *AllocaAddr) {
  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
  if (getASTAllocaAddressSpace() != LangAS::Default) {
    auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.
    if (!ArraySize)
      Builder.SetInsertPoint(getPostAllocaInsertPoint());
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), LangAS::Default,
        Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
  }

  return RawAddress(V, Ty, Align, KnownNonNull);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  llvm::AllocaInst *Alloca;
  if (ArraySize)
    Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
  else
    Alloca = new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                                  ArraySize, Name, &*AllocaInsertPt);
  if (Allocas) {
    Allocas->Add(Alloca);
  }
  return Alloca;
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                         const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                          RawAddress *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}

RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                          const Twine &Name,
                                          RawAddress *Alloca) {
  RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                                       /*ArraySize=*/nullptr, Alloca);

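  // A ConstantMatrixType is lowered to an LLVM array for in-memory storage,
  // but loads and stores of the whole matrix operate on the equivalent
  // flattened vector type, so reinterpret the temporary's element type here.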
  if (Ty->isConstantMatrixType()) {
    auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
                     KnownNonNull);
  }
  return Result;
}

RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     CharUnits Align,
                                                     const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}

RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  CGFPOptionsRAII FPOptsRAII(*this, E);
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
                                       Loc);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isPRValue())
    return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // If this is a bitfield-resulting conditional operator, we can special-case
  // emit it. The normal 'EmitLValue' version of this is particularly
  // difficult to codegen for, since creating a single "LValue" for two
  // different sized arguments here is not particularly doable.
  if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
          E->IgnoreParenNoopCasts(getContext()))) {
    if (CondOp->getObjectKind() == OK_BitField)
      return EmitIgnoredConditionalOperator(CondOp);
  }

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit),
                                         AggValueSlot::MayOverlap));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  //   FIXME: This should be looking at E, not M.
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise = isa_and_nonnull<VarDecl>(VD) &&
                         VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
  if (const RecordType *RT =
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
    // Get the destructor for the reference temporary.
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (!ClassDecl->hasTrivialDestructor())
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
  }

  if (!ReferenceTemporaryDtor)
    return;

  // Call the destructor for the temporary.
  switch (M->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    llvm::FunctionCallee CleanupFn;
    llvm::Constant *CleanupArg;
    if (E->getType()->isArrayType()) {
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
          ReferenceTemporary, E->getType(),
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
    } else {
      CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
          GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
    }
    CGF.CGM.getCXXABI().registerGlobalDtor(
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    break;
  }

  case SD_FullExpression:
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
                    CodeGenFunction::destroyCXXObject,
                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Automatic:
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
                                    ReferenceTemporary, E->getType(),
                                    CodeGenFunction::destroyCXXObject,
                                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}

static RawAddress createReferenceTemporary(CodeGenFunction &CGF,
                                           const MaterializeTemporaryExpr *M,
                                           const Expr *Inner,
                                           RawAddress *Alloca = nullptr) {
  auto &TCG = CGF.getTargetHooks();
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record try to promote it into a
    // constant global under the same rules a normal constant would've been
    // promoted. This is easier on the optimizer and generally emits fewer
    // instructions.
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        Ty.isConstantStorage(CGF.getContext(), true, false))
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
        auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
        auto *GV = new llvm::GlobalVariable(
            CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
            llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
            llvm::GlobalValue::NotThreadLocal,
            CGF.getContext().getTargetAddressSpace(AS));
        CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
        GV->setAlignment(alignment.getAsAlign());
        llvm::Constant *C = GV;
        if (AS != LangAS::Default)
          C = TCG.performAddrSpaceCast(
              CGF.CGM, GV, AS, LangAS::Default,
              GV->getValueType()->getPointerTo(
                  CGF.getContext().getTargetAddressSpace(LangAS::Default)));
        // FIXME: Should we put the new global into a COMDAT?
        return RawAddress(C, GV->getValueType(), alignment);
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

/// Helper method to check if the underlying ABI is AAPCS
static bool isAAPCS(const TargetInfo &TargetInfo) {
  return TargetInfo.getABI().starts_with("aapcs");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->getSubExpr();

  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    RawAddress Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      llvm::Type *Ty = ConvertTypeForMem(E->getType());
      Object = Object.withElementType(Ty);

      // createReferenceTemporary will promote the temporary to a global with a
      // constant initializer if it can. It can only do this to a value of
      // ARC-manageable type if the value is global and therefore "immune" to
      // ref-counting operations. Therefore we have no need to emit either a
      // dynamic initialization or a cleanup and we can just return the address
      // of the temporary.
      if (Var->hasInitializer())
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);

      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  RawAddress Alloca = Address::invalid();
  RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
          Object.getPointer()->stripPointerCasts())) {
    llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
    Object = Object.withElementType(TemporaryType);
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
    }
  } else {
    switch (M->getStorageDuration()) {
    case SD_Automatic:
      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                                  Alloca, Size);
      }
      break;

    case SD_FullExpression: {
      if (!ShouldEmitLifetimeMarkers)
        break;

      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this with sanitizers which need
      // more precise lifetime marks. However, when inside an "await.suspend"
      // block, we should always avoid conditional cleanup because it creates a
      // boolean marker that lives across await_suspend, which can destroy the
      // coroutine frame.
      ConditionalEvaluation *OldConditional = nullptr;
      CGBuilderTy::InsertPoint OldIP;
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
          ((!SanOpts.has(SanitizerKind::HWAddress) &&
            !SanOpts.has(SanitizerKind::Memory) &&
            !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
           inSuspendBlock())) {
        OldConditional = OutermostConditional;
        OutermostConditional = nullptr;

        OldIP = Builder.saveIP();
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
        Builder.restoreIP(CGBuilderTy::InsertPoint(
            Block, llvm::BasicBlock::iterator(Block->back())));
      }

      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
                                             Size);
      }

      if (OldConditional) {
        OutermostConditional = OldConditional;
        Builder.restoreIP(OldIP);
      }
      break;
    }

    default:
      break;
    }
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/ false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress();
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
                                               Adjustment.Ptr.MPT);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer(*this);

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

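// Mix the accumulated hash with a pointer-derived value. The multiply and
// xor-shift below follow a splitmix64-style finalizer step so that nearby
// vptr values spread across the UBSan type-cache slots used further down.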
static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
                                llvm::Value *Ptr) {
  llvm::Value *A0 =
      Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
  llvm::Value *A1 =
      Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
  return Builder.CreateXor(Acc, A1);
}

bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
         TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
}

bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
         (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
          TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
}

bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) ||
         SanOpts.has(SanitizerKind::Alignment) ||
         SanOpts.has(SanitizerKind::ObjectSize) ||
         SanOpts.has(SanitizerKind::Vptr);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Ptr, QualType Ty,
                                    CharUnits Alignment,
                                    SanitizerSet SkippedChecks,
                                    llvm::Value *ArraySize) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Ptr->getType()->getPointerAddressSpace())
    return;

  // Don't check pointers to volatile data. The behavior here is implementation-
  // defined.
  if (Ty.isVolatileQualified())
    return;

  SanitizerScope SanScope(this);

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
  llvm::BasicBlock *Done = nullptr;

  // Quickly determine whether we have a pointer to an alloca. It's possible
  // to skip null checks, and some alignment checks, for these pointers. This
  // can reduce compile-time significantly.
  auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());

  llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
  llvm::Value *IsNonNull = nullptr;
  bool IsGuaranteedNonNull =
      SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
  bool AllowNullPointers = isNullPointerAllowed(TCK);
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
      !IsGuaranteedNonNull) {
    // The glvalue must not be an empty glvalue.
    IsNonNull = Builder.CreateIsNotNull(Ptr);

    // The IR builder can constant-fold the null check if the pointer points to
    // a constant.
    IsGuaranteedNonNull = IsNonNull == True;

    // Skip the null check if the pointer is known to be non-null.
    if (!IsGuaranteedNonNull) {
      if (AllowNullPointers) {
        // When performing pointer casts, it's OK if the value is null.
        // Skip the remaining checks in that case.
        Done = createBasicBlock("null");
        llvm::BasicBlock *Rest = createBasicBlock("not.null");
        Builder.CreateCondBr(IsNonNull, Rest, Done);
        EmitBlock(Rest);
      } else {
        Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
      }
    }
  }

  if (SanOpts.has(SanitizerKind::ObjectSize) &&
      !SkippedChecks.has(SanitizerKind::ObjectSize) &&
      !Ty->isIncompleteType()) {
    uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
    llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
    if (ArraySize)
      Size = Builder.CreateMul(Size, ArraySize);

    // Degenerate case: new X[0] does not need an objectsize check.
    llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
    if (!ConstantSize || !ConstantSize->isNullValue()) {
      // The glvalue must refer to a large enough storage region.
      // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
      // to check this.
      // FIXME: Get object address space
      llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
      llvm::Value *Min = Builder.getFalse();
      llvm::Value *NullIsUnknown = Builder.getFalse();
      llvm::Value *Dynamic = Builder.getFalse();
      llvm::Value *LargeEnough = Builder.CreateICmpUGE(
          Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
      Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
    }
  }

  llvm::MaybeAlign AlignVal;
  llvm::Value *PtrAsInt = nullptr;

  if (SanOpts.has(SanitizerKind::Alignment) &&
      !SkippedChecks.has(SanitizerKind::Alignment)) {
    AlignVal = Alignment.getAsMaybeAlign();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
                                             /*ForPointeeType=*/true)
                     .getAsMaybeAlign();

    // The glvalue must be suitably aligned.
    if (AlignVal && *AlignVal > llvm::Align(1) &&
        (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
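      // An address is aligned to AlignVal iff its low log2(AlignVal) bits are
      // all zero, so the check below is simply (ptr & (AlignVal - 1)) == 0.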
      PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
      llvm::Value *Align = Builder.CreateAnd(
          PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      if (Aligned != True)
        Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
    }
  }

  if (Checks.size() > 0) {
    llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
        llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
        llvm::ConstantInt::get(Int8Ty, TCK)};
    EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
              PtrAsInt ? PtrAsInt : Ptr);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  if (SanOpts.has(SanitizerKind::Vptr) &&
      !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
    // Ensure that the pointer is non-null before loading it. If there is no
    // compile-time guarantee, reuse the run-time null check or emit a new one.
    if (!IsGuaranteedNonNull) {
      if (!IsNonNull)
        IsNonNull = Builder.CreateIsNotNull(Ptr);
      if (!Done)
        Done = createBasicBlock("vptr.null");
      llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
      Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
      EmitBlock(VptrNotNull);
    }

    // Compute a deterministic hash of the mangled name of the type.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Contained in NoSanitizeList based on the mangled type.
    if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
                                                           Out.str())) {
      // Load the vptr, and mix it with TypeHash.
      llvm::Value *TypeHash =
          llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));

      llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
      Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
      llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
                                          Ty->getAsCXXRecordDecl(),
                                          VTableAuthMode::UnsafeUbsanStrip);
      VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);

      llvm::Value *Hash =
          emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
      const int CacheSize = 128;
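      // __ubsan_vptr_type_cache is a fixed-size array of slots provided by the
      // UBSan runtime; the low bits of the mixed hash select which slot to
      // compare against.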
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(Hash,
                                            llvm::ConstantInt::get(IntPtrTy,
                                                                   CacheSize-1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal = Builder.CreateAlignedLoad(
          IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
          getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty),
        CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
        llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
                DynamicData);
    }
  }

  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}

llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  if (!EltSize)
    return nullptr;

  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
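  // pass_object_size types 0 and 1 provide an upper bound on the object size;
  // types 2 and 3 only promise a lower bound, which cannot bound the index.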
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}

/// If Base is known to point to the start of an array, return the length of
/// that array. Return 0 if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
                                          const Expr *Base,
                                          QualType &IndexedType,
                                          LangOptions::StrictFlexArraysLevelKind
                                              StrictFlexArraysLevel) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
                                                     StrictFlexArraysLevel)) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());

      if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}

namespace {

/// \p StructAccessBase returns the base \p Expr of a field access. It returns
/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
///
///     p in p->a.b.c
///
/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
/// looking for:
///
///     struct s {
///       struct s *ptr;
///       int count;
///       char array[] __attribute__((counted_by(count)));
///     };
///
/// If we have an expression like \p p->ptr->array[index], we want the
/// \p MemberExpr for \p p->ptr instead of \p p.
class StructAccessBase
    : public ConstStmtVisitor<StructAccessBase, const Expr *> {
  const RecordDecl *ExpectedRD;

  bool IsExpectedRecordDecl(const Expr *E) const {
    QualType Ty = E->getType();
    if (Ty->isPointerType())
      Ty = Ty->getPointeeType();
    return ExpectedRD == Ty->getAsRecordDecl();
  }

public:
  StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  // NOTE: If we build C++ support for counted_by, then we'll have to handle
  // horrors like this:
  //
  //     struct S {
  //       int x, y;
  //       int blah[] __attribute__((counted_by(x)));
  //     } s;
  //
  //     int foo(int index, int val) {
  //       int (S::*IHatePMDs)[] = &S::blah;
  //       (s.*IHatePMDs)[index] = val;
  //     }

  const Expr *Visit(const Expr *E) {
    return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
  }

  const Expr *VisitStmt(const Stmt *S) { return nullptr; }

  // These are the types we expect to return (in order of most to least
  // likely):
  //
  //  1. DeclRefExpr - This is the expression for the base of the structure.
  //     It's exactly what we want to build an access to the \p counted_by
  //     field.
  //  2. MemberExpr - This is the expression that has the same \p RecordDecl
  //     as the flexible array member's lexical enclosing \p RecordDecl. This
  //     allows us to catch things like: "p->p->array"
  //  3. CompoundLiteralExpr - This is for people who create something
  //     heretical like (struct foo has a flexible array member):
  //
  //       (struct foo){ 1, 2 }.blah[idx];
  const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitMemberExpr(const MemberExpr *E) {
    if (IsExpectedRecordDecl(E) && E->isArrow())
      return E;
    const Expr *Res = Visit(E->getBase());
    return !Res && IsExpectedRecordDecl(E) ? E : Res;
  }
  const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitCallExpr(const CallExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (IsExpectedRecordDecl(E))
      return E;
    return Visit(E->getBase());
  }
  const Expr *VisitCastExpr(const CastExpr *E) {
    if (E->getCastKind() == CK_LValueToRValue)
      return IsExpectedRecordDecl(E) ? E : nullptr;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
};

} // end anonymous namespace

using RecIndicesTy =
    SmallVector<std::pair<const RecordDecl *, llvm::Value *>, 8>;

static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *Field,
                                 RecIndicesTy &Indices) {
  const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
  int64_t FieldNo = -1;
  for (const FieldDecl *FD : RD->fields()) {
    if (!Layout.containsFieldDecl(FD))
      // This could happen if the field has a struct type that's empty. I don't
      // know why either.
      continue;

    FieldNo = Layout.getLLVMFieldNo(FD);
    if (FD == Field) {
      Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
      return true;
    }

    QualType Ty = FD->getType();
    if (Ty->isRecordType()) {
      if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
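        // When descending through a union, the member we are reaching lives at
        // LLVM field index 0 of the union's storage, regardless of which
        // declared field it is.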
        if (RD->isUnion())
          FieldNo = 0;
        Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
        return true;
      }
    }
  }

  return false;
}

/// This method is typically called in contexts where we can't generate
/// side-effects, like in __builtin_dynamic_object_size. When finding
/// expressions, only choose those that have either already been emitted or can
/// be loaded without side-effects.
///
/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
///   within the top-level struct.
/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
llvm::Value *CodeGenFunction::EmitCountedByFieldExpr(
    const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
  const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();

  // Find the base struct expr (i.e. p in p->a.b.c.d).
  const Expr *StructBase = StructAccessBase(RD).Visit(Base);
  if (!StructBase || StructBase->HasSideEffects(getContext()))
    return nullptr;

  llvm::Value *Res = nullptr;
  if (StructBase->getType()->isPointerType()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
    Res = Addr.emitRawPointer(*this);
  } else if (StructBase->isLValue()) {
    LValue LV = EmitLValue(StructBase);
    Address Addr = LV.getAddress();
    Res = Addr.emitRawPointer(*this);
  } else {
    return nullptr;
  }

  llvm::Value *Zero = Builder.getInt32(0);
  RecIndicesTy Indices;

  getGEPIndicesToField(*this, RD, CountDecl, Indices);

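  // getGEPIndicesToField collects the indices innermost field first, so walk
  // them in reverse to build the GEP chain from the outermost record down to
  // the count field.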
  for (auto I = Indices.rbegin(), E = Indices.rend(); I != E; ++I)
    Res = Builder.CreateInBoundsGEP(
        ConvertType(QualType(I->first->getTypeForDecl(), 0)), Res,
        {Zero, I->second}, "..counted_by.gep");

  return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), Res,
                                   getIntAlign(), "..counted_by.load");
}

const FieldDecl *CodeGenFunction::FindCountedByField(const FieldDecl *FD) {
  if (!FD)
    return nullptr;

  const auto *CAT = FD->getType()->getAs<CountAttributedType>();
  if (!CAT)
    return nullptr;

  const auto *CountDRE = cast<DeclRefExpr>(CAT->getCountExpr());
  const auto *CountDecl = CountDRE->getDecl();
  if (const auto *IFD = dyn_cast<IndirectFieldDecl>(CountDecl))
    CountDecl = IFD->getAnonField();

  return dyn_cast<FieldDecl>(CountDecl);
}

void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      getLangOpts().getStrictFlexArraysLevel();
  QualType IndexedType;
  llvm::Value *Bound =
      getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);

  EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
}

void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
                                          llvm::Value *Index,
                                          QualType IndexType,
                                          QualType IndexedType, bool Accessed) {
  if (!Bound)
    return;

  SanitizerScope SanScope(this);

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
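  // When the element is actually accessed, the index must be strictly less
  // than the bound; forming a one-past-the-end address may equal it.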
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
            SanitizerHandler::OutOfBounds, StaticData, Index);
}

CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

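  // ++/-- on a _Complex value adds or subtracts one, which only affects the
  // real component; the imaginary part is carried through unchanged.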
  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);
  if (getLangOpts().OpenMP)
    CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                              E->getSubExpr());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
                                        TBAAAccessInfo *TBAAInfo,
                                        KnownNonNull_t IsKnownNonNull,
                                        CodeGenFunction &CGF) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(E->getType()->isPointerType() ||
         E->getType()->isObjCObjectPointerType());
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo InnerBaseInfo;
        TBAAAccessInfo InnerTBAAInfo;
        Address Addr = CGF.EmitPointerWithAlignment(
            CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;

        if (isa<ExplicitCastExpr>(CE)) {
          LValueBaseInfo TargetTypeBaseInfo;
          TBAAAccessInfo TargetTypeTBAAInfo;
          CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(
              E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
          if (TBAAInfo)
            *TBAAInfo =
                CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr.setAlignment(Align);
          }
        }

        if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
                                          /*MayBeNull=*/true,
                                          CodeGenFunction::CFITCK_UnrelatedCast,
                                          CE->getBeginLoc());
        }

        llvm::Type *ElemTy =
            CGF.ConvertTypeForMem(E->getType()->getPointeeType());
        Addr = Addr.withElementType(ElemTy);
        if (CE->getCastKind() == CK_AddressSpaceConversion)
          Addr = CGF.Builder.CreateAddrSpaceCast(
              Addr, CGF.ConvertType(E->getType()), ElemTy);
        return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
                                            CE->getType());
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
      Address Addr = CGF.EmitPointerWithAlignment(
          CE->getSubExpr(), BaseInfo, nullptr,
          (KnownNonNull_t)(IsKnownNonNull ||
                           CE->getCastKind() == CK_UncheckedDerivedToBase));
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return CGF.GetAddressOfBaseClass(
          Addr, Derived, CE->path_begin(), CE->path_end(),
          CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
  }

  // std::addressof and variants.
  if (auto *Call = dyn_cast<CallExpr>(E)) {
    switch (Call->getBuiltinCallee()) {
    default:
      break;
    case Builtin::BIaddressof:
    case Builtin::BI__addressof:
    case Builtin::BI__builtin_addressof: {
      LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
  return CGF.makeNaturalAddressForPointer(
      CGF.EmitScalarExpr(E), E->getType()->getPointeeType(), CharUnits(),
      /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
}

/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
Address CodeGenFunction::EmitPointerWithAlignment(
    const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
    KnownNonNull_t IsKnownNonNull) {
  Address Addr =
      ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
  if (IsKnownNonNull && !Addr.isKnownNonNull())
    Addr.setKnownNonNull();
  return Addr;
}
1396
EmitNonNullRValueCheck(RValue RV,QualType T)1397 llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
1398 llvm::Value *V = RV.getScalarVal();
1399 if (auto MPT = T->getAs<MemberPointerType>())
1400 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1401 return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1402 }
1403
GetUndefRValue(QualType Ty)1404 RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
1405 if (Ty->isVoidType())
1406 return RValue::get(nullptr);
1407
1408 switch (getEvaluationKind(Ty)) {
1409 case TEK_Complex: {
1410 llvm::Type *EltTy =
1411 ConvertType(Ty->castAs<ComplexType>()->getElementType());
1412 llvm::Value *U = llvm::UndefValue::get(EltTy);
1413 return RValue::getComplex(std::make_pair(U, U));
1414 }
1415
1416 // If this is a use of an undefined aggregate type, the aggregate must have an
1417 // identifiable address. Just because the contents of the value are undefined
1418 // doesn't mean that the address can't be taken and compared.
1419 case TEK_Aggregate: {
1420 Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1421 return RValue::getAggregate(DestPtr);
1422 }
1423
1424 case TEK_Scalar:
1425 return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1426 }
1427 llvm_unreachable("bad evaluation kind");
1428 }
1429
1430 RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
1431 const char *Name) {
1432 ErrorUnsupported(E, Name);
1433 return GetUndefRValue(E->getType());
1434 }
1435
1436 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
1437 const char *Name) {
1438 ErrorUnsupported(E, Name);
1439 llvm::Type *ElTy = ConvertType(E->getType());
1440 llvm::Type *Ty = UnqualPtrTy;
1441 return MakeAddrLValue(
1442 Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1443 }
1444
1445 bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1446 const Expr *Base = Obj;
1447 while (!isa<CXXThisExpr>(Base)) {
1448 // The result of a dynamic_cast can be null.
1449 if (isa<CXXDynamicCastExpr>(Base))
1450 return false;
1451
1452 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1453 Base = CE->getSubExpr();
1454 } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1455 Base = PE->getSubExpr();
1456 } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1457 if (UO->getOpcode() == UO_Extension)
1458 Base = UO->getSubExpr();
1459 else
1460 return false;
1461 } else {
1462 return false;
1463 }
1464 }
1465 return true;
1466 }
1467
1468 LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1469 LValue LV;
1470 if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1471 LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1472 else
1473 LV = EmitLValue(E);
1474 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1475 SanitizerSet SkippedChecks;
1476 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1477 bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1478 if (IsBaseCXXThis)
1479 SkippedChecks.set(SanitizerKind::Alignment, true);
1480 if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1481 SkippedChecks.set(SanitizerKind::Null, true);
1482 }
1483 EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1484 }
1485 return LV;
1486 }
1487
1488 /// EmitLValue - Emit code to compute a designator that specifies the location
1489 /// of the expression.
1490 ///
1491 /// This can return one of two things: a simple address or a bitfield reference.
1492 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1493 /// an LLVM pointer type.
1494 ///
1495 /// If this returns a bitfield reference, nothing about the pointee type of the
1496 /// LLVM value is known: For example, it may not be a pointer to an integer.
1497 ///
1498 /// If this returns a normal address, and if the lvalue's C type is fixed size,
1499 /// this method guarantees that the returned pointer type will point to an LLVM
1500 /// type of the same size as the lvalue's type. If the lvalue has a variable
1501 /// length type, this is not possible.
1502 ///
1503 LValue CodeGenFunction::EmitLValue(const Expr *E,
1504 KnownNonNull_t IsKnownNonNull) {
1505 LValue LV = EmitLValueHelper(E, IsKnownNonNull);
1506 if (IsKnownNonNull && !LV.isKnownNonNull())
1507 LV.setKnownNonNull();
1508 return LV;
1509 }
1510
1511 static QualType getConstantExprReferredType(const FullExpr *E,
1512 const ASTContext &Ctx) {
1513 const Expr *SE = E->getSubExpr()->IgnoreImplicit();
1514 if (isa<OpaqueValueExpr>(SE))
1515 return SE->getType();
1516 return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
1517 }
1518
1519 LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1520 KnownNonNull_t IsKnownNonNull) {
1521 ApplyDebugLocation DL(*this, E);
1522 switch (E->getStmtClass()) {
1523 default: return EmitUnsupportedLValue(E, "l-value expression");
1524
1525 case Expr::ObjCPropertyRefExprClass:
1526 llvm_unreachable("cannot emit a property reference directly");
1527
1528 case Expr::ObjCSelectorExprClass:
1529 return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
1530 case Expr::ObjCIsaExprClass:
1531 return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
1532 case Expr::BinaryOperatorClass:
1533 return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
1534 case Expr::CompoundAssignOperatorClass: {
1535 QualType Ty = E->getType();
1536 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1537 Ty = AT->getValueType();
1538 if (!Ty->isAnyComplexType())
1539 return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1540 return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1541 }
1542 case Expr::CallExprClass:
1543 case Expr::CXXMemberCallExprClass:
1544 case Expr::CXXOperatorCallExprClass:
1545 case Expr::UserDefinedLiteralClass:
1546 return EmitCallExprLValue(cast<CallExpr>(E));
1547 case Expr::CXXRewrittenBinaryOperatorClass:
1548 return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1549 IsKnownNonNull);
1550 case Expr::VAArgExprClass:
1551 return EmitVAArgExprLValue(cast<VAArgExpr>(E));
1552 case Expr::DeclRefExprClass:
1553 return EmitDeclRefLValue(cast<DeclRefExpr>(E));
1554 case Expr::ConstantExprClass: {
1555 const ConstantExpr *CE = cast<ConstantExpr>(E);
1556 if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1557 QualType RetType = getConstantExprReferredType(CE, getContext());
1558 return MakeNaturalAlignAddrLValue(Result, RetType);
1559 }
1560 return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1561 }
1562 case Expr::ParenExprClass:
1563 return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1564 case Expr::GenericSelectionExprClass:
1565 return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1566 IsKnownNonNull);
1567 case Expr::PredefinedExprClass:
1568 return EmitPredefinedLValue(cast<PredefinedExpr>(E));
1569 case Expr::StringLiteralClass:
1570 return EmitStringLiteralLValue(cast<StringLiteral>(E));
1571 case Expr::ObjCEncodeExprClass:
1572 return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
1573 case Expr::PseudoObjectExprClass:
1574 return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
1575 case Expr::InitListExprClass:
1576 return EmitInitListLValue(cast<InitListExpr>(E));
1577 case Expr::CXXTemporaryObjectExprClass:
1578 case Expr::CXXConstructExprClass:
1579 return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
1580 case Expr::CXXBindTemporaryExprClass:
1581 return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
1582 case Expr::CXXUuidofExprClass:
1583 return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
1584 case Expr::LambdaExprClass:
1585 return EmitAggExprToLValue(E);
1586
1587 case Expr::ExprWithCleanupsClass: {
1588 const auto *cleanups = cast<ExprWithCleanups>(E);
1589 RunCleanupsScope Scope(*this);
1590 LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1591 if (LV.isSimple()) {
1592 // Defend against branches out of gnu statement expressions surrounded by
1593 // cleanups.
1594 Address Addr = LV.getAddress();
1595 llvm::Value *V = Addr.getBasePointer();
1596 Scope.ForceCleanup({&V});
1597 Addr.replaceBasePointer(V);
1598 return LValue::MakeAddr(Addr, LV.getType(), getContext(),
1599 LV.getBaseInfo(), LV.getTBAAInfo());
1600 }
1601 // FIXME: Is it possible to create an ExprWithCleanups that produces a
1602 // bitfield lvalue or some other non-simple lvalue?
1603 return LV;
1604 }
1605
1606 case Expr::CXXDefaultArgExprClass: {
1607 auto *DAE = cast<CXXDefaultArgExpr>(E);
1608 CXXDefaultArgExprScope Scope(*this, DAE);
1609 return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1610 }
1611 case Expr::CXXDefaultInitExprClass: {
1612 auto *DIE = cast<CXXDefaultInitExpr>(E);
1613 CXXDefaultInitExprScope Scope(*this, DIE);
1614 return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1615 }
1616 case Expr::CXXTypeidExprClass:
1617 return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
1618
1619 case Expr::ObjCMessageExprClass:
1620 return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
1621 case Expr::ObjCIvarRefExprClass:
1622 return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
1623 case Expr::StmtExprClass:
1624 return EmitStmtExprLValue(cast<StmtExpr>(E));
1625 case Expr::UnaryOperatorClass:
1626 return EmitUnaryOpLValue(cast<UnaryOperator>(E));
1627 case Expr::ArraySubscriptExprClass:
1628 return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
1629 case Expr::MatrixSubscriptExprClass:
1630 return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
1631 case Expr::ArraySectionExprClass:
1632 return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
1633 case Expr::ExtVectorElementExprClass:
1634 return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
1635 case Expr::CXXThisExprClass:
1636 return MakeAddrLValue(LoadCXXThisAddress(), E->getType());
1637 case Expr::MemberExprClass:
1638 return EmitMemberExpr(cast<MemberExpr>(E));
1639 case Expr::CompoundLiteralExprClass:
1640 return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
1641 case Expr::ConditionalOperatorClass:
1642 return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
1643 case Expr::BinaryConditionalOperatorClass:
1644 return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
1645 case Expr::ChooseExprClass:
1646 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1647 case Expr::OpaqueValueExprClass:
1648 return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
1649 case Expr::SubstNonTypeTemplateParmExprClass:
1650 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1651 IsKnownNonNull);
1652 case Expr::ImplicitCastExprClass:
1653 case Expr::CStyleCastExprClass:
1654 case Expr::CXXFunctionalCastExprClass:
1655 case Expr::CXXStaticCastExprClass:
1656 case Expr::CXXDynamicCastExprClass:
1657 case Expr::CXXReinterpretCastExprClass:
1658 case Expr::CXXConstCastExprClass:
1659 case Expr::CXXAddrspaceCastExprClass:
1660 case Expr::ObjCBridgedCastExprClass:
1661 return EmitCastLValue(cast<CastExpr>(E));
1662
1663 case Expr::MaterializeTemporaryExprClass:
1664 return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
1665
1666 case Expr::CoawaitExprClass:
1667 return EmitCoawaitLValue(cast<CoawaitExpr>(E));
1668 case Expr::CoyieldExprClass:
1669 return EmitCoyieldLValue(cast<CoyieldExpr>(E));
1670 case Expr::PackIndexingExprClass:
1671 return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
1672 }
1673 }
1674
1675 /// Given an object of the given canonical type, can we safely copy a
1676 /// value out of it based on its initializer?
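/// For example (illustrative): a `const int` qualifies, while a
/// `volatile const int`, or a C++ class with mutable fields or a non-trivial
/// copy constructor or destructor, does not.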
1677 static bool isConstantEmittableObjectType(QualType type) {
1678 assert(type.isCanonical());
1679 assert(!type->isReferenceType());
1680
1681 // Must be const-qualified but non-volatile.
1682 Qualifiers qs = type.getLocalQualifiers();
1683 if (!qs.hasConst() || qs.hasVolatile()) return false;
1684
1685 // Otherwise, all object types satisfy this except C++ classes with
1686 // mutable subobjects or non-trivial copy/destroy behavior.
1687 if (const auto *RT = dyn_cast<RecordType>(type))
1688 if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1689 if (RD->hasMutableFields() || !RD->isTrivial())
1690 return false;
1691
1692 return true;
1693 }
1694
1695 /// Can we constant-emit a load of a reference to a variable of the
1696 /// given type? This is different from predicates like
1697 /// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1698 /// in situations that don't necessarily satisfy the language's rules
1699 /// for this (e.g. C++'s ODR-use rules). For example, we want to be able
1700 /// to do this with const float variables even if those variables
1701 /// aren't marked 'constexpr'.
1702 enum ConstantEmissionKind {
1703 CEK_None,
1704 CEK_AsReferenceOnly,
1705 CEK_AsValueOrReference,
1706 CEK_AsValueOnly
1707 };
1708 static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
1709 type = type.getCanonicalType();
1710 if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1711 if (isConstantEmittableObjectType(ref->getPointeeType()))
1712 return CEK_AsValueOrReference;
1713 return CEK_AsReferenceOnly;
1714 }
1715 if (isConstantEmittableObjectType(type))
1716 return CEK_AsValueOnly;
1717 return CEK_None;
1718 }
1719
1720 /// Try to emit a reference to the given value without producing it as
1721 /// an l-value. This is just an optimization, but it avoids us needing
1722 /// to emit global copies of variables if they're named without triggering
1723 /// a formal use in a context where we can't emit a direct reference to them,
1724 /// for instance if a block or lambda or a member of a local class uses a
1725 /// const int variable or constexpr variable from an enclosing function.
1726 CodeGenFunction::ConstantEmission
1727 CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
1728 ValueDecl *value = refExpr->getDecl();
1729
1730 // The value needs to be an enum constant or a constant variable.
1731 ConstantEmissionKind CEK;
1732 if (isa<ParmVarDecl>(value)) {
1733 CEK = CEK_None;
1734 } else if (auto *var = dyn_cast<VarDecl>(value)) {
1735 CEK = checkVarTypeForConstantEmission(var->getType());
1736 } else if (isa<EnumConstantDecl>(value)) {
1737 CEK = CEK_AsValueOnly;
1738 } else {
1739 CEK = CEK_None;
1740 }
1741 if (CEK == CEK_None) return ConstantEmission();
1742
1743 Expr::EvalResult result;
1744 bool resultIsReference;
1745 QualType resultType;
1746
1747 // It's best to evaluate all the way as an r-value if that's permitted.
1748 if (CEK != CEK_AsReferenceOnly &&
1749 refExpr->EvaluateAsRValue(result, getContext())) {
1750 resultIsReference = false;
1751 resultType = refExpr->getType();
1752
1753 // Otherwise, try to evaluate as an l-value.
1754 } else if (CEK != CEK_AsValueOnly &&
1755 refExpr->EvaluateAsLValue(result, getContext())) {
1756 resultIsReference = true;
1757 resultType = value->getType();
1758
1759 // Failure.
1760 } else {
1761 return ConstantEmission();
1762 }
1763
1764 // In any case, if the initializer has side-effects, abandon ship.
1765 if (result.HasSideEffects)
1766 return ConstantEmission();
1767
1768 // In CUDA/HIP device compilation, a lambda may capture a reference variable
1769 // referencing a global host variable by copy. In this case the lambda should
1770 // make a copy of the value of the global host variable. The DRE of the
1771 // captured reference variable cannot be emitted as load from the host
1772 // global variable as compile time constant, since the host variable is not
1773 // accessible on device. The DRE of the captured reference variable has to be
1774 // loaded from captures.
1775 if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1776 refExpr->refersToEnclosingVariableOrCapture()) {
1777 auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1778 if (MD && MD->getParent()->isLambda() &&
1779 MD->getOverloadedOperator() == OO_Call) {
1780 const APValue::LValueBase &base = result.Val.getLValueBase();
1781 if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1782 if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
1783 if (!VD->hasAttr<CUDADeviceAttr>()) {
1784 return ConstantEmission();
1785 }
1786 }
1787 }
1788 }
1789 }
1790
1791 // Emit as a constant.
1792 auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
1793 result.Val, resultType);
1794
1795 // Make sure we emit a debug reference to the global variable.
1796 // This should probably fire even for
1797 if (isa<VarDecl>(value)) {
1798 if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
1799 EmitDeclRefExprDbgValue(refExpr, result.Val);
1800 } else {
1801 assert(isa<EnumConstantDecl>(value));
1802 EmitDeclRefExprDbgValue(refExpr, result.Val);
1803 }
1804
1805 // If we emitted a reference constant, we need to dereference that.
1806 if (resultIsReference)
1807 return ConstantEmission::forReference(C);
1808
1809 return ConstantEmission::forValue(C);
1810 }
1811
1812 static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
1813 const MemberExpr *ME) {
1814 if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
1815 // Try to emit static variable member expressions as DREs.
1816 return DeclRefExpr::Create(
1817 CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
1818 /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
1819 ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
1820 }
1821 return nullptr;
1822 }
1823
1824 CodeGenFunction::ConstantEmission
1825 CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
1826 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
1827 return tryEmitAsConstant(DRE);
1828 return ConstantEmission();
1829 }
1830
1831 llvm::Value *CodeGenFunction::emitScalarConstant(
1832 const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
1833 assert(Constant && "not a constant");
1834 if (Constant.isReference())
1835 return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
1836 E->getExprLoc())
1837 .getScalarVal();
1838 return Constant.getValue();
1839 }
1840
1841 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
1842 SourceLocation Loc) {
1843 return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
1844 lvalue.getType(), Loc, lvalue.getBaseInfo(),
1845 lvalue.getTBAAInfo(), lvalue.isNontemporal());
1846 }
1847
1848 static bool hasBooleanRepresentation(QualType Ty) {
1849 if (Ty->isBooleanType())
1850 return true;
1851
1852 if (const EnumType *ET = Ty->getAs<EnumType>())
1853 return ET->getDecl()->getIntegerType()->isBooleanType();
1854
1855 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1856 return hasBooleanRepresentation(AT->getValueType());
1857
1858 return false;
1859 }
1860
1861 static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
1862 llvm::APInt &Min, llvm::APInt &End,
1863 bool StrictEnums, bool IsBool) {
1864 const EnumType *ET = Ty->getAs<EnumType>();
1865 bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1866 ET && !ET->getDecl()->isFixed();
1867 if (!IsBool && !IsRegularCPlusPlusEnum)
1868 return false;
1869
1870 if (IsBool) {
1871 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1872 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1873 } else {
1874 const EnumDecl *ED = ET->getDecl();
1875 ED->getValueRange(End, Min);
1876 }
1877 return true;
1878 }
1879
1880 llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1881 llvm::APInt Min, End;
1882 if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
1883 hasBooleanRepresentation(Ty)))
1884 return nullptr;
1885
1886 llvm::MDBuilder MDHelper(getLLVMContext());
1887 return MDHelper.createRange(Min, End);
1888 }
1889
1890 bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
1891 SourceLocation Loc) {
1892 bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1893 bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1894 if (!HasBoolCheck && !HasEnumCheck)
1895 return false;
1896
1897 bool IsBool = hasBooleanRepresentation(Ty) ||
1898 NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
1899 bool NeedsBoolCheck = HasBoolCheck && IsBool;
1900 bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
1901 if (!NeedsBoolCheck && !NeedsEnumCheck)
1902 return false;
1903
1904 // Single-bit booleans don't need to be checked. Special-case this to avoid
1905 // a bit width mismatch when handling bitfield values. This is handled by
1906 // EmitFromMemory for the non-bitfield case.
1907 if (IsBool &&
1908 cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1909 return false;
1910
1911 llvm::APInt Min, End;
1912 if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1913 return true;
1914
1915 auto &Ctx = getLLVMContext();
1916 SanitizerScope SanScope(this);
1917 llvm::Value *Check;
1918 --End;
1919 if (!Min) {
1920 Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1921 } else {
1922 llvm::Value *Upper =
1923 Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1924 llvm::Value *Lower =
1925 Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1926 Check = Builder.CreateAnd(Upper, Lower);
1927 }
1928 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
1929 EmitCheckTypeDescriptor(Ty)};
1930 SanitizerMask Kind =
1931 NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
1932 EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
1933 StaticArgs, EmitCheckValue(Value));
1934 return true;
1935 }
1936
1937 llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
1938 QualType Ty,
1939 SourceLocation Loc,
1940 LValueBaseInfo BaseInfo,
1941 TBAAAccessInfo TBAAInfo,
1942 bool isNontemporal) {
1943 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
1944 if (GV->isThreadLocal())
1945 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
1946 NotKnownNonNull);
1947
1948 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
1949 // Boolean vectors use `iN` as storage type.
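// Sketch of the lowering, assuming an ext_vector_type(4) of bool: the value
// type is <4 x i1> but it is stored as an i8, so a load reads the i8,
// bitcasts it to <8 x i1>, and shuffles the low four lanes back out:
//   %load_bits = load i8, ptr %addr
//   %pad = bitcast i8 %load_bits to <8 x i1>
//   %v = shufflevector <8 x i1> %pad, <8 x i1> poison,
//                      <4 x i32> <i32 0, i32 1, i32 2, i32 3>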
1950 if (ClangVecTy->isExtVectorBoolType()) {
1951 llvm::Type *ValTy = ConvertType(Ty);
1952 unsigned ValNumElems =
1953 cast<llvm::FixedVectorType>(ValTy)->getNumElements();
1954 // Load the `iP` storage object (P is the padded vector size).
1955 auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
1956 const auto *RawIntTy = RawIntV->getType();
1957 assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
1958 // Bitcast iP --> <P x i1>.
1959 auto *PaddedVecTy = llvm::FixedVectorType::get(
1960 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
1961 llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
1962 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
1963 V = emitBoolVecConversion(V, ValNumElems, "extractvec");
1964
1965 return EmitFromMemory(V, Ty);
1966 }
1967
1968 // Handle vectors of size 3 like size 4 for better performance.
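// As a sketch, a load of a float3 under this path treats the address as a
// <4 x float> and then drops the extra lane:
//   %loadVec4 = load <4 x float>, ptr %addr
//   %extractVec = shufflevector <4 x float> %loadVec4, <4 x float> poison,
//                               <3 x i32> <i32 0, i32 1, i32 2>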
1969 const llvm::Type *EltTy = Addr.getElementType();
1970 const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
1971
1972 if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {
1973
1974 llvm::VectorType *vec4Ty =
1975 llvm::FixedVectorType::get(VTy->getElementType(), 4);
1976 Address Cast = Addr.withElementType(vec4Ty);
1977 // Now load value.
1978 llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
1979
1980 // Shuffle vector to get vec3.
1981 V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
1982 return EmitFromMemory(V, Ty);
1983 }
1984 }
1985
1986 // Atomic operations have to be done on integral types.
1987 LValue AtomicLValue =
1988 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
1989 if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
1990 return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
1991 }
1992
1993 Addr =
1994 Addr.withElementType(convertTypeForLoadStore(Ty, Addr.getElementType()));
1995
1996 llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
1997 if (isNontemporal) {
1998 llvm::MDNode *Node = llvm::MDNode::get(
1999 Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2000 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2001 }
2002
2003 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
2004
2005 if (EmitScalarRangeCheck(Load, Ty, Loc)) {
2006 // In order to prevent the optimizer from throwing away the check, don't
2007 // attach range metadata to the load.
2008 } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
2009 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
2010 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
2011 Load->setMetadata(llvm::LLVMContext::MD_noundef,
2012 llvm::MDNode::get(getLLVMContext(), std::nullopt));
2013 }
2014
2015 return EmitFromMemory(Load, Ty);
2016 }
2017
2018 /// Converts a scalar value from its primary IR type (as returned
2019 /// by ConvertType) to its load/store type (as returned by
2020 /// convertTypeForLoadStore).
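/// For example (the common case): `bool` is `i1` as a scalar value but is
/// typically stored as `i8` in memory, so this path emits a zext ("storedv")
/// and EmitFromMemory emits the matching trunc ("loadedv").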
2021 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
2022 if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2023 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2024 bool Signed = Ty->isSignedIntegerOrEnumerationType();
2025 return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
2026 }
2027
2028 if (Ty->isExtVectorBoolType()) {
2029 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2030 // Expand to the memory bit width.
2031 unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
2032 // <N x i1> --> <P x i1>.
2033 Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
2034 // <P x i1> --> iP.
2035 Value = Builder.CreateBitCast(Value, StoreTy);
2036 }
2037
2038 return Value;
2039 }
2040
2041 /// Converts a scalar value from its load/store type (as returned
2042 /// by convertTypeForLoadStore) to its primary IR type (as returned
2043 /// by ConvertType).
2044 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2045 if (Ty->isExtVectorBoolType()) {
2046 const auto *RawIntTy = Value->getType();
2047 // Bitcast iP --> <P x i1>.
2048 auto *PaddedVecTy = llvm::FixedVectorType::get(
2049 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2050 auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2051 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2052 llvm::Type *ValTy = ConvertType(Ty);
2053 unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2054 return emitBoolVecConversion(V, ValNumElems, "extractvec");
2055 }
2056
2057 if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2058 llvm::Type *ResTy = ConvertType(Ty);
2059 return Builder.CreateTrunc(Value, ResTy, "loadedv");
2060 }
2061
2062 return Value;
2063 }
2064
2065 // Convert the pointer of \p Addr to a pointer to a vector (the value type of
2066 // MatrixType), if it points to an array (the memory type of MatrixType).
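// For example, assuming a 2x2 matrix of float: the memory type is
// [4 x float] while the value type is <4 x float>, so the address is simply
// retyped to the vector type (or back) before the load or store.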
2067 static RawAddress MaybeConvertMatrixAddress(RawAddress Addr,
2068 CodeGenFunction &CGF,
2069 bool IsVector = true) {
2070 auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
2071 if (ArrayTy && IsVector) {
2072 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
2073 ArrayTy->getNumElements());
2074
2075 return Addr.withElementType(VectorTy);
2076 }
2077 auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2078 if (VectorTy && !IsVector) {
2079 auto *ArrayTy = llvm::ArrayType::get(
2080 VectorTy->getElementType(),
2081 cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2082
2083 return Addr.withElementType(ArrayTy);
2084 }
2085
2086 return Addr;
2087 }
2088
2089 // Emit a store of a matrix LValue. This may require casting the original
2090 // pointer to the memory type (ArrayType) to a pointer to the value type
2091 // (VectorType).
2092 static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2093 bool isInit, CodeGenFunction &CGF) {
2094 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
2095 value->getType()->isVectorTy());
2096 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2097 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2098 lvalue.isNontemporal());
2099 }
2100
2101 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
2102 bool Volatile, QualType Ty,
2103 LValueBaseInfo BaseInfo,
2104 TBAAAccessInfo TBAAInfo,
2105 bool isInit, bool isNontemporal) {
2106 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2107 if (GV->isThreadLocal())
2108 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2109 NotKnownNonNull);
2110
2111 llvm::Type *SrcTy = Value->getType();
2112 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2113 auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
2114 if (!CGM.getCodeGenOpts().PreserveVec3Type) {
2115 // Handle vec3 specially.
2116 if (VecTy && !ClangVecTy->isExtVectorBoolType() &&
2117 cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
2118 // Our source is a vec3, do a shuffle vector to make it a vec4.
2119 Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
2120 "extractVec");
2121 SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
2122 }
2123 if (Addr.getElementType() != SrcTy) {
2124 Addr = Addr.withElementType(SrcTy);
2125 }
2126 }
2127 }
2128
2129 Value = EmitToMemory(Value, Ty);
2130
2131 LValue AtomicLValue =
2132 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2133 if (Ty->isAtomicType() ||
2134 (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2135 EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2136 return;
2137 }
2138
2139 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2140 if (isNontemporal) {
2141 llvm::MDNode *Node =
2142 llvm::MDNode::get(Store->getContext(),
2143 llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2144 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2145 }
2146
2147 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2148 }
2149
2150 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2151 bool isInit) {
2152 if (lvalue.getType()->isConstantMatrixType()) {
2153 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2154 return;
2155 }
2156
2157 EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
2158 lvalue.getType(), lvalue.getBaseInfo(),
2159 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2160 }
2161
2162 // Emit a load of an LValue of matrix type. This may require casting the pointer
2163 // from the memory type (ArrayType) to a pointer to the value type (VectorType).
2164 static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
2165 CodeGenFunction &CGF) {
2166 assert(LV.getType()->isConstantMatrixType());
2167 Address Addr = MaybeConvertMatrixAddress(LV.getAddress(), CGF);
2168 LV.setAddress(Addr);
2169 return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2170 }
2171
2172 RValue CodeGenFunction::EmitLoadOfAnyValue(LValue LV, AggValueSlot Slot,
2173 SourceLocation Loc) {
2174 QualType Ty = LV.getType();
2175 switch (getEvaluationKind(Ty)) {
2176 case TEK_Scalar:
2177 return EmitLoadOfLValue(LV, Loc);
2178 case TEK_Complex:
2179 return RValue::getComplex(EmitLoadOfComplex(LV, Loc));
2180 case TEK_Aggregate:
2181 EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
2182 return Slot.asRValue();
2183 }
2184 llvm_unreachable("bad evaluation kind");
2185 }
2186
2187 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2188 /// method emits the address of the lvalue, then loads the result as an rvalue,
2189 /// returning the rvalue.
2190 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
2191 if (LV.isObjCWeak()) {
2192 // load of a __weak object.
2193 Address AddrWeakObj = LV.getAddress();
2194 return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
2195 AddrWeakObj));
2196 }
2197 if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
2198 // In MRC mode, we do a load+autorelease.
2199 if (!getLangOpts().ObjCAutoRefCount) {
2200 return RValue::get(EmitARCLoadWeak(LV.getAddress()));
2201 }
2202
2203 // In ARC mode, we load retained and then consume the value.
2204 llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
2205 Object = EmitObjCConsumeObject(LV.getType(), Object);
2206 return RValue::get(Object);
2207 }
2208
2209 if (LV.isSimple()) {
2210 assert(!LV.getType()->isFunctionType());
2211
2212 if (LV.getType()->isConstantMatrixType())
2213 return EmitLoadOfMatrixLValue(LV, Loc, *this);
2214
2215 // Everything needs a load.
2216 return RValue::get(EmitLoadOfScalar(LV, Loc));
2217 }
2218
2219 if (LV.isVectorElt()) {
2220 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2221 LV.isVolatileQualified());
2222 return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
2223 "vecext"));
2224 }
2225
2226 // If this is a reference to a subset of the elements of a vector, either
2227 // shuffle the input or extract/insert them as appropriate.
2228 if (LV.isExtVectorElt()) {
2229 return EmitLoadOfExtVectorElementLValue(LV);
2230 }
2231
2232 // Global register variables always invoke intrinsics.
2233 if (LV.isGlobalReg())
2234 return EmitLoadOfGlobalRegLValue(LV);
2235
2236 if (LV.isMatrixElt()) {
2237 llvm::Value *Idx = LV.getMatrixIdx();
2238 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2239 const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2240 llvm::MatrixBuilder MB(Builder);
2241 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2242 }
2243 llvm::LoadInst *Load =
2244 Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
2245 return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2246 }
2247
2248 assert(LV.isBitField() && "Unknown LValue type!");
2249 return EmitLoadOfBitfieldLValue(LV, Loc);
2250 }
2251
2252 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
2253 SourceLocation Loc) {
2254 const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2255
2256 // Get the output type.
2257 llvm::Type *ResLTy = ConvertType(LV.getType());
2258
2259 Address Ptr = LV.getBitFieldAddress();
2260 llvm::Value *Val =
2261 Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2262
2263 bool UseVolatile = LV.isVolatileQualified() &&
2264 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2265 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2266 const unsigned StorageSize =
2267 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
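// As an illustration (a sketch, independent of any particular ABI): a signed
// 3-bit field at offset 2 within an i8 storage unit is extracted as
//   %bf.shl = shl i8 %bf.load, 3    ; HighBits = 8 - 2 - 3
//   %bf.ashr = ashr i8 %bf.shl, 5   ; Offset + HighBits
// whereas an unsigned field is extracted with lshr + and below.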
2268 if (Info.IsSigned) {
2269 assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2270 unsigned HighBits = StorageSize - Offset - Info.Size;
2271 if (HighBits)
2272 Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2273 if (Offset + HighBits)
2274 Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2275 } else {
2276 if (Offset)
2277 Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2278 if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2279 Val = Builder.CreateAnd(
2280 Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2281 }
2282 Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2283 EmitScalarRangeCheck(Val, LV.getType(), Loc);
2284 return RValue::get(Val);
2285 }
2286
2287 // If this is a reference to a subset of the elements of a vector, create an
2288 // appropriate shufflevector.
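// For example (illustrative): `v.yx` on a float4 loads the whole vector and
// emits a shufflevector with mask <1, 0>, while a single-element access such
// as `v.z` becomes a plain extractelement below.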
2289 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
2290 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2291 LV.isVolatileQualified());
2292
2293 // HLSL allows treating scalars as one-element vectors. Converting the scalar
2294 // IR value to a vector here allows the rest of codegen to behave as normal.
2295 if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2296 llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2297 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2298 Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2299 }
2300
2301 const llvm::Constant *Elts = LV.getExtVectorElts();
2302
2303 // If the result of the expression is a non-vector type, we must be extracting
2304 // a single element. Just codegen as an extractelement.
2305 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2306 if (!ExprVT) {
2307 unsigned InIdx = getAccessedFieldNo(0, Elts);
2308 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2309 return RValue::get(Builder.CreateExtractElement(Vec, Elt));
2310 }
2311
2312 // Always use shuffle vector to try to retain the original program structure
2313 unsigned NumResultElts = ExprVT->getNumElements();
2314
2315 SmallVector<int, 4> Mask;
2316 for (unsigned i = 0; i != NumResultElts; ++i)
2317 Mask.push_back(getAccessedFieldNo(i, Elts));
2318
2319 Vec = Builder.CreateShuffleVector(Vec, Mask);
2320 return RValue::get(Vec);
2321 }
2322
2323 /// Generates an lvalue for a partial ext_vector access.
2324 Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
2325 Address VectorAddress = LV.getExtVectorAddress();
2326 QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2327 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2328
2329 Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2330
2331 const llvm::Constant *Elts = LV.getExtVectorElts();
2332 unsigned ix = getAccessedFieldNo(0, Elts);
2333
2334 Address VectorBasePtrPlusIx =
2335 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2336 "vector.elt");
2337
2338 return VectorBasePtrPlusIx;
2339 }
2340
2341 /// Loads of global named registers are always calls to intrinsics.
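/// For instance (a sketch; the register name is only illustrative), reading
/// a global register variable declared as
///   register unsigned long current_sp asm("sp");
/// lowers to a call to the read_register intrinsic:
///   %0 = call i64 @llvm.read_register.i64(metadata !"sp")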
2342 RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
2343 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2344 "Bad type for register variable");
2345 llvm::MDNode *RegName = cast<llvm::MDNode>(
2346 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2347
2348 // We accept integer and pointer types only
2349 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2350 llvm::Type *Ty = OrigTy;
2351 if (OrigTy->isPointerTy())
2352 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2353 llvm::Type *Types[] = { Ty };
2354
2355 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2356 llvm::Value *Call = Builder.CreateCall(
2357 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2358 if (OrigTy->isPointerTy())
2359 Call = Builder.CreateIntToPtr(Call, OrigTy);
2360 return RValue::get(Call);
2361 }
2362
2363 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
2364 /// lvalue, where both are guaranteed to have the same type, and that type
2365 /// is 'Ty'.
2366 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
2367 bool isInit) {
2368 if (!Dst.isSimple()) {
2369 if (Dst.isVectorElt()) {
2370 // Read/modify/write the vector, inserting the new element.
2371 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2372 Dst.isVolatileQualified());
2373 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2374 if (IRStoreTy) {
2375 auto *IRVecTy = llvm::FixedVectorType::get(
2376 Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2377 Vec = Builder.CreateBitCast(Vec, IRVecTy);
2378 // iN --> <N x i1>.
2379 }
2380 Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
2381 Dst.getVectorIdx(), "vecins");
2382 if (IRStoreTy) {
2383 // <N x i1> --> <iN>.
2384 Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2385 }
2386 Builder.CreateStore(Vec, Dst.getVectorAddress(),
2387 Dst.isVolatileQualified());
2388 return;
2389 }
2390
2391 // If this is an update of extended vector elements, insert them as
2392 // appropriate.
2393 if (Dst.isExtVectorElt())
2394 return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
2395
2396 if (Dst.isGlobalReg())
2397 return EmitStoreThroughGlobalRegLValue(Src, Dst);
2398
2399 if (Dst.isMatrixElt()) {
2400 llvm::Value *Idx = Dst.getMatrixIdx();
2401 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2402 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2403 llvm::MatrixBuilder MB(Builder);
2404 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2405 }
2406 llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2407 llvm::Value *Vec =
2408 Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2409 Builder.CreateStore(Vec, Dst.getMatrixAddress(),
2410 Dst.isVolatileQualified());
2411 return;
2412 }
2413
2414 assert(Dst.isBitField() && "Unknown LValue type");
2415 return EmitStoreThroughBitfieldLValue(Src, Dst);
2416 }
2417
2418 // There's special magic for assigning into an ARC-qualified l-value.
2419 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2420 switch (Lifetime) {
2421 case Qualifiers::OCL_None:
2422 llvm_unreachable("present but none");
2423
2424 case Qualifiers::OCL_ExplicitNone:
2425 // nothing special
2426 break;
2427
2428 case Qualifiers::OCL_Strong:
2429 if (isInit) {
2430 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2431 break;
2432 }
2433 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2434 return;
2435
2436 case Qualifiers::OCL_Weak:
2437 if (isInit)
2438 // Initialize and then skip the primitive store.
2439 EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal());
2440 else
2441 EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(),
2442 /*ignore*/ true);
2443 return;
2444
2445 case Qualifiers::OCL_Autoreleasing:
2446 Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
2447 Src.getScalarVal()));
2448 // fall into the normal path
2449 break;
2450 }
2451 }
2452
2453 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2454 // load of a __weak object.
2455 Address LvalueDst = Dst.getAddress();
2456 llvm::Value *src = Src.getScalarVal();
2457 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2458 return;
2459 }
2460
2461 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2462 // load of a __strong object.
2463 Address LvalueDst = Dst.getAddress();
2464 llvm::Value *src = Src.getScalarVal();
2465 if (Dst.isObjCIvar()) {
2466 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2467 llvm::Type *ResultType = IntPtrTy;
2468 Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
2469 llvm::Value *RHS = dst.emitRawPointer(*this);
2470 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2471 llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
2472 ResultType, "sub.ptr.lhs.cast");
2473 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2474 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
2475 } else if (Dst.isGlobalObjCRef()) {
2476 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2477 Dst.isThreadLocalRef());
2478 }
2479 else
2480 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2481 return;
2482 }
2483
2484 assert(Src.isScalar() && "Can't emit an agg store with this method");
2485 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2486 }
2487
2488 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2489 llvm::Value **Result) {
2490 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2491 llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
2492 Address Ptr = Dst.getBitFieldAddress();
2493
2494 // Get the source value, truncated to the width of the bit-field.
2495 llvm::Value *SrcVal = Src.getScalarVal();
2496
2497 // Cast the source to the storage type and shift it into place.
2498 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2499 /*isSigned=*/false);
2500 llvm::Value *MaskedVal = SrcVal;
2501
2502 const bool UseVolatile =
2503 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2504 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2505 const unsigned StorageSize =
2506 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2507 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2508 // See if there are other bits in the bitfield's storage we'll need to load
2509 // and mask together with source before storing.
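// Illustrative sequence (a sketch) for storing to a 3-bit field at offset 2
// within an i8 container:
//   %bf.value = and i8 %src, 7          ; truncate the source to 3 bits
//   %bf.shl = shl i8 %bf.value, 2       ; move it into place
//   %bf.clear = and i8 %bf.load, -29    ; clear bits 2..4 of the old value
//   %bf.set = or i8 %bf.clear, %bf.shl  ; merge, then store back below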
2510 if (StorageSize != Info.Size) {
2511 assert(StorageSize > Info.Size && "Invalid bitfield size.");
2512 llvm::Value *Val =
2513 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2514
2515 // Mask the source value as needed.
2516 if (!hasBooleanRepresentation(Dst.getType()))
2517 SrcVal = Builder.CreateAnd(
2518 SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2519 "bf.value");
2520 MaskedVal = SrcVal;
2521 if (Offset)
2522 SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2523
2524 // Mask out the original value.
2525 Val = Builder.CreateAnd(
2526 Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2527 "bf.clear");
2528
2529 // Or together the unchanged values and the source value.
2530 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2531 } else {
2532 assert(Offset == 0);
2533 // According to the AAPCS:
2534 // When a volatile bit-field is written, and its container does not overlap
2535 // with any non-bit-field member, its container must be read exactly once
2536 // and written exactly once using the access width appropriate to the type
2537 // of the container. The two accesses are not atomic.
2538 if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2539 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2540 Builder.CreateLoad(Ptr, true, "bf.load");
2541 }
2542
2543 // Write the new value back out.
2544 Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2545
2546 // Return the new value of the bit-field, if requested.
2547 if (Result) {
2548 llvm::Value *ResultVal = MaskedVal;
2549
2550 // Sign extend the value if needed.
2551 if (Info.IsSigned) {
2552 assert(Info.Size <= StorageSize);
2553 unsigned HighBits = StorageSize - Info.Size;
2554 if (HighBits) {
2555 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2556 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2557 }
2558 }
2559
2560 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2561 "bf.result.cast");
2562 *Result = EmitFromMemory(ResultVal, Dst.getType());
2563 }
2564 }
2565
2566 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
2567 LValue Dst) {
2568 // HLSL allows storing to scalar values through ExtVector component LValues.
2569 // To support this we need to handle the case where the destination address is
2570 // a scalar.
2571 Address DstAddr = Dst.getExtVectorAddress();
2572 if (!DstAddr.getElementType()->isVectorTy()) {
2573 assert(!Dst.getType()->isVectorType() &&
2574 "this should only occur for non-vector l-values");
2575 Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
2576 return;
2577 }
2578
2579 // This access turns into a read/modify/write of the vector. Load the input
2580 // value now.
2581 llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
2582 const llvm::Constant *Elts = Dst.getExtVectorElts();
2583
2584 llvm::Value *SrcVal = Src.getScalarVal();
2585
2586 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2587 unsigned NumSrcElts = VTy->getNumElements();
2588 unsigned NumDstElts =
2589 cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2590 if (NumDstElts == NumSrcElts) {
2591 // If the source and destination have the same number of elements, use a
2592 // shuffle vector to scatter the source elements into the positions
2593 // selected by the accessor before storing.
2594 SmallVector<int, 4> Mask(NumDstElts);
2595 for (unsigned i = 0; i != NumSrcElts; ++i)
2596 Mask[getAccessedFieldNo(i, Elts)] = i;
2597
2598 Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2599 } else if (NumDstElts > NumSrcElts) {
2600 // Extend the source vector to the same length and then shuffle it
2601 // into the destination.
2602 // FIXME: since we're shuffling with undef, can we just use the indices
2603 // into that? This could be simpler.
2604 SmallVector<int, 4> ExtMask;
2605 for (unsigned i = 0; i != NumSrcElts; ++i)
2606 ExtMask.push_back(i);
2607 ExtMask.resize(NumDstElts, -1);
2608 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2609 // Build an identity mask for the destination.
2610 SmallVector<int, 4> Mask;
2611 for (unsigned i = 0; i != NumDstElts; ++i)
2612 Mask.push_back(i);
2613
2614 // When the vector size is odd and .odd or .hi is used, the last element
2615 // of the Elts constant array will be one past the size of the vector.
2616 // Ignore the last element here, if it is greater than the mask size.
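// E.g. for a 3-element vector, `.hi` names elements {2, 3}; element 3 is past
// the end, so it is dropped from the mask here.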
2617 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2618 NumSrcElts--;
2619
2620 // Overwrite the mask entries that receive elements from the source.
2621 for (unsigned i = 0; i != NumSrcElts; ++i)
2622 Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2623 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2624 } else {
2625 // We should never shorten the vector
2626 llvm_unreachable("unexpected shorten vector length");
2627 }
2628 } else {
2629 // If the Src is a scalar (not a vector) and the target is a vector, it must
2630 // be updating one element.
2631 unsigned InIdx = getAccessedFieldNo(0, Elts);
2632 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2633 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2634 }
2635
2636 Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
2637 Dst.isVolatileQualified());
2638 }
2639
2640 /// Stores of global named registers are always calls to intrinsics.
2641 void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
2642 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2643 "Bad type for register variable");
2644 llvm::MDNode *RegName = cast<llvm::MDNode>(
2645 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2646 assert(RegName && "Register LValue is not metadata");
2647
2648 // We accept integer and pointer types only
2649 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2650 llvm::Type *Ty = OrigTy;
2651 if (OrigTy->isPointerTy())
2652 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2653 llvm::Type *Types[] = { Ty };
2654
2655 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2656 llvm::Value *Value = Src.getScalarVal();
2657 if (OrigTy->isPointerTy())
2658 Value = Builder.CreatePtrToInt(Value, Ty);
2659 Builder.CreateCall(
2660 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2661 }
2662
2663 // setObjCGCLValueClass - sets class of the lvalue for the purpose of
2664 // generating the write-barrier API. It is currently a global, ivar,
2665 // or neither.
2666 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2667 LValue &LV,
2668 bool IsMemberAccess=false) {
2669 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2670 return;
2671
2672 if (isa<ObjCIvarRefExpr>(E)) {
2673 QualType ExpTy = E->getType();
2674 if (IsMemberAccess && ExpTy->isPointerType()) {
2675 // If the ivar is a structure pointer, assigning to a field of
2676 // this struct follows gcc's behavior and conservatively makes it a
2677 // non-ivar write-barrier.
2678 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2679 if (ExpTy->isRecordType()) {
2680 LV.setObjCIvar(false);
2681 return;
2682 }
2683 }
2684 LV.setObjCIvar(true);
2685 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2686 LV.setBaseIvarExp(Exp->getBase());
2687 LV.setObjCArray(E->getType()->isArrayType());
2688 return;
2689 }
2690
2691 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2692 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2693 if (VD->hasGlobalStorage()) {
2694 LV.setGlobalObjCRef(true);
2695 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2696 }
2697 }
2698 LV.setObjCArray(E->getType()->isArrayType());
2699 return;
2700 }
2701
2702 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2703 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2704 return;
2705 }
2706
2707 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2708 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2709 if (LV.isObjCIvar()) {
2710 // If cast is to a structure pointer, follow gcc's behavior and make it
2711 // a non-ivar write-barrier.
2712 QualType ExpTy = E->getType();
2713 if (ExpTy->isPointerType())
2714 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2715 if (ExpTy->isRecordType())
2716 LV.setObjCIvar(false);
2717 }
2718 return;
2719 }
2720
2721 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2722 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2723 return;
2724 }
2725
2726 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2727 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2728 return;
2729 }
2730
2731 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2732 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2733 return;
2734 }
2735
2736 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2737 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2738 return;
2739 }
2740
2741 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2742 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2743 if (LV.isObjCIvar() && !LV.isObjCArray())
2744 // Using array syntax to assign to what an ivar points to is not the
2745 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2746 LV.setObjCIvar(false);
2747 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2748 // Using array syntax to assign to what a global points to is not the
2749 // same as assigning to the global itself. {id *G;} G[i] = 0;
2750 LV.setGlobalObjCRef(false);
2751 return;
2752 }
2753
2754 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2755 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2756 // We don't know if the member is an 'ivar', but this flag is looked at
2757 // only in the context of LV.isObjCIvar().
2758 LV.setObjCArray(E->getType()->isArrayType());
2759 return;
2760 }
2761 }
2762
2763 static LValue EmitThreadPrivateVarDeclLValue(
2764 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2765 llvm::Type *RealVarTy, SourceLocation Loc) {
2766 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2767 Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
2768 CGF, VD, Addr, Loc);
2769 else
2770 Addr =
2771 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2772
2773 Addr = Addr.withElementType(RealVarTy);
2774 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2775 }
2776
2777 static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
2778 const VarDecl *VD, QualType T) {
2779 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2780 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2781   // Return an invalid address if the variable is MT_To (or MT_Enter starting
2782   // with OpenMP 5.2) and unified memory is not enabled. For all other cases
2783   // (MT_Link, or MT_To/MT_Enter with unified memory) return a valid address.
2784 if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2785 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2786 !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
2787 return Address::invalid();
2788 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2789 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2790 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2791 CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
2792 "Expected link clause OR to clause with unified memory enabled.");
2793 QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2794 Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
2795 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2796 }
2797
2798 Address
2799 CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
2800 LValueBaseInfo *PointeeBaseInfo,
2801 TBAAAccessInfo *PointeeTBAAInfo) {
2802 llvm::LoadInst *Load =
2803 Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
2804 CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
2805 return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
2806 CharUnits(), /*ForPointeeType=*/true,
2807 PointeeBaseInfo, PointeeTBAAInfo);
2808 }
2809
2810 LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
2811 LValueBaseInfo PointeeBaseInfo;
2812 TBAAAccessInfo PointeeTBAAInfo;
2813 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2814 &PointeeTBAAInfo);
2815 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2816 PointeeBaseInfo, PointeeTBAAInfo);
2817 }
2818
2819 Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
2820 const PointerType *PtrTy,
2821 LValueBaseInfo *BaseInfo,
2822 TBAAAccessInfo *TBAAInfo) {
2823 llvm::Value *Addr = Builder.CreateLoad(Ptr);
2824 return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
2825 CharUnits(), /*ForPointeeType=*/true,
2826 BaseInfo, TBAAInfo);
2827 }
2828
2829 LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
2830 const PointerType *PtrTy) {
2831 LValueBaseInfo BaseInfo;
2832 TBAAAccessInfo TBAAInfo;
2833 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2834 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2835 }
2836
2837 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
2838 const Expr *E, const VarDecl *VD) {
2839 QualType T = E->getType();
2840
2841 // If it's thread_local, emit a call to its wrapper function instead.
2842 if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2843 CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
2844 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2845 // Check if the variable is marked as declare target with link clause in
2846 // device codegen.
2847 if (CGF.getLangOpts().OpenMPIsTargetDevice) {
2848 Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2849 if (Addr.isValid())
2850 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2851 }
2852
2853 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2854
2855 if (VD->getTLSKind() != VarDecl::TLS_None)
2856 V = CGF.Builder.CreateThreadLocalAddress(V);
2857
2858 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2859 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2860 Address Addr(V, RealVarTy, Alignment);
2861 // Emit reference to the private copy of the variable if it is an OpenMP
2862 // threadprivate variable.
2863 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2864 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2865 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2866 E->getExprLoc());
2867 }
2868 LValue LV = VD->getType()->isReferenceType() ?
2869 CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2870 AlignmentSource::Decl) :
2871 CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2872 setObjCGCLValueClass(CGF.getContext(), E, LV);
2873 return LV;
2874 }
2875
2876 llvm::Constant *CodeGenModule::getRawFunctionPointer(GlobalDecl GD,
2877 llvm::Type *Ty) {
2878 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2879 if (FD->hasAttr<WeakRefAttr>()) {
2880 ConstantAddress aliasee = GetWeakRefReference(FD);
2881 return aliasee.getPointer();
2882 }
2883
2884 llvm::Constant *V = GetAddrOfFunction(GD, Ty);
2885 return V;
2886 }
2887
2888 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
2889 GlobalDecl GD) {
2890 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2891 llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
2892 CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2893 return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2894 AlignmentSource::Decl);
2895 }
2896
2897 static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
2898 llvm::Value *ThisValue) {
2899
2900 return CGF.EmitLValueForLambdaField(FD, ThisValue);
2901 }
2902
2903 /// Named registers are represented by named metadata holding the register
2904 /// name, which is passed as an argument to the intrinsics
2905 /// @llvm.read_register and @llvm.write_register.
2906 /// So far, only the name is passed down, but other options such as the
2907 /// register type, allocation type or even optimization options could be
2908 /// passed down via the metadata node.
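/// For example (illustrative), for a global register variable declared as
///   register unsigned long StackPtr asm("sp");
/// the module would end up containing named metadata along the lines of:
/// \code
///   !llvm.named.register.sp = !{!0}
///   !0 = !{!"sp"}
/// \endcode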
2909 static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
2910 SmallString<64> Name("llvm.named.register.");
2911 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
2912 assert(Asm->getLabel().size() < 64-Name.size() &&
2913 "Register name too big");
2914 Name.append(Asm->getLabel());
2915 llvm::NamedMDNode *M =
2916 CGM.getModule().getOrInsertNamedMetadata(Name);
2917 if (M->getNumOperands() == 0) {
2918 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2919 Asm->getLabel());
2920 llvm::Metadata *Ops[] = {Str};
2921 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2922 }
2923
2924 CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2925
2926 llvm::Value *Ptr =
2927 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2928 return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
2929 }
2930
2931 /// Determine whether we can emit a reference to \p VD from the current
2932 /// context, despite not necessarily having seen an odr-use of the variable in
2933 /// this context.
2934 static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
2935 const DeclRefExpr *E,
2936 const VarDecl *VD) {
2937 // For a variable declared in an enclosing scope, do not emit a spurious
2938 // reference even if we have a capture, as that will emit an unwarranted
2939 // reference to our capture state, and will likely generate worse code than
2940 // emitting a local copy.
2941 if (E->refersToEnclosingVariableOrCapture())
2942 return false;
2943
2944 // For a local declaration declared in this function, we can always reference
2945 // it even if we don't have an odr-use.
2946 if (VD->hasLocalStorage()) {
2947 return VD->getDeclContext() ==
2948 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
2949 }
2950
2951 // For a global declaration, we can emit a reference to it if we know
2952 // for sure that we are able to emit a definition of it.
2953 VD = VD->getDefinition(CGF.getContext());
2954 if (!VD)
2955 return false;
2956
2957 // Don't emit a spurious reference if it might be to a variable that only
2958 // exists on a different device / target.
2959 // FIXME: This is unnecessarily broad. Check whether this would actually be a
2960 // cross-target reference.
2961 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
2962 CGF.getLangOpts().OpenCL) {
2963 return false;
2964 }
2965
2966 // We can emit a spurious reference only if the linkage implies that we'll
2967 // be emitting a non-interposable symbol that will be retained until link
2968 // time.
2969 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
2970 case llvm::GlobalValue::ExternalLinkage:
2971 case llvm::GlobalValue::LinkOnceODRLinkage:
2972 case llvm::GlobalValue::WeakODRLinkage:
2973 case llvm::GlobalValue::InternalLinkage:
2974 case llvm::GlobalValue::PrivateLinkage:
2975 return true;
2976 default:
2977 return false;
2978 }
2979 }
2980
2981 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
2982 const NamedDecl *ND = E->getDecl();
2983 QualType T = E->getType();
2984
2985 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
2986 "should not emit an unevaluated operand");
2987
2988 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2989     // Global named registers are accessed via intrinsics only.
2990 if (VD->getStorageClass() == SC_Register &&
2991 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
2992 return EmitGlobalNamedRegister(VD, CGM);
2993
2994 // If this DeclRefExpr does not constitute an odr-use of the variable,
2995 // we're not permitted to emit a reference to it in general, and it might
2996 // not be captured if capture would be necessary for a use. Emit the
2997 // constant value directly instead.
2998 if (E->isNonOdrUse() == NOUR_Constant &&
2999 (VD->getType()->isReferenceType() ||
3000 !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
3001 VD->getAnyInitializer(VD);
3002 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3003 E->getLocation(), *VD->evaluateValue(), VD->getType());
3004 assert(Val && "failed to emit constant expression");
3005
3006 Address Addr = Address::invalid();
3007 if (!VD->getType()->isReferenceType()) {
3008 // Spill the constant value to a global.
3009 Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
3010 getContext().getDeclAlign(VD));
3011 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
3012 auto *PTy = llvm::PointerType::get(
3013 VarTy, getTypes().getTargetAddressSpace(VD->getType()));
3014 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
3015 } else {
3016 // Should we be using the alignment of the constant pointer we emitted?
3017 CharUnits Alignment =
3018 CGM.getNaturalTypeAlignment(E->getType(),
3019 /* BaseInfo= */ nullptr,
3020 /* TBAAInfo= */ nullptr,
3021 /* forPointeeType= */ true);
3022 Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3023 }
3024 return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
3025 }
3026
3027 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3028
3029 // Check for captured variables.
3030 if (E->refersToEnclosingVariableOrCapture()) {
3031 VD = VD->getCanonicalDecl();
3032 if (auto *FD = LambdaCaptureFields.lookup(VD))
3033 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3034 if (CapturedStmtInfo) {
3035 auto I = LocalDeclMap.find(VD);
3036 if (I != LocalDeclMap.end()) {
3037 LValue CapLVal;
3038 if (VD->getType()->isReferenceType())
3039 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3040 AlignmentSource::Decl);
3041 else
3042 CapLVal = MakeAddrLValue(I->second, T);
3043 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3044 // in simd context.
3045 if (getLangOpts().OpenMP &&
3046 CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3047 CapLVal.setNontemporal(/*Value=*/true);
3048 return CapLVal;
3049 }
3050 LValue CapLVal =
3051 EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
3052 CapturedStmtInfo->getContextValue());
3053 Address LValueAddress = CapLVal.getAddress();
3054 CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3055 LValueAddress.getElementType(),
3056 getContext().getDeclAlign(VD)),
3057 CapLVal.getType(),
3058 LValueBaseInfo(AlignmentSource::Decl),
3059 CapLVal.getTBAAInfo());
3060 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3061 // in simd context.
3062 if (getLangOpts().OpenMP &&
3063 CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3064 CapLVal.setNontemporal(/*Value=*/true);
3065 return CapLVal;
3066 }
3067
3068 assert(isa<BlockDecl>(CurCodeDecl));
3069 Address addr = GetAddrOfBlockDecl(VD);
3070 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3071 }
3072 }
3073
3074 // FIXME: We should be able to assert this for FunctionDecls as well!
3075 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3076 // those with a valid source location.
3077 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3078 !E->getLocation().isValid()) &&
3079 "Should not use decl without marking it used!");
3080
3081 if (ND->hasAttr<WeakRefAttr>()) {
3082 const auto *VD = cast<ValueDecl>(ND);
3083 ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
3084 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3085 }
3086
3087 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3088 // Check if this is a global variable.
3089 if (VD->hasLinkage() || VD->isStaticDataMember())
3090 return EmitGlobalVarDeclLValue(*this, E, VD);
3091
3092 Address addr = Address::invalid();
3093
3094 // The variable should generally be present in the local decl map.
3095 auto iter = LocalDeclMap.find(VD);
3096 if (iter != LocalDeclMap.end()) {
3097 addr = iter->second;
3098
3099       // Otherwise, it might be a static local we haven't emitted yet for
3100       // some reason; most likely because it's in an outer function.
3101 } else if (VD->isStaticLocal()) {
3102 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3103 *VD, CGM.getLLVMLinkageVarDefinition(VD));
3104 addr = Address(
3105 var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3106
3107 // No other cases for now.
3108 } else {
3109 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3110 }
3111
3112 // Handle threadlocal function locals.
3113 if (VD->getTLSKind() != VarDecl::TLS_None)
3114 addr = addr.withPointer(
3115 Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3116 NotKnownNonNull);
3117
3118 // Check for OpenMP threadprivate variables.
3119 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3120 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3121 return EmitThreadPrivateVarDeclLValue(
3122 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3123 E->getExprLoc());
3124 }
3125
3126 // Drill into block byref variables.
3127 bool isBlockByref = VD->isEscapingByref();
3128 if (isBlockByref) {
3129 addr = emitBlockByrefAddress(addr, VD);
3130 }
3131
3132 // Drill into reference types.
3133 LValue LV = VD->getType()->isReferenceType() ?
3134 EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
3135 MakeAddrLValue(addr, T, AlignmentSource::Decl);
3136
3137 bool isLocalStorage = VD->hasLocalStorage();
3138
3139 bool NonGCable = isLocalStorage &&
3140 !VD->getType()->isReferenceType() &&
3141 !isBlockByref;
3142 if (NonGCable) {
3143 LV.getQuals().removeObjCGCAttr();
3144 LV.setNonGC(true);
3145 }
3146
3147 bool isImpreciseLifetime =
3148 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3149 if (isImpreciseLifetime)
3150 LV.setARCPreciseLifetime(ARCImpreciseLifetime);
3151 setObjCGCLValueClass(getContext(), E, LV);
3152 return LV;
3153 }
3154
3155 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3156 return EmitFunctionDeclLValue(*this, E, FD);
3157
3158 // FIXME: While we're emitting a binding from an enclosing scope, all other
3159 // DeclRefExprs we see should be implicitly treated as if they also refer to
3160 // an enclosing scope.
3161 if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3162 if (E->refersToEnclosingVariableOrCapture()) {
3163 auto *FD = LambdaCaptureFields.lookup(BD);
3164 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3165 }
3166 return EmitLValue(BD->getBinding());
3167 }
3168
3169 // We can form DeclRefExprs naming GUID declarations when reconstituting
3170 // non-type template parameters into expressions.
3171 if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3172 return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
3173 AlignmentSource::Decl);
3174
3175 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3176 auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3177 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3178
3179 if (AS != T.getAddressSpace()) {
3180 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3181 auto PtrTy = ATPO.getElementType()->getPointerTo(TargetAS);
3182 auto ASC = getTargetHooks().performAddrSpaceCast(
3183 CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
3184 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3185 }
3186
3187 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3188 }
3189
3190 llvm_unreachable("Unhandled DeclRefExpr");
3191 }
3192
3193 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
3194 // __extension__ doesn't affect lvalue-ness.
3195 if (E->getOpcode() == UO_Extension)
3196 return EmitLValue(E->getSubExpr());
3197
3198 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
3199 switch (E->getOpcode()) {
3200 default: llvm_unreachable("Unknown unary operator lvalue!");
3201 case UO_Deref: {
3202 QualType T = E->getSubExpr()->getType()->getPointeeType();
3203 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3204
3205 LValueBaseInfo BaseInfo;
3206 TBAAAccessInfo TBAAInfo;
3207 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
3208 &TBAAInfo);
3209 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3210 LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
3211
3212     // We should not generate a __weak write barrier on an indirect reference
3213     // to a pointer to object, as in: void foo (__weak id *param); *param = 0;
3214     // However, we continue to generate a __strong write barrier on an indirect
3215     // write into a pointer to object.
3216 if (getLangOpts().ObjC &&
3217 getLangOpts().getGC() != LangOptions::NonGC &&
3218 LV.isObjCWeak())
3219 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
3220 return LV;
3221 }
3222 case UO_Real:
3223 case UO_Imag: {
3224 LValue LV = EmitLValue(E->getSubExpr());
3225 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3226
3227 // __real is valid on scalars. This is a faster way of testing that.
3228 // __imag can only produce an rvalue on scalars.
3229 if (E->getOpcode() == UO_Real &&
3230 !LV.getAddress().getElementType()->isStructTy()) {
3231 assert(E->getSubExpr()->getType()->isArithmeticType());
3232 return LV;
3233 }
3234
3235 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3236
3237 Address Component =
3238 (E->getOpcode() == UO_Real
3239 ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
3240 : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
3241 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3242 CGM.getTBAAInfoForSubobject(LV, T));
3243 ElemLV.getQuals().addQualifiers(LV.getQuals());
3244 return ElemLV;
3245 }
3246 case UO_PreInc:
3247 case UO_PreDec: {
3248 LValue LV = EmitLValue(E->getSubExpr());
3249 bool isInc = E->getOpcode() == UO_PreInc;
3250
3251 if (E->getType()->isAnyComplexType())
3252 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3253 else
3254 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3255 return LV;
3256 }
3257 }
3258 }
3259
3260 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
3261 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
3262 E->getType(), AlignmentSource::Decl);
3263 }
3264
3265 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
3266 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
3267 E->getType(), AlignmentSource::Decl);
3268 }
3269
3270 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
3271 auto SL = E->getFunctionName();
3272 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3273 StringRef FnName = CurFn->getName();
3274 if (FnName.starts_with("\01"))
3275 FnName = FnName.substr(1);
3276 StringRef NameItems[] = {
3277 PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3278 std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3279 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3280 std::string Name = std::string(SL->getString());
3281 if (!Name.empty()) {
3282 unsigned Discriminator =
3283 CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
3284 if (Discriminator)
3285 Name += "_" + Twine(Discriminator + 1).str();
3286 auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
3287 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3288 } else {
3289 auto C =
3290 CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
3291 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3292 }
3293 }
3294 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3295 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3296 }
3297
3298 /// Emit a type description suitable for use by a runtime sanitizer library. The
3299 /// format of a type descriptor is
3300 ///
3301 /// \code
3302 /// { i16 TypeKind, i16 TypeInfo }
3303 /// \endcode
3304 ///
3305 /// followed by an array of i8 containing the type name. TypeKind is 0 for an
3306 /// integer, 1 for a floating point value, and -1 for anything else.
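/// As an illustrative sketch (assuming a 32-bit signed 'int'), the descriptor
/// emitted for 'int' would look roughly like:
/// \code
///   { i16 0, i16 11, [6 x i8] c"'int'\00" }  ; TypeInfo = (log2(32) << 1) | 1
/// \endcode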
3307 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
3308 // Only emit each type's descriptor once.
3309 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3310 return C;
3311
3312 uint16_t TypeKind = -1;
3313 uint16_t TypeInfo = 0;
3314
3315 if (T->isIntegerType()) {
3316 TypeKind = 0;
3317 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3318 (T->isSignedIntegerType() ? 1 : 0);
3319 } else if (T->isFloatingType()) {
3320 TypeKind = 1;
3321 TypeInfo = getContext().getTypeSize(T);
3322 }
3323
3324 // Format the type name as if for a diagnostic, including quotes and
3325 // optionally an 'aka'.
3326 SmallString<32> Buffer;
3327 CGM.getDiags().ConvertArgToString(
3328 DiagnosticsEngine::ak_qualtype, (intptr_t)T.getAsOpaquePtr(), StringRef(),
3329 StringRef(), std::nullopt, Buffer, std::nullopt);
3330
3331 llvm::Constant *Components[] = {
3332 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3333 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3334 };
3335 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3336
3337 auto *GV = new llvm::GlobalVariable(
3338 CGM.getModule(), Descriptor->getType(),
3339 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3340 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3341 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
3342
3343 // Remember the descriptor for this type.
3344 CGM.setTypeDescriptorInMap(T, GV);
3345
3346 return GV;
3347 }
3348
3349 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3350 llvm::Type *TargetTy = IntPtrTy;
3351
3352 if (V->getType() == TargetTy)
3353 return V;
3354
3355 // Floating-point types which fit into intptr_t are bitcast to integers
3356 // and then passed directly (after zero-extension, if necessary).
3357 if (V->getType()->isFloatingPointTy()) {
3358 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3359 if (Bits <= TargetTy->getIntegerBitWidth())
3360 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3361 Bits));
3362 }
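  // For example (illustrative), on a 64-bit target a 'float' value is bitcast
  // to i32 here and then zero-extended to i64 below, while a 'double' is
  // bitcast to i64 and passed through as-is.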
3363
3364 // Integers which fit in intptr_t are zero-extended and passed directly.
3365 if (V->getType()->isIntegerTy() &&
3366 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3367 return Builder.CreateZExt(V, TargetTy);
3368
3369 // Pointers are passed directly, everything else is passed by address.
3370 if (!V->getType()->isPointerTy()) {
3371 RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
3372 Builder.CreateStore(V, Ptr);
3373 V = Ptr.getPointer();
3374 }
3375 return Builder.CreatePtrToInt(V, TargetTy);
3376 }
3377
3378 /// Emit a representation of a SourceLocation for passing to a handler
3379 /// in a sanitizer runtime library. The format for this data is:
3380 /// \code
3381 /// struct SourceLocation {
3382 /// const char *Filename;
3383 /// int32_t Line, Column;
3384 /// };
3385 /// \endcode
3386 /// For an invalid SourceLocation, the Filename pointer is null.
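/// For example (illustrative), a check at line 12, column 8 of "a.c" would be
/// described roughly as:
/// \code
///   { ptr @.src, i32 12, i32 8 }  ; where @.src holds "a.c\00"
/// \endcode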
3387 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
3388 llvm::Constant *Filename;
3389 int Line, Column;
3390
3391 PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
3392 if (PLoc.isValid()) {
3393 StringRef FilenameString = PLoc.getFilename();
3394
3395 int PathComponentsToStrip =
3396 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3397 if (PathComponentsToStrip < 0) {
3398 assert(PathComponentsToStrip != INT_MIN);
3399 int PathComponentsToKeep = -PathComponentsToStrip;
3400 auto I = llvm::sys::path::rbegin(FilenameString);
3401 auto E = llvm::sys::path::rend(FilenameString);
3402 while (I != E && --PathComponentsToKeep)
3403 ++I;
3404
3405 FilenameString = FilenameString.substr(I - E);
3406 } else if (PathComponentsToStrip > 0) {
3407 auto I = llvm::sys::path::begin(FilenameString);
3408 auto E = llvm::sys::path::end(FilenameString);
3409 while (I != E && PathComponentsToStrip--)
3410 ++I;
3411
3412 if (I != E)
3413 FilenameString =
3414 FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3415 else
3416 FilenameString = llvm::sys::path::filename(FilenameString);
3417 }
3418
3419 auto FilenameGV =
3420 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3421 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
3422 cast<llvm::GlobalVariable>(
3423 FilenameGV.getPointer()->stripPointerCasts()));
3424 Filename = FilenameGV.getPointer();
3425 Line = PLoc.getLine();
3426 Column = PLoc.getColumn();
3427 } else {
3428 Filename = llvm::Constant::getNullValue(Int8PtrTy);
3429 Line = Column = 0;
3430 }
3431
3432 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3433 Builder.getInt32(Column)};
3434
3435 return llvm::ConstantStruct::getAnon(Data);
3436 }
3437
3438 namespace {
3439 /// Specify under what conditions this check can be recovered
3440 enum class CheckRecoverableKind {
3441 /// Always terminate program execution if this check fails.
3442 Unrecoverable,
3443 /// Check supports recovering, runtime has both fatal (noreturn) and
3444 /// non-fatal handlers for this check.
3445 Recoverable,
3446   /// The runtime conditionally aborts; we always need to support recovery.
3447 AlwaysRecoverable
3448 };
3449 }
3450
3451 static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
3452 assert(Kind.countPopulation() == 1);
3453 if (Kind == SanitizerKind::Vptr)
3454 return CheckRecoverableKind::AlwaysRecoverable;
3455 else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
3456 return CheckRecoverableKind::Unrecoverable;
3457 else
3458 return CheckRecoverableKind::Recoverable;
3459 }
3460
3461 namespace {
3462 struct SanitizerHandlerInfo {
3463 char const *const Name;
3464 unsigned Version;
3465 };
3466 }
3467
3468 const SanitizerHandlerInfo SanitizerHandlers[] = {
3469 #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3470 LIST_SANITIZER_CHECKS
3471 #undef SANITIZER_CHECK
3472 };
3473
3474 static void emitCheckHandlerCall(CodeGenFunction &CGF,
3475 llvm::FunctionType *FnType,
3476 ArrayRef<llvm::Value *> FnArgs,
3477 SanitizerHandler CheckHandler,
3478 CheckRecoverableKind RecoverKind, bool IsFatal,
3479 llvm::BasicBlock *ContBB) {
3480 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3481 std::optional<ApplyDebugLocation> DL;
3482 if (!CGF.Builder.getCurrentDebugLocation()) {
3483 // Ensure that the call has at least an artificial debug location.
3484 DL.emplace(CGF, SourceLocation());
3485 }
3486 bool NeedsAbortSuffix =
3487 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3488 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3489 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3490 const StringRef CheckName = CheckInfo.Name;
3491 std::string FnName = "__ubsan_handle_" + CheckName.str();
3492 if (CheckInfo.Version && !MinimalRuntime)
3493 FnName += "_v" + llvm::utostr(CheckInfo.Version);
3494 if (MinimalRuntime)
3495 FnName += "_minimal";
3496 if (NeedsAbortSuffix)
3497 FnName += "_abort";
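  // For example (illustrative): a fatal but recoverable check using the full
  // runtime might call __ubsan_handle_type_mismatch_v1_abort, while the same
  // check built against the minimal runtime would call
  // __ubsan_handle_type_mismatch_minimal_abort.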
3498 bool MayReturn =
3499 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3500
3501 llvm::AttrBuilder B(CGF.getLLVMContext());
3502 if (!MayReturn) {
3503 B.addAttribute(llvm::Attribute::NoReturn)
3504 .addAttribute(llvm::Attribute::NoUnwind);
3505 }
3506 B.addUWTableAttr(llvm::UWTableKind::Default);
3507
3508 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3509 FnType, FnName,
3510 llvm::AttributeList::get(CGF.getLLVMContext(),
3511 llvm::AttributeList::FunctionIndex, B),
3512 /*Local=*/true);
3513 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3514 if (!MayReturn) {
3515 HandlerCall->setDoesNotReturn();
3516 CGF.Builder.CreateUnreachable();
3517 } else {
3518 CGF.Builder.CreateBr(ContBB);
3519 }
3520 }
3521
3522 void CodeGenFunction::EmitCheck(
3523 ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
3524 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3525 ArrayRef<llvm::Value *> DynamicArgs) {
3526 assert(IsSanitizerScope);
3527 assert(Checked.size() > 0);
3528 assert(CheckHandler >= 0 &&
3529 size_t(CheckHandler) < std::size(SanitizerHandlers));
3530 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3531
3532 llvm::Value *FatalCond = nullptr;
3533 llvm::Value *RecoverableCond = nullptr;
3534 llvm::Value *TrapCond = nullptr;
3535 for (int i = 0, n = Checked.size(); i < n; ++i) {
3536 llvm::Value *Check = Checked[i].first;
3537 // -fsanitize-trap= overrides -fsanitize-recover=.
3538 llvm::Value *&Cond =
3539 CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
3540 ? TrapCond
3541 : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
3542 ? RecoverableCond
3543 : FatalCond;
3544 Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
3545 }
3546
3547 if (ClSanitizeGuardChecks) {
3548 llvm::Value *Allow =
3549 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
3550 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));
3551
3552 for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {
3553 if (*Cond)
3554 *Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));
3555 }
3556 }
3557
3558 if (TrapCond)
3559 EmitTrapCheck(TrapCond, CheckHandler);
3560 if (!FatalCond && !RecoverableCond)
3561 return;
3562
3563 llvm::Value *JointCond;
3564 if (FatalCond && RecoverableCond)
3565 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3566 else
3567 JointCond = FatalCond ? FatalCond : RecoverableCond;
3568 assert(JointCond);
3569
3570 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3571 assert(SanOpts.has(Checked[0].second));
3572 #ifndef NDEBUG
3573 for (int i = 1, n = Checked.size(); i < n; ++i) {
3574 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3575 "All recoverable kinds in a single check must be same!");
3576 assert(SanOpts.has(Checked[i].second));
3577 }
3578 #endif
3579
3580 llvm::BasicBlock *Cont = createBasicBlock("cont");
3581 llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3582 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3583   // Give a hint that we very much don't expect to execute the handler.
3584 llvm::MDBuilder MDHelper(getLLVMContext());
3585 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3586 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3587 EmitBlock(Handlers);
3588
3589 // Handler functions take an i8* pointing to the (handler-specific) static
3590 // information block, followed by a sequence of intptr_t arguments
3591 // representing operand values.
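  // For example (illustrative), the full-runtime out-of-bounds handler has a
  // prototype along the lines of:
  //   void __ubsan_handle_out_of_bounds(OutOfBoundsData *Data, ValueHandle Index);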
3592 SmallVector<llvm::Value *, 4> Args;
3593 SmallVector<llvm::Type *, 4> ArgTypes;
3594 if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3595 Args.reserve(DynamicArgs.size() + 1);
3596 ArgTypes.reserve(DynamicArgs.size() + 1);
3597
3598 // Emit handler arguments and create handler function type.
3599 if (!StaticArgs.empty()) {
3600 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3601 auto *InfoPtr = new llvm::GlobalVariable(
3602 CGM.getModule(), Info->getType(), false,
3603 llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
3604 llvm::GlobalVariable::NotThreadLocal,
3605 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
3606 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3607 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3608 Args.push_back(InfoPtr);
3609 ArgTypes.push_back(Args.back()->getType());
3610 }
3611
3612 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3613 Args.push_back(EmitCheckValue(DynamicArgs[i]));
3614 ArgTypes.push_back(IntPtrTy);
3615 }
3616 }
3617
3618 llvm::FunctionType *FnType =
3619 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3620
3621 if (!FatalCond || !RecoverableCond) {
3622 // Simple case: we need to generate a single handler call, either
3623 // fatal, or non-fatal.
3624 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3625 (FatalCond != nullptr), Cont);
3626 } else {
3627     // Emit two handler calls: the first for the set of unrecoverable checks,
3628     // the second for the recoverable ones.
3629 llvm::BasicBlock *NonFatalHandlerBB =
3630 createBasicBlock("non_fatal." + CheckName);
3631 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3632 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3633 EmitBlock(FatalHandlerBB);
3634 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3635 NonFatalHandlerBB);
3636 EmitBlock(NonFatalHandlerBB);
3637 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3638 Cont);
3639 }
3640
3641 EmitBlock(Cont);
3642 }
3643
3644 void CodeGenFunction::EmitCfiSlowPathCheck(
3645 SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
3646 llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
3647 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3648
3649 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3650 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3651
3652 llvm::MDBuilder MDHelper(getLLVMContext());
3653 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3654 BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3655
3656 EmitBlock(CheckBB);
3657
3658 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);
3659
3660 llvm::CallInst *CheckCall;
3661 llvm::FunctionCallee SlowPathFn;
3662 if (WithDiag) {
3663 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3664 auto *InfoPtr =
3665 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3666 llvm::GlobalVariable::PrivateLinkage, Info);
3667 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3668 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3669
3670 SlowPathFn = CGM.getModule().getOrInsertFunction(
3671 "__cfi_slowpath_diag",
3672 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3673 false));
3674 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
3675 } else {
3676 SlowPathFn = CGM.getModule().getOrInsertFunction(
3677 "__cfi_slowpath",
3678 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3679 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3680 }
3681
3682 CGM.setDSOLocal(
3683 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3684 CheckCall->setDoesNotThrow();
3685
3686 EmitBlock(Cont);
3687 }
3688
3689 // Emit a stub for the __cfi_check function so that the linker knows about
3690 // this symbol in LTO mode.
3691 void CodeGenFunction::EmitCfiCheckStub() {
3692 llvm::Module *M = &CGM.getModule();
3693 ASTContext &C = getContext();
3694 QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
3695
3696 FunctionArgList FnArgs;
3697 ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
3698 ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
3699 ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
3700 ImplicitParamKind::Other);
3701 FnArgs.push_back(&ArgCallsiteTypeId);
3702 FnArgs.push_back(&ArgAddr);
3703 FnArgs.push_back(&ArgCFICheckFailData);
3704 const CGFunctionInfo &FI =
3705 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);
3706
3707 llvm::Function *F = llvm::Function::Create(
3708 llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
3709 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3710 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3711 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
3712 F->setAlignment(llvm::Align(4096));
3713 CGM.setDSOLocal(F);
3714
3715 llvm::LLVMContext &Ctx = M->getContext();
3716 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3717 // CrossDSOCFI pass is not executed if there is no executable code.
3718 SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
3719 llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
3720 llvm::ReturnInst::Create(Ctx, nullptr, BB);
3721 }
3722
3723 // This function is basically a switch over the CFI failure kind, which is
3724 // extracted from CFICheckFailData (the first function argument). Each case is
3725 // either llvm.trap or a call to one of the two runtime handlers, based on the
3726 // -fsanitize-trap and -fsanitize-recover settings. The default case (an
3727 // invalid failure kind) traps, but this should really never happen.
3728 // CFICheckFailData can be nullptr if the calling module has -fsanitize-trap
3729 // behavior for this check kind; in this case __cfi_check_fail traps as well.
3730 void CodeGenFunction::EmitCfiCheckFail() {
3731 SanitizerScope SanScope(this);
3732 FunctionArgList Args;
3733 ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
3734 ImplicitParamKind::Other);
3735 ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
3736 ImplicitParamKind::Other);
3737 Args.push_back(&ArgData);
3738 Args.push_back(&ArgAddr);
3739
3740 const CGFunctionInfo &FI =
3741 CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);
3742
3743 llvm::Function *F = llvm::Function::Create(
3744 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3745 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3746
3747 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3748 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
3749 F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3750
3751 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3752 SourceLocation());
3753
3754   // This function is not affected by NoSanitizeList. It does not have a
3755   // source location, but "src:*" would still apply. Revert any changes to
3756   // SanOpts made in StartFunction.
3757 SanOpts = CGM.getLangOpts().Sanitize;
3758
3759 llvm::Value *Data =
3760 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3761 CGM.getContext().VoidPtrTy, ArgData.getLocation());
3762 llvm::Value *Addr =
3763 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3764 CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3765
3766 // Data == nullptr means the calling module has trap behaviour for this check.
3767 llvm::Value *DataIsNotNullPtr =
3768 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3769 EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);
3770
3771 llvm::StructType *SourceLocationTy =
3772 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3773 llvm::StructType *CfiCheckFailDataTy =
3774 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
3775
3776 llvm::Value *V = Builder.CreateConstGEP2_32(
3777 CfiCheckFailDataTy,
3778 Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
3779 0);
3780
3781 Address CheckKindAddr(V, Int8Ty, getIntAlign());
3782 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3783
3784 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3785 CGM.getLLVMContext(),
3786 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3787 llvm::Value *ValidVtable = Builder.CreateZExt(
3788 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
3789 {Addr, AllVtables}),
3790 IntPtrTy);
3791
3792 const std::pair<int, SanitizerMask> CheckKinds[] = {
3793 {CFITCK_VCall, SanitizerKind::CFIVCall},
3794 {CFITCK_NVCall, SanitizerKind::CFINVCall},
3795 {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
3796 {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
3797 {CFITCK_ICall, SanitizerKind::CFIICall}};
3798
3799 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks;
3800 for (auto CheckKindMaskPair : CheckKinds) {
3801 int Kind = CheckKindMaskPair.first;
3802 SanitizerMask Mask = CheckKindMaskPair.second;
3803 llvm::Value *Cond =
3804 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
3805 if (CGM.getLangOpts().Sanitize.has(Mask))
3806 EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
3807 {Data, Addr, ValidVtable});
3808 else
3809 EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
3810 }
3811
3812 FinishFunction();
3813 // The only reference to this function will be created during LTO link.
3814 // Make sure it survives until then.
3815 CGM.addUsedGlobal(F);
3816 }
3817
3818 void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
3819 if (SanOpts.has(SanitizerKind::Unreachable)) {
3820 SanitizerScope SanScope(this);
3821 EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3822 SanitizerKind::Unreachable),
3823 SanitizerHandler::BuiltinUnreachable,
3824 EmitCheckSourceLocation(Loc), std::nullopt);
3825 }
3826 Builder.CreateUnreachable();
3827 }
3828
3829 void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
3830 SanitizerHandler CheckHandlerID) {
3831 llvm::BasicBlock *Cont = createBasicBlock("cont");
3832
3833 // If we're optimizing, collapse all calls to trap down to just one per
3834 // check-type per function to save on code size.
3835 if ((int)TrapBBs.size() <= CheckHandlerID)
3836 TrapBBs.resize(CheckHandlerID + 1);
3837
3838 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
3839
3840 if (!ClSanitizeDebugDeoptimization &&
3841 CGM.getCodeGenOpts().OptimizationLevel && TrapBB &&
3842 (!CurCodeDecl || !CurCodeDecl->hasAttr<OptimizeNoneAttr>())) {
3843 auto Call = TrapBB->begin();
3844 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
3845
3846 Call->applyMergedLocation(Call->getDebugLoc(),
3847 Builder.getCurrentDebugLocation());
3848 Builder.CreateCondBr(Checked, Cont, TrapBB);
3849 } else {
3850 TrapBB = createBasicBlock("trap");
3851 Builder.CreateCondBr(Checked, Cont, TrapBB);
3852 EmitBlock(TrapBB);
3853
3854 llvm::CallInst *TrapCall = Builder.CreateCall(
3855 CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
3856 llvm::ConstantInt::get(CGM.Int8Ty,
3857 ClSanitizeDebugDeoptimization
3858 ? TrapBB->getParent()->size()
3859 : static_cast<uint64_t>(CheckHandlerID)));
3860
3861 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3862 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3863 CGM.getCodeGenOpts().TrapFuncName);
3864 TrapCall->addFnAttr(A);
3865 }
3866 TrapCall->setDoesNotReturn();
3867 TrapCall->setDoesNotThrow();
3868 Builder.CreateUnreachable();
3869 }
3870
3871 EmitBlock(Cont);
3872 }
3873
3874 llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3875 llvm::CallInst *TrapCall =
3876 Builder.CreateCall(CGM.getIntrinsic(IntrID));
3877
3878 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3879 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3880 CGM.getCodeGenOpts().TrapFuncName);
3881 TrapCall->addFnAttr(A);
3882 }
3883
3884 return TrapCall;
3885 }
3886
3887 Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
3888 LValueBaseInfo *BaseInfo,
3889 TBAAAccessInfo *TBAAInfo) {
3890 assert(E->getType()->isArrayType() &&
3891 "Array to pointer decay must have array source type!");
3892
3893 // Expressions of array type can't be bitfields or vector elements.
3894 LValue LV = EmitLValue(E);
3895 Address Addr = LV.getAddress();
3896
3897 // If the array type was an incomplete type, we need to make sure
3898 // the decay ends up being the right type.
3899 llvm::Type *NewTy = ConvertType(E->getType());
3900 Addr = Addr.withElementType(NewTy);
3901
3902 // Note that VLA pointers are always decayed, so we don't need to do
3903 // anything here.
3904 if (!E->getType()->isVariableArrayType()) {
3905 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3906 "Expected pointer to array");
3907 Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3908 }
3909
3910 // The result of this decay conversion points to an array element within the
3911 // base lvalue. However, since TBAA currently does not support representing
3912 // accesses to elements of member arrays, we conservatively represent accesses
3913   // to the pointee object as if it had no base lvalue specified.
3914 // TODO: Support TBAA for member arrays.
3915 QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
3916 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
3917 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
3918
3919 return Addr.withElementType(ConvertTypeForMem(EltType));
3920 }
3921
3922 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
3923 /// array to pointer, return the array subexpression.
3924 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
3925 // If this isn't just an array->pointer decay, bail out.
3926 const auto *CE = dyn_cast<CastExpr>(E);
3927 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
3928 return nullptr;
3929
3930   // If this is a decay from a variable-width array, bail out.
3931 const Expr *SubExpr = CE->getSubExpr();
3932 if (SubExpr->getType()->isVariableArrayType())
3933 return nullptr;
3934
3935 return SubExpr;
3936 }
3937
3938 static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
3939 llvm::Type *elemType,
3940 llvm::Value *ptr,
3941 ArrayRef<llvm::Value*> indices,
3942 bool inbounds,
3943 bool signedIndices,
3944 SourceLocation loc,
3945 const llvm::Twine &name = "arrayidx") {
3946 if (inbounds) {
3947 return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
3948 CodeGenFunction::NotSubtraction, loc,
3949 name);
3950 } else {
3951 return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
3952 }
3953 }
3954
3955 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
3956 ArrayRef<llvm::Value *> indices,
3957 llvm::Type *elementType, bool inbounds,
3958 bool signedIndices, SourceLocation loc,
3959 CharUnits align,
3960 const llvm::Twine &name = "arrayidx") {
3961 if (inbounds) {
3962 return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
3963 CodeGenFunction::NotSubtraction, loc,
3964 align, name);
3965 } else {
3966 return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
3967 }
3968 }
3969
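/// Compute the alignment of an array element from the array alignment, the
/// element index, and the element size. For example (illustrative): in a
/// 16-byte-aligned array of 4-byte elements, a constant index of 4 (byte
/// offset 16) preserves the full 16-byte alignment, while a non-constant index
/// only guarantees the worst-case element alignment of 4 bytes.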
3970 static CharUnits getArrayElementAlign(CharUnits arrayAlign,
3971 llvm::Value *idx,
3972 CharUnits eltSize) {
3973 // If we have a constant index, we can use the exact offset of the
3974 // element we're accessing.
3975 if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
3976 CharUnits offset = constantIdx->getZExtValue() * eltSize;
3977 return arrayAlign.alignmentAtOffset(offset);
3978
3979 // Otherwise, use the worst-case alignment for any element.
3980 } else {
3981 return arrayAlign.alignmentOfArrayElement(eltSize);
3982 }
3983 }
3984
3985 static QualType getFixedSizeElementType(const ASTContext &ctx,
3986 const VariableArrayType *vla) {
3987 QualType eltType;
3988 do {
3989 eltType = vla->getElementType();
3990 } while ((vla = ctx.getAsVariableArrayType(eltType)));
3991 return eltType;
3992 }
3993
3994 static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
3995 return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
3996 }
3997
3998 static bool hasBPFPreserveStaticOffset(const Expr *E) {
3999 if (!E)
4000 return false;
4001 QualType PointeeType = E->getType()->getPointeeType();
4002 if (PointeeType.isNull())
4003 return false;
4004 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4005 return hasBPFPreserveStaticOffset(BaseDecl);
4006 return false;
4007 }
4008
4009 // Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
4010 static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
4011 Address &Addr) {
4012 if (!CGF.getTarget().getTriple().isBPF())
4013 return Addr;
4014
4015 llvm::Function *Fn =
4016 CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4017 llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
4018 return Address(Call, Addr.getElementType(), Addr.getAlignment());
4019 }
4020
4021 /// Given an array base, check whether its member access belongs to a record
4022 /// with the preserve_access_index attribute or not.
4023 static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4024 if (!ArrayBase || !CGF.getDebugInfo())
4025 return false;
4026
4027 // Only support base as either a MemberExpr or DeclRefExpr.
4028 // DeclRefExpr to cover cases like:
4029 // struct s { int a; int b[10]; };
4030 // struct s *p;
4031 // p[1].a
4032 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4033 // p->b[5] is a MemberExpr example.
4034 const Expr *E = ArrayBase->IgnoreImpCasts();
4035 if (const auto *ME = dyn_cast<MemberExpr>(E))
4036 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4037
4038 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4039 const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4040 if (!VarDef)
4041 return false;
4042
4043 const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4044 if (!PtrT)
4045 return false;
4046
4047 const auto *PointeeT = PtrT->getPointeeType()
4048 ->getUnqualifiedDesugaredType();
4049 if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
4050 return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4051 return false;
4052 }
4053
4054 return false;
4055 }
4056
4057 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
4058 ArrayRef<llvm::Value *> indices,
4059 QualType eltType, bool inbounds,
4060 bool signedIndices, SourceLocation loc,
4061 QualType *arrayType = nullptr,
4062 const Expr *Base = nullptr,
4063 const llvm::Twine &name = "arrayidx") {
4064   // All the indices except the last must be zero.
4065 #ifndef NDEBUG
4066 for (auto *idx : indices.drop_back())
4067 assert(isa<llvm::ConstantInt>(idx) &&
4068 cast<llvm::ConstantInt>(idx)->isZero());
4069 #endif
4070
4071 // Determine the element size of the statically-sized base. This is
4072 // the thing that the indices are expressed in terms of.
4073 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4074 eltType = getFixedSizeElementType(CGF.getContext(), vla);
4075 }
4076
4077 // We can use that to compute the best alignment of the element.
4078 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4079 CharUnits eltAlign =
4080 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4081
4082 if (hasBPFPreserveStaticOffset(Base))
4083 addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4084
4085 llvm::Value *eltPtr;
4086 auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
4087 if (!LastIndex ||
4088 (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
4089 addr = emitArraySubscriptGEP(CGF, addr, indices,
4090 CGF.ConvertTypeForMem(eltType), inbounds,
4091 signedIndices, loc, eltAlign, name);
4092 return addr;
4093 } else {
4094     // Remember the original array subscript for the BPF target.
4095 unsigned idx = LastIndex->getZExtValue();
4096 llvm::DIType *DbgInfo = nullptr;
4097 if (arrayType)
4098 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4099 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4100 addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4101 idx, DbgInfo);
4102 }
4103
4104 return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4105 }
4106
4107 /// Compute the offset in bits of a field from the beginning of the record.
4108 static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
4109 const FieldDecl *FD, int64_t &Offset) {
4110 ASTContext &Ctx = CGF.getContext();
4111 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4112 unsigned FieldNo = 0;
4113
4114 for (const Decl *D : RD->decls()) {
4115 if (const auto *Record = dyn_cast<RecordDecl>(D))
4116 if (getFieldOffsetInBits(CGF, Record, FD, Offset)) {
4117 Offset += Layout.getFieldOffset(FieldNo);
4118 return true;
4119 }
4120
4121 if (const auto *Field = dyn_cast<FieldDecl>(D))
4122 if (FD == Field) {
4123 Offset += Layout.getFieldOffset(FieldNo);
4124 return true;
4125 }
4126
4127 if (isa<FieldDecl>(D))
4128 ++FieldNo;
4129 }
4130
4131 return false;
4132 }
4133
4134 /// Returns the relative offset difference between \p FD1 and \p FD2.
4135 /// \code
4136 /// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4137 /// \endcode
4138 /// Both fields must be within the same struct.
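/// For example (illustrative), given
/// \code
///   struct foo { int FD2; int FD1; };
/// \endcode
/// the result would be +32 bits on a target with 32-bit 'int', and -32 bits
/// with the arguments swapped.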
4139 static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4140 const FieldDecl *FD1,
4141 const FieldDecl *FD2) {
4142 const RecordDecl *FD1OuterRec =
4143 FD1->getParent()->getOuterLexicalRecordContext();
4144 const RecordDecl *FD2OuterRec =
4145 FD2->getParent()->getOuterLexicalRecordContext();
4146
4147 if (FD1OuterRec != FD2OuterRec)
4148 // Fields must be within the same RecordDecl.
4149 return std::optional<int64_t>();
4150
4151 int64_t FD1Offset = 0;
4152 if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4153 return std::optional<int64_t>();
4154
4155 int64_t FD2Offset = 0;
4156 if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4157 return std::optional<int64_t>();
4158
4159 return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4160 }
4161
4162 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
4163 bool Accessed) {
4164 // The index must always be an integer, which is not an aggregate. Emit it
4165 // in lexical order (this complexity is, sadly, required by C++17).
4166 llvm::Value *IdxPre =
4167 (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
4168 bool SignedIndices = false;
4169 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4170 auto *Idx = IdxPre;
4171 if (E->getLHS() != E->getIdx()) {
4172 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4173 Idx = EmitScalarExpr(E->getIdx());
4174 }
4175
4176 QualType IdxTy = E->getIdx()->getType();
4177 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4178 SignedIndices |= IdxSigned;
4179
4180 if (SanOpts.has(SanitizerKind::ArrayBounds))
4181 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
4182
4183 // Extend or truncate the index type to 32 or 64 bits.
4184 if (Promote && Idx->getType() != IntPtrTy)
4185 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
4186
4187 return Idx;
4188 };
4189 IdxPre = nullptr;
4190
4191 // If the base is a vector type, then we are forming a vector element lvalue
4192 // with this subscript.
4193 if (E->getBase()->getType()->isSubscriptableVectorType() &&
4194 !isa<ExtVectorElementExpr>(E->getBase())) {
4195 // Emit the vector as an lvalue to get its address.
4196 LValue LHS = EmitLValue(E->getBase());
4197 auto *Idx = EmitIdxAfterBase(/*Promote*/false);
4198 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
4199 return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
4200 LHS.getBaseInfo(), TBAAAccessInfo());
4201 }
4202
4203 // All the other cases basically behave like simple offsetting.
4204
4205 // Handle the extvector case we ignored above.
4206 if (isa<ExtVectorElementExpr>(E->getBase())) {
4207 LValue LV = EmitLValue(E->getBase());
4208 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4209 Address Addr = EmitExtVectorElementLValue(LV);
4210
4211 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
4212 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
4213 SignedIndices, E->getExprLoc());
4214 return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
4215 CGM.getTBAAInfoForSubobject(LV, EltType));
4216 }
4217
4218 LValueBaseInfo EltBaseInfo;
4219 TBAAAccessInfo EltTBAAInfo;
4220 Address Addr = Address::invalid();
4221 if (const VariableArrayType *vla =
4222 getContext().getAsVariableArrayType(E->getType())) {
4223 // The base must be a pointer, which is not an aggregate. Emit
4224 // it. It needs to be emitted first in case it's what captures
4225 // the VLA bounds.
4226 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4227 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4228
4229 // The element count here is the total number of non-VLA elements.
4230 llvm::Value *numElements = getVLASize(vla).NumElts;
4231
4232 // Effectively, the multiply by the VLA size is part of the GEP.
4233 // GEP indexes are signed, and scaling an index isn't permitted to
4234 // signed-overflow, so we use the same semantics for our explicit
4235 // multiply. We suppress this if overflow is not undefined behavior.
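// For illustration only (hypothetical user code, not from this file): given
// \code
//   void f(unsigned n, int (*p)[n], unsigned i) { p[i][0] = 0; }
// \endcode
// the subscript p[i] has VLA type int[n], so the index i is multiplied by the
// captured element count n before the GEP over the fixed-size element type.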
4236 if (getLangOpts().isSignedOverflowDefined()) {
4237 Idx = Builder.CreateMul(Idx, numElements);
4238 } else {
4239 Idx = Builder.CreateNSWMul(Idx, numElements);
4240 }
4241
4242 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
4243 !getLangOpts().isSignedOverflowDefined(),
4244 SignedIndices, E->getExprLoc());
4245
4246 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
4247 // Indexing over an interface, as in "NSString *P; P[4];"
4248
4249 // Emit the base pointer.
4250 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4251 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4252
4253 CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
4254 llvm::Value *InterfaceSizeVal =
4255 llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
4256
4257 llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
4258
4259 // We don't necessarily build correct LLVM struct types for ObjC
4260 // interfaces, so we can't rely on GEP to do this scaling
4261 // correctly; instead we need to cast to i8*. FIXME: is this actually
4262 // true? A lot of other things in the fragile ABI would break...
4263 llvm::Type *OrigBaseElemTy = Addr.getElementType();
4264
4265 // Do the GEP.
4266 CharUnits EltAlign =
4267 getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
4268 llvm::Value *EltPtr =
4269 emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
4270 ScaledIdx, false, SignedIndices, E->getExprLoc());
4271 Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
4272 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4273 // If this is A[i] where A is an array, the frontend will have decayed the
4274 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4275 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4276 // "gep x, i" here. Emit one "gep A, 0, i".
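// For illustration only (hypothetical user code, not from this file): for
// \code
//   int A[10]; int v = A[i];
// \endcode
// this emits roughly "getelementptr inbounds [10 x i32], ptr %A, i64 0,
// i64 %i" instead of decaying A first and then indexing the decayed pointer.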
4277 assert(Array->getType()->isArrayType() &&
4278 "Array to pointer decay must have array source type!");
4279 LValue ArrayLV;
4280 // For simple multidimensional array indexing, set the 'accessed' flag for
4281 // better bounds-checking of the base expression.
4282 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4283 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4284 else
4285 ArrayLV = EmitLValue(Array);
4286 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4287
4288 if (SanOpts.has(SanitizerKind::ArrayBounds)) {
4289 // If the array being accessed has a "counted_by" attribute, generate
4290 // bounds checking code. The "count" field is at the top level of the
4291 // struct, or in an anonymous struct that's also at the top level. Future
4292 // expansions may allow the "count" to reside at any place in the struct,
4293 // but the value of "counted_by" will be a "simple" path to the count,
4294 // i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4295 // similar to emit the correct GEP.
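// For illustration only (hypothetical user code, not from this file):
// \code
//   struct bucket {
//     int count;
//     int elems[] __attribute__((counted_by(count)));
//   };
// \endcode
// With -fsanitize=array-bounds, an access such as b->elems[i] is checked
// against the value loaded from b->count.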
4296 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4297 getLangOpts().getStrictFlexArraysLevel();
4298
4299 if (const auto *ME = dyn_cast<MemberExpr>(Array);
4300 ME &&
4301 ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
4302 ME->getMemberDecl()->getType()->isCountAttributedType()) {
4303 const FieldDecl *FAMDecl = dyn_cast<FieldDecl>(ME->getMemberDecl());
4304 if (const FieldDecl *CountFD = FindCountedByField(FAMDecl)) {
4305 if (std::optional<int64_t> Diff =
4306 getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
4307 CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);
4308
4309 // Create a GEP with a byte offset between the FAM and count and
4310 // use that to load the count value.
4311 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(
4312 ArrayLV.getAddress(), Int8PtrTy, Int8Ty);
4313
4314 llvm::Type *CountTy = ConvertType(CountFD->getType());
4315 llvm::Value *Res = Builder.CreateInBoundsGEP(
4316 Int8Ty, Addr.emitRawPointer(*this),
4317 Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
4318 Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
4319 ".counted_by.load");
4320
4321 // Now emit the bounds checking.
4322 EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
4323 Array->getType(), Accessed);
4324 }
4325 }
4326 }
4327 }
4328
4329 // Propagate the alignment from the array itself to the result.
4330 QualType arrayType = Array->getType();
4331 Addr = emitArraySubscriptGEP(
4332 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4333 E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
4334 E->getExprLoc(), &arrayType, E->getBase());
4335 EltBaseInfo = ArrayLV.getBaseInfo();
4336 EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
4337 } else {
4338 // The base must be a pointer; emit it with an estimate of its alignment.
4339 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4340 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4341 QualType ptrType = E->getBase()->getType();
4342 Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
4343 !getLangOpts().isSignedOverflowDefined(),
4344 SignedIndices, E->getExprLoc(), &ptrType,
4345 E->getBase());
4346 }
4347
4348 LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
4349
4350 if (getLangOpts().ObjC &&
4351 getLangOpts().getGC() != LangOptions::NonGC) {
4352 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
4353 setObjCGCLValueClass(getContext(), E, LV);
4354 }
4355 return LV;
4356 }
4357
4358 LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
4359 assert(
4360 !E->isIncomplete() &&
4361 "incomplete matrix subscript expressions should be rejected during Sema");
4362 LValue Base = EmitLValue(E->getBase());
4363 llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
4364 llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
4365 llvm::Value *NumRows = Builder.getIntN(
4366 RowIdx->getType()->getScalarSizeInBits(),
4367 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
4368 llvm::Value *FinalIdx =
4369 Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
4370 return LValue::MakeMatrixElt(
4371 MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
4372 E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
4373 }
4374
4375 static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
4376 LValueBaseInfo &BaseInfo,
4377 TBAAAccessInfo &TBAAInfo,
4378 QualType BaseTy, QualType ElTy,
4379 bool IsLowerBound) {
4380 LValue BaseLVal;
4381 if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
4382 BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
4383 if (BaseTy->isArrayType()) {
4384 Address Addr = BaseLVal.getAddress();
4385 BaseInfo = BaseLVal.getBaseInfo();
4386
4387 // If the array type was an incomplete type, we need to make sure
4388 // the decay ends up being the right type.
4389 llvm::Type *NewTy = CGF.ConvertType(BaseTy);
4390 Addr = Addr.withElementType(NewTy);
4391
4392 // Note that VLA pointers are always decayed, so we don't need to do
4393 // anything here.
4394 if (!BaseTy->isVariableArrayType()) {
4395 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4396 "Expected pointer to array");
4397 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4398 }
4399
4400 return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
4401 }
4402 LValueBaseInfo TypeBaseInfo;
4403 TBAAAccessInfo TypeTBAAInfo;
4404 CharUnits Align =
4405 CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
4406 BaseInfo.mergeForCast(TypeBaseInfo);
4407 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
4408 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
4409 CGF.ConvertTypeForMem(ElTy), Align);
4410 }
4411 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
4412 }
4413
4414 LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
4415 bool IsLowerBound) {
4416
4417 assert(!E->isOpenACCArraySection() &&
4418 "OpenACC Array section codegen not implemented");
4419
4420 QualType BaseTy = ArraySectionExpr::getBaseOriginalType(E->getBase());
4421 QualType ResultExprTy;
4422 if (auto *AT = getContext().getAsArrayType(BaseTy))
4423 ResultExprTy = AT->getElementType();
4424 else
4425 ResultExprTy = BaseTy->getPointeeType();
4426 llvm::Value *Idx = nullptr;
4427 if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
4428 // Requesting the lower bound, or the upper bound when no ':' (and thus no
4429 // length) was written for the default length -> length = 1.
4430 // Idx = LowerBound ?: 0;
4431 if (auto *LowerBound = E->getLowerBound()) {
4432 Idx = Builder.CreateIntCast(
4433 EmitScalarExpr(LowerBound), IntPtrTy,
4434 LowerBound->getType()->hasSignedIntegerRepresentation());
4435 } else
4436 Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
4437 } else {
4438 // Try to emit the length or lower bound as a constant. If this is
4439 // possible, 1 is subtracted from the constant length or lower bound.
4440 // Otherwise, emit LLVM IR for (LB + Len) - 1.
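// For illustration only (hypothetical user code, not from this file): for a
// section such as
// \code
//   #pragma omp target map(tofrom: a[lb:len])
// \endcode
// the upper-bound index is lb + len - 1, folded to a constant when both lb
// and len are integer constant expressions.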
4441 auto &C = CGM.getContext();
4442 auto *Length = E->getLength();
4443 llvm::APSInt ConstLength;
4444 if (Length) {
4445 // Idx = LowerBound + Length - 1;
4446 if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
4447 ConstLength = CL->zextOrTrunc(PointerWidthInBits);
4448 Length = nullptr;
4449 }
4450 auto *LowerBound = E->getLowerBound();
4451 llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
4452 if (LowerBound) {
4453 if (std::optional<llvm::APSInt> LB =
4454 LowerBound->getIntegerConstantExpr(C)) {
4455 ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
4456 LowerBound = nullptr;
4457 }
4458 }
4459 if (!Length)
4460 --ConstLength;
4461 else if (!LowerBound)
4462 --ConstLowerBound;
4463
4464 if (Length || LowerBound) {
4465 auto *LowerBoundVal =
4466 LowerBound
4467 ? Builder.CreateIntCast(
4468 EmitScalarExpr(LowerBound), IntPtrTy,
4469 LowerBound->getType()->hasSignedIntegerRepresentation())
4470 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
4471 auto *LengthVal =
4472 Length
4473 ? Builder.CreateIntCast(
4474 EmitScalarExpr(Length), IntPtrTy,
4475 Length->getType()->hasSignedIntegerRepresentation())
4476 : llvm::ConstantInt::get(IntPtrTy, ConstLength);
4477 Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
4478 /*HasNUW=*/false,
4479 !getLangOpts().isSignedOverflowDefined());
4480 if (Length && LowerBound) {
4481 Idx = Builder.CreateSub(
4482 Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
4483 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4484 }
4485 } else
4486 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
4487 } else {
4488 // Idx = ArraySize - 1;
4489 QualType ArrayTy = BaseTy->isPointerType()
4490 ? E->getBase()->IgnoreParenImpCasts()->getType()
4491 : BaseTy;
4492 if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
4493 Length = VAT->getSizeExpr();
4494 if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
4495 ConstLength = *L;
4496 Length = nullptr;
4497 }
4498 } else {
4499 auto *CAT = C.getAsConstantArrayType(ArrayTy);
4500 assert(CAT && "unexpected type for array initializer");
4501 ConstLength = CAT->getSize();
4502 }
4503 if (Length) {
4504 auto *LengthVal = Builder.CreateIntCast(
4505 EmitScalarExpr(Length), IntPtrTy,
4506 Length->getType()->hasSignedIntegerRepresentation());
4507 Idx = Builder.CreateSub(
4508 LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
4509 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4510 } else {
4511 ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
4512 --ConstLength;
4513 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
4514 }
4515 }
4516 }
4517 assert(Idx);
4518
4519 Address EltPtr = Address::invalid();
4520 LValueBaseInfo BaseInfo;
4521 TBAAAccessInfo TBAAInfo;
4522 if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
4523 // The base must be a pointer, which is not an aggregate. Emit
4524 // it. It needs to be emitted first in case it's what captures
4525 // the VLA bounds.
4526 Address Base =
4527 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
4528 BaseTy, VLA->getElementType(), IsLowerBound);
4529 // The element count here is the total number of non-VLA elements.
4530 llvm::Value *NumElements = getVLASize(VLA).NumElts;
4531
4532 // Effectively, the multiply by the VLA size is part of the GEP.
4533 // GEP indexes are signed, and scaling an index isn't permitted to
4534 // signed-overflow, so we use the same semantics for our explicit
4535 // multiply. We suppress this if overflow is not undefined behavior.
4536 if (getLangOpts().isSignedOverflowDefined())
4537 Idx = Builder.CreateMul(Idx, NumElements);
4538 else
4539 Idx = Builder.CreateNSWMul(Idx, NumElements);
4540 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
4541 !getLangOpts().isSignedOverflowDefined(),
4542 /*signedIndices=*/false, E->getExprLoc());
4543 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4544 // If this is A[i] where A is an array, the frontend will have decayed the
4545 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4546 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4547 // "gep x, i" here. Emit one "gep A, 0, i".
4548 assert(Array->getType()->isArrayType() &&
4549 "Array to pointer decay must have array source type!");
4550 LValue ArrayLV;
4551 // For simple multidimensional array indexing, set the 'accessed' flag for
4552 // better bounds-checking of the base expression.
4553 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4554 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4555 else
4556 ArrayLV = EmitLValue(Array);
4557
4558 // Propagate the alignment from the array itself to the result.
4559 EltPtr = emitArraySubscriptGEP(
4560 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4561 ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
4562 /*signedIndices=*/false, E->getExprLoc());
4563 BaseInfo = ArrayLV.getBaseInfo();
4564 TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
4565 } else {
4566 Address Base =
4567 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
4568 ResultExprTy, IsLowerBound);
4569 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
4570 !getLangOpts().isSignedOverflowDefined(),
4571 /*signedIndices=*/false, E->getExprLoc());
4572 }
4573
4574 return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
4575 }
4576
4577 LValue CodeGenFunction::
4578 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
4579 // Emit the base vector as an l-value.
4580 LValue Base;
4581
4582 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
4583 if (E->isArrow()) {
4584 // If it is a pointer to a vector, emit the address and form an lvalue with
4585 // it.
4586 LValueBaseInfo BaseInfo;
4587 TBAAAccessInfo TBAAInfo;
4588 Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
4589 const auto *PT = E->getBase()->getType()->castAs<PointerType>();
4590 Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
4591 Base.getQuals().removeObjCGCAttr();
4592 } else if (E->getBase()->isGLValue()) {
4593 // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
4594 // emit the base as an lvalue.
4595 assert(E->getBase()->getType()->isVectorType());
4596 Base = EmitLValue(E->getBase());
4597 } else {
4598 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
4599 assert(E->getBase()->getType()->isVectorType() &&
4600 "Result must be a vector");
4601 llvm::Value *Vec = EmitScalarExpr(E->getBase());
4602
4603 // Store the vector to memory (because LValue wants an address).
4604 Address VecMem = CreateMemTemp(E->getBase()->getType());
4605 Builder.CreateStore(Vec, VecMem);
4606 Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
4607 AlignmentSource::Decl);
4608 }
4609
4610 QualType type =
4611 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
4612
4613 // Encode the element access list into a vector of unsigned indices.
4614 SmallVector<uint32_t, 4> Indices;
4615 E->getEncodedElementAccess(Indices);
4616
4617 if (Base.isSimple()) {
4618 llvm::Constant *CV =
4619 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
4620 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
4621 Base.getBaseInfo(), TBAAAccessInfo());
4622 }
4623 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
4624
4625 llvm::Constant *BaseElts = Base.getExtVectorElts();
4626 SmallVector<llvm::Constant *, 4> CElts;
4627
4628 for (unsigned i = 0, e = Indices.size(); i != e; ++i)
4629 CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
4630 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
4631 return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
4632 Base.getBaseInfo(), TBAAAccessInfo());
4633 }
4634
4635 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
4636 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
4637 EmitIgnoredExpr(E->getBase());
4638 return EmitDeclRefLValue(DRE);
4639 }
4640
4641 Expr *BaseExpr = E->getBase();
4642 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
4643 LValue BaseLV;
4644 if (E->isArrow()) {
4645 LValueBaseInfo BaseInfo;
4646 TBAAAccessInfo TBAAInfo;
4647 Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
4648 QualType PtrTy = BaseExpr->getType()->getPointeeType();
4649 SanitizerSet SkippedChecks;
4650 bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
4651 if (IsBaseCXXThis)
4652 SkippedChecks.set(SanitizerKind::Alignment, true);
4653 if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
4654 SkippedChecks.set(SanitizerKind::Null, true);
4655 EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
4656 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
4657 BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
4658 } else
4659 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
4660
4661 NamedDecl *ND = E->getMemberDecl();
4662 if (auto *Field = dyn_cast<FieldDecl>(ND)) {
4663 LValue LV = EmitLValueForField(BaseLV, Field);
4664 setObjCGCLValueClass(getContext(), E, LV);
4665 if (getLangOpts().OpenMP) {
4666 // If the member was explicitly marked as nontemporal, mark it as
4667 // nontemporal. If the base lvalue is marked as nontemporal, mark access
4668 // to children as nontemporal too.
4669 if ((IsWrappedCXXThis(BaseExpr) &&
4670 CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
4671 BaseLV.isNontemporal())
4672 LV.setNontemporal(/*Value=*/true);
4673 }
4674 return LV;
4675 }
4676
4677 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
4678 return EmitFunctionDeclLValue(*this, E, FD);
4679
4680 llvm_unreachable("Unhandled member declaration!");
4681 }
4682
4683 /// Given that we are currently emitting a lambda, emit an l-value for
4684 /// one of its members.
4685 ///
4686 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
4687 llvm::Value *ThisValue) {
4688 bool HasExplicitObjectParameter = false;
4689 const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
4690 if (MD) {
4691 HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
4692 assert(MD->getParent()->isLambda());
4693 assert(MD->getParent() == Field->getParent());
4694 }
4695 LValue LambdaLV;
4696 if (HasExplicitObjectParameter) {
4697 const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
4698 auto It = LocalDeclMap.find(D);
4699 assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
4700 Address AddrOfExplicitObject = It->getSecond();
4701 if (D->getType()->isReferenceType())
4702 LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
4703 AlignmentSource::Decl);
4704 else
4705 LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
4706 D->getType().getNonReferenceType());
4707
4708 // Make sure we have an lvalue to the lambda itself and not a derived class.
4709 auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
4710 auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
4711 if (ThisTy != LambdaTy) {
4712 const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
4713 Address Base = GetAddressOfBaseClass(
4714 LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
4715 BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
4716 LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
4717 }
4718 } else {
4719 QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
4720 LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
4721 }
4722 return EmitLValueForField(LambdaLV, Field);
4723 }
4724
4725 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
4726 return EmitLValueForLambdaField(Field, CXXABIThisValue);
4727 }
4728
4729 /// Get the field index in the debug info. The debug info structure/union
4730 /// will ignore the unnamed bitfields.
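/// For illustration only (hypothetical user code, not from this file): in
/// \code
///   struct S { int a; int : 0; int b; };
/// \endcode
/// 'b' has AST field index 2 but debug-info index 1, because the unnamed
/// zero-width bit-field is skipped.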
4731 unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
4732 unsigned FieldIndex) {
4733 unsigned I = 0, Skipped = 0;
4734
4735 for (auto *F : Rec->getDefinition()->fields()) {
4736 if (I == FieldIndex)
4737 break;
4738 if (F->isUnnamedBitField())
4739 Skipped++;
4740 I++;
4741 }
4742
4743 return FieldIndex - Skipped;
4744 }
4745
4746 /// Get the address of a zero-sized field within a record. The resulting
4747 /// address doesn't necessarily have the right type.
4748 static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
4749 const FieldDecl *Field) {
4750 CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
4751 CGF.getContext().getFieldOffset(Field));
4752 if (Offset.isZero())
4753 return Base;
4754 Base = Base.withElementType(CGF.Int8Ty);
4755 return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
4756 }
4757
4758 /// Drill down to the storage of a field without walking into
4759 /// reference types.
4760 ///
4761 /// The resulting address doesn't necessarily have the right type.
4762 static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
4763 const FieldDecl *field) {
4764 if (isEmptyFieldForLayout(CGF.getContext(), field))
4765 return emitAddrOfZeroSizeField(CGF, base, field);
4766
4767 const RecordDecl *rec = field->getParent();
4768
4769 unsigned idx =
4770 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4771
4772 return CGF.Builder.CreateStructGEP(base, idx, field->getName());
4773 }
4774
4775 static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
4776 Address addr, const FieldDecl *field) {
4777 const RecordDecl *rec = field->getParent();
4778 llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
4779 base.getType(), rec->getLocation());
4780
4781 unsigned idx =
4782 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4783
4784 return CGF.Builder.CreatePreserveStructAccessIndex(
4785 addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
4786 }
4787
4788 static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
4789 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
4790 if (!RD)
4791 return false;
4792
4793 if (RD->isDynamicClass())
4794 return true;
4795
4796 for (const auto &Base : RD->bases())
4797 if (hasAnyVptr(Base.getType(), Context))
4798 return true;
4799
4800 for (const FieldDecl *Field : RD->fields())
4801 if (hasAnyVptr(Field->getType(), Context))
4802 return true;
4803
4804 return false;
4805 }
4806
4807 LValue CodeGenFunction::EmitLValueForField(LValue base,
4808 const FieldDecl *field) {
4809 LValueBaseInfo BaseInfo = base.getBaseInfo();
4810
4811 if (field->isBitField()) {
4812 const CGRecordLayout &RL =
4813 CGM.getTypes().getCGRecordLayout(field->getParent());
4814 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
4815 const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
4816 CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
4817 Info.VolatileStorageSize != 0 &&
4818 field->getType()
4819 .withCVRQualifiers(base.getVRQualifiers())
4820 .isVolatileQualified();
4821 Address Addr = base.getAddress();
4822 unsigned Idx = RL.getLLVMFieldNo(field);
4823 const RecordDecl *rec = field->getParent();
4824 if (hasBPFPreserveStaticOffset(rec))
4825 Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
4826 if (!UseVolatile) {
4827 if (!IsInPreservedAIRegion &&
4828 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4829 if (Idx != 0)
4830 // For structs, we GEP to the field that the record layout suggests.
4831 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
4832 } else {
4833 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
4834 getContext().getRecordType(rec), rec->getLocation());
4835 Addr = Builder.CreatePreserveStructAccessIndex(
4836 Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
4837 DbgInfo);
4838 }
4839 }
4840 const unsigned SS =
4841 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
4842 // Get the access type.
4843 llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
4844 Addr = Addr.withElementType(FieldIntTy);
4845 if (UseVolatile) {
4846 const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
4847 if (VolatileOffset)
4848 Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
4849 }
4850
4851 QualType fieldType =
4852 field->getType().withCVRQualifiers(base.getVRQualifiers());
4853 // TODO: Support TBAA for bit fields.
4854 LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
4855 return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
4856 TBAAAccessInfo());
4857 }
4858
4859 // Fields of may-alias structures are may-alias themselves.
4860 // FIXME: this should get propagated down through anonymous structs
4861 // and unions.
4862 QualType FieldType = field->getType();
4863 const RecordDecl *rec = field->getParent();
4864 AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
4865 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
4866 TBAAAccessInfo FieldTBAAInfo;
4867 if (base.getTBAAInfo().isMayAlias() ||
4868 rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
4869 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4870 } else if (rec->isUnion()) {
4871 // TODO: Support TBAA for unions.
4872 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4873 } else {
4874 // If no base type has been assigned for the base access, then try to generate
4875 // one for this base lvalue.
4876 FieldTBAAInfo = base.getTBAAInfo();
4877 if (!FieldTBAAInfo.BaseType) {
4878 FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
4879 assert(!FieldTBAAInfo.Offset &&
4880 "Nonzero offset for an access with no base type!");
4881 }
4882
4883 // Adjust offset to be relative to the base type.
4884 const ASTRecordLayout &Layout =
4885 getContext().getASTRecordLayout(field->getParent());
4886 unsigned CharWidth = getContext().getCharWidth();
4887 if (FieldTBAAInfo.BaseType)
4888 FieldTBAAInfo.Offset +=
4889 Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
4890
4891 // Update the final access type and size.
4892 FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
4893 FieldTBAAInfo.Size =
4894 getContext().getTypeSizeInChars(FieldType).getQuantity();
4895 }
4896
4897 Address addr = base.getAddress();
4898 if (hasBPFPreserveStaticOffset(rec))
4899 addr = wrapWithBPFPreserveStaticOffset(*this, addr);
4900 if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
4901 if (CGM.getCodeGenOpts().StrictVTablePointers &&
4902 ClassDef->isDynamicClass()) {
4903 // Getting to any field of a dynamic object requires stripping dynamic
4904 // information provided by invariant.group. This is because accessing
4905 // fields may leak the real address of the dynamic object, which could
4906 // result in miscompilation when the leaked pointer is compared.
4907 auto *stripped =
4908 Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
4909 addr = Address(stripped, addr.getElementType(), addr.getAlignment());
4910 }
4911 }
4912
4913 unsigned RecordCVR = base.getVRQualifiers();
4914 if (rec->isUnion()) {
4915 // For unions, there is no pointer adjustment.
4916 if (CGM.getCodeGenOpts().StrictVTablePointers &&
4917 hasAnyVptr(FieldType, getContext()))
4918 // Because unions can easily skip invariant.barriers, we need to add
4919 // a barrier every time a CXXRecord field with a vptr is referenced.
4920 addr = Builder.CreateLaunderInvariantGroup(addr);
4921
4922 if (IsInPreservedAIRegion ||
4923 (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4924 // Remember the original union field index
4925 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
4926 rec->getLocation());
4927 addr =
4928 Address(Builder.CreatePreserveUnionAccessIndex(
4929 addr.emitRawPointer(*this),
4930 getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
4931 addr.getElementType(), addr.getAlignment());
4932 }
4933
4934 if (FieldType->isReferenceType())
4935 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
4936 } else {
4937 if (!IsInPreservedAIRegion &&
4938 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
4939 // For structs, we GEP to the field that the record layout suggests.
4940 addr = emitAddrOfFieldStorage(*this, addr, field);
4941 else
4942 // Remember the original struct field index
4943 addr = emitPreserveStructAccess(*this, base, addr, field);
4944 }
4945
4946 // If this is a reference field, load the reference right now.
4947 if (FieldType->isReferenceType()) {
4948 LValue RefLVal =
4949 MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4950 if (RecordCVR & Qualifiers::Volatile)
4951 RefLVal.getQuals().addVolatile();
4952 addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
4953
4954 // Qualifiers on the struct don't apply to the referencee.
4955 RecordCVR = 0;
4956 FieldType = FieldType->getPointeeType();
4957 }
4958
4959 // Make sure that the address is pointing to the right type. This is critical
4960 // for both unions and structs.
4961 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
4962
4963 if (field->hasAttr<AnnotateAttr>())
4964 addr = EmitFieldAnnotations(field, addr);
4965
4966 LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4967 LV.getQuals().addCVRQualifiers(RecordCVR);
4968
4969 // __weak attribute on a field is ignored.
4970 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
4971 LV.getQuals().removeObjCGCAttr();
4972
4973 return LV;
4974 }
4975
4976 LValue
4977 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
4978 const FieldDecl *Field) {
4979 QualType FieldType = Field->getType();
4980
4981 if (!FieldType->isReferenceType())
4982 return EmitLValueForField(Base, Field);
4983
4984 Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
4985
4986 // Make sure that the address is pointing to the right type.
4987 llvm::Type *llvmType = ConvertTypeForMem(FieldType);
4988 V = V.withElementType(llvmType);
4989
4990 // TODO: Generate TBAA information that describes this access as a structure
4991 // member access and not just an access to an object of the field's type. This
4992 // should be similar to what we do in EmitLValueForField().
4993 LValueBaseInfo BaseInfo = Base.getBaseInfo();
4994 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
4995 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
4996 return MakeAddrLValue(V, FieldType, FieldBaseInfo,
4997 CGM.getTBAAInfoForSubobject(Base, FieldType));
4998 }
4999
5000 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
5001 if (E->isFileScope()) {
5002 ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
5003 return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
5004 }
5005 if (E->getType()->isVariablyModifiedType())
5006 // make sure to emit the VLA size.
5007 EmitVariablyModifiedType(E->getType());
5008
5009 Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
5010 const Expr *InitExpr = E->getInitializer();
5011 LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
5012
5013 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
5014 /*Init*/ true);
5015
5016 // Block-scope compound literals are destroyed at the end of the enclosing
5017 // scope in C.
5018 if (!getLangOpts().CPlusPlus)
5019 if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
5020 pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
5021 E->getType(), getDestroyer(DtorKind),
5022 DtorKind & EHCleanup);
5023
5024 return Result;
5025 }
5026
5027 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
5028 if (!E->isGLValue())
5029 // Initializing an aggregate temporary in C++11: T{...}.
5030 return EmitAggExprToLValue(E);
5031
5032 // An lvalue initializer list must be initializing a reference.
5033 assert(E->isTransparent() && "non-transparent glvalue init list");
5034 return EmitLValue(E->getInit(0));
5035 }
5036
5037 /// Emit the operand of a glvalue conditional operator. This is either a glvalue
5038 /// or a (possibly-parenthesized) throw-expression. If this is a throw, no
5039 /// LValue is returned and the current block has been terminated.
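/// For illustration only (hypothetical user code, not from this file):
/// \code
///   (use_first ? a : throw std::runtime_error("no lvalue")) = 42;
/// \endcode
/// Only the glvalue arm yields an LValue; the throw arm terminates its block.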
5040 static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
5041 const Expr *Operand) {
5042 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
5043 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
5044 return std::nullopt;
5045 }
5046
5047 return CGF.EmitLValue(Operand);
5048 }
5049
5050 namespace {
5051 // Handle the case where the condition is a constant evaluatable simple integer,
5052 // which means we don't have to separately handle the true/false blocks.
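// For illustration only (hypothetical user code, not from this file): in
// \code
//   (sizeof(long) == 8 ? x : y) = 0;
// \endcode
// the condition folds to a constant, so only the live arm is emitted and no
// branch is generated.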
5053 std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
5054 CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
5055 const Expr *condExpr = E->getCond();
5056 bool CondExprBool;
5057 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5058 const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
5059 if (!CondExprBool)
5060 std::swap(Live, Dead);
5061
5062 if (!CGF.ContainsLabel(Dead)) {
5063 // If the true case is live, we need to track its region.
5064 if (CondExprBool)
5065 CGF.incrementProfileCounter(E);
5066 // If the live arm is a throw expression, emit it and return an undefined
5067 // lvalue because its value can't be used.
5068 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
5069 CGF.EmitCXXThrowExpr(ThrowExpr);
5070 llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
5071 llvm::Type *Ty = CGF.UnqualPtrTy;
5072 return CGF.MakeAddrLValue(
5073 Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
5074 Dead->getType());
5075 }
5076 return CGF.EmitLValue(Live);
5077 }
5078 }
5079 return std::nullopt;
5080 }
5081 struct ConditionalInfo {
5082 llvm::BasicBlock *lhsBlock, *rhsBlock;
5083 std::optional<LValue> LHS, RHS;
5084 };
5085
5086 // Create and generate the 3 blocks for a conditional operator.
5087 // Leaves the 'current block' in the continuation basic block.
5088 template<typename FuncTy>
5089 ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
5090 const AbstractConditionalOperator *E,
5091 const FuncTy &BranchGenFunc) {
5092 ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
5093 CGF.createBasicBlock("cond.false"), std::nullopt,
5094 std::nullopt};
5095 llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
5096
5097 CodeGenFunction::ConditionalEvaluation eval(CGF);
5098 CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
5099 CGF.getProfileCount(E));
5100
5101 // Any temporaries created here are conditional.
5102 CGF.EmitBlock(Info.lhsBlock);
5103 CGF.incrementProfileCounter(E);
5104 eval.begin(CGF);
5105 Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
5106 eval.end(CGF);
5107 Info.lhsBlock = CGF.Builder.GetInsertBlock();
5108
5109 if (Info.LHS)
5110 CGF.Builder.CreateBr(endBlock);
5111
5112 // Any temporaries created here are conditional.
5113 CGF.EmitBlock(Info.rhsBlock);
5114 eval.begin(CGF);
5115 Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
5116 eval.end(CGF);
5117 Info.rhsBlock = CGF.Builder.GetInsertBlock();
5118 CGF.EmitBlock(endBlock);
5119
5120 return Info;
5121 }
5122 } // namespace
5123
5124 void CodeGenFunction::EmitIgnoredConditionalOperator(
5125 const AbstractConditionalOperator *E) {
5126 if (!E->isGLValue()) {
5127 // ?: here should be an aggregate.
5128 assert(hasAggregateEvaluationKind(E->getType()) &&
5129 "Unexpected conditional operator!");
5130 return (void)EmitAggExprToLValue(E);
5131 }
5132
5133 OpaqueValueMapping binding(*this, E);
5134 if (HandleConditionalOperatorLValueSimpleCase(*this, E))
5135 return;
5136
5137 EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
5138 CGF.EmitIgnoredExpr(E);
5139 return LValue{};
5140 });
5141 }
5142 LValue CodeGenFunction::EmitConditionalOperatorLValue(
5143 const AbstractConditionalOperator *expr) {
5144 if (!expr->isGLValue()) {
5145 // ?: here should be an aggregate.
5146 assert(hasAggregateEvaluationKind(expr->getType()) &&
5147 "Unexpected conditional operator!");
5148 return EmitAggExprToLValue(expr);
5149 }
5150
5151 OpaqueValueMapping binding(*this, expr);
5152 if (std::optional<LValue> Res =
5153 HandleConditionalOperatorLValueSimpleCase(*this, expr))
5154 return *Res;
5155
5156 ConditionalInfo Info = EmitConditionalBlocks(
5157 *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
5158 return EmitLValueOrThrowExpression(CGF, E);
5159 });
5160
5161 if ((Info.LHS && !Info.LHS->isSimple()) ||
5162 (Info.RHS && !Info.RHS->isSimple()))
5163 return EmitUnsupportedLValue(expr, "conditional operator");
5164
5165 if (Info.LHS && Info.RHS) {
5166 Address lhsAddr = Info.LHS->getAddress();
5167 Address rhsAddr = Info.RHS->getAddress();
5168 Address result = mergeAddressesInConditionalExpr(
5169 lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
5170 Builder.GetInsertBlock(), expr->getType());
5171 AlignmentSource alignSource =
5172 std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
5173 Info.RHS->getBaseInfo().getAlignmentSource());
5174 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
5175 Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
5176 return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
5177 TBAAInfo);
5178 } else {
5179 assert((Info.LHS || Info.RHS) &&
5180 "both operands of glvalue conditional are throw-expressions?");
5181 return Info.LHS ? *Info.LHS : *Info.RHS;
5182 }
5183 }
5184
5185 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
5186 /// type. If the cast is to a reference, we can have the usual lvalue result,
5187 /// otherwise if a cast is needed by the code generator in an lvalue context,
5188 /// then it must mean that we need the address of an aggregate in order to
5189 /// access one of its members. This can happen for all the reasons that casts
5190 /// are permitted with aggregate result, including noop aggregate casts, and
5191 /// cast from scalar to union.
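/// For illustration only (hypothetical user code, not from this file): a GCC
/// cast-to-union extension such as
/// \code
///   union U { int i; float f; };
///   union U u = (union U)1;
/// \endcode
/// produces a CK_ToUnion cast, which this function routes through aggregate
/// emission.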
5192 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
5193 switch (E->getCastKind()) {
5194 case CK_ToVoid:
5195 case CK_BitCast:
5196 case CK_LValueToRValueBitCast:
5197 case CK_ArrayToPointerDecay:
5198 case CK_FunctionToPointerDecay:
5199 case CK_NullToMemberPointer:
5200 case CK_NullToPointer:
5201 case CK_IntegralToPointer:
5202 case CK_PointerToIntegral:
5203 case CK_PointerToBoolean:
5204 case CK_IntegralCast:
5205 case CK_BooleanToSignedIntegral:
5206 case CK_IntegralToBoolean:
5207 case CK_IntegralToFloating:
5208 case CK_FloatingToIntegral:
5209 case CK_FloatingToBoolean:
5210 case CK_FloatingCast:
5211 case CK_FloatingRealToComplex:
5212 case CK_FloatingComplexToReal:
5213 case CK_FloatingComplexToBoolean:
5214 case CK_FloatingComplexCast:
5215 case CK_FloatingComplexToIntegralComplex:
5216 case CK_IntegralRealToComplex:
5217 case CK_IntegralComplexToReal:
5218 case CK_IntegralComplexToBoolean:
5219 case CK_IntegralComplexCast:
5220 case CK_IntegralComplexToFloatingComplex:
5221 case CK_DerivedToBaseMemberPointer:
5222 case CK_BaseToDerivedMemberPointer:
5223 case CK_MemberPointerToBoolean:
5224 case CK_ReinterpretMemberPointer:
5225 case CK_AnyPointerToBlockPointerCast:
5226 case CK_ARCProduceObject:
5227 case CK_ARCConsumeObject:
5228 case CK_ARCReclaimReturnedObject:
5229 case CK_ARCExtendBlockObject:
5230 case CK_CopyAndAutoreleaseBlockObject:
5231 case CK_IntToOCLSampler:
5232 case CK_FloatingToFixedPoint:
5233 case CK_FixedPointToFloating:
5234 case CK_FixedPointCast:
5235 case CK_FixedPointToBoolean:
5236 case CK_FixedPointToIntegral:
5237 case CK_IntegralToFixedPoint:
5238 case CK_MatrixCast:
5239 case CK_HLSLVectorTruncation:
5240 case CK_HLSLArrayRValue:
5241 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5242
5243 case CK_Dependent:
5244 llvm_unreachable("dependent cast kind in IR gen!");
5245
5246 case CK_BuiltinFnToFnPtr:
5247 llvm_unreachable("builtin functions are handled elsewhere");
5248
5249 // These are never l-values; just use the aggregate emission code.
5250 case CK_NonAtomicToAtomic:
5251 case CK_AtomicToNonAtomic:
5252 return EmitAggExprToLValue(E);
5253
5254 case CK_Dynamic: {
5255 LValue LV = EmitLValue(E->getSubExpr());
5256 Address V = LV.getAddress();
5257 const auto *DCE = cast<CXXDynamicCastExpr>(E);
5258 return MakeNaturalAlignRawAddrLValue(EmitDynamicCast(V, DCE), E->getType());
5259 }
5260
5261 case CK_ConstructorConversion:
5262 case CK_UserDefinedConversion:
5263 case CK_CPointerToObjCPointerCast:
5264 case CK_BlockPointerToObjCPointerCast:
5265 case CK_LValueToRValue:
5266 return EmitLValue(E->getSubExpr());
5267
5268 case CK_NoOp: {
5269 // CK_NoOp can model a qualification conversion, which can remove an array
5270 // bound and change the IR type.
5271 // FIXME: Once pointee types are removed from IR, remove this.
5272 LValue LV = EmitLValue(E->getSubExpr());
5273 // Propagate the volatile qualifier to the LValue, if it exists in E.
5274 if (E->changesVolatileQualification())
5275 LV.getQuals() = E->getType().getQualifiers();
5276 if (LV.isSimple()) {
5277 Address V = LV.getAddress();
5278 if (V.isValid()) {
5279 llvm::Type *T = ConvertTypeForMem(E->getType());
5280 if (V.getElementType() != T)
5281 LV.setAddress(V.withElementType(T));
5282 }
5283 }
5284 return LV;
5285 }
5286
5287 case CK_UncheckedDerivedToBase:
5288 case CK_DerivedToBase: {
5289 const auto *DerivedClassTy =
5290 E->getSubExpr()->getType()->castAs<RecordType>();
5291 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5292
5293 LValue LV = EmitLValue(E->getSubExpr());
5294 Address This = LV.getAddress();
5295
5296 // Perform the derived-to-base conversion
5297 Address Base = GetAddressOfBaseClass(
5298 This, DerivedClassDecl, E->path_begin(), E->path_end(),
5299 /*NullCheckValue=*/false, E->getExprLoc());
5300
5301 // TODO: Support accesses to members of base classes in TBAA. For now, we
5302 // conservatively pretend that the complete object is of the base class
5303 // type.
5304 return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
5305 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5306 }
5307 case CK_ToUnion:
5308 return EmitAggExprToLValue(E);
5309 case CK_BaseToDerived: {
5310 const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
5311 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5312
5313 LValue LV = EmitLValue(E->getSubExpr());
5314
5315 // Perform the base-to-derived conversion
5316 Address Derived = GetAddressOfDerivedClass(
5317 LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
5318 /*NullCheckValue=*/false);
5319
5320 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
5321 // performed and the object is not of the derived type.
5322 if (sanitizePerformTypeCheck())
5323 EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived,
5324 E->getType());
5325
5326 if (SanOpts.has(SanitizerKind::CFIDerivedCast))
5327 EmitVTablePtrCheckForCast(E->getType(), Derived,
5328 /*MayBeNull=*/false, CFITCK_DerivedCast,
5329 E->getBeginLoc());
5330
5331 return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
5332 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5333 }
5334 case CK_LValueBitCast: {
5335 // This must be a reinterpret_cast (or c-style equivalent).
5336 const auto *CE = cast<ExplicitCastExpr>(E);
5337
5338 CGM.EmitExplicitCastExprType(CE, this);
5339 LValue LV = EmitLValue(E->getSubExpr());
5340 Address V = LV.getAddress().withElementType(
5341 ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
5342
5343 if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
5344 EmitVTablePtrCheckForCast(E->getType(), V,
5345 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
5346 E->getBeginLoc());
5347
5348 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5349 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5350 }
5351 case CK_AddressSpaceConversion: {
5352 LValue LV = EmitLValue(E->getSubExpr());
5353 QualType DestTy = getContext().getPointerType(E->getType());
5354 llvm::Value *V = getTargetHooks().performAddrSpaceCast(
5355 *this, LV.getPointer(*this),
5356 E->getSubExpr()->getType().getAddressSpace(),
5357 E->getType().getAddressSpace(), ConvertType(DestTy));
5358 return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
5359 LV.getAddress().getAlignment()),
5360 E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
5361 }
5362 case CK_ObjCObjectLValueCast: {
5363 LValue LV = EmitLValue(E->getSubExpr());
5364 Address V = LV.getAddress().withElementType(ConvertType(E->getType()));
5365 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5366 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5367 }
5368 case CK_ZeroToOCLOpaqueType:
5369 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
5370
5371 case CK_VectorSplat: {
5372 // LValue results of vector splats are only supported in HLSL.
5373 if (!getLangOpts().HLSL)
5374 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5375 return EmitLValue(E->getSubExpr());
5376 }
5377 }
5378
5379 llvm_unreachable("Unhandled lvalue cast kind?");
5380 }
5381
5382 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
5383 assert(OpaqueValueMappingData::shouldBindAsLValue(e));
5384 return getOrCreateOpaqueLValueMapping(e);
5385 }
5386
5387 LValue
5388 CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
5389 assert(OpaqueValueMapping::shouldBindAsLValue(e));
5390
5391 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
5392 it = OpaqueLValues.find(e);
5393
5394 if (it != OpaqueLValues.end())
5395 return it->second;
5396
5397 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
5398 return EmitLValue(e->getSourceExpr());
5399 }
5400
5401 RValue
5402 CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
5403 assert(!OpaqueValueMapping::shouldBindAsLValue(e));
5404
5405 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
5406 it = OpaqueRValues.find(e);
5407
5408 if (it != OpaqueRValues.end())
5409 return it->second;
5410
5411 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
5412 return EmitAnyExpr(e->getSourceExpr());
5413 }
5414
5415 RValue CodeGenFunction::EmitRValueForField(LValue LV,
5416 const FieldDecl *FD,
5417 SourceLocation Loc) {
5418 QualType FT = FD->getType();
5419 LValue FieldLV = EmitLValueForField(LV, FD);
5420 switch (getEvaluationKind(FT)) {
5421 case TEK_Complex:
5422 return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
5423 case TEK_Aggregate:
5424 return FieldLV.asAggregateRValue();
5425 case TEK_Scalar:
5426 // This routine is used to load fields one-by-one to perform a copy, so
5427 // don't load reference fields.
5428 if (FD->getType()->isReferenceType())
5429 return RValue::get(FieldLV.getPointer(*this));
5430 // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
5431 // primitive load.
5432 if (FieldLV.isBitField())
5433 return EmitLoadOfLValue(FieldLV, Loc);
5434 return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
5435 }
5436 llvm_unreachable("bad evaluation kind");
5437 }
5438
5439 //===--------------------------------------------------------------------===//
5440 // Expression Emission
5441 //===--------------------------------------------------------------------===//
5442
5443 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
5444 ReturnValueSlot ReturnValue) {
5445 // Builtins never have block type.
5446 if (E->getCallee()->getType()->isBlockPointerType())
5447 return EmitBlockCallExpr(E, ReturnValue);
5448
5449 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
5450 return EmitCXXMemberCallExpr(CE, ReturnValue);
5451
5452 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
5453 return EmitCUDAKernelCallExpr(CE, ReturnValue);
5454
5455 // A CXXOperatorCallExpr is created even for explicit object methods, but
5456 // these should be treated like static function calls.
5457 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
5458 if (const auto *MD =
5459 dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
5460 MD && MD->isImplicitObjectMemberFunction())
5461 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
5462
5463 CGCallee callee = EmitCallee(E->getCallee());
5464
5465 if (callee.isBuiltin()) {
5466 return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
5467 E, ReturnValue);
5468 }
5469
5470 if (callee.isPseudoDestructor()) {
5471 return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
5472 }
5473
5474 return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
5475 }
5476
5477 /// Emit a CallExpr without considering whether it might be a subclass.
5478 RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
5479 ReturnValueSlot ReturnValue) {
5480 CGCallee Callee = EmitCallee(E->getCallee());
5481 return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
5482 }
5483
5484 // Detect the unusual situation where an inline version is shadowed by a
5485 // non-inline version. In that case we should pick the external one
5486 // everywhere. That's GCC behavior too.
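// For illustration only (hypothetical user code, not from this file): fortify-
// style headers may declare an inline builtin such as
// \code
//   extern inline __attribute__((gnu_inline, always_inline))
//   void *memcpy(void *d, const void *s, unsigned long n) {
//     return __builtin_memcpy(d, s, n);
//   }
// \endcode
// If some other declaration of memcpy is not such an inline builtin, this
// returns false and the external definition is used instead.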
5487 static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
5488 for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
5489 if (!PD->isInlineBuiltinDeclaration())
5490 return false;
5491 return true;
5492 }
5493
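/// EmitDirectCallee - Build a CGCallee for a direct reference to the given
/// function declaration, handling inline builtin definitions, the no-builtin
/// attributes, and CUDA kernel stubs.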
5494 static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
5495 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
5496
5497 if (auto builtinID = FD->getBuiltinID()) {
5498 std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
5499 std::string NoBuiltins = "no-builtins";
5500
5501 StringRef Ident = CGF.CGM.getMangledName(GD);
5502 std::string FDInlineName = (Ident + ".inline").str();
5503
5504 bool IsPredefinedLibFunction =
5505 CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
5506 bool HasAttributeNoBuiltin =
5507 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
5508 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
5509
5510 // When directly calling an inline builtin, call it through its mangled
5511 // name to make it clear it's not the actual builtin.
5512 if (CGF.CurFn->getName() != FDInlineName &&
5513 OnlyHasInlineBuiltinDeclaration(FD)) {
5514 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
5515 llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
5516 llvm::Module *M = Fn->getParent();
5517 llvm::Function *Clone = M->getFunction(FDInlineName);
5518 if (!Clone) {
5519 Clone = llvm::Function::Create(Fn->getFunctionType(),
5520 llvm::GlobalValue::InternalLinkage,
5521 Fn->getAddressSpace(), FDInlineName, M);
5522 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
5523 }
5524 return CGCallee::forDirect(Clone, GD);
5525 }
5526
5527 // Replaceable builtins provide their own implementation of a builtin. If we
5528 // are in an inline builtin implementation, avoid trivial infinite
5529 // recursion. Honor __attribute__((no_builtin("foo"))) or
5530 // __attribute__((no_builtin)) on the current function; however, if foo is
5531 // not a predefined library function, we must generate the builtin call no
5532 // matter what.
5533 else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
5534 return CGCallee::forBuiltin(builtinID, FD);
5535 }
5536
5537 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
5538 if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
5539 FD->hasAttr<CUDAGlobalAttr>())
5540 CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
5541 cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
5542
5543 return CGCallee::forDirect(CalleePtr, GD);
5544 }
5545
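/// EmitCallee - Compute the callee of a call expression, looking through
/// parentheses, function-to-pointer decay, and template parameter
/// substitutions, resolving direct and pseudo-destructor callees; anything
/// else is treated as an indirect callee.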
5546 CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
5547 E = E->IgnoreParens();
5548
5549 // Look through function-to-pointer decay.
5550 if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
5551 if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
5552 ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
5553 return EmitCallee(ICE->getSubExpr());
5554 }
5555
5556 // Resolve direct calls.
5557 } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
5558 if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
5559 return EmitDirectCallee(*this, FD);
5560 }
5561 } else if (auto ME = dyn_cast<MemberExpr>(E)) {
5562 if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
5563 EmitIgnoredExpr(ME->getBase());
5564 return EmitDirectCallee(*this, FD);
5565 }
5566
5567 // Look through template substitutions.
5568 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
5569 return EmitCallee(NTTP->getReplacement());
5570
5571 // Treat pseudo-destructor calls differently.
5572 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
5573 return CGCallee::forPseudoDestructor(PDE);
5574 }
5575
5576 // Otherwise, we have an indirect reference.
5577 llvm::Value *calleePtr;
5578 QualType functionType;
5579 if (auto ptrType = E->getType()->getAs<PointerType>()) {
5580 calleePtr = EmitScalarExpr(E);
5581 functionType = ptrType->getPointeeType();
5582 } else {
5583 functionType = E->getType();
5584 calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
5585 }
5586 assert(functionType->isFunctionType());
5587
5588 GlobalDecl GD;
5589 if (const auto *VD =
5590 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
5591 GD = GlobalDecl(VD);
5592
5593 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
5594 CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(functionType);
5595 CGCallee callee(calleeInfo, calleePtr, pointerAuth);
5596 return callee;
5597 }
5598
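/// EmitBinaryOperatorLValue - Emit an l-value for a binary operator; only
/// comma, pointer-to-member access, and assignment can yield an l-value here.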
5599 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
5600 // Comma expressions just emit their LHS then their RHS as an l-value.
5601 if (E->getOpcode() == BO_Comma) {
5602 EmitIgnoredExpr(E->getLHS());
5603 EnsureInsertPoint();
5604 return EmitLValue(E->getRHS());
5605 }
5606
5607 if (E->getOpcode() == BO_PtrMemD ||
5608 E->getOpcode() == BO_PtrMemI)
5609 return EmitPointerToDataMemberBinaryExpr(E);
5610
5611 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
5612
5613 // Note that in all of these cases, __block variables need the RHS
5614 // evaluated first just in case the variable gets moved by the RHS.
5615
5616 switch (getEvaluationKind(E->getType())) {
5617 case TEK_Scalar: {
5618 switch (E->getLHS()->getType().getObjCLifetime()) {
5619 case Qualifiers::OCL_Strong:
5620 return EmitARCStoreStrong(E, /*ignored*/ false).first;
5621
5622 case Qualifiers::OCL_Autoreleasing:
5623 return EmitARCStoreAutoreleasing(E).first;
5624
5625 // No reason to do any of these differently.
5626 case Qualifiers::OCL_None:
5627 case Qualifiers::OCL_ExplicitNone:
5628 case Qualifiers::OCL_Weak:
5629 break;
5630 }
5631
5632 // TODO: Can we de-duplicate this code with the corresponding code in
5633 // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
5634 RValue RV;
5635 llvm::Value *Previous = nullptr;
5636 QualType SrcType = E->getRHS()->getType();
5637 // If the LHS is a bitfield and the RHS contains an implicit cast
5638 // expression, we want to extract that value and potentially (if the bitfield
5639 // sanitizer is enabled) use it to check for an implicit conversion.
5640 if (E->getLHS()->refersToBitField()) {
5641 llvm::Value *RHS =
5642 EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
5643 RV = RValue::get(RHS);
5644 } else
5645 RV = EmitAnyExpr(E->getRHS());
5646
5647 LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
5648
5649 if (RV.isScalar())
5650 EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());
5651
5652 if (LV.isBitField()) {
5653 llvm::Value *Result = nullptr;
5654 // If bitfield sanitizers are enabled we want to use the result
5655 // to check whether a truncation or sign change has occurred.
5656 if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
5657 EmitStoreThroughBitfieldLValue(RV, LV, &Result);
5658 else
5659 EmitStoreThroughBitfieldLValue(RV, LV);
5660
5661 // If the expression contained an implicit conversion, make sure
5662 // to use the value before the scalar conversion.
5663 llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
5664 QualType DstType = E->getLHS()->getType();
5665 EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
5666 LV.getBitFieldInfo(), E->getExprLoc());
5667 } else
5668 EmitStoreThroughLValue(RV, LV);
5669
5670 if (getLangOpts().OpenMP)
5671 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
5672 E->getLHS());
5673 return LV;
5674 }
5675
5676 case TEK_Complex:
5677 return EmitComplexAssignmentLValue(E);
5678
5679 case TEK_Aggregate:
5680 return EmitAggExprToLValue(E);
5681 }
5682 llvm_unreachable("bad evaluation kind");
5683 }
5684
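/// EmitCallExprLValue - Emit a call expression used as an l-value, i.e. one
/// returning an aggregate or a reference.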
5685 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
5686 RValue RV = EmitCallExpr(E);
5687
5688 if (!RV.isScalar())
5689 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5690 AlignmentSource::Decl);
5691
5692 assert(E->getCallReturnType(getContext())->isReferenceType() &&
5693 "Can't have a scalar return unless the return type is a "
5694 "reference type!");
5695
5696 return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
5697 }
5698
5699 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
5700 // FIXME: This shouldn't require another copy.
5701 return EmitAggExprToLValue(E);
5702 }
5703
5704 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
5705 assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
5706 && "binding l-value to type which needs a temporary");
5707 AggValueSlot Slot = CreateAggTemp(E->getType());
5708 EmitCXXConstructExpr(E, Slot);
5709 return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
5710 }
5711
5712 LValue
5713 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
5714 return MakeNaturalAlignRawAddrLValue(EmitCXXTypeidExpr(E), E->getType());
5715 }
5716
5717 Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
5718 return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
5719 .withElementType(ConvertType(E->getType()));
5720 }
5721
5722 LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
5723 return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
5724 AlignmentSource::Decl);
5725 }
5726
5727 LValue
5728 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
5729 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
5730 Slot.setExternallyDestructed();
5731 EmitAggExpr(E->getSubExpr(), Slot);
5732 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
5733 return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
5734 }
5735
5736 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
5737 RValue RV = EmitObjCMessageExpr(E);
5738
5739 if (!RV.isScalar())
5740 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5741 AlignmentSource::Decl);
5742
5743 assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
5744 "Can't have a scalar return unless the return type is a "
5745 "reference type!");
5746
5747 return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
5748 }
5749
5750 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
5751 Address V =
5752 CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
5753 return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
5754 }
5755
5756 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
5757 const ObjCIvarDecl *Ivar) {
5758 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
5759 }
5760
5761 llvm::Value *
5762 CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
5763 const ObjCIvarDecl *Ivar) {
5764 llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
5765 QualType PointerDiffType = getContext().getPointerDiffType();
5766 return Builder.CreateZExtOrTrunc(OffsetValue,
5767 getTypes().ConvertType(PointerDiffType));
5768 }
5769
5770 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
5771 llvm::Value *BaseValue,
5772 const ObjCIvarDecl *Ivar,
5773 unsigned CVRQualifiers) {
5774 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
5775 Ivar, CVRQualifiers);
5776 }
5777
5778 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
5779 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
5780 llvm::Value *BaseValue = nullptr;
5781 const Expr *BaseExpr = E->getBase();
5782 Qualifiers BaseQuals;
5783 QualType ObjectTy;
5784 if (E->isArrow()) {
5785 BaseValue = EmitScalarExpr(BaseExpr);
5786 ObjectTy = BaseExpr->getType()->getPointeeType();
5787 BaseQuals = ObjectTy.getQualifiers();
5788 } else {
5789 LValue BaseLV = EmitLValue(BaseExpr);
5790 BaseValue = BaseLV.getPointer(*this);
5791 ObjectTy = BaseExpr->getType();
5792 BaseQuals = ObjectTy.getQualifiers();
5793 }
5794
5795 LValue LV =
5796 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
5797 BaseQuals.getCVRQualifiers());
5798 setObjCGCLValueClass(getContext(), E, LV);
5799 return LV;
5800 }
5801
5802 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
5803 // Can only get an l-value for a statement expression returning an aggregate type.
5804 RValue RV = EmitAnyExprToTemp(E);
5805 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5806 AlignmentSource::Decl);
5807 }
5808
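/// EmitCall - Emit a call through the given callee, evaluating the arguments
/// in the required order and emitting the function-signature and CFI
/// indirect-call checks when the corresponding sanitizers are enabled.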
5809 RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
5810 const CallExpr *E, ReturnValueSlot ReturnValue,
5811 llvm::Value *Chain) {
5812 // Get the actual function type. The callee type will always be a pointer to
5813 // function type or a block pointer type.
5814 assert(CalleeType->isFunctionPointerType() &&
5815 "Call must have function pointer type!");
5816
5817 const Decl *TargetDecl =
5818 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
5819
5820 assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
5821 !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
5822 "trying to emit a call to an immediate function");
5823
5824 CalleeType = getContext().getCanonicalType(CalleeType);
5825
5826 auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
5827
5828 CGCallee Callee = OrigCallee;
5829
5830 if (SanOpts.has(SanitizerKind::Function) &&
5831 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
5832 !isa<FunctionNoProtoType>(PointeeType)) {
5833 if (llvm::Constant *PrefixSig =
5834 CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
5835 SanitizerScope SanScope(this);
5836 auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
5837
5838 llvm::Type *PrefixSigType = PrefixSig->getType();
5839 llvm::StructType *PrefixStructTy = llvm::StructType::get(
5840 CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
5841
5842 llvm::Value *CalleePtr = Callee.getFunctionPointer();
5843 if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) {
5844 // Use raw pointer since we are using the callee pointer as data here.
5845 Address Addr =
5846 Address(CalleePtr, CalleePtr->getType(),
5847 CharUnits::fromQuantity(
5848 CalleePtr->getPointerAlignment(CGM.getDataLayout())),
5849 Callee.getPointerAuthInfo(), nullptr);
5850 CalleePtr = Addr.emitRawPointer(*this);
5851 }
5852
5853 // On 32-bit Arm, the low bit of a function pointer indicates whether
5854 // it's using the Arm or Thumb instruction set. The actual first
5855 // instruction lives at the same address either way, so we must clear
5856 // that low bit before using the function address to find the prefix
5857 // structure.
5858 //
5859 // This applies to both Arm and Thumb target triples, because
5860 // either one could be used in an interworking context where it
5861 // might be passed function pointers of both types.
5862 llvm::Value *AlignedCalleePtr;
5863 if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
5864 llvm::Value *CalleeAddress =
5865 Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
5866 llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
5867 llvm::Value *AlignedCalleeAddress =
5868 Builder.CreateAnd(CalleeAddress, Mask);
5869 AlignedCalleePtr =
5870 Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
5871 } else {
5872 AlignedCalleePtr = CalleePtr;
5873 }
5874
5875 llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
5876 llvm::Value *CalleeSigPtr =
5877 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
5878 llvm::Value *CalleeSig =
5879 Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
5880 llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
5881
5882 llvm::BasicBlock *Cont = createBasicBlock("cont");
5883 llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
5884 Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
5885
5886 EmitBlock(TypeCheck);
5887 llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
5888 Int32Ty,
5889 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
5890 getPointerAlign());
5891 llvm::Value *CalleeTypeHashMatch =
5892 Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
5893 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
5894 EmitCheckTypeDescriptor(CalleeType)};
5895 EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::Function),
5896 SanitizerHandler::FunctionTypeMismatch, StaticData,
5897 {CalleePtr});
5898
5899 Builder.CreateBr(Cont);
5900 EmitBlock(Cont);
5901 }
5902 }
5903
5904 const auto *FnType = cast<FunctionType>(PointeeType);
5905
5906 // If we are checking indirect calls and this call is indirect, check that the
5907 // function pointer is a member of the bit set for the function type.
5908 if (SanOpts.has(SanitizerKind::CFIICall) &&
5909 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
5910 SanitizerScope SanScope(this);
5911 EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
5912
5913 llvm::Metadata *MD;
5914 if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
5915 MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
5916 else
5917 MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));
5918
5919 llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
5920
5921 llvm::Value *CalleePtr = Callee.getFunctionPointer();
5922 llvm::Value *TypeTest = Builder.CreateCall(
5923 CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
5924
5925 auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
5926 llvm::Constant *StaticData[] = {
5927 llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
5928 EmitCheckSourceLocation(E->getBeginLoc()),
5929 EmitCheckTypeDescriptor(QualType(FnType, 0)),
5930 };
5931 if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
5932 EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
5933 CalleePtr, StaticData);
5934 } else {
5935 EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
5936 SanitizerHandler::CFICheckFail, StaticData,
5937 {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
5938 }
5939 }
5940
5941 CallArgList Args;
5942 if (Chain)
5943 Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
5944
5945 // C++17 requires that we evaluate arguments to a call using assignment syntax
5946 // right-to-left, and that we evaluate arguments to certain other operators
5947 // left-to-right. Note that we allow this to override the order dictated by
5948 // the calling convention on the MS ABI, which means that parameter
5949 // destruction order is not necessarily reverse construction order.
5950 // FIXME: Revisit this based on C++ committee response to unimplementability.
5951 EvaluationOrder Order = EvaluationOrder::Default;
5952 bool StaticOperator = false;
5953 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
5954 if (OCE->isAssignmentOp())
5955 Order = EvaluationOrder::ForceRightToLeft;
5956 else {
5957 switch (OCE->getOperator()) {
5958 case OO_LessLess:
5959 case OO_GreaterGreater:
5960 case OO_AmpAmp:
5961 case OO_PipePipe:
5962 case OO_Comma:
5963 case OO_ArrowStar:
5964 Order = EvaluationOrder::ForceLeftToRight;
5965 break;
5966 default:
5967 break;
5968 }
5969 }
5970
5971 if (const auto *MD =
5972 dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
5973 MD && MD->isStatic())
5974 StaticOperator = true;
5975 }
5976
5977 auto Arguments = E->arguments();
5978 if (StaticOperator) {
5979 // If we're calling a static operator, we need to emit the object argument
5980 // and ignore it.
5981 EmitIgnoredExpr(E->getArg(0));
5982 Arguments = drop_begin(Arguments, 1);
5983 }
5984 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
5985 E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
5986
5987 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
5988 Args, FnType, /*ChainCall=*/Chain);
5989
5990 // C99 6.5.2.2p6:
5991 // If the expression that denotes the called function has a type
5992 // that does not include a prototype, [the default argument
5993 // promotions are performed]. If the number of arguments does not
5994 // equal the number of parameters, the behavior is undefined. If
5995 // the function is defined with a type that includes a prototype,
5996 // and either the prototype ends with an ellipsis (, ...) or the
5997 // types of the arguments after promotion are not compatible with
5998 // the types of the parameters, the behavior is undefined. If the
5999 // function is defined with a type that does not include a
6000 // prototype, and the types of the arguments after promotion are
6001 // not compatible with those of the parameters after promotion,
6002 // the behavior is undefined [except in some trivial cases].
6003 // That is, in the general case, we should assume that a call
6004 // through an unprototyped function type works like a *non-variadic*
6005 // call. The way we make this work is to cast to the exact type
6006 // of the promoted arguments.
6007 //
6008 // Chain calls use this same code path to add the invisible chain parameter
6009 // to the function type.
6010 if (isa<FunctionNoProtoType>(FnType) || Chain) {
6011 llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
6012 int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
6013 CalleeTy = CalleeTy->getPointerTo(AS);
6014
6015 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6016 CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
6017 Callee.setFunctionPointer(CalleePtr);
6018 }
6019
6020 // A HIP function pointer contains the kernel handle when it is used in a
6021 // triple-chevron launch. The kernel stub needs to be loaded from the kernel
6022 // handle and used as the callee.
6023 if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
6024 isa<CUDAKernelCallExpr>(E) &&
6025 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
6026 llvm::Value *Handle = Callee.getFunctionPointer();
6027 auto *Stub = Builder.CreateLoad(
6028 Address(Handle, Handle->getType(), CGM.getPointerAlign()));
6029 Callee.setFunctionPointer(Stub);
6030 }
6031 llvm::CallBase *CallOrInvoke = nullptr;
6032 RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
6033 E == MustTailCall, E->getExprLoc());
6034
6035 // Generate the function declaration's DISubprogram so that it can be used
6036 // in debug info about call sites.
6037 if (CGDebugInfo *DI = getDebugInfo()) {
6038 if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
6039 FunctionArgList Args;
6040 QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
6041 DI->EmitFuncDeclForCallSite(CallOrInvoke,
6042 DI->getFunctionType(CalleeDecl, ResTy, Args),
6043 CalleeDecl);
6044 }
6045 }
6046
6047 return Call;
6048 }
6049
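/// Emit the l-value produced by applying ".*" or "->*" to a pointer to data
/// member.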
6050 LValue CodeGenFunction::
6051 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
6052 Address BaseAddr = Address::invalid();
6053 if (E->getOpcode() == BO_PtrMemI) {
6054 BaseAddr = EmitPointerWithAlignment(E->getLHS());
6055 } else {
6056 BaseAddr = EmitLValue(E->getLHS()).getAddress();
6057 }
6058
6059 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
6060 const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
6061
6062 LValueBaseInfo BaseInfo;
6063 TBAAAccessInfo TBAAInfo;
6064 Address MemberAddr =
6065 EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
6066 &TBAAInfo);
6067
6068 return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
6069 }
6070
6071 /// Given the address of a temporary variable, produce an r-value of
6072 /// its type.
6073 RValue CodeGenFunction::convertTempToRValue(Address addr,
6074 QualType type,
6075 SourceLocation loc) {
6076 LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
6077 switch (getEvaluationKind(type)) {
6078 case TEK_Complex:
6079 return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
6080 case TEK_Aggregate:
6081 return lvalue.asAggregateRValue();
6082 case TEK_Scalar:
6083 return RValue::get(EmitLoadOfScalar(lvalue, loc));
6084 }
6085 llvm_unreachable("bad evaluation kind");
6086 }
6087
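/// SetFPAccuracy - Attach !fpmath metadata to a floating-point instruction,
/// recording the maximum permitted error in ulps.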
6088 void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
6089 assert(Val->getType()->isFPOrFPVectorTy());
6090 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
6091 return;
6092
6093 llvm::MDBuilder MDHelper(getLLVMContext());
6094 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
6095
6096 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
6097 }
6098
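/// SetSqrtFPAccuracy - Allow a relaxed-accuracy (3 ulp) single-precision sqrt
/// when OpenCL or HIP device code is compiled without the correctly-rounded
/// divide/sqrt option.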
6099 void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
6100 llvm::Type *EltTy = Val->getType()->getScalarType();
6101 if (!EltTy->isFloatTy())
6102 return;
6103
6104 if ((getLangOpts().OpenCL &&
6105 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6106 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6107 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6108 // OpenCL v1.1 s7.4: minimum accuracy of single-precision sqrt is 3ulp
6109 //
6110 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6111 // build option allows an application to specify that single precision
6112 // floating-point divide (x/y and 1/x) and sqrt used in the program
6113 // source are correctly rounded.
6114 //
6115 // TODO: CUDA has a prec-sqrt flag
6116 SetFPAccuracy(Val, 3.0f);
6117 }
6118 }
6119
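/// SetDivFPAccuracy - Allow a relaxed-accuracy (2.5 ulp) single-precision
/// divide when OpenCL or HIP device code is compiled without the
/// correctly-rounded divide/sqrt option.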
6120 void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
6121 llvm::Type *EltTy = Val->getType()->getScalarType();
6122 if (!EltTy->isFloatTy())
6123 return;
6124
6125 if ((getLangOpts().OpenCL &&
6126 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6127 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6128 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6129 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
6130 //
6131 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6132 // build option allows an application to specify that single precision
6133 // floating-point divide (x/y and 1/x) and sqrt used in the program
6134 // source are correctly rounded.
6135 //
6136 // TODO: CUDA has a prec-div flag
6137 SetFPAccuracy(Val, 2.5f);
6138 }
6139 }
6140
6141 namespace {
6142 struct LValueOrRValue {
6143 LValue LV;
6144 RValue RV;
6145 };
6146 }
6147
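/// Emit a pseudo-object expression: bind each non-unique opaque value to its
/// source expression, emit the result expression into the given slot (or as
/// an l-value), and evaluate the remaining semantic expressions for their
/// side effects only.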
6148 static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
6149 const PseudoObjectExpr *E,
6150 bool forLValue,
6151 AggValueSlot slot) {
6152 SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
6153
6154 // Find the result expression, if any.
6155 const Expr *resultExpr = E->getResultExpr();
6156 LValueOrRValue result;
6157
6158 for (PseudoObjectExpr::const_semantics_iterator
6159 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
6160 const Expr *semantic = *i;
6161
6162 // If this semantic expression is an opaque value, bind it
6163 // to the result of its source expression.
6164 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
6165 // Skip unique OVEs.
6166 if (ov->isUnique()) {
6167 assert(ov != resultExpr &&
6168 "A unique OVE cannot be used as the result expression");
6169 continue;
6170 }
6171
6172 // If this is the result expression, we may need to evaluate
6173 // directly into the slot.
6174 typedef CodeGenFunction::OpaqueValueMappingData OVMA;
6175 OVMA opaqueData;
6176 if (ov == resultExpr && ov->isPRValue() && !forLValue &&
6177 CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
6178 CGF.EmitAggExpr(ov->getSourceExpr(), slot);
6179 LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
6180 AlignmentSource::Decl);
6181 opaqueData = OVMA::bind(CGF, ov, LV);
6182 result.RV = slot.asRValue();
6183
6184 // Otherwise, emit as normal.
6185 } else {
6186 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
6187
6188 // If this is the result, also evaluate the result now.
6189 if (ov == resultExpr) {
6190 if (forLValue)
6191 result.LV = CGF.EmitLValue(ov);
6192 else
6193 result.RV = CGF.EmitAnyExpr(ov, slot);
6194 }
6195 }
6196
6197 opaques.push_back(opaqueData);
6198
6199 // Otherwise, if the expression is the result, evaluate it
6200 // and remember the result.
6201 } else if (semantic == resultExpr) {
6202 if (forLValue)
6203 result.LV = CGF.EmitLValue(semantic);
6204 else
6205 result.RV = CGF.EmitAnyExpr(semantic, slot);
6206
6207 // Otherwise, evaluate the expression in an ignored context.
6208 } else {
6209 CGF.EmitIgnoredExpr(semantic);
6210 }
6211 }
6212
6213 // Unbind all the opaques now.
6214 for (unsigned i = 0, e = opaques.size(); i != e; ++i)
6215 opaques[i].unbind(CGF);
6216
6217 return result;
6218 }
6219
6220 RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
6221 AggValueSlot slot) {
6222 return emitPseudoObjectExpr(*this, E, false, slot).RV;
6223 }
6224
6225 LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
6226 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
6227 }
6228