1 //===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "CGCXXABI.h"
14 #include "CGCleanup.h"
15 #include "CGDebugInfo.h"
16 #include "CGObjCRuntime.h"
17 #include "CGOpenMPRuntime.h"
18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "ConstantEmitter.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/ASTContext.h"
23 #include "clang/AST/Attr.h"
24 #include "clang/AST/DeclObjC.h"
25 #include "clang/AST/Expr.h"
26 #include "clang/AST/RecordLayout.h"
27 #include "clang/AST/StmtVisitor.h"
28 #include "clang/Basic/CodeGenOptions.h"
29 #include "clang/Basic/TargetInfo.h"
30 #include "llvm/ADT/APFixedPoint.h"
31 #include "llvm/ADT/Optional.h"
32 #include "llvm/IR/CFG.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/FixedPointBuilder.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/IR/GetElementPtrTypeIterator.h"
38 #include "llvm/IR/GlobalVariable.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/IR/IntrinsicsPowerPC.h"
41 #include "llvm/IR/MatrixBuilder.h"
42 #include "llvm/IR/Module.h"
43 #include <cstdarg>
44
45 using namespace clang;
46 using namespace CodeGen;
47 using llvm::Value;
48
49 //===----------------------------------------------------------------------===//
50 // Scalar Expression Emitter
51 //===----------------------------------------------------------------------===//
52
53 namespace {
54
55 /// Determine whether the given binary operation may overflow.
56 /// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
57 /// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
58 /// the returned overflow check is precise. The returned value is 'true' for
59 /// all other opcodes, to be conservative.
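/// For example, signed 32-bit addition of the constants INT_MAX and 1
/// overflows, so Overflow is reported as true; adding 1 and 2 does not.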
60 bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
61 BinaryOperator::Opcode Opcode, bool Signed,
62 llvm::APInt &Result) {
63 // Assume overflow is possible, unless we can prove otherwise.
64 bool Overflow = true;
65 const auto &LHSAP = LHS->getValue();
66 const auto &RHSAP = RHS->getValue();
67 if (Opcode == BO_Add) {
68 if (Signed)
69 Result = LHSAP.sadd_ov(RHSAP, Overflow);
70 else
71 Result = LHSAP.uadd_ov(RHSAP, Overflow);
72 } else if (Opcode == BO_Sub) {
73 if (Signed)
74 Result = LHSAP.ssub_ov(RHSAP, Overflow);
75 else
76 Result = LHSAP.usub_ov(RHSAP, Overflow);
77 } else if (Opcode == BO_Mul) {
78 if (Signed)
79 Result = LHSAP.smul_ov(RHSAP, Overflow);
80 else
81 Result = LHSAP.umul_ov(RHSAP, Overflow);
82 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
83 if (Signed && !RHS->isZero())
84 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
85 else
86 return false;
87 }
88 return Overflow;
89 }
90
91 struct BinOpInfo {
92 Value *LHS;
93 Value *RHS;
94 QualType Ty; // Computation Type.
95 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
96 FPOptions FPFeatures;
97 const Expr *E; // Entire expr, for error unsupported. May not be binop.
98
99 /// Check if the binop can result in integer overflow.
100 bool mayHaveIntegerOverflow() const {
101 // Without constant input, we can't rule out overflow.
102 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
103 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
104 if (!LHSCI || !RHSCI)
105 return true;
106
107 llvm::APInt Result;
108 return ::mayHaveIntegerOverflow(
109 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
110 }
111
112 /// Check if the binop computes a division or a remainder.
113 bool isDivremOp() const {
114 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
115 Opcode == BO_RemAssign;
116 }
117
118 /// Check if the binop can result in an integer division by zero.
119 bool mayHaveIntegerDivisionByZero() const {
120 if (isDivremOp())
121 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
122 return CI->isZero();
123 return true;
124 }
125
126 /// Check if the binop can result in a float division by zero.
127 bool mayHaveFloatDivisionByZero() const {
128 if (isDivremOp())
129 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
130 return CFP->isZero();
131 return true;
132 }
133
134 /// Check if at least one operand is a fixed point type. In such cases, this
135 /// operation did not follow usual arithmetic conversion and both operands
136 /// might not be of the same type.
137 bool isFixedPointOp() const {
138 // We cannot simply check the result type since comparison operations return
139 // an int.
140 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
141 QualType LHSType = BinOp->getLHS()->getType();
142 QualType RHSType = BinOp->getRHS()->getType();
143 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
144 }
145 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
146 return UnOp->getSubExpr()->getType()->isFixedPointType();
147 return false;
148 }
149 };
150
151 static bool MustVisitNullValue(const Expr *E) {
152 // If a null pointer expression's type is the C++0x nullptr_t, then
153 // it's not necessarily a simple constant and it must be evaluated
154 // for its potential side effects.
155 return E->getType()->isNullPtrType();
156 }
157
158 /// If \p E is a widened promoted integer, get its base (unpromoted) type.
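/// For example, for a 'short' operand that the usual arithmetic conversions
/// promoted to 'int', this returns 'short'; for an operand that was already
/// 'int', it returns llvm::None.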
159 static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
160 const Expr *E) {
161 const Expr *Base = E->IgnoreImpCasts();
162 if (E == Base)
163 return llvm::None;
164
165 QualType BaseTy = Base->getType();
166 if (!BaseTy->isPromotableIntegerType() ||
167 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
168 return llvm::None;
169
170 return BaseTy;
171 }
172
173 /// Check if \p E is a widened promoted integer.
174 static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
175 return getUnwidenedIntegerType(Ctx, E).hasValue();
176 }
177
178 /// Check if we can skip the overflow check for \p Op.
179 static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
180 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
181 "Expected a unary or binary operator");
182
183 // If the binop has constant inputs and we can prove there is no overflow,
184 // we can elide the overflow check.
185 if (!Op.mayHaveIntegerOverflow())
186 return true;
187
188 // If a unary op has a widened operand, the op cannot overflow.
189 if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
190 return !UO->canOverflow();
191
192 // We usually don't need overflow checks for binops with widened operands.
193 // Multiplication with promoted unsigned operands is a special case.
194 const auto *BO = cast<BinaryOperator>(Op.E);
195 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
196 if (!OptionalLHSTy)
197 return false;
198
199 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
200 if (!OptionalRHSTy)
201 return false;
202
203 QualType LHSTy = *OptionalLHSTy;
204 QualType RHSTy = *OptionalRHSTy;
205
206 // This is the simple case: binops without unsigned multiplication, and with
207 // widened operands. No overflow check is needed here.
208 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
209 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
210 return true;
211
212 // For unsigned multiplication the overflow check can be elided if either one
213 // of the unpromoted types are less than half the size of the promoted type.
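// For example, two 'unsigned char' operands promoted to a 32-bit 'int'
// multiply to at most 255 * 255 = 65025, which cannot overflow the promoted
// type, so the check can be dropped.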
214 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
215 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
216 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
217 }
218
219 class ScalarExprEmitter
220 : public StmtVisitor<ScalarExprEmitter, Value*> {
221 CodeGenFunction &CGF;
222 CGBuilderTy &Builder;
223 bool IgnoreResultAssign;
224 llvm::LLVMContext &VMContext;
225 public:
226
227 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
228 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
229 VMContext(cgf.getLLVMContext()) {
230 }
231
232 //===--------------------------------------------------------------------===//
233 // Utilities
234 //===--------------------------------------------------------------------===//
235
236 bool TestAndClearIgnoreResultAssign() {
237 bool I = IgnoreResultAssign;
238 IgnoreResultAssign = false;
239 return I;
240 }
241
242 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
243 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
244 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
245 return CGF.EmitCheckedLValue(E, TCK);
246 }
247
248 void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
249 const BinOpInfo &Info);
250
251 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
252 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
253 }
254
255 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
256 const AlignValueAttr *AVAttr = nullptr;
257 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
258 const ValueDecl *VD = DRE->getDecl();
259
260 if (VD->getType()->isReferenceType()) {
261 if (const auto *TTy =
262 dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
263 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
264 } else {
265 // Assumptions for function parameters are emitted at the start of the
266 // function, so there is no need to repeat that here,
267 // unless the alignment-assumption sanitizer is enabled,
268 // then we prefer the assumption over alignment attribute
269 // on IR function param.
270 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
271 return;
272
273 AVAttr = VD->getAttr<AlignValueAttr>();
274 }
275 }
276
277 if (!AVAttr)
278 if (const auto *TTy =
279 dyn_cast<TypedefType>(E->getType()))
280 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
281
282 if (!AVAttr)
283 return;
284
285 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
286 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
287 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
288 }
289
290 /// EmitLoadOfLValue - Given an expression with scalar type that represents
291 /// an l-value, this method emits the address of the l-value, then loads
292 /// and returns the result.
293 Value *EmitLoadOfLValue(const Expr *E) {
294 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
295 E->getExprLoc());
296
297 EmitLValueAlignmentAssumption(E, V);
298 return V;
299 }
300
301 /// EmitConversionToBool - Convert the specified expression value to a
302 /// boolean (i1) truth value. This is equivalent to "Val != 0".
303 Value *EmitConversionToBool(Value *Src, QualType DstTy);
304
305 /// Emit a check that a conversion from a floating-point type does not
306 /// overflow.
307 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
308 Value *Src, QualType SrcType, QualType DstType,
309 llvm::Type *DstTy, SourceLocation Loc);
310
311 /// Known implicit conversion check kinds.
312 /// Keep in sync with the enum of the same name in ubsan_handlers.h
313 enum ImplicitConversionCheckKind : unsigned char {
314 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
315 ICCK_UnsignedIntegerTruncation = 1,
316 ICCK_SignedIntegerTruncation = 2,
317 ICCK_IntegerSignChange = 3,
318 ICCK_SignedIntegerTruncationOrSignChange = 4,
319 };
320
321 /// Emit a check that an [implicit] truncation of an integer does not
322 /// discard any bits. It is not UB, so we use the value after truncation.
323 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
324 QualType DstType, SourceLocation Loc);
325
326 /// Emit a check that an [implicit] conversion of an integer does not change
327 /// the sign of the value. It is not UB, so we use the value after conversion.
328 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
329 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
330 QualType DstType, SourceLocation Loc);
331
332 /// Emit a conversion from the specified type to the specified destination
333 /// type, both of which are LLVM scalar types.
334 struct ScalarConversionOpts {
335 bool TreatBooleanAsSigned;
336 bool EmitImplicitIntegerTruncationChecks;
337 bool EmitImplicitIntegerSignChangeChecks;
338
339 ScalarConversionOpts()
340 : TreatBooleanAsSigned(false),
341 EmitImplicitIntegerTruncationChecks(false),
342 EmitImplicitIntegerSignChangeChecks(false) {}
343
344 ScalarConversionOpts(clang::SanitizerSet SanOpts)
345 : TreatBooleanAsSigned(false),
346 EmitImplicitIntegerTruncationChecks(
347 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
348 EmitImplicitIntegerSignChangeChecks(
349 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
350 };
351 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
352 llvm::Type *SrcTy, llvm::Type *DstTy,
353 ScalarConversionOpts Opts);
354 Value *
355 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
356 SourceLocation Loc,
357 ScalarConversionOpts Opts = ScalarConversionOpts());
358
359 /// Convert between either a fixed point and other fixed point or fixed point
360 /// and an integer.
361 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
362 SourceLocation Loc);
363
364 /// Emit a conversion from the specified complex type to the specified
365 /// destination type, where the destination type is an LLVM scalar type.
366 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
367 QualType SrcTy, QualType DstTy,
368 SourceLocation Loc);
369
370 /// EmitNullValue - Emit a value that corresponds to null for the given type.
371 Value *EmitNullValue(QualType Ty);
372
373 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
374 Value *EmitFloatToBoolConversion(Value *V) {
375 // Compare against 0.0 for fp scalars.
376 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
377 return Builder.CreateFCmpUNE(V, Zero, "tobool");
378 }
379
380 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
381 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
382 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
383
384 return Builder.CreateICmpNE(V, Zero, "tobool");
385 }
386
387 Value *EmitIntToBoolConversion(Value *V) {
388 // Because of the type rules of C, we often end up computing a
389 // logical value, then zero extending it to int, then wanting it
390 // as a logical value again. Optimize this common case.
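// For example, '(bool)(a && b)' in C: the i1 result of '&&' is zero-extended
// to i32 (the type of the '&&' expression) and then converted back to a
// boolean here; we can simply reuse the original i1.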
391 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
392 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
393 Value *Result = ZI->getOperand(0);
394 // If there aren't any more uses, zap the instruction to save space.
395 // Note that there can be more uses, for example if this
396 // is the result of an assignment.
397 if (ZI->use_empty())
398 ZI->eraseFromParent();
399 return Result;
400 }
401 }
402
403 return Builder.CreateIsNotNull(V, "tobool");
404 }
405
406 //===--------------------------------------------------------------------===//
407 // Visitor Methods
408 //===--------------------------------------------------------------------===//
409
410 Value *Visit(Expr *E) {
411 ApplyDebugLocation DL(CGF, E);
412 return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
413 }
414
415 Value *VisitStmt(Stmt *S) {
416 S->dump(llvm::errs(), CGF.getContext());
417 llvm_unreachable("Stmt can't have complex result type!");
418 }
419 Value *VisitExpr(Expr *S);
420
421 Value *VisitConstantExpr(ConstantExpr *E) {
422 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
423 if (E->isGLValue())
424 return CGF.Builder.CreateLoad(Address(
425 Result, CGF.getContext().getTypeAlignInChars(E->getType())));
426 return Result;
427 }
428 return Visit(E->getSubExpr());
429 }
430 Value *VisitParenExpr(ParenExpr *PE) {
431 return Visit(PE->getSubExpr());
432 }
433 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
434 return Visit(E->getReplacement());
435 }
436 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
437 return Visit(GE->getResultExpr());
438 }
439 Value *VisitCoawaitExpr(CoawaitExpr *S) {
440 return CGF.EmitCoawaitExpr(*S).getScalarVal();
441 }
442 Value *VisitCoyieldExpr(CoyieldExpr *S) {
443 return CGF.EmitCoyieldExpr(*S).getScalarVal();
444 }
445 Value *VisitUnaryCoawait(const UnaryOperator *E) {
446 return Visit(E->getSubExpr());
447 }
448
449 // Leaves.
450 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
451 return Builder.getInt(E->getValue());
452 }
453 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
454 return Builder.getInt(E->getValue());
455 }
456 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
457 return llvm::ConstantFP::get(VMContext, E->getValue());
458 }
459 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
460 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
461 }
462 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
463 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
464 }
465 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
466 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
467 }
468 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
469 return EmitNullValue(E->getType());
470 }
471 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
472 return EmitNullValue(E->getType());
473 }
474 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
475 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
476 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
477 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
478 return Builder.CreateBitCast(V, ConvertType(E->getType()));
479 }
480
481 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
482 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
483 }
484
485 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
486 return CGF.EmitPseudoObjectRValue(E).getScalarVal();
487 }
488
489 Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
490
491 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
492 if (E->isGLValue())
493 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
494 E->getExprLoc());
495
496 // Otherwise, assume the mapping is the scalar directly.
497 return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
498 }
499
500 // l-values.
501 Value *VisitDeclRefExpr(DeclRefExpr *E) {
502 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
503 return CGF.emitScalarConstant(Constant, E);
504 return EmitLoadOfLValue(E);
505 }
506
507 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
508 return CGF.EmitObjCSelectorExpr(E);
509 }
510 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
511 return CGF.EmitObjCProtocolExpr(E);
512 }
513 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
514 return EmitLoadOfLValue(E);
515 }
516 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
517 if (E->getMethodDecl() &&
518 E->getMethodDecl()->getReturnType()->isReferenceType())
519 return EmitLoadOfLValue(E);
520 return CGF.EmitObjCMessageExpr(E).getScalarVal();
521 }
522
523 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
524 LValue LV = CGF.EmitObjCIsaExpr(E);
525 Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
526 return V;
527 }
528
529 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
530 VersionTuple Version = E->getVersion();
531
532 // If we're checking for a platform older than our minimum deployment
533 // target, we can fold the check away.
534 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
535 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
536
537 return CGF.EmitBuiltinAvailable(Version);
538 }
539
540 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
541 Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
542 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
543 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
544 Value *VisitMemberExpr(MemberExpr *E);
545 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
546 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
547 // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
548 // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
549 // literals aren't l-values in C++. We do so simply because that's the
550 // cleanest way to handle compound literals in C++.
551 // See the discussion here: https://reviews.llvm.org/D64464
552 return EmitLoadOfLValue(E);
553 }
554
555 Value *VisitInitListExpr(InitListExpr *E);
556
557 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
558 assert(CGF.getArrayInitIndex() &&
559 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
560 return CGF.getArrayInitIndex();
561 }
562
563 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
564 return EmitNullValue(E->getType());
565 }
566 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
567 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
568 return VisitCastExpr(E);
569 }
570 Value *VisitCastExpr(CastExpr *E);
571
572 Value *VisitCallExpr(const CallExpr *E) {
573 if (E->getCallReturnType(CGF.getContext())->isReferenceType())
574 return EmitLoadOfLValue(E);
575
576 Value *V = CGF.EmitCallExpr(E).getScalarVal();
577
578 EmitLValueAlignmentAssumption(E, V);
579 return V;
580 }
581
582 Value *VisitStmtExpr(const StmtExpr *E);
583
584 // Unary Operators.
585 Value *VisitUnaryPostDec(const UnaryOperator *E) {
586 LValue LV = EmitLValue(E->getSubExpr());
587 return EmitScalarPrePostIncDec(E, LV, false, false);
588 }
589 Value *VisitUnaryPostInc(const UnaryOperator *E) {
590 LValue LV = EmitLValue(E->getSubExpr());
591 return EmitScalarPrePostIncDec(E, LV, true, false);
592 }
593 Value *VisitUnaryPreDec(const UnaryOperator *E) {
594 LValue LV = EmitLValue(E->getSubExpr());
595 return EmitScalarPrePostIncDec(E, LV, false, true);
596 }
597 Value *VisitUnaryPreInc(const UnaryOperator *E) {
598 LValue LV = EmitLValue(E->getSubExpr());
599 return EmitScalarPrePostIncDec(E, LV, true, true);
600 }
601
602 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
603 llvm::Value *InVal,
604 bool IsInc);
605
606 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
607 bool isInc, bool isPre);
608
609
610 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
611 if (isa<MemberPointerType>(E->getType())) // never sugared
612 return CGF.CGM.getMemberPointerConstant(E);
613
614 return EmitLValue(E->getSubExpr()).getPointer(CGF);
615 }
616 Value *VisitUnaryDeref(const UnaryOperator *E) {
617 if (E->getType()->isVoidType())
618 return Visit(E->getSubExpr()); // the actual value should be unused
619 return EmitLoadOfLValue(E);
620 }
621 Value *VisitUnaryPlus(const UnaryOperator *E) {
622 // This differs from gcc, though, most likely due to a bug in gcc.
623 TestAndClearIgnoreResultAssign();
624 return Visit(E->getSubExpr());
625 }
626 Value *VisitUnaryMinus (const UnaryOperator *E);
627 Value *VisitUnaryNot (const UnaryOperator *E);
628 Value *VisitUnaryLNot (const UnaryOperator *E);
629 Value *VisitUnaryReal (const UnaryOperator *E);
630 Value *VisitUnaryImag (const UnaryOperator *E);
631 Value *VisitUnaryExtension(const UnaryOperator *E) {
632 return Visit(E->getSubExpr());
633 }
634
635 // C++
636 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
637 return EmitLoadOfLValue(E);
638 }
639 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
640 auto &Ctx = CGF.getContext();
641 APValue Evaluated =
642 SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
643 return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
644 SLE->getType());
645 }
646
647 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
648 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
649 return Visit(DAE->getExpr());
650 }
651 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
652 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
653 return Visit(DIE->getExpr());
654 }
655 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
656 return CGF.LoadCXXThis();
657 }
658
659 Value *VisitExprWithCleanups(ExprWithCleanups *E);
660 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
661 return CGF.EmitCXXNewExpr(E);
662 }
663 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
664 CGF.EmitCXXDeleteExpr(E);
665 return nullptr;
666 }
667
668 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
669 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
670 }
671
672 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
673 return Builder.getInt1(E->isSatisfied());
674 }
675
676 Value *VisitRequiresExpr(const RequiresExpr *E) {
677 return Builder.getInt1(E->isSatisfied());
678 }
679
680 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
681 return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
682 }
683
684 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
685 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
686 }
687
688 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
689 // C++ [expr.pseudo]p1:
690 // The result shall only be used as the operand for the function call
691 // operator (), and the result of such a call has type void. The only
692 // effect is the evaluation of the postfix-expression before the dot or
693 // arrow.
694 CGF.EmitScalarExpr(E->getBase());
695 return nullptr;
696 }
697
698 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
699 return EmitNullValue(E->getType());
700 }
701
702 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
703 CGF.EmitCXXThrowExpr(E);
704 return nullptr;
705 }
706
707 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
708 return Builder.getInt1(E->getValue());
709 }
710
711 // Binary Operators.
712 Value *EmitMul(const BinOpInfo &Ops) {
713 if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
714 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
715 case LangOptions::SOB_Defined:
716 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
717 case LangOptions::SOB_Undefined:
718 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
719 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
720 LLVM_FALLTHROUGH;
721 case LangOptions::SOB_Trapping:
722 if (CanElideOverflowCheck(CGF.getContext(), Ops))
723 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
724 return EmitOverflowCheckedBinOp(Ops);
725 }
726 }
727
728 if (Ops.Ty->isConstantMatrixType()) {
729 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
730 // We need to check the types of the operands of the operator to get the
731 // correct matrix dimensions.
732 auto *BO = cast<BinaryOperator>(Ops.E);
733 auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
734 BO->getLHS()->getType().getCanonicalType());
735 auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
736 BO->getRHS()->getType().getCanonicalType());
737 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
738 if (LHSMatTy && RHSMatTy)
739 return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
740 LHSMatTy->getNumColumns(),
741 RHSMatTy->getNumColumns());
742 return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
743 }
744
745 if (Ops.Ty->isUnsignedIntegerType() &&
746 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
747 !CanElideOverflowCheck(CGF.getContext(), Ops))
748 return EmitOverflowCheckedBinOp(Ops);
749
750 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
751 // Preserve the old values
752 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
753 return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
754 }
755 if (Ops.isFixedPointOp())
756 return EmitFixedPointBinOp(Ops);
757 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
758 }
759 /// Create a binary op that checks for overflow.
760 /// Currently only supports +, - and *.
761 Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
762
763 // Check for undefined division and modulus behaviors.
764 void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
765 llvm::Value *Zero,bool isDiv);
766 // Common helper for getting how wide LHS of shift is.
767 static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
768
769 // Used to constrain shift amounts for OpenCL: apply a mask when the bit
770 // width is a power of 2, and a URem otherwise.
771 Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
772
773 Value *EmitDiv(const BinOpInfo &Ops);
774 Value *EmitRem(const BinOpInfo &Ops);
775 Value *EmitAdd(const BinOpInfo &Ops);
776 Value *EmitSub(const BinOpInfo &Ops);
777 Value *EmitShl(const BinOpInfo &Ops);
778 Value *EmitShr(const BinOpInfo &Ops);
779 Value *EmitAnd(const BinOpInfo &Ops) {
780 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
781 }
782 Value *EmitXor(const BinOpInfo &Ops) {
783 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
784 }
785 Value *EmitOr (const BinOpInfo &Ops) {
786 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
787 }
788
789 // Helper functions for fixed point binary operations.
790 Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
791
792 BinOpInfo EmitBinOps(const BinaryOperator *E);
793 LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
794 Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
795 Value *&Result);
796
797 Value *EmitCompoundAssign(const CompoundAssignOperator *E,
798 Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
799
800 // Binary operators and binary compound assignment operators.
801 #define HANDLEBINOP(OP) \
802 Value *VisitBin ## OP(const BinaryOperator *E) { \
803 return Emit ## OP(EmitBinOps(E)); \
804 } \
805 Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
806 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
807 }
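// For instance, HANDLEBINOP(Mul) declares VisitBinMul, which forwards to
// EmitMul, and VisitBinMulAssign, which forwards to EmitCompoundAssign with
// EmitMul as the computation callback.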
808 HANDLEBINOP(Mul)
809 HANDLEBINOP(Div)
810 HANDLEBINOP(Rem)
811 HANDLEBINOP(Add)
812 HANDLEBINOP(Sub)
813 HANDLEBINOP(Shl)
814 HANDLEBINOP(Shr)
815 HANDLEBINOP(And)
816 HANDLEBINOP(Xor)
817 HANDLEBINOP(Or)
818 #undef HANDLEBINOP
819
820 // Comparisons.
821 Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
822 llvm::CmpInst::Predicate SICmpOpc,
823 llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
824 #define VISITCOMP(CODE, UI, SI, FP, SIG) \
825 Value *VisitBin##CODE(const BinaryOperator *E) { \
826 return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
827 llvm::FCmpInst::FP, SIG); }
828 VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
829 VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
830 VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
831 VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
832 VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
833 VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
834 #undef VISITCOMP
835
836 Value *VisitBinAssign (const BinaryOperator *E);
837
838 Value *VisitBinLAnd (const BinaryOperator *E);
839 Value *VisitBinLOr (const BinaryOperator *E);
840 Value *VisitBinComma (const BinaryOperator *E);
841
842 Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
843 Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
844
845 Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
846 return Visit(E->getSemanticForm());
847 }
848
849 // Other Operators.
850 Value *VisitBlockExpr(const BlockExpr *BE);
851 Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
852 Value *VisitChooseExpr(ChooseExpr *CE);
853 Value *VisitVAArgExpr(VAArgExpr *VE);
854 Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
855 return CGF.EmitObjCStringLiteral(E);
856 }
857 Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
858 return CGF.EmitObjCBoxedExpr(E);
859 }
860 Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
861 return CGF.EmitObjCArrayLiteral(E);
862 }
863 Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
864 return CGF.EmitObjCDictionaryLiteral(E);
865 }
866 Value *VisitAsTypeExpr(AsTypeExpr *CE);
867 Value *VisitAtomicExpr(AtomicExpr *AE);
868 };
869 } // end anonymous namespace.
870
871 //===----------------------------------------------------------------------===//
872 // Utilities
873 //===----------------------------------------------------------------------===//
874
875 /// EmitConversionToBool - Convert the specified expression value to a
876 /// boolean (i1) truth value. This is equivalent to "Val != 0".
877 Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
878 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
879
880 if (SrcType->isRealFloatingType())
881 return EmitFloatToBoolConversion(Src);
882
883 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
884 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
885
886 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
887 "Unknown scalar type to convert");
888
889 if (isa<llvm::IntegerType>(Src->getType()))
890 return EmitIntToBoolConversion(Src);
891
892 assert(isa<llvm::PointerType>(Src->getType()));
893 return EmitPointerToBoolConversion(Src, SrcType);
894 }
895
896 void ScalarExprEmitter::EmitFloatConversionCheck(
897 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
898 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
899 assert(SrcType->isFloatingType() && "not a conversion from floating point");
900 if (!isa<llvm::IntegerType>(DstTy))
901 return;
902
903 CodeGenFunction::SanitizerScope SanScope(&CGF);
904 using llvm::APFloat;
905 using llvm::APSInt;
906
907 llvm::Value *Check = nullptr;
908 const llvm::fltSemantics &SrcSema =
909 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
910
911 // Floating-point to integer. This has undefined behavior if the source is
912 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
913 // to an integer).
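// For example, for a conversion to an 8-bit signed integer the bounds
// computed below give the open interval (-129.0, 128.0): every source value
// strictly inside it truncates (toward zero) to a representable result.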
914 unsigned Width = CGF.getContext().getIntWidth(DstType);
915 bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
916
917 APSInt Min = APSInt::getMinValue(Width, Unsigned);
918 APFloat MinSrc(SrcSema, APFloat::uninitialized);
919 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
920 APFloat::opOverflow)
921 // Don't need an overflow check for lower bound. Just check for
922 // -Inf/NaN.
923 MinSrc = APFloat::getInf(SrcSema, true);
924 else
925 // Find the largest value which is too small to represent (before
926 // truncation toward zero).
927 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
928
929 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
930 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
931 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
932 APFloat::opOverflow)
933 // Don't need an overflow check for upper bound. Just check for
934 // +Inf/NaN.
935 MaxSrc = APFloat::getInf(SrcSema, false);
936 else
937 // Find the smallest value which is too large to represent (before
938 // truncation toward zero).
939 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
940
941 // If we're converting from __half, convert the range to float to match
942 // the type of src.
943 if (OrigSrcType->isHalfType()) {
944 const llvm::fltSemantics &Sema =
945 CGF.getContext().getFloatTypeSemantics(SrcType);
946 bool IsInexact;
947 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
948 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
949 }
950
951 llvm::Value *GE =
952 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
953 llvm::Value *LE =
954 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
955 Check = Builder.CreateAnd(GE, LE);
956
957 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
958 CGF.EmitCheckTypeDescriptor(OrigSrcType),
959 CGF.EmitCheckTypeDescriptor(DstType)};
960 CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
961 SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
962 }
963
964 // Should be called within CodeGenFunction::SanitizerScope RAII scope.
965 // Returns 'i1 false' when the truncation Src -> Dst was lossy.
966 static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
967 std::pair<llvm::Value *, SanitizerMask>>
968 EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
969 QualType DstType, CGBuilderTy &Builder) {
970 llvm::Type *SrcTy = Src->getType();
971 llvm::Type *DstTy = Dst->getType();
972 (void)DstTy; // Only used in assert()
973
974 // This should be truncation of integral types.
975 assert(Src != Dst);
976 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
977 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
978 "non-integer llvm type");
979
980 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
981 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
982
983 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
984 // Else, it is a signed truncation.
985 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
986 SanitizerMask Mask;
987 if (!SrcSigned && !DstSigned) {
988 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
989 Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
990 } else {
991 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
992 Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
993 }
994
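// The emitted pattern, e.g. for an i32 -> i16 truncation, is:
//   %anyext    = sext/zext i16 %dst to i32
//   %truncheck = icmp eq i32 %anyext, %src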
995 llvm::Value *Check = nullptr;
996 // 1. Extend the truncated value back to the same width as the Src.
997 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
998 // 2. Equality-compare with the original source value
999 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1000 // If the comparison result is 'i1 false', then the truncation was lossy.
1001 return std::make_pair(Kind, std::make_pair(Check, Mask));
1002 }
1003
1004 static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
1005 QualType SrcType, QualType DstType) {
1006 return SrcType->isIntegerType() && DstType->isIntegerType();
1007 }
1008
1009 void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1010 Value *Dst, QualType DstType,
1011 SourceLocation Loc) {
1012 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1013 return;
1014
1015 // We only care about int->int conversions here.
1016 // We ignore conversions to/from pointer and/or bool.
1017 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1018 DstType))
1019 return;
1020
1021 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1022 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1023 // This must be truncation. Else we do not care.
1024 if (SrcBits <= DstBits)
1025 return;
1026
1027 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1028
1029 // If the integer sign change sanitizer is enabled,
1030 // and we are truncating from larger unsigned type to smaller signed type,
1031 // let that next sanitizer deal with it.
1032 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1033 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1034 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1035 (!SrcSigned && DstSigned))
1036 return;
1037
1038 CodeGenFunction::SanitizerScope SanScope(&CGF);
1039
1040 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1041 std::pair<llvm::Value *, SanitizerMask>>
1042 Check =
1043 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1044 // If the comparison result is 'i1 false', then the truncation was lossy.
1045
1046 // Do we care about this type of truncation?
1047 if (!CGF.SanOpts.has(Check.second.second))
1048 return;
1049
1050 llvm::Constant *StaticArgs[] = {
1051 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1052 CGF.EmitCheckTypeDescriptor(DstType),
1053 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
1054 CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1055 {Src, Dst});
1056 }
1057
1058 // Should be called within CodeGenFunction::SanitizerScope RAII scope.
1059 // Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1060 static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1061 std::pair<llvm::Value *, SanitizerMask>>
1062 EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1063 QualType DstType, CGBuilderTy &Builder) {
1064 llvm::Type *SrcTy = Src->getType();
1065 llvm::Type *DstTy = Dst->getType();
1066
1067 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1068 "non-integer llvm type");
1069
1070 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1071 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1072 (void)SrcSigned; // Only used in assert()
1073 (void)DstSigned; // Only used in assert()
1074 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1075 unsigned DstBits = DstTy->getScalarSizeInBits();
1076 (void)SrcBits; // Only used in assert()
1077 (void)DstBits; // Only used in assert()
1078
1079 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1080 "either the widths should be different, or the signednesses.");
1081
1082 // NOTE: zero value is considered to be non-negative.
1083 auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
1084 const char *Name) -> Value * {
1085 // Is this value a signed type?
1086 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1087 llvm::Type *VTy = V->getType();
1088 if (!VSigned) {
1089 // If the value is unsigned, then it is never negative.
1090 // FIXME: can we encounter non-scalar VTy here?
1091 return llvm::ConstantInt::getFalse(VTy->getContext());
1092 }
1093 // Get the zero of the same type with which we will be comparing.
1094 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1095 // %V.isnegative = icmp slt %V, 0
1096 // I.e., is %V *strictly* less than zero; does it have a negative value?
1097 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1098 llvm::Twine(Name) + "." + V->getName() +
1099 ".negativitycheck");
1100 };
1101
1102 // 1. Was the old Value negative?
1103 llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
1104 // 2. Is the new Value negative?
1105 llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
1106 // 3. Now, was the 'negativity status' preserved during the conversion?
1107 // NOTE: conversion from negative to zero is considered to change the sign.
1108 // (We want to get 'false' when the conversion changed the sign)
1109 // So we should just equality-compare the negativity statuses.
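// E.g. converting the 32-bit signed value -1 to 'unsigned' yields 4294967295:
// the source is negative but the destination is not, so the check is false.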
1110 llvm::Value *Check = nullptr;
1111 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1112 // If the comparison result is 'false', then the conversion changed the sign.
1113 return std::make_pair(
1114 ScalarExprEmitter::ICCK_IntegerSignChange,
1115 std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
1116 }
1117
1118 void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1119 Value *Dst, QualType DstType,
1120 SourceLocation Loc) {
1121 if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
1122 return;
1123
1124 llvm::Type *SrcTy = Src->getType();
1125 llvm::Type *DstTy = Dst->getType();
1126
1127 // We only care about int->int conversions here.
1128 // We ignore conversions to/from pointer and/or bool.
1129 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1130 DstType))
1131 return;
1132
1133 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1134 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1135 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1136 unsigned DstBits = DstTy->getScalarSizeInBits();
1137
1138 // Now, we do not need to emit the check in *all* of the cases.
1139 // We can avoid emitting it in some obvious cases where it would always have
1140 // been dropped by the optimizer (instcombine) anyway.
1141 // If it's a cast between effectively the same type, no check.
1142 // NOTE: this is *not* equivalent to checking the canonical types.
1143 if (SrcSigned == DstSigned && SrcBits == DstBits)
1144 return;
1145 // At least one of the values needs to have signed type.
1146 // If both are unsigned, then obviously, neither of them can be negative.
1147 if (!SrcSigned && !DstSigned)
1148 return;
1149 // If the conversion is to *larger* *signed* type, then no check is needed.
1150 // Because either sign-extension happens (so the sign will remain),
1151 // or zero-extension will happen (the sign bit will be zero.)
1152 if ((DstBits > SrcBits) && DstSigned)
1153 return;
1154 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1155 (SrcBits > DstBits) && SrcSigned) {
1156 // If the signed integer truncation sanitizer is enabled,
1157 // and this is a truncation from signed type, then no check is needed.
1158 // Because here sign change check is interchangeable with truncation check.
1159 return;
1160 }
1161 // That's it. We can't rule out any more cases with the data we have.
1162
1163 CodeGenFunction::SanitizerScope SanScope(&CGF);
1164
1165 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1166 std::pair<llvm::Value *, SanitizerMask>>
1167 Check;
1168
1169 // Each of these checks needs to return 'false' when an issue was detected.
1170 ImplicitConversionCheckKind CheckKind;
1171 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
1172 // So we can 'and' all the checks together, and still get 'false',
1173 // if at least one of the checks detected an issue.
1174
1175 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1176 CheckKind = Check.first;
1177 Checks.emplace_back(Check.second);
1178
1179 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1180 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1181 // If the signed integer truncation sanitizer was enabled,
1182 // and we are truncating from larger unsigned type to smaller signed type,
1183 // let's handle the case we skipped in that check.
1184 Check =
1185 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1186 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1187 Checks.emplace_back(Check.second);
1188 // If the comparison result is 'i1 false', then the truncation was lossy.
1189 }
1190
1191 llvm::Constant *StaticArgs[] = {
1192 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1193 CGF.EmitCheckTypeDescriptor(DstType),
1194 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
1195 // EmitCheck() will 'and' all the checks together.
1196 CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
1197 {Src, Dst});
1198 }
1199
1200 Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1201 QualType DstType, llvm::Type *SrcTy,
1202 llvm::Type *DstTy,
1203 ScalarConversionOpts Opts) {
1204 // The Element types determine the type of cast to perform.
1205 llvm::Type *SrcElementTy;
1206 llvm::Type *DstElementTy;
1207 QualType SrcElementType;
1208 QualType DstElementType;
1209 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1210 SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1211 DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1212 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1213 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1214 } else {
1215 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1216 "cannot cast between matrix and non-matrix types");
1217 SrcElementTy = SrcTy;
1218 DstElementTy = DstTy;
1219 SrcElementType = SrcType;
1220 DstElementType = DstType;
1221 }
1222
1223 if (isa<llvm::IntegerType>(SrcElementTy)) {
1224 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1225 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1226 InputSigned = true;
1227 }
1228
1229 if (isa<llvm::IntegerType>(DstElementTy))
1230 return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1231 if (InputSigned)
1232 return Builder.CreateSIToFP(Src, DstTy, "conv");
1233 return Builder.CreateUIToFP(Src, DstTy, "conv");
1234 }
1235
1236 if (isa<llvm::IntegerType>(DstElementTy)) {
1237 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1238 if (DstElementType->isSignedIntegerOrEnumerationType())
1239 return Builder.CreateFPToSI(Src, DstTy, "conv");
1240 return Builder.CreateFPToUI(Src, DstTy, "conv");
1241 }
1242
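// Note: for floating-point types this relies on the relative order of LLVM's
// TypeIDs (half, bfloat, float, double, x86_fp80, fp128, ...) as a proxy for
// whether the destination type is narrower than the source type.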
1243 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1244 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1245 return Builder.CreateFPExt(Src, DstTy, "conv");
1246 }
1247
1248 /// Emit a conversion from the specified type to the specified destination type,
1249 /// both of which are LLVM scalar types.
1250 Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1251 QualType DstType,
1252 SourceLocation Loc,
1253 ScalarConversionOpts Opts) {
1254 // All conversions involving fixed point types should be handled by the
1255 // EmitFixedPoint family functions. This is done to prevent bloating up this
1256 // function more, and although fixed point numbers are represented by
1257 // integers, we do not want to follow any logic that assumes they should be
1258 // treated as integers.
1259 // TODO(leonardchan): When necessary, add another if statement checking for
1260 // conversions to fixed point types from other types.
1261 if (SrcType->isFixedPointType()) {
1262 if (DstType->isBooleanType())
1263 // It is important that we check this before checking if the dest type is
1264 // an integer because booleans are technically integer types.
1265 // We do not need to check the padding bit on unsigned types if unsigned
1266 // padding is enabled because overflow into this bit is undefined
1267 // behavior.
1268 return Builder.CreateIsNotNull(Src, "tobool");
1269 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1270 DstType->isRealFloatingType())
1271 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1272
1273 llvm_unreachable(
1274 "Unhandled scalar conversion from a fixed point type to another type.");
1275 } else if (DstType->isFixedPointType()) {
1276 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1277 // This also includes converting booleans and enums to fixed point types.
1278 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1279
1280 llvm_unreachable(
1281 "Unhandled scalar conversion to a fixed point type from another type.");
1282 }
1283
1284 QualType NoncanonicalSrcType = SrcType;
1285 QualType NoncanonicalDstType = DstType;
1286
1287 SrcType = CGF.getContext().getCanonicalType(SrcType);
1288 DstType = CGF.getContext().getCanonicalType(DstType);
1289 if (SrcType == DstType) return Src;
1290
1291 if (DstType->isVoidType()) return nullptr;
1292
1293 llvm::Value *OrigSrc = Src;
1294 QualType OrigSrcType = SrcType;
1295 llvm::Type *SrcTy = Src->getType();
1296
1297 // Handle conversions to bool first, they are special: comparisons against 0.
1298 if (DstType->isBooleanType())
1299 return EmitConversionToBool(Src, SrcType);
1300
1301 llvm::Type *DstTy = ConvertType(DstType);
1302
1303 // Cast from half through float if half isn't a native type.
1304 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1305 // Cast to FP using the intrinsic if the half type itself isn't supported.
1306 if (DstTy->isFloatingPointTy()) {
1307 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1308 return Builder.CreateCall(
1309 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1310 Src);
1311 } else {
1312 // Cast to other types through float, using either the intrinsic or FPExt,
1313 // depending on whether the half type itself is supported
1314 // (as opposed to operations on half, available with NativeHalfType).
1315 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1316 Src = Builder.CreateCall(
1317 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1318 CGF.CGM.FloatTy),
1319 Src);
1320 } else {
1321 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1322 }
1323 SrcType = CGF.getContext().FloatTy;
1324 SrcTy = CGF.FloatTy;
1325 }
1326 }
1327
1328 // Ignore conversions like int -> uint.
1329 if (SrcTy == DstTy) {
1330 if (Opts.EmitImplicitIntegerSignChangeChecks)
1331 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1332 NoncanonicalDstType, Loc);
1333
1334 return Src;
1335 }
1336
1337 // Handle pointer conversions next: pointers can only be converted to/from
1338 // other pointers and integers. Check for pointer types in terms of LLVM, as
1339 // some native types (like Obj-C id) may map to a pointer type.
1340 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1341 // The source value may be an integer, or a pointer.
1342 if (isa<llvm::PointerType>(SrcTy))
1343 return Builder.CreateBitCast(Src, DstTy, "conv");
1344
1345 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1346 // First, convert to the correct width so that we control the kind of
1347 // extension.
1348 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1349 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1350 llvm::Value* IntResult =
1351 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1352 // Then, cast to pointer.
1353 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1354 }
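  // Hedged sketch of the int -> pointer path above, assuming a 64-bit target
  // and a signed 32-bit source (value names are invented for the example):
  //   %conv  = sext i32 %src to i64      ; widen to the pointer-width integer
  //   %conv1 = inttoptr i64 %conv to i8*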
1355
1356 if (isa<llvm::PointerType>(SrcTy)) {
1357 // Must be a ptr-to-int cast.
1358 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1359 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1360 }
1361
1362 // A scalar can be splatted to an extended vector of the same element type
1363 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1364 // Sema should add casts to make sure that the source expression's type is
1365 // the same as the vector's element type (sans qualifiers)
1366 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1367 SrcType.getTypePtr() &&
1368 "Splatted expr doesn't match with vector element type?");
1369
1370 // Splat the element across to all elements
1371 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1372 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1373 }
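  // For illustration (a sketch): CreateVectorSplat lowers a scalar-to-float4
  // splat as an insertelement into lane 0 followed by a zero-mask
  // shufflevector, e.g.
  //   %insert = insertelement <4 x float> undef, float %x, i32 0
  //   %splat  = shufflevector <4 x float> %insert, <4 x float> undef,
  //                           <4 x i32> zeroinitializer
  // (whether the placeholder is undef or poison depends on the LLVM version).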
1374
1375 if (SrcType->isMatrixType() && DstType->isMatrixType())
1376 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1377
1378 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1379 // Allow bitcast from vector to integer/fp of the same size.
1380 unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
1381 unsigned DstSize = DstTy->getPrimitiveSizeInBits();
1382 if (SrcSize == DstSize)
1383 return Builder.CreateBitCast(Src, DstTy, "conv");
1384
1385 // Conversions between vectors of different sizes are not allowed except
1386 // when vectors of half are involved. Operations on storage-only half
1387 // vectors require promoting half vector operands to float vectors and
1388 // truncating the result, which is either an int or float vector, to a
1389 // short or half vector.
1390
1391 // Source and destination are both expected to be vectors.
1392 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1393 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1394 (void)DstElementTy;
1395
1396 assert(((SrcElementTy->isIntegerTy() &&
1397 DstElementTy->isIntegerTy()) ||
1398 (SrcElementTy->isFloatingPointTy() &&
1399 DstElementTy->isFloatingPointTy())) &&
1400 "unexpected conversion between a floating-point vector and an "
1401 "integer vector");
1402
1403 // Truncate an i32 vector to an i16 vector.
1404 if (SrcElementTy->isIntegerTy())
1405 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1406
1407 // Truncate a float vector to a half vector.
1408 if (SrcSize > DstSize)
1409 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1410
1411 // Promote a half vector to a float vector.
1412 return Builder.CreateFPExt(Src, DstTy, "conv");
1413 }
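  // Hedged example of the storage-only half case above: promoting a
  // <4 x half> operand to <4 x float> is a single
  //   %promote = fpext <4 x half> %v to <4 x float>
  // and truncating the float result back to the half vector uses fptrunc.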
1414
1415 // Finally, we have the arithmetic types: real int/float.
1416 Value *Res = nullptr;
1417 llvm::Type *ResTy = DstTy;
1418
1419 // An overflowing conversion has undefined behavior if either the source type
1420 // or the destination type is a floating-point type. However, we consider the
1421 // range of representable values for all floating-point types to be
1422 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1423 // floating-point type.
1424 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1425 OrigSrcType->isFloatingType())
1426 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1427 Loc);
1428
1429 // Cast to half through float if half isn't a native type.
1430 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1431 // Make sure we cast in a single step if from another FP type.
1432 if (SrcTy->isFloatingPointTy()) {
1433 // Use the intrinsic if the half type itself isn't supported
1434 // (as opposed to operations on half, available with NativeHalfType).
1435 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1436 return Builder.CreateCall(
1437 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1438 // If the half type is supported, just use an fptrunc.
1439 return Builder.CreateFPTrunc(Src, DstTy);
1440 }
1441 DstTy = CGF.FloatTy;
1442 }
1443
1444 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1445
1446 if (DstTy != ResTy) {
1447 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1448 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1449 Res = Builder.CreateCall(
1450 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1451 Res);
1452 } else {
1453 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1454 }
1455 }
1456
1457 if (Opts.EmitImplicitIntegerTruncationChecks)
1458 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1459 NoncanonicalDstType, Loc);
1460
1461 if (Opts.EmitImplicitIntegerSignChangeChecks)
1462 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1463 NoncanonicalDstType, Loc);
1464
1465 return Res;
1466 }
1467
1468 Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1469 QualType DstTy,
1470 SourceLocation Loc) {
1471 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1472 llvm::Value *Result;
1473 if (SrcTy->isRealFloatingType())
1474 Result = FPBuilder.CreateFloatingToFixed(Src,
1475 CGF.getContext().getFixedPointSemantics(DstTy));
1476 else if (DstTy->isRealFloatingType())
1477 Result = FPBuilder.CreateFixedToFloating(Src,
1478 CGF.getContext().getFixedPointSemantics(SrcTy),
1479 ConvertType(DstTy));
1480 else {
1481 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1482 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1483
1484 if (DstTy->isIntegerType())
1485 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1486 DstFPSema.getWidth(),
1487 DstFPSema.isSigned());
1488 else if (SrcTy->isIntegerType())
1489 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1490 DstFPSema);
1491 else
1492 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1493 }
1494 return Result;
1495 }
1496
1497 /// Emit a conversion from the specified complex type to the specified
1498 /// destination type, where the destination type is an LLVM scalar type.
1499 Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1500 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1501 SourceLocation Loc) {
1502 // Get the source element type.
1503 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1504
1505 // Handle conversions to bool first, they are special: comparisons against 0.
1506 if (DstTy->isBooleanType()) {
1507 // Complex != 0 -> (Real != 0) | (Imag != 0)
1508 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1509 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1510 return Builder.CreateOr(Src.first, Src.second, "tobool");
1511 }
1512
1513 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1514 // the imaginary part of the complex value is discarded and the value of the
1515 // real part is converted according to the conversion rules for the
1516 // corresponding real type."
1517 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1518 }
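// Illustrative sketch: for '_Complex float c; if (c) ...' the boolean path
// above is expected to produce roughly
//   %r      = fcmp une float %c.real, 0.000000e+00
//   %i      = fcmp une float %c.imag, 0.000000e+00
//   %tobool = or i1 %r, %i
// (the value names are invented for the example).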
1519
1520 Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1521 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1522 }
1523
1524 /// Emit a sanitization check for the given "binary" operation (which
1525 /// might actually be a unary increment which has been lowered to a binary
1526 /// operation). The check passes if all values in \p Checks (which are \c i1),
1527 /// are \c true.
1528 void ScalarExprEmitter::EmitBinOpCheck(
1529 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1530 assert(CGF.IsSanitizerScope);
1531 SanitizerHandler Check;
1532 SmallVector<llvm::Constant *, 4> StaticData;
1533 SmallVector<llvm::Value *, 2> DynamicData;
1534
1535 BinaryOperatorKind Opcode = Info.Opcode;
1536 if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1537 Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1538
1539 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1540 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1541 if (UO && UO->getOpcode() == UO_Minus) {
1542 Check = SanitizerHandler::NegateOverflow;
1543 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1544 DynamicData.push_back(Info.RHS);
1545 } else {
1546 if (BinaryOperator::isShiftOp(Opcode)) {
1547 // Shift LHS negative or too large, or RHS out of bounds.
1548 Check = SanitizerHandler::ShiftOutOfBounds;
1549 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1550 StaticData.push_back(
1551 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1552 StaticData.push_back(
1553 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1554 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1555 // Divide or modulo by zero, or signed overflow (e.g. INT_MIN / -1).
1556 Check = SanitizerHandler::DivremOverflow;
1557 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1558 } else {
1559 // Arithmetic overflow (+, -, *).
1560 switch (Opcode) {
1561 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1562 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1563 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1564 default: llvm_unreachable("unexpected opcode for bin op check");
1565 }
1566 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1567 }
1568 DynamicData.push_back(Info.LHS);
1569 DynamicData.push_back(Info.RHS);
1570 }
1571
1572 CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
1573 }
1574
1575 //===----------------------------------------------------------------------===//
1576 // Visitor Methods
1577 //===----------------------------------------------------------------------===//
1578
1579 Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1580 CGF.ErrorUnsupported(E, "scalar expression");
1581 if (E->getType()->isVoidType())
1582 return nullptr;
1583 return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1584 }
1585
1586 Value *
1587 ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1588 ASTContext &Context = CGF.getContext();
1589 llvm::Optional<LangAS> GlobalAS =
1590 Context.getTargetInfo().getConstantAddressSpace();
1591 llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
1592 E->ComputeName(Context), "__usn_str",
1593 static_cast<unsigned>(GlobalAS.getValueOr(LangAS::Default)));
1594
1595 unsigned ExprAS = Context.getTargetAddressSpace(E->getType());
1596
1597 if (GlobalConstStr->getType()->getPointerAddressSpace() == ExprAS)
1598 return GlobalConstStr;
1599
1600 llvm::Type *EltTy = GlobalConstStr->getType()->getPointerElementType();
1601 llvm::PointerType *NewPtrTy = llvm::PointerType::get(EltTy, ExprAS);
1602 return Builder.CreateAddrSpaceCast(GlobalConstStr, NewPtrTy, "usn_addr_cast");
1603 }
1604
1605 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1606 // Vector Mask Case
1607 if (E->getNumSubExprs() == 2) {
1608 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1609 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1610 Value *Mask;
1611
1612 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1613 unsigned LHSElts = LTy->getNumElements();
1614
1615 Mask = RHS;
1616
1617 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1618
1619 // Mask off the high bits of each shuffle index.
1620 Value *MaskBits =
1621 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1622 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1623
1624 // newv = undef
1625 // mask = mask & maskbits
1626 // for each elt
1627 // n = extract mask i
1628 // x = extract val n
1629 // newv = insert newv, x, i
1630 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1631 MTy->getNumElements());
1632 Value* NewV = llvm::UndefValue::get(RTy);
1633 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1634 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1635 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1636
1637 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1638 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1639 }
1640 return NewV;
1641 }
1642
1643 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1644 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1645
1646 SmallVector<int, 32> Indices;
1647 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1648 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1649 // Check for -1 and output it as undef in the IR.
1650 if (Idx.isSigned() && Idx.isAllOnesValue())
1651 Indices.push_back(-1);
1652 else
1653 Indices.push_back(Idx.getZExtValue());
1654 }
1655
1656 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1657 }
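// Hedged usage note: the constant-mask form handled above corresponds to the
// common __builtin_shufflevector call with literal indices, e.g.
//   __builtin_shufflevector(a, b, 0, 4, 1, 5)
// which lowers to a single 'shufflevector' instruction; an index of -1 is
// emitted as an undef mask element.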
1658
1659 Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1660 QualType SrcType = E->getSrcExpr()->getType(),
1661 DstType = E->getType();
1662
1663 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1664
1665 SrcType = CGF.getContext().getCanonicalType(SrcType);
1666 DstType = CGF.getContext().getCanonicalType(DstType);
1667 if (SrcType == DstType) return Src;
1668
1669 assert(SrcType->isVectorType() &&
1670 "ConvertVector source type must be a vector");
1671 assert(DstType->isVectorType() &&
1672 "ConvertVector destination type must be a vector");
1673
1674 llvm::Type *SrcTy = Src->getType();
1675 llvm::Type *DstTy = ConvertType(DstType);
1676
1677 // Ignore conversions like int -> uint.
1678 if (SrcTy == DstTy)
1679 return Src;
1680
1681 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1682 DstEltType = DstType->castAs<VectorType>()->getElementType();
1683
1684 assert(SrcTy->isVectorTy() &&
1685 "ConvertVector source IR type must be a vector");
1686 assert(DstTy->isVectorTy() &&
1687 "ConvertVector destination IR type must be a vector");
1688
1689 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
1690 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
1691
1692 if (DstEltType->isBooleanType()) {
1693 assert((SrcEltTy->isFloatingPointTy() ||
1694 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1695
1696 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1697 if (SrcEltTy->isFloatingPointTy()) {
1698 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1699 } else {
1700 return Builder.CreateICmpNE(Src, Zero, "tobool");
1701 }
1702 }
1703
1704 // We have the arithmetic types: real int/float.
1705 Value *Res = nullptr;
1706
1707 if (isa<llvm::IntegerType>(SrcEltTy)) {
1708 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1709 if (isa<llvm::IntegerType>(DstEltTy))
1710 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1711 else if (InputSigned)
1712 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1713 else
1714 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1715 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1716 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1717 if (DstEltType->isSignedIntegerOrEnumerationType())
1718 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1719 else
1720 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1721 } else {
1722 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1723 "Unknown real conversion");
1724 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1725 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1726 else
1727 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1728 }
1729
1730 return Res;
1731 }
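// Illustrative sketch: __builtin_convertvector from a vector of signed int to
// a same-length vector of float takes the SIToFP branch above and emits e.g.
//   %conv = sitofp <4 x i32> %v to <4 x float>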
1732
1733 Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1734 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1735 CGF.EmitIgnoredExpr(E->getBase());
1736 return CGF.emitScalarConstant(Constant, E);
1737 } else {
1738 Expr::EvalResult Result;
1739 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1740 llvm::APSInt Value = Result.Val.getInt();
1741 CGF.EmitIgnoredExpr(E->getBase());
1742 return Builder.getInt(Value);
1743 }
1744 }
1745
1746 return EmitLoadOfLValue(E);
1747 }
1748
1749 Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1750 TestAndClearIgnoreResultAssign();
1751
1752 // Emit subscript expressions in rvalue contexts. For most cases, this just
1753 // loads the lvalue formed by the subscript expr. However, we have to be
1754 // careful, because the base of a vector subscript is occasionally an rvalue,
1755 // so we can't get it as an lvalue.
1756 if (!E->getBase()->getType()->isVectorType())
1757 return EmitLoadOfLValue(E);
1758
1759 // Handle the vector case. The base must be a vector, the index must be an
1760 // integer value.
1761 Value *Base = Visit(E->getBase());
1762 Value *Idx = Visit(E->getIdx());
1763 QualType IdxTy = E->getIdx()->getType();
1764
1765 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1766 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1767
1768 return Builder.CreateExtractElement(Base, Idx, "vecext");
1769 }
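// For illustration (a sketch): when the base is a vector rvalue, e.g.
// 'returns_a_float4()[i]' (a hypothetical function returning a float4), the
// subscript above lowers to a single
//   %vecext = extractelement <4 x float> %vec, i32 %i
// instead of a load through an lvalue.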
1770
1771 Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1772 TestAndClearIgnoreResultAssign();
1773
1774 // Handle the vector case. The base must be a vector, the index must be an
1775 // integer value.
1776 Value *RowIdx = Visit(E->getRowIdx());
1777 Value *ColumnIdx = Visit(E->getColumnIdx());
1778 Value *Matrix = Visit(E->getBase());
1779
1780 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
1781 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
1782 return MB.CreateExtractElement(
1783 Matrix, RowIdx, ColumnIdx,
1784 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
1785 }
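// Hedged sketch (based on the column-major layout used by the matrix type
// extension): for a 4x4 float matrix, the MatrixBuilder call above is expected
// to compute a flat index of roughly 'col * 4 + row' and then emit a single
// extractelement from the underlying <16 x float> value.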
1786
1787 static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1788 unsigned Off) {
1789 int MV = SVI->getMaskValue(Idx);
1790 if (MV == -1)
1791 return -1;
1792 return Off + MV;
1793 }
1794
1795 static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1796 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
1797 "Index operand too large for shufflevector mask!");
1798 return C->getZExtValue();
1799 }
1800
1801 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1802 bool Ignore = TestAndClearIgnoreResultAssign();
1803 (void)Ignore;
1804 assert (Ignore == false && "init list ignored");
1805 unsigned NumInitElements = E->getNumInits();
1806
1807 if (E->hadArrayRangeDesignator())
1808 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1809
1810 llvm::VectorType *VType =
1811 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
1812
1813 if (!VType) {
1814 if (NumInitElements == 0) {
1815 // C++11 value-initialization for the scalar.
1816 return EmitNullValue(E->getType());
1817 }
1818 // We have a scalar in braces. Just use the first element.
1819 return Visit(E->getInit(0));
1820 }
1821
1822 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
1823
1824 // Loop over initializers collecting the Value for each, and remembering
1825 // whether the source was a swizzle (ExtVectorElementExpr). This will allow
1826 // us to fold the shuffle for the swizzle into the shuffle for the vector
1827 // initializer, since LLVM optimizers generally do not want to touch
1828 // shuffles.
1829 unsigned CurIdx = 0;
1830 bool VIsUndefShuffle = false;
1831 llvm::Value *V = llvm::UndefValue::get(VType);
1832 for (unsigned i = 0; i != NumInitElements; ++i) {
1833 Expr *IE = E->getInit(i);
1834 Value *Init = Visit(IE);
1835 SmallVector<int, 16> Args;
1836
1837 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1838
1839 // Handle scalar elements. If the scalar initializer is actually one
1840 // element of a different vector of the same width, use shuffle instead of
1841 // extract+insert.
1842 if (!VVT) {
1843 if (isa<ExtVectorElementExpr>(IE)) {
1844 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
1845
1846 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
1847 ->getNumElements() == ResElts) {
1848 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
1849 Value *LHS = nullptr, *RHS = nullptr;
1850 if (CurIdx == 0) {
1851 // insert into undef -> shuffle (src, undef)
1852 // shufflemask must use an i32
1853 Args.push_back(getAsInt32(C, CGF.Int32Ty));
1854 Args.resize(ResElts, -1);
1855
1856 LHS = EI->getVectorOperand();
1857 RHS = V;
1858 VIsUndefShuffle = true;
1859 } else if (VIsUndefShuffle) {
1860 // insert into undefshuffle && size match -> shuffle (v, src)
1861 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
1862 for (unsigned j = 0; j != CurIdx; ++j)
1863 Args.push_back(getMaskElt(SVV, j, 0));
1864 Args.push_back(ResElts + C->getZExtValue());
1865 Args.resize(ResElts, -1);
1866
1867 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1868 RHS = EI->getVectorOperand();
1869 VIsUndefShuffle = false;
1870 }
1871 if (!Args.empty()) {
1872 V = Builder.CreateShuffleVector(LHS, RHS, Args);
1873 ++CurIdx;
1874 continue;
1875 }
1876 }
1877 }
1878 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
1879 "vecinit");
1880 VIsUndefShuffle = false;
1881 ++CurIdx;
1882 continue;
1883 }
1884
1885 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
1886
1887 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1888 // input is the same width as the vector being constructed, generate an
1889 // optimized shuffle of the swizzle input into the result.
1890 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1891 if (isa<ExtVectorElementExpr>(IE)) {
1892 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
1893 Value *SVOp = SVI->getOperand(0);
1894 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
1895
1896 if (OpTy->getNumElements() == ResElts) {
1897 for (unsigned j = 0; j != CurIdx; ++j) {
1898 // If the current vector initializer is a shuffle with undef, merge
1899 // this shuffle directly into it.
1900 if (VIsUndefShuffle) {
1901 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
1902 } else {
1903 Args.push_back(j);
1904 }
1905 }
1906 for (unsigned j = 0, je = InitElts; j != je; ++j)
1907 Args.push_back(getMaskElt(SVI, j, Offset));
1908 Args.resize(ResElts, -1);
1909
1910 if (VIsUndefShuffle)
1911 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1912
1913 Init = SVOp;
1914 }
1915 }
1916
1917 // Extend init to result vector length, and then shuffle its contribution
1918 // to the vector initializer into V.
1919 if (Args.empty()) {
1920 for (unsigned j = 0; j != InitElts; ++j)
1921 Args.push_back(j);
1922 Args.resize(ResElts, -1);
1923 Init = Builder.CreateShuffleVector(Init, Args, "vext");
1924
1925 Args.clear();
1926 for (unsigned j = 0; j != CurIdx; ++j)
1927 Args.push_back(j);
1928 for (unsigned j = 0; j != InitElts; ++j)
1929 Args.push_back(j + Offset);
1930 Args.resize(ResElts, -1);
1931 }
1932
1933 // If V is undef, make sure it ends up on the RHS of the shuffle to aid
1934 // merging subsequent shuffles into this one.
1935 if (CurIdx == 0)
1936 std::swap(V, Init);
1937 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
1938 VIsUndefShuffle = isa<llvm::UndefValue>(Init);
1939 CurIdx += InitElts;
1940 }
1941
1942 // FIXME: evaluate codegen vs. shuffling against constant null vector.
1943 // Emit remaining default initializers.
1944 llvm::Type *EltTy = VType->getElementType();
1945
1947 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
1948 Value *Idx = Builder.getInt32(CurIdx);
1949 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
1950 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
1951 }
1952 return V;
1953 }
1954
1955 bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
1956 const Expr *E = CE->getSubExpr();
1957
1958 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1959 return false;
1960
1961 if (isa<CXXThisExpr>(E->IgnoreParens())) {
1962 // We always assume that 'this' is never null.
1963 return false;
1964 }
1965
1966 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1967 // And that glvalue casts are never null.
1968 if (ICE->isGLValue())
1969 return false;
1970 }
1971
1972 return true;
1973 }
1974
1975 // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
1976 // have to handle a broader range of conversions than explicit casts, as they
1977 // handle things like function to ptr-to-function decay etc.
1978 Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
1979 Expr *E = CE->getSubExpr();
1980 QualType DestTy = CE->getType();
1981 CastKind Kind = CE->getCastKind();
1982
1983 // These cases are generally not written to ignore the result of
1984 // evaluating their sub-expressions, so we clear this now.
1985 bool Ignored = TestAndClearIgnoreResultAssign();
1986
1987 // Since almost all cast kinds apply to scalars, this switch doesn't have
1988 // a default case, so the compiler will warn on a missing case. The cases
1989 // are in the same order as in the CastKind enum.
1990 switch (Kind) {
1991 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
1992 case CK_BuiltinFnToFnPtr:
1993 llvm_unreachable("builtin functions are handled elsewhere");
1994
1995 case CK_LValueBitCast:
1996 case CK_ObjCObjectLValueCast: {
1997 Address Addr = EmitLValue(E).getAddress(CGF);
1998 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
1999 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2000 return EmitLoadOfLValue(LV, CE->getExprLoc());
2001 }
2002
2003 case CK_LValueToRValueBitCast: {
2004 LValue SourceLVal = CGF.EmitLValue(E);
2005 Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
2006 CGF.ConvertTypeForMem(DestTy));
2007 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2008 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2009 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2010 }
2011
2012 case CK_CPointerToObjCPointerCast:
2013 case CK_BlockPointerToObjCPointerCast:
2014 case CK_AnyPointerToBlockPointerCast:
2015 case CK_BitCast: {
2016 Value *Src = Visit(const_cast<Expr*>(E));
2017 llvm::Type *SrcTy = Src->getType();
2018 llvm::Type *DstTy = ConvertType(DestTy);
2019 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
2020 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
2021 llvm_unreachable("wrong cast for pointers in different address spaces "
2022 "(must be an address space cast)!");
2023 }
2024
2025 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2026 if (auto PT = DestTy->getAs<PointerType>())
2027 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
2028 /*MayBeNull=*/true,
2029 CodeGenFunction::CFITCK_UnrelatedCast,
2030 CE->getBeginLoc());
2031 }
2032
2033 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2034 const QualType SrcType = E->getType();
2035
2036 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2037 // Casting to pointer that could carry dynamic information (provided by
2038 // invariant.group) requires launder.
2039 Src = Builder.CreateLaunderInvariantGroup(Src);
2040 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2041 // Casting to pointer that does not carry dynamic information (provided
2042 // by invariant.group) requires stripping it. Note that we don't do it
2043 // if the source could not be a dynamic type and the destination could be
2044 // dynamic, because dynamic information is already laundered. It is
2045 // because launder(strip(src)) == launder(src), so there is no need to
2046 // add extra strip before launder.
2047 Src = Builder.CreateStripInvariantGroup(Src);
2048 }
2049 }
2050
2051 // Update heapallocsite metadata when there is an explicit pointer cast.
2052 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2053 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
2054 QualType PointeeType = DestTy->getPointeeType();
2055 if (!PointeeType.isNull())
2056 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2057 CE->getExprLoc());
2058 }
2059 }
2060
2061 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2062 // same element type, use the llvm.experimental.vector.insert intrinsic to
2063 // perform the bitcast.
2064 if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2065 if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2066 if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
2067 llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
2068 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2069 return Builder.CreateInsertVector(DstTy, UndefVec, Src, Zero,
2070 "castScalableSve");
2071 }
2072 }
2073 }
2074
2075 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2076 // same element type, use the llvm.experimental.vector.extract intrinsic to
2077 // perform the bitcast.
2078 if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2079 if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2080 if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
2081 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2082 return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
2083 }
2084 }
2085 }
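    // Hedged sketch of the scalable -> fixed case just above: the result is a
    // single call to the llvm.experimental.vector.extract intrinsic with a zero
    // start index, e.g. extracting a fixed <4 x i32> slice from a
    // <vscale x 4 x i32> value.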
2086
2087 // Perform VLAT <-> VLST bitcast through memory.
2088 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2089 // require the element types of the vectors to be the same, we
2090 // need to keep this around for casting between predicates, or more
2091 // generally for bitcasts between VLAT <-> VLST where the element
2092 // types of the vectors are not the same, until we figure out a better
2093 // way of doing these casts.
2094 if ((isa<llvm::FixedVectorType>(SrcTy) &&
2095 isa<llvm::ScalableVectorType>(DstTy)) ||
2096 (isa<llvm::ScalableVectorType>(SrcTy) &&
2097 isa<llvm::FixedVectorType>(DstTy))) {
2098 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
2099 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
2100 CGF.EmitStoreOfScalar(Src, LV);
2101 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
2102 "castFixedSve");
2103 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2104 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2105 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2106 }
2107
2108 return Builder.CreateBitCast(Src, DstTy);
2109 }
2110 case CK_AddressSpaceConversion: {
2111 Expr::EvalResult Result;
2112 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2113 Result.Val.isNullPointer()) {
2114 // If E has side effects, it is emitted even if its final result is a
2115 // null pointer. In that case, a DCE pass should be able to
2116 // eliminate the useless instructions emitted when translating E.
2117 if (Result.HasSideEffects)
2118 Visit(E);
2119 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2120 ConvertType(DestTy)), DestTy);
2121 }
2122 // Since the target may map different address spaces in the AST to the same address
2123 // space, an address space conversion may end up as a bitcast.
2124 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2125 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2126 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2127 }
2128 case CK_AtomicToNonAtomic:
2129 case CK_NonAtomicToAtomic:
2130 case CK_NoOp:
2131 case CK_UserDefinedConversion:
2132 return Visit(const_cast<Expr*>(E));
2133
2134 case CK_BaseToDerived: {
2135 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2136 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2137
2138 Address Base = CGF.EmitPointerWithAlignment(E);
2139 Address Derived =
2140 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2141 CE->path_begin(), CE->path_end(),
2142 CGF.ShouldNullCheckClassCastValue(CE));
2143
2144 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2145 // performed and the object is not of the derived type.
2146 if (CGF.sanitizePerformTypeCheck())
2147 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2148 Derived.getPointer(), DestTy->getPointeeType());
2149
2150 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2151 CGF.EmitVTablePtrCheckForCast(
2152 DestTy->getPointeeType(), Derived.getPointer(),
2153 /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
2154 CE->getBeginLoc());
2155
2156 return Derived.getPointer();
2157 }
2158 case CK_UncheckedDerivedToBase:
2159 case CK_DerivedToBase: {
2160 // The EmitPointerWithAlignment path does this fine; just discard
2161 // the alignment.
2162 return CGF.EmitPointerWithAlignment(CE).getPointer();
2163 }
2164
2165 case CK_Dynamic: {
2166 Address V = CGF.EmitPointerWithAlignment(E);
2167 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2168 return CGF.EmitDynamicCast(V, DCE);
2169 }
2170
2171 case CK_ArrayToPointerDecay:
2172 return CGF.EmitArrayToPointerDecay(E).getPointer();
2173 case CK_FunctionToPointerDecay:
2174 return EmitLValue(E).getPointer(CGF);
2175
2176 case CK_NullToPointer:
2177 if (MustVisitNullValue(E))
2178 CGF.EmitIgnoredExpr(E);
2179
2180 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2181 DestTy);
2182
2183 case CK_NullToMemberPointer: {
2184 if (MustVisitNullValue(E))
2185 CGF.EmitIgnoredExpr(E);
2186
2187 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2188 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2189 }
2190
2191 case CK_ReinterpretMemberPointer:
2192 case CK_BaseToDerivedMemberPointer:
2193 case CK_DerivedToBaseMemberPointer: {
2194 Value *Src = Visit(E);
2195
2196 // Note that the AST doesn't distinguish between checked and
2197 // unchecked member pointer conversions, so we always have to
2198 // implement checked conversions here. This is inefficient when
2199 // actual control flow may be required in order to perform the
2200 // check, which it is for data member pointers (but not member
2201 // function pointers on Itanium and ARM).
2202 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2203 }
2204
2205 case CK_ARCProduceObject:
2206 return CGF.EmitARCRetainScalarExpr(E);
2207 case CK_ARCConsumeObject:
2208 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2209 case CK_ARCReclaimReturnedObject:
2210 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2211 case CK_ARCExtendBlockObject:
2212 return CGF.EmitARCExtendBlockObject(E);
2213
2214 case CK_CopyAndAutoreleaseBlockObject:
2215 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2216
2217 case CK_FloatingRealToComplex:
2218 case CK_FloatingComplexCast:
2219 case CK_IntegralRealToComplex:
2220 case CK_IntegralComplexCast:
2221 case CK_IntegralComplexToFloatingComplex:
2222 case CK_FloatingComplexToIntegralComplex:
2223 case CK_ConstructorConversion:
2224 case CK_ToUnion:
2225 llvm_unreachable("scalar cast to non-scalar value");
2226
2227 case CK_LValueToRValue:
2228 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2229 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2230 return Visit(const_cast<Expr*>(E));
2231
2232 case CK_IntegralToPointer: {
2233 Value *Src = Visit(const_cast<Expr*>(E));
2234
2235 // First, convert to the correct width so that we control the kind of
2236 // extension.
2237 auto DestLLVMTy = ConvertType(DestTy);
2238 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2239 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2240 llvm::Value* IntResult =
2241 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2242
2243 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2244
2245 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2246 // Going from integer to pointer that could be dynamic requires reloading
2247 // dynamic information from invariant.group.
2248 if (DestTy.mayBeDynamicClass())
2249 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2250 }
2251 return IntToPtr;
2252 }
2253 case CK_PointerToIntegral: {
2254 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2255 auto *PtrExpr = Visit(E);
2256
2257 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2258 const QualType SrcType = E->getType();
2259
2260 // Casting to integer requires stripping dynamic information as it does
2261 // not carry it.
2262 if (SrcType.mayBeDynamicClass())
2263 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2264 }
2265
2266 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2267 }
2268 case CK_ToVoid: {
2269 CGF.EmitIgnoredExpr(E);
2270 return nullptr;
2271 }
2272 case CK_MatrixCast: {
2273 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2274 CE->getExprLoc());
2275 }
2276 case CK_VectorSplat: {
2277 llvm::Type *DstTy = ConvertType(DestTy);
2278 Value *Elt = Visit(const_cast<Expr*>(E));
2279 // Splat the element across to all elements
2280 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
2281 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2282 }
2283
2284 case CK_FixedPointCast:
2285 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2286 CE->getExprLoc());
2287
2288 case CK_FixedPointToBoolean:
2289 assert(E->getType()->isFixedPointType() &&
2290 "Expected src type to be fixed point type");
2291 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2292 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2293 CE->getExprLoc());
2294
2295 case CK_FixedPointToIntegral:
2296 assert(E->getType()->isFixedPointType() &&
2297 "Expected src type to be fixed point type");
2298 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2299 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2300 CE->getExprLoc());
2301
2302 case CK_IntegralToFixedPoint:
2303 assert(E->getType()->isIntegerType() &&
2304 "Expected src type to be an integer");
2305 assert(DestTy->isFixedPointType() &&
2306 "Expected dest type to be fixed point type");
2307 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2308 CE->getExprLoc());
2309
2310 case CK_IntegralCast: {
2311 ScalarConversionOpts Opts;
2312 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2313 if (!ICE->isPartOfExplicitCast())
2314 Opts = ScalarConversionOpts(CGF.SanOpts);
2315 }
2316 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2317 CE->getExprLoc(), Opts);
2318 }
2319 case CK_IntegralToFloating:
2320 case CK_FloatingToIntegral:
2321 case CK_FloatingCast:
2322 case CK_FixedPointToFloating:
2323 case CK_FloatingToFixedPoint: {
2324 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2325 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2326 CE->getExprLoc());
2327 }
2328 case CK_BooleanToSignedIntegral: {
2329 ScalarConversionOpts Opts;
2330 Opts.TreatBooleanAsSigned = true;
2331 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2332 CE->getExprLoc(), Opts);
2333 }
2334 case CK_IntegralToBoolean:
2335 return EmitIntToBoolConversion(Visit(E));
2336 case CK_PointerToBoolean:
2337 return EmitPointerToBoolConversion(Visit(E), E->getType());
2338 case CK_FloatingToBoolean: {
2339 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2340 return EmitFloatToBoolConversion(Visit(E));
2341 }
2342 case CK_MemberPointerToBoolean: {
2343 llvm::Value *MemPtr = Visit(E);
2344 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2345 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2346 }
2347
2348 case CK_FloatingComplexToReal:
2349 case CK_IntegralComplexToReal:
2350 return CGF.EmitComplexExpr(E, false, true).first;
2351
2352 case CK_FloatingComplexToBoolean:
2353 case CK_IntegralComplexToBoolean: {
2354 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2355
2356 // TODO: kill this function off, inline appropriate case here
2357 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2358 CE->getExprLoc());
2359 }
2360
2361 case CK_ZeroToOCLOpaqueType: {
2362 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2363 DestTy->isOCLIntelSubgroupAVCType()) &&
2364 "CK_ZeroToOCLEvent cast on non-event type");
2365 return llvm::Constant::getNullValue(ConvertType(DestTy));
2366 }
2367
2368 case CK_IntToOCLSampler:
2369 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2370
2371 } // end of switch
2372
2373 llvm_unreachable("unknown scalar cast");
2374 }
2375
2376 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2377 CodeGenFunction::StmtExprEvaluation eval(CGF);
2378 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2379 !E->getType()->isVoidType());
2380 if (!RetAlloca.isValid())
2381 return nullptr;
2382 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2383 E->getExprLoc());
2384 }
2385
2386 Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2387 CodeGenFunction::RunCleanupsScope Scope(CGF);
2388 Value *V = Visit(E->getSubExpr());
2389 // Defend against dominance problems caused by jumps out of expression
2390 // evaluation through the shared cleanup block.
2391 Scope.ForceCleanup({&V});
2392 return V;
2393 }
2394
2395 //===----------------------------------------------------------------------===//
2396 // Unary Operators
2397 //===----------------------------------------------------------------------===//
2398
2399 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2400 llvm::Value *InVal, bool IsInc,
2401 FPOptions FPFeatures) {
2402 BinOpInfo BinOp;
2403 BinOp.LHS = InVal;
2404 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2405 BinOp.Ty = E->getType();
2406 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2407 BinOp.FPFeatures = FPFeatures;
2408 BinOp.E = E;
2409 return BinOp;
2410 }
2411
2412 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2413 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2414 llvm::Value *Amount =
2415 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2416 StringRef Name = IsInc ? "inc" : "dec";
2417 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2418 case LangOptions::SOB_Defined:
2419 return Builder.CreateAdd(InVal, Amount, Name);
2420 case LangOptions::SOB_Undefined:
2421 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2422 return Builder.CreateNSWAdd(InVal, Amount, Name);
2423 LLVM_FALLTHROUGH;
2424 case LangOptions::SOB_Trapping:
2425 if (!E->canOverflow())
2426 return Builder.CreateNSWAdd(InVal, Amount, Name);
2427 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2428 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2429 }
2430 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2431 }
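// Illustrative summary of the cases above (a sketch, not exhaustive): with
// -fwrapv a signed 'x++' emits a plain 'add'; with the default undefined
// behavior it emits 'add nsw' unless the signed-integer-overflow sanitizer is
// enabled; with -ftrapv (or that sanitizer) the overflow-checked path is used
// whenever the operation can actually overflow.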
2432
2433 namespace {
2434 /// Handles check and update for lastprivate conditional variables.
2435 class OMPLastprivateConditionalUpdateRAII {
2436 private:
2437 CodeGenFunction &CGF;
2438 const UnaryOperator *E;
2439
2440 public:
2441 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
2442 const UnaryOperator *E)
2443 : CGF(CGF), E(E) {}
2444 ~OMPLastprivateConditionalUpdateRAII() {
2445 if (CGF.getLangOpts().OpenMP)
2446 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
2447 CGF, E->getSubExpr());
2448 }
2449 };
2450 } // namespace
2451
2452 llvm::Value *
2453 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2454 bool isInc, bool isPre) {
2455 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
2456 QualType type = E->getSubExpr()->getType();
2457 llvm::PHINode *atomicPHI = nullptr;
2458 llvm::Value *value;
2459 llvm::Value *input;
2460
2461 int amount = (isInc ? 1 : -1);
2462 bool isSubtraction = !isInc;
2463
2464 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2465 type = atomicTy->getValueType();
2466 if (isInc && type->isBooleanType()) {
2467 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2468 if (isPre) {
2469 Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
2470 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2471 return Builder.getTrue();
2472 }
2473 // For an atomic bool increment, we just store true and return it for
2474 // preincrement; for postincrement we do an atomic swap with true.
2475 return Builder.CreateAtomicRMW(
2476 llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True,
2477 llvm::AtomicOrdering::SequentiallyConsistent);
2478 }
2479 // Special case for atomic increment / decrement on integers, emit
2480 // atomicrmw instructions. We skip this if we want to be doing overflow
2481 // checking, and fall into the slow path with the atomic cmpxchg loop.
2482 if (!type->isBooleanType() && type->isIntegerType() &&
2483 !(type->isUnsignedIntegerType() &&
2484 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2485 CGF.getLangOpts().getSignedOverflowBehavior() !=
2486 LangOptions::SOB_Trapping) {
2487 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2488 llvm::AtomicRMWInst::Sub;
2489 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2490 llvm::Instruction::Sub;
2491 llvm::Value *amt = CGF.EmitToMemory(
2492 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2493 llvm::Value *old =
2494 Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt,
2495 llvm::AtomicOrdering::SequentiallyConsistent);
2496 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2497 }
2498 value = EmitLoadOfLValue(LV, E->getExprLoc());
2499 input = value;
2500 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2501 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2502 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2503 value = CGF.EmitToMemory(value, type);
2504 Builder.CreateBr(opBB);
2505 Builder.SetInsertPoint(opBB);
2506 atomicPHI = Builder.CreatePHI(value->getType(), 2);
2507 atomicPHI->addIncoming(value, startBB);
2508 value = atomicPHI;
2509 } else {
2510 value = EmitLoadOfLValue(LV, E->getExprLoc());
2511 input = value;
2512 }
2513
2514 // Special case of integer increment that we have to check first: bool++.
2515 // Due to promotion rules, we get:
2516 // bool++ -> bool = bool + 1
2517 // -> bool = (int)bool + 1
2518 // -> bool = ((int)bool + 1 != 0)
2519 // An interesting aspect of this is that increment is always true.
2520 // Decrement does not have this property.
2521 if (isInc && type->isBooleanType()) {
2522 value = Builder.getTrue();
2523
2524 // Most common case by far: integer increment.
2525 } else if (type->isIntegerType()) {
2526 QualType promotedType;
2527 bool canPerformLossyDemotionCheck = false;
2528 if (type->isPromotableIntegerType()) {
2529 promotedType = CGF.getContext().getPromotedIntegerType(type);
2530 assert(promotedType != type && "Shouldn't promote to the same type.");
2531 canPerformLossyDemotionCheck = true;
2532 canPerformLossyDemotionCheck &=
2533 CGF.getContext().getCanonicalType(type) !=
2534 CGF.getContext().getCanonicalType(promotedType);
2535 canPerformLossyDemotionCheck &=
2536 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2537 type, promotedType);
2538 assert((!canPerformLossyDemotionCheck ||
2539 type->isSignedIntegerOrEnumerationType() ||
2540 promotedType->isSignedIntegerOrEnumerationType() ||
2541 ConvertType(type)->getScalarSizeInBits() ==
2542 ConvertType(promotedType)->getScalarSizeInBits()) &&
2543 "The following check expects that if we do promotion to different "
2544 "underlying canonical type, at least one of the types (either "
2545 "base or promoted) will be signed, or the bitwidths will match.");
2546 }
2547 if (CGF.SanOpts.hasOneOf(
2548 SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
2549 canPerformLossyDemotionCheck) {
2550 // While `x += 1` (for `x` with width less than int) is modeled as
2551 // promotion+arithmetics+demotion, and we can catch lossy demotion with
2552 // ease; inc/dec with width less than int can't overflow because of
2553 // promotion rules, so we omit promotion+demotion, which means that we
2554 // cannot catch lossy "demotion". Because we still want to catch these cases
2555 // when the sanitizer is enabled, we perform the promotion, then perform
2556 // the increment/decrement in the wider type, and finally
2557 // perform the demotion. This will catch lossy demotions.
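      // A hedged example of what this means in practice: for
      //   unsigned char c; ... c++;
      // with the implicit-conversion sanitizers enabled, the increment is
      // emitted roughly as a zext of c to i32, an 'add i32 ..., 1', and then a
      // checked truncation back to i8, so a wrap from 255 to 0 is reported as
      // a lossy implicit conversion.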
2558
2559 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
2560 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2561 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2562 // Do pass non-default ScalarConversionOpts so that sanitizer check is
2563 // emitted.
2564 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
2565 ScalarConversionOpts(CGF.SanOpts));
2566
2567 // Note that signed integer inc/dec with width less than int can't
2568 // overflow because of promotion rules; we're just eliding a few steps
2569 // here.
2570 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2571 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2572 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2573 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2574 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2575 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2576 } else {
2577 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2578 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2579 }
2580
2581 // Next most common: pointer increment.
2582 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2583 QualType type = ptr->getPointeeType();
2584
2585 // VLA types don't have constant size.
2586 if (const VariableArrayType *vla
2587 = CGF.getContext().getAsVariableArrayType(type)) {
2588 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2589 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2590 if (CGF.getLangOpts().isSignedOverflowDefined())
2591 value = Builder.CreateGEP(value->getType()->getPointerElementType(),
2592 value, numElts, "vla.inc");
2593 else
2594 value = CGF.EmitCheckedInBoundsGEP(
2595 value, numElts, /*SignedIndices=*/false, isSubtraction,
2596 E->getExprLoc(), "vla.inc");
2597
2598 // Arithmetic on function pointers (!) is just +-1.
2599 } else if (type->isFunctionType()) {
2600 llvm::Value *amt = Builder.getInt32(amount);
2601
2602 value = CGF.EmitCastToVoidPtr(value);
2603 if (CGF.getLangOpts().isSignedOverflowDefined())
2604 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
2605 else
2606 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2607 isSubtraction, E->getExprLoc(),
2608 "incdec.funcptr");
2609 value = Builder.CreateBitCast(value, input->getType());
2610
2611 // For everything else, we can just do a simple increment.
2612 } else {
2613 llvm::Value *amt = Builder.getInt32(amount);
2614 if (CGF.getLangOpts().isSignedOverflowDefined())
2615 value = Builder.CreateGEP(value->getType()->getPointerElementType(),
2616 value, amt, "incdec.ptr");
2617 else
2618 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2619 isSubtraction, E->getExprLoc(),
2620 "incdec.ptr");
2621 }
2622
2623 // Vector increment/decrement.
2624 } else if (type->isVectorType()) {
2625 if (type->hasIntegerRepresentation()) {
2626 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2627
2628 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2629 } else {
2630 value = Builder.CreateFAdd(
2631 value,
2632 llvm::ConstantFP::get(value->getType(), amount),
2633 isInc ? "inc" : "dec");
2634 }
2635
2636 // Floating point.
2637 } else if (type->isRealFloatingType()) {
2638 // Add the inc/dec to the real part.
2639 llvm::Value *amt;
2640 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
2641
2642 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2643 // Another special case: half FP increment should be done via float
2644 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2645 value = Builder.CreateCall(
2646 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2647 CGF.CGM.FloatTy),
2648 input, "incdec.conv");
2649 } else {
2650 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2651 }
2652 }
2653
2654 if (value->getType()->isFloatTy())
2655 amt = llvm::ConstantFP::get(VMContext,
2656 llvm::APFloat(static_cast<float>(amount)));
2657 else if (value->getType()->isDoubleTy())
2658 amt = llvm::ConstantFP::get(VMContext,
2659 llvm::APFloat(static_cast<double>(amount)));
2660 else {
2661 // Remaining types are Half, LongDouble or __float128. Convert from float.
2662 llvm::APFloat F(static_cast<float>(amount));
2663 bool ignored;
2664 const llvm::fltSemantics *FS;
2665 // Don't use getFloatTypeSemantics because Half isn't
2666 // necessarily represented using the "half" LLVM type.
2667 if (value->getType()->isFP128Ty())
2668 FS = &CGF.getTarget().getFloat128Format();
2669 else if (value->getType()->isHalfTy())
2670 FS = &CGF.getTarget().getHalfFormat();
2671 else
2672 FS = &CGF.getTarget().getLongDoubleFormat();
2673 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
2674 amt = llvm::ConstantFP::get(VMContext, F);
2675 }
2676 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2677
2678 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2679 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2680 value = Builder.CreateCall(
2681 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2682 CGF.CGM.FloatTy),
2683 value, "incdec.conv");
2684 } else {
2685 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
2686 }
2687 }
2688
2689 // Fixed-point types.
2690 } else if (type->isFixedPointType()) {
2691 // Fixed-point types are tricky. In some cases, it isn't possible to
2692 // represent a 1 or a -1 in the type at all. Piggyback off of
2693 // EmitFixedPointBinOp to avoid having to reimplement saturation.
2694 BinOpInfo Info;
2695 Info.E = E;
2696 Info.Ty = E->getType();
2697 Info.Opcode = isInc ? BO_Add : BO_Sub;
2698 Info.LHS = value;
2699 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
2700 // If the type is signed, it's better to represent this as +(-1) or -(-1),
2701 // since -1 is guaranteed to be representable.
2702 if (type->isSignedFixedPointType()) {
2703 Info.Opcode = isInc ? BO_Sub : BO_Add;
2704 Info.RHS = Builder.CreateNeg(Info.RHS);
2705 }
2706 // Now, convert from our invented integer literal to the type of the unary
2707 // op. This will upscale and saturate if necessary. This value can become
2708 // undef in some cases.
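    // For example, for a signed _Fract, whose range is [-1, 1), ++x is emitted
    // as x - (-1): -1 is exactly representable there, while +1 is not.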
2709 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
2710 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
2711 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
2712 value = EmitFixedPointBinOp(Info);
2713
2714 // Objective-C pointer types.
2715 } else {
2716 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2717 value = CGF.EmitCastToVoidPtr(value);
2718
2719 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2720 if (!isInc) size = -size;
2721 llvm::Value *sizeValue =
2722 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2723
2724 if (CGF.getLangOpts().isSignedOverflowDefined())
2725 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
2726 else
2727 value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
2728 /*SignedIndices=*/false, isSubtraction,
2729 E->getExprLoc(), "incdec.objptr");
2730 value = Builder.CreateBitCast(value, input->getType());
2731 }
2732
2733 if (atomicPHI) {
2734 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
2735 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2736 auto Pair = CGF.EmitAtomicCompareExchange(
2737 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2738 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2739 llvm::Value *success = Pair.second;
2740 atomicPHI->addIncoming(old, curBlock);
2741 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
2742 Builder.SetInsertPoint(contBB);
2743 return isPre ? value : input;
2744 }
2745
2746 // Store the updated result through the lvalue.
2747 if (LV.isBitField())
2748 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
2749 else
2750 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
2751
2752 // If this is a postinc, return the value read from memory, otherwise use the
2753 // updated value.
2754 return isPre ? value : input;
2755 }
2756
2757
2758
2759 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
2760 TestAndClearIgnoreResultAssign();
2761 Value *Op = Visit(E->getSubExpr());
2762
2763 // Generate a unary FNeg for FP ops.
2764 if (Op->getType()->isFPOrFPVectorTy())
2765 return Builder.CreateFNeg(Op, "fneg");
2766
2767 // Emit unary minus with EmitSub so we handle overflow cases etc.
2768 BinOpInfo BinOp;
2769 BinOp.RHS = Op;
2770 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
2771 BinOp.Ty = E->getType();
2772 BinOp.Opcode = BO_Sub;
2773 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
2774 BinOp.E = E;
2775 return EmitSub(BinOp);
2776 }
2777
2778 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2779 TestAndClearIgnoreResultAssign();
2780 Value *Op = Visit(E->getSubExpr());
2781 return Builder.CreateNot(Op, "neg");
2782 }
2783
2784 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2785 // Perform vector logical not on comparison with zero vector.
2786 if (E->getType()->isVectorType() &&
2787 E->getType()->castAs<VectorType>()->getVectorKind() ==
2788 VectorType::GenericVector) {
2789 Value *Oper = Visit(E->getSubExpr());
2790 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
2791 Value *Result;
2792 if (Oper->getType()->isFPOrFPVectorTy()) {
2793 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
2794 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
2795 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2796 } else
2797 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
2798 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2799 }
2800
2801 // Compare operand to zero.
2802 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2803
2804 // Invert value.
2805 // TODO: Could dynamically modify easy computations here. For example, if
2806 // the operand is an icmp ne, turn into icmp eq.
2807 BoolVal = Builder.CreateNot(BoolVal, "lnot");
2808
2809 // ZExt result to the expr type.
2810 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
2811 }
2812
2813 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2814 // Try folding the offsetof to a constant.
2815 Expr::EvalResult EVResult;
2816 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
2817 llvm::APSInt Value = EVResult.Val.getInt();
2818 return Builder.getInt(Value);
2819 }
2820
2821 // Loop over the components of the offsetof to compute the value.
2822 unsigned n = E->getNumComponents();
2823 llvm::Type* ResultType = ConvertType(E->getType());
2824 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
2825 QualType CurrentType = E->getTypeSourceInfo()->getType();
2826 for (unsigned i = 0; i != n; ++i) {
2827 OffsetOfNode ON = E->getComponent(i);
2828 llvm::Value *Offset = nullptr;
2829 switch (ON.getKind()) {
2830 case OffsetOfNode::Array: {
2831 // Compute the index
2832 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
2833 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
2834 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2835 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
2836
2837 // Save the element type
2838 CurrentType =
2839 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
2840
2841 // Compute the element size
2842 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
2843 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
2844
2845 // Multiply out to compute the result
2846 Offset = Builder.CreateMul(Idx, ElemSize);
2847 break;
2848 }
2849
2850 case OffsetOfNode::Field: {
2851 FieldDecl *MemberDecl = ON.getField();
2852 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2853 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2854
2855 // Compute the index of the field in its parent.
2856 unsigned i = 0;
2857 // FIXME: It would be nice if we didn't have to loop here!
2858 for (RecordDecl::field_iterator Field = RD->field_begin(),
2859 FieldEnd = RD->field_end();
2860 Field != FieldEnd; ++Field, ++i) {
2861 if (*Field == MemberDecl)
2862 break;
2863 }
2864 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
2865
2866 // Compute the offset to the field
2867 int64_t OffsetInt = RL.getFieldOffset(i) /
2868 CGF.getContext().getCharWidth();
2869 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
2870
2871 // Save the element type.
2872 CurrentType = MemberDecl->getType();
2873 break;
2874 }
2875
2876 case OffsetOfNode::Identifier:
2877 llvm_unreachable("dependent __builtin_offsetof");
2878
2879 case OffsetOfNode::Base: {
2880 if (ON.getBase()->isVirtual()) {
2881 CGF.ErrorUnsupported(E, "virtual base in offsetof");
2882 continue;
2883 }
2884
2885 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2886 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2887
2888 // Save the element type.
2889 CurrentType = ON.getBase()->getType();
2890
2891 // Compute the offset to the base.
2892 const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2893 CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
2894 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
2895 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
2896 break;
2897 }
2898 }
2899 Result = Builder.CreateAdd(Result, Offset);
2900 }
2901 return Result;
2902 }
2903
2904 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
2905 /// the argument of the sizeof expression as an integer.
2906 Value *
2907 ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2908 const UnaryExprOrTypeTraitExpr *E) {
2909 QualType TypeToSize = E->getTypeOfArgument();
2910 if (E->getKind() == UETT_SizeOf) {
2911 if (const VariableArrayType *VAT =
2912 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2913 if (E->isArgumentType()) {
2914 // sizeof(type) - make sure to emit the VLA size.
2915 CGF.EmitVariablyModifiedType(TypeToSize);
2916 } else {
2917 // C99 6.5.3.4p2: If the argument is an expression of type
2918 // VLA, it is evaluated.
2919 CGF.EmitIgnoredExpr(E->getArgumentExpr());
2920 }
2921
2922 auto VlaSize = CGF.getVLASize(VAT);
2923 llvm::Value *size = VlaSize.NumElts;
2924
2925 // Scale the number of non-VLA elements by the non-VLA element size.
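      // For example, 'sizeof(int[n])' is emitted as n * sizeof(int).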
2926 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
2927 if (!eltSize.isOne())
2928 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
2929
2930 return size;
2931 }
2932 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
2933 auto Alignment =
2934 CGF.getContext()
2935 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2936 E->getTypeOfArgument()->getPointeeType()))
2937 .getQuantity();
2938 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
2939 }
2940
2941 // If this isn't sizeof(vla), the result must be constant; use the constant
2942 // folding logic so we don't have to duplicate it here.
2943 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
2944 }
2945
2946 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
2947 Expr *Op = E->getSubExpr();
2948 if (Op->getType()->isAnyComplexType()) {
2949 // If it's an l-value, load through the appropriate subobject l-value.
2950 // Note that we have to ask E because Op might be an l-value that
2951 // this won't work for, e.g. an Obj-C property.
2952 if (E->isGLValue())
2953 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2954 E->getExprLoc()).getScalarVal();
2955
2956 // Otherwise, calculate and project.
2957 return CGF.EmitComplexExpr(Op, false, true).first;
2958 }
2959
2960 return Visit(Op);
2961 }
2962
2963 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
2964 Expr *Op = E->getSubExpr();
2965 if (Op->getType()->isAnyComplexType()) {
2966 // If it's an l-value, load through the appropriate subobject l-value.
2967 // Note that we have to ask E because Op might be an l-value that
2968 // this won't work for, e.g. an Obj-C property.
2969 if (Op->isGLValue())
2970 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2971 E->getExprLoc()).getScalarVal();
2972
2973 // Otherwise, calculate and project.
2974 return CGF.EmitComplexExpr(Op, true, false).second;
2975 }
2976
2977 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2978 // effects are evaluated, but not the actual value.
2979 if (Op->isGLValue())
2980 CGF.EmitLValue(Op);
2981 else
2982 CGF.EmitScalarExpr(Op, true);
2983 return llvm::Constant::getNullValue(ConvertType(E->getType()));
2984 }
2985
2986 //===----------------------------------------------------------------------===//
2987 // Binary Operators
2988 //===----------------------------------------------------------------------===//
2989
2990 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
2991 TestAndClearIgnoreResultAssign();
2992 BinOpInfo Result;
2993 Result.LHS = Visit(E->getLHS());
2994 Result.RHS = Visit(E->getRHS());
2995 Result.Ty = E->getType();
2996 Result.Opcode = E->getOpcode();
2997 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
2998 Result.E = E;
2999 return Result;
3000 }
3001
3002 LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3003 const CompoundAssignOperator *E,
3004 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3005 Value *&Result) {
3006 QualType LHSTy = E->getLHS()->getType();
3007 BinOpInfo OpInfo;
3008
3009 if (E->getComputationResultType()->isAnyComplexType())
3010 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
3011
3012 // Emit the RHS first. __block variables need to have the rhs evaluated
3013 // first, plus this should improve codegen a little.
3014 OpInfo.RHS = Visit(E->getRHS());
3015 OpInfo.Ty = E->getComputationResultType();
3016 OpInfo.Opcode = E->getOpcode();
3017 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3018 OpInfo.E = E;
3019 // Load/convert the LHS.
3020 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3021
3022 llvm::PHINode *atomicPHI = nullptr;
3023 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3024 QualType type = atomicTy->getValueType();
3025 if (!type->isBooleanType() && type->isIntegerType() &&
3026 !(type->isUnsignedIntegerType() &&
3027 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3028 CGF.getLangOpts().getSignedOverflowBehavior() !=
3029 LangOptions::SOB_Trapping) {
3030 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3031 llvm::Instruction::BinaryOps Op;
3032 switch (OpInfo.Opcode) {
3033 // We don't have atomicrmw operands for *, %, /, <<, >>
3034 case BO_MulAssign: case BO_DivAssign:
3035 case BO_RemAssign:
3036 case BO_ShlAssign:
3037 case BO_ShrAssign:
3038 break;
3039 case BO_AddAssign:
3040 AtomicOp = llvm::AtomicRMWInst::Add;
3041 Op = llvm::Instruction::Add;
3042 break;
3043 case BO_SubAssign:
3044 AtomicOp = llvm::AtomicRMWInst::Sub;
3045 Op = llvm::Instruction::Sub;
3046 break;
3047 case BO_AndAssign:
3048 AtomicOp = llvm::AtomicRMWInst::And;
3049 Op = llvm::Instruction::And;
3050 break;
3051 case BO_XorAssign:
3052 AtomicOp = llvm::AtomicRMWInst::Xor;
3053 Op = llvm::Instruction::Xor;
3054 break;
3055 case BO_OrAssign:
3056 AtomicOp = llvm::AtomicRMWInst::Or;
3057 Op = llvm::Instruction::Or;
3058 break;
3059 default:
3060 llvm_unreachable("Invalid compound assignment type");
3061 }
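      // For example, with '_Atomic int x', 'x += 1' maps to an atomicrmw add
      // below (when signed overflow is not trapping), while 'x *= 2' has no
      // atomicrmw form and falls through to the compare-exchange loop further
      // down.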
3062 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3063 llvm::Value *Amt = CGF.EmitToMemory(
3064 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
3065 E->getExprLoc()),
3066 LHSTy);
3067 Value *OldVal = Builder.CreateAtomicRMW(
3068 AtomicOp, LHSLV.getPointer(CGF), Amt,
3069 llvm::AtomicOrdering::SequentiallyConsistent);
3070
3071         // Since the operation is atomic, the result type is guaranteed to
3072         // be the same as the input in LLVM terms.
3073 Result = Builder.CreateBinOp(Op, OldVal, Amt);
3074 return LHSLV;
3075 }
3076 }
3077 // FIXME: For floating point types, we should be saving and restoring the
3078 // floating point environment in the loop.
3079 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3080 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3081 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3082 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
3083 Builder.CreateBr(opBB);
3084 Builder.SetInsertPoint(opBB);
3085 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
3086 atomicPHI->addIncoming(OpInfo.LHS, startBB);
3087 OpInfo.LHS = atomicPHI;
3088 }
3089 else
3090 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3091
3092 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3093 SourceLocation Loc = E->getExprLoc();
3094 OpInfo.LHS =
3095 EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
3096
3097 // Expand the binary operator.
3098 Result = (this->*Func)(OpInfo);
3099
3100 // Convert the result back to the LHS type,
3101 // potentially with Implicit Conversion sanitizer check.
3102 Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
3103 Loc, ScalarConversionOpts(CGF.SanOpts));
3104
3105 if (atomicPHI) {
3106 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3107 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3108 auto Pair = CGF.EmitAtomicCompareExchange(
3109 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
3110 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
3111 llvm::Value *success = Pair.second;
3112 atomicPHI->addIncoming(old, curBlock);
3113 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3114 Builder.SetInsertPoint(contBB);
3115 return LHSLV;
3116 }
3117
3118 // Store the result value into the LHS lvalue. Bit-fields are handled
3119 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3120 // 'An assignment expression has the value of the left operand after the
3121 // assignment...'.
3122 if (LHSLV.isBitField())
3123 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
3124 else
3125 CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
3126
3127 if (CGF.getLangOpts().OpenMP)
3128 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
3129 E->getLHS());
3130 return LHSLV;
3131 }
3132
3133 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3134 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3135 bool Ignore = TestAndClearIgnoreResultAssign();
3136 Value *RHS = nullptr;
3137 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3138
3139 // If the result is clearly ignored, return now.
3140 if (Ignore)
3141 return nullptr;
3142
3143 // The result of an assignment in C is the assigned r-value.
3144 if (!CGF.getLangOpts().CPlusPlus)
3145 return RHS;
3146
3147 // If the lvalue is non-volatile, return the computed value of the assignment.
3148 if (!LHS.isVolatileQualified())
3149 return RHS;
3150
3151 // Otherwise, reload the value.
3152 return EmitLoadOfLValue(LHS, E->getExprLoc());
3153 }
3154
3155 void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3156 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3157 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
3158
3159 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
3160 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
3161 SanitizerKind::IntegerDivideByZero));
3162 }
3163
3164 const auto *BO = cast<BinaryOperator>(Ops.E);
3165 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
3166 Ops.Ty->hasSignedIntegerRepresentation() &&
3167 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
3168 Ops.mayHaveIntegerOverflow()) {
3169 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
3170
3171 llvm::Value *IntMin =
3172 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
3173 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
3174
3175 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
3176 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
3177 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
3178 Checks.push_back(
3179 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
3180 }
3181
3182 if (Checks.size() > 0)
3183 EmitBinOpCheck(Checks, Ops);
3184 }
3185
3186 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3187 {
3188 CodeGenFunction::SanitizerScope SanScope(&CGF);
3189 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3190 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3191 Ops.Ty->isIntegerType() &&
3192 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3193 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3194 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
3195 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
3196 Ops.Ty->isRealFloatingType() &&
3197 Ops.mayHaveFloatDivisionByZero()) {
3198 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3199 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
3200 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
3201 Ops);
3202 }
3203 }
3204
3205 if (Ops.Ty->isConstantMatrixType()) {
3206 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3207 // We need to check the types of the operands of the operator to get the
3208 // correct matrix dimensions.
3209 auto *BO = cast<BinaryOperator>(Ops.E);
3210 (void)BO;
3211 assert(
3212 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
3213 "first operand must be a matrix");
3214 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
3215 "second operand must be an arithmetic type");
3216 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3217 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
3218 Ops.Ty->hasUnsignedIntegerRepresentation());
3219 }
3220
3221 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3222 llvm::Value *Val;
3223 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3224 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3225 if ((CGF.getLangOpts().OpenCL &&
3226 !CGF.CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
3227 (CGF.getLangOpts().HIP && CGF.getLangOpts().CUDAIsDevice &&
3228 !CGF.CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
3229 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
3230 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
3231 // build option allows an application to specify that single precision
3232 // floating-point divide (x/y and 1/x) and sqrt used in the program
3233 // source are correctly rounded.
3234 llvm::Type *ValTy = Val->getType();
3235 if (ValTy->isFloatTy() ||
3236 (isa<llvm::VectorType>(ValTy) &&
3237 cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
3238 CGF.SetFPAccuracy(Val, 2.5);
3239 }
3240 return Val;
3241 }
3242 else if (Ops.isFixedPointOp())
3243 return EmitFixedPointBinOp(Ops);
3244 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3245 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
3246 else
3247 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
3248 }
3249
3250 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3251 // Rem in C can't be a floating point type: C99 6.5.5p2.
3252 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3253 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3254 Ops.Ty->isIntegerType() &&
3255 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3256 CodeGenFunction::SanitizerScope SanScope(&CGF);
3257 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3258 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3259 }
3260
3261 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3262 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3263 else
3264 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3265 }
3266
3267 Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3268 unsigned IID;
3269 unsigned OpID = 0;
3270 SanitizerHandler OverflowKind;
3271
3272 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3273 switch (Ops.Opcode) {
3274 case BO_Add:
3275 case BO_AddAssign:
3276 OpID = 1;
3277 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3278 llvm::Intrinsic::uadd_with_overflow;
3279 OverflowKind = SanitizerHandler::AddOverflow;
3280 break;
3281 case BO_Sub:
3282 case BO_SubAssign:
3283 OpID = 2;
3284 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3285 llvm::Intrinsic::usub_with_overflow;
3286 OverflowKind = SanitizerHandler::SubOverflow;
3287 break;
3288 case BO_Mul:
3289 case BO_MulAssign:
3290 OpID = 3;
3291 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3292 llvm::Intrinsic::umul_with_overflow;
3293 OverflowKind = SanitizerHandler::MulOverflow;
3294 break;
3295 default:
3296 llvm_unreachable("Unsupported operation for overflow detection");
3297 }
3298 OpID <<= 1;
3299 if (isSigned)
3300 OpID |= 1;
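  // The handler thus receives OpID = (operation kind << 1) | signedness,
  // e.g. 3 for a signed add or 4 for an unsigned subtract.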
3301
3302 CodeGenFunction::SanitizerScope SanScope(&CGF);
3303 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
3304
3305 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
3306
3307 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
3308 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
3309 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
3310
3311 // Handle overflow with llvm.trap if no custom handler has been specified.
3312 const std::string *handlerName =
3313 &CGF.getLangOpts().OverflowHandler;
3314 if (handlerName->empty()) {
3315 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3316 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3317 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
3318 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
3319 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3320 : SanitizerKind::UnsignedIntegerOverflow;
3321 EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
3322 } else
3323 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
3324 return result;
3325 }
3326
3327 // Branch in case of overflow.
3328 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3329 llvm::BasicBlock *continueBB =
3330 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
3331 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
3332
3333 Builder.CreateCondBr(overflow, overflowBB, continueBB);
3334
3335 // If an overflow handler is set, then we want to call it and then use its
3336 // result, if it returns.
3337 Builder.SetInsertPoint(overflowBB);
3338
3339 // Get the overflow handler.
3340 llvm::Type *Int8Ty = CGF.Int8Ty;
3341 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3342 llvm::FunctionType *handlerTy =
3343 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
3344 llvm::FunctionCallee handler =
3345 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
3346
3347 // Sign extend the args to 64-bit, so that we can use the same handler for
3348 // all types of overflow.
3349 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
3350 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
3351
3352 // Call the handler with the two arguments, the operation, and the size of
3353 // the result.
3354 llvm::Value *handlerArgs[] = {
3355 lhs,
3356 rhs,
3357 Builder.getInt8(OpID),
3358 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
3359 };
3360 llvm::Value *handlerResult =
3361 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
3362
3363 // Truncate the result back to the desired size.
3364 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
3365 Builder.CreateBr(continueBB);
3366
3367 Builder.SetInsertPoint(continueBB);
3368 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
3369 phi->addIncoming(result, initialBB);
3370 phi->addIncoming(handlerResult, overflowBB);
3371
3372 return phi;
3373 }
3374
3375 /// Emit pointer + index arithmetic.
3376 static Value *emitPointerArithmetic(CodeGenFunction &CGF,
3377 const BinOpInfo &op,
3378 bool isSubtraction) {
3379 // Must have binary (not unary) expr here. Unary pointer
3380 // increment/decrement doesn't use this path.
3381 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3382
3383 Value *pointer = op.LHS;
3384 Expr *pointerOperand = expr->getLHS();
3385 Value *index = op.RHS;
3386 Expr *indexOperand = expr->getRHS();
3387
3388 // In a subtraction, the LHS is always the pointer.
3389 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
3390 std::swap(pointer, index);
3391 std::swap(pointerOperand, indexOperand);
3392 }
3393
3394 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3395
3396 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
3397 auto &DL = CGF.CGM.getDataLayout();
3398 auto PtrTy = cast<llvm::PointerType>(pointer->getType());
3399
3400 // Some versions of glibc and gcc use idioms (particularly in their malloc
3401 // routines) that add a pointer-sized integer (known to be a pointer value)
3402 // to a null pointer in order to cast the value back to an integer or as
3403 // part of a pointer alignment algorithm. This is undefined behavior, but
3404 // we'd like to be able to compile programs that use it.
3405 //
3406 // Normally, we'd generate a GEP with a null-pointer base here in response
3407 // to that code, but it's also UB to dereference a pointer created that
3408 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
3409 // generate a direct cast of the integer value to a pointer.
3410 //
3411 // The idiom (p = nullptr + N) is not met if any of the following are true:
3412 //
3413 // The operation is subtraction.
3414 // The index is not pointer-sized.
3415 // The pointer type is not byte-sized.
3416 //
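  // For example, '(char *)0 + (uintptr_t)x', used to round-trip an integer
  // through a pointer type, is emitted as a plain inttoptr of the integer
  // operand rather than as a GEP off a null base.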
3417 if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
3418 op.Opcode,
3419 expr->getLHS(),
3420 expr->getRHS()))
3421 return CGF.Builder.CreateIntToPtr(index, pointer->getType());
3422
3423 if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
3424     // Zero-extend or sign-extend the index value to the pointer's index
3425     // width, according to whether the index is signed or not.
3426 index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
3427 "idx.ext");
3428 }
3429
3430 // If this is subtraction, negate the index.
3431 if (isSubtraction)
3432 index = CGF.Builder.CreateNeg(index, "idx.neg");
3433
3434 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
3435 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
3436 /*Accessed*/ false);
3437
3438 const PointerType *pointerType
3439 = pointerOperand->getType()->getAs<PointerType>();
3440 if (!pointerType) {
3441 QualType objectType = pointerOperand->getType()
3442 ->castAs<ObjCObjectPointerType>()
3443 ->getPointeeType();
3444 llvm::Value *objectSize
3445 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
3446
3447 index = CGF.Builder.CreateMul(index, objectSize);
3448
3449 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3450 result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
3451 return CGF.Builder.CreateBitCast(result, pointer->getType());
3452 }
3453
3454 QualType elementType = pointerType->getPointeeType();
3455 if (const VariableArrayType *vla
3456 = CGF.getContext().getAsVariableArrayType(elementType)) {
3457 // The element count here is the total number of non-VLA elements.
3458 llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
3459
3460 // Effectively, the multiply by the VLA size is part of the GEP.
3461 // GEP indexes are signed, and scaling an index isn't permitted to
3462 // signed-overflow, so we use the same semantics for our explicit
3463 // multiply. We suppress this if overflow is not undefined behavior.
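    // For example, for 'int (*p)[n]', 'p + i' is emitted as a GEP over
    // i * n int elements.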
3464 if (CGF.getLangOpts().isSignedOverflowDefined()) {
3465 index = CGF.Builder.CreateMul(index, numElements, "vla.index");
3466 pointer = CGF.Builder.CreateGEP(
3467 pointer->getType()->getPointerElementType(), pointer, index,
3468 "add.ptr");
3469 } else {
3470 index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
3471 pointer =
3472 CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3473 op.E->getExprLoc(), "add.ptr");
3474 }
3475 return pointer;
3476 }
3477
3478 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
3479 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
3480   // future-proof.
3481 if (elementType->isVoidType() || elementType->isFunctionType()) {
3482 Value *result = CGF.EmitCastToVoidPtr(pointer);
3483 result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
3484 return CGF.Builder.CreateBitCast(result, pointer->getType());
3485 }
3486
3487 if (CGF.getLangOpts().isSignedOverflowDefined())
3488 return CGF.Builder.CreateGEP(
3489 pointer->getType()->getPointerElementType(), pointer, index, "add.ptr");
3490
3491 return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3492 op.E->getExprLoc(), "add.ptr");
3493 }
3494
3495 // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3496 // Addend. Use negMul and negAdd to negate the first operand of the Mul or
3497 // the add operand respectively. This allows fmuladd to represent a*b-c, or
3498 // c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3499 // efficient operations.
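// For example, under -ffp-contract=on, 'a * b + c' becomes a call to
// llvm.fmuladd(a, b, c), and 'c - a * b' becomes llvm.fmuladd(-a, b, c).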
3500 static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
3501 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3502 bool negMul, bool negAdd) {
3503 assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
3504
3505 Value *MulOp0 = MulOp->getOperand(0);
3506 Value *MulOp1 = MulOp->getOperand(1);
3507 if (negMul)
3508 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
3509 if (negAdd)
3510 Addend = Builder.CreateFNeg(Addend, "neg");
3511
3512 Value *FMulAdd = nullptr;
3513 if (Builder.getIsFPConstrained()) {
3514 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
3515 "Only constrained operation should be created when Builder is in FP "
3516 "constrained mode");
3517 FMulAdd = Builder.CreateConstrainedFPCall(
3518 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
3519 Addend->getType()),
3520 {MulOp0, MulOp1, Addend});
3521 } else {
3522 FMulAdd = Builder.CreateCall(
3523 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3524 {MulOp0, MulOp1, Addend});
3525 }
3526 MulOp->eraseFromParent();
3527
3528 return FMulAdd;
3529 }
3530
3531 // Check whether it would be legal to emit an fmuladd intrinsic call to
3532 // represent op and if so, build the fmuladd.
3533 //
3534 // Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
3535 // Does NOT check the type of the operation - it's assumed that this function
3536 // will be called from contexts where it's known that the type is contractable.
3537 static Value* tryEmitFMulAdd(const BinOpInfo &op,
3538 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3539 bool isSub=false) {
3540
3541 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
3542 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
3543 "Only fadd/fsub can be the root of an fmuladd.");
3544
3545 // Check whether this op is marked as fusable.
3546 if (!op.FPFeatures.allowFPContractWithinStatement())
3547 return nullptr;
3548
3549 // We have a potentially fusable op. Look for a mul on one of the operands.
3550 // Also, make sure that the mul result isn't used directly. In that case,
3551 // there's no point creating a muladd operation.
3552 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
3553 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3554 LHSBinOp->use_empty())
3555 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3556 }
3557 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
3558 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3559 RHSBinOp->use_empty())
3560 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3561 }
3562
3563 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) {
3564 if (LHSBinOp->getIntrinsicID() ==
3565 llvm::Intrinsic::experimental_constrained_fmul &&
3566 LHSBinOp->use_empty())
3567 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3568 }
3569 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) {
3570 if (RHSBinOp->getIntrinsicID() ==
3571 llvm::Intrinsic::experimental_constrained_fmul &&
3572 RHSBinOp->use_empty())
3573 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3574 }
3575
3576 return nullptr;
3577 }
3578
3579 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
3580 if (op.LHS->getType()->isPointerTy() ||
3581 op.RHS->getType()->isPointerTy())
3582 return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);
3583
3584 if (op.Ty->isSignedIntegerOrEnumerationType()) {
3585 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3586 case LangOptions::SOB_Defined:
3587 return Builder.CreateAdd(op.LHS, op.RHS, "add");
3588 case LangOptions::SOB_Undefined:
3589 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3590 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3591 LLVM_FALLTHROUGH;
3592 case LangOptions::SOB_Trapping:
3593 if (CanElideOverflowCheck(CGF.getContext(), op))
3594 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3595 return EmitOverflowCheckedBinOp(op);
3596 }
3597 }
3598
3599 if (op.Ty->isConstantMatrixType()) {
3600 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3601 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3602 return MB.CreateAdd(op.LHS, op.RHS);
3603 }
3604
3605 if (op.Ty->isUnsignedIntegerType() &&
3606 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3607 !CanElideOverflowCheck(CGF.getContext(), op))
3608 return EmitOverflowCheckedBinOp(op);
3609
3610 if (op.LHS->getType()->isFPOrFPVectorTy()) {
3611 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3612 // Try to form an fmuladd.
3613 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
3614 return FMulAdd;
3615
3616 return Builder.CreateFAdd(op.LHS, op.RHS, "add");
3617 }
3618
3619 if (op.isFixedPointOp())
3620 return EmitFixedPointBinOp(op);
3621
3622 return Builder.CreateAdd(op.LHS, op.RHS, "add");
3623 }
3624
3625 /// The resulting value must be calculated with exact precision, so the operands
3626 /// might not have the same type.
3627 Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
3628 using llvm::APSInt;
3629 using llvm::ConstantInt;
3630
3631 // This is either a binary operation where at least one of the operands is
3632 // a fixed-point type, or a unary operation where the operand is a fixed-point
3633 // type. The result type of a binary operation is determined by
3634 // Sema::handleFixedPointConversions().
3635 QualType ResultTy = op.Ty;
3636 QualType LHSTy, RHSTy;
3637 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
3638 RHSTy = BinOp->getRHS()->getType();
3639 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
3640 // For compound assignment, the effective type of the LHS at this point
3641 // is the computation LHS type, not the actual LHS type, and the final
3642 // result type is not the type of the expression but rather the
3643 // computation result type.
3644 LHSTy = CAO->getComputationLHSType();
3645 ResultTy = CAO->getComputationResultType();
3646 } else
3647 LHSTy = BinOp->getLHS()->getType();
3648 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
3649 LHSTy = UnOp->getSubExpr()->getType();
3650 RHSTy = UnOp->getSubExpr()->getType();
3651 }
3652 ASTContext &Ctx = CGF.getContext();
3653 Value *LHS = op.LHS;
3654 Value *RHS = op.RHS;
3655
3656 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
3657 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
3658 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
3659 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
3660
3661 // Perform the actual operation.
3662 Value *Result;
3663 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3664 switch (op.Opcode) {
3665 case BO_AddAssign:
3666 case BO_Add:
3667 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
3668 break;
3669 case BO_SubAssign:
3670 case BO_Sub:
3671 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
3672 break;
3673 case BO_MulAssign:
3674 case BO_Mul:
3675 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
3676 break;
3677 case BO_DivAssign:
3678 case BO_Div:
3679 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
3680 break;
3681 case BO_ShlAssign:
3682 case BO_Shl:
3683 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
3684 break;
3685 case BO_ShrAssign:
3686 case BO_Shr:
3687 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
3688 break;
3689 case BO_LT:
3690 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
3691 case BO_GT:
3692 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
3693 case BO_LE:
3694 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3695 case BO_GE:
3696 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3697 case BO_EQ:
3698 // For equality operations, we assume any padding bits on unsigned types are
3699 // zero'd out. They could be overwritten through non-saturating operations
3700 // that cause overflow, but this leads to undefined behavior.
3701 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
3702 case BO_NE:
3703 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3704 case BO_Cmp:
3705 case BO_LAnd:
3706 case BO_LOr:
3707 llvm_unreachable("Found unimplemented fixed point binary operation");
3708 case BO_PtrMemD:
3709 case BO_PtrMemI:
3710 case BO_Rem:
3711 case BO_Xor:
3712 case BO_And:
3713 case BO_Or:
3714 case BO_Assign:
3715 case BO_RemAssign:
3716 case BO_AndAssign:
3717 case BO_XorAssign:
3718 case BO_OrAssign:
3719 case BO_Comma:
3720 llvm_unreachable("Found unsupported binary operation for fixed point types.");
3721 }
3722
3723 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
3724 BinaryOperator::isShiftAssignOp(op.Opcode);
3725 // Convert to the result type.
3726 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
3727 : CommonFixedSema,
3728 ResultFixedSema);
3729 }
3730
3731 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
3732 // The LHS is always a pointer if either side is.
3733 if (!op.LHS->getType()->isPointerTy()) {
3734 if (op.Ty->isSignedIntegerOrEnumerationType()) {
3735 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3736 case LangOptions::SOB_Defined:
3737 return Builder.CreateSub(op.LHS, op.RHS, "sub");
3738 case LangOptions::SOB_Undefined:
3739 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3740 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3741 LLVM_FALLTHROUGH;
3742 case LangOptions::SOB_Trapping:
3743 if (CanElideOverflowCheck(CGF.getContext(), op))
3744 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3745 return EmitOverflowCheckedBinOp(op);
3746 }
3747 }
3748
3749 if (op.Ty->isConstantMatrixType()) {
3750 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3751 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3752 return MB.CreateSub(op.LHS, op.RHS);
3753 }
3754
3755 if (op.Ty->isUnsignedIntegerType() &&
3756 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3757 !CanElideOverflowCheck(CGF.getContext(), op))
3758 return EmitOverflowCheckedBinOp(op);
3759
3760 if (op.LHS->getType()->isFPOrFPVectorTy()) {
3761 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3762 // Try to form an fmuladd.
3763 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
3764 return FMulAdd;
3765 return Builder.CreateFSub(op.LHS, op.RHS, "sub");
3766 }
3767
3768 if (op.isFixedPointOp())
3769 return EmitFixedPointBinOp(op);
3770
3771 return Builder.CreateSub(op.LHS, op.RHS, "sub");
3772 }
3773
3774 // If the RHS is not a pointer, then we have normal pointer
3775 // arithmetic.
3776 if (!op.RHS->getType()->isPointerTy())
3777 return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
3778
3779 // Otherwise, this is a pointer subtraction.
3780
3781 // Do the raw subtraction part.
3782 llvm::Value *LHS
3783 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
3784 llvm::Value *RHS
3785 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
3786 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
3787
3788 // Okay, figure out the element size.
3789 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3790 QualType elementType = expr->getLHS()->getType()->getPointeeType();
3791
3792 llvm::Value *divisor = nullptr;
3793
3794 // For a variable-length array, this is going to be non-constant.
3795 if (const VariableArrayType *vla
3796 = CGF.getContext().getAsVariableArrayType(elementType)) {
3797 auto VlaSize = CGF.getVLASize(vla);
3798 elementType = VlaSize.Type;
3799 divisor = VlaSize.NumElts;
3800
3801 // Scale the number of non-VLA elements by the non-VLA element size.
3802 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
3803 if (!eltSize.isOne())
3804 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
3805
3806   // For everything else, we can just compute it, safe in the
3807 // assumption that Sema won't let anything through that we can't
3808 // safely compute the size of.
3809 } else {
3810 CharUnits elementSize;
3811 // Handle GCC extension for pointer arithmetic on void* and
3812 // function pointer types.
3813 if (elementType->isVoidType() || elementType->isFunctionType())
3814 elementSize = CharUnits::One();
3815 else
3816 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
3817
3818 // Don't even emit the divide for element size of 1.
3819 if (elementSize.isOne())
3820 return diffInChars;
3821
3822 divisor = CGF.CGM.getSize(elementSize);
3823 }
3824
3825 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
3826 // pointer difference in C is only defined in the case where both operands
3827 // are pointing to elements of an array.
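  // For example, for two 'int *' operands on a target with 4-byte int, this is
  // (ptrtoint LHS - ptrtoint RHS) divided exactly by 4.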
3828 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
3829 }
3830
3831 Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
3832 llvm::IntegerType *Ty;
3833 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3834 Ty = cast<llvm::IntegerType>(VT->getElementType());
3835 else
3836 Ty = cast<llvm::IntegerType>(LHS->getType());
3837 return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
3838 }
3839
3840 Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
3841 const Twine &Name) {
3842 llvm::IntegerType *Ty;
3843 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3844 Ty = cast<llvm::IntegerType>(VT->getElementType());
3845 else
3846 Ty = cast<llvm::IntegerType>(LHS->getType());
3847
3848 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
3849 return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name);
3850
3851 return Builder.CreateURem(
3852 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
3853 }
3854
3855 Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
3856 // TODO: This misses out on the sanitizer check below.
3857 if (Ops.isFixedPointOp())
3858 return EmitFixedPointBinOp(Ops);
3859
3860 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3861 // RHS to the same size as the LHS.
3862 Value *RHS = Ops.RHS;
3863 if (Ops.LHS->getType() != RHS->getType())
3864 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3865
3866 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
3867 Ops.Ty->hasSignedIntegerRepresentation() &&
3868 !CGF.getLangOpts().isSignedOverflowDefined() &&
3869 !CGF.getLangOpts().CPlusPlus20;
3870 bool SanitizeUnsignedBase =
3871 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
3872 Ops.Ty->hasUnsignedIntegerRepresentation();
3873 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
3874 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
3875 // OpenCL 6.3j: shift values are effectively % word size of LHS.
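  // For example, with a 32-bit LHS, 'x << 33' is emitted as 'x << (33 & 31)',
  // i.e. 'x << 1'.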
3876 if (CGF.getLangOpts().OpenCL)
3877 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
3878 else if ((SanitizeBase || SanitizeExponent) &&
3879 isa<llvm::IntegerType>(Ops.LHS->getType())) {
3880 CodeGenFunction::SanitizerScope SanScope(&CGF);
3881 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
3882 llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
3883 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
3884
3885 if (SanitizeExponent) {
3886 Checks.push_back(
3887 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
3888 }
3889
3890 if (SanitizeBase) {
3891 // Check whether we are shifting any non-zero bits off the top of the
3892       // integer. We only emit this check if the exponent is valid; otherwise
3893 // instructions below will have undefined behavior themselves.
3894 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
3895 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3896 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
3897 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
3898 llvm::Value *PromotedWidthMinusOne =
3899 (RHS == Ops.RHS) ? WidthMinusOne
3900 : GetWidthMinusOneValue(Ops.LHS, RHS);
3901 CGF.EmitBlock(CheckShiftBase);
3902 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
3903 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
3904 /*NUW*/ true, /*NSW*/ true),
3905 "shl.check");
3906 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
3907 // In C99, we are not permitted to shift a 1 bit into the sign bit.
3908 // Under C++11's rules, shifting a 1 bit into the sign bit is
3909 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
3910 // define signed left shifts, so we use the C99 and C++11 rules there).
3911 // Unsigned shifts can always shift into the top bit.
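        // For example, for a 32-bit int, '1 << 31' passes this check under the
        // C++ rules, while '2 << 31' does not, since a non-zero bit would be
        // shifted out past the sign bit.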
3912 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
3913 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
3914 }
3915 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
3916 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
3917 CGF.EmitBlock(Cont);
3918 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
3919 BaseCheck->addIncoming(Builder.getTrue(), Orig);
3920 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
3921 Checks.push_back(std::make_pair(
3922 BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
3923 : SanitizerKind::UnsignedShiftBase));
3924 }
3925
3926 assert(!Checks.empty());
3927 EmitBinOpCheck(Checks, Ops);
3928 }
3929
3930 return Builder.CreateShl(Ops.LHS, RHS, "shl");
3931 }
3932
3933 Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
3934 // TODO: This misses out on the sanitizer check below.
3935 if (Ops.isFixedPointOp())
3936 return EmitFixedPointBinOp(Ops);
3937
3938 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3939 // RHS to the same size as the LHS.
3940 Value *RHS = Ops.RHS;
3941 if (Ops.LHS->getType() != RHS->getType())
3942 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3943
3944 // OpenCL 6.3j: shift values are effectively % word size of LHS.
3945 if (CGF.getLangOpts().OpenCL)
3946 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
3947 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
3948 isa<llvm::IntegerType>(Ops.LHS->getType())) {
3949 CodeGenFunction::SanitizerScope SanScope(&CGF);
3950 llvm::Value *Valid =
3951 Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
3952 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
3953 }
3954
3955 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3956 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
3957 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
3958 }
3959
3960 enum IntrinsicType { VCMPEQ, VCMPGT };
3961 // Return the corresponding comparison intrinsic for the given vector element type.
3962 static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
3963 BuiltinType::Kind ElemKind) {
3964 switch (ElemKind) {
3965 default: llvm_unreachable("unexpected element type");
3966 case BuiltinType::Char_U:
3967 case BuiltinType::UChar:
3968 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3969 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
3970 case BuiltinType::Char_S:
3971 case BuiltinType::SChar:
3972 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3973 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
3974 case BuiltinType::UShort:
3975 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3976 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
3977 case BuiltinType::Short:
3978 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3979 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
3980 case BuiltinType::UInt:
3981 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3982 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
3983 case BuiltinType::Int:
3984 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3985 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
3986 case BuiltinType::ULong:
3987 case BuiltinType::ULongLong:
3988 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3989 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
3990 case BuiltinType::Long:
3991 case BuiltinType::LongLong:
3992 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3993 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
3994 case BuiltinType::Float:
3995 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
3996 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
3997 case BuiltinType::Double:
3998 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
3999 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
4000 case BuiltinType::UInt128:
4001 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4002 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
4003 case BuiltinType::Int128:
4004 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4005 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
4006 }
4007 }
4008
4009 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
4010 llvm::CmpInst::Predicate UICmpOpc,
4011 llvm::CmpInst::Predicate SICmpOpc,
4012 llvm::CmpInst::Predicate FCmpOpc,
4013 bool IsSignaling) {
4014 TestAndClearIgnoreResultAssign();
4015 Value *Result;
4016 QualType LHSTy = E->getLHS()->getType();
4017 QualType RHSTy = E->getRHS()->getType();
4018 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
4019 assert(E->getOpcode() == BO_EQ ||
4020 E->getOpcode() == BO_NE);
4021 Value *LHS = CGF.EmitScalarExpr(E->getLHS());
4022 Value *RHS = CGF.EmitScalarExpr(E->getRHS());
4023 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
4024 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
4025 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
4026 BinOpInfo BOInfo = EmitBinOps(E);
4027 Value *LHS = BOInfo.LHS;
4028 Value *RHS = BOInfo.RHS;
4029
4030     // For AltiVec, the comparison results in a numeric type, so we use
4031     // intrinsics that compare the vectors and give 0 or 1 as a result.
4032 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
4033 // constants for mapping CR6 register bits to predicate result
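      // Roughly: CR6_LT selects the "all elements satisfy the predicate" bit
      // of CR6, and CR6_EQ the "no element satisfies it" bit; the _REV
      // variants are their negations.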
4034 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
4035
4036 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
4037
4038 // In several cases the order of the vector arguments will be reversed.
4039 Value *FirstVecArg = LHS,
4040 *SecondVecArg = RHS;
4041
4042 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
4043 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
4044
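// Illustrative summary of the cases below: BO_LT is emitted as the VCMPGT
// predicate with the vector operands swapped (i.e. b > a), while BO_LE/BO_GE
// on float operands use vcmpgefp_p directly; the CR6 constant selects which
// CR6 bit the predicate intrinsic reports.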
4045 switch(E->getOpcode()) {
4046 default: llvm_unreachable("is not a comparison operation");
4047 case BO_EQ:
4048 CR6 = CR6_LT;
4049 ID = GetIntrinsic(VCMPEQ, ElementKind);
4050 break;
4051 case BO_NE:
4052 CR6 = CR6_EQ;
4053 ID = GetIntrinsic(VCMPEQ, ElementKind);
4054 break;
4055 case BO_LT:
4056 CR6 = CR6_LT;
4057 ID = GetIntrinsic(VCMPGT, ElementKind);
4058 std::swap(FirstVecArg, SecondVecArg);
4059 break;
4060 case BO_GT:
4061 CR6 = CR6_LT;
4062 ID = GetIntrinsic(VCMPGT, ElementKind);
4063 break;
4064 case BO_LE:
4065 if (ElementKind == BuiltinType::Float) {
4066 CR6 = CR6_LT;
4067 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4068 std::swap(FirstVecArg, SecondVecArg);
4069 }
4070 else {
4071 CR6 = CR6_EQ;
4072 ID = GetIntrinsic(VCMPGT, ElementKind);
4073 }
4074 break;
4075 case BO_GE:
4076 if (ElementKind == BuiltinType::Float) {
4077 CR6 = CR6_LT;
4078 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4079 }
4080 else {
4081 CR6 = CR6_EQ;
4082 ID = GetIntrinsic(VCMPGT, ElementKind);
4083 std::swap(FirstVecArg, SecondVecArg);
4084 }
4085 break;
4086 }
4087
4088 Value *CR6Param = Builder.getInt32(CR6);
4089 llvm::Function *F = CGF.CGM.getIntrinsic(ID);
4090 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
4091
4092 // The result type of the intrinsic may not be the same as E->getType().
4093 // If E->getType() is not BoolTy, EmitScalarConversion will do the
4094 // conversion work. If E->getType() is BoolTy, EmitScalarConversion does
4095 // nothing, so if ResultTy is not i1 we must truncate it here to avoid a
4096 // crash later.
4097 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
4098 if (ResultTy->getBitWidth() > 1 &&
4099 E->getType() == CGF.getContext().BoolTy)
4100 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
4101 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4102 E->getExprLoc());
4103 }
4104
4105 if (BOInfo.isFixedPointOp()) {
4106 Result = EmitFixedPointBinOp(BOInfo);
4107 } else if (LHS->getType()->isFPOrFPVectorTy()) {
4108 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
4109 if (!IsSignaling)
4110 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
4111 else
4112 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
4113 } else if (LHSTy->hasSignedIntegerRepresentation()) {
4114 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
4115 } else {
4116 // Unsigned integers and pointers.
4117
4118 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
4119 !isa<llvm::ConstantPointerNull>(LHS) &&
4120 !isa<llvm::ConstantPointerNull>(RHS)) {
4121
4122 // Dynamic information is required to be stripped for comparisons,
4123 // because it could leak the dynamic information. Based on comparisons
4124 // of pointers to dynamic objects, the optimizer can replace one pointer
4125 // with another, which might be incorrect in presence of invariant
4126 // groups. Comparison with null is safe because null does not carry any
4127 // dynamic information.
4128 if (LHSTy.mayBeDynamicClass())
4129 LHS = Builder.CreateStripInvariantGroup(LHS);
4130 if (RHSTy.mayBeDynamicClass())
4131 RHS = Builder.CreateStripInvariantGroup(RHS);
4132 }
4133
4134 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
4135 }
4136
4137 // If this is a vector comparison, sign extend the result to the appropriate
4138 // vector integer type and return it (don't convert to bool).
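// (Illustrative: comparing two <4 x i32> operands yields a <4 x i1> value,
// which is sign-extended to <4 x i32> lanes of 0 or -1.)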
4139 if (LHSTy->isVectorType())
4140 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
4141
4142 } else {
4143 // Complex Comparison: can only be an equality comparison.
4144 CodeGenFunction::ComplexPairTy LHS, RHS;
4145 QualType CETy;
4146 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
4147 LHS = CGF.EmitComplexExpr(E->getLHS());
4148 CETy = CTy->getElementType();
4149 } else {
4150 LHS.first = Visit(E->getLHS());
4151 LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
4152 CETy = LHSTy;
4153 }
4154 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
4155 RHS = CGF.EmitComplexExpr(E->getRHS());
4156 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
4157 CTy->getElementType()) &&
4158 "The element types must always match.");
4159 (void)CTy;
4160 } else {
4161 RHS.first = Visit(E->getRHS());
4162 RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
4163 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
4164 "The element types must always match.");
4165 }
4166
4167 Value *ResultR, *ResultI;
4168 if (CETy->isRealFloatingType()) {
4169 // As complex comparisons can only be equality comparisons, they
4170 // are never signaling comparisons.
4171 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
4172 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
4173 } else {
4174 // Complex comparisons can only be equality comparisons. As such, signed
4175 // and unsigned opcodes are the same.
4176 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
4177 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
4178 }
4179
4180 if (E->getOpcode() == BO_EQ) {
4181 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
4182 } else {
4183 assert(E->getOpcode() == BO_NE &&
4184 "Complex comparison other than == or != ?");
4185 Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
4186 }
4187 }
4188
4189 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4190 E->getExprLoc());
4191 }
4192
4193 Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
4194 bool Ignore = TestAndClearIgnoreResultAssign();
4195
4196 Value *RHS;
4197 LValue LHS;
4198
4199 switch (E->getLHS()->getType().getObjCLifetime()) {
4200 case Qualifiers::OCL_Strong:
4201 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
4202 break;
4203
4204 case Qualifiers::OCL_Autoreleasing:
4205 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
4206 break;
4207
4208 case Qualifiers::OCL_ExplicitNone:
4209 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
4210 break;
4211
4212 case Qualifiers::OCL_Weak:
4213 RHS = Visit(E->getRHS());
4214 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4215 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
4216 break;
4217
4218 case Qualifiers::OCL_None:
4219 // __block variables need to have the rhs evaluated first, plus
4220 // this should improve codegen just a little.
4221 RHS = Visit(E->getRHS());
4222 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4223
4224 // Store the value into the LHS. Bit-fields are handled specially
4225 // because the result is altered by the store, i.e., [C99 6.5.16p1]
4226 // 'An assignment expression has the value of the left operand after
4227 // the assignment...'.
4228 if (LHS.isBitField()) {
4229 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
4230 } else {
4231 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
4232 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
4233 }
4234 }
4235
4236 // If the result is clearly ignored, return now.
4237 if (Ignore)
4238 return nullptr;
4239
4240 // The result of an assignment in C is the assigned r-value.
4241 if (!CGF.getLangOpts().CPlusPlus)
4242 return RHS;
4243
4244 // If the lvalue is non-volatile, return the computed value of the assignment.
4245 if (!LHS.isVolatileQualified())
4246 return RHS;
4247
4248 // Otherwise, reload the value.
4249 return EmitLoadOfLValue(LHS, E->getExprLoc());
4250 }
4251
4252 Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
4253 // Perform vector logical and on comparisons with zero vectors.
4254 if (E->getType()->isVectorType()) {
4255 CGF.incrementProfileCounter(E);
4256
4257 Value *LHS = Visit(E->getLHS());
4258 Value *RHS = Visit(E->getRHS());
4259 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4260 if (LHS->getType()->isFPOrFPVectorTy()) {
4261 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4262 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4263 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4264 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4265 } else {
4266 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4267 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4268 }
4269 Value *And = Builder.CreateAnd(LHS, RHS);
4270 return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
4271 }
4272
4273 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4274 llvm::Type *ResTy = ConvertType(E->getType());
4275
4276 // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
4277 // If we have 1 && X, just emit X without inserting the control flow.
4278 bool LHSCondVal;
4279 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4280 if (LHSCondVal) { // If we have 1 && X, just emit X.
4281 CGF.incrementProfileCounter(E);
4282
4283 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4284
4285 // If we're generating for profiling or coverage, generate a branch to a
4286 // block that increments the RHS counter needed to track branch condition
4287 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4288 // "FalseBlock" after the increment is done.
4289 if (InstrumentRegions &&
4290 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4291 llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
4292 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4293 Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
4294 CGF.EmitBlock(RHSBlockCnt);
4295 CGF.incrementProfileCounter(E->getRHS());
4296 CGF.EmitBranch(FBlock);
4297 CGF.EmitBlock(FBlock);
4298 }
4299
4300 // ZExt result to int or bool.
4301 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
4302 }
4303
4304 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
4305 if (!CGF.ContainsLabel(E->getRHS()))
4306 return llvm::Constant::getNullValue(ResTy);
4307 }
4308
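// General case: emit short-circuiting control flow. Roughly (illustrative):
//     br i1 %lhs, label %land.rhs, label %land.end
//   land.rhs:
//     %rhscond = ...           ; br label %land.end
//   land.end:
//     %res = phi i1 [ false, ... ], [ %rhscond, %land.rhs ]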
4309 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
4310 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
4311
4312 CodeGenFunction::ConditionalEvaluation eval(CGF);
4313
4314 // Branch on the LHS first. If it is false, go to the failure (cont) block.
4315 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
4316 CGF.getProfileCount(E->getRHS()));
4317
4318 // Any edges into the ContBlock at this point come from the (indeterminate
4319 // number of) short-circuit exits of the first condition. All of these values
4320 // will be false. Start setting up the PHI node in the Cont Block for this.
4321 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4322 "", ContBlock);
4323 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4324 PI != PE; ++PI)
4325 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
4326
4327 eval.begin(CGF);
4328 CGF.EmitBlock(RHSBlock);
4329 CGF.incrementProfileCounter(E);
4330 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4331 eval.end(CGF);
4332
4333 // Reacquire the RHS block, as there may be subblocks inserted.
4334 RHSBlock = Builder.GetInsertBlock();
4335
4336 // If we're generating for profiling or coverage, generate a branch on the
4337 // RHS to a block that increments the RHS true counter needed to track branch
4338 // condition coverage.
4339 if (InstrumentRegions &&
4340 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4341 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4342 Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
4343 CGF.EmitBlock(RHSBlockCnt);
4344 CGF.incrementProfileCounter(E->getRHS());
4345 CGF.EmitBranch(ContBlock);
4346 PN->addIncoming(RHSCond, RHSBlockCnt);
4347 }
4348
4349 // Emit an unconditional branch from this block to ContBlock.
4350 {
4351 // There is no need to emit line number for unconditional branch.
4352 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4353 CGF.EmitBlock(ContBlock);
4354 }
4355 // Insert an entry into the phi node for the edge with the value of RHSCond.
4356 PN->addIncoming(RHSCond, RHSBlock);
4357
4358 // Artificial location to preserve the scope information
4359 {
4360 auto NL = ApplyDebugLocation::CreateArtificial(CGF);
4361 PN->setDebugLoc(Builder.getCurrentDebugLocation());
4362 }
4363
4364 // ZExt result to int.
4365 return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
4366 }
4367
4368 Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4369 // Perform vector logical or on comparisons with zero vectors.
4370 if (E->getType()->isVectorType()) {
4371 CGF.incrementProfileCounter(E);
4372
4373 Value *LHS = Visit(E->getLHS());
4374 Value *RHS = Visit(E->getRHS());
4375 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4376 if (LHS->getType()->isFPOrFPVectorTy()) {
4377 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4378 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4379 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4380 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4381 } else {
4382 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4383 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4384 }
4385 Value *Or = Builder.CreateOr(LHS, RHS);
4386 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
4387 }
4388
4389 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4390 llvm::Type *ResTy = ConvertType(E->getType());
4391
4392 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
4393 // If we have 0 || X, just emit X without inserting the control flow.
4394 bool LHSCondVal;
4395 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4396 if (!LHSCondVal) { // If we have 0 || X, just emit X.
4397 CGF.incrementProfileCounter(E);
4398
4399 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4400
4401 // If we're generating for profiling or coverage, generate a branch to a
4402 // block that increments the RHS counter needed to track branch condition
4403 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4404 // "FalseBlock" after the increment is done.
4405 if (InstrumentRegions &&
4406 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4407 llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
4408 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4409 Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
4410 CGF.EmitBlock(RHSBlockCnt);
4411 CGF.incrementProfileCounter(E->getRHS());
4412 CGF.EmitBranch(FBlock);
4413 CGF.EmitBlock(FBlock);
4414 }
4415
4416 // ZExt result to int or bool.
4417 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
4418 }
4419
4420 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
4421 if (!CGF.ContainsLabel(E->getRHS()))
4422 return llvm::ConstantInt::get(ResTy, 1);
4423 }
4424
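// General case: mirrors the '&&' lowering above, except that the
// short-circuit edges feed 'true' into the phi in lor.end.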
4425 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
4426 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
4427
4428 CodeGenFunction::ConditionalEvaluation eval(CGF);
4429
4430 // Branch on the LHS first. If it is true, go to the success (cont) block.
4431 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
4432 CGF.getCurrentProfileCount() -
4433 CGF.getProfileCount(E->getRHS()));
4434
4435 // Any edges into the ContBlock at this point come from the (indeterminate
4436 // number of) short-circuit exits of the first condition. All of these values
4437 // will be true. Start setting up the PHI node in the Cont Block for this.
4438 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4439 "", ContBlock);
4440 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4441 PI != PE; ++PI)
4442 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
4443
4444 eval.begin(CGF);
4445
4446 // Emit the RHS condition as a bool value.
4447 CGF.EmitBlock(RHSBlock);
4448 CGF.incrementProfileCounter(E);
4449 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4450
4451 eval.end(CGF);
4452
4453 // Reacquire the RHS block, as there may be subblocks inserted.
4454 RHSBlock = Builder.GetInsertBlock();
4455
4456 // If we're generating for profiling or coverage, generate a branch on the
4457 // RHS to a block that increments the RHS true counter needed to track branch
4458 // condition coverage.
4459 if (InstrumentRegions &&
4460 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4461 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4462 Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
4463 CGF.EmitBlock(RHSBlockCnt);
4464 CGF.incrementProfileCounter(E->getRHS());
4465 CGF.EmitBranch(ContBlock);
4466 PN->addIncoming(RHSCond, RHSBlockCnt);
4467 }
4468
4469 // Emit an unconditional branch from this block to ContBlock. Insert an entry
4470 // into the phi node for the edge with the value of RHSCond.
4471 CGF.EmitBlock(ContBlock);
4472 PN->addIncoming(RHSCond, RHSBlock);
4473
4474 // ZExt result to int.
4475 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
4476 }
4477
4478 Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
4479 CGF.EmitIgnoredExpr(E->getLHS());
4480 CGF.EnsureInsertPoint();
4481 return Visit(E->getRHS());
4482 }
4483
4484 //===----------------------------------------------------------------------===//
4485 // Other Operators
4486 //===----------------------------------------------------------------------===//
4487
4488 /// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
4489 /// expression is cheap enough and side-effect-free enough to evaluate
4490 /// unconditionally instead of conditionally. This is used to convert control
4491 /// flow into selects in some cases.
4492 static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
4493 CodeGenFunction &CGF) {
4494 // Anything that is an integer or floating point constant is fine.
4495 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
4496
4497 // Even non-volatile automatic variables can't be evaluated unconditionally.
4498 // Referencing a thread_local may cause non-trivial initialization work to
4499 // occur. If we're inside a lambda and one of the variables is from the scope
4500 // outside the lambda, that function may have returned already. Reading its
4501 // locals is a bad idea. Also, these reads may introduce races that didn't
4502 // exist in the source-level program.
4503 }
4504
4505
4506 Value *ScalarExprEmitter::
4507 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
4508 TestAndClearIgnoreResultAssign();
4509
4510 // Bind the common expression if necessary.
4511 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
4512
4513 Expr *condExpr = E->getCond();
4514 Expr *lhsExpr = E->getTrueExpr();
4515 Expr *rhsExpr = E->getFalseExpr();
4516
4517 // If the condition constant folds and can be elided, try to avoid emitting
4518 // the condition and the dead arm.
4519 bool CondExprBool;
4520 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4521 Expr *live = lhsExpr, *dead = rhsExpr;
4522 if (!CondExprBool) std::swap(live, dead);
4523
4524 // If the dead side doesn't have labels we need, just emit the Live part.
4525 if (!CGF.ContainsLabel(dead)) {
4526 if (CondExprBool)
4527 CGF.incrementProfileCounter(E);
4528 Value *Result = Visit(live);
4529
4530 // If the live part is a throw expression, it acts like it has a void
4531 // type, so evaluating it returns a null Value*. However, a conditional
4532 // with non-void type must return a non-null Value*.
4533 if (!Result && !E->getType()->isVoidType())
4534 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
4535
4536 return Result;
4537 }
4538 }
4539
4540 // OpenCL: If the condition is a vector, we can treat this condition like
4541 // the select function.
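// Each result lane is chosen by the MSB of the corresponding condition lane:
// the code below builds a full-width mask from the sign bit and blends as
// (LHS & mask) | (RHS & ~mask), bitcasting float vectors to integers for the
// bitwise operations.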
4542 if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
4543 condExpr->getType()->isExtVectorType()) {
4544 CGF.incrementProfileCounter(E);
4545
4546 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4547 llvm::Value *LHS = Visit(lhsExpr);
4548 llvm::Value *RHS = Visit(rhsExpr);
4549
4550 llvm::Type *condType = ConvertType(condExpr->getType());
4551 auto *vecTy = cast<llvm::FixedVectorType>(condType);
4552
4553 unsigned numElem = vecTy->getNumElements();
4554 llvm::Type *elemType = vecTy->getElementType();
4555
4556 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
4557 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
4558 llvm::Value *tmp = Builder.CreateSExt(
4559 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
4560 llvm::Value *tmp2 = Builder.CreateNot(tmp);
4561
4562 // Cast float to int to perform ANDs if necessary.
4563 llvm::Value *RHSTmp = RHS;
4564 llvm::Value *LHSTmp = LHS;
4565 bool wasCast = false;
4566 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
4567 if (rhsVTy->getElementType()->isFloatingPointTy()) {
4568 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
4569 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
4570 wasCast = true;
4571 }
4572
4573 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
4574 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
4575 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
4576 if (wasCast)
4577 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
4578
4579 return tmp5;
4580 }
4581
4582 if (condExpr->getType()->isVectorType()) {
4583 CGF.incrementProfileCounter(E);
4584
4585 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4586 llvm::Value *LHS = Visit(lhsExpr);
4587 llvm::Value *RHS = Visit(rhsExpr);
4588
4589 llvm::Type *CondType = ConvertType(condExpr->getType());
4590 auto *VecTy = cast<llvm::VectorType>(CondType);
4591 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
4592
4593 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
4594 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
4595 }
4596
4597 // If this is a really simple expression (like x ? 4 : 5), emit this as a
4598 // select instead of as control flow. We can only do this if it is cheap and
4599 // safe to evaluate the LHS and RHS unconditionally.
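// (Illustrative: 'x ? 4 : 5' then becomes a single 'select i1 %cond, i32 4,
// i32 5' instead of a branch.)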
4600 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
4601 isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
4602 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
4603 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
4604
4605 CGF.incrementProfileCounter(E, StepV);
4606
4607 llvm::Value *LHS = Visit(lhsExpr);
4608 llvm::Value *RHS = Visit(rhsExpr);
4609 if (!LHS) {
4610 // If the conditional has void type, make sure we return a null Value*.
4611 assert(!RHS && "LHS and RHS types must match");
4612 return nullptr;
4613 }
4614 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
4615 }
4616
4617 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
4618 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
4619 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
4620
4621 CodeGenFunction::ConditionalEvaluation eval(CGF);
4622 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
4623 CGF.getProfileCount(lhsExpr));
4624
4625 CGF.EmitBlock(LHSBlock);
4626 CGF.incrementProfileCounter(E);
4627 eval.begin(CGF);
4628 Value *LHS = Visit(lhsExpr);
4629 eval.end(CGF);
4630
4631 LHSBlock = Builder.GetInsertBlock();
4632 Builder.CreateBr(ContBlock);
4633
4634 CGF.EmitBlock(RHSBlock);
4635 eval.begin(CGF);
4636 Value *RHS = Visit(rhsExpr);
4637 eval.end(CGF);
4638
4639 RHSBlock = Builder.GetInsertBlock();
4640 CGF.EmitBlock(ContBlock);
4641
4642 // If the LHS or RHS is a throw expression, it will be legitimately null.
4643 if (!LHS)
4644 return RHS;
4645 if (!RHS)
4646 return LHS;
4647
4648 // Create a PHI node for the real part.
4649 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
4650 PN->addIncoming(LHS, LHSBlock);
4651 PN->addIncoming(RHS, RHSBlock);
4652 return PN;
4653 }
4654
4655 Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
4656 return Visit(E->getChosenSubExpr());
4657 }
4658
4659 Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
4660 QualType Ty = VE->getType();
4661
4662 if (Ty->isVariablyModifiedType())
4663 CGF.EmitVariablyModifiedType(Ty);
4664
4665 Address ArgValue = Address::invalid();
4666 Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
4667
4668 llvm::Type *ArgTy = ConvertType(VE->getType());
4669
4670 // If EmitVAArg fails, emit an error.
4671 if (!ArgPtr.isValid()) {
4672 CGF.ErrorUnsupported(VE, "va_arg expression");
4673 return llvm::UndefValue::get(ArgTy);
4674 }
4675
4676 // FIXME Volatility.
4677 llvm::Value *Val = Builder.CreateLoad(ArgPtr);
4678
4679 // If EmitVAArg promoted the type, we must truncate it.
4680 if (ArgTy != Val->getType()) {
4681 if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
4682 Val = Builder.CreateIntToPtr(Val, ArgTy);
4683 else
4684 Val = Builder.CreateTrunc(Val, ArgTy);
4685 }
4686
4687 return Val;
4688 }
4689
4690 Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
4691 return CGF.EmitBlockLiteral(block);
4692 }
4693
4694 // Convert a vec3 to vec4, or vice versa.
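// A single shuffle with mask {0, 1, 2, -1} handles both directions: the mask
// is truncated to NumElementsDst lanes, so widening to vec4 adds an undef
// fourth lane and narrowing to vec3 drops it.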
4695 static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
4696 Value *Src, unsigned NumElementsDst) {
4697 static constexpr int Mask[] = {0, 1, 2, -1};
4698 return Builder.CreateShuffleVector(Src,
4699 llvm::makeArrayRef(Mask, NumElementsDst));
4700 }
4701
4702 // Create cast instructions for converting LLVM value \p Src to LLVM type \p
4703 // DstTy. \p Src has the same size as \p DstTy. Both are single value types
4704 // but could be scalar or vectors of different lengths, and either can be a
4705 // pointer.
4706 // There are 4 cases:
4707 // 1. non-pointer -> non-pointer : needs 1 bitcast
4708 // 2. pointer -> pointer : needs 1 bitcast or addrspacecast
4709 // 3. pointer -> non-pointer
4710 // a) pointer -> intptr_t : needs 1 ptrtoint
4711 // b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
4712 // 4. non-pointer -> pointer
4713 // a) intptr_t -> pointer : needs 1 inttoptr
4714 // b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
4715 // Note: for cases 3b and 4b two casts are required since LLVM casts do not
4716 // allow casting directly between pointer types and non-integer non-pointer
4717 // types.
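// (Illustrative example of case 3b, assuming a 64-bit target: casting an
// 'i8*' to '<2 x i32>' is emitted as a ptrtoint to i64 followed by a bitcast.)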
4718 static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
4719 const llvm::DataLayout &DL,
4720 Value *Src, llvm::Type *DstTy,
4721 StringRef Name = "") {
4722 auto SrcTy = Src->getType();
4723
4724 // Case 1.
4725 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
4726 return Builder.CreateBitCast(Src, DstTy, Name);
4727
4728 // Case 2.
4729 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
4730 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
4731
4732 // Case 3.
4733 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
4734 // Case 3b.
4735 if (!DstTy->isIntegerTy())
4736 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
4737 // Cases 3a and 3b.
4738 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
4739 }
4740
4741 // Case 4b.
4742 if (!SrcTy->isIntegerTy())
4743 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
4744 // Cases 4a and 4b.
4745 return Builder.CreateIntToPtr(Src, DstTy, Name);
4746 }
4747
4748 Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
4749 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
4750 llvm::Type *DstTy = ConvertType(E->getType());
4751
4752 llvm::Type *SrcTy = Src->getType();
4753 unsigned NumElementsSrc =
4754 isa<llvm::VectorType>(SrcTy)
4755 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
4756 : 0;
4757 unsigned NumElementsDst =
4758 isa<llvm::VectorType>(DstTy)
4759 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
4760 : 0;
4761
4762 // Going from vec3 to non-vec3 is a special case and requires a shuffle
4763 // vector to get a vec4, then a bitcast if the target type is different.
4764 if (NumElementsSrc == 3 && NumElementsDst != 3) {
4765 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
4766
4767 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4768 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4769 DstTy);
4770 }
4771
4772 Src->setName("astype");
4773 return Src;
4774 }
4775
4776 // Going from non-vec3 to vec3 is a special case and requires a bitcast
4777 // to vec4 if the original type is not vec4, then a shuffle vector to
4778 // get a vec3.
4779 if (NumElementsSrc != 3 && NumElementsDst == 3) {
4780 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4781 auto *Vec4Ty = llvm::FixedVectorType::get(
4782 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
4783 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4784 Vec4Ty);
4785 }
4786
4787 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
4788 Src->setName("astype");
4789 return Src;
4790 }
4791
4792 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
4793 Src, DstTy, "astype");
4794 }
4795
4796 Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
4797 return CGF.EmitAtomicExpr(E).getScalarVal();
4798 }
4799
4800 //===----------------------------------------------------------------------===//
4801 // Entry Point into this File
4802 //===----------------------------------------------------------------------===//
4803
4804 /// Emit the computation of the specified expression of scalar type, ignoring
4805 /// the result.
4806 Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
4807 assert(E && hasScalarEvaluationKind(E->getType()) &&
4808 "Invalid scalar expression to emit");
4809
4810 return ScalarExprEmitter(*this, IgnoreResultAssign)
4811 .Visit(const_cast<Expr *>(E));
4812 }
4813
4814 /// Emit a conversion from the specified type to the specified destination type,
4815 /// both of which are LLVM scalar types.
4816 Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
4817 QualType DstTy,
4818 SourceLocation Loc) {
4819 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
4820 "Invalid scalar expression to emit");
4821 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
4822 }
4823
4824 /// Emit a conversion from the specified complex type to the specified
4825 /// destination type, where the destination type is an LLVM scalar type.
4826 Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
4827 QualType SrcTy,
4828 QualType DstTy,
4829 SourceLocation Loc) {
4830 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
4831 "Invalid complex -> scalar conversion");
4832 return ScalarExprEmitter(*this)
4833 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
4834 }
4835
4836
4837 llvm::Value *CodeGenFunction::
4838 EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
4839 bool isInc, bool isPre) {
4840 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
4841 }
4842
4843 LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
4844 // object->isa or (*object).isa
4845 // Generate code as for: *(Class*)object
4846
4847 Expr *BaseExpr = E->getBase();
4848 Address Addr = Address::invalid();
4849 if (BaseExpr->isPRValue()) {
4850 Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
4851 } else {
4852 Addr = EmitLValue(BaseExpr).getAddress(*this);
4853 }
4854
4855 // Cast the address to Class*.
4856 Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
4857 return MakeAddrLValue(Addr, E->getType());
4858 }
4859
4860
4861 LValue CodeGenFunction::EmitCompoundAssignmentLValue(
4862 const CompoundAssignOperator *E) {
4863 ScalarExprEmitter Scalar(*this);
4864 Value *Result = nullptr;
4865 switch (E->getOpcode()) {
4866 #define COMPOUND_OP(Op) \
4867 case BO_##Op##Assign: \
4868 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
4869 Result)
4870 COMPOUND_OP(Mul);
4871 COMPOUND_OP(Div);
4872 COMPOUND_OP(Rem);
4873 COMPOUND_OP(Add);
4874 COMPOUND_OP(Sub);
4875 COMPOUND_OP(Shl);
4876 COMPOUND_OP(Shr);
4877 COMPOUND_OP(And);
4878 COMPOUND_OP(Xor);
4879 COMPOUND_OP(Or);
4880 #undef COMPOUND_OP
4881
4882 case BO_PtrMemD:
4883 case BO_PtrMemI:
4884 case BO_Mul:
4885 case BO_Div:
4886 case BO_Rem:
4887 case BO_Add:
4888 case BO_Sub:
4889 case BO_Shl:
4890 case BO_Shr:
4891 case BO_LT:
4892 case BO_GT:
4893 case BO_LE:
4894 case BO_GE:
4895 case BO_EQ:
4896 case BO_NE:
4897 case BO_Cmp:
4898 case BO_And:
4899 case BO_Xor:
4900 case BO_Or:
4901 case BO_LAnd:
4902 case BO_LOr:
4903 case BO_Assign:
4904 case BO_Comma:
4905 llvm_unreachable("Not valid compound assignment operators");
4906 }
4907
4908 llvm_unreachable("Unhandled compound assignment operator");
4909 }
4910
4911 struct GEPOffsetAndOverflow {
4912 // The total (signed) byte offset for the GEP.
4913 llvm::Value *TotalOffset;
4914 // The offset overflow flag - true if the total offset overflows.
4915 llvm::Value *OffsetOverflows;
4916 };
4917
4918 /// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
4919 /// and compute the total offset it applies from its base pointer BasePtr.
4920 /// Returns offset in bytes and a boolean flag whether an overflow happened
4921 /// during evaluation.
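/// (Illustrative: for array-style indexing 'p + i' over elements of type T,
/// the offset is accumulated as i * sizeof(T) via the signed
/// multiply-with-overflow intrinsic; struct field steps contribute their
/// constant byte offset from the struct layout.)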
4922 static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
4923 llvm::LLVMContext &VMContext,
4924 CodeGenModule &CGM,
4925 CGBuilderTy &Builder) {
4926 const auto &DL = CGM.getDataLayout();
4927
4928 // The total (signed) byte offset for the GEP.
4929 llvm::Value *TotalOffset = nullptr;
4930
4931 // Was the GEP already reduced to a constant?
4932 if (isa<llvm::Constant>(GEPVal)) {
4933 // Compute the offset by casting both pointers to integers and subtracting:
4934 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
4935 Value *BasePtr_int =
4936 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
4937 Value *GEPVal_int =
4938 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
4939 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
4940 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
4941 }
4942
4943 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
4944 assert(GEP->getPointerOperand() == BasePtr &&
4945 "BasePtr must be the the base of the GEP.");
4946 assert(GEP->isInBounds() && "Expected inbounds GEP");
4947
4948 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
4949
4950 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
4951 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
4952 auto *SAddIntrinsic =
4953 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
4954 auto *SMulIntrinsic =
4955 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
4956
4957 // The offset overflow flag - true if the total offset overflows.
4958 llvm::Value *OffsetOverflows = Builder.getFalse();
4959
4960 /// Return the result of the given binary operation.
4961 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
4962 llvm::Value *RHS) -> llvm::Value * {
4963 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
4964
4965 // If the operands are constants, return a constant result.
4966 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
4967 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
4968 llvm::APInt N;
4969 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
4970 /*Signed=*/true, N);
4971 if (HasOverflow)
4972 OffsetOverflows = Builder.getTrue();
4973 return llvm::ConstantInt::get(VMContext, N);
4974 }
4975 }
4976
4977 // Otherwise, compute the result with checked arithmetic.
4978 auto *ResultAndOverflow = Builder.CreateCall(
4979 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
4980 OffsetOverflows = Builder.CreateOr(
4981 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
4982 return Builder.CreateExtractValue(ResultAndOverflow, 0);
4983 };
4984
4985 // Determine the total byte offset by looking at each GEP operand.
4986 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
4987 GTI != GTE; ++GTI) {
4988 llvm::Value *LocalOffset;
4989 auto *Index = GTI.getOperand();
4990 // Compute the local offset contributed by this indexing step:
4991 if (auto *STy = GTI.getStructTypeOrNull()) {
4992 // For struct indexing, the local offset is the byte position of the
4993 // specified field.
4994 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
4995 LocalOffset = llvm::ConstantInt::get(
4996 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
4997 } else {
4998 // Otherwise this is array-like indexing. The local offset is the index
4999 // multiplied by the element size.
5000 auto *ElementSize = llvm::ConstantInt::get(
5001 IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
5002 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
5003 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
5004 }
5005
5006 // If this is the first offset, set it as the total offset. Otherwise, add
5007 // the local offset into the running total.
5008 if (!TotalOffset || TotalOffset == Zero)
5009 TotalOffset = LocalOffset;
5010 else
5011 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
5012 }
5013
5014 return {TotalOffset, OffsetOverflows};
5015 }
5016
5017 Value *
5018 CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
5019 bool SignedIndices, bool IsSubtraction,
5020 SourceLocation Loc, const Twine &Name) {
5021 llvm::Type *PtrTy = Ptr->getType();
5022 Value *GEPVal = Builder.CreateInBoundsGEP(
5023 PtrTy->getPointerElementType(), Ptr, IdxList, Name);
5024
5025 // If the pointer overflow sanitizer isn't enabled, do nothing.
5026 if (!SanOpts.has(SanitizerKind::PointerOverflow))
5027 return GEPVal;
5028
5029 // Perform nullptr-and-offset check unless the nullptr is defined.
5030 bool PerformNullCheck = !NullPointerIsDefined(
5031 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
5032 // Check for overflows unless the GEP got constant-folded,
5033 // and only in the default address space
5034 bool PerformOverflowCheck =
5035 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
5036
5037 if (!(PerformNullCheck || PerformOverflowCheck))
5038 return GEPVal;
5039
5040 const auto &DL = CGM.getDataLayout();
5041
5042 SanitizerScope SanScope(this);
5043 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
5044
5045 GEPOffsetAndOverflow EvaluatedGEP =
5046 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
5047
5048 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
5049 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
5050 "If the offset got constant-folded, we don't expect that there was an "
5051 "overflow.");
5052
5053 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
5054
5055 // Common case: if the total offset is zero, and we are using C++ semantics,
5056 // where nullptr+0 is defined, don't emit a check.
5057 if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
5058 return GEPVal;
5059
5060 // Now that we've computed the total offset, add it to the base pointer (with
5061 // wrapping semantics).
5062 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
5063 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
5064
5065 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
5066
5067 if (PerformNullCheck) {
5068 // In C++, if the base pointer evaluates to a null pointer value,
5069 // the only valid pointer this inbounds GEP can produce is also
5070 // a null pointer, so the offset must also evaluate to zero.
5071 // Likewise, if we have a non-zero base pointer, we can not get a null pointer
5072 // as a result, so the offset can not be -intptr_t(BasePtr).
5073 // In other words, both pointers are either null, or both are non-null,
5074 // or the behaviour is undefined.
5075 //
5076 // C, however, is more strict in this regard, and gives more
5077 // optimization opportunities: in C, additionally, nullptr+0 is undefined.
5078 // So both the input to the 'gep inbounds' AND the output must not be null.
5079 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
5080 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
5081 auto *Valid =
5082 CGM.getLangOpts().CPlusPlus
5083 ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
5084 : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
5085 Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
5086 }
5087
5088 if (PerformOverflowCheck) {
5089 // The GEP is valid if:
5090 // 1) The total offset doesn't overflow, and
5091 // 2) The sign of the difference between the computed address and the base
5092 // pointer matches the sign of the total offset.
5093 llvm::Value *ValidGEP;
5094 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
5095 if (SignedIndices) {
5096 // GEP is computed as `unsigned base + signed offset`, therefore:
5097 // * If offset was positive, then the computed pointer can not be
5098 // [unsigned] less than the base pointer, unless it overflowed.
5099 // * If offset was negative, then the computed pointer can not be
5100 // [unsigned] greater than the base pointer, unless it overflowed.
5101 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5102 auto *PosOrZeroOffset =
5103 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
5104 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
5105 ValidGEP =
5106 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
5107 } else if (!IsSubtraction) {
5108 // GEP is computed as `unsigned base + unsigned offset`, therefore the
5109 // computed pointer can not be [unsigned] less than base pointer,
5110 // unless there was an overflow.
5111 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
5112 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5113 } else {
5114 // GEP is computed as `unsigned base - unsigned offset`, therefore the
5115 // computed pointer can not be [unsigned] greater than base pointer,
5116 // unless there was an overflow.
5117 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
5118 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
5119 }
5120 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
5121 Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
5122 }
5123
5124 assert(!Checks.empty() && "Should have produced some checks.");
5125
5126 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
5127 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
5128 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
5129 EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
5130
5131 return GEPVal;
5132 }
5133