//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include <cstdarg>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
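/// For example, a signed i32 BO_Add of 2147483647 and 1 wraps to -2147483648;
/// \p Result is set to that wrapped value and the function returns true.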
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    if (Signed)
      Result = LHSAP.sadd_ov(RHSAP, Overflow);
    else
      Result = LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    if (Signed)
      Result = LHSAP.ssub_ov(RHSAP, Overflow);
    else
      Result = LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    if (Signed)
      Result = LHSAP.smul_ov(RHSAP, Overflow);
    else
      Result = LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
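    // Unsigned division and remainder cannot overflow; for signed operands
    // the only overflowing case is INT_MIN / -1, which sdiv_ov detects.
    // (Division by zero is checked separately.)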
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;                   // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E; // Entire expr, for error reporting. May not be a binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases,
  /// this operation did not follow the usual arithmetic conversions, and the
  /// two operands might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++11 nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                        const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return llvm::None;

  QualType BaseTy = Base->getType();
  if (!BaseTy->isPromotableIntegerType() ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return llvm::None;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).hasValue();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either
  // one of the unpromoted types is less than half the size of the promoted
  // type.
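  // For example, two 'unsigned char' operands promoted to 32-bit 'int'
  // satisfy 2 * 8 < 32, and their product (at most 255 * 255) cannot wrap.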
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here, unless the
        // alignment-assumption sanitizer is enabled, in which case we prefer
        // the assumption over the alignment attribute on the IR function
        // parameter.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = dyn_cast<TypedefType>(E->getType()))
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression that represents a value l-value,
  /// this method emits the address of the l-value, then loads and returns the
  /// result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };
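  // For example, with -fsanitize=implicit-conversion, 'char c = i;' where the
  // 'int' value does not fit in 'char' is reported as
  // ICCK_SignedIntegerTruncation.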

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after
  /// conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between a fixed-point type and either another fixed-point type
  /// or an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero =
        CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.Builder.CreateLoad(Address(
            Result, CGF.getContext().getTypeAlignInChars(E->getType())));
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }
  Value *VisitUnaryPlus(const UnaryOperator *E) {
    // This differs from gcc, most likely due to a bug in gcc.
    TestAndClearIgnoreResultAssign();
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryMinus(const UnaryOperator *E);
  Value *VisitUnaryNot(const UnaryOperator *E);
  Value *VisitUnaryLNot(const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E);
  Value *VisitUnaryImag(const UnaryOperator *E);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
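      // SOB_Defined corresponds to -fwrapv, SOB_Undefined to the default
      // language mode, and SOB_Trapping to -ftrapv.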
      case LangOptions::SOB_Defined:
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,
                                                  bool isDiv);
  // Common helper for getting the width of the LHS of a shift, minus one.
  static Value *GetWidthMinusOneValue(Value *LHS, Value *RHS);

  // Used to constrain shift amounts for OpenCL: mask when the LHS width is a
  // power of 2, URem otherwise.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr(const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                                  Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                    \
  Value *VisitBin ## OP(const BinaryOperator *E) {                         \
    return Emit ## OP(EmitBinOps(E));                                      \
  }                                                                        \
  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {       \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);          \
  }
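  // For example, HANDLEBINOP(Mul) expands to VisitBinMul and
  // VisitBinMulAssign, both of which dispatch to EmitMul above.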
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG)                                   \
  Value *VisitBin##CODE(const BinaryOperator *E) {                         \
    return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI,          \
                       llvm::FCmpInst::FP, SIG); }
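  // The SIG flag marks the relational comparisons as signaling: per IEEE 754,
  // <, >, <=, and >= raise an invalid-operation exception on quiet NaN
  // operands, while == and != do not.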
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ, ICMP_EQ, FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE, ICMP_NE, FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign(const BinaryOperator *E);

  Value *VisitBinLAnd(const BinaryOperator *E);
  Value *VisitBinLOr(const BinaryOperator *E);
  Value *VisitBinComma(const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

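  // Use ordered comparisons: both evaluate to false when Src is NaN, so a NaN
  // source fails the combined check below, as required.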
  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
  // Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value.
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
  return std::make_pair(Kind, std::make_pair(Check, Mask));
}

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be a truncation. Otherwise we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // NOTE: zero value is considered to be non-negative.
  auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
                                       const char *Name) -> Value * {
    // Is this value a signed type?
    bool VSigned = VType->isSignedIntegerOrEnumerationType();
    llvm::Type *VTy = V->getType();
    if (!VSigned) {
      // If the value is unsigned, then it is never negative.
      // FIXME: can we encounter non-scalar VTy here?
      return llvm::ConstantInt::getFalse(VTy->getContext());
    }
    // Get the zero of the same type with which we will be comparing.
    llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
    // %V.isnegative = icmp slt %V, 0
    // I.e. is %V *strictly* less than zero; does it have a negative value?
    return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                              llvm::Twine(Name) + "." + V->getName() +
                                  ".negativitycheck");
  };

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}

void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would always have
  // been dropped by the optimizer (instcombine) anyway.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to a *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType,
                                               SourceLocation Loc,
                                               ScalarConversionOpts Opts) {
  // All conversions involving fixed point types should be handled by the
  // EmitFixedPoint family functions. This is done to prevent bloating up this
  // function more, and although fixed point numbers are represented by
  // integers, we do not want to follow any logic that assumes they should be
  // treated as integers.
  // TODO(leonardchan): When necessary, add another if statement checking for
  // conversions to fixed point types from other types.
  if (SrcType->isFixedPointType()) {
    if (DstType->isBooleanType())
      // It is important that we check this before checking if the dest type is
      // an integer because booleans are technically integer types.
      // We do not need to check the padding bit on unsigned types if unsigned
      // padding is enabled because overflow into this bit is undefined
      // behavior.
      return Builder.CreateIsNotNull(Src, "tobool");
    if (DstType->isFixedPointType() || DstType->isIntegerType() ||
        DstType->isRealFloatingType())
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion from a fixed point type to another type.");
  } else if (DstType->isFixedPointType()) {
    if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
      // This also includes converting booleans and enums to fixed point types.
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion to a fixed point type from another type.");
  }

  QualType NoncanonicalSrcType = SrcType;
  QualType NoncanonicalDstType = DstType;

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(DstType);

  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
    if (DstTy->isFloatingPointTy()) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
            Src);
    } else {
      // Cast to other types through float, using either the intrinsic or
      // FPExt, depending on whether the half type itself is supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        Src = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            Src);
      } else {
        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
      }
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy) {
    if (Opts.EmitImplicitIntegerSignChangeChecks)
      EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
                                 NoncanonicalDstType, Loc);

    return Src;
  }

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Builder.CreateBitCast(Src, DstTy, "conv");

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value *IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be a ptr-to-int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type
1309 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1310 // Sema should add casts to make sure that the source expression's type is
1311 // the same as the vector's element type (sans qualifiers)
1312 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1313 SrcType.getTypePtr() &&
1314 "Splatted expr doesn't match with vector element type?");
1315
1316 // Splat the element across to all elements
1317 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1318 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1319 }
1320
1321 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1322 // Allow bitcast from vector to integer/fp of the same size.
1323 unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
1324 unsigned DstSize = DstTy->getPrimitiveSizeInBits();
1325 if (SrcSize == DstSize)
1326 return Builder.CreateBitCast(Src, DstTy, "conv");
1327
1328 // Conversions between vectors of different sizes are not allowed except
1329 // when vectors of half are involved. Operations on storage-only half
1330 // vectors require promoting half vector operands to float vectors and
1331 // truncating the result, which is either an int or float vector, to a
1332 // short or half vector.
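    // Illustrative example: with storage-only half, `half4 + half4` is
    // emitted as fpext to <4 x float>, a float add, and an fptrunc back to
    // <4 x half>; the fpext and fptrunc legs go through the code below.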

    // Source and destination are both expected to be vectors.
    llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    (void)DstElementTy;

    assert(((SrcElementTy->isIntegerTy() &&
             DstElementTy->isIntegerTy()) ||
            (SrcElementTy->isFloatingPointTy() &&
             DstElementTy->isFloatingPointTy())) &&
           "unexpected conversion between a floating-point vector and an "
           "integer vector");

    // Truncate an i32 vector to an i16 vector.
    if (SrcElementTy->isIntegerTy())
      return Builder.CreateIntCast(Src, DstTy, false, "conv");

    // Truncate a float vector to a half vector.
    if (SrcSize > DstSize)
      return Builder.CreateFPTrunc(Src, DstTy, "conv");

    // Promote a half vector to a float vector.
    return Builder.CreateFPExt(Src, DstTy, "conv");
  }

  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source type
  // or the destination type is a floating-point type. However, we consider the
  // range of representable values for all floating-point types to be
  // [-inf,+inf], so no overflow can ever happen when the destination type is a
  // floating-point type.
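  // Illustrative example: `(int)1e30f` overflows the destination and is UB;
  // with -fsanitize=float-cast-overflow the check emitted here reports it at
  // run time.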
  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
      OrigSrcType->isFloatingType())
    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
                             Loc);

  // Cast to half through float if half isn't a native type.
  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Make sure we cast in a single step if from another FP type.
    if (SrcTy->isFloatingPointTy()) {
      // Use the intrinsic if the half type itself isn't supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
      // If the half type is supported, just use an fptrunc.
      return Builder.CreateFPTrunc(Src, DstTy);
    }
    DstTy = CGF.FloatTy;
  }

  if (isa<llvm::IntegerType>(SrcTy)) {
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    if (SrcType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }
    if (isa<llvm::IntegerType>(DstTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstTy)) {
    assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
           "Unknown real conversion");
    if (DstTy->getTypeID() < SrcTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  if (DstTy != ResTy) {
    if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
      assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
      Res = Builder.CreateCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
                               CGF.CGM.FloatTy),
          Res);
    } else {
      Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
    }
  }

  if (Opts.EmitImplicitIntegerTruncationChecks)
    EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  if (Opts.EmitImplicitIntegerSignChangeChecks)
    EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  return Res;
}

Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
                                                   QualType DstTy,
                                                   SourceLocation Loc) {
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  llvm::Value *Result;
  if (SrcTy->isRealFloatingType())
    Result = FPBuilder.CreateFloatingToFixed(
        Src, CGF.getContext().getFixedPointSemantics(DstTy));
  else if (DstTy->isRealFloatingType())
    Result = FPBuilder.CreateFixedToFloating(
        Src, CGF.getContext().getFixedPointSemantics(SrcTy),
        ConvertType(DstTy));
  else {
    auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
    auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);

    if (DstTy->isIntegerType())
      Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
                                              DstFPSema.getWidth(),
                                              DstFPSema.isSigned());
    else if (SrcTy->isIntegerType())
      Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
                                              DstFPSema);
    else
      Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
  }
  return Result;
}
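// Usage note (illustrative): `int i = a;` for an _Accum `a` reaches the
// CreateFixedToInteger branch above, while `_Accum a = f;` for a float `f`
// takes CreateFloatingToFixed; rescaling and saturation are derived from the
// fixed-point semantics of the types involved.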

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *ScalarExprEmitter::EmitComplexToScalarConversion(
    CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
    SourceLocation Loc) {
  // Get the source element type.
  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    // Complex != 0 -> (Real != 0) | (Imag != 0)
    Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type."
  return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
}
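// Illustrative example: `(double)z` for `_Complex double z` converts just the
// real part, whereas `(bool)z` ORs the "real != 0" and "imag != 0" tests as
// emitted above.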

Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
}

/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1),
/// are \c true.
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
  assert(CGF.IsSanitizerScope);
  SanitizerHandler Check;
  SmallVector<llvm::Constant *, 4> StaticData;
  SmallVector<llvm::Value *, 2> DynamicData;

  BinaryOperatorKind Opcode = Info.Opcode;
  if (BinaryOperator::isCompoundAssignmentOp(Opcode))
    Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);

  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    Check = SanitizerHandler::NegateOverflow;
    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
      Check = SanitizerHandler::ShiftOutOfBounds;
      const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (e.g. INT_MAX / -1).
      Check = SanitizerHandler::DivremOverflow;
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
      switch (Opcode) {
      case BO_Add: Check = SanitizerHandler::AddOverflow; break;
      case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
      case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
      default: llvm_unreachable("unexpected opcode for bin op check");
      }
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    }
    DynamicData.push_back(Info.LHS);
    DynamicData.push_back(Info.RHS);
  }

  CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

Value *ScalarExprEmitter::VisitExpr(Expr *E) {
  CGF.ErrorUnsupported(E, "scalar expression");
  if (E->getType()->isVoidType())
    return nullptr;
  return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
}

Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
  // Vector Mask Case
  if (E->getNumSubExprs() == 2) {
    Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
    Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
    Value *Mask;

    auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
    unsigned LHSElts = LTy->getNumElements();

    Mask = RHS;

    auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());

    // Mask off the high bits of each shuffle index.
    Value *MaskBits =
        llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
    Mask = Builder.CreateAnd(Mask, MaskBits, "mask");

    // newv = undef
    // mask = mask & maskbits
    // for each elt
    //   n = extract mask i
    //   x = extract val n
    //   newv = insert newv, x, i
    auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
                                           MTy->getNumElements());
    Value *NewV = llvm::UndefValue::get(RTy);
    for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
      Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
      Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");

      Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
      NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
    }
    return NewV;
  }

  Value *V1 = CGF.EmitScalarExpr(E->getExpr(0));
  Value *V2 = CGF.EmitScalarExpr(E->getExpr(1));

  SmallVector<int, 32> Indices;
  for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
    llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
    // Check for -1 and output it as undef in the IR.
    if (Idx.isSigned() && Idx.isAllOnesValue())
      Indices.push_back(-1);
    else
      Indices.push_back(Idx.getZExtValue());
  }

  return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
}

Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
  QualType SrcType = E->getSrcExpr()->getType(),
           DstType = E->getType();

  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  assert(SrcType->isVectorType() &&
         "ConvertVector source type must be a vector");
  assert(DstType->isVectorType() &&
         "ConvertVector destination type must be a vector");

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = ConvertType(DstType);

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy)
    return Src;

  QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
           DstEltType = DstType->castAs<VectorType>()->getElementType();

  assert(SrcTy->isVectorTy() &&
         "ConvertVector source IR type must be a vector");
  assert(DstTy->isVectorTy() &&
         "ConvertVector destination IR type must be a vector");

  llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
             *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();

  if (DstEltType->isBooleanType()) {
    assert((SrcEltTy->isFloatingPointTy() ||
            isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");

    llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
    if (SrcEltTy->isFloatingPointTy()) {
      return Builder.CreateFCmpUNE(Src, Zero, "tobool");
    } else {
      return Builder.CreateICmpNE(Src, Zero, "tobool");
    }
  }

  // We have the arithmetic types: real int/float.
  Value *Res = nullptr;

  if (isa<llvm::IntegerType>(SrcEltTy)) {
    bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
    if (isa<llvm::IntegerType>(DstEltTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstEltTy)) {
    assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstEltType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
           "Unknown real conversion");
    if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  return Res;
}
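// Usage note (illustrative): `__builtin_convertvector(v, float4)` for an int
// vector `v` takes the CreateSIToFP/CreateUIToFP branch above; unlike a
// bitcast, the element counts must match and each element is value-converted.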

Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
    CGF.EmitIgnoredExpr(E->getBase());
    return CGF.emitScalarConstant(Constant, E);
  } else {
    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
      llvm::APSInt Value = Result.Val.getInt();
      CGF.EmitIgnoredExpr(E->getBase());
      return Builder.getInt(Value);
    }
  }

  return EmitLoadOfLValue(E);
}

Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue contexts. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
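  // Illustrative example: in `(a + b)[0]` for vector-typed `a` and `b`, the
  // base is an rvalue vector, so we extract from the visited value below
  // instead of forming an lvalue.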
  if (!E->getBase()->getType()->isVectorType())
    return EmitLoadOfLValue(E);

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx = Visit(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);

  return Builder.CreateExtractElement(Base, Idx, "vecext");
}

Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Handle the matrix case. The base must be a matrix, the indices must be
  // integer values.
  Value *RowIdx = Visit(E->getRowIdx());
  Value *ColumnIdx = Visit(E->getColumnIdx());
  Value *Matrix = Visit(E->getBase());

  // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
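  // A matrix value is lowered as a flattened, column-major vector, so the
  // element index is (roughly) RowIdx + ColumnIdx * NumRows; MatrixBuilder
  // encapsulates that computation.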
  llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
  return MB.CreateExtractElement(
      Matrix, RowIdx, ColumnIdx,
      E->getBase()->getType()->getAs<ConstantMatrixType>()->getNumRows());
}

static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
                      unsigned Off) {
  int MV = SVI->getMaskValue(Idx);
  if (MV == -1)
    return -1;
  return Off + MV;
}

static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
  assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
         "Index operand too large for shufflevector mask!");
  return C->getZExtValue();
}

Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  (void)Ignore;
  assert(Ignore == false && "init list ignored");
  unsigned NumInitElements = E->getNumInits();

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::VectorType *VType =
      dyn_cast<llvm::VectorType>(ConvertType(E->getType()));

  if (!VType) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the scalar.
      return EmitNullValue(E->getType());
    }
    // We have a scalar in braces. Just use the first element.
    return Visit(E->getInit(0));
  }

  unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();

  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was a swizzle (ExtVectorElementExpr). This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
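  // Illustrative example: for `float4 v = { a.xy, b.zw };` the two swizzles
  // can fold with the initializer into a single shufflevector of `a` and `b`
  // instead of separate extract/insert pairs.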
  unsigned CurIdx = 0;
  bool VIsUndefShuffle = false;
  llvm::Value *V = llvm::UndefValue::get(VType);
  for (unsigned i = 0; i != NumInitElements; ++i) {
    Expr *IE = E->getInit(i);
    Value *Init = Visit(IE);
    SmallVector<int, 16> Args;

    llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());

    // Handle scalar elements. If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(IE)) {
        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);

        if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
                ->getNumElements() == ResElts) {
          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
          Value *LHS = nullptr, *RHS = nullptr;
          if (CurIdx == 0) {
            // insert into undef -> shuffle (src, undef)
            // shufflemask must use an i32
            Args.push_back(getAsInt32(C, CGF.Int32Ty));
            Args.resize(ResElts, -1);

            LHS = EI->getVectorOperand();
            RHS = V;
            VIsUndefShuffle = true;
          } else if (VIsUndefShuffle) {
            // insert into undefshuffle && size match -> shuffle (v, src)
            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
            for (unsigned j = 0; j != CurIdx; ++j)
              Args.push_back(getMaskElt(SVV, j, 0));
            Args.push_back(ResElts + C->getZExtValue());
            Args.resize(ResElts, -1);

            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
            RHS = EI->getVectorOperand();
            VIsUndefShuffle = false;
          }
          if (!Args.empty()) {
            V = Builder.CreateShuffleVector(LHS, RHS, Args);
            ++CurIdx;
            continue;
          }
        }
      }
      V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
                                      "vecinit");
      VIsUndefShuffle = false;
      ++CurIdx;
      continue;
    }

    unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();

    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
    // input is the same width as the vector being constructed, generate an
    // optimized shuffle of the swizzle input into the result.
    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
    if (isa<ExtVectorElementExpr>(IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
      Value *SVOp = SVI->getOperand(0);
      auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());

      if (OpTy->getNumElements() == ResElts) {
        for (unsigned j = 0; j != CurIdx; ++j) {
          // If the current vector initializer is a shuffle with undef, merge
          // this shuffle directly into it.
          if (VIsUndefShuffle) {
            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
          } else {
            Args.push_back(j);
          }
        }
        for (unsigned j = 0, je = InitElts; j != je; ++j)
          Args.push_back(getMaskElt(SVI, j, Offset));
        Args.resize(ResElts, -1);

        if (VIsUndefShuffle)
          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);

        Init = SVOp;
      }
    }

    // Extend init to result vector length, and then shuffle its contribution
    // to the vector initializer into V.
    if (Args.empty()) {
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j);
      Args.resize(ResElts, -1);
      Init = Builder.CreateShuffleVector(Init, Args, "vext");

      Args.clear();
      for (unsigned j = 0; j != CurIdx; ++j)
        Args.push_back(j);
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j + Offset);
      Args.resize(ResElts, -1);
    }

    // If V is undef, make sure it ends up on the RHS of the shuffle to aid
    // merging subsequent shuffles into this one.
    if (CurIdx == 0)
      std::swap(V, Init);
    V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
    VIsUndefShuffle = isa<llvm::UndefValue>(Init);
    CurIdx += InitElts;
  }

  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers.
  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}

bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
  const Expr *E = CE->getSubExpr();

  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->getValueKind() != VK_RValue)
      return false;
  }

  return true;
}

// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
// have to handle a broader range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
  Expr *E = CE->getSubExpr();
  QualType DestTy = CE->getType();
  CastKind Kind = CE->getCastKind();

  // These cases are generally not written to ignore the result of
  // evaluating their sub-expressions, so we clear this now.
  bool Ignored = TestAndClearIgnoreResultAssign();

  // Since almost all cast kinds apply to scalars, this switch doesn't have
  // a default case, so the compiler will warn on a missing case. The cases
  // are in the same order as in the CastKind enum.
  switch (Kind) {
  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  case CK_LValueBitCast:
  case CK_ObjCObjectLValueCast: {
    Address Addr = EmitLValue(E).getAddress(CGF);
    Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
    LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
    return EmitLoadOfLValue(LV, CE->getExprLoc());
  }

  case CK_LValueToRValueBitCast: {
    LValue SourceLVal = CGF.EmitLValue(E);
    Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
                                                CGF.ConvertTypeForMem(DestTy));
    LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
    DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
    return EmitLoadOfLValue(DestLV, CE->getExprLoc());
  }

  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    Value *Src = Visit(const_cast<Expr*>(E));
    llvm::Type *SrcTy = Src->getType();
    llvm::Type *DstTy = ConvertType(DestTy);
    if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
      llvm_unreachable("wrong cast for pointers in different address spaces "
                       "(must be an address space cast)!");
    }

    if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
      if (auto PT = DestTy->getAs<PointerType>())
        CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
                                      /*MayBeNull=*/true,
                                      CodeGenFunction::CFITCK_UnrelatedCast,
                                      CE->getBeginLoc());
    }

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
        // Casting to pointer that could carry dynamic information (provided by
        // invariant.group) requires launder.
        Src = Builder.CreateLaunderInvariantGroup(Src);
      } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
        // Casting to pointer that does not carry dynamic information (provided
        // by invariant.group) requires stripping it. Note that we don't do it
        // if the source could not be dynamic type and destination could be
        // dynamic because dynamic information is already laundered. It is
        // because launder(strip(src)) == launder(src), so there is no need to
        // add extra strip before launder.
        Src = Builder.CreateStripInvariantGroup(Src);
      }
    }

    // Update heapallocsite metadata when there is an explicit pointer cast.
    if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
      if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
        QualType PointeeType = DestTy->getPointeeType();
        if (!PointeeType.isNull())
          CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
                                                       CE->getExprLoc());
      }
    }

    // If Src is a fixed vector and Dst is a scalable vector, and both have the
    // same element type, use the llvm.experimental.vector.insert intrinsic to
    // perform the bitcast.
    if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
        if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
          llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
          llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
          return Builder.CreateInsertVector(DstTy, UndefVec, Src, Zero,
                                            "castScalableSve");
        }
      }
    }

    // If Src is a scalable vector and Dst is a fixed vector, and both have the
    // same element type, use the llvm.experimental.vector.extract intrinsic to
    // perform the bitcast.
    if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
      if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
        if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
          llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
          return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
        }
      }
    }

    // Perform VLAT <-> VLST bitcast through memory.
    // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
    //       require the element types of the vectors to be the same, we
    //       need to keep this around for casting between predicates, or more
    //       generally for bitcasts between VLAT <-> VLST where the element
    //       types of the vectors are not the same, until we figure out a
    //       better way of doing these casts.
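    // Illustrative example: casting between an SVE `svint32_t` and a fixed
    // `vector_size` int vector of matching element type can use the intrinsics
    // above, but predicate (i1) vectors fall back to this memory round-trip.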
    if ((isa<llvm::FixedVectorType>(SrcTy) &&
         isa<llvm::ScalableVectorType>(DstTy)) ||
        (isa<llvm::ScalableVectorType>(SrcTy) &&
         isa<llvm::FixedVectorType>(DstTy))) {
      if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
        // Call expressions can't have a scalar return unless the return type
        // is a reference type so an lvalue can't be emitted. Create a temp
        // alloca to store the call, bitcast the address then load.
        QualType RetTy = CE->getCallReturnType(CGF.getContext());
        Address Addr =
            CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-call-rvalue");
        LValue LV = CGF.MakeAddrLValue(Addr, RetTy);
        CGF.EmitStoreOfScalar(Src, LV);
        Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
                                            "castFixedSve");
        LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
        DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
        return EmitLoadOfLValue(DestLV, CE->getExprLoc());
      }

      Address Addr = EmitLValue(E).getAddress(CGF);
      Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
      LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
      DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
      return EmitLoadOfLValue(DestLV, CE->getExprLoc());
    }

    return Builder.CreateBitCast(Src, DstTy);
  }
  case CK_AddressSpaceConversion: {
    Expr::EvalResult Result;
    if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
        Result.Val.isNullPointer()) {
      // If E has side effect, it is emitted even if its final result is a
      // null pointer. In that case, a DCE pass should be able to
      // eliminate the useless instructions emitted during translating E.
      if (Result.HasSideEffects)
        Visit(E);
      return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
          ConvertType(DestTy)), DestTy);
    }
    // Since target may map different address spaces in AST to the same address
    // space, an address space conversion may end up as a bitcast.
    return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
        CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
        DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
  }
  case CK_AtomicToNonAtomic:
  case CK_NonAtomicToAtomic:
  case CK_NoOp:
  case CK_UserDefinedConversion:
    return Visit(const_cast<Expr*>(E));

  case CK_BaseToDerived: {
    const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
    assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");

    Address Base = CGF.EmitPointerWithAlignment(E);
    Address Derived =
        CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
                                     CE->path_begin(), CE->path_end(),
                                     CGF.ShouldNullCheckClassCastValue(CE));

    // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (CGF.sanitizePerformTypeCheck())
      CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
                        Derived.getPointer(), DestTy->getPointeeType());

    if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
      CGF.EmitVTablePtrCheckForCast(
          DestTy->getPointeeType(), Derived.getPointer(),
          /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
          CE->getBeginLoc());

    return Derived.getPointer();
  }
  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    // The EmitPointerWithAlignment path does this fine; just discard
    // the alignment.
    return CGF.EmitPointerWithAlignment(CE).getPointer();
  }

  case CK_Dynamic: {
    Address V = CGF.EmitPointerWithAlignment(E);
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
    return CGF.EmitDynamicCast(V, DCE);
  }

  case CK_ArrayToPointerDecay:
    return CGF.EmitArrayToPointerDecay(E).getPointer();
  case CK_FunctionToPointerDecay:
    return EmitLValue(E).getPointer(CGF);

  case CK_NullToPointer:
    if (MustVisitNullValue(E))
      CGF.EmitIgnoredExpr(E);

    return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
                                  DestTy);

  case CK_NullToMemberPointer: {
    if (MustVisitNullValue(E))
      CGF.EmitIgnoredExpr(E);

    const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
  }

  case CK_ReinterpretMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer: {
    Value *Src = Visit(E);

    // Note that the AST doesn't distinguish between checked and
    // unchecked member pointer conversions, so we always have to
    // implement checked conversions here. This is inefficient when
    // actual control flow may be required in order to perform the
    // check, which it is for data member pointers (but not member
    // function pointers on Itanium and ARM).
    return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
  }

  case CK_ARCProduceObject:
    return CGF.EmitARCRetainScalarExpr(E);
  case CK_ARCConsumeObject:
    return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
  case CK_ARCReclaimReturnedObject:
    return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
  case CK_ARCExtendBlockObject:
    return CGF.EmitARCExtendBlockObject(E);

  case CK_CopyAndAutoreleaseBlockObject:
    return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());

  case CK_FloatingRealToComplex:
  case CK_FloatingComplexCast:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_FloatingComplexToIntegralComplex:
  case CK_ConstructorConversion:
  case CK_ToUnion:
    llvm_unreachable("scalar cast to non-scalar value");

  case CK_LValueToRValue:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
    assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
    return Visit(const_cast<Expr*>(E));

  case CK_IntegralToPointer: {
    Value *Src = Visit(const_cast<Expr*>(E));

    // First, convert to the correct width so that we control the kind of
    // extension.
    auto DestLLVMTy = ConvertType(DestTy);
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
    bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
    llvm::Value *IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");

    auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      // Going from integer to pointer that could be dynamic requires reloading
      // dynamic information from invariant.group.
      if (DestTy.mayBeDynamicClass())
        IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
    }
    return IntToPtr;
  }
  case CK_PointerToIntegral: {
    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
    auto *PtrExpr = Visit(E);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      // Casting to integer requires stripping dynamic information as it does
      // not carry it.
      if (SrcType.mayBeDynamicClass())
        PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
    }

    return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
  }
  case CK_ToVoid: {
    CGF.EmitIgnoredExpr(E);
    return nullptr;
  }
  case CK_VectorSplat: {
    llvm::Type *DstTy = ConvertType(DestTy);
    Value *Elt = Visit(const_cast<Expr*>(E));
    // Splat the element across to all elements.
    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
  }

  case CK_FixedPointCast:
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToBoolean:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToIntegral:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralToFixedPoint:
    assert(E->getType()->isIntegerType() &&
           "Expected src type to be an integer");
    assert(DestTy->isFixedPointType() &&
           "Expected dest type to be fixed point type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralCast: {
    ScalarConversionOpts Opts;
    if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
      if (!ICE->isPartOfExplicitCast())
        Opts = ScalarConversionOpts(CGF.SanOpts);
    }
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingCast:
  case CK_FixedPointToFloating:
  case CK_FloatingToFixedPoint: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_BooleanToSignedIntegral: {
    ScalarConversionOpts Opts;
    Opts.TreatBooleanAsSigned = true;
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
  case CK_IntegralToBoolean:
    return EmitIntToBoolConversion(Visit(E));
  case CK_PointerToBoolean:
    return EmitPointerToBoolConversion(Visit(E), E->getType());
  case CK_FloatingToBoolean: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitFloatToBoolConversion(Visit(E));
  }
  case CK_MemberPointerToBoolean: {
    llvm::Value *MemPtr = Visit(E);
    const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
  }

  case CK_FloatingComplexToReal:
  case CK_IntegralComplexToReal:
    return CGF.EmitComplexExpr(E, false, true).first;

  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToBoolean: {
    CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);

    // TODO: kill this function off, inline appropriate case here
    return EmitComplexToScalarConversion(V, E->getType(), DestTy,
                                         CE->getExprLoc());
  }

  case CK_ZeroToOCLOpaqueType: {
    assert((DestTy->isEventT() || DestTy->isQueueT() ||
            DestTy->isOCLIntelSubgroupAVCType()) &&
           "CK_ZeroToOCLOpaqueType cast on non-event type");
    return llvm::Constant::getNullValue(ConvertType(DestTy));
  }

  case CK_IntToOCLSampler:
    return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);

  } // end of switch

  llvm_unreachable("unknown scalar cast");
}

Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
                                           !E->getType()->isVoidType());
  if (!RetAlloca.isValid())
    return nullptr;
  return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
                              E->getExprLoc());
}

Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CodeGenFunction::RunCleanupsScope Scope(CGF);
  Value *V = Visit(E->getSubExpr());
  // Defend against dominance problems caused by jumps out of expression
  // evaluation through the shared cleanup block.
  Scope.ForceCleanup({&V});
  return V;
}

//===----------------------------------------------------------------------===//
//                             Unary Operators
//===----------------------------------------------------------------------===//

static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
                                           llvm::Value *InVal, bool IsInc,
                                           FPOptions FPFeatures) {
  BinOpInfo BinOp;
  BinOp.LHS = InVal;
  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
  BinOp.Ty = E->getType();
  BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
  BinOp.FPFeatures = FPFeatures;
  BinOp.E = E;
  return BinOp;
}

llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
    const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
  llvm::Value *Amount =
      llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
  StringRef Name = IsInc ? "inc" : "dec";
  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
  case LangOptions::SOB_Defined:
    return Builder.CreateAdd(InVal, Amount, Name);
  case LangOptions::SOB_Undefined:
    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    LLVM_FALLTHROUGH;
  case LangOptions::SOB_Trapping:
    if (!E->canOverflow())
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
        E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
  }
  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
}
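// Flag mapping (for reference): -fwrapv selects SOB_Defined (plain add), the
// default is SOB_Undefined (nsw add, or a checked operation under
// -fsanitize=signed-integer-overflow), and -ftrapv selects SOB_Trapping.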

namespace {
/// Handles check and update for lastprivate conditional variables.
class OMPLastprivateConditionalUpdateRAII {
private:
  CodeGenFunction &CGF;
  const UnaryOperator *E;

public:
  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
                                      const UnaryOperator *E)
      : CGF(CGF), E(E) {}
  ~OMPLastprivateConditionalUpdateRAII() {
    if (CGF.getLangOpts().OpenMP)
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
          CGF, E->getSubExpr());
  }
};
} // namespace

llvm::Value *
ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                           bool isInc, bool isPre) {
  OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
  QualType type = E->getSubExpr()->getType();
  llvm::PHINode *atomicPHI = nullptr;
  llvm::Value *value;
  llvm::Value *input;

  int amount = (isInc ? 1 : -1);
  bool isSubtraction = !isInc;

  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
    type = atomicTy->getValueType();
    if (isInc && type->isBooleanType()) {
      llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
      if (isPre) {
        Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
            ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
        return Builder.getTrue();
      }
      // For atomic bool increment, we just store true: preincrement (handled
      // above) returns it directly, while postincrement does an atomic swap
      // with true so the old value can be returned.
      return Builder.CreateAtomicRMW(
          llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True,
          llvm::AtomicOrdering::SequentiallyConsistent);
    }
    // Special case for atomic increment / decrement on integers, emit
    // atomicrmw instructions. We skip this if we want to be doing overflow
    // checking, and fall into the slow path with the atomic cmpxchg loop.
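    // Illustrative example: `_Atomic int x; x++;` becomes a single
    // `atomicrmw add` with seq_cst ordering here instead of a cmpxchg loop.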
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
                                               llvm::AtomicRMWInst::Sub;
      llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
                                                llvm::Instruction::Sub;
      llvm::Value *amt = CGF.EmitToMemory(
          llvm::ConstantInt::get(ConvertType(type), 1, true), type);
      llvm::Value *old =
          Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt,
                                  llvm::AtomicOrdering::SequentiallyConsistent);
      return isPre ? Builder.CreateBinOp(op, old, amt) : old;
    }
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
    // For every other atomic operation, we need to emit a load-op-cmpxchg loop.
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    value = CGF.EmitToMemory(value, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(value->getType(), 2);
    atomicPHI->addIncoming(value, startBB);
    value = atomicPHI;
  } else {
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
  }

  // Special case of integer increment that we have to check first: bool++.
  // Due to promotion rules, we get:
  //   bool++ -> bool = bool + 1
  //          -> bool = (int)bool + 1
  //          -> bool = ((int)bool + 1 != 0)
  // An interesting aspect of this is that increment is always true.
  // Decrement does not have this property.
  if (isInc && type->isBooleanType()) {
    value = Builder.getTrue();

  // Most common case by far: integer increment.
  } else if (type->isIntegerType()) {
    QualType promotedType;
    bool canPerformLossyDemotionCheck = false;
    if (type->isPromotableIntegerType()) {
      promotedType = CGF.getContext().getPromotedIntegerType(type);
      assert(promotedType != type && "Shouldn't promote to the same type.");
      canPerformLossyDemotionCheck = true;
      canPerformLossyDemotionCheck &=
          CGF.getContext().getCanonicalType(type) !=
          CGF.getContext().getCanonicalType(promotedType);
      canPerformLossyDemotionCheck &=
          PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
              type, promotedType);
      assert((!canPerformLossyDemotionCheck ||
              type->isSignedIntegerOrEnumerationType() ||
              promotedType->isSignedIntegerOrEnumerationType() ||
              ConvertType(type)->getScalarSizeInBits() ==
                  ConvertType(promotedType)->getScalarSizeInBits()) &&
             "The following check expects that if we do promotion to different "
             "underlying canonical type, at least one of the types (either "
             "base or promoted) will be signed, or the bitwidths will match.");
    }
    if (CGF.SanOpts.hasOneOf(
            SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
        canPerformLossyDemotionCheck) {
      // While `x += 1` (for `x` with width less than int) is modeled as
      // promotion+arithmetic+demotion, and we can catch lossy demotion with
      // ease, inc/dec with width less than int can't overflow because of
      // promotion rules, so we omit promotion+demotion, which means that we
      // cannot catch lossy "demotion". Because we still want to catch these
      // cases when the sanitizer is enabled, we perform the promotion, then
      // perform the increment/decrement in the wider type, and finally
      // perform the demotion. This will catch lossy demotions.
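      // Illustrative example: for `unsigned char c = 255; c++;` we promote to
      // i32, add 1, and the checked demotion back to i8 reports the lossy
      // 256 -> 0 truncation.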

      value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
      Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
      // Do pass non-default ScalarConversionOpts so that sanitizer check is
      // emitted.
      value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
                                   ScalarConversionOpts(CGF.SanOpts));

    // Note that signed integer inc/dec with width less than int can't
    // overflow because of promotion rules; we're just eliding a few steps
    // here.
    } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
      value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
    } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
               CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
      value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
          E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
    } else {
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    }

  // Next most common: pointer increment.
  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
    QualType type = ptr->getPointeeType();

    // VLA types don't have constant size.
    if (const VariableArrayType *vla
          = CGF.getContext().getAsVariableArrayType(type)) {
      llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
      if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, numElts, "vla.inc");
      else
        value = CGF.EmitCheckedInBoundsGEP(
            value, numElts, /*SignedIndices=*/false, isSubtraction,
            E->getExprLoc(), "vla.inc");

    // Arithmetic on function pointers (!) is just +-1.
    } else if (type->isFunctionType()) {
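      // GNU extension: sizeof(function) is treated as 1, so we advance the
      // pointer by +/-1 byte via an i8* GEP (hence the void-pointer cast
      // below).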
      llvm::Value *amt = Builder.getInt32(amount);

      value = CGF.EmitCastToVoidPtr(value);
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, amt, "incdec.funcptr");
      else
        value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
                                           isSubtraction, E->getExprLoc(),
                                           "incdec.funcptr");
      value = Builder.CreateBitCast(value, input->getType());

    // For everything else, we can just do a simple increment.
    } else {
      llvm::Value *amt = Builder.getInt32(amount);
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, amt, "incdec.ptr");
      else
        value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
                                           isSubtraction, E->getExprLoc(),
                                           "incdec.ptr");
    }

  // Vector increment/decrement.
  } else if (type->isVectorType()) {
    if (type->hasIntegerRepresentation()) {
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);

      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    } else {
      value = Builder.CreateFAdd(
          value,
          llvm::ConstantFP::get(value->getType(), amount),
          isInc ? "inc" : "dec");
    }

  // Floating point.
  } else if (type->isRealFloatingType()) {
    // Add the inc/dec to the real part.
    llvm::Value *amt;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      // Another special case: half FP increment should be done via float.
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        value = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            input, "incdec.conv");
      } else {
        value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
      }
    }

    if (value->getType()->isFloatTy())
      amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<float>(amount)));
    else if (value->getType()->isDoubleTy())
      amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<double>(amount)));
    else {
      // Remaining types are Half, LongDouble or __float128. Convert from float.
      llvm::APFloat F(static_cast<float>(amount));
      bool ignored;
      const llvm::fltSemantics *FS;
      // Don't use getFloatTypeSemantics because Half isn't
      // necessarily represented using the "half" LLVM type.
      if (value->getType()->isFP128Ty())
        FS = &CGF.getTarget().getFloat128Format();
      else if (value->getType()->isHalfTy())
        FS = &CGF.getTarget().getHalfFormat();
      else
        FS = &CGF.getTarget().getLongDoubleFormat();
      F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
      amt = llvm::ConstantFP::get(VMContext, F);
    }
    value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        value = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
                                 CGF.CGM.FloatTy),
            value, "incdec.conv");
      } else {
        value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
      }
    }

  // Fixed-point types.
  } else if (type->isFixedPointType()) {
    // Fixed-point types are tricky. In some cases, it isn't possible to
    // represent a 1 or a -1 in the type at all. Piggyback off of
    // EmitFixedPointBinOp to avoid having to reimplement saturation.
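    // Illustrative example: `short _Fract f; f++;` cannot represent +1 in the
    // type, so the operation is built as `f - (-1)` below and the fixed-point
    // builder rescales (and saturates, for _Sat types) as needed.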
2649 BinOpInfo Info;
2650 Info.E = E;
2651 Info.Ty = E->getType();
2652 Info.Opcode = isInc ? BO_Add : BO_Sub;
2653 Info.LHS = value;
2654 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
2655 // If the type is signed, it's better to represent this as +(-1) or -(-1),
2656 // since -1 is guaranteed to be representable.
2657 if (type->isSignedFixedPointType()) {
2658 Info.Opcode = isInc ? BO_Sub : BO_Add;
2659 Info.RHS = Builder.CreateNeg(Info.RHS);
2660 }
2661 // Now, convert from our invented integer literal to the type of the unary
2662 // op. This will upscale and saturate if necessary. This value can become
2663 // undef in some cases.
2664 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
2665 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
2666 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
2667 value = EmitFixedPointBinOp(Info);
2668
2669 // Objective-C pointer types.
2670 } else {
2671 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2672 value = CGF.EmitCastToVoidPtr(value);
2673
2674 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2675 if (!isInc) size = -size;
2676 llvm::Value *sizeValue =
2677 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2678
2679 if (CGF.getLangOpts().isSignedOverflowDefined())
2680 value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
2681 else
2682 value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
2683 /*SignedIndices=*/false, isSubtraction,
2684 E->getExprLoc(), "incdec.objptr");
2685 value = Builder.CreateBitCast(value, input->getType());
2686 }
2687
2688 if (atomicPHI) {
2689 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
2690 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2691 auto Pair = CGF.EmitAtomicCompareExchange(
2692 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2693 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2694 llvm::Value *success = Pair.second;
2695 atomicPHI->addIncoming(old, curBlock);
2696 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
2697 Builder.SetInsertPoint(contBB);
2698 return isPre ? value : input;
2699 }
2700
2701 // Store the updated result through the lvalue.
2702 if (LV.isBitField())
2703 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
2704 else
2705 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
2706
2707 // If this is a postinc, return the value read from memory, otherwise use the
2708 // updated value.
2709 return isPre ? value : input;
2710 }
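
// For illustration: given 'int i; int j = i++;', the code above loads i,
// emits an add of 1 (wrapping, nsw, or overflow-checked depending on the
// signed-overflow mode), stores the sum back through the lvalue, and, since
// this is a post-increment, yields the original loaded value for 'j'.
// A pre-increment '++i' would yield the updated value instead.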

Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  Value *Op = Visit(E->getSubExpr());

  // Generate a unary FNeg for FP ops.
  if (Op->getType()->isFPOrFPVectorTy())
    return Builder.CreateFNeg(Op, "fneg");

  // Emit unary minus with EmitSub so we handle overflow cases etc.
  BinOpInfo BinOp;
  BinOp.RHS = Op;
  BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
  BinOp.Ty = E->getType();
  BinOp.Opcode = BO_Sub;
  BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  BinOp.E = E;
  return EmitSub(BinOp);
}

Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  Value *Op = Visit(E->getSubExpr());
  return Builder.CreateNot(Op, "neg");
}

Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
  // Perform vector logical not on comparison with zero vector.
  if (E->getType()->isVectorType() &&
      E->getType()->castAs<VectorType>()->getVectorKind() ==
          VectorType::GenericVector) {
    Value *Oper = Visit(E->getSubExpr());
    Value *Zero = llvm::Constant::getNullValue(Oper->getType());
    Value *Result;
    if (Oper->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
    } else
      Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
    return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
  }

  // Compare operand to zero.
  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());

  // Invert value.
  // TODO: Could dynamically modify easy computations here. For example, if
  // the operand is an icmp ne, turn into icmp eq.
  BoolVal = Builder.CreateNot(BoolVal, "lnot");

  // ZExt result to the expr type.
  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
}
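
// For illustration: '!x' for a scalar int x lowers to roughly
//   %tobool = icmp ne i32 %x, 0
//   %lnot   = xor i1 %tobool, true
//   %ext    = zext i1 %lnot to i32
// while the generic-vector form above compares against a zero vector and
// sign-extends the i1 mask so each lane is all-ones or all-zeros.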

Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
  // Try folding the offsetof to a constant.
  Expr::EvalResult EVResult;
  if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
    llvm::APSInt Value = EVResult.Val.getInt();
    return Builder.getInt(Value);
  }

  // Loop over the components of the offsetof to compute the value.
  unsigned n = E->getNumComponents();
  llvm::Type *ResultType = ConvertType(E->getType());
  llvm::Value *Result = llvm::Constant::getNullValue(ResultType);
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned i = 0; i != n; ++i) {
    OffsetOfNode ON = E->getComponent(i);
    llvm::Value *Offset = nullptr;
    switch (ON.getKind()) {
    case OffsetOfNode::Array: {
      // Compute the index
      Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
      llvm::Value *Idx = CGF.EmitScalarExpr(IdxExpr);
      bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
      Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");

      // Save the element type
      CurrentType =
          CGF.getContext().getAsArrayType(CurrentType)->getElementType();

      // Compute the element size
      llvm::Value *ElemSize = llvm::ConstantInt::get(ResultType,
          CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());

      // Multiply out to compute the result
      Offset = Builder.CreateMul(Idx, ElemSize);
      break;
    }

    case OffsetOfNode::Field: {
      FieldDecl *MemberDecl = ON.getField();
      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Compute the index of the field in its parent.
      unsigned i = 0;
      // FIXME: It would be nice if we didn't have to loop here!
      for (RecordDecl::field_iterator Field = RD->field_begin(),
                                      FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field, ++i) {
        if (*Field == MemberDecl)
          break;
      }
      assert(i < RL.getFieldCount() && "offsetof field in wrong type");

      // Compute the offset to the field
      int64_t OffsetInt = RL.getFieldOffset(i) /
                          CGF.getContext().getCharWidth();
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);

      // Save the element type.
      CurrentType = MemberDecl->getType();
      break;
    }

    case OffsetOfNode::Identifier:
      llvm_unreachable("dependent __builtin_offsetof");

    case OffsetOfNode::Base: {
      if (ON.getBase()->isVirtual()) {
        CGF.ErrorUnsupported(E, "virtual base in offsetof");
        continue;
      }

      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Save the element type.
      CurrentType = ON.getBase()->getType();

      // Compute the offset to the base.
      const RecordType *BaseRT = CurrentType->getAs<RecordType>();
      CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
      CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
      break;
    }
    }
    Result = Builder.CreateAdd(Result, Offset);
  }
  return Result;
}
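
// For illustration: a non-constant '__builtin_offsetof(struct S, a[i].b)'
// is emitted as the sum of the constant offset of 'a' within S, the runtime
// product 'i * sizeof(element-of-a)', and the constant offset of 'b' within
// the element type, accumulated with integer adds in the loop above; fully
// constant operands are folded before any of that happens.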

/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type
/// of the argument of the sizeof expression as an integer.
Value *
ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
    const UnaryExprOrTypeTraitExpr *E) {
  QualType TypeToSize = E->getTypeOfArgument();
  if (E->getKind() == UETT_SizeOf) {
    if (const VariableArrayType *VAT =
            CGF.getContext().getAsVariableArrayType(TypeToSize)) {
      if (E->isArgumentType()) {
        // sizeof(type) - make sure to emit the VLA size.
        CGF.EmitVariablyModifiedType(TypeToSize);
      } else {
        // C99 6.5.3.4p2: If the argument is an expression of VLA type,
        // it is evaluated.
        CGF.EmitIgnoredExpr(E->getArgumentExpr());
      }

      auto VlaSize = CGF.getVLASize(VAT);
      llvm::Value *size = VlaSize.NumElts;

      // Scale the number of non-VLA elements by the non-VLA element size.
      CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
      if (!eltSize.isOne())
        size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);

      return size;
    }
  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
    auto Alignment =
        CGF.getContext()
            .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                E->getTypeOfArgument()->getPointeeType()))
            .getQuantity();
    return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
  }

  // If this isn't sizeof(vla), the result must be constant; use the constant
  // folding logic so we don't have to duplicate it here.
  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
}
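
// For illustration: for 'int n = ...; int vla[n]; sizeof(vla)', the emitter
// returns the saved element count multiplied (NUW) by sizeof(int) rather
// than a constant, while a plain 'sizeof(int[8])' takes the constant-folding
// path at the end and yields 'i64 32' directly on a typical 64-bit target.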

Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (E->isGLValue())
      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
                                  E->getExprLoc()).getScalarVal();

    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, false, true).first;
  }

  return Visit(Op);
}

Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (Op->isGLValue())
      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
                                  E->getExprLoc()).getScalarVal();

    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, true, false).second;
  }

  // __imag on a scalar returns zero. Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
  if (Op->isGLValue())
    CGF.EmitLValue(Op);
  else
    CGF.EmitScalarExpr(Op, true);
  return llvm::Constant::getNullValue(ConvertType(E->getType()));
}

//===----------------------------------------------------------------------===//
// Binary Operators
//===----------------------------------------------------------------------===//

BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  BinOpInfo Result;
  Result.LHS = Visit(E->getLHS());
  Result.RHS = Visit(E->getRHS());
  Result.Ty = E->getType();
  Result.Opcode = E->getOpcode();
  Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  Result.E = E;
  return Result;
}

LValue ScalarExprEmitter::EmitCompoundAssignLValue(
    const CompoundAssignOperator *E,
    Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
    Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  if (E->getComputationResultType()->isAnyComplexType())
    return CGF.EmitScalarCompoundAssignWithComplex(E, Result);

  // Emit the RHS first. __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.
  OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = E->getComputationResultType();
  OpInfo.Opcode = E->getOpcode();
  OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

  llvm::PHINode *atomicPHI = nullptr;
  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
    QualType type = atomicTy->getValueType();
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
      llvm::Instruction::BinaryOps Op;
      switch (OpInfo.Opcode) {
      // We don't have atomicrmw operations for *, %, /, <<, >>.
      case BO_MulAssign: case BO_DivAssign:
      case BO_RemAssign:
      case BO_ShlAssign:
      case BO_ShrAssign:
        break;
      case BO_AddAssign:
        AtomicOp = llvm::AtomicRMWInst::Add;
        Op = llvm::Instruction::Add;
        break;
      case BO_SubAssign:
        AtomicOp = llvm::AtomicRMWInst::Sub;
        Op = llvm::Instruction::Sub;
        break;
      case BO_AndAssign:
        AtomicOp = llvm::AtomicRMWInst::And;
        Op = llvm::Instruction::And;
        break;
      case BO_XorAssign:
        AtomicOp = llvm::AtomicRMWInst::Xor;
        Op = llvm::Instruction::Xor;
        break;
      case BO_OrAssign:
        AtomicOp = llvm::AtomicRMWInst::Or;
        Op = llvm::Instruction::Or;
        break;
      default:
        llvm_unreachable("Invalid compound assignment type");
      }
      if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
        llvm::Value *Amt = CGF.EmitToMemory(
            EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
                                 E->getExprLoc()),
            LHSTy);
        Value *OldVal = Builder.CreateAtomicRMW(
            AtomicOp, LHSLV.getPointer(CGF), Amt,
            llvm::AtomicOrdering::SequentiallyConsistent);

        // Since the operation is atomic, the result type is guaranteed to be
        // the same as the input in LLVM terms.
        Result = Builder.CreateBinOp(Op, OldVal, Amt);
        return LHSLV;
      }
    }
    // FIXME: For floating point types, we should be saving and restoring the
    // floating point environment in the loop.
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
    OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
    atomicPHI->addIncoming(OpInfo.LHS, startBB);
    OpInfo.LHS = atomicPHI;
  } else
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
  SourceLocation Loc = E->getExprLoc();
  OpInfo.LHS =
      EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type,
  // potentially with an implicit-conversion sanitizer check.
  Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
                                Loc, ScalarConversionOpts(CGF.SanOpts));

  if (atomicPHI) {
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return LHSLV;
  }

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField())
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
  else
    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);

  if (CGF.getLangOpts().OpenMP)
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
                                                                  E->getLHS());
  return LHSLV;
}

Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
                      Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  Value *RHS = nullptr;
  LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the
  // assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}
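
// For illustration: in C++, the value of '(x += 1)' for a volatile int x is
// re-read from memory so the expression reflects the store, while the
// non-volatile case reuses the already-computed result; in C the assigned
// r-value is returned either way.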

void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
    const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;

  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
    Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
                                    SanitizerKind::IntegerDivideByZero));
  }

  const auto *BO = cast<BinaryOperator>(Ops.E);
  if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
      Ops.Ty->hasSignedIntegerRepresentation() &&
      !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
      Ops.mayHaveIntegerOverflow()) {
    llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());

    llvm::Value *IntMin =
        Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
    llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);

    llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
    llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
    llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
    Checks.push_back(
        std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
  }

  if (Checks.size() > 0)
    EmitBinOpCheck(Checks, Ops);
}
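
// For illustration: for 'int a, b; a / b' under
// -fsanitize=integer-divide-by-zero,signed-integer-overflow, this emits
// 'icmp ne b, 0' plus the INT_MIN / -1 overflow test
// '(a != INT_MIN) | (b != -1)', branching to the sanitizer handler when
// either check fails.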

Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
         CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
        Ops.Ty->isIntegerType() &&
        (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
    } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
               Ops.Ty->isRealFloatingType() &&
               Ops.mayHaveFloatDivisionByZero()) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
      EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
                     Ops);
    }
  }

  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
    llvm::Value *Val;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
    if (CGF.getLangOpts().OpenCL &&
        !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
      // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
      // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
      // build option allows an application to specify that single precision
      // floating-point divide (x/y and 1/x) and sqrt used in the program
      // source are correctly rounded.
      llvm::Type *ValTy = Val->getType();
      if (ValTy->isFloatTy() ||
          (isa<llvm::VectorType>(ValTy) &&
           cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
        CGF.SetFPAccuracy(Val, 2.5);
    }
    return Val;
  } else if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);
  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
  else
    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
}

Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
  // Rem in C can't be a floating point type: C99 6.5.5p2.
  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
       CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
      Ops.Ty->isIntegerType() &&
      (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
    EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
  else
    return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
}

Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;
  SanitizerHandler OverflowKind;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
                     llvm::Intrinsic::uadd_with_overflow;
    OverflowKind = SanitizerHandler::AddOverflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
                     llvm::Intrinsic::usub_with_overflow;
    OverflowKind = SanitizerHandler::SubOverflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
                     llvm::Intrinsic::umul_with_overflow;
    OverflowKind = SanitizerHandler::MulOverflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName = &CGF.getLangOpts().OverflowHandler;
  if (handlerName->empty()) {
    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
      SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
                                    : SanitizerKind::UnsignedIntegerOverflow;
      EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(OpID),
    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
      CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}
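
// For illustration: a signed 'a + b' routed here becomes roughly
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %sum = extractvalue { i32, i1 } %res, 0
//   %ovf = extractvalue { i32, i1 } %res, 1
// followed by a trap (-ftrapv), a ubsan diagnostic, or a call into the
// user-named overflow handler (-ftrapv-handler), depending on how overflow
// handling was configured.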

/// Emit pointer + index arithmetic.
static Value *emitPointerArithmetic(CodeGenFunction &CGF,
                                    const BinOpInfo &op,
                                    bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);

  Value *pointer = op.LHS;
  Expr *pointerOperand = expr->getLHS();
  Value *index = op.RHS;
  Expr *indexOperand = expr->getRHS();

  // In a subtraction, the LHS is always the pointer.
  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }

  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();

  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
  auto &DL = CGF.CGM.getDataLayout();
  auto PtrTy = cast<llvm::PointerType>(pointer->getType());

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm. This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not recognized if any of the following
  // are true:
  //
  //   The operation is subtraction.
  //   The index is not pointer-sized.
  //   The pointer type is not byte-sized.
  //
  if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
                                                       op.Opcode,
                                                       expr->getLHS(),
                                                       expr->getRHS()))
    return CGF.Builder.CreateIntToPtr(index, pointer->getType());

  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
    // Zero-extend or sign-extend the index value according to
    // whether the index is signed or not.
    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                      "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = CGF.Builder.CreateNeg(index, "idx.neg");

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
                        /*Accessed*/ false);

  const PointerType *pointerType
    = pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    QualType objectType = pointerOperand->getType()
                                        ->castAs<ObjCObjectPointerType>()
                                        ->getPointeeType();
    llvm::Value *objectSize
      = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));

    index = CGF.Builder.CreateMul(index, objectSize);

    Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
    result = CGF.Builder.CreateGEP(result, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = CGF.getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    if (CGF.getLangOpts().isSignedOverflowDefined()) {
      index = CGF.Builder.CreateMul(index, numElements, "vla.index");
      pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
    } else {
      index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer =
          CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
                                     op.E->getExprLoc(), "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions.
  // The GNU void* casts amount to no-ops since our void* type is i8*, but
  // this is future proof.
  if (elementType->isVoidType() || elementType->isFunctionType()) {
    Value *result = CGF.EmitCastToVoidPtr(pointer);
    result = CGF.Builder.CreateGEP(result, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  if (CGF.getLangOpts().isSignedOverflowDefined())
    return CGF.Builder.CreateGEP(pointer, index, "add.ptr");

  return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
                                    op.E->getExprLoc(), "add.ptr");
}
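
// For illustration: 'p + i' for 'int *p' typically lowers to
//   %idx.ext = sext i32 %i to i64
//   %add.ptr = getelementptr inbounds i32, i32* %p, i64 %idx.ext
// with 'inbounds' dropped under -fwrapv, and with the null-pointer idiom
// '(char *)0 + n' emitted as a plain inttoptr as described above.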

// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
// Addend. Use negMul and negAdd to negate the first operand of the Mul or
// the add operand respectively. This allows fmuladd to represent a*b-c, or
// c-a*b. Patterns in LLVM should catch the negated forms and translate them
// to efficient operations.
static Value *buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
                           const CodeGenFunction &CGF, CGBuilderTy &Builder,
                           bool negMul, bool negAdd) {
  assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");

  Value *MulOp0 = MulOp->getOperand(0);
  Value *MulOp1 = MulOp->getOperand(1);
  if (negMul)
    MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
  if (negAdd)
    Addend = Builder.CreateFNeg(Addend, "neg");

  Value *FMulAdd = nullptr;
  if (Builder.getIsFPConstrained()) {
    assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
           "Only constrained operation should be created when Builder is in FP "
           "constrained mode");
    FMulAdd = Builder.CreateConstrainedFPCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
                             Addend->getType()),
        {MulOp0, MulOp1, Addend});
  } else {
    FMulAdd = Builder.CreateCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
        {MulOp0, MulOp1, Addend});
  }
  MulOp->eraseFromParent();

  return FMulAdd;
}

// Check whether it would be legal to emit an fmuladd intrinsic call to
// represent op and if so, build the fmuladd.
//
// Checks that (a) the operation is fusable, and (b) the FP options allow
// contraction within the statement (-ffp-contract=on).
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
static Value *tryEmitFMulAdd(const BinOpInfo &op,
                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub = false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        LHSBinOp->use_empty())
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
  }
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        RHSBinOp->use_empty())
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
  }

  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        LHSBinOp->use_empty())
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        RHSBinOp->use_empty())
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
  }

  return nullptr;
}
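
// For illustration: with -ffp-contract=on, 'a * b + c' (all doubles) is
// emitted as
//   %0 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
// instead of a separate fmul/fadd pair, leaving the decision to actually
// fuse to the backend; 'c - a * b' reuses the same path with the mul's
// first operand negated.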

Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);

  if (op.Ty->isSignedIntegerOrEnumerationType()) {
    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      return Builder.CreateAdd(op.LHS, op.RHS, "add");
    case LangOptions::SOB_Undefined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      LLVM_FALLTHROUGH;
    case LangOptions::SOB_Trapping:
      if (CanElideOverflowCheck(CGF.getContext(), op))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      return EmitOverflowCheckedBinOp(op);
    }
  }

  if (op.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
    return MB.CreateAdd(op.LHS, op.RHS);
  }

  if (op.Ty->isUnsignedIntegerType() &&
      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !CanElideOverflowCheck(CGF.getContext(), op))
    return EmitOverflowCheckedBinOp(op);

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;

    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
  }

  if (op.isFixedPointOp())
    return EmitFixedPointBinOp(op);

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}

/// The resulting value must be calculated with exact precision, so the
/// operands may not be the same type.
Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
  using llvm::APSInt;
  using llvm::ConstantInt;

  // This is either a binary operation where at least one of the operands is
  // a fixed-point type, or a unary operation where the operand is a
  // fixed-point type. The result type of a binary operation is determined by
  // Sema::handleFixedPointConversions().
  QualType ResultTy = op.Ty;
  QualType LHSTy, RHSTy;
  if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
    RHSTy = BinOp->getRHS()->getType();
    if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
      // For compound assignment, the effective type of the LHS at this point
      // is the computation LHS type, not the actual LHS type, and the final
      // result type is not the type of the expression but rather the
      // computation result type.
      LHSTy = CAO->getComputationLHSType();
      ResultTy = CAO->getComputationResultType();
    } else
      LHSTy = BinOp->getLHS()->getType();
  } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
    LHSTy = UnOp->getSubExpr()->getType();
    RHSTy = UnOp->getSubExpr()->getType();
  }
  ASTContext &Ctx = CGF.getContext();
  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);

  // Perform the actual operation.
  Value *Result;
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  switch (op.Opcode) {
  case BO_AddAssign:
  case BO_Add:
    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_SubAssign:
  case BO_Sub:
    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_MulAssign:
  case BO_Mul:
    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_DivAssign:
  case BO_Div:
    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_ShlAssign:
  case BO_Shl:
    Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
    break;
  case BO_ShrAssign:
  case BO_Shr:
    Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
    break;
  case BO_LT:
    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GT:
    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_LE:
    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GE:
    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_EQ:
    // For equality operations, we assume any padding bits on unsigned types
    // are zero'd out. They could be overwritten through non-saturating
    // operations that cause overflow, but this leads to undefined behavior.
    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_NE:
    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_Cmp:
  case BO_LAnd:
  case BO_LOr:
    llvm_unreachable("Found unimplemented fixed point binary operation");
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Rem:
  case BO_Xor:
  case BO_And:
  case BO_Or:
  case BO_Assign:
  case BO_RemAssign:
  case BO_AndAssign:
  case BO_XorAssign:
  case BO_OrAssign:
  case BO_Comma:
    llvm_unreachable(
        "Found unsupported binary operation for fixed point types.");
  }

  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
                 BinaryOperator::isShiftAssignOp(op.Opcode);
  // Convert to the result type.
  return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
                                                      : CommonFixedSema,
                                      ResultFixedSema);
}
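
// For illustration: adding a '_Fract' to an '_Accum' first widens both sides
// to a common semantic (enough integral and fractional bits for either),
// performs the add there, and then converts back to the result type via
// CreateFixedToFixed, saturating on the way out when the result type is a
// '_Sat' type.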

Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
  // The LHS is always a pointer if either side is.
  if (!op.LHS->getType()->isPointerTy()) {
    if (op.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateSub(op.LHS, op.RHS, "sub");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), op))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        return EmitOverflowCheckedBinOp(op);
      }
    }

    if (op.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
      return MB.CreateSub(op.LHS, op.RHS);
    }

    if (op.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), op))
      return EmitOverflowCheckedBinOp(op);

    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      // Try to form an fmuladd.
      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
        return FMulAdd;
      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
    }

    if (op.isFixedPointOp())
      return EmitFixedPointBinOp(op);

    return Builder.CreateSub(op.LHS, op.RHS, "sub");
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part.
  llvm::Value *LHS
    = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
  llvm::Value *RHS
    = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

  // Okay, figure out the element size.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
  QualType elementType = expr->getLHS()->getType()->getPointeeType();

  llvm::Value *divisor = nullptr;

  // For a variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    auto VlaSize = CGF.getVLASize(vla);
    elementType = VlaSize.Type;
    divisor = VlaSize.NumElts;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);

  // For everything else, we can just compute it, safe in the
  // assumption that Sema won't let anything through that we can't
  // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(elementType);

    // Don't even emit the divide for element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}
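
// For illustration: 'p - q' for 'int *p, *q' is emitted as
//   %lhs  = ptrtoint i32* %p to i64
//   %rhs  = ptrtoint i32* %q to i64
//   %diff = sub i64 %lhs, %rhs
//   %div  = sdiv exact i64 %diff, 4
// where the 'exact' flag encodes that a valid pointer difference is always
// a multiple of the element size.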

Value *ScalarExprEmitter::GetWidthMinusOneValue(Value *LHS, Value *RHS) {
  llvm::IntegerType *Ty;
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());
  return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
}

Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
                                              const Twine &Name) {
  llvm::IntegerType *Ty;
  if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());

  if (llvm::isPowerOf2_64(Ty->getBitWidth()))
    return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name);

  return Builder.CreateURem(
      RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
}
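
// For illustration: under OpenCL's modulo-width shift rule, 'x << 33' for a
// 32-bit x is emitted as 'shl i32 %x, (and i32 33, 31)', i.e. a shift by 1;
// for a non-power-of-two bit width the mask becomes an explicit urem
// instead.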

Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            !CGF.getLangOpts().isSignedOverflowDefined() &&
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
    llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if the exponent is valid - otherwise
      // instructions below will have undefined behavior themselves.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetWidthMinusOneValue(Ops.LHS, RHS);
      CGF.EmitBlock(CheckShiftBase);
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                                     /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(
          BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
                                        : SanitizerKind::UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}

Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
  else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *Valid =
        Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
    EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}

enum IntrinsicType { VCMPEQ, VCMPGT };
// Return the corresponding comparison intrinsic for the given vector element
// type.
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
                                        BuiltinType::Kind ElemKind) {
  switch (ElemKind) {
  default: llvm_unreachable("unexpected element type");
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
  case BuiltinType::UShort:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
  case BuiltinType::Short:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
  case BuiltinType::UInt:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
  case BuiltinType::Int:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
  case BuiltinType::ULong:
  case BuiltinType::ULongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
  case BuiltinType::Long:
  case BuiltinType::LongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
  case BuiltinType::Float:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
  case BuiltinType::Double:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
                            llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
  case BuiltinType::UInt128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
  case BuiltinType::Int128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
  }
}
3940
EmitCompare(const BinaryOperator * E,llvm::CmpInst::Predicate UICmpOpc,llvm::CmpInst::Predicate SICmpOpc,llvm::CmpInst::Predicate FCmpOpc,bool IsSignaling)3941 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
3942 llvm::CmpInst::Predicate UICmpOpc,
3943 llvm::CmpInst::Predicate SICmpOpc,
3944 llvm::CmpInst::Predicate FCmpOpc,
3945 bool IsSignaling) {
3946 TestAndClearIgnoreResultAssign();
3947 Value *Result;
3948 QualType LHSTy = E->getLHS()->getType();
3949 QualType RHSTy = E->getRHS()->getType();
3950 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
3951 assert(E->getOpcode() == BO_EQ ||
3952 E->getOpcode() == BO_NE);
3953 Value *LHS = CGF.EmitScalarExpr(E->getLHS());
3954 Value *RHS = CGF.EmitScalarExpr(E->getRHS());
3955 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
3956 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
3957 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
3958 BinOpInfo BOInfo = EmitBinOps(E);
3959 Value *LHS = BOInfo.LHS;
3960 Value *RHS = BOInfo.RHS;
3961
3962 // If AltiVec, the comparison results in a numeric type, so we use
3963 // intrinsics comparing vectors and giving 0 or 1 as a result
3964 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
3965 // constants for mapping CR6 register bits to predicate result
3966 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
3967
3968 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
3969
3970 // in several cases vector arguments order will be reversed
3971 Value *FirstVecArg = LHS,
3972 *SecondVecArg = RHS;
3973
3974 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
3975 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
3976
3977 switch(E->getOpcode()) {
3978 default: llvm_unreachable("is not a comparison operation");
3979 case BO_EQ:
3980 CR6 = CR6_LT;
3981 ID = GetIntrinsic(VCMPEQ, ElementKind);
3982 break;
3983 case BO_NE:
3984 CR6 = CR6_EQ;
3985 ID = GetIntrinsic(VCMPEQ, ElementKind);
3986 break;
3987 case BO_LT:
3988 CR6 = CR6_LT;
3989 ID = GetIntrinsic(VCMPGT, ElementKind);
3990 std::swap(FirstVecArg, SecondVecArg);
3991 break;
3992 case BO_GT:
3993 CR6 = CR6_LT;
3994 ID = GetIntrinsic(VCMPGT, ElementKind);
3995 break;
3996 case BO_LE:
3997 if (ElementKind == BuiltinType::Float) {
3998 CR6 = CR6_LT;
3999 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4000 std::swap(FirstVecArg, SecondVecArg);
4001 }
4002 else {
4003 CR6 = CR6_EQ;
4004 ID = GetIntrinsic(VCMPGT, ElementKind);
4005 }
4006 break;
4007 case BO_GE:
4008 if (ElementKind == BuiltinType::Float) {
4009 CR6 = CR6_LT;
4010 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4011 }
4012 else {
4013 CR6 = CR6_EQ;
4014 ID = GetIntrinsic(VCMPGT, ElementKind);
4015 std::swap(FirstVecArg, SecondVecArg);
4016 }
4017 break;
4018 }
4019
4020 Value *CR6Param = Builder.getInt32(CR6);
4021 llvm::Function *F = CGF.CGM.getIntrinsic(ID);
4022 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
4023
4024 // The result type of intrinsic may not be same as E->getType().
4025 // If E->getType() is not BoolTy, EmitScalarConversion will do the
4026 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
4027 // do nothing, if ResultTy is not i1 at the same time, it will cause
4028 // crash later.
4029 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
4030 if (ResultTy->getBitWidth() > 1 &&
4031 E->getType() == CGF.getContext().BoolTy)
4032 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
4033 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4034 E->getExprLoc());
4035 }
4036
4037 if (BOInfo.isFixedPointOp()) {
4038 Result = EmitFixedPointBinOp(BOInfo);
4039 } else if (LHS->getType()->isFPOrFPVectorTy()) {
4040 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
4041 if (!IsSignaling)
4042 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
4043 else
4044 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
4045 } else if (LHSTy->hasSignedIntegerRepresentation()) {
4046 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
4047 } else {
4048 // Unsigned integers and pointers.
4049
4050 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
4051 !isa<llvm::ConstantPointerNull>(LHS) &&
4052 !isa<llvm::ConstantPointerNull>(RHS)) {
4053
4054         // Dynamic information must be stripped for comparisons, because it
4055         // could otherwise be leaked. Based on comparisons of pointers to
4056         // dynamic objects, the optimizer can replace one pointer with
4057         // another, which might be incorrect in the presence of invariant
4058         // groups. Comparison with null is safe because null does not carry
4059         // any dynamic information.
4060 if (LHSTy.mayBeDynamicClass())
4061 LHS = Builder.CreateStripInvariantGroup(LHS);
4062 if (RHSTy.mayBeDynamicClass())
4063 RHS = Builder.CreateStripInvariantGroup(RHS);
4064 }
4065
4066 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
4067 }
4068
4069 // If this is a vector comparison, sign extend the result to the appropriate
4070 // vector integer type and return it (don't convert to bool).
4071 if (LHSTy->isVectorType())
4072 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
4073
4074 } else {
4075 // Complex Comparison: can only be an equality comparison.
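    // For example, "_Complex double a, b; ... (a == b)" compares the real and
    // imaginary parts separately and ANDs the results (illustrative sketch):
    //   %cmp.r  = fcmp oeq double %a.real, %b.real
    //   %cmp.i  = fcmp oeq double %a.imag, %b.imag
    //   %and.ri = and i1 %cmp.r, %cmp.i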
4076 CodeGenFunction::ComplexPairTy LHS, RHS;
4077 QualType CETy;
4078 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
4079 LHS = CGF.EmitComplexExpr(E->getLHS());
4080 CETy = CTy->getElementType();
4081 } else {
4082 LHS.first = Visit(E->getLHS());
4083 LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
4084 CETy = LHSTy;
4085 }
4086 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
4087 RHS = CGF.EmitComplexExpr(E->getRHS());
4088 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
4089 CTy->getElementType()) &&
4090 "The element types must always match.");
4091 (void)CTy;
4092 } else {
4093 RHS.first = Visit(E->getRHS());
4094 RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
4095 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
4096 "The element types must always match.");
4097 }
4098
4099 Value *ResultR, *ResultI;
4100 if (CETy->isRealFloatingType()) {
4101 // As complex comparisons can only be equality comparisons, they
4102 // are never signaling comparisons.
4103 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
4104 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
4105 } else {
4106 // Complex comparisons can only be equality comparisons. As such, signed
4107 // and unsigned opcodes are the same.
4108 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
4109 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
4110 }
4111
4112 if (E->getOpcode() == BO_EQ) {
4113 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
4114 } else {
4115 assert(E->getOpcode() == BO_NE &&
4116 "Complex comparison other than == or != ?");
4117 Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
4118 }
4119 }
4120
4121 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4122 E->getExprLoc());
4123 }
4124
4125 Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
4126 bool Ignore = TestAndClearIgnoreResultAssign();
4127
4128 Value *RHS;
4129 LValue LHS;
4130
4131 switch (E->getLHS()->getType().getObjCLifetime()) {
4132 case Qualifiers::OCL_Strong:
4133 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
4134 break;
4135
4136 case Qualifiers::OCL_Autoreleasing:
4137 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
4138 break;
4139
4140 case Qualifiers::OCL_ExplicitNone:
4141 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
4142 break;
4143
4144 case Qualifiers::OCL_Weak:
4145 RHS = Visit(E->getRHS());
4146 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4147 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
4148 break;
4149
4150 case Qualifiers::OCL_None:
4151     // __block variables need to have the RHS evaluated first; as a bonus,
4152     // this should improve codegen a little.
4153 RHS = Visit(E->getRHS());
4154 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4155
4156 // Store the value into the LHS. Bit-fields are handled specially
4157 // because the result is altered by the store, i.e., [C99 6.5.16p1]
4158 // 'An assignment expression has the value of the left operand after
4159 // the assignment...'.
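    // For example, given "struct S { unsigned f : 3; } s;", the assignment
    // expression "(s.f = 9)" stores the truncated bit-field and evaluates
    // to 1, not 9 (illustrative sketch).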
4160 if (LHS.isBitField()) {
4161 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
4162 } else {
4163 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
4164 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
4165 }
4166 }
4167
4168 // If the result is clearly ignored, return now.
4169 if (Ignore)
4170 return nullptr;
4171
4172 // The result of an assignment in C is the assigned r-value.
4173 if (!CGF.getLangOpts().CPlusPlus)
4174 return RHS;
4175
4176 // If the lvalue is non-volatile, return the computed value of the assignment.
4177 if (!LHS.isVolatileQualified())
4178 return RHS;
4179
4180 // Otherwise, reload the value.
4181 return EmitLoadOfLValue(LHS, E->getExprLoc());
4182 }
4183
4184 Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
4185 // Perform vector logical and on comparisons with zero vectors.
4186 if (E->getType()->isVectorType()) {
4187 CGF.incrementProfileCounter(E);
4188
4189 Value *LHS = Visit(E->getLHS());
4190 Value *RHS = Visit(E->getRHS());
4191 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4192 if (LHS->getType()->isFPOrFPVectorTy()) {
4193 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4194 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4195 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4196 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4197 } else {
4198 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4199 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4200 }
4201 Value *And = Builder.CreateAnd(LHS, RHS);
4202 return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
4203 }
4204
4205 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4206 llvm::Type *ResTy = ConvertType(E->getType());
4207
4208   // If we have 0 && RHS, see if we can elide RHS; if so, just return 0.
4209   // If we have 1 && X, just emit X without inserting the control flow.
4210 bool LHSCondVal;
4211 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4212 if (LHSCondVal) { // If we have 1 && X, just emit X.
4213 CGF.incrementProfileCounter(E);
4214
4215 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4216
4217 // If we're generating for profiling or coverage, generate a branch to a
4218 // block that increments the RHS counter needed to track branch condition
4219 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4220 // "FalseBlock" after the increment is done.
4221 if (InstrumentRegions &&
4222 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4223 llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
4224 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4225 Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
4226 CGF.EmitBlock(RHSBlockCnt);
4227 CGF.incrementProfileCounter(E->getRHS());
4228 CGF.EmitBranch(FBlock);
4229 CGF.EmitBlock(FBlock);
4230 }
4231
4232 // ZExt result to int or bool.
4233 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
4234 }
4235
4236 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
4237 if (!CGF.ContainsLabel(E->getRHS()))
4238 return llvm::Constant::getNullValue(ResTy);
4239 }
4240
4241 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
4242 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
4243
4244 CodeGenFunction::ConditionalEvaluation eval(CGF);
4245
4246 // Branch on the LHS first. If it is false, go to the failure (cont) block.
4247 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
4248 CGF.getProfileCount(E->getRHS()));
4249
4250   // Any edges into the ContBlock now come from an (indeterminate number
4251   // of) short-circuit edges out of this first condition; all of those
4252   // values will be false. Start setting up the PHI node in the ContBlock.
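  // For "x && y" emitted with control flow, the result has roughly this
  // shape (illustrative sketch):
  //   entry:
  //     br i1 %x.cond, label %land.rhs, label %land.end
  //   land.rhs:
  //     %y.cond = ...
  //     br label %land.end
  //   land.end:
  //     %r = phi i1 [ false, %entry ], [ %y.cond, %land.rhs ]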
4253 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4254 "", ContBlock);
4255 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4256 PI != PE; ++PI)
4257 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
4258
4259 eval.begin(CGF);
4260 CGF.EmitBlock(RHSBlock);
4261 CGF.incrementProfileCounter(E);
4262 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4263 eval.end(CGF);
4264
4265   // Reacquire the RHS block, as there may be subblocks inserted.
4266 RHSBlock = Builder.GetInsertBlock();
4267
4268 // If we're generating for profiling or coverage, generate a branch on the
4269 // RHS to a block that increments the RHS true counter needed to track branch
4270 // condition coverage.
4271 if (InstrumentRegions &&
4272 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4273 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4274 Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
4275 CGF.EmitBlock(RHSBlockCnt);
4276 CGF.incrementProfileCounter(E->getRHS());
4277 CGF.EmitBranch(ContBlock);
4278 PN->addIncoming(RHSCond, RHSBlockCnt);
4279 }
4280
4281 // Emit an unconditional branch from this block to ContBlock.
4282 {
4283     // There is no need to emit a line number for an unconditional branch.
4284 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4285 CGF.EmitBlock(ContBlock);
4286 }
4287 // Insert an entry into the phi node for the edge with the value of RHSCond.
4288 PN->addIncoming(RHSCond, RHSBlock);
4289
4290 // Artificial location to preserve the scope information
4291 {
4292 auto NL = ApplyDebugLocation::CreateArtificial(CGF);
4293 PN->setDebugLoc(Builder.getCurrentDebugLocation());
4294 }
4295
4296 // ZExt result to int.
4297 return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
4298 }
4299
4300 Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4301 // Perform vector logical or on comparisons with zero vectors.
4302 if (E->getType()->isVectorType()) {
4303 CGF.incrementProfileCounter(E);
4304
4305 Value *LHS = Visit(E->getLHS());
4306 Value *RHS = Visit(E->getRHS());
4307 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4308 if (LHS->getType()->isFPOrFPVectorTy()) {
4309 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4310 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4311 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4312 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4313 } else {
4314 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4315 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4316 }
4317 Value *Or = Builder.CreateOr(LHS, RHS);
4318 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
4319 }
4320
4321 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4322 llvm::Type *ResTy = ConvertType(E->getType());
4323
4324   // If we have 1 || RHS, see if we can elide RHS; if so, just return 1.
4325   // If we have 0 || X, just emit X without inserting the control flow.
4326 bool LHSCondVal;
4327 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4328 if (!LHSCondVal) { // If we have 0 || X, just emit X.
4329 CGF.incrementProfileCounter(E);
4330
4331 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4332
4333 // If we're generating for profiling or coverage, generate a branch to a
4334       // block that increments the RHS counter needed to track branch condition
4335 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4336 // "FalseBlock" after the increment is done.
4337 if (InstrumentRegions &&
4338 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4339 llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
4340 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4341 Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
4342 CGF.EmitBlock(RHSBlockCnt);
4343 CGF.incrementProfileCounter(E->getRHS());
4344 CGF.EmitBranch(FBlock);
4345 CGF.EmitBlock(FBlock);
4346 }
4347
4348 // ZExt result to int or bool.
4349 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
4350 }
4351
4352 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
4353 if (!CGF.ContainsLabel(E->getRHS()))
4354 return llvm::ConstantInt::get(ResTy, 1);
4355 }
4356
4357 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
4358 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
4359
4360 CodeGenFunction::ConditionalEvaluation eval(CGF);
4361
4362 // Branch on the LHS first. If it is true, go to the success (cont) block.
4363 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
4364 CGF.getCurrentProfileCount() -
4365 CGF.getProfileCount(E->getRHS()));
4366
4367   // Any edges into the ContBlock now come from an (indeterminate number
4368   // of) short-circuit edges out of this first condition; all of those
4369   // values will be true. Start setting up the PHI node in the ContBlock.
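  // For "x || y" the shape mirrors VisitBinLAnd, except the short-circuit
  // edges carry true (illustrative sketch):
  //   lor.end:
  //     %r = phi i1 [ true, %entry ], [ %y.cond, %lor.rhs ]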
4370 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4371 "", ContBlock);
4372 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4373 PI != PE; ++PI)
4374 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
4375
4376 eval.begin(CGF);
4377
4378 // Emit the RHS condition as a bool value.
4379 CGF.EmitBlock(RHSBlock);
4380 CGF.incrementProfileCounter(E);
4381 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4382
4383 eval.end(CGF);
4384
4385   // Reacquire the RHS block, as there may be subblocks inserted.
4386 RHSBlock = Builder.GetInsertBlock();
4387
4388 // If we're generating for profiling or coverage, generate a branch on the
4389 // RHS to a block that increments the RHS true counter needed to track branch
4390 // condition coverage.
4391 if (InstrumentRegions &&
4392 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4393 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4394 Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
4395 CGF.EmitBlock(RHSBlockCnt);
4396 CGF.incrementProfileCounter(E->getRHS());
4397 CGF.EmitBranch(ContBlock);
4398 PN->addIncoming(RHSCond, RHSBlockCnt);
4399 }
4400
4401 // Emit an unconditional branch from this block to ContBlock. Insert an entry
4402 // into the phi node for the edge with the value of RHSCond.
4403 CGF.EmitBlock(ContBlock);
4404 PN->addIncoming(RHSCond, RHSBlock);
4405
4406 // ZExt result to int.
4407 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
4408 }
4409
4410 Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
4411 CGF.EmitIgnoredExpr(E->getLHS());
4412 CGF.EnsureInsertPoint();
4413 return Visit(E->getRHS());
4414 }
4415
4416 //===----------------------------------------------------------------------===//
4417 // Other Operators
4418 //===----------------------------------------------------------------------===//
4419
4420 /// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
4421 /// expression is cheap enough and side-effect-free enough to evaluate
4422 /// unconditionally instead of conditionally. This is used to convert control
4423 /// flow into selects in some cases.
4424 static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
4425 CodeGenFunction &CGF) {
4426 // Anything that is an integer or floating point constant is fine.
4427 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
4428
4429 // Even non-volatile automatic variables can't be evaluated unconditionally.
4430 // Referencing a thread_local may cause non-trivial initialization work to
4431 // occur. If we're inside a lambda and one of the variables is from the scope
4432 // outside the lambda, that function may have returned already. Reading its
4433 // locals is a bad idea. Also, these reads may introduce races that did
4434 // not exist in the source-level program.
4435 }
4436
4437
4438 Value *ScalarExprEmitter::
4439 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
4440 TestAndClearIgnoreResultAssign();
4441
4442 // Bind the common expression if necessary.
4443 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
4444
4445 Expr *condExpr = E->getCond();
4446 Expr *lhsExpr = E->getTrueExpr();
4447 Expr *rhsExpr = E->getFalseExpr();
4448
4449 // If the condition constant folds and can be elided, try to avoid emitting
4450 // the condition and the dead arm.
4451 bool CondExprBool;
4452 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4453 Expr *live = lhsExpr, *dead = rhsExpr;
4454 if (!CondExprBool) std::swap(live, dead);
4455
4456 // If the dead side doesn't have labels we need, just emit the Live part.
4457 if (!CGF.ContainsLabel(dead)) {
4458 if (CondExprBool)
4459 CGF.incrementProfileCounter(E);
4460 Value *Result = Visit(live);
4461
4462 // If the live part is a throw expression, it acts like it has a void
4463 // type, so evaluating it returns a null Value*. However, a conditional
4464 // with non-void type must return a non-null Value*.
4465 if (!Result && !E->getType()->isVoidType())
4466 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
4467
4468 return Result;
4469 }
4470 }
4471
4472 // OpenCL: If the condition is a vector, we can treat this condition like
4473 // the select function.
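  // For example, with "int4 c; float4 a, b;" the expression "c ? a : b"
  // selects each element based on the sign bit of the corresponding element
  // of c, roughly as (illustrative sketch):
  //   %msb  = icmp slt <4 x i32> %c, zeroinitializer
  //   %mask = sext <4 x i1> %msb to <4 x i32>
  // followed by (%a & %mask) | (%b & ~%mask) on the bitcast operands.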
4474 if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
4475 condExpr->getType()->isExtVectorType()) {
4476 CGF.incrementProfileCounter(E);
4477
4478 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4479 llvm::Value *LHS = Visit(lhsExpr);
4480 llvm::Value *RHS = Visit(rhsExpr);
4481
4482 llvm::Type *condType = ConvertType(condExpr->getType());
4483 auto *vecTy = cast<llvm::FixedVectorType>(condType);
4484
4485 unsigned numElem = vecTy->getNumElements();
4486 llvm::Type *elemType = vecTy->getElementType();
4487
4488 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
4489 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
4490 llvm::Value *tmp = Builder.CreateSExt(
4491 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
4492 llvm::Value *tmp2 = Builder.CreateNot(tmp);
4493
4494 // Cast float to int to perform ANDs if necessary.
4495 llvm::Value *RHSTmp = RHS;
4496 llvm::Value *LHSTmp = LHS;
4497 bool wasCast = false;
4498 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
4499 if (rhsVTy->getElementType()->isFloatingPointTy()) {
4500 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
4501 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
4502 wasCast = true;
4503 }
4504
4505 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
4506 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
4507 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
4508 if (wasCast)
4509 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
4510
4511 return tmp5;
4512 }
4513
4514 if (condExpr->getType()->isVectorType()) {
4515 CGF.incrementProfileCounter(E);
4516
4517 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4518 llvm::Value *LHS = Visit(lhsExpr);
4519 llvm::Value *RHS = Visit(rhsExpr);
4520
4521 llvm::Type *CondType = ConvertType(condExpr->getType());
4522 auto *VecTy = cast<llvm::VectorType>(CondType);
4523 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
4524
4525 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
4526 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
4527 }
4528
4529 // If this is a really simple expression (like x ? 4 : 5), emit this as a
4530 // select instead of as control flow. We can only do this if it is cheap and
4531 // safe to evaluate the LHS and RHS unconditionally.
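  // For example, "c ? 4 : 5" can be emitted as (illustrative sketch):
  //   %tobool = icmp ne i32 %c, 0
  //   %cond   = select i1 %tobool, i32 4, i32 5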
4532 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
4533 isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
4534 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
4535 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
4536
4537 CGF.incrementProfileCounter(E, StepV);
4538
4539 llvm::Value *LHS = Visit(lhsExpr);
4540 llvm::Value *RHS = Visit(rhsExpr);
4541 if (!LHS) {
4542 // If the conditional has void type, make sure we return a null Value*.
4543 assert(!RHS && "LHS and RHS types must match");
4544 return nullptr;
4545 }
4546 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
4547 }
4548
4549 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
4550 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
4551 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
4552
4553 CodeGenFunction::ConditionalEvaluation eval(CGF);
4554 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
4555 CGF.getProfileCount(lhsExpr));
4556
4557 CGF.EmitBlock(LHSBlock);
4558 CGF.incrementProfileCounter(E);
4559 eval.begin(CGF);
4560 Value *LHS = Visit(lhsExpr);
4561 eval.end(CGF);
4562
4563 LHSBlock = Builder.GetInsertBlock();
4564 Builder.CreateBr(ContBlock);
4565
4566 CGF.EmitBlock(RHSBlock);
4567 eval.begin(CGF);
4568 Value *RHS = Visit(rhsExpr);
4569 eval.end(CGF);
4570
4571 RHSBlock = Builder.GetInsertBlock();
4572 CGF.EmitBlock(ContBlock);
4573
4574 // If the LHS or RHS is a throw expression, it will be legitimately null.
4575 if (!LHS)
4576 return RHS;
4577 if (!RHS)
4578 return LHS;
4579
4580   // Create a PHI node for the result of the conditional.
4581 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
4582 PN->addIncoming(LHS, LHSBlock);
4583 PN->addIncoming(RHS, RHSBlock);
4584 return PN;
4585 }
4586
4587 Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
4588 return Visit(E->getChosenSubExpr());
4589 }
4590
4591 Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
4592 QualType Ty = VE->getType();
4593
4594 if (Ty->isVariablyModifiedType())
4595 CGF.EmitVariablyModifiedType(Ty);
4596
4597 Address ArgValue = Address::invalid();
4598 Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
4599
4600 llvm::Type *ArgTy = ConvertType(VE->getType());
4601
4602 // If EmitVAArg fails, emit an error.
4603 if (!ArgPtr.isValid()) {
4604 CGF.ErrorUnsupported(VE, "va_arg expression");
4605 return llvm::UndefValue::get(ArgTy);
4606 }
4607
4608 // FIXME Volatility.
4609 llvm::Value *Val = Builder.CreateLoad(ArgPtr);
4610
4611 // If EmitVAArg promoted the type, we must truncate it.
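  // For example, on targets whose ABI promotes small integer arguments, a
  // va_arg of a 16-bit type may be loaded as an i32 and truncated back to
  // i16 here (illustrative sketch; the exact promotion is ABI-dependent).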
4612 if (ArgTy != Val->getType()) {
4613 if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
4614 Val = Builder.CreateIntToPtr(Val, ArgTy);
4615 else
4616 Val = Builder.CreateTrunc(Val, ArgTy);
4617 }
4618
4619 return Val;
4620 }
4621
4622 Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
4623 return CGF.EmitBlockLiteral(block);
4624 }
4625
4626 // Convert a vec3 to vec4, or vice versa.
4627 static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
4628 Value *Src, unsigned NumElementsDst) {
4629 static constexpr int Mask[] = {0, 1, 2, -1};
4630 return Builder.CreateShuffleVector(Src,
4631 llvm::makeArrayRef(Mask, NumElementsDst));
4632 }
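// For example, widening a <3 x float> to a <4 x float> uses the full mask,
// leaving the new fourth lane undefined (illustrative sketch):
//   %v4 = shufflevector <3 x float> %src, <3 x float> undef,
//                       <4 x i32> <i32 0, i32 1, i32 2, i32 undef>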
4633
4634 // Create cast instructions for converting LLVM value \p Src to LLVM type \p
4635 // DstTy. \p Src has the same size as \p DstTy. Both are single value types
4636 // but could be scalars or vectors of different lengths, and either can be a
4637 // pointer.
4638 // There are 4 cases:
4639 // 1. non-pointer -> non-pointer : needs 1 bitcast
4640 // 2. pointer -> pointer : needs 1 bitcast or addrspacecast
4641 // 3. pointer -> non-pointer
4642 // a) pointer -> intptr_t : needs 1 ptrtoint
4643 // b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
4644 // 4. non-pointer -> pointer
4645 // a) intptr_t -> pointer : needs 1 inttoptr
4646 // b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
4647 // Note: for cases 3b and 4b two casts are required since LLVM casts do not
4648 // allow casting directly between pointer types and non-integer non-pointer
4649 // types.
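// For example, case 3b (pointer -> non-intptr_t) on a 64-bit target emits
// (illustrative sketch):
//   %i = ptrtoint i8* %p to i64
//   %d = bitcast i64 %i to double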
4650 static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
4651 const llvm::DataLayout &DL,
4652 Value *Src, llvm::Type *DstTy,
4653 StringRef Name = "") {
4654 auto SrcTy = Src->getType();
4655
4656 // Case 1.
4657 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
4658 return Builder.CreateBitCast(Src, DstTy, Name);
4659
4660 // Case 2.
4661 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
4662 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
4663
4664 // Case 3.
4665 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
4666 // Case 3b.
4667 if (!DstTy->isIntegerTy())
4668 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
4669 // Cases 3a and 3b.
4670 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
4671 }
4672
4673 // Case 4b.
4674 if (!SrcTy->isIntegerTy())
4675 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
4676 // Cases 4a and 4b.
4677 return Builder.CreateIntToPtr(Src, DstTy, Name);
4678 }
4679
4680 Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
4681 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
4682 llvm::Type *DstTy = ConvertType(E->getType());
4683
4684 llvm::Type *SrcTy = Src->getType();
4685 unsigned NumElementsSrc =
4686 isa<llvm::VectorType>(SrcTy)
4687 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
4688 : 0;
4689 unsigned NumElementsDst =
4690 isa<llvm::VectorType>(DstTy)
4691 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
4692 : 0;
4693
4694 // Going from vec3 to non-vec3 is a special case and requires a shuffle
4695 // vector to get a vec4, then a bitcast if the target type is different.
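  // For example, OpenCL "as_int4(v)" applied to a float3 first widens v to a
  // float4 with a shufflevector and then bitcasts it to <4 x i32>
  // (illustrative sketch).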
4696 if (NumElementsSrc == 3 && NumElementsDst != 3) {
4697 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
4698
4699 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4700 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4701 DstTy);
4702 }
4703
4704 Src->setName("astype");
4705 return Src;
4706 }
4707
4708 // Going from non-vec3 to vec3 is a special case and requires a bitcast
4709 // to vec4 if the original type is not vec4, then a shuffle vector to
4710 // get a vec3.
4711 if (NumElementsSrc != 3 && NumElementsDst == 3) {
4712 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4713 auto *Vec4Ty = llvm::FixedVectorType::get(
4714 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
4715 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4716 Vec4Ty);
4717 }
4718
4719 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
4720 Src->setName("astype");
4721 return Src;
4722 }
4723
4724 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
4725 Src, DstTy, "astype");
4726 }
4727
4728 Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
4729 return CGF.EmitAtomicExpr(E).getScalarVal();
4730 }
4731
4732 //===----------------------------------------------------------------------===//
4733 // Entry Point into this File
4734 //===----------------------------------------------------------------------===//
4735
4736 /// Emit the computation of the specified expression of scalar type, ignoring
4737 /// the result.
4738 Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
4739 assert(E && hasScalarEvaluationKind(E->getType()) &&
4740 "Invalid scalar expression to emit");
4741
4742 return ScalarExprEmitter(*this, IgnoreResultAssign)
4743 .Visit(const_cast<Expr *>(E));
4744 }
4745
4746 /// Emit a conversion from the specified type to the specified destination type,
4747 /// both of which are LLVM scalar types.
4748 Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
4749 QualType DstTy,
4750 SourceLocation Loc) {
4751 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
4752 "Invalid scalar expression to emit");
4753 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
4754 }
4755
4756 /// Emit a conversion from the specified complex type to the specified
4757 /// destination type, where the destination type is an LLVM scalar type.
4758 Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
4759 QualType SrcTy,
4760 QualType DstTy,
4761 SourceLocation Loc) {
4762 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
4763 "Invalid complex -> scalar conversion");
4764 return ScalarExprEmitter(*this)
4765 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
4766 }
4767
4768
4769 llvm::Value *CodeGenFunction::
4770 EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
4771 bool isInc, bool isPre) {
4772 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
4773 }
4774
4775 LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
4776 // object->isa or (*object).isa
4777 // Generate code as for: *(Class*)object
4778
4779 Expr *BaseExpr = E->getBase();
4780 Address Addr = Address::invalid();
4781 if (BaseExpr->isRValue()) {
4782 Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
4783 } else {
4784 Addr = EmitLValue(BaseExpr).getAddress(*this);
4785 }
4786
4787 // Cast the address to Class*.
4788 Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
4789 return MakeAddrLValue(Addr, E->getType());
4790 }
4791
4792
4793 LValue CodeGenFunction::EmitCompoundAssignmentLValue(
4794 const CompoundAssignOperator *E) {
4795 ScalarExprEmitter Scalar(*this);
4796 Value *Result = nullptr;
4797 switch (E->getOpcode()) {
4798 #define COMPOUND_OP(Op) \
4799 case BO_##Op##Assign: \
4800 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
4801 Result)
4802 COMPOUND_OP(Mul);
4803 COMPOUND_OP(Div);
4804 COMPOUND_OP(Rem);
4805 COMPOUND_OP(Add);
4806 COMPOUND_OP(Sub);
4807 COMPOUND_OP(Shl);
4808 COMPOUND_OP(Shr);
4809 COMPOUND_OP(And);
4810 COMPOUND_OP(Xor);
4811 COMPOUND_OP(Or);
4812 #undef COMPOUND_OP
4813
4814 case BO_PtrMemD:
4815 case BO_PtrMemI:
4816 case BO_Mul:
4817 case BO_Div:
4818 case BO_Rem:
4819 case BO_Add:
4820 case BO_Sub:
4821 case BO_Shl:
4822 case BO_Shr:
4823 case BO_LT:
4824 case BO_GT:
4825 case BO_LE:
4826 case BO_GE:
4827 case BO_EQ:
4828 case BO_NE:
4829 case BO_Cmp:
4830 case BO_And:
4831 case BO_Xor:
4832 case BO_Or:
4833 case BO_LAnd:
4834 case BO_LOr:
4835 case BO_Assign:
4836 case BO_Comma:
4837 llvm_unreachable("Not valid compound assignment operators");
4838 }
4839
4840 llvm_unreachable("Unhandled compound assignment operator");
4841 }
4842
4843 struct GEPOffsetAndOverflow {
4844 // The total (signed) byte offset for the GEP.
4845 llvm::Value *TotalOffset;
4846 // The offset overflow flag - true if the total offset overflows.
4847 llvm::Value *OffsetOverflows;
4848 };
4849
4850 /// Evaluate the given GEPVal, which is either an inbounds GEP or a constant,
4851 /// and compute the total offset it applies from its base pointer BasePtr.
4852 /// Returns the offset in bytes and a boolean flag indicating whether an
4853 /// overflow happened during evaluation.
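/// For example, for "&p[i]" with "int *p" on a 64-bit target, the offset is
/// computed roughly as (illustrative sketch):
///   %r = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 4, i64 %i)
/// with the i1 result folded into the returned overflow flag.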
4854 static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
4855 llvm::LLVMContext &VMContext,
4856 CodeGenModule &CGM,
4857 CGBuilderTy &Builder) {
4858 const auto &DL = CGM.getDataLayout();
4859
4860 // The total (signed) byte offset for the GEP.
4861 llvm::Value *TotalOffset = nullptr;
4862
4863 // Was the GEP already reduced to a constant?
4864 if (isa<llvm::Constant>(GEPVal)) {
4865 // Compute the offset by casting both pointers to integers and subtracting:
4866 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
4867 Value *BasePtr_int =
4868 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
4869 Value *GEPVal_int =
4870 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
4871 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
4872 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
4873 }
4874
4875 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
4876 assert(GEP->getPointerOperand() == BasePtr &&
4877          "BasePtr must be the base of the GEP.");
4878 assert(GEP->isInBounds() && "Expected inbounds GEP");
4879
4880 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
4881
4882 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
4883 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
4884 auto *SAddIntrinsic =
4885 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
4886 auto *SMulIntrinsic =
4887 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
4888
4889 // The offset overflow flag - true if the total offset overflows.
4890 llvm::Value *OffsetOverflows = Builder.getFalse();
4891
4892 /// Return the result of the given binary operation.
4893 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
4894 llvm::Value *RHS) -> llvm::Value * {
4895 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
4896
4897 // If the operands are constants, return a constant result.
4898 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
4899 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
4900 llvm::APInt N;
4901 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
4902 /*Signed=*/true, N);
4903 if (HasOverflow)
4904 OffsetOverflows = Builder.getTrue();
4905 return llvm::ConstantInt::get(VMContext, N);
4906 }
4907 }
4908
4909 // Otherwise, compute the result with checked arithmetic.
4910 auto *ResultAndOverflow = Builder.CreateCall(
4911 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
4912 OffsetOverflows = Builder.CreateOr(
4913 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
4914 return Builder.CreateExtractValue(ResultAndOverflow, 0);
4915 };
4916
4917 // Determine the total byte offset by looking at each GEP operand.
4918 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
4919 GTI != GTE; ++GTI) {
4920 llvm::Value *LocalOffset;
4921 auto *Index = GTI.getOperand();
4922 // Compute the local offset contributed by this indexing step:
4923 if (auto *STy = GTI.getStructTypeOrNull()) {
4924 // For struct indexing, the local offset is the byte position of the
4925 // specified field.
4926 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
4927 LocalOffset = llvm::ConstantInt::get(
4928 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
4929 } else {
4930 // Otherwise this is array-like indexing. The local offset is the index
4931 // multiplied by the element size.
4932 auto *ElementSize = llvm::ConstantInt::get(
4933 IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
4934 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
4935 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
4936 }
4937
4938 // If this is the first offset, set it as the total offset. Otherwise, add
4939 // the local offset into the running total.
4940 if (!TotalOffset || TotalOffset == Zero)
4941 TotalOffset = LocalOffset;
4942 else
4943 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
4944 }
4945
4946 return {TotalOffset, OffsetOverflows};
4947 }
4948
4949 Value *
4950 CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
4951 bool SignedIndices, bool IsSubtraction,
4952 SourceLocation Loc, const Twine &Name) {
4953 Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);
4954
4955 // If the pointer overflow sanitizer isn't enabled, do nothing.
4956 if (!SanOpts.has(SanitizerKind::PointerOverflow))
4957 return GEPVal;
4958
4959 llvm::Type *PtrTy = Ptr->getType();
4960
4961 // Perform nullptr-and-offset check unless the nullptr is defined.
4962 bool PerformNullCheck = !NullPointerIsDefined(
4963 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
4964   // Check for overflows unless the GEP got constant-folded,
4965   // and only in the default address space.
4966 bool PerformOverflowCheck =
4967 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
4968
4969 if (!(PerformNullCheck || PerformOverflowCheck))
4970 return GEPVal;
4971
4972 const auto &DL = CGM.getDataLayout();
4973
4974 SanitizerScope SanScope(this);
4975 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
4976
4977 GEPOffsetAndOverflow EvaluatedGEP =
4978 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
4979
4980 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
4981 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
4982 "If the offset got constant-folded, we don't expect that there was an "
4983 "overflow.");
4984
4985 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
4986
4987 // Common case: if the total offset is zero, and we are using C++ semantics,
4988 // where nullptr+0 is defined, don't emit a check.
4989 if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
4990 return GEPVal;
4991
4992 // Now that we've computed the total offset, add it to the base pointer (with
4993 // wrapping semantics).
4994 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
4995 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
4996
4997 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
4998
4999 if (PerformNullCheck) {
5000 // In C++, if the base pointer evaluates to a null pointer value,
5001 // the only valid pointer this inbounds GEP can produce is also
5002 // a null pointer, so the offset must also evaluate to zero.
5003     // Likewise, if we have a non-zero base pointer, we cannot get a null
5004     // pointer as a result, so the offset cannot be -intptr_t(BasePtr).
5005     // In other words, both pointers are either null, or both are non-null,
5006     // or the behavior is undefined.
5007 //
5008 // C, however, is more strict in this regard, and gives more
5009 // optimization opportunities: in C, additionally, nullptr+0 is undefined.
5010 // So both the input to the 'gep inbounds' AND the output must not be null.
5011 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
5012 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
5013 auto *Valid =
5014 CGM.getLangOpts().CPlusPlus
5015 ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
5016 : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
5017 Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
5018 }
5019
5020 if (PerformOverflowCheck) {
5021 // The GEP is valid if:
5022 // 1) The total offset doesn't overflow, and
5023 // 2) The sign of the difference between the computed address and the base
5024 // pointer matches the sign of the total offset.
5025 llvm::Value *ValidGEP;
5026 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
5027 if (SignedIndices) {
5028       // GEP is computed as `unsigned base + signed offset`, therefore:
5029       // * If the offset is positive, then the computed pointer cannot be
5030       //   [unsigned] less than the base pointer, unless it overflowed.
5031       // * If the offset is negative, then the computed pointer cannot be
5032       //   [unsigned] greater than the base pointer, unless it overflowed.
5033 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5034 auto *PosOrZeroOffset =
5035 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
5036 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
5037 ValidGEP =
5038 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
5039 } else if (!IsSubtraction) {
5040       // GEP is computed as `unsigned base + unsigned offset`, therefore the
5041       // computed pointer cannot be [unsigned] less than the base pointer,
5042       // unless there was an overflow.
5043 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
5044 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5045 } else {
5046       // GEP is computed as `unsigned base - unsigned offset`, therefore the
5047       // computed pointer cannot be [unsigned] greater than the base pointer,
5048 // unless there was an overflow.
5049 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
5050 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
5051 }
5052 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
5053 Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
5054 }
5055
5056 assert(!Checks.empty() && "Should have produced some checks.");
5057
5058 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
5059 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
5060 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
5061 EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
5062
5063 return GEPVal;
5064 }
5065