1 //===- SemaChecking.cpp - Extra Semantic Checking -------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements extra semantic analysis beyond what is enforced 10 // by the C type system. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "clang/AST/APValue.h" 15 #include "clang/AST/ASTContext.h" 16 #include "clang/AST/Attr.h" 17 #include "clang/AST/AttrIterator.h" 18 #include "clang/AST/CharUnits.h" 19 #include "clang/AST/Decl.h" 20 #include "clang/AST/DeclBase.h" 21 #include "clang/AST/DeclCXX.h" 22 #include "clang/AST/DeclObjC.h" 23 #include "clang/AST/DeclarationName.h" 24 #include "clang/AST/EvaluatedExprVisitor.h" 25 #include "clang/AST/Expr.h" 26 #include "clang/AST/ExprCXX.h" 27 #include "clang/AST/ExprObjC.h" 28 #include "clang/AST/ExprOpenMP.h" 29 #include "clang/AST/FormatString.h" 30 #include "clang/AST/NSAPI.h" 31 #include "clang/AST/NonTrivialTypeVisitor.h" 32 #include "clang/AST/OperationKinds.h" 33 #include "clang/AST/RecordLayout.h" 34 #include "clang/AST/Stmt.h" 35 #include "clang/AST/TemplateBase.h" 36 #include "clang/AST/Type.h" 37 #include "clang/AST/TypeLoc.h" 38 #include "clang/AST/UnresolvedSet.h" 39 #include "clang/Basic/AddressSpaces.h" 40 #include "clang/Basic/CharInfo.h" 41 #include "clang/Basic/Diagnostic.h" 42 #include "clang/Basic/IdentifierTable.h" 43 #include "clang/Basic/LLVM.h" 44 #include "clang/Basic/LangOptions.h" 45 #include "clang/Basic/OpenCLOptions.h" 46 #include "clang/Basic/OperatorKinds.h" 47 #include "clang/Basic/PartialDiagnostic.h" 48 #include "clang/Basic/SourceLocation.h" 49 #include "clang/Basic/SourceManager.h" 50 #include "clang/Basic/Specifiers.h" 51 #include "clang/Basic/SyncScope.h" 52 #include "clang/Basic/TargetBuiltins.h" 53 #include "clang/Basic/TargetCXXABI.h" 54 #include "clang/Basic/TargetInfo.h" 55 #include "clang/Basic/TypeTraits.h" 56 #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering. 
57 #include "clang/Sema/Initialization.h" 58 #include "clang/Sema/Lookup.h" 59 #include "clang/Sema/Ownership.h" 60 #include "clang/Sema/Scope.h" 61 #include "clang/Sema/ScopeInfo.h" 62 #include "clang/Sema/Sema.h" 63 #include "clang/Sema/SemaInternal.h" 64 #include "llvm/ADT/APFloat.h" 65 #include "llvm/ADT/APInt.h" 66 #include "llvm/ADT/APSInt.h" 67 #include "llvm/ADT/ArrayRef.h" 68 #include "llvm/ADT/DenseMap.h" 69 #include "llvm/ADT/FoldingSet.h" 70 #include "llvm/ADT/None.h" 71 #include "llvm/ADT/Optional.h" 72 #include "llvm/ADT/STLExtras.h" 73 #include "llvm/ADT/SmallBitVector.h" 74 #include "llvm/ADT/SmallPtrSet.h" 75 #include "llvm/ADT/SmallString.h" 76 #include "llvm/ADT/SmallVector.h" 77 #include "llvm/ADT/StringRef.h" 78 #include "llvm/ADT/StringSwitch.h" 79 #include "llvm/ADT/Triple.h" 80 #include "llvm/Support/AtomicOrdering.h" 81 #include "llvm/Support/Casting.h" 82 #include "llvm/Support/Compiler.h" 83 #include "llvm/Support/ConvertUTF.h" 84 #include "llvm/Support/ErrorHandling.h" 85 #include "llvm/Support/Format.h" 86 #include "llvm/Support/Locale.h" 87 #include "llvm/Support/MathExtras.h" 88 #include "llvm/Support/SaveAndRestore.h" 89 #include "llvm/Support/raw_ostream.h" 90 #include <algorithm> 91 #include <bitset> 92 #include <cassert> 93 #include <cstddef> 94 #include <cstdint> 95 #include <functional> 96 #include <limits> 97 #include <string> 98 #include <tuple> 99 #include <utility> 100 101 using namespace clang; 102 using namespace sema; 103 104 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 105 unsigned ByteNo) const { 106 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts, 107 Context.getTargetInfo()); 108 } 109 110 /// Checks that a call expression's argument count is the desired number. 111 /// This is useful when doing custom type-checking. Returns true on error. 112 static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) { 113 unsigned argCount = call->getNumArgs(); 114 if (argCount == desiredArgCount) return false; 115 116 if (argCount < desiredArgCount) 117 return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args) 118 << 0 /*function call*/ << desiredArgCount << argCount 119 << call->getSourceRange(); 120 121 // Highlight all the excess arguments. 122 SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(), 123 call->getArg(argCount - 1)->getEndLoc()); 124 125 return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args) 126 << 0 /*function call*/ << desiredArgCount << argCount 127 << call->getArg(1)->getSourceRange(); 128 } 129 130 /// Check that the first argument to __builtin_annotation is an integer 131 /// and the second argument is a non-wide string literal. 132 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { 133 if (checkArgCount(S, TheCall, 2)) 134 return true; 135 136 // First argument should be an integer. 137 Expr *ValArg = TheCall->getArg(0); 138 QualType Ty = ValArg->getType(); 139 if (!Ty->isIntegerType()) { 140 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg) 141 << ValArg->getSourceRange(); 142 return true; 143 } 144 145 // Second argument should be a constant string. 
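// A sketch of what the check below accepts and rejects (hypothetical user
// code, not taken from the test suite): __builtin_annotation(SomeInt, "reason")
// is accepted for an integer first argument, while a non-integer first argument
// or a wide literal such as L"reason" as the second argument is diagnosed.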
146 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts(); 147 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg); 148 if (!Literal || !Literal->isAscii()) { 149 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg) 150 << StrArg->getSourceRange(); 151 return true; 152 } 153 154 TheCall->setType(Ty); 155 return false; 156 } 157 158 static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) { 159 // We need at least one argument. 160 if (TheCall->getNumArgs() < 1) { 161 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 162 << 0 << 1 << TheCall->getNumArgs() 163 << TheCall->getCallee()->getSourceRange(); 164 return true; 165 } 166 167 // All arguments should be wide string literals. 168 for (Expr *Arg : TheCall->arguments()) { 169 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts()); 170 if (!Literal || !Literal->isWide()) { 171 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str) 172 << Arg->getSourceRange(); 173 return true; 174 } 175 } 176 177 return false; 178 } 179 180 /// Check that the argument to __builtin_addressof is a glvalue, and set the 181 /// result type to the corresponding pointer type. 182 static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) { 183 if (checkArgCount(S, TheCall, 1)) 184 return true; 185 186 ExprResult Arg(TheCall->getArg(0)); 187 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc()); 188 if (ResultType.isNull()) 189 return true; 190 191 TheCall->setArg(0, Arg.get()); 192 TheCall->setType(ResultType); 193 return false; 194 } 195 196 /// Check the number of arguments and set the result type to 197 /// the argument type. 198 static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) { 199 if (checkArgCount(S, TheCall, 1)) 200 return true; 201 202 TheCall->setType(TheCall->getArg(0)->getType()); 203 return false; 204 } 205 206 /// Check that the value argument for __builtin_is_aligned(value, alignment) and 207 /// __builtin_align_{up,down}(value, alignment) is an integer or a pointer 208 /// type (but not a function pointer) and that the alignment is a power of two. 209 static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) { 210 if (checkArgCount(S, TheCall, 2)) 211 return true; 212 213 clang::Expr *Source = TheCall->getArg(0); 214 bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned; 215 216 auto IsValidIntegerType = [](QualType Ty) { 217 return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType(); 218 }; 219 QualType SrcTy = Source->getType(); 220 // We should also be able to use it with arrays (but not functions!). 221 if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) { 222 SrcTy = S.Context.getDecayedType(SrcTy); 223 } 224 if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) || 225 SrcTy->isFunctionPointerType()) { 226 // FIXME: this is not quite the right error message since we don't allow 227 // floating point types, or member pointers. 228 S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand) 229 << SrcTy; 230 return true; 231 } 232 233 clang::Expr *AlignOp = TheCall->getArg(1); 234 if (!IsValidIntegerType(AlignOp->getType())) { 235 S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int) 236 << AlignOp->getType(); 237 return true; 238 } 239 Expr::EvalResult AlignResult; 240 unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1; 241 // We can't check validity of alignment if it is value dependent.
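// Illustrative calls for the constant-alignment checks that follow (hypothetical
// user code, not from the test suite): __builtin_align_up(p, 16) is accepted; an
// alignment of 0 or 3 is rejected as too small or not a power of two;
// __builtin_is_aligned(p, 1) only draws the 'useless alignment' warning; a
// non-constant alignment cannot be evaluated here and is not diagnosed.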
242 if (!AlignOp->isValueDependent() && 243 AlignOp->EvaluateAsInt(AlignResult, S.Context, 244 Expr::SE_AllowSideEffects)) { 245 llvm::APSInt AlignValue = AlignResult.Val.getInt(); 246 llvm::APSInt MaxValue( 247 llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits)); 248 if (AlignValue < 1) { 249 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1; 250 return true; 251 } 252 if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) { 253 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big) 254 << MaxValue.toString(10); 255 return true; 256 } 257 if (!AlignValue.isPowerOf2()) { 258 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two); 259 return true; 260 } 261 if (AlignValue == 1) { 262 S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless) 263 << IsBooleanAlignBuiltin; 264 } 265 } 266 267 ExprResult SrcArg = S.PerformCopyInitialization( 268 InitializedEntity::InitializeParameter(S.Context, SrcTy, false), 269 SourceLocation(), Source); 270 if (SrcArg.isInvalid()) 271 return true; 272 TheCall->setArg(0, SrcArg.get()); 273 ExprResult AlignArg = 274 S.PerformCopyInitialization(InitializedEntity::InitializeParameter( 275 S.Context, AlignOp->getType(), false), 276 SourceLocation(), AlignOp); 277 if (AlignArg.isInvalid()) 278 return true; 279 TheCall->setArg(1, AlignArg.get()); 280 // For align_up/align_down, the return type is the same as the (potentially 281 // decayed) argument type including qualifiers. For is_aligned(), the result 282 // is always bool. 283 TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy); 284 return false; 285 } 286 287 static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall, 288 unsigned BuiltinID) { 289 if (checkArgCount(S, TheCall, 3)) 290 return true; 291 292 // First two arguments should be integers. 293 for (unsigned I = 0; I < 2; ++I) { 294 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I)); 295 if (Arg.isInvalid()) return true; 296 TheCall->setArg(I, Arg.get()); 297 298 QualType Ty = Arg.get()->getType(); 299 if (!Ty->isIntegerType()) { 300 S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int) 301 << Ty << Arg.get()->getSourceRange(); 302 return true; 303 } 304 } 305 306 // Third argument should be a pointer to a non-const integer. 307 // IRGen correctly handles volatile, restrict, and address spaces, and 308 // the other qualifiers aren't possible. 309 { 310 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2)); 311 if (Arg.isInvalid()) return true; 312 TheCall->setArg(2, Arg.get()); 313 314 QualType Ty = Arg.get()->getType(); 315 const auto *PtrTy = Ty->getAs<PointerType>(); 316 if (!PtrTy || 317 !PtrTy->getPointeeType()->isIntegerType() || 318 PtrTy->getPointeeType().isConstQualified()) { 319 S.Diag(Arg.get()->getBeginLoc(), 320 diag::err_overflow_builtin_must_be_ptr_int) 321 << Ty << Arg.get()->getSourceRange(); 322 return true; 323 } 324 } 325 326 // Disallow signed ExtIntType args larger than 128 bits to mul function until 327 // we improve backend support. 328 if (BuiltinID == Builtin::BI__builtin_mul_overflow) { 329 for (unsigned I = 0; I < 3; ++I) { 330 const auto Arg = TheCall->getArg(I); 331 // Third argument will be a pointer. 332 auto Ty = I < 2 ? 
Arg->getType() : Arg->getType()->getPointeeType(); 333 if (Ty->isExtIntType() && Ty->isSignedIntegerType() && 334 S.getASTContext().getIntWidth(Ty) > 128) 335 return S.Diag(Arg->getBeginLoc(), 336 diag::err_overflow_builtin_ext_int_max_size) 337 << 128; 338 } 339 } 340 341 return false; 342 } 343 344 static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) { 345 if (checkArgCount(S, BuiltinCall, 2)) 346 return true; 347 348 SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc(); 349 Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts(); 350 Expr *Call = BuiltinCall->getArg(0); 351 Expr *Chain = BuiltinCall->getArg(1); 352 353 if (Call->getStmtClass() != Stmt::CallExprClass) { 354 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call) 355 << Call->getSourceRange(); 356 return true; 357 } 358 359 auto CE = cast<CallExpr>(Call); 360 if (CE->getCallee()->getType()->isBlockPointerType()) { 361 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call) 362 << Call->getSourceRange(); 363 return true; 364 } 365 366 const Decl *TargetDecl = CE->getCalleeDecl(); 367 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) 368 if (FD->getBuiltinID()) { 369 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call) 370 << Call->getSourceRange(); 371 return true; 372 } 373 374 if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) { 375 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call) 376 << Call->getSourceRange(); 377 return true; 378 } 379 380 ExprResult ChainResult = S.UsualUnaryConversions(Chain); 381 if (ChainResult.isInvalid()) 382 return true; 383 if (!ChainResult.get()->getType()->isPointerType()) { 384 S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer) 385 << Chain->getSourceRange(); 386 return true; 387 } 388 389 QualType ReturnTy = CE->getCallReturnType(S.Context); 390 QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() }; 391 QualType BuiltinTy = S.Context.getFunctionType( 392 ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo()); 393 QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy); 394 395 Builtin = 396 S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get(); 397 398 BuiltinCall->setType(CE->getType()); 399 BuiltinCall->setValueKind(CE->getValueKind()); 400 BuiltinCall->setObjectKind(CE->getObjectKind()); 401 BuiltinCall->setCallee(Builtin); 402 BuiltinCall->setArg(1, ChainResult.get()); 403 404 return false; 405 } 406 407 namespace { 408 409 class EstimateSizeFormatHandler 410 : public analyze_format_string::FormatStringHandler { 411 size_t Size; 412 413 public: 414 EstimateSizeFormatHandler(StringRef Format) 415 : Size(std::min(Format.find(0), Format.size()) + 416 1 /* null byte always written by sprintf */) {} 417 418 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 419 const char *, unsigned SpecifierLen) override { 420 421 const size_t FieldWidth = computeFieldWidth(FS); 422 const size_t Precision = computePrecision(FS); 423 424 // The actual format. 425 switch (FS.getConversionSpecifier().getKind()) { 426 // Just a char. 427 case analyze_format_string::ConversionSpecifier::cArg: 428 case analyze_format_string::ConversionSpecifier::CArg: 429 Size += std::max(FieldWidth, (size_t)1); 430 break; 431 // Just an integer. 
432 case analyze_format_string::ConversionSpecifier::dArg: 433 case analyze_format_string::ConversionSpecifier::DArg: 434 case analyze_format_string::ConversionSpecifier::iArg: 435 case analyze_format_string::ConversionSpecifier::oArg: 436 case analyze_format_string::ConversionSpecifier::OArg: 437 case analyze_format_string::ConversionSpecifier::uArg: 438 case analyze_format_string::ConversionSpecifier::UArg: 439 case analyze_format_string::ConversionSpecifier::xArg: 440 case analyze_format_string::ConversionSpecifier::XArg: 441 Size += std::max(FieldWidth, Precision); 442 break; 443 444 // %g style conversion switches between %f or %e style dynamically. 445 // %f always takes less space, so default to it. 446 case analyze_format_string::ConversionSpecifier::gArg: 447 case analyze_format_string::ConversionSpecifier::GArg: 448 449 // Floating point number in the form '[+]ddd.ddd'. 450 case analyze_format_string::ConversionSpecifier::fArg: 451 case analyze_format_string::ConversionSpecifier::FArg: 452 Size += std::max(FieldWidth, 1 /* integer part */ + 453 (Precision ? 1 + Precision 454 : 0) /* period + decimal */); 455 break; 456 457 // Floating point number in the form '[-]d.ddde[+-]dd'. 458 case analyze_format_string::ConversionSpecifier::eArg: 459 case analyze_format_string::ConversionSpecifier::EArg: 460 Size += 461 std::max(FieldWidth, 462 1 /* integer part */ + 463 (Precision ? 1 + Precision : 0) /* period + decimal */ + 464 1 /* e or E letter */ + 2 /* exponent */); 465 break; 466 467 // Floating point number in the form '[-]0xh.hhhhp±dd'. 468 case analyze_format_string::ConversionSpecifier::aArg: 469 case analyze_format_string::ConversionSpecifier::AArg: 470 Size += 471 std::max(FieldWidth, 472 2 /* 0x */ + 1 /* integer part */ + 473 (Precision ? 1 + Precision : 0) /* period + decimal */ + 474 1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */); 475 break; 476 477 // Just a string. 478 case analyze_format_string::ConversionSpecifier::sArg: 479 case analyze_format_string::ConversionSpecifier::SArg: 480 Size += FieldWidth; 481 break; 482 483 // Just a pointer in the form '0xddd'. 484 case analyze_format_string::ConversionSpecifier::pArg: 485 Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision); 486 break; 487 488 // A plain percent. 489 case analyze_format_string::ConversionSpecifier::PercentArg: 490 Size += 1; 491 break; 492 493 default: 494 break; 495 } 496 497 Size += FS.hasPlusPrefix() || FS.hasSpacePrefix(); 498 499 if (FS.hasAlternativeForm()) { 500 switch (FS.getConversionSpecifier().getKind()) { 501 default: 502 break; 503 // Force a leading '0'. 504 case analyze_format_string::ConversionSpecifier::oArg: 505 Size += 1; 506 break; 507 // Force a leading '0x'. 508 case analyze_format_string::ConversionSpecifier::xArg: 509 case analyze_format_string::ConversionSpecifier::XArg: 510 Size += 2; 511 break; 512 // Force a period '.' before decimal, even if precision is 0. 513 case analyze_format_string::ConversionSpecifier::aArg: 514 case analyze_format_string::ConversionSpecifier::AArg: 515 case analyze_format_string::ConversionSpecifier::eArg: 516 case analyze_format_string::ConversionSpecifier::EArg: 517 case analyze_format_string::ConversionSpecifier::fArg: 518 case analyze_format_string::ConversionSpecifier::FArg: 519 case analyze_format_string::ConversionSpecifier::gArg: 520 case analyze_format_string::ConversionSpecifier::GArg: 521 Size += (Precision ? 
0 : 1); 522 break; 523 } 524 } 525 assert(SpecifierLen <= Size && "no underflow"); 526 Size -= SpecifierLen; 527 return true; 528 } 529 530 size_t getSizeLowerBound() const { return Size; } 531 532 private: 533 static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) { 534 const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth(); 535 size_t FieldWidth = 0; 536 if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant) 537 FieldWidth = FW.getConstantAmount(); 538 return FieldWidth; 539 } 540 541 static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) { 542 const analyze_format_string::OptionalAmount &FW = FS.getPrecision(); 543 size_t Precision = 0; 544 545 // See man 3 printf for default precision value based on the specifier. 546 switch (FW.getHowSpecified()) { 547 case analyze_format_string::OptionalAmount::NotSpecified: 548 switch (FS.getConversionSpecifier().getKind()) { 549 default: 550 break; 551 case analyze_format_string::ConversionSpecifier::dArg: // %d 552 case analyze_format_string::ConversionSpecifier::DArg: // %D 553 case analyze_format_string::ConversionSpecifier::iArg: // %i 554 Precision = 1; 555 break; 556 case analyze_format_string::ConversionSpecifier::oArg: // %o 557 case analyze_format_string::ConversionSpecifier::OArg: // %O 558 case analyze_format_string::ConversionSpecifier::uArg: // %u 559 case analyze_format_string::ConversionSpecifier::UArg: // %U 560 case analyze_format_string::ConversionSpecifier::xArg: // %x 561 case analyze_format_string::ConversionSpecifier::XArg: // %X 562 Precision = 1; 563 break; 564 case analyze_format_string::ConversionSpecifier::fArg: // %f 565 case analyze_format_string::ConversionSpecifier::FArg: // %F 566 case analyze_format_string::ConversionSpecifier::eArg: // %e 567 case analyze_format_string::ConversionSpecifier::EArg: // %E 568 case analyze_format_string::ConversionSpecifier::gArg: // %g 569 case analyze_format_string::ConversionSpecifier::GArg: // %G 570 Precision = 6; 571 break; 572 case analyze_format_string::ConversionSpecifier::pArg: // %p 573 Precision = 1; 574 break; 575 } 576 break; 577 case analyze_format_string::OptionalAmount::Constant: 578 Precision = FW.getConstantAmount(); 579 break; 580 default: 581 break; 582 } 583 return Precision; 584 } 585 }; 586 587 } // namespace 588 589 /// Check a call to BuiltinID for buffer overflows. If BuiltinID is a 590 /// __builtin_*_chk function, then use the object size argument specified in the 591 /// source. Otherwise, infer the object size using __builtin_object_size. 592 void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, 593 CallExpr *TheCall) { 594 // FIXME: There are some more useful checks we could be doing here: 595 // - Evaluate strlen of strcpy arguments, use as object size. 596 597 if (TheCall->isValueDependent() || TheCall->isTypeDependent() || 598 isConstantEvaluated()) 599 return; 600 601 unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true); 602 if (!BuiltinID) 603 return; 604 605 const TargetInfo &TI = getASTContext().getTargetInfo(); 606 unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType()); 607 608 unsigned DiagID = 0; 609 bool IsChkVariant = false; 610 Optional<llvm::APSInt> UsedSize; 611 unsigned SizeIndex, ObjectIndex; 612 switch (BuiltinID) { 613 default: 614 return; 615 case Builtin::BIsprintf: 616 case Builtin::BI__builtin___sprintf_chk: { 617 size_t FormatIndex = BuiltinID == Builtin::BIsprintf ?
1 : 3; 618 auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts(); 619 620 if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) { 621 622 if (!Format->isAscii() && !Format->isUTF8()) 623 return; 624 625 StringRef FormatStrRef = Format->getString(); 626 EstimateSizeFormatHandler H(FormatStrRef); 627 const char *FormatBytes = FormatStrRef.data(); 628 const ConstantArrayType *T = 629 Context.getAsConstantArrayType(Format->getType()); 630 assert(T && "String literal not of constant array type!"); 631 size_t TypeSize = T->getSize().getZExtValue(); 632 633 // In case there's a null byte somewhere. 634 size_t StrLen = 635 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0)); 636 if (!analyze_format_string::ParsePrintfString( 637 H, FormatBytes, FormatBytes + StrLen, getLangOpts(), 638 Context.getTargetInfo(), false)) { 639 DiagID = diag::warn_fortify_source_format_overflow; 640 UsedSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound()) 641 .extOrTrunc(SizeTypeWidth); 642 if (BuiltinID == Builtin::BI__builtin___sprintf_chk) { 643 IsChkVariant = true; 644 ObjectIndex = 2; 645 } else { 646 IsChkVariant = false; 647 ObjectIndex = 0; 648 } 649 break; 650 } 651 } 652 return; 653 } 654 case Builtin::BI__builtin___memcpy_chk: 655 case Builtin::BI__builtin___memmove_chk: 656 case Builtin::BI__builtin___memset_chk: 657 case Builtin::BI__builtin___strlcat_chk: 658 case Builtin::BI__builtin___strlcpy_chk: 659 case Builtin::BI__builtin___strncat_chk: 660 case Builtin::BI__builtin___strncpy_chk: 661 case Builtin::BI__builtin___stpncpy_chk: 662 case Builtin::BI__builtin___memccpy_chk: 663 case Builtin::BI__builtin___mempcpy_chk: { 664 DiagID = diag::warn_builtin_chk_overflow; 665 IsChkVariant = true; 666 SizeIndex = TheCall->getNumArgs() - 2; 667 ObjectIndex = TheCall->getNumArgs() - 1; 668 break; 669 } 670 671 case Builtin::BI__builtin___snprintf_chk: 672 case Builtin::BI__builtin___vsnprintf_chk: { 673 DiagID = diag::warn_builtin_chk_overflow; 674 IsChkVariant = true; 675 SizeIndex = 1; 676 ObjectIndex = 3; 677 break; 678 } 679 680 case Builtin::BIstrncat: 681 case Builtin::BI__builtin_strncat: 682 case Builtin::BIstrncpy: 683 case Builtin::BI__builtin_strncpy: 684 case Builtin::BIstpncpy: 685 case Builtin::BI__builtin_stpncpy: { 686 // Whether these functions overflow depends on the runtime strlen of the 687 // string, not just the buffer size, so emitting the "always overflow" 688 // diagnostic isn't quite right. We should still diagnose passing a buffer 689 // size larger than the destination buffer though; this is a runtime abort 690 // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise. 
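// For instance (illustrative, not from the test suite): with 'char buf[8];',
// 'strncpy(buf, src, 16)' gets the size-mismatch warning chosen below, because
// the bound argument (16) exceeds the destination size that
// __builtin_object_size reports for 'buf'.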
691 DiagID = diag::warn_fortify_source_size_mismatch; 692 SizeIndex = TheCall->getNumArgs() - 1; 693 ObjectIndex = 0; 694 break; 695 } 696 697 case Builtin::BImemcpy: 698 case Builtin::BI__builtin_memcpy: 699 case Builtin::BImemmove: 700 case Builtin::BI__builtin_memmove: 701 case Builtin::BImemset: 702 case Builtin::BI__builtin_memset: 703 case Builtin::BImempcpy: 704 case Builtin::BI__builtin_mempcpy: { 705 DiagID = diag::warn_fortify_source_overflow; 706 SizeIndex = TheCall->getNumArgs() - 1; 707 ObjectIndex = 0; 708 break; 709 } 710 case Builtin::BIsnprintf: 711 case Builtin::BI__builtin_snprintf: 712 case Builtin::BIvsnprintf: 713 case Builtin::BI__builtin_vsnprintf: { 714 DiagID = diag::warn_fortify_source_size_mismatch; 715 SizeIndex = 1; 716 ObjectIndex = 0; 717 break; 718 } 719 } 720 721 llvm::APSInt ObjectSize; 722 // For __builtin___*_chk, the object size is explicitly provided by the caller 723 // (usually using __builtin_object_size). Use that value to check this call. 724 if (IsChkVariant) { 725 Expr::EvalResult Result; 726 Expr *SizeArg = TheCall->getArg(ObjectIndex); 727 if (!SizeArg->EvaluateAsInt(Result, getASTContext())) 728 return; 729 ObjectSize = Result.Val.getInt(); 730 731 // Otherwise, try to evaluate an imaginary call to __builtin_object_size. 732 } else { 733 // If the parameter has a pass_object_size attribute, then we should use its 734 // (potentially) more strict checking mode. Otherwise, conservatively assume 735 // type 0. 736 int BOSType = 0; 737 if (const auto *POS = 738 FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>()) 739 BOSType = POS->getType(); 740 741 Expr *ObjArg = TheCall->getArg(ObjectIndex); 742 uint64_t Result; 743 if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType)) 744 return; 745 // Get the object size in the target's size_t width. 746 ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth); 747 } 748 749 // Evaluate the number of bytes of the object that this call will use. 750 if (!UsedSize) { 751 Expr::EvalResult Result; 752 Expr *UsedSizeArg = TheCall->getArg(SizeIndex); 753 if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext())) 754 return; 755 UsedSize = Result.Val.getInt().extOrTrunc(SizeTypeWidth); 756 } 757 758 if (UsedSize.getValue().ule(ObjectSize)) 759 return; 760 761 StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID); 762 // Skim off the details of whichever builtin was called to produce a better 763 // diagnostic, as it's unlikely that the user wrote the __builtin explicitly. 764 if (IsChkVariant) { 765 FunctionName = FunctionName.drop_front(std::strlen("__builtin___")); 766 FunctionName = FunctionName.drop_back(std::strlen("_chk")); 767 } else if (FunctionName.startswith("__builtin_")) { 768 FunctionName = FunctionName.drop_front(std::strlen("__builtin_")); 769 } 770 771 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 772 PDiag(DiagID) 773 << FunctionName << ObjectSize.toString(/*Radix=*/10) 774 << UsedSize.getValue().toString(/*Radix=*/10)); 775 } 776 777 static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall, 778 Scope::ScopeFlags NeededScopeFlags, 779 unsigned DiagID) { 780 // Scopes aren't available during instantiation. Fortunately, builtin 781 // functions cannot be template args so they cannot be formed through template 782 // instantiation. Therefore checking once during the parse is sufficient.
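// Illustrative (hypothetical user code): calling a builtin such as
// _exception_code() outside of a __except filter or block reaches the
// diagnostic below, while '__try { ... } __except (...) { ... }' is fine.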
783 if (SemaRef.inTemplateInstantiation()) 784 return false; 785 786 Scope *S = SemaRef.getCurScope(); 787 while (S && !S->isSEHExceptScope()) 788 S = S->getParent(); 789 if (!S || !(S->getFlags() & NeededScopeFlags)) { 790 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 791 SemaRef.Diag(TheCall->getExprLoc(), DiagID) 792 << DRE->getDecl()->getIdentifier(); 793 return true; 794 } 795 796 return false; 797 } 798 799 static inline bool isBlockPointer(Expr *Arg) { 800 return Arg->getType()->isBlockPointerType(); 801 } 802 803 /// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local 804 /// void*, which is a requirement of device side enqueue. 805 static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) { 806 const BlockPointerType *BPT = 807 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 808 ArrayRef<QualType> Params = 809 BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes(); 810 unsigned ArgCounter = 0; 811 bool IllegalParams = false; 812 // Iterate through the block parameters until either one is found that is not 813 // a local void*, or the block is valid. 814 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end(); 815 I != E; ++I, ++ArgCounter) { 816 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() || 817 (*I)->getPointeeType().getQualifiers().getAddressSpace() != 818 LangAS::opencl_local) { 819 // Get the location of the error. If a block literal has been passed 820 // (BlockExpr) then we can point straight to the offending argument, 821 // else we just point to the variable reference. 822 SourceLocation ErrorLoc; 823 if (isa<BlockExpr>(BlockArg)) { 824 BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl(); 825 ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc(); 826 } else if (isa<DeclRefExpr>(BlockArg)) { 827 ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc(); 828 } 829 S.Diag(ErrorLoc, 830 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args); 831 IllegalParams = true; 832 } 833 } 834 835 return IllegalParams; 836 } 837 838 static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) { 839 if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) { 840 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension) 841 << 1 << Call->getDirectCallee() << "cl_khr_subgroups"; 842 return true; 843 } 844 return false; 845 } 846 847 static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) { 848 if (checkArgCount(S, TheCall, 2)) 849 return true; 850 851 if (checkOpenCLSubgroupExt(S, TheCall)) 852 return true; 853 854 // First argument is an ndrange_t type. 855 Expr *NDRangeArg = TheCall->getArg(0); 856 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") { 857 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 858 << TheCall->getDirectCallee() << "'ndrange_t'"; 859 return true; 860 } 861 862 Expr *BlockArg = TheCall->getArg(1); 863 if (!isBlockPointer(BlockArg)) { 864 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 865 << TheCall->getDirectCallee() << "block"; 866 return true; 867 } 868 return checkOpenCLBlockArgs(S, BlockArg); 869 } 870 871 /// OpenCL C v2.0, s6.13.17.6 - Check the argument to the 872 /// get_kernel_work_group_size 873 /// and get_kernel_preferred_work_group_size_multiple builtin functions. 
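/// For example (illustrative OpenCL user code, not from the test suite):
///   size_t n = get_kernel_work_group_size(^(local void *p){ ... });
/// The single argument must be a block, and every block parameter must be a
/// 'local void *' (enforced by checkOpenCLBlockArgs).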
874 static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) { 875 if (checkArgCount(S, TheCall, 1)) 876 return true; 877 878 Expr *BlockArg = TheCall->getArg(0); 879 if (!isBlockPointer(BlockArg)) { 880 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 881 << TheCall->getDirectCallee() << "block"; 882 return true; 883 } 884 return checkOpenCLBlockArgs(S, BlockArg); 885 } 886 887 /// Diagnose integer type and any valid implicit conversion to it. 888 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, 889 const QualType &IntType); 890 891 static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall, 892 unsigned Start, unsigned End) { 893 bool IllegalParams = false; 894 for (unsigned I = Start; I <= End; ++I) 895 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I), 896 S.Context.getSizeType()); 897 return IllegalParams; 898 } 899 900 /// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all 901 /// 'local void*' parameter of passed block. 902 static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall, 903 Expr *BlockArg, 904 unsigned NumNonVarArgs) { 905 const BlockPointerType *BPT = 906 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 907 unsigned NumBlockParams = 908 BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams(); 909 unsigned TotalNumArgs = TheCall->getNumArgs(); 910 911 // For each argument passed to the block, a corresponding uint needs to 912 // be passed to describe the size of the local memory. 913 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) { 914 S.Diag(TheCall->getBeginLoc(), 915 diag::err_opencl_enqueue_kernel_local_size_args); 916 return true; 917 } 918 919 // Check that the sizes of the local memory are specified by integers. 920 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs, 921 TotalNumArgs - 1); 922 } 923 924 /// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different 925 /// overload formats specified in Table 6.13.17.1. 926 /// int enqueue_kernel(queue_t queue, 927 /// kernel_enqueue_flags_t flags, 928 /// const ndrange_t ndrange, 929 /// void (^block)(void)) 930 /// int enqueue_kernel(queue_t queue, 931 /// kernel_enqueue_flags_t flags, 932 /// const ndrange_t ndrange, 933 /// uint num_events_in_wait_list, 934 /// clk_event_t *event_wait_list, 935 /// clk_event_t *event_ret, 936 /// void (^block)(void)) 937 /// int enqueue_kernel(queue_t queue, 938 /// kernel_enqueue_flags_t flags, 939 /// const ndrange_t ndrange, 940 /// void (^block)(local void*, ...), 941 /// uint size0, ...) 942 /// int enqueue_kernel(queue_t queue, 943 /// kernel_enqueue_flags_t flags, 944 /// const ndrange_t ndrange, 945 /// uint num_events_in_wait_list, 946 /// clk_event_t *event_wait_list, 947 /// clk_event_t *event_ret, 948 /// void (^block)(local void*, ...), 949 /// uint size0, ...) 950 static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) { 951 unsigned NumArgs = TheCall->getNumArgs(); 952 953 if (NumArgs < 4) { 954 S.Diag(TheCall->getBeginLoc(), 955 diag::err_typecheck_call_too_few_args_at_least) 956 << 0 << 4 << NumArgs; 957 return true; 958 } 959 960 Expr *Arg0 = TheCall->getArg(0); 961 Expr *Arg1 = TheCall->getArg(1); 962 Expr *Arg2 = TheCall->getArg(2); 963 Expr *Arg3 = TheCall->getArg(3); 964 965 // First argument always needs to be a queue_t type. 
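// An illustrative call in the simplest (4-argument) form accepted by this
// builtin (hypothetical user code):
//   enqueue_kernel(q, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, ndr, ^(void){ ... });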
966 if (!Arg0->getType()->isQueueT()) { 967 S.Diag(TheCall->getArg(0)->getBeginLoc(), 968 diag::err_opencl_builtin_expected_type) 969 << TheCall->getDirectCallee() << S.Context.OCLQueueTy; 970 return true; 971 } 972 973 // Second argument always needs to be a kernel_enqueue_flags_t enum value. 974 if (!Arg1->getType()->isIntegerType()) { 975 S.Diag(TheCall->getArg(1)->getBeginLoc(), 976 diag::err_opencl_builtin_expected_type) 977 << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)"; 978 return true; 979 } 980 981 // Third argument is always an ndrange_t type. 982 if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") { 983 S.Diag(TheCall->getArg(2)->getBeginLoc(), 984 diag::err_opencl_builtin_expected_type) 985 << TheCall->getDirectCallee() << "'ndrange_t'"; 986 return true; 987 } 988 989 // With four arguments, there is only one form that the function could be 990 // called in: no events and no variable arguments. 991 if (NumArgs == 4) { 992 // check that the last argument is the right block type. 993 if (!isBlockPointer(Arg3)) { 994 S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type) 995 << TheCall->getDirectCallee() << "block"; 996 return true; 997 } 998 // we have a block type, check the prototype 999 const BlockPointerType *BPT = 1000 cast<BlockPointerType>(Arg3->getType().getCanonicalType()); 1001 if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) { 1002 S.Diag(Arg3->getBeginLoc(), 1003 diag::err_opencl_enqueue_kernel_blocks_no_args); 1004 return true; 1005 } 1006 return false; 1007 } 1008 // we can have block + varargs. 1009 if (isBlockPointer(Arg3)) 1010 return (checkOpenCLBlockArgs(S, Arg3) || 1011 checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4)); 1012 // last two cases with either exactly 7 args or 7 args and varargs. 1013 if (NumArgs >= 7) { 1014 // check common block argument. 1015 Expr *Arg6 = TheCall->getArg(6); 1016 if (!isBlockPointer(Arg6)) { 1017 S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1018 << TheCall->getDirectCallee() << "block"; 1019 return true; 1020 } 1021 if (checkOpenCLBlockArgs(S, Arg6)) 1022 return true; 1023 1024 // Fourth argument has to be any integer type. 1025 if (!Arg3->getType()->isIntegerType()) { 1026 S.Diag(TheCall->getArg(3)->getBeginLoc(), 1027 diag::err_opencl_builtin_expected_type) 1028 << TheCall->getDirectCallee() << "integer"; 1029 return true; 1030 } 1031 // check remaining common arguments. 1032 Expr *Arg4 = TheCall->getArg(4); 1033 Expr *Arg5 = TheCall->getArg(5); 1034 1035 // Fifth argument is always passed as a pointer to clk_event_t. 1036 if (!Arg4->isNullPointerConstant(S.Context, 1037 Expr::NPC_ValueDependentIsNotNull) && 1038 !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) { 1039 S.Diag(TheCall->getArg(4)->getBeginLoc(), 1040 diag::err_opencl_builtin_expected_type) 1041 << TheCall->getDirectCallee() 1042 << S.Context.getPointerType(S.Context.OCLClkEventTy); 1043 return true; 1044 } 1045 1046 // Sixth argument is always passed as a pointer to clk_event_t.
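// Illustrative 7-argument form being validated in this branch (hypothetical
// user code):
//   enqueue_kernel(q, flags, ndr, 1, &wait_evt, &out_evt, ^(void){ ... });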
1047 if (!Arg5->isNullPointerConstant(S.Context, 1048 Expr::NPC_ValueDependentIsNotNull) && 1049 !(Arg5->getType()->isPointerType() && 1050 Arg5->getType()->getPointeeType()->isClkEventT())) { 1051 S.Diag(TheCall->getArg(5)->getBeginLoc(), 1052 diag::err_opencl_builtin_expected_type) 1053 << TheCall->getDirectCallee() 1054 << S.Context.getPointerType(S.Context.OCLClkEventTy); 1055 return true; 1056 } 1057 1058 if (NumArgs == 7) 1059 return false; 1060 1061 return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7); 1062 } 1063 1064 // None of the specific cases was detected; give a generic error. 1065 S.Diag(TheCall->getBeginLoc(), 1066 diag::err_opencl_enqueue_kernel_incorrect_args); 1067 return true; 1068 } 1069 1070 /// Returns OpenCL access qual. 1071 static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) { 1072 return D->getAttr<OpenCLAccessAttr>(); 1073 } 1074 1075 /// Returns true if the first argument is not a pipe type, or if its access qualifier does not match the read/write pipe builtin being called. 1076 static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) { 1077 const Expr *Arg0 = Call->getArg(0); 1078 // First argument type should always be pipe. 1079 if (!Arg0->getType()->isPipeType()) { 1080 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg) 1081 << Call->getDirectCallee() << Arg0->getSourceRange(); 1082 return true; 1083 } 1084 OpenCLAccessAttr *AccessQual = 1085 getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl()); 1086 // Validates the access qualifier is compatible with the call. 1087 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be 1088 // read_only and write_only, and assumed to be read_only if no qualifier is 1089 // specified. 1090 switch (Call->getDirectCallee()->getBuiltinID()) { 1091 case Builtin::BIread_pipe: 1092 case Builtin::BIreserve_read_pipe: 1093 case Builtin::BIcommit_read_pipe: 1094 case Builtin::BIwork_group_reserve_read_pipe: 1095 case Builtin::BIsub_group_reserve_read_pipe: 1096 case Builtin::BIwork_group_commit_read_pipe: 1097 case Builtin::BIsub_group_commit_read_pipe: 1098 if (!(!AccessQual || AccessQual->isReadOnly())) { 1099 S.Diag(Arg0->getBeginLoc(), 1100 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1101 << "read_only" << Arg0->getSourceRange(); 1102 return true; 1103 } 1104 break; 1105 case Builtin::BIwrite_pipe: 1106 case Builtin::BIreserve_write_pipe: 1107 case Builtin::BIcommit_write_pipe: 1108 case Builtin::BIwork_group_reserve_write_pipe: 1109 case Builtin::BIsub_group_reserve_write_pipe: 1110 case Builtin::BIwork_group_commit_write_pipe: 1111 case Builtin::BIsub_group_commit_write_pipe: 1112 if (!(AccessQual && AccessQual->isWriteOnly())) { 1113 S.Diag(Arg0->getBeginLoc(), 1114 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1115 << "write_only" << Arg0->getSourceRange(); 1116 return true; 1117 } 1118 break; 1119 default: 1120 break; 1121 } 1122 return false; 1123 } 1124 1125 /// Returns true if pipe element type is different from the pointer. 1126 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) { 1127 const Expr *Arg0 = Call->getArg(0); 1128 const Expr *ArgIdx = Call->getArg(Idx); 1129 const PipeType *PipeTy = cast<PipeType>(Arg0->getType()); 1130 const QualType EltTy = PipeTy->getElementType(); 1131 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>(); 1132 // The Idx argument should be a pointer and the type of the pointer and 1133 // the type of pipe element should also be the same.
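// e.g. (illustrative): given 'pipe int p', read_pipe(p, &i) requires 'i' to be
// an int; passing a 'float *' as the packet pointer is diagnosed below.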
1134 if (!ArgTy || 1135 !S.Context.hasSameType( 1136 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) { 1137 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1138 << Call->getDirectCallee() << S.Context.getPointerType(EltTy) 1139 << ArgIdx->getType() << ArgIdx->getSourceRange(); 1140 return true; 1141 } 1142 return false; 1143 } 1144 1145 // Performs semantic analysis for the read/write_pipe call. 1146 // \param S Reference to the semantic analyzer. 1147 // \param Call A pointer to the builtin call. 1148 // \return True if a semantic error has been found, false otherwise. 1149 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) { 1150 // OpenCL v2.0 s6.13.16.2 - The built-in read/write 1151 // functions have two forms. 1152 switch (Call->getNumArgs()) { 1153 case 2: 1154 if (checkOpenCLPipeArg(S, Call)) 1155 return true; 1156 // The call with 2 arguments should be 1157 // read/write_pipe(pipe T, T*). 1158 // Check packet type T. 1159 if (checkOpenCLPipePacketType(S, Call, 1)) 1160 return true; 1161 break; 1162 1163 case 4: { 1164 if (checkOpenCLPipeArg(S, Call)) 1165 return true; 1166 // The call with 4 arguments should be 1167 // read/write_pipe(pipe T, reserve_id_t, uint, T*). 1168 // Check reserve_id_t. 1169 if (!Call->getArg(1)->getType()->isReserveIDT()) { 1170 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1171 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 1172 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1173 return true; 1174 } 1175 1176 // Check the index. 1177 const Expr *Arg2 = Call->getArg(2); 1178 if (!Arg2->getType()->isIntegerType() && 1179 !Arg2->getType()->isUnsignedIntegerType()) { 1180 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1181 << Call->getDirectCallee() << S.Context.UnsignedIntTy 1182 << Arg2->getType() << Arg2->getSourceRange(); 1183 return true; 1184 } 1185 1186 // Check packet type T. 1187 if (checkOpenCLPipePacketType(S, Call, 3)) 1188 return true; 1189 } break; 1190 default: 1191 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num) 1192 << Call->getDirectCallee() << Call->getSourceRange(); 1193 return true; 1194 } 1195 1196 return false; 1197 } 1198 1199 // Performs a semantic analysis on the {work_group_/sub_group_ 1200 // /_}reserve_{read/write}_pipe 1201 // \param S Reference to the semantic analyzer. 1202 // \param Call The call to the builtin function to be analyzed. 1203 // \return True if a semantic error was found, false otherwise. 1204 static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) { 1205 if (checkArgCount(S, Call, 2)) 1206 return true; 1207 1208 if (checkOpenCLPipeArg(S, Call)) 1209 return true; 1210 1211 // Check the reserve size. 1212 if (!Call->getArg(1)->getType()->isIntegerType() && 1213 !Call->getArg(1)->getType()->isUnsignedIntegerType()) { 1214 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1215 << Call->getDirectCallee() << S.Context.UnsignedIntTy 1216 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1217 return true; 1218 } 1219 1220 // Since the return type of the reserve_read/write_pipe built-in functions is 1221 // reserve_id_t, which is not defined in the builtin def file, we use int as 1222 // the return type and need to override the return type of these functions here.
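// Illustrative use of the reservation API whose result type is fixed up here
// (hypothetical user code):
//   reserve_id_t rid = reserve_read_pipe(p, n);
//   if (is_valid_reserve_id(rid)) {
//     read_pipe(p, rid, 0, &v);
//     commit_read_pipe(p, rid);
//   }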
1223 Call->setType(S.Context.OCLReserveIDTy); 1224 1225 return false; 1226 } 1227 1228 // Performs a semantic analysis on {work_group_/sub_group_ 1229 // /_}commit_{read/write}_pipe 1230 // \param S Reference to the semantic analyzer. 1231 // \param Call The call to the builtin function to be analyzed. 1232 // \return True if a semantic error was found, false otherwise. 1233 static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) { 1234 if (checkArgCount(S, Call, 2)) 1235 return true; 1236 1237 if (checkOpenCLPipeArg(S, Call)) 1238 return true; 1239 1240 // Check reserve_id_t. 1241 if (!Call->getArg(1)->getType()->isReserveIDT()) { 1242 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1243 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 1244 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1245 return true; 1246 } 1247 1248 return false; 1249 } 1250 1251 // Performs a semantic analysis on the call to built-in Pipe 1252 // Query Functions. 1253 // \param S Reference to the semantic analyzer. 1254 // \param Call The call to the builtin function to be analyzed. 1255 // \return True if a semantic error was found, false otherwise. 1256 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) { 1257 if (checkArgCount(S, Call, 1)) 1258 return true; 1259 1260 if (!Call->getArg(0)->getType()->isPipeType()) { 1261 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg) 1262 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange(); 1263 return true; 1264 } 1265 1266 return false; 1267 } 1268 1269 // OpenCL v2.0 s6.13.9 - Address space qualifier functions. 1270 // Performs semantic analysis for the to_global/local/private call. 1271 // \param S Reference to the semantic analyzer. 1272 // \param BuiltinID ID of the builtin function. 1273 // \param Call A pointer to the builtin call. 1274 // \return True if a semantic error has been found, false otherwise. 
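// For example (illustrative): 'to_global(p)' expects 'p' to be a pointer that is
// not in the constant address space; the call's type is rewritten below to the
// corresponding '__global'/'__local'/'__private' pointer type.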
1275 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, 1276 CallExpr *Call) { 1277 if (Call->getNumArgs() != 1) { 1278 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_arg_num) 1279 << Call->getDirectCallee() << Call->getSourceRange(); 1280 return true; 1281 } 1282 1283 auto RT = Call->getArg(0)->getType(); 1284 if (!RT->isPointerType() || RT->getPointeeType() 1285 .getAddressSpace() == LangAS::opencl_constant) { 1286 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg) 1287 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange(); 1288 return true; 1289 } 1290 1291 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) { 1292 S.Diag(Call->getArg(0)->getBeginLoc(), 1293 diag::warn_opencl_generic_address_space_arg) 1294 << Call->getDirectCallee()->getNameInfo().getAsString() 1295 << Call->getArg(0)->getSourceRange(); 1296 } 1297 1298 RT = RT->getPointeeType(); 1299 auto Qual = RT.getQualifiers(); 1300 switch (BuiltinID) { 1301 case Builtin::BIto_global: 1302 Qual.setAddressSpace(LangAS::opencl_global); 1303 break; 1304 case Builtin::BIto_local: 1305 Qual.setAddressSpace(LangAS::opencl_local); 1306 break; 1307 case Builtin::BIto_private: 1308 Qual.setAddressSpace(LangAS::opencl_private); 1309 break; 1310 default: 1311 llvm_unreachable("Invalid builtin function"); 1312 } 1313 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType( 1314 RT.getUnqualifiedType(), Qual))); 1315 1316 return false; 1317 } 1318 1319 static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) { 1320 if (checkArgCount(S, TheCall, 1)) 1321 return ExprError(); 1322 1323 // Compute __builtin_launder's parameter type from the argument. 1324 // The parameter type is: 1325 // * The type of the argument if it's not an array or function type, 1326 // Otherwise, 1327 // * The decayed argument type. 1328 QualType ParamTy = [&]() { 1329 QualType ArgTy = TheCall->getArg(0)->getType(); 1330 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe()) 1331 return S.Context.getPointerType(Ty->getElementType()); 1332 if (ArgTy->isFunctionType()) { 1333 return S.Context.getPointerType(ArgTy); 1334 } 1335 return ArgTy; 1336 }(); 1337 1338 TheCall->setType(ParamTy); 1339 1340 auto DiagSelect = [&]() -> llvm::Optional<unsigned> { 1341 if (!ParamTy->isPointerType()) 1342 return 0; 1343 if (ParamTy->isFunctionPointerType()) 1344 return 1; 1345 if (ParamTy->isVoidPointerType()) 1346 return 2; 1347 return llvm::Optional<unsigned>{}; 1348 }(); 1349 if (DiagSelect.hasValue()) { 1350 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg) 1351 << DiagSelect.getValue() << TheCall->getSourceRange(); 1352 return ExprError(); 1353 } 1354 1355 // We either have an incomplete class type, or we have a class template 1356 // whose instantiation has not been forced. 
Example: 1357 // 1358 // template <class T> struct Foo { T value; }; 1359 // Foo<int> *p = nullptr; 1360 // auto *d = __builtin_launder(p); 1361 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(), 1362 diag::err_incomplete_type)) 1363 return ExprError(); 1364 1365 assert(ParamTy->getPointeeType()->isObjectType() && 1366 "Unhandled non-object pointer case"); 1367 1368 InitializedEntity Entity = 1369 InitializedEntity::InitializeParameter(S.Context, ParamTy, false); 1370 ExprResult Arg = 1371 S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0)); 1372 if (Arg.isInvalid()) 1373 return ExprError(); 1374 TheCall->setArg(0, Arg.get()); 1375 1376 return TheCall; 1377 } 1378 1379 // Emit an error and return true if the current architecture is not in the list 1380 // of supported architectures. 1381 static bool 1382 CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1383 ArrayRef<llvm::Triple::ArchType> SupportedArchs) { 1384 llvm::Triple::ArchType CurArch = 1385 S.getASTContext().getTargetInfo().getTriple().getArch(); 1386 if (llvm::is_contained(SupportedArchs, CurArch)) 1387 return false; 1388 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1389 << TheCall->getSourceRange(); 1390 return true; 1391 } 1392 1393 static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr, 1394 SourceLocation CallSiteLoc); 1395 1396 bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 1397 CallExpr *TheCall) { 1398 switch (TI.getTriple().getArch()) { 1399 default: 1400 // Some builtins don't require additional checking, so just consider these 1401 // acceptable. 1402 return false; 1403 case llvm::Triple::arm: 1404 case llvm::Triple::armeb: 1405 case llvm::Triple::thumb: 1406 case llvm::Triple::thumbeb: 1407 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall); 1408 case llvm::Triple::aarch64: 1409 case llvm::Triple::aarch64_32: 1410 case llvm::Triple::aarch64_be: 1411 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall); 1412 case llvm::Triple::bpfeb: 1413 case llvm::Triple::bpfel: 1414 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall); 1415 case llvm::Triple::hexagon: 1416 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall); 1417 case llvm::Triple::mips: 1418 case llvm::Triple::mipsel: 1419 case llvm::Triple::mips64: 1420 case llvm::Triple::mips64el: 1421 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall); 1422 case llvm::Triple::systemz: 1423 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall); 1424 case llvm::Triple::x86: 1425 case llvm::Triple::x86_64: 1426 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall); 1427 case llvm::Triple::ppc: 1428 case llvm::Triple::ppc64: 1429 case llvm::Triple::ppc64le: 1430 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall); 1431 case llvm::Triple::amdgcn: 1432 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall); 1433 } 1434 } 1435 1436 ExprResult 1437 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, 1438 CallExpr *TheCall) { 1439 ExprResult TheCallResult(TheCall); 1440 1441 // Find out if any arguments are required to be integer constant expressions. 1442 unsigned ICEArguments = 0; 1443 ASTContext::GetBuiltinTypeError Error; 1444 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments); 1445 if (Error != ASTContext::GE_None) 1446 ICEArguments = 0; // Don't diagnose previously diagnosed errors. 1447 1448 // If any arguments are required to be ICE's, check and diagnose. 
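// ICEArguments is a bitmask over the call's arguments: if bit N is set, the
// builtin's prototype (the 'I' modifier in Builtins.def) requires argument N to
// be an integer constant expression. Illustrative reading of the mask:
//   if (ICEArguments & (1 << 1)) { /* argument #1 must fold to a constant */ }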
1449 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) { 1450 // Skip arguments not required to be ICE's. 1451 if ((ICEArguments & (1 << ArgNo)) == 0) continue; 1452 1453 llvm::APSInt Result; 1454 if (SemaBuiltinConstantArg(TheCall, ArgNo, Result)) 1455 return true; 1456 ICEArguments &= ~(1 << ArgNo); 1457 } 1458 1459 switch (BuiltinID) { 1460 case Builtin::BI__builtin___CFStringMakeConstantString: 1461 assert(TheCall->getNumArgs() == 1 && 1462 "Wrong # arguments to builtin CFStringMakeConstantString"); 1463 if (CheckObjCString(TheCall->getArg(0))) 1464 return ExprError(); 1465 break; 1466 case Builtin::BI__builtin_ms_va_start: 1467 case Builtin::BI__builtin_stdarg_start: 1468 case Builtin::BI__builtin_va_start: 1469 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 1470 return ExprError(); 1471 break; 1472 case Builtin::BI__va_start: { 1473 switch (Context.getTargetInfo().getTriple().getArch()) { 1474 case llvm::Triple::aarch64: 1475 case llvm::Triple::arm: 1476 case llvm::Triple::thumb: 1477 if (SemaBuiltinVAStartARMMicrosoft(TheCall)) 1478 return ExprError(); 1479 break; 1480 default: 1481 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 1482 return ExprError(); 1483 break; 1484 } 1485 break; 1486 } 1487 1488 // The acquire, release, and no fence variants are ARM and AArch64 only. 1489 case Builtin::BI_interlockedbittestandset_acq: 1490 case Builtin::BI_interlockedbittestandset_rel: 1491 case Builtin::BI_interlockedbittestandset_nf: 1492 case Builtin::BI_interlockedbittestandreset_acq: 1493 case Builtin::BI_interlockedbittestandreset_rel: 1494 case Builtin::BI_interlockedbittestandreset_nf: 1495 if (CheckBuiltinTargetSupport( 1496 *this, BuiltinID, TheCall, 1497 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64})) 1498 return ExprError(); 1499 break; 1500 1501 // The 64-bit bittest variants are x64, ARM, and AArch64 only. 1502 case Builtin::BI_bittest64: 1503 case Builtin::BI_bittestandcomplement64: 1504 case Builtin::BI_bittestandreset64: 1505 case Builtin::BI_bittestandset64: 1506 case Builtin::BI_interlockedbittestandreset64: 1507 case Builtin::BI_interlockedbittestandset64: 1508 if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall, 1509 {llvm::Triple::x86_64, llvm::Triple::arm, 1510 llvm::Triple::thumb, llvm::Triple::aarch64})) 1511 return ExprError(); 1512 break; 1513 1514 case Builtin::BI__builtin_isgreater: 1515 case Builtin::BI__builtin_isgreaterequal: 1516 case Builtin::BI__builtin_isless: 1517 case Builtin::BI__builtin_islessequal: 1518 case Builtin::BI__builtin_islessgreater: 1519 case Builtin::BI__builtin_isunordered: 1520 if (SemaBuiltinUnorderedCompare(TheCall)) 1521 return ExprError(); 1522 break; 1523 case Builtin::BI__builtin_fpclassify: 1524 if (SemaBuiltinFPClassification(TheCall, 6)) 1525 return ExprError(); 1526 break; 1527 case Builtin::BI__builtin_isfinite: 1528 case Builtin::BI__builtin_isinf: 1529 case Builtin::BI__builtin_isinf_sign: 1530 case Builtin::BI__builtin_isnan: 1531 case Builtin::BI__builtin_isnormal: 1532 case Builtin::BI__builtin_signbit: 1533 case Builtin::BI__builtin_signbitf: 1534 case Builtin::BI__builtin_signbitl: 1535 if (SemaBuiltinFPClassification(TheCall, 1)) 1536 return ExprError(); 1537 break; 1538 case Builtin::BI__builtin_shufflevector: 1539 return SemaBuiltinShuffleVector(TheCall); 1540 // TheCall will be freed by the smart pointer here, but that's fine, since 1541 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 
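// Illustrative (hypothetical user code): __builtin_shufflevector(v1, v2, 0, 1, 4, 5)
// selects lanes by constant indices; SemaBuiltinShuffleVector enforces that the
// index arguments are integer constants in range (or -1 for "don't care").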
1542 case Builtin::BI__builtin_prefetch: 1543 if (SemaBuiltinPrefetch(TheCall)) 1544 return ExprError(); 1545 break; 1546 case Builtin::BI__builtin_alloca_with_align: 1547 if (SemaBuiltinAllocaWithAlign(TheCall)) 1548 return ExprError(); 1549 LLVM_FALLTHROUGH; 1550 case Builtin::BI__builtin_alloca: 1551 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 1552 << TheCall->getDirectCallee(); 1553 break; 1554 case Builtin::BI__assume: 1555 case Builtin::BI__builtin_assume: 1556 if (SemaBuiltinAssume(TheCall)) 1557 return ExprError(); 1558 break; 1559 case Builtin::BI__builtin_assume_aligned: 1560 if (SemaBuiltinAssumeAligned(TheCall)) 1561 return ExprError(); 1562 break; 1563 case Builtin::BI__builtin_dynamic_object_size: 1564 case Builtin::BI__builtin_object_size: 1565 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 1566 return ExprError(); 1567 break; 1568 case Builtin::BI__builtin_longjmp: 1569 if (SemaBuiltinLongjmp(TheCall)) 1570 return ExprError(); 1571 break; 1572 case Builtin::BI__builtin_setjmp: 1573 if (SemaBuiltinSetjmp(TheCall)) 1574 return ExprError(); 1575 break; 1576 case Builtin::BI__builtin_classify_type: 1577 if (checkArgCount(*this, TheCall, 1)) return true; 1578 TheCall->setType(Context.IntTy); 1579 break; 1580 case Builtin::BI__builtin_constant_p: { 1581 if (checkArgCount(*this, TheCall, 1)) return true; 1582 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 1583 if (Arg.isInvalid()) return true; 1584 TheCall->setArg(0, Arg.get()); 1585 TheCall->setType(Context.IntTy); 1586 break; 1587 } 1588 case Builtin::BI__builtin_launder: 1589 return SemaBuiltinLaunder(*this, TheCall); 1590 case Builtin::BI__sync_fetch_and_add: 1591 case Builtin::BI__sync_fetch_and_add_1: 1592 case Builtin::BI__sync_fetch_and_add_2: 1593 case Builtin::BI__sync_fetch_and_add_4: 1594 case Builtin::BI__sync_fetch_and_add_8: 1595 case Builtin::BI__sync_fetch_and_add_16: 1596 case Builtin::BI__sync_fetch_and_sub: 1597 case Builtin::BI__sync_fetch_and_sub_1: 1598 case Builtin::BI__sync_fetch_and_sub_2: 1599 case Builtin::BI__sync_fetch_and_sub_4: 1600 case Builtin::BI__sync_fetch_and_sub_8: 1601 case Builtin::BI__sync_fetch_and_sub_16: 1602 case Builtin::BI__sync_fetch_and_or: 1603 case Builtin::BI__sync_fetch_and_or_1: 1604 case Builtin::BI__sync_fetch_and_or_2: 1605 case Builtin::BI__sync_fetch_and_or_4: 1606 case Builtin::BI__sync_fetch_and_or_8: 1607 case Builtin::BI__sync_fetch_and_or_16: 1608 case Builtin::BI__sync_fetch_and_and: 1609 case Builtin::BI__sync_fetch_and_and_1: 1610 case Builtin::BI__sync_fetch_and_and_2: 1611 case Builtin::BI__sync_fetch_and_and_4: 1612 case Builtin::BI__sync_fetch_and_and_8: 1613 case Builtin::BI__sync_fetch_and_and_16: 1614 case Builtin::BI__sync_fetch_and_xor: 1615 case Builtin::BI__sync_fetch_and_xor_1: 1616 case Builtin::BI__sync_fetch_and_xor_2: 1617 case Builtin::BI__sync_fetch_and_xor_4: 1618 case Builtin::BI__sync_fetch_and_xor_8: 1619 case Builtin::BI__sync_fetch_and_xor_16: 1620 case Builtin::BI__sync_fetch_and_nand: 1621 case Builtin::BI__sync_fetch_and_nand_1: 1622 case Builtin::BI__sync_fetch_and_nand_2: 1623 case Builtin::BI__sync_fetch_and_nand_4: 1624 case Builtin::BI__sync_fetch_and_nand_8: 1625 case Builtin::BI__sync_fetch_and_nand_16: 1626 case Builtin::BI__sync_add_and_fetch: 1627 case Builtin::BI__sync_add_and_fetch_1: 1628 case Builtin::BI__sync_add_and_fetch_2: 1629 case Builtin::BI__sync_add_and_fetch_4: 1630 case Builtin::BI__sync_add_and_fetch_8: 1631 case Builtin::BI__sync_add_and_fetch_16: 1632 case 
Builtin::BI__sync_sub_and_fetch: 1633 case Builtin::BI__sync_sub_and_fetch_1: 1634 case Builtin::BI__sync_sub_and_fetch_2: 1635 case Builtin::BI__sync_sub_and_fetch_4: 1636 case Builtin::BI__sync_sub_and_fetch_8: 1637 case Builtin::BI__sync_sub_and_fetch_16: 1638 case Builtin::BI__sync_and_and_fetch: 1639 case Builtin::BI__sync_and_and_fetch_1: 1640 case Builtin::BI__sync_and_and_fetch_2: 1641 case Builtin::BI__sync_and_and_fetch_4: 1642 case Builtin::BI__sync_and_and_fetch_8: 1643 case Builtin::BI__sync_and_and_fetch_16: 1644 case Builtin::BI__sync_or_and_fetch: 1645 case Builtin::BI__sync_or_and_fetch_1: 1646 case Builtin::BI__sync_or_and_fetch_2: 1647 case Builtin::BI__sync_or_and_fetch_4: 1648 case Builtin::BI__sync_or_and_fetch_8: 1649 case Builtin::BI__sync_or_and_fetch_16: 1650 case Builtin::BI__sync_xor_and_fetch: 1651 case Builtin::BI__sync_xor_and_fetch_1: 1652 case Builtin::BI__sync_xor_and_fetch_2: 1653 case Builtin::BI__sync_xor_and_fetch_4: 1654 case Builtin::BI__sync_xor_and_fetch_8: 1655 case Builtin::BI__sync_xor_and_fetch_16: 1656 case Builtin::BI__sync_nand_and_fetch: 1657 case Builtin::BI__sync_nand_and_fetch_1: 1658 case Builtin::BI__sync_nand_and_fetch_2: 1659 case Builtin::BI__sync_nand_and_fetch_4: 1660 case Builtin::BI__sync_nand_and_fetch_8: 1661 case Builtin::BI__sync_nand_and_fetch_16: 1662 case Builtin::BI__sync_val_compare_and_swap: 1663 case Builtin::BI__sync_val_compare_and_swap_1: 1664 case Builtin::BI__sync_val_compare_and_swap_2: 1665 case Builtin::BI__sync_val_compare_and_swap_4: 1666 case Builtin::BI__sync_val_compare_and_swap_8: 1667 case Builtin::BI__sync_val_compare_and_swap_16: 1668 case Builtin::BI__sync_bool_compare_and_swap: 1669 case Builtin::BI__sync_bool_compare_and_swap_1: 1670 case Builtin::BI__sync_bool_compare_and_swap_2: 1671 case Builtin::BI__sync_bool_compare_and_swap_4: 1672 case Builtin::BI__sync_bool_compare_and_swap_8: 1673 case Builtin::BI__sync_bool_compare_and_swap_16: 1674 case Builtin::BI__sync_lock_test_and_set: 1675 case Builtin::BI__sync_lock_test_and_set_1: 1676 case Builtin::BI__sync_lock_test_and_set_2: 1677 case Builtin::BI__sync_lock_test_and_set_4: 1678 case Builtin::BI__sync_lock_test_and_set_8: 1679 case Builtin::BI__sync_lock_test_and_set_16: 1680 case Builtin::BI__sync_lock_release: 1681 case Builtin::BI__sync_lock_release_1: 1682 case Builtin::BI__sync_lock_release_2: 1683 case Builtin::BI__sync_lock_release_4: 1684 case Builtin::BI__sync_lock_release_8: 1685 case Builtin::BI__sync_lock_release_16: 1686 case Builtin::BI__sync_swap: 1687 case Builtin::BI__sync_swap_1: 1688 case Builtin::BI__sync_swap_2: 1689 case Builtin::BI__sync_swap_4: 1690 case Builtin::BI__sync_swap_8: 1691 case Builtin::BI__sync_swap_16: 1692 return SemaBuiltinAtomicOverloaded(TheCallResult); 1693 case Builtin::BI__sync_synchronize: 1694 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 1695 << TheCall->getCallee()->getSourceRange(); 1696 break; 1697 case Builtin::BI__builtin_nontemporal_load: 1698 case Builtin::BI__builtin_nontemporal_store: 1699 return SemaBuiltinNontemporalOverloaded(TheCallResult); 1700 case Builtin::BI__builtin_memcpy_inline: { 1701 clang::Expr *SizeOp = TheCall->getArg(2); 1702 // We warn about copying to or from `nullptr` pointers when `size` is 1703 // greater than 0. When `size` is value dependent we cannot evaluate its 1704 // value so we bail out. 
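    // For instance, __builtin_memcpy_inline(nullptr, Src, 4) is flagged by the
    // non-null checks below, while a constant size of 0 skips them.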
1705 if (SizeOp->isValueDependent()) 1706 break; 1707 if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) { 1708 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 1709 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 1710 } 1711 break; 1712 } 1713 #define BUILTIN(ID, TYPE, ATTRS) 1714 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 1715 case Builtin::BI##ID: \ 1716 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 1717 #include "clang/Basic/Builtins.def" 1718 case Builtin::BI__annotation: 1719 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 1720 return ExprError(); 1721 break; 1722 case Builtin::BI__builtin_annotation: 1723 if (SemaBuiltinAnnotation(*this, TheCall)) 1724 return ExprError(); 1725 break; 1726 case Builtin::BI__builtin_addressof: 1727 if (SemaBuiltinAddressof(*this, TheCall)) 1728 return ExprError(); 1729 break; 1730 case Builtin::BI__builtin_is_aligned: 1731 case Builtin::BI__builtin_align_up: 1732 case Builtin::BI__builtin_align_down: 1733 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 1734 return ExprError(); 1735 break; 1736 case Builtin::BI__builtin_add_overflow: 1737 case Builtin::BI__builtin_sub_overflow: 1738 case Builtin::BI__builtin_mul_overflow: 1739 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 1740 return ExprError(); 1741 break; 1742 case Builtin::BI__builtin_operator_new: 1743 case Builtin::BI__builtin_operator_delete: { 1744 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 1745 ExprResult Res = 1746 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 1747 if (Res.isInvalid()) 1748 CorrectDelayedTyposInExpr(TheCallResult.get()); 1749 return Res; 1750 } 1751 case Builtin::BI__builtin_dump_struct: { 1752 // We first want to ensure we are called with 2 arguments 1753 if (checkArgCount(*this, TheCall, 2)) 1754 return ExprError(); 1755 // Ensure that the first argument is of type 'struct XX *' 1756 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts(); 1757 const QualType PtrArgType = PtrArg->getType(); 1758 if (!PtrArgType->isPointerType() || 1759 !PtrArgType->getPointeeType()->isRecordType()) { 1760 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1761 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType 1762 << "structure pointer"; 1763 return ExprError(); 1764 } 1765 1766 // Ensure that the second argument is of type 'FunctionType' 1767 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts(); 1768 const QualType FnPtrArgType = FnPtrArg->getType(); 1769 if (!FnPtrArgType->isPointerType()) { 1770 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1771 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1772 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1773 return ExprError(); 1774 } 1775 1776 const auto *FuncType = 1777 FnPtrArgType->getPointeeType()->getAs<FunctionType>(); 1778 1779 if (!FuncType) { 1780 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1781 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1782 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1783 return ExprError(); 1784 } 1785 1786 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) { 1787 if (!FT->getNumParams()) { 1788 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1789 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1790 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1791 return ExprError(); 
1792 } 1793 QualType PT = FT->getParamType(0); 1794 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy || 1795 !PT->isPointerType() || !PT->getPointeeType()->isCharType() || 1796 !PT->getPointeeType().isConstQualified()) { 1797 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1798 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1799 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1800 return ExprError(); 1801 } 1802 } 1803 1804 TheCall->setType(Context.IntTy); 1805 break; 1806 } 1807 case Builtin::BI__builtin_expect_with_probability: { 1808 // We first want to ensure we are called with 3 arguments 1809 if (checkArgCount(*this, TheCall, 3)) 1810 return ExprError(); 1811 // then check probability is constant float in range [0.0, 1.0] 1812 const Expr *ProbArg = TheCall->getArg(2); 1813 SmallVector<PartialDiagnosticAt, 8> Notes; 1814 Expr::EvalResult Eval; 1815 Eval.Diag = &Notes; 1816 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Expr::EvaluateForCodeGen, 1817 Context)) || 1818 !Eval.Val.isFloat()) { 1819 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 1820 << ProbArg->getSourceRange(); 1821 for (const PartialDiagnosticAt &PDiag : Notes) 1822 Diag(PDiag.first, PDiag.second); 1823 return ExprError(); 1824 } 1825 llvm::APFloat Probability = Eval.Val.getFloat(); 1826 bool LoseInfo = false; 1827 Probability.convert(llvm::APFloat::IEEEdouble(), 1828 llvm::RoundingMode::Dynamic, &LoseInfo); 1829 if (!(Probability >= llvm::APFloat(0.0) && 1830 Probability <= llvm::APFloat(1.0))) { 1831 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 1832 << ProbArg->getSourceRange(); 1833 return ExprError(); 1834 } 1835 break; 1836 } 1837 case Builtin::BI__builtin_preserve_access_index: 1838 if (SemaBuiltinPreserveAI(*this, TheCall)) 1839 return ExprError(); 1840 break; 1841 case Builtin::BI__builtin_call_with_static_chain: 1842 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 1843 return ExprError(); 1844 break; 1845 case Builtin::BI__exception_code: 1846 case Builtin::BI_exception_code: 1847 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 1848 diag::err_seh___except_block)) 1849 return ExprError(); 1850 break; 1851 case Builtin::BI__exception_info: 1852 case Builtin::BI_exception_info: 1853 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 1854 diag::err_seh___except_filter)) 1855 return ExprError(); 1856 break; 1857 case Builtin::BI__GetExceptionInfo: 1858 if (checkArgCount(*this, TheCall, 1)) 1859 return ExprError(); 1860 1861 if (CheckCXXThrowOperand( 1862 TheCall->getBeginLoc(), 1863 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 1864 TheCall)) 1865 return ExprError(); 1866 1867 TheCall->setType(Context.VoidPtrTy); 1868 break; 1869 // OpenCL v2.0, s6.13.16 - Pipe functions 1870 case Builtin::BIread_pipe: 1871 case Builtin::BIwrite_pipe: 1872 // Since those two functions are declared with var args, we need a semantic 1873 // check for the argument. 
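  // Minimal OpenCL sketch (assuming the OpenCL 2.0 pipe declarations): with a
  // kernel parameter 'pipe int P' and a local 'int V', a call such as
  // read_pipe(P, &V) is well-formed; SemaBuiltinRWPipe rejects packet pointers
  // that do not match the pipe's element type.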
1874 if (SemaBuiltinRWPipe(*this, TheCall)) 1875 return ExprError(); 1876 break; 1877 case Builtin::BIreserve_read_pipe: 1878 case Builtin::BIreserve_write_pipe: 1879 case Builtin::BIwork_group_reserve_read_pipe: 1880 case Builtin::BIwork_group_reserve_write_pipe: 1881 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 1882 return ExprError(); 1883 break; 1884 case Builtin::BIsub_group_reserve_read_pipe: 1885 case Builtin::BIsub_group_reserve_write_pipe: 1886 if (checkOpenCLSubgroupExt(*this, TheCall) || 1887 SemaBuiltinReserveRWPipe(*this, TheCall)) 1888 return ExprError(); 1889 break; 1890 case Builtin::BIcommit_read_pipe: 1891 case Builtin::BIcommit_write_pipe: 1892 case Builtin::BIwork_group_commit_read_pipe: 1893 case Builtin::BIwork_group_commit_write_pipe: 1894 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 1895 return ExprError(); 1896 break; 1897 case Builtin::BIsub_group_commit_read_pipe: 1898 case Builtin::BIsub_group_commit_write_pipe: 1899 if (checkOpenCLSubgroupExt(*this, TheCall) || 1900 SemaBuiltinCommitRWPipe(*this, TheCall)) 1901 return ExprError(); 1902 break; 1903 case Builtin::BIget_pipe_num_packets: 1904 case Builtin::BIget_pipe_max_packets: 1905 if (SemaBuiltinPipePackets(*this, TheCall)) 1906 return ExprError(); 1907 break; 1908 case Builtin::BIto_global: 1909 case Builtin::BIto_local: 1910 case Builtin::BIto_private: 1911 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 1912 return ExprError(); 1913 break; 1914 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 1915 case Builtin::BIenqueue_kernel: 1916 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 1917 return ExprError(); 1918 break; 1919 case Builtin::BIget_kernel_work_group_size: 1920 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 1921 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 1922 return ExprError(); 1923 break; 1924 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 1925 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 1926 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 1927 return ExprError(); 1928 break; 1929 case Builtin::BI__builtin_os_log_format: 1930 Cleanup.setExprNeedsCleanups(true); 1931 LLVM_FALLTHROUGH; 1932 case Builtin::BI__builtin_os_log_format_buffer_size: 1933 if (SemaBuiltinOSLogFormat(TheCall)) 1934 return ExprError(); 1935 break; 1936 case Builtin::BI__builtin_frame_address: 1937 case Builtin::BI__builtin_return_address: { 1938 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 1939 return ExprError(); 1940 1941 // -Wframe-address warning if non-zero passed to builtin 1942 // return/frame address. 1943 Expr::EvalResult Result; 1944 if (TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 1945 Result.Val.getInt() != 0) 1946 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 1947 << ((BuiltinID == Builtin::BI__builtin_return_address) 1948 ? "__builtin_return_address" 1949 : "__builtin_frame_address") 1950 << TheCall->getSourceRange(); 1951 break; 1952 } 1953 1954 case Builtin::BI__builtin_matrix_transpose: 1955 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 1956 1957 case Builtin::BI__builtin_matrix_column_major_load: 1958 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 1959 1960 case Builtin::BI__builtin_matrix_column_major_store: 1961 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 1962 } 1963 1964 // Since the target specific builtins for each arch overlap, only check those 1965 // of the arch we are compiling for. 
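  // (Aux-target builtins typically arise in offloading compilations such as
  // CUDA or HIP, where builtins of both the host and the device target are
  // visible; those are validated against the aux TargetInfo below.)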
1966 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 1967 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 1968 assert(Context.getAuxTargetInfo() && 1969 "Aux Target Builtin, but not an aux target?"); 1970 1971 if (CheckTSBuiltinFunctionCall( 1972 *Context.getAuxTargetInfo(), 1973 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 1974 return ExprError(); 1975 } else { 1976 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 1977 TheCall)) 1978 return ExprError(); 1979 } 1980 } 1981 1982 return TheCallResult; 1983 } 1984 1985 // Get the valid immediate range for the specified NEON type code. 1986 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 1987 NeonTypeFlags Type(t); 1988 int IsQuad = ForceQuad ? true : Type.isQuad(); 1989 switch (Type.getEltType()) { 1990 case NeonTypeFlags::Int8: 1991 case NeonTypeFlags::Poly8: 1992 return shift ? 7 : (8 << IsQuad) - 1; 1993 case NeonTypeFlags::Int16: 1994 case NeonTypeFlags::Poly16: 1995 return shift ? 15 : (4 << IsQuad) - 1; 1996 case NeonTypeFlags::Int32: 1997 return shift ? 31 : (2 << IsQuad) - 1; 1998 case NeonTypeFlags::Int64: 1999 case NeonTypeFlags::Poly64: 2000 return shift ? 63 : (1 << IsQuad) - 1; 2001 case NeonTypeFlags::Poly128: 2002 return shift ? 127 : (1 << IsQuad) - 1; 2003 case NeonTypeFlags::Float16: 2004 assert(!shift && "cannot shift float types!"); 2005 return (4 << IsQuad) - 1; 2006 case NeonTypeFlags::Float32: 2007 assert(!shift && "cannot shift float types!"); 2008 return (2 << IsQuad) - 1; 2009 case NeonTypeFlags::Float64: 2010 assert(!shift && "cannot shift float types!"); 2011 return (1 << IsQuad) - 1; 2012 case NeonTypeFlags::BFloat16: 2013 assert(!shift && "cannot shift float types!"); 2014 return (4 << IsQuad) - 1; 2015 } 2016 llvm_unreachable("Invalid NeonTypeFlag!"); 2017 } 2018 2019 /// getNeonEltType - Return the QualType corresponding to the elements of 2020 /// the vector type specified by the NeonTypeFlags. This is used to check 2021 /// the pointer arguments for Neon load/store intrinsics. 2022 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2023 bool IsPolyUnsigned, bool IsInt64Long) { 2024 switch (Flags.getEltType()) { 2025 case NeonTypeFlags::Int8: 2026 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2027 case NeonTypeFlags::Int16: 2028 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2029 case NeonTypeFlags::Int32: 2030 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2031 case NeonTypeFlags::Int64: 2032 if (IsInt64Long) 2033 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2034 else 2035 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2036 : Context.LongLongTy; 2037 case NeonTypeFlags::Poly8: 2038 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2039 case NeonTypeFlags::Poly16: 2040 return IsPolyUnsigned ? 
Context.UnsignedShortTy : Context.ShortTy; 2041 case NeonTypeFlags::Poly64: 2042 if (IsInt64Long) 2043 return Context.UnsignedLongTy; 2044 else 2045 return Context.UnsignedLongLongTy; 2046 case NeonTypeFlags::Poly128: 2047 break; 2048 case NeonTypeFlags::Float16: 2049 return Context.HalfTy; 2050 case NeonTypeFlags::Float32: 2051 return Context.FloatTy; 2052 case NeonTypeFlags::Float64: 2053 return Context.DoubleTy; 2054 case NeonTypeFlags::BFloat16: 2055 return Context.BFloat16Ty; 2056 } 2057 llvm_unreachable("Invalid NeonTypeFlag!"); 2058 } 2059 2060 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2061 // Range check SVE intrinsics that take immediate values. 2062 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2063 2064 switch (BuiltinID) { 2065 default: 2066 return false; 2067 #define GET_SVE_IMMEDIATE_CHECK 2068 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2069 #undef GET_SVE_IMMEDIATE_CHECK 2070 } 2071 2072 // Perform all the immediate checks for this builtin call. 2073 bool HasError = false; 2074 for (auto &I : ImmChecks) { 2075 int ArgNum, CheckTy, ElementSizeInBits; 2076 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2077 2078 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2079 2080 // Function that checks whether the operand (ArgNum) is an immediate 2081 // that is one of the predefined values. 2082 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2083 int ErrDiag) -> bool { 2084 // We can't check the value of a dependent argument. 2085 Expr *Arg = TheCall->getArg(ArgNum); 2086 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2087 return false; 2088 2089 // Check constant-ness first. 2090 llvm::APSInt Imm; 2091 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2092 return true; 2093 2094 if (!CheckImm(Imm.getSExtValue())) 2095 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2096 return false; 2097 }; 2098 2099 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2100 case SVETypeFlags::ImmCheck0_31: 2101 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2102 HasError = true; 2103 break; 2104 case SVETypeFlags::ImmCheck0_13: 2105 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2106 HasError = true; 2107 break; 2108 case SVETypeFlags::ImmCheck1_16: 2109 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2110 HasError = true; 2111 break; 2112 case SVETypeFlags::ImmCheck0_7: 2113 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2114 HasError = true; 2115 break; 2116 case SVETypeFlags::ImmCheckExtract: 2117 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2118 (2048 / ElementSizeInBits) - 1)) 2119 HasError = true; 2120 break; 2121 case SVETypeFlags::ImmCheckShiftRight: 2122 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2123 HasError = true; 2124 break; 2125 case SVETypeFlags::ImmCheckShiftRightNarrow: 2126 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2127 ElementSizeInBits / 2)) 2128 HasError = true; 2129 break; 2130 case SVETypeFlags::ImmCheckShiftLeft: 2131 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2132 ElementSizeInBits - 1)) 2133 HasError = true; 2134 break; 2135 case SVETypeFlags::ImmCheckLaneIndex: 2136 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2137 (128 / (1 * ElementSizeInBits)) - 1)) 2138 HasError = true; 2139 break; 2140 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2141 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2142 (128 / (2 * ElementSizeInBits)) - 1)) 2143 HasError = true; 2144 break; 2145 case 
SVETypeFlags::ImmCheckLaneIndexDot: 2146 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2147 (128 / (4 * ElementSizeInBits)) - 1)) 2148 HasError = true; 2149 break; 2150 case SVETypeFlags::ImmCheckComplexRot90_270: 2151 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2152 diag::err_rotation_argument_to_cadd)) 2153 HasError = true; 2154 break; 2155 case SVETypeFlags::ImmCheckComplexRotAll90: 2156 if (CheckImmediateInSet( 2157 [](int64_t V) { 2158 return V == 0 || V == 90 || V == 180 || V == 270; 2159 }, 2160 diag::err_rotation_argument_to_cmla)) 2161 HasError = true; 2162 break; 2163 case SVETypeFlags::ImmCheck0_1: 2164 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2165 HasError = true; 2166 break; 2167 case SVETypeFlags::ImmCheck0_2: 2168 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2169 HasError = true; 2170 break; 2171 case SVETypeFlags::ImmCheck0_3: 2172 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2173 HasError = true; 2174 break; 2175 } 2176 } 2177 2178 return HasError; 2179 } 2180 2181 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2182 unsigned BuiltinID, CallExpr *TheCall) { 2183 llvm::APSInt Result; 2184 uint64_t mask = 0; 2185 unsigned TV = 0; 2186 int PtrArgNum = -1; 2187 bool HasConstPtr = false; 2188 switch (BuiltinID) { 2189 #define GET_NEON_OVERLOAD_CHECK 2190 #include "clang/Basic/arm_neon.inc" 2191 #include "clang/Basic/arm_fp16.inc" 2192 #undef GET_NEON_OVERLOAD_CHECK 2193 } 2194 2195 // For NEON intrinsics which are overloaded on vector element type, validate 2196 // the immediate which specifies which variant to emit. 2197 unsigned ImmArg = TheCall->getNumArgs()-1; 2198 if (mask) { 2199 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2200 return true; 2201 2202 TV = Result.getLimitedValue(64); 2203 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2204 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2205 << TheCall->getArg(ImmArg)->getSourceRange(); 2206 } 2207 2208 if (PtrArgNum >= 0) { 2209 // Check that pointer arguments have the specified type. 2210 Expr *Arg = TheCall->getArg(PtrArgNum); 2211 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2212 Arg = ICE->getSubExpr(); 2213 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2214 QualType RHSTy = RHS.get()->getType(); 2215 2216 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2217 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2218 Arch == llvm::Triple::aarch64_32 || 2219 Arch == llvm::Triple::aarch64_be; 2220 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2221 QualType EltTy = 2222 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2223 if (HasConstPtr) 2224 EltTy = EltTy.withConst(); 2225 QualType LHSTy = Context.getPointerType(EltTy); 2226 AssignConvertType ConvTy; 2227 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2228 if (RHS.isInvalid()) 2229 return true; 2230 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2231 RHS.get(), AA_Assigning)) 2232 return true; 2233 } 2234 2235 // For NEON intrinsics which take an immediate value as part of the 2236 // instruction, range check them here. 
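  // Worked example (a sketch of how the generated values combine): for a
  // 64-bit element right shift the table gives l == 1 and u == 63, so the
  // accepted immediate range is [l, u + l] == [1, 64].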
2237 unsigned i = 0, l = 0, u = 0; 2238 switch (BuiltinID) { 2239 default: 2240 return false; 2241 #define GET_NEON_IMMEDIATE_CHECK 2242 #include "clang/Basic/arm_neon.inc" 2243 #include "clang/Basic/arm_fp16.inc" 2244 #undef GET_NEON_IMMEDIATE_CHECK 2245 } 2246 2247 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2248 } 2249 2250 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2251 switch (BuiltinID) { 2252 default: 2253 return false; 2254 #include "clang/Basic/arm_mve_builtin_sema.inc" 2255 } 2256 } 2257 2258 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2259 CallExpr *TheCall) { 2260 bool Err = false; 2261 switch (BuiltinID) { 2262 default: 2263 return false; 2264 #include "clang/Basic/arm_cde_builtin_sema.inc" 2265 } 2266 2267 if (Err) 2268 return true; 2269 2270 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2271 } 2272 2273 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2274 const Expr *CoprocArg, bool WantCDE) { 2275 if (isConstantEvaluated()) 2276 return false; 2277 2278 // We can't check the value of a dependent argument. 2279 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 2280 return false; 2281 2282 llvm::APSInt CoprocNoAP; 2283 bool IsICE = CoprocArg->isIntegerConstantExpr(CoprocNoAP, Context); 2284 (void)IsICE; 2285 assert(IsICE && "Coprocessor immediate is not a constant expression"); 2286 int64_t CoprocNo = CoprocNoAP.getExtValue(); 2287 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 2288 2289 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 2290 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 2291 2292 if (IsCDECoproc != WantCDE) 2293 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 2294 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 2295 2296 return false; 2297 } 2298 2299 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 2300 unsigned MaxWidth) { 2301 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 2302 BuiltinID == ARM::BI__builtin_arm_ldaex || 2303 BuiltinID == ARM::BI__builtin_arm_strex || 2304 BuiltinID == ARM::BI__builtin_arm_stlex || 2305 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2306 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2307 BuiltinID == AArch64::BI__builtin_arm_strex || 2308 BuiltinID == AArch64::BI__builtin_arm_stlex) && 2309 "unexpected ARM builtin"); 2310 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 2311 BuiltinID == ARM::BI__builtin_arm_ldaex || 2312 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2313 BuiltinID == AArch64::BI__builtin_arm_ldaex; 2314 2315 DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 2316 2317 // Ensure that we have the proper number of arguments. 2318 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 2319 return true; 2320 2321 // Inspect the pointer argument of the atomic builtin. This should always be 2322 // a pointer type, whose element is an integral scalar or pointer type. 2323 // Because it is a pointer type, we don't have to worry about any implicit 2324 // casts here. 2325 Expr *PointerArg = TheCall->getArg(IsLdrex ?
0 : 1); 2326 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 2327 if (PointerArgRes.isInvalid()) 2328 return true; 2329 PointerArg = PointerArgRes.get(); 2330 2331 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 2332 if (!pointerType) { 2333 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 2334 << PointerArg->getType() << PointerArg->getSourceRange(); 2335 return true; 2336 } 2337 2338 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 2339 // task is to insert the appropriate casts into the AST. First work out just 2340 // what the appropriate type is. 2341 QualType ValType = pointerType->getPointeeType(); 2342 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 2343 if (IsLdrex) 2344 AddrType.addConst(); 2345 2346 // Issue a warning if the cast is dodgy. 2347 CastKind CastNeeded = CK_NoOp; 2348 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 2349 CastNeeded = CK_BitCast; 2350 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 2351 << PointerArg->getType() << Context.getPointerType(AddrType) 2352 << AA_Passing << PointerArg->getSourceRange(); 2353 } 2354 2355 // Finally, do the cast and replace the argument with the corrected version. 2356 AddrType = Context.getPointerType(AddrType); 2357 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 2358 if (PointerArgRes.isInvalid()) 2359 return true; 2360 PointerArg = PointerArgRes.get(); 2361 2362 TheCall->setArg(IsLdrex ? 0 : 1, PointerArg); 2363 2364 // In general, we allow ints, floats and pointers to be loaded and stored. 2365 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 2366 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 2367 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 2368 << PointerArg->getType() << PointerArg->getSourceRange(); 2369 return true; 2370 } 2371 2372 // But ARM doesn't have instructions to deal with 128-bit versions. 2373 if (Context.getTypeSize(ValType) > MaxWidth) { 2374 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 2375 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 2376 << PointerArg->getType() << PointerArg->getSourceRange(); 2377 return true; 2378 } 2379 2380 switch (ValType.getObjCLifetime()) { 2381 case Qualifiers::OCL_None: 2382 case Qualifiers::OCL_ExplicitNone: 2383 // okay 2384 break; 2385 2386 case Qualifiers::OCL_Weak: 2387 case Qualifiers::OCL_Strong: 2388 case Qualifiers::OCL_Autoreleasing: 2389 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 2390 << ValType << PointerArg->getSourceRange(); 2391 return true; 2392 } 2393 2394 if (IsLdrex) { 2395 TheCall->setType(ValType); 2396 return false; 2397 } 2398 2399 // Initialize the argument to be stored. 2400 ExprResult ValArg = TheCall->getArg(0); 2401 InitializedEntity Entity = InitializedEntity::InitializeParameter( 2402 Context, ValType, /*consume*/ false); 2403 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 2404 if (ValArg.isInvalid()) 2405 return true; 2406 TheCall->setArg(0, ValArg.get()); 2407 2408 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 2409 // but the custom checker bypasses all default analysis. 
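  // (For example, __builtin_arm_strex(Val, Ptr) evaluates to 0 if the store
  // succeeded and to 1 otherwise, hence the int result type.)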
2410 TheCall->setType(Context.IntTy); 2411 return false; 2412 } 2413 2414 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2415 CallExpr *TheCall) { 2416 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 2417 BuiltinID == ARM::BI__builtin_arm_ldaex || 2418 BuiltinID == ARM::BI__builtin_arm_strex || 2419 BuiltinID == ARM::BI__builtin_arm_stlex) { 2420 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 2421 } 2422 2423 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 2424 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2425 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 2426 } 2427 2428 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 2429 BuiltinID == ARM::BI__builtin_arm_wsr64) 2430 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 2431 2432 if (BuiltinID == ARM::BI__builtin_arm_rsr || 2433 BuiltinID == ARM::BI__builtin_arm_rsrp || 2434 BuiltinID == ARM::BI__builtin_arm_wsr || 2435 BuiltinID == ARM::BI__builtin_arm_wsrp) 2436 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2437 2438 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2439 return true; 2440 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 2441 return true; 2442 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2443 return true; 2444 2445 // For intrinsics which take an immediate value as part of the instruction, 2446 // range check them here. 2447 // FIXME: VFP Intrinsics should error if VFP not present. 2448 switch (BuiltinID) { 2449 default: return false; 2450 case ARM::BI__builtin_arm_ssat: 2451 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 2452 case ARM::BI__builtin_arm_usat: 2453 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 2454 case ARM::BI__builtin_arm_ssat16: 2455 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 2456 case ARM::BI__builtin_arm_usat16: 2457 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 2458 case ARM::BI__builtin_arm_vcvtr_f: 2459 case ARM::BI__builtin_arm_vcvtr_d: 2460 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 2461 case ARM::BI__builtin_arm_dmb: 2462 case ARM::BI__builtin_arm_dsb: 2463 case ARM::BI__builtin_arm_isb: 2464 case ARM::BI__builtin_arm_dbg: 2465 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 2466 case ARM::BI__builtin_arm_cdp: 2467 case ARM::BI__builtin_arm_cdp2: 2468 case ARM::BI__builtin_arm_mcr: 2469 case ARM::BI__builtin_arm_mcr2: 2470 case ARM::BI__builtin_arm_mrc: 2471 case ARM::BI__builtin_arm_mrc2: 2472 case ARM::BI__builtin_arm_mcrr: 2473 case ARM::BI__builtin_arm_mcrr2: 2474 case ARM::BI__builtin_arm_mrrc: 2475 case ARM::BI__builtin_arm_mrrc2: 2476 case ARM::BI__builtin_arm_ldc: 2477 case ARM::BI__builtin_arm_ldcl: 2478 case ARM::BI__builtin_arm_ldc2: 2479 case ARM::BI__builtin_arm_ldc2l: 2480 case ARM::BI__builtin_arm_stc: 2481 case ARM::BI__builtin_arm_stcl: 2482 case ARM::BI__builtin_arm_stc2: 2483 case ARM::BI__builtin_arm_stc2l: 2484 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 2485 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 2486 /*WantCDE*/ false); 2487 } 2488 } 2489 2490 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 2491 unsigned BuiltinID, 2492 CallExpr *TheCall) { 2493 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 2494 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2495 BuiltinID == AArch64::BI__builtin_arm_strex || 2496 BuiltinID == AArch64::BI__builtin_arm_stlex) { 2497 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 2498 } 2499 2500 if (BuiltinID == 
AArch64::BI__builtin_arm_prefetch) { 2501 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2502 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 2503 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 2504 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 2505 } 2506 2507 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 2508 BuiltinID == AArch64::BI__builtin_arm_wsr64) 2509 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2510 2511 // Memory Tagging Extensions (MTE) Intrinsics 2512 if (BuiltinID == AArch64::BI__builtin_arm_irg || 2513 BuiltinID == AArch64::BI__builtin_arm_addg || 2514 BuiltinID == AArch64::BI__builtin_arm_gmi || 2515 BuiltinID == AArch64::BI__builtin_arm_ldg || 2516 BuiltinID == AArch64::BI__builtin_arm_stg || 2517 BuiltinID == AArch64::BI__builtin_arm_subp) { 2518 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 2519 } 2520 2521 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 2522 BuiltinID == AArch64::BI__builtin_arm_rsrp || 2523 BuiltinID == AArch64::BI__builtin_arm_wsr || 2524 BuiltinID == AArch64::BI__builtin_arm_wsrp) 2525 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2526 2527 // Only check the valid encoding range. Any constant in this range would be 2528 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 2529 // an exception for incorrect registers. This matches MSVC behavior. 2530 if (BuiltinID == AArch64::BI_ReadStatusReg || 2531 BuiltinID == AArch64::BI_WriteStatusReg) 2532 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 2533 2534 if (BuiltinID == AArch64::BI__getReg) 2535 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 2536 2537 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2538 return true; 2539 2540 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 2541 return true; 2542 2543 // For intrinsics which take an immediate value as part of the instruction, 2544 // range check them here. 2545 unsigned i = 0, l = 0, u = 0; 2546 switch (BuiltinID) { 2547 default: return false; 2548 case AArch64::BI__builtin_arm_dmb: 2549 case AArch64::BI__builtin_arm_dsb: 2550 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 2551 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 2552 } 2553 2554 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2555 } 2556 2557 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 2558 CallExpr *TheCall) { 2559 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 2560 BuiltinID == BPF::BI__builtin_btf_type_id) && 2561 "unexpected BPF builtin"); 2562 2563 if (checkArgCount(*this, TheCall, 2)) 2564 return true; 2565 2566 Expr *Arg; 2567 if (BuiltinID == BPF::BI__builtin_btf_type_id) { 2568 // The second argument needs to be a constant int 2569 llvm::APSInt Value; 2570 Arg = TheCall->getArg(1); 2571 if (!Arg->isIntegerConstantExpr(Value, Context)) { 2572 Diag(Arg->getBeginLoc(), diag::err_btf_type_id_not_const) 2573 << 2 << Arg->getSourceRange(); 2574 return true; 2575 } 2576 2577 TheCall->setType(Context.UnsignedIntTy); 2578 return false; 2579 } 2580 2581 // The first argument needs to be a record field access. 2582 // If it is an array element access, we delay the decision 2583 // to the BPF backend to check whether the access is a 2584 // field access or not.
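  // Illustrative example: given 'struct S { int A; } Obj;', a call such as
  //   __builtin_preserve_field_info(Obj.A, 0);
  // satisfies this requirement, whereas passing a plain local variable is
  // diagnosed below.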
2585 Arg = TheCall->getArg(0); 2586 if (Arg->getType()->getAsPlaceholderType() || 2587 (Arg->IgnoreParens()->getObjectKind() != OK_BitField && 2588 !dyn_cast<MemberExpr>(Arg->IgnoreParens()) && 2589 !dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens()))) { 2590 Diag(Arg->getBeginLoc(), diag::err_preserve_field_info_not_field) 2591 << 1 << Arg->getSourceRange(); 2592 return true; 2593 } 2594 2595 // The second argument needs to be a constant int 2596 Arg = TheCall->getArg(1); 2597 llvm::APSInt Value; 2598 if (!Arg->isIntegerConstantExpr(Value, Context)) { 2599 Diag(Arg->getBeginLoc(), diag::err_preserve_field_info_not_const) 2600 << 2 << Arg->getSourceRange(); 2601 return true; 2602 } 2603 2604 TheCall->setType(Context.UnsignedIntTy); 2605 return false; 2606 } 2607 2608 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2609 struct ArgInfo { 2610 uint8_t OpNum; 2611 bool IsSigned; 2612 uint8_t BitWidth; 2613 uint8_t Align; 2614 }; 2615 struct BuiltinInfo { 2616 unsigned BuiltinID; 2617 ArgInfo Infos[2]; 2618 }; 2619 2620 static BuiltinInfo Infos[] = { 2621 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 2622 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 2623 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 2624 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 2625 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 2626 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 2627 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 2628 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 2629 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 2630 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 2631 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 2632 2633 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 2634 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 2635 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 2636 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 2637 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 2638 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 2639 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 2640 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 2641 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 2642 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 2643 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 2644 2645 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 2646 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 2647 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 2648 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 2649 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 2650 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 2651 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 2652 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 2653 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 2654 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 2655 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 2656 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 2657 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 2658 { 
Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 2659 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 2660 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 2661 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 2662 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 2663 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 2664 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 2665 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 2666 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 2667 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 2668 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 2669 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 2670 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 2671 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 2672 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 2673 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 2674 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 2675 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 2676 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 2677 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 2678 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 2679 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 2680 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 2681 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 2682 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 2683 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 2684 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 2685 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 2686 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 2687 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 2688 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 2689 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 2690 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 2691 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 2692 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 2693 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 2694 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 2695 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 2696 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 2697 {{ 1, false, 6, 0 }} }, 2698 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 2699 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 2700 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 2701 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 2702 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 2703 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 2704 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 2705 {{ 1, false, 5, 0 }} }, 2706 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 2707 { 
Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 2708 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 2709 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 2710 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 2711 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 2712 { 2, false, 5, 0 }} }, 2713 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 2714 { 2, false, 6, 0 }} }, 2715 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 2716 { 3, false, 5, 0 }} }, 2717 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 2718 { 3, false, 6, 0 }} }, 2719 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 2720 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 2721 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 2722 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 2723 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 2724 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 2725 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 2726 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 2727 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 2728 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 2729 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 2730 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 2731 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 2732 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 2733 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 2734 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 2735 {{ 2, false, 4, 0 }, 2736 { 3, false, 5, 0 }} }, 2737 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 2738 {{ 2, false, 4, 0 }, 2739 { 3, false, 5, 0 }} }, 2740 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 2741 {{ 2, false, 4, 0 }, 2742 { 3, false, 5, 0 }} }, 2743 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 2744 {{ 2, false, 4, 0 }, 2745 { 3, false, 5, 0 }} }, 2746 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 2747 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 2748 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 2749 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 2750 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 2751 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 2752 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 2753 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 2754 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 2755 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 2756 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 2757 { 2, false, 5, 0 }} }, 2758 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 2759 { 2, false, 6, 0 }} }, 2760 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 2761 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 2762 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 2763 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 2764 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, 
false, 5, 0 }} }, 2765 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 2766 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 2767 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 2768 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 2769 {{ 1, false, 4, 0 }} }, 2770 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 2771 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 2772 {{ 1, false, 4, 0 }} }, 2773 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 2774 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 2775 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 2776 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 2777 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 2778 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 2779 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 2780 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 2781 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 2782 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 2783 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 2784 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 2785 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 2786 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 2787 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 2788 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 2789 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 2790 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 2791 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 2792 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 2793 {{ 3, false, 1, 0 }} }, 2794 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 2795 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 2796 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 2797 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 2798 {{ 3, false, 1, 0 }} }, 2799 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 2800 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 2801 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 2802 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 2803 {{ 3, false, 1, 0 }} }, 2804 }; 2805 2806 // Use a dynamically initialized static to sort the table exactly once on 2807 // first run. 2808 static const bool SortOnce = 2809 (llvm::sort(Infos, 2810 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 2811 return LHS.BuiltinID < RHS.BuiltinID; 2812 }), 2813 true); 2814 (void)SortOnce; 2815 2816 const BuiltinInfo *F = llvm::partition_point( 2817 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 2818 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 2819 return false; 2820 2821 bool Error = false; 2822 2823 for (const ArgInfo &A : F->Infos) { 2824 // Ignore empty ArgInfo elements. 2825 if (A.BitWidth == 0) 2826 continue; 2827 2828 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 2829 int32_t Max = (1 << (A.IsSigned ? 
A.BitWidth - 1 : A.BitWidth)) - 1; 2830 if (!A.Align) { 2831 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 2832 } else { 2833 unsigned M = 1 << A.Align; 2834 Min *= M; 2835 Max *= M; 2836 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) | 2837 SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 2838 } 2839 } 2840 return Error; 2841 } 2842 2843 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 2844 CallExpr *TheCall) { 2845 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 2846 } 2847 2848 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI, 2849 unsigned BuiltinID, CallExpr *TheCall) { 2850 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) || 2851 CheckMipsBuiltinArgument(BuiltinID, TheCall); 2852 } 2853 2854 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, 2855 CallExpr *TheCall) { 2856 2857 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID && 2858 BuiltinID <= Mips::BI__builtin_mips_lwx) { 2859 if (!TI.hasFeature("dsp")) 2860 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp); 2861 } 2862 2863 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID && 2864 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) { 2865 if (!TI.hasFeature("dspr2")) 2866 return Diag(TheCall->getBeginLoc(), 2867 diag::err_mips_builtin_requires_dspr2); 2868 } 2869 2870 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID && 2871 BuiltinID <= Mips::BI__builtin_msa_xori_b) { 2872 if (!TI.hasFeature("msa")) 2873 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa); 2874 } 2875 2876 return false; 2877 } 2878 2879 // CheckMipsBuiltinArgument - Checks that the constant value passed to the 2880 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 2881 // ordering for DSP is unspecified. MSA is ordered by the data format used 2882 // by the underlying instruction, i.e., df/m, df/n and then by size. 2883 // 2884 // FIXME: The size tests here should instead be tablegen'd along with the 2885 // definitions from include/clang/Basic/BuiltinsMips.def. 2886 // FIXME: GCC is strict on signedness for some of these intrinsics; we should 2887 // be too. 2888 bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2889 unsigned i = 0, l = 0, u = 0, m = 0; 2890 switch (BuiltinID) { 2891 default: return false; 2892 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; 2893 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; 2894 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; 2895 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; 2896 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; 2897 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; 2898 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; 2899 // MSA intrinsics. Instructions (which the intrinsics map to) that use the 2900 // df/m field. 2901 // These intrinsics take an unsigned 3 bit immediate.
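  // (An unsigned 3 bit immediate corresponds to the range [0, 7], i.e. u = 7
  // in the cases below.)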
2902 case Mips::BI__builtin_msa_bclri_b: 2903 case Mips::BI__builtin_msa_bnegi_b: 2904 case Mips::BI__builtin_msa_bseti_b: 2905 case Mips::BI__builtin_msa_sat_s_b: 2906 case Mips::BI__builtin_msa_sat_u_b: 2907 case Mips::BI__builtin_msa_slli_b: 2908 case Mips::BI__builtin_msa_srai_b: 2909 case Mips::BI__builtin_msa_srari_b: 2910 case Mips::BI__builtin_msa_srli_b: 2911 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 2912 case Mips::BI__builtin_msa_binsli_b: 2913 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 2914 // These intrinsics take an unsigned 4 bit immediate. 2915 case Mips::BI__builtin_msa_bclri_h: 2916 case Mips::BI__builtin_msa_bnegi_h: 2917 case Mips::BI__builtin_msa_bseti_h: 2918 case Mips::BI__builtin_msa_sat_s_h: 2919 case Mips::BI__builtin_msa_sat_u_h: 2920 case Mips::BI__builtin_msa_slli_h: 2921 case Mips::BI__builtin_msa_srai_h: 2922 case Mips::BI__builtin_msa_srari_h: 2923 case Mips::BI__builtin_msa_srli_h: 2924 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 2925 case Mips::BI__builtin_msa_binsli_h: 2926 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 2927 // These intrinsics take an unsigned 5 bit immediate. 2928 // The first block of intrinsics actually have an unsigned 5 bit field, 2929 // not a df/n field. 2930 case Mips::BI__builtin_msa_cfcmsa: 2931 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 2932 case Mips::BI__builtin_msa_clei_u_b: 2933 case Mips::BI__builtin_msa_clei_u_h: 2934 case Mips::BI__builtin_msa_clei_u_w: 2935 case Mips::BI__builtin_msa_clei_u_d: 2936 case Mips::BI__builtin_msa_clti_u_b: 2937 case Mips::BI__builtin_msa_clti_u_h: 2938 case Mips::BI__builtin_msa_clti_u_w: 2939 case Mips::BI__builtin_msa_clti_u_d: 2940 case Mips::BI__builtin_msa_maxi_u_b: 2941 case Mips::BI__builtin_msa_maxi_u_h: 2942 case Mips::BI__builtin_msa_maxi_u_w: 2943 case Mips::BI__builtin_msa_maxi_u_d: 2944 case Mips::BI__builtin_msa_mini_u_b: 2945 case Mips::BI__builtin_msa_mini_u_h: 2946 case Mips::BI__builtin_msa_mini_u_w: 2947 case Mips::BI__builtin_msa_mini_u_d: 2948 case Mips::BI__builtin_msa_addvi_b: 2949 case Mips::BI__builtin_msa_addvi_h: 2950 case Mips::BI__builtin_msa_addvi_w: 2951 case Mips::BI__builtin_msa_addvi_d: 2952 case Mips::BI__builtin_msa_bclri_w: 2953 case Mips::BI__builtin_msa_bnegi_w: 2954 case Mips::BI__builtin_msa_bseti_w: 2955 case Mips::BI__builtin_msa_sat_s_w: 2956 case Mips::BI__builtin_msa_sat_u_w: 2957 case Mips::BI__builtin_msa_slli_w: 2958 case Mips::BI__builtin_msa_srai_w: 2959 case Mips::BI__builtin_msa_srari_w: 2960 case Mips::BI__builtin_msa_srli_w: 2961 case Mips::BI__builtin_msa_srlri_w: 2962 case Mips::BI__builtin_msa_subvi_b: 2963 case Mips::BI__builtin_msa_subvi_h: 2964 case Mips::BI__builtin_msa_subvi_w: 2965 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 2966 case Mips::BI__builtin_msa_binsli_w: 2967 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 2968 // These intrinsics take an unsigned 6 bit immediate. 
2969 case Mips::BI__builtin_msa_bclri_d: 2970 case Mips::BI__builtin_msa_bnegi_d: 2971 case Mips::BI__builtin_msa_bseti_d: 2972 case Mips::BI__builtin_msa_sat_s_d: 2973 case Mips::BI__builtin_msa_sat_u_d: 2974 case Mips::BI__builtin_msa_slli_d: 2975 case Mips::BI__builtin_msa_srai_d: 2976 case Mips::BI__builtin_msa_srari_d: 2977 case Mips::BI__builtin_msa_srli_d: 2978 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 2979 case Mips::BI__builtin_msa_binsli_d: 2980 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 2981 // These intrinsics take a signed 5 bit immediate. 2982 case Mips::BI__builtin_msa_ceqi_b: 2983 case Mips::BI__builtin_msa_ceqi_h: 2984 case Mips::BI__builtin_msa_ceqi_w: 2985 case Mips::BI__builtin_msa_ceqi_d: 2986 case Mips::BI__builtin_msa_clti_s_b: 2987 case Mips::BI__builtin_msa_clti_s_h: 2988 case Mips::BI__builtin_msa_clti_s_w: 2989 case Mips::BI__builtin_msa_clti_s_d: 2990 case Mips::BI__builtin_msa_clei_s_b: 2991 case Mips::BI__builtin_msa_clei_s_h: 2992 case Mips::BI__builtin_msa_clei_s_w: 2993 case Mips::BI__builtin_msa_clei_s_d: 2994 case Mips::BI__builtin_msa_maxi_s_b: 2995 case Mips::BI__builtin_msa_maxi_s_h: 2996 case Mips::BI__builtin_msa_maxi_s_w: 2997 case Mips::BI__builtin_msa_maxi_s_d: 2998 case Mips::BI__builtin_msa_mini_s_b: 2999 case Mips::BI__builtin_msa_mini_s_h: 3000 case Mips::BI__builtin_msa_mini_s_w: 3001 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3002 // These intrinsics take an unsigned 8 bit immediate. 3003 case Mips::BI__builtin_msa_andi_b: 3004 case Mips::BI__builtin_msa_nori_b: 3005 case Mips::BI__builtin_msa_ori_b: 3006 case Mips::BI__builtin_msa_shf_b: 3007 case Mips::BI__builtin_msa_shf_h: 3008 case Mips::BI__builtin_msa_shf_w: 3009 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3010 case Mips::BI__builtin_msa_bseli_b: 3011 case Mips::BI__builtin_msa_bmnzi_b: 3012 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3013 // df/n format 3014 // These intrinsics take an unsigned 4 bit immediate. 3015 case Mips::BI__builtin_msa_copy_s_b: 3016 case Mips::BI__builtin_msa_copy_u_b: 3017 case Mips::BI__builtin_msa_insve_b: 3018 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3019 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3020 // These intrinsics take an unsigned 3 bit immediate. 3021 case Mips::BI__builtin_msa_copy_s_h: 3022 case Mips::BI__builtin_msa_copy_u_h: 3023 case Mips::BI__builtin_msa_insve_h: 3024 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3025 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3026 // These intrinsics take an unsigned 2 bit immediate. 3027 case Mips::BI__builtin_msa_copy_s_w: 3028 case Mips::BI__builtin_msa_copy_u_w: 3029 case Mips::BI__builtin_msa_insve_w: 3030 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3031 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3032 // These intrinsics take an unsigned 1 bit immediate. 3033 case Mips::BI__builtin_msa_copy_s_d: 3034 case Mips::BI__builtin_msa_copy_u_d: 3035 case Mips::BI__builtin_msa_insve_d: 3036 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3037 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3038 // Memory offsets and immediate loads. 3039 // These intrinsics take a signed 10 bit immediate. 
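// For illustration (the operand name below is a placeholder): the ld/st forms
// in this block additionally require the offset to be a multiple of the
// element size, so for example
//   __builtin_msa_ld_w(ptr, 2044);   // accepted: in [-2048, 2044], multiple of 4
//   __builtin_msa_ld_w(ptr, 2);      // rejected: not a multiple of 4
// where 'ptr' stands for a suitably typed pointer argument.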
3040 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3041 case Mips::BI__builtin_msa_ldi_h: 3042 case Mips::BI__builtin_msa_ldi_w: 3043 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3044 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3045 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3046 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3047 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3048 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 3049 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 3050 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3051 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3052 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3053 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3054 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 3055 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 3056 } 3057 3058 if (!m) 3059 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3060 3061 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3062 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3063 } 3064 3065 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3066 CallExpr *TheCall) { 3067 unsigned i = 0, l = 0, u = 0; 3068 bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || 3069 BuiltinID == PPC::BI__builtin_divdeu || 3070 BuiltinID == PPC::BI__builtin_bpermd; 3071 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3072 bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || 3073 BuiltinID == PPC::BI__builtin_divweu || 3074 BuiltinID == PPC::BI__builtin_divde || 3075 BuiltinID == PPC::BI__builtin_divdeu; 3076 3077 if (Is64BitBltin && !IsTarget64Bit) 3078 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3079 << TheCall->getSourceRange(); 3080 3081 if ((IsBltinExtDiv && !TI.hasFeature("extdiv")) || 3082 (BuiltinID == PPC::BI__builtin_bpermd && !TI.hasFeature("bpermd"))) 3083 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3084 << TheCall->getSourceRange(); 3085 3086 auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool { 3087 if (!TI.hasFeature("vsx")) 3088 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3089 << TheCall->getSourceRange(); 3090 return false; 3091 }; 3092 3093 switch (BuiltinID) { 3094 default: return false; 3095 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3096 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3097 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3098 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3099 case PPC::BI__builtin_altivec_dss: 3100 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3101 case PPC::BI__builtin_tbegin: 3102 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; 3103 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; 3104 case PPC::BI__builtin_tabortwc: 3105 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; 3106 case PPC::BI__builtin_tabortwci: 3107 case PPC::BI__builtin_tabortdci: 3108 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3109 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 3110 case PPC::BI__builtin_altivec_dst: 3111 case PPC::BI__builtin_altivec_dstt: 3112 case PPC::BI__builtin_altivec_dstst: 
3113   case PPC::BI__builtin_altivec_dststt:
3114     return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
3115   case PPC::BI__builtin_vsx_xxpermdi:
3116   case PPC::BI__builtin_vsx_xxsldwi:
3117     return SemaBuiltinVSX(TheCall);
3118   case PPC::BI__builtin_unpack_vector_int128:
3119     return SemaVSXCheck(TheCall) ||
3120            SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3121   case PPC::BI__builtin_pack_vector_int128:
3122     return SemaVSXCheck(TheCall);
3123   case PPC::BI__builtin_altivec_vgnb:
3124     return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
3125   case PPC::BI__builtin_vsx_xxeval:
3126     return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
3127   case PPC::BI__builtin_altivec_vsldbi:
3128     return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
3129   case PPC::BI__builtin_altivec_vsrdbi:
3130     return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
3131   case PPC::BI__builtin_vsx_xxpermx:
3132     return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
3133   }
3134   return SemaBuiltinConstantArgRange(TheCall, i, l, u);
3135 }
3136
3137 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
3138                                           CallExpr *TheCall) {
3139   // Position of the memory order and scope arguments in the builtin.
3140   unsigned OrderIndex, ScopeIndex;
3141   switch (BuiltinID) {
3142   case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
3143   case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
3144   case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
3145   case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
3146     OrderIndex = 2;
3147     ScopeIndex = 3;
3148     break;
3149   case AMDGPU::BI__builtin_amdgcn_fence:
3150     OrderIndex = 0;
3151     ScopeIndex = 1;
3152     break;
3153   default:
3154     return false;
3155   }
3156
3157   ExprResult Arg = TheCall->getArg(OrderIndex);
3158   auto ArgExpr = Arg.get();
3159   Expr::EvalResult ArgResult;
3160
3161   if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
3162     return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
3163            << ArgExpr->getType();
3164   int ord = ArgResult.Val.getInt().getZExtValue();
3165
3166   // Check validity of memory ordering as per C11 / C++11's memory model.
3167   switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
3168   case llvm::AtomicOrderingCABI::acquire:
3169   case llvm::AtomicOrderingCABI::release:
3170   case llvm::AtomicOrderingCABI::acq_rel:
3171   case llvm::AtomicOrderingCABI::seq_cst:
3172     break;
3173   default: {
3174     return Diag(ArgExpr->getBeginLoc(),
3175                 diag::warn_atomic_op_has_invalid_memory_order)
3176            << ArgExpr->getSourceRange();
3177   }
3178   }
3179
3180   Arg = TheCall->getArg(ScopeIndex);
3181   ArgExpr = Arg.get();
3182   Expr::EvalResult ArgResult1;
3183   // Check that the sync scope is a constant literal.
3184   if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Expr::EvaluateForCodeGen,
3185                                        Context))
3186     return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
3187            << ArgExpr->getType();
3188
3189   return false;
3190 }
3191
3192 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
3193                                            CallExpr *TheCall) {
3194   if (BuiltinID == SystemZ::BI__builtin_tabort) {
3195     Expr *Arg = TheCall->getArg(0);
3196     llvm::APSInt AbortCode(32);
3197     if (Arg->isIntegerConstantExpr(AbortCode, Context) &&
3198         AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256)
3199       return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
3200              << Arg->getSourceRange();
3201   }
3202
3203   // For intrinsics which take an immediate value as part of the instruction,
3204   // range check them here.
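  // For illustration (the operand name below is a placeholder):
  // __builtin_s390_lcbb takes its second operand as an immediate in [0, 15],
  // so
  //   __builtin_s390_lcbb(p, 15);   // accepted
  //   __builtin_s390_lcbb(p, 16);   // rejected as out of range
  // where 'p' stands for the pointer argument.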
3205 unsigned i = 0, l = 0, u = 0; 3206 switch (BuiltinID) { 3207 default: return false; 3208 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 3209 case SystemZ::BI__builtin_s390_verimb: 3210 case SystemZ::BI__builtin_s390_verimh: 3211 case SystemZ::BI__builtin_s390_verimf: 3212 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 3213 case SystemZ::BI__builtin_s390_vfaeb: 3214 case SystemZ::BI__builtin_s390_vfaeh: 3215 case SystemZ::BI__builtin_s390_vfaef: 3216 case SystemZ::BI__builtin_s390_vfaebs: 3217 case SystemZ::BI__builtin_s390_vfaehs: 3218 case SystemZ::BI__builtin_s390_vfaefs: 3219 case SystemZ::BI__builtin_s390_vfaezb: 3220 case SystemZ::BI__builtin_s390_vfaezh: 3221 case SystemZ::BI__builtin_s390_vfaezf: 3222 case SystemZ::BI__builtin_s390_vfaezbs: 3223 case SystemZ::BI__builtin_s390_vfaezhs: 3224 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 3225 case SystemZ::BI__builtin_s390_vfisb: 3226 case SystemZ::BI__builtin_s390_vfidb: 3227 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 3228 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3229 case SystemZ::BI__builtin_s390_vftcisb: 3230 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 3231 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 3232 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 3233 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 3234 case SystemZ::BI__builtin_s390_vstrcb: 3235 case SystemZ::BI__builtin_s390_vstrch: 3236 case SystemZ::BI__builtin_s390_vstrcf: 3237 case SystemZ::BI__builtin_s390_vstrczb: 3238 case SystemZ::BI__builtin_s390_vstrczh: 3239 case SystemZ::BI__builtin_s390_vstrczf: 3240 case SystemZ::BI__builtin_s390_vstrcbs: 3241 case SystemZ::BI__builtin_s390_vstrchs: 3242 case SystemZ::BI__builtin_s390_vstrcfs: 3243 case SystemZ::BI__builtin_s390_vstrczbs: 3244 case SystemZ::BI__builtin_s390_vstrczhs: 3245 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 3246 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 3247 case SystemZ::BI__builtin_s390_vfminsb: 3248 case SystemZ::BI__builtin_s390_vfmaxsb: 3249 case SystemZ::BI__builtin_s390_vfmindb: 3250 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 3251 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 3252 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 3253 } 3254 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3255 } 3256 3257 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 3258 /// This checks that the target supports __builtin_cpu_supports and 3259 /// that the string argument is constant and valid. 3260 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 3261 CallExpr *TheCall) { 3262 Expr *Arg = TheCall->getArg(0); 3263 3264 // Check if the argument is a string literal. 3265 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3266 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3267 << Arg->getSourceRange(); 3268 3269 // Check the contents of the string. 3270 StringRef Feature = 3271 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3272 if (!TI.validateCpuSupports(Feature)) 3273 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 3274 << Arg->getSourceRange(); 3275 return false; 3276 } 3277 3278 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 
3279 /// This checks that the target supports __builtin_cpu_is and 3280 /// that the string argument is constant and valid. 3281 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 3282 Expr *Arg = TheCall->getArg(0); 3283 3284 // Check if the argument is a string literal. 3285 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3286 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3287 << Arg->getSourceRange(); 3288 3289 // Check the contents of the string. 3290 StringRef Feature = 3291 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3292 if (!TI.validateCpuIs(Feature)) 3293 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 3294 << Arg->getSourceRange(); 3295 return false; 3296 } 3297 3298 // Check if the rounding mode is legal. 3299 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 3300 // Indicates if this instruction has rounding control or just SAE. 3301 bool HasRC = false; 3302 3303 unsigned ArgNum = 0; 3304 switch (BuiltinID) { 3305 default: 3306 return false; 3307 case X86::BI__builtin_ia32_vcvttsd2si32: 3308 case X86::BI__builtin_ia32_vcvttsd2si64: 3309 case X86::BI__builtin_ia32_vcvttsd2usi32: 3310 case X86::BI__builtin_ia32_vcvttsd2usi64: 3311 case X86::BI__builtin_ia32_vcvttss2si32: 3312 case X86::BI__builtin_ia32_vcvttss2si64: 3313 case X86::BI__builtin_ia32_vcvttss2usi32: 3314 case X86::BI__builtin_ia32_vcvttss2usi64: 3315 ArgNum = 1; 3316 break; 3317 case X86::BI__builtin_ia32_maxpd512: 3318 case X86::BI__builtin_ia32_maxps512: 3319 case X86::BI__builtin_ia32_minpd512: 3320 case X86::BI__builtin_ia32_minps512: 3321 ArgNum = 2; 3322 break; 3323 case X86::BI__builtin_ia32_cvtps2pd512_mask: 3324 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 3325 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 3326 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 3327 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 3328 case X86::BI__builtin_ia32_cvttps2dq512_mask: 3329 case X86::BI__builtin_ia32_cvttps2qq512_mask: 3330 case X86::BI__builtin_ia32_cvttps2udq512_mask: 3331 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 3332 case X86::BI__builtin_ia32_exp2pd_mask: 3333 case X86::BI__builtin_ia32_exp2ps_mask: 3334 case X86::BI__builtin_ia32_getexppd512_mask: 3335 case X86::BI__builtin_ia32_getexpps512_mask: 3336 case X86::BI__builtin_ia32_rcp28pd_mask: 3337 case X86::BI__builtin_ia32_rcp28ps_mask: 3338 case X86::BI__builtin_ia32_rsqrt28pd_mask: 3339 case X86::BI__builtin_ia32_rsqrt28ps_mask: 3340 case X86::BI__builtin_ia32_vcomisd: 3341 case X86::BI__builtin_ia32_vcomiss: 3342 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 3343 ArgNum = 3; 3344 break; 3345 case X86::BI__builtin_ia32_cmppd512_mask: 3346 case X86::BI__builtin_ia32_cmpps512_mask: 3347 case X86::BI__builtin_ia32_cmpsd_mask: 3348 case X86::BI__builtin_ia32_cmpss_mask: 3349 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 3350 case X86::BI__builtin_ia32_getexpsd128_round_mask: 3351 case X86::BI__builtin_ia32_getexpss128_round_mask: 3352 case X86::BI__builtin_ia32_getmantpd512_mask: 3353 case X86::BI__builtin_ia32_getmantps512_mask: 3354 case X86::BI__builtin_ia32_maxsd_round_mask: 3355 case X86::BI__builtin_ia32_maxss_round_mask: 3356 case X86::BI__builtin_ia32_minsd_round_mask: 3357 case X86::BI__builtin_ia32_minss_round_mask: 3358 case X86::BI__builtin_ia32_rcp28sd_round_mask: 3359 case X86::BI__builtin_ia32_rcp28ss_round_mask: 3360 case X86::BI__builtin_ia32_reducepd512_mask: 3361 case X86::BI__builtin_ia32_reduceps512_mask: 3362 
case X86::BI__builtin_ia32_rndscalepd_mask: 3363 case X86::BI__builtin_ia32_rndscaleps_mask: 3364 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 3365 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 3366 ArgNum = 4; 3367 break; 3368 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3369 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3370 case X86::BI__builtin_ia32_fixupimmps512_mask: 3371 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3372 case X86::BI__builtin_ia32_fixupimmsd_mask: 3373 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3374 case X86::BI__builtin_ia32_fixupimmss_mask: 3375 case X86::BI__builtin_ia32_fixupimmss_maskz: 3376 case X86::BI__builtin_ia32_getmantsd_round_mask: 3377 case X86::BI__builtin_ia32_getmantss_round_mask: 3378 case X86::BI__builtin_ia32_rangepd512_mask: 3379 case X86::BI__builtin_ia32_rangeps512_mask: 3380 case X86::BI__builtin_ia32_rangesd128_round_mask: 3381 case X86::BI__builtin_ia32_rangess128_round_mask: 3382 case X86::BI__builtin_ia32_reducesd_mask: 3383 case X86::BI__builtin_ia32_reducess_mask: 3384 case X86::BI__builtin_ia32_rndscalesd_round_mask: 3385 case X86::BI__builtin_ia32_rndscaless_round_mask: 3386 ArgNum = 5; 3387 break; 3388 case X86::BI__builtin_ia32_vcvtsd2si64: 3389 case X86::BI__builtin_ia32_vcvtsd2si32: 3390 case X86::BI__builtin_ia32_vcvtsd2usi32: 3391 case X86::BI__builtin_ia32_vcvtsd2usi64: 3392 case X86::BI__builtin_ia32_vcvtss2si32: 3393 case X86::BI__builtin_ia32_vcvtss2si64: 3394 case X86::BI__builtin_ia32_vcvtss2usi32: 3395 case X86::BI__builtin_ia32_vcvtss2usi64: 3396 case X86::BI__builtin_ia32_sqrtpd512: 3397 case X86::BI__builtin_ia32_sqrtps512: 3398 ArgNum = 1; 3399 HasRC = true; 3400 break; 3401 case X86::BI__builtin_ia32_addpd512: 3402 case X86::BI__builtin_ia32_addps512: 3403 case X86::BI__builtin_ia32_divpd512: 3404 case X86::BI__builtin_ia32_divps512: 3405 case X86::BI__builtin_ia32_mulpd512: 3406 case X86::BI__builtin_ia32_mulps512: 3407 case X86::BI__builtin_ia32_subpd512: 3408 case X86::BI__builtin_ia32_subps512: 3409 case X86::BI__builtin_ia32_cvtsi2sd64: 3410 case X86::BI__builtin_ia32_cvtsi2ss32: 3411 case X86::BI__builtin_ia32_cvtsi2ss64: 3412 case X86::BI__builtin_ia32_cvtusi2sd64: 3413 case X86::BI__builtin_ia32_cvtusi2ss32: 3414 case X86::BI__builtin_ia32_cvtusi2ss64: 3415 ArgNum = 2; 3416 HasRC = true; 3417 break; 3418 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 3419 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 3420 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 3421 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 3422 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 3423 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 3424 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 3425 case X86::BI__builtin_ia32_cvtps2dq512_mask: 3426 case X86::BI__builtin_ia32_cvtps2qq512_mask: 3427 case X86::BI__builtin_ia32_cvtps2udq512_mask: 3428 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 3429 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 3430 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 3431 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 3432 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 3433 ArgNum = 3; 3434 HasRC = true; 3435 break; 3436 case X86::BI__builtin_ia32_addss_round_mask: 3437 case X86::BI__builtin_ia32_addsd_round_mask: 3438 case X86::BI__builtin_ia32_divss_round_mask: 3439 case X86::BI__builtin_ia32_divsd_round_mask: 3440 case X86::BI__builtin_ia32_mulss_round_mask: 3441 case X86::BI__builtin_ia32_mulsd_round_mask: 3442 case X86::BI__builtin_ia32_subss_round_mask: 3443 case X86::BI__builtin_ia32_subsd_round_mask: 3444 case 
X86::BI__builtin_ia32_scalefpd512_mask: 3445 case X86::BI__builtin_ia32_scalefps512_mask: 3446 case X86::BI__builtin_ia32_scalefsd_round_mask: 3447 case X86::BI__builtin_ia32_scalefss_round_mask: 3448 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 3449 case X86::BI__builtin_ia32_sqrtsd_round_mask: 3450 case X86::BI__builtin_ia32_sqrtss_round_mask: 3451 case X86::BI__builtin_ia32_vfmaddsd3_mask: 3452 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 3453 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 3454 case X86::BI__builtin_ia32_vfmaddss3_mask: 3455 case X86::BI__builtin_ia32_vfmaddss3_maskz: 3456 case X86::BI__builtin_ia32_vfmaddss3_mask3: 3457 case X86::BI__builtin_ia32_vfmaddpd512_mask: 3458 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 3459 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 3460 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 3461 case X86::BI__builtin_ia32_vfmaddps512_mask: 3462 case X86::BI__builtin_ia32_vfmaddps512_maskz: 3463 case X86::BI__builtin_ia32_vfmaddps512_mask3: 3464 case X86::BI__builtin_ia32_vfmsubps512_mask3: 3465 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 3466 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 3467 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 3468 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 3469 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 3470 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 3471 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 3472 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 3473 ArgNum = 4; 3474 HasRC = true; 3475 break; 3476 } 3477 3478 llvm::APSInt Result; 3479 3480 // We can't check the value of a dependent argument. 3481 Expr *Arg = TheCall->getArg(ArgNum); 3482 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3483 return false; 3484 3485 // Check constant-ness first. 3486 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3487 return true; 3488 3489 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 3490 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 3491 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding 3492 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 3493 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 3494 Result == 8/*ROUND_NO_EXC*/ || 3495 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) || 3496 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 3497 return false; 3498 3499 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 3500 << Arg->getSourceRange(); 3501 } 3502 3503 // Check if the gather/scatter scale is legal. 
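// For illustration: the scale immediate of the gather/scatter builtins handled
// below must be exactly 1, 2, 4, or 8; any other constant is diagnosed with
// err_x86_builtin_invalid_scale rather than silently accepted. A sketch, with
// the other operands elided:
//   __builtin_ia32_gatherd_pd(/* ...other operands... */, /*scale=*/3);  // rejected
//   __builtin_ia32_gatherd_pd(/* ...other operands... */, /*scale=*/8);  // accepted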
3504 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 3505 CallExpr *TheCall) { 3506 unsigned ArgNum = 0; 3507 switch (BuiltinID) { 3508 default: 3509 return false; 3510 case X86::BI__builtin_ia32_gatherpfdpd: 3511 case X86::BI__builtin_ia32_gatherpfdps: 3512 case X86::BI__builtin_ia32_gatherpfqpd: 3513 case X86::BI__builtin_ia32_gatherpfqps: 3514 case X86::BI__builtin_ia32_scatterpfdpd: 3515 case X86::BI__builtin_ia32_scatterpfdps: 3516 case X86::BI__builtin_ia32_scatterpfqpd: 3517 case X86::BI__builtin_ia32_scatterpfqps: 3518 ArgNum = 3; 3519 break; 3520 case X86::BI__builtin_ia32_gatherd_pd: 3521 case X86::BI__builtin_ia32_gatherd_pd256: 3522 case X86::BI__builtin_ia32_gatherq_pd: 3523 case X86::BI__builtin_ia32_gatherq_pd256: 3524 case X86::BI__builtin_ia32_gatherd_ps: 3525 case X86::BI__builtin_ia32_gatherd_ps256: 3526 case X86::BI__builtin_ia32_gatherq_ps: 3527 case X86::BI__builtin_ia32_gatherq_ps256: 3528 case X86::BI__builtin_ia32_gatherd_q: 3529 case X86::BI__builtin_ia32_gatherd_q256: 3530 case X86::BI__builtin_ia32_gatherq_q: 3531 case X86::BI__builtin_ia32_gatherq_q256: 3532 case X86::BI__builtin_ia32_gatherd_d: 3533 case X86::BI__builtin_ia32_gatherd_d256: 3534 case X86::BI__builtin_ia32_gatherq_d: 3535 case X86::BI__builtin_ia32_gatherq_d256: 3536 case X86::BI__builtin_ia32_gather3div2df: 3537 case X86::BI__builtin_ia32_gather3div2di: 3538 case X86::BI__builtin_ia32_gather3div4df: 3539 case X86::BI__builtin_ia32_gather3div4di: 3540 case X86::BI__builtin_ia32_gather3div4sf: 3541 case X86::BI__builtin_ia32_gather3div4si: 3542 case X86::BI__builtin_ia32_gather3div8sf: 3543 case X86::BI__builtin_ia32_gather3div8si: 3544 case X86::BI__builtin_ia32_gather3siv2df: 3545 case X86::BI__builtin_ia32_gather3siv2di: 3546 case X86::BI__builtin_ia32_gather3siv4df: 3547 case X86::BI__builtin_ia32_gather3siv4di: 3548 case X86::BI__builtin_ia32_gather3siv4sf: 3549 case X86::BI__builtin_ia32_gather3siv4si: 3550 case X86::BI__builtin_ia32_gather3siv8sf: 3551 case X86::BI__builtin_ia32_gather3siv8si: 3552 case X86::BI__builtin_ia32_gathersiv8df: 3553 case X86::BI__builtin_ia32_gathersiv16sf: 3554 case X86::BI__builtin_ia32_gatherdiv8df: 3555 case X86::BI__builtin_ia32_gatherdiv16sf: 3556 case X86::BI__builtin_ia32_gathersiv8di: 3557 case X86::BI__builtin_ia32_gathersiv16si: 3558 case X86::BI__builtin_ia32_gatherdiv8di: 3559 case X86::BI__builtin_ia32_gatherdiv16si: 3560 case X86::BI__builtin_ia32_scatterdiv2df: 3561 case X86::BI__builtin_ia32_scatterdiv2di: 3562 case X86::BI__builtin_ia32_scatterdiv4df: 3563 case X86::BI__builtin_ia32_scatterdiv4di: 3564 case X86::BI__builtin_ia32_scatterdiv4sf: 3565 case X86::BI__builtin_ia32_scatterdiv4si: 3566 case X86::BI__builtin_ia32_scatterdiv8sf: 3567 case X86::BI__builtin_ia32_scatterdiv8si: 3568 case X86::BI__builtin_ia32_scattersiv2df: 3569 case X86::BI__builtin_ia32_scattersiv2di: 3570 case X86::BI__builtin_ia32_scattersiv4df: 3571 case X86::BI__builtin_ia32_scattersiv4di: 3572 case X86::BI__builtin_ia32_scattersiv4sf: 3573 case X86::BI__builtin_ia32_scattersiv4si: 3574 case X86::BI__builtin_ia32_scattersiv8sf: 3575 case X86::BI__builtin_ia32_scattersiv8si: 3576 case X86::BI__builtin_ia32_scattersiv8df: 3577 case X86::BI__builtin_ia32_scattersiv16sf: 3578 case X86::BI__builtin_ia32_scatterdiv8df: 3579 case X86::BI__builtin_ia32_scatterdiv16sf: 3580 case X86::BI__builtin_ia32_scattersiv8di: 3581 case X86::BI__builtin_ia32_scattersiv16si: 3582 case X86::BI__builtin_ia32_scatterdiv8di: 3583 case X86::BI__builtin_ia32_scatterdiv16si: 3584 
  ArgNum = 4;
3585     break;
3586   }
3587
3588   llvm::APSInt Result;
3589
3590   // We can't check the value of a dependent argument.
3591   Expr *Arg = TheCall->getArg(ArgNum);
3592   if (Arg->isTypeDependent() || Arg->isValueDependent())
3593     return false;
3594
3595   // Check constant-ness first.
3596   if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3597     return true;
3598
3599   if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
3600     return false;
3601
3602   return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
3603          << Arg->getSourceRange();
3604 }
3605
3606 enum { TileRegLow = 0, TileRegHigh = 7 };
3607
3608 bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
3609                                              ArrayRef<int> ArgNums) {
3610   for (int ArgNum : ArgNums) {
3611     if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
3612       return true;
3613   }
3614   return false;
3615 }
3616
3617 bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, int ArgNum) {
3618   return SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh);
3619 }
3620
3621 bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
3622                                         ArrayRef<int> ArgNums) {
3623   // The maximum number of tile registers is TileRegHigh + 1, so use one bit
3624   // per register in a bitset to track which tile registers have been used.
3625   std::bitset<TileRegHigh + 1> ArgValues;
3626   for (int ArgNum : ArgNums) {
3627     llvm::APSInt Arg;
3628     SemaBuiltinConstantArg(TheCall, ArgNum, Arg);
3629     int ArgExtValue = Arg.getExtValue();
3630     assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
3631            "Incorrect tile register num.");
3632     if (ArgValues.test(ArgExtValue))
3633       return Diag(TheCall->getBeginLoc(),
3634                   diag::err_x86_builtin_tile_arg_duplicate)
3635              << TheCall->getArg(ArgNum)->getSourceRange();
3636     ArgValues.set(ArgExtValue);
3637   }
3638   return false;
3639 }
3640
3641 bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
3642                                                 ArrayRef<int> ArgNums) {
3643   return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
3644          CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
3645 }
3646
3647 bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
3648   switch (BuiltinID) {
3649   default:
3650     return false;
3651   case X86::BI__builtin_ia32_tileloadd64:
3652   case X86::BI__builtin_ia32_tileloaddt164:
3653   case X86::BI__builtin_ia32_tilestored64:
3654   case X86::BI__builtin_ia32_tilezero:
3655     return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
3656   case X86::BI__builtin_ia32_tdpbssd:
3657   case X86::BI__builtin_ia32_tdpbsud:
3658   case X86::BI__builtin_ia32_tdpbusd:
3659   case X86::BI__builtin_ia32_tdpbuud:
3660   case X86::BI__builtin_ia32_tdpbf16ps:
3661     return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
3662   }
3663 }
3664 static bool isX86_32Builtin(unsigned BuiltinID) {
3665   // These builtins only work on x86-32 targets.
3666   switch (BuiltinID) {
3667   case X86::BI__builtin_ia32_readeflags_u32:
3668   case X86::BI__builtin_ia32_writeeflags_u32:
3669     return true;
3670   }
3671
3672   return false;
3673 }
3674
3675 bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
3676                                        CallExpr *TheCall) {
3677   if (BuiltinID == X86::BI__builtin_cpu_supports)
3678     return SemaBuiltinCpuSupports(*this, TI, TheCall);
3679
3680   if (BuiltinID == X86::BI__builtin_cpu_is)
3681     return SemaBuiltinCpuIs(*this, TI, TheCall);
3682
3683   // Check for 32-bit only builtins on a 64-bit target.
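  // For illustration: when targeting x86-64, a 32-bit-only builtin such as
  //   unsigned Flags = __builtin_ia32_readeflags_u32();
  // is rejected here with err_32_bit_builtin_64_bit_tgt, since
  // isX86_32Builtin() lists it as available only when targeting 32-bit x86.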
3684 const llvm::Triple &TT = TI.getTriple(); 3685 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID)) 3686 return Diag(TheCall->getCallee()->getBeginLoc(), 3687 diag::err_32_bit_builtin_64_bit_tgt); 3688 3689 // If the intrinsic has rounding or SAE make sure its valid. 3690 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) 3691 return true; 3692 3693 // If the intrinsic has a gather/scatter scale immediate make sure its valid. 3694 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) 3695 return true; 3696 3697 // If the intrinsic has a tile arguments, make sure they are valid. 3698 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall)) 3699 return true; 3700 3701 // For intrinsics which take an immediate value as part of the instruction, 3702 // range check them here. 3703 int i = 0, l = 0, u = 0; 3704 switch (BuiltinID) { 3705 default: 3706 return false; 3707 case X86::BI__builtin_ia32_vec_ext_v2si: 3708 case X86::BI__builtin_ia32_vec_ext_v2di: 3709 case X86::BI__builtin_ia32_vextractf128_pd256: 3710 case X86::BI__builtin_ia32_vextractf128_ps256: 3711 case X86::BI__builtin_ia32_vextractf128_si256: 3712 case X86::BI__builtin_ia32_extract128i256: 3713 case X86::BI__builtin_ia32_extractf64x4_mask: 3714 case X86::BI__builtin_ia32_extracti64x4_mask: 3715 case X86::BI__builtin_ia32_extractf32x8_mask: 3716 case X86::BI__builtin_ia32_extracti32x8_mask: 3717 case X86::BI__builtin_ia32_extractf64x2_256_mask: 3718 case X86::BI__builtin_ia32_extracti64x2_256_mask: 3719 case X86::BI__builtin_ia32_extractf32x4_256_mask: 3720 case X86::BI__builtin_ia32_extracti32x4_256_mask: 3721 i = 1; l = 0; u = 1; 3722 break; 3723 case X86::BI__builtin_ia32_vec_set_v2di: 3724 case X86::BI__builtin_ia32_vinsertf128_pd256: 3725 case X86::BI__builtin_ia32_vinsertf128_ps256: 3726 case X86::BI__builtin_ia32_vinsertf128_si256: 3727 case X86::BI__builtin_ia32_insert128i256: 3728 case X86::BI__builtin_ia32_insertf32x8: 3729 case X86::BI__builtin_ia32_inserti32x8: 3730 case X86::BI__builtin_ia32_insertf64x4: 3731 case X86::BI__builtin_ia32_inserti64x4: 3732 case X86::BI__builtin_ia32_insertf64x2_256: 3733 case X86::BI__builtin_ia32_inserti64x2_256: 3734 case X86::BI__builtin_ia32_insertf32x4_256: 3735 case X86::BI__builtin_ia32_inserti32x4_256: 3736 i = 2; l = 0; u = 1; 3737 break; 3738 case X86::BI__builtin_ia32_vpermilpd: 3739 case X86::BI__builtin_ia32_vec_ext_v4hi: 3740 case X86::BI__builtin_ia32_vec_ext_v4si: 3741 case X86::BI__builtin_ia32_vec_ext_v4sf: 3742 case X86::BI__builtin_ia32_vec_ext_v4di: 3743 case X86::BI__builtin_ia32_extractf32x4_mask: 3744 case X86::BI__builtin_ia32_extracti32x4_mask: 3745 case X86::BI__builtin_ia32_extractf64x2_512_mask: 3746 case X86::BI__builtin_ia32_extracti64x2_512_mask: 3747 i = 1; l = 0; u = 3; 3748 break; 3749 case X86::BI_mm_prefetch: 3750 case X86::BI__builtin_ia32_vec_ext_v8hi: 3751 case X86::BI__builtin_ia32_vec_ext_v8si: 3752 i = 1; l = 0; u = 7; 3753 break; 3754 case X86::BI__builtin_ia32_sha1rnds4: 3755 case X86::BI__builtin_ia32_blendpd: 3756 case X86::BI__builtin_ia32_shufpd: 3757 case X86::BI__builtin_ia32_vec_set_v4hi: 3758 case X86::BI__builtin_ia32_vec_set_v4si: 3759 case X86::BI__builtin_ia32_vec_set_v4di: 3760 case X86::BI__builtin_ia32_shuf_f32x4_256: 3761 case X86::BI__builtin_ia32_shuf_f64x2_256: 3762 case X86::BI__builtin_ia32_shuf_i32x4_256: 3763 case X86::BI__builtin_ia32_shuf_i64x2_256: 3764 case X86::BI__builtin_ia32_insertf64x2_512: 3765 case X86::BI__builtin_ia32_inserti64x2_512: 3766 case X86::BI__builtin_ia32_insertf32x4: 3767 
case X86::BI__builtin_ia32_inserti32x4: 3768 i = 2; l = 0; u = 3; 3769 break; 3770 case X86::BI__builtin_ia32_vpermil2pd: 3771 case X86::BI__builtin_ia32_vpermil2pd256: 3772 case X86::BI__builtin_ia32_vpermil2ps: 3773 case X86::BI__builtin_ia32_vpermil2ps256: 3774 i = 3; l = 0; u = 3; 3775 break; 3776 case X86::BI__builtin_ia32_cmpb128_mask: 3777 case X86::BI__builtin_ia32_cmpw128_mask: 3778 case X86::BI__builtin_ia32_cmpd128_mask: 3779 case X86::BI__builtin_ia32_cmpq128_mask: 3780 case X86::BI__builtin_ia32_cmpb256_mask: 3781 case X86::BI__builtin_ia32_cmpw256_mask: 3782 case X86::BI__builtin_ia32_cmpd256_mask: 3783 case X86::BI__builtin_ia32_cmpq256_mask: 3784 case X86::BI__builtin_ia32_cmpb512_mask: 3785 case X86::BI__builtin_ia32_cmpw512_mask: 3786 case X86::BI__builtin_ia32_cmpd512_mask: 3787 case X86::BI__builtin_ia32_cmpq512_mask: 3788 case X86::BI__builtin_ia32_ucmpb128_mask: 3789 case X86::BI__builtin_ia32_ucmpw128_mask: 3790 case X86::BI__builtin_ia32_ucmpd128_mask: 3791 case X86::BI__builtin_ia32_ucmpq128_mask: 3792 case X86::BI__builtin_ia32_ucmpb256_mask: 3793 case X86::BI__builtin_ia32_ucmpw256_mask: 3794 case X86::BI__builtin_ia32_ucmpd256_mask: 3795 case X86::BI__builtin_ia32_ucmpq256_mask: 3796 case X86::BI__builtin_ia32_ucmpb512_mask: 3797 case X86::BI__builtin_ia32_ucmpw512_mask: 3798 case X86::BI__builtin_ia32_ucmpd512_mask: 3799 case X86::BI__builtin_ia32_ucmpq512_mask: 3800 case X86::BI__builtin_ia32_vpcomub: 3801 case X86::BI__builtin_ia32_vpcomuw: 3802 case X86::BI__builtin_ia32_vpcomud: 3803 case X86::BI__builtin_ia32_vpcomuq: 3804 case X86::BI__builtin_ia32_vpcomb: 3805 case X86::BI__builtin_ia32_vpcomw: 3806 case X86::BI__builtin_ia32_vpcomd: 3807 case X86::BI__builtin_ia32_vpcomq: 3808 case X86::BI__builtin_ia32_vec_set_v8hi: 3809 case X86::BI__builtin_ia32_vec_set_v8si: 3810 i = 2; l = 0; u = 7; 3811 break; 3812 case X86::BI__builtin_ia32_vpermilpd256: 3813 case X86::BI__builtin_ia32_roundps: 3814 case X86::BI__builtin_ia32_roundpd: 3815 case X86::BI__builtin_ia32_roundps256: 3816 case X86::BI__builtin_ia32_roundpd256: 3817 case X86::BI__builtin_ia32_getmantpd128_mask: 3818 case X86::BI__builtin_ia32_getmantpd256_mask: 3819 case X86::BI__builtin_ia32_getmantps128_mask: 3820 case X86::BI__builtin_ia32_getmantps256_mask: 3821 case X86::BI__builtin_ia32_getmantpd512_mask: 3822 case X86::BI__builtin_ia32_getmantps512_mask: 3823 case X86::BI__builtin_ia32_vec_ext_v16qi: 3824 case X86::BI__builtin_ia32_vec_ext_v16hi: 3825 i = 1; l = 0; u = 15; 3826 break; 3827 case X86::BI__builtin_ia32_pblendd128: 3828 case X86::BI__builtin_ia32_blendps: 3829 case X86::BI__builtin_ia32_blendpd256: 3830 case X86::BI__builtin_ia32_shufpd256: 3831 case X86::BI__builtin_ia32_roundss: 3832 case X86::BI__builtin_ia32_roundsd: 3833 case X86::BI__builtin_ia32_rangepd128_mask: 3834 case X86::BI__builtin_ia32_rangepd256_mask: 3835 case X86::BI__builtin_ia32_rangepd512_mask: 3836 case X86::BI__builtin_ia32_rangeps128_mask: 3837 case X86::BI__builtin_ia32_rangeps256_mask: 3838 case X86::BI__builtin_ia32_rangeps512_mask: 3839 case X86::BI__builtin_ia32_getmantsd_round_mask: 3840 case X86::BI__builtin_ia32_getmantss_round_mask: 3841 case X86::BI__builtin_ia32_vec_set_v16qi: 3842 case X86::BI__builtin_ia32_vec_set_v16hi: 3843 i = 2; l = 0; u = 15; 3844 break; 3845 case X86::BI__builtin_ia32_vec_ext_v32qi: 3846 i = 1; l = 0; u = 31; 3847 break; 3848 case X86::BI__builtin_ia32_cmpps: 3849 case X86::BI__builtin_ia32_cmpss: 3850 case X86::BI__builtin_ia32_cmppd: 3851 case X86::BI__builtin_ia32_cmpsd: 
3852 case X86::BI__builtin_ia32_cmpps256: 3853 case X86::BI__builtin_ia32_cmppd256: 3854 case X86::BI__builtin_ia32_cmpps128_mask: 3855 case X86::BI__builtin_ia32_cmppd128_mask: 3856 case X86::BI__builtin_ia32_cmpps256_mask: 3857 case X86::BI__builtin_ia32_cmppd256_mask: 3858 case X86::BI__builtin_ia32_cmpps512_mask: 3859 case X86::BI__builtin_ia32_cmppd512_mask: 3860 case X86::BI__builtin_ia32_cmpsd_mask: 3861 case X86::BI__builtin_ia32_cmpss_mask: 3862 case X86::BI__builtin_ia32_vec_set_v32qi: 3863 i = 2; l = 0; u = 31; 3864 break; 3865 case X86::BI__builtin_ia32_permdf256: 3866 case X86::BI__builtin_ia32_permdi256: 3867 case X86::BI__builtin_ia32_permdf512: 3868 case X86::BI__builtin_ia32_permdi512: 3869 case X86::BI__builtin_ia32_vpermilps: 3870 case X86::BI__builtin_ia32_vpermilps256: 3871 case X86::BI__builtin_ia32_vpermilpd512: 3872 case X86::BI__builtin_ia32_vpermilps512: 3873 case X86::BI__builtin_ia32_pshufd: 3874 case X86::BI__builtin_ia32_pshufd256: 3875 case X86::BI__builtin_ia32_pshufd512: 3876 case X86::BI__builtin_ia32_pshufhw: 3877 case X86::BI__builtin_ia32_pshufhw256: 3878 case X86::BI__builtin_ia32_pshufhw512: 3879 case X86::BI__builtin_ia32_pshuflw: 3880 case X86::BI__builtin_ia32_pshuflw256: 3881 case X86::BI__builtin_ia32_pshuflw512: 3882 case X86::BI__builtin_ia32_vcvtps2ph: 3883 case X86::BI__builtin_ia32_vcvtps2ph_mask: 3884 case X86::BI__builtin_ia32_vcvtps2ph256: 3885 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 3886 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 3887 case X86::BI__builtin_ia32_rndscaleps_128_mask: 3888 case X86::BI__builtin_ia32_rndscalepd_128_mask: 3889 case X86::BI__builtin_ia32_rndscaleps_256_mask: 3890 case X86::BI__builtin_ia32_rndscalepd_256_mask: 3891 case X86::BI__builtin_ia32_rndscaleps_mask: 3892 case X86::BI__builtin_ia32_rndscalepd_mask: 3893 case X86::BI__builtin_ia32_reducepd128_mask: 3894 case X86::BI__builtin_ia32_reducepd256_mask: 3895 case X86::BI__builtin_ia32_reducepd512_mask: 3896 case X86::BI__builtin_ia32_reduceps128_mask: 3897 case X86::BI__builtin_ia32_reduceps256_mask: 3898 case X86::BI__builtin_ia32_reduceps512_mask: 3899 case X86::BI__builtin_ia32_prold512: 3900 case X86::BI__builtin_ia32_prolq512: 3901 case X86::BI__builtin_ia32_prold128: 3902 case X86::BI__builtin_ia32_prold256: 3903 case X86::BI__builtin_ia32_prolq128: 3904 case X86::BI__builtin_ia32_prolq256: 3905 case X86::BI__builtin_ia32_prord512: 3906 case X86::BI__builtin_ia32_prorq512: 3907 case X86::BI__builtin_ia32_prord128: 3908 case X86::BI__builtin_ia32_prord256: 3909 case X86::BI__builtin_ia32_prorq128: 3910 case X86::BI__builtin_ia32_prorq256: 3911 case X86::BI__builtin_ia32_fpclasspd128_mask: 3912 case X86::BI__builtin_ia32_fpclasspd256_mask: 3913 case X86::BI__builtin_ia32_fpclassps128_mask: 3914 case X86::BI__builtin_ia32_fpclassps256_mask: 3915 case X86::BI__builtin_ia32_fpclassps512_mask: 3916 case X86::BI__builtin_ia32_fpclasspd512_mask: 3917 case X86::BI__builtin_ia32_fpclasssd_mask: 3918 case X86::BI__builtin_ia32_fpclassss_mask: 3919 case X86::BI__builtin_ia32_pslldqi128_byteshift: 3920 case X86::BI__builtin_ia32_pslldqi256_byteshift: 3921 case X86::BI__builtin_ia32_pslldqi512_byteshift: 3922 case X86::BI__builtin_ia32_psrldqi128_byteshift: 3923 case X86::BI__builtin_ia32_psrldqi256_byteshift: 3924 case X86::BI__builtin_ia32_psrldqi512_byteshift: 3925 case X86::BI__builtin_ia32_kshiftliqi: 3926 case X86::BI__builtin_ia32_kshiftlihi: 3927 case X86::BI__builtin_ia32_kshiftlisi: 3928 case X86::BI__builtin_ia32_kshiftlidi: 3929 case 
X86::BI__builtin_ia32_kshiftriqi: 3930 case X86::BI__builtin_ia32_kshiftrihi: 3931 case X86::BI__builtin_ia32_kshiftrisi: 3932 case X86::BI__builtin_ia32_kshiftridi: 3933 i = 1; l = 0; u = 255; 3934 break; 3935 case X86::BI__builtin_ia32_vperm2f128_pd256: 3936 case X86::BI__builtin_ia32_vperm2f128_ps256: 3937 case X86::BI__builtin_ia32_vperm2f128_si256: 3938 case X86::BI__builtin_ia32_permti256: 3939 case X86::BI__builtin_ia32_pblendw128: 3940 case X86::BI__builtin_ia32_pblendw256: 3941 case X86::BI__builtin_ia32_blendps256: 3942 case X86::BI__builtin_ia32_pblendd256: 3943 case X86::BI__builtin_ia32_palignr128: 3944 case X86::BI__builtin_ia32_palignr256: 3945 case X86::BI__builtin_ia32_palignr512: 3946 case X86::BI__builtin_ia32_alignq512: 3947 case X86::BI__builtin_ia32_alignd512: 3948 case X86::BI__builtin_ia32_alignd128: 3949 case X86::BI__builtin_ia32_alignd256: 3950 case X86::BI__builtin_ia32_alignq128: 3951 case X86::BI__builtin_ia32_alignq256: 3952 case X86::BI__builtin_ia32_vcomisd: 3953 case X86::BI__builtin_ia32_vcomiss: 3954 case X86::BI__builtin_ia32_shuf_f32x4: 3955 case X86::BI__builtin_ia32_shuf_f64x2: 3956 case X86::BI__builtin_ia32_shuf_i32x4: 3957 case X86::BI__builtin_ia32_shuf_i64x2: 3958 case X86::BI__builtin_ia32_shufpd512: 3959 case X86::BI__builtin_ia32_shufps: 3960 case X86::BI__builtin_ia32_shufps256: 3961 case X86::BI__builtin_ia32_shufps512: 3962 case X86::BI__builtin_ia32_dbpsadbw128: 3963 case X86::BI__builtin_ia32_dbpsadbw256: 3964 case X86::BI__builtin_ia32_dbpsadbw512: 3965 case X86::BI__builtin_ia32_vpshldd128: 3966 case X86::BI__builtin_ia32_vpshldd256: 3967 case X86::BI__builtin_ia32_vpshldd512: 3968 case X86::BI__builtin_ia32_vpshldq128: 3969 case X86::BI__builtin_ia32_vpshldq256: 3970 case X86::BI__builtin_ia32_vpshldq512: 3971 case X86::BI__builtin_ia32_vpshldw128: 3972 case X86::BI__builtin_ia32_vpshldw256: 3973 case X86::BI__builtin_ia32_vpshldw512: 3974 case X86::BI__builtin_ia32_vpshrdd128: 3975 case X86::BI__builtin_ia32_vpshrdd256: 3976 case X86::BI__builtin_ia32_vpshrdd512: 3977 case X86::BI__builtin_ia32_vpshrdq128: 3978 case X86::BI__builtin_ia32_vpshrdq256: 3979 case X86::BI__builtin_ia32_vpshrdq512: 3980 case X86::BI__builtin_ia32_vpshrdw128: 3981 case X86::BI__builtin_ia32_vpshrdw256: 3982 case X86::BI__builtin_ia32_vpshrdw512: 3983 i = 2; l = 0; u = 255; 3984 break; 3985 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3986 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3987 case X86::BI__builtin_ia32_fixupimmps512_mask: 3988 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3989 case X86::BI__builtin_ia32_fixupimmsd_mask: 3990 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3991 case X86::BI__builtin_ia32_fixupimmss_mask: 3992 case X86::BI__builtin_ia32_fixupimmss_maskz: 3993 case X86::BI__builtin_ia32_fixupimmpd128_mask: 3994 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 3995 case X86::BI__builtin_ia32_fixupimmpd256_mask: 3996 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 3997 case X86::BI__builtin_ia32_fixupimmps128_mask: 3998 case X86::BI__builtin_ia32_fixupimmps128_maskz: 3999 case X86::BI__builtin_ia32_fixupimmps256_mask: 4000 case X86::BI__builtin_ia32_fixupimmps256_maskz: 4001 case X86::BI__builtin_ia32_pternlogd512_mask: 4002 case X86::BI__builtin_ia32_pternlogd512_maskz: 4003 case X86::BI__builtin_ia32_pternlogq512_mask: 4004 case X86::BI__builtin_ia32_pternlogq512_maskz: 4005 case X86::BI__builtin_ia32_pternlogd128_mask: 4006 case X86::BI__builtin_ia32_pternlogd128_maskz: 4007 case X86::BI__builtin_ia32_pternlogd256_mask: 4008 
  case X86::BI__builtin_ia32_pternlogd256_maskz:
4009   case X86::BI__builtin_ia32_pternlogq128_mask:
4010   case X86::BI__builtin_ia32_pternlogq128_maskz:
4011   case X86::BI__builtin_ia32_pternlogq256_mask:
4012   case X86::BI__builtin_ia32_pternlogq256_maskz:
4013     i = 3; l = 0; u = 255;
4014     break;
4015   case X86::BI__builtin_ia32_gatherpfdpd:
4016   case X86::BI__builtin_ia32_gatherpfdps:
4017   case X86::BI__builtin_ia32_gatherpfqpd:
4018   case X86::BI__builtin_ia32_gatherpfqps:
4019   case X86::BI__builtin_ia32_scatterpfdpd:
4020   case X86::BI__builtin_ia32_scatterpfdps:
4021   case X86::BI__builtin_ia32_scatterpfqpd:
4022   case X86::BI__builtin_ia32_scatterpfqps:
4023     i = 4; l = 2; u = 3;
4024     break;
4025   case X86::BI__builtin_ia32_reducesd_mask:
4026   case X86::BI__builtin_ia32_reducess_mask:
4027   case X86::BI__builtin_ia32_rndscalesd_round_mask:
4028   case X86::BI__builtin_ia32_rndscaless_round_mask:
4029     i = 4; l = 0; u = 255;
4030     break;
4031   }
4032
4033   // Note that we don't force a hard error on the range check here, allowing
4034   // template-generated or macro-generated dead code to potentially have out-of-
4035   // range values. These still need to code generate, but they don't
4036   // necessarily need to make any sense. We use a warning that defaults to an
4036   // error.
4037   return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
4038 }
4039
4040 /// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
4041 /// parameter with the FormatAttr's correct format_idx and firstDataArg.
4042 /// Returns true when the format fits the function and the FormatStringInfo has
4043 /// been populated.
4044 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
4045                                FormatStringInfo *FSI) {
4046   FSI->HasVAListArg = Format->getFirstArg() == 0;
4047   FSI->FormatIdx = Format->getFormatIdx() - 1;
4048   FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
4049
4050   // The way the format attribute works in GCC, the implicit this argument
4051   // of member functions is counted. However, it doesn't appear in our own
4052   // lists, so decrement format_idx in that case.
4053   if (IsCXXMember) {
4054     if (FSI->FormatIdx == 0)
4055       return false;
4056     --FSI->FormatIdx;
4057     if (FSI->FirstDataArg != 0)
4058       --FSI->FirstDataArg;
4059   }
4060   return true;
4061 }
4062
4063 /// Checks if the given expression evaluates to null.
4064 ///
4065 /// Returns true if the value evaluates to null.
4066 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
4067   // If the expression has non-null type, it doesn't evaluate to null.
4068   if (auto nullability
4069         = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
4070     if (*nullability == NullabilityKind::NonNull)
4071       return false;
4072   }
4073
4074   // As a special case, transparent unions initialized with zero are
4075   // considered null for the purposes of the nonnull attribute.
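  // For illustration (TU and f are made-up names, not from this file):
  //   typedef union { int *ip; long i; } __attribute__((transparent_union)) TU;
  //   void f(TU) __attribute__((nonnull));
  //   f((TU){ 0 });   // the zero-initialized literal may be treated as a
  //                   // null argument by the check below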
4076 if (const RecordType *UT = Expr->getType()->getAsUnionType()) { 4077 if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) 4078 if (const CompoundLiteralExpr *CLE = 4079 dyn_cast<CompoundLiteralExpr>(Expr)) 4080 if (const InitListExpr *ILE = 4081 dyn_cast<InitListExpr>(CLE->getInitializer())) 4082 Expr = ILE->getInit(0); 4083 } 4084 4085 bool Result; 4086 return (!Expr->isValueDependent() && 4087 Expr->EvaluateAsBooleanCondition(Result, S.Context) && 4088 !Result); 4089 } 4090 4091 static void CheckNonNullArgument(Sema &S, 4092 const Expr *ArgExpr, 4093 SourceLocation CallSiteLoc) { 4094 if (CheckNonNullExpr(S, ArgExpr)) 4095 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, 4096 S.PDiag(diag::warn_null_arg) 4097 << ArgExpr->getSourceRange()); 4098 } 4099 4100 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { 4101 FormatStringInfo FSI; 4102 if ((GetFormatStringType(Format) == FST_NSString) && 4103 getFormatStringInfo(Format, false, &FSI)) { 4104 Idx = FSI.FormatIdx; 4105 return true; 4106 } 4107 return false; 4108 } 4109 4110 /// Diagnose use of %s directive in an NSString which is being passed 4111 /// as formatting string to formatting method. 4112 static void 4113 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, 4114 const NamedDecl *FDecl, 4115 Expr **Args, 4116 unsigned NumArgs) { 4117 unsigned Idx = 0; 4118 bool Format = false; 4119 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); 4120 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { 4121 Idx = 2; 4122 Format = true; 4123 } 4124 else 4125 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4126 if (S.GetFormatNSStringIdx(I, Idx)) { 4127 Format = true; 4128 break; 4129 } 4130 } 4131 if (!Format || NumArgs <= Idx) 4132 return; 4133 const Expr *FormatExpr = Args[Idx]; 4134 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) 4135 FormatExpr = CSCE->getSubExpr(); 4136 const StringLiteral *FormatString; 4137 if (const ObjCStringLiteral *OSL = 4138 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) 4139 FormatString = OSL->getString(); 4140 else 4141 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); 4142 if (!FormatString) 4143 return; 4144 if (S.FormatStringHasSArg(FormatString)) { 4145 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) 4146 << "%s" << 1 << 1; 4147 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) 4148 << FDecl->getDeclName(); 4149 } 4150 } 4151 4152 /// Determine whether the given type has a non-null nullability annotation. 4153 static bool isNonNullType(ASTContext &ctx, QualType type) { 4154 if (auto nullability = type->getNullability(ctx)) 4155 return *nullability == NullabilityKind::NonNull; 4156 4157 return false; 4158 } 4159 4160 static void CheckNonNullArguments(Sema &S, 4161 const NamedDecl *FDecl, 4162 const FunctionProtoType *Proto, 4163 ArrayRef<const Expr *> Args, 4164 SourceLocation CallSiteLoc) { 4165 assert((FDecl || Proto) && "Need a function declaration or prototype"); 4166 4167 // Already checked by by constant evaluator. 4168 if (S.isConstantEvaluated()) 4169 return; 4170 // Check the attributes attached to the method/function itself. 4171 llvm::SmallBitVector NonNullArgs; 4172 if (FDecl) { 4173 // Handle the nonnull attribute on the function/method declaration itself. 4174 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 4175 if (!NonNull->args_size()) { 4176 // Easy case: all pointer arguments are nonnull. 
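      // For illustration (g and Buf are made-up names): a no-argument nonnull
      // attribute covers every pointer parameter, so given
      //   __attribute__((nonnull)) void g(int *P, unsigned N, char *Q);
      //   g(nullptr, 0, Buf);   // 'P' is diagnosed; 'N' is skipped since it
      //                         // is not a pointer
      // only the pointer-typed arguments are passed to CheckNonNullArgument.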
4177 for (const auto *Arg : Args) 4178 if (S.isValidPointerAttrType(Arg->getType())) 4179 CheckNonNullArgument(S, Arg, CallSiteLoc); 4180 return; 4181 } 4182 4183 for (const ParamIdx &Idx : NonNull->args()) { 4184 unsigned IdxAST = Idx.getASTIndex(); 4185 if (IdxAST >= Args.size()) 4186 continue; 4187 if (NonNullArgs.empty()) 4188 NonNullArgs.resize(Args.size()); 4189 NonNullArgs.set(IdxAST); 4190 } 4191 } 4192 } 4193 4194 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 4195 // Handle the nonnull attribute on the parameters of the 4196 // function/method. 4197 ArrayRef<ParmVarDecl*> parms; 4198 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 4199 parms = FD->parameters(); 4200 else 4201 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 4202 4203 unsigned ParamIndex = 0; 4204 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 4205 I != E; ++I, ++ParamIndex) { 4206 const ParmVarDecl *PVD = *I; 4207 if (PVD->hasAttr<NonNullAttr>() || 4208 isNonNullType(S.Context, PVD->getType())) { 4209 if (NonNullArgs.empty()) 4210 NonNullArgs.resize(Args.size()); 4211 4212 NonNullArgs.set(ParamIndex); 4213 } 4214 } 4215 } else { 4216 // If we have a non-function, non-method declaration but no 4217 // function prototype, try to dig out the function prototype. 4218 if (!Proto) { 4219 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 4220 QualType type = VD->getType().getNonReferenceType(); 4221 if (auto pointerType = type->getAs<PointerType>()) 4222 type = pointerType->getPointeeType(); 4223 else if (auto blockType = type->getAs<BlockPointerType>()) 4224 type = blockType->getPointeeType(); 4225 // FIXME: data member pointers? 4226 4227 // Dig out the function prototype, if there is one. 4228 Proto = type->getAs<FunctionProtoType>(); 4229 } 4230 } 4231 4232 // Fill in non-null argument information from the nullability 4233 // information on the parameter types (if we have them). 4234 if (Proto) { 4235 unsigned Index = 0; 4236 for (auto paramType : Proto->getParamTypes()) { 4237 if (isNonNullType(S.Context, paramType)) { 4238 if (NonNullArgs.empty()) 4239 NonNullArgs.resize(Args.size()); 4240 4241 NonNullArgs.set(Index); 4242 } 4243 4244 ++Index; 4245 } 4246 } 4247 } 4248 4249 // Check for non-null arguments. 4250 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 4251 ArgIndex != ArgIndexEnd; ++ArgIndex) { 4252 if (NonNullArgs[ArgIndex]) 4253 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 4254 } 4255 } 4256 4257 /// Handles the checks for format strings, non-POD arguments to vararg 4258 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 4259 /// attributes. 4260 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 4261 const Expr *ThisArg, ArrayRef<const Expr *> Args, 4262 bool IsMemberFunction, SourceLocation Loc, 4263 SourceRange Range, VariadicCallType CallType) { 4264 // FIXME: We should check as much as we can in the template definition. 4265 if (CurContext->isDependentContext()) 4266 return; 4267 4268 // Printf and scanf checking. 4269 llvm::SmallBitVector CheckedVarArgs; 4270 if (FDecl) { 4271 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4272 // Only create vector if there are format attributes. 4273 CheckedVarArgs.resize(Args.size()); 4274 4275 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 4276 CheckedVarArgs); 4277 } 4278 } 4279 4280 // Refuse POD arguments that weren't caught by the format string 4281 // checks above. 
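  // For illustration (log_all is a made-up declaration): arguments in the
  // variadic part of a call that were not already consumed by a format-string
  // check are handed to checkVariadicArgument, which rejects values that
  // cannot legitimately travel through '...', e.g.
  //   void log_all(int n, ...);
  //   log_all(1, std::string("x"));   // non-trivial class object through '...'
  // would be expected to be diagnosed there.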
4282 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 4283 if (CallType != VariadicDoesNotApply && 4284 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 4285 unsigned NumParams = Proto ? Proto->getNumParams() 4286 : FDecl && isa<FunctionDecl>(FDecl) 4287 ? cast<FunctionDecl>(FDecl)->getNumParams() 4288 : FDecl && isa<ObjCMethodDecl>(FDecl) 4289 ? cast<ObjCMethodDecl>(FDecl)->param_size() 4290 : 0; 4291 4292 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 4293 // Args[ArgIdx] can be null in malformed code. 4294 if (const Expr *Arg = Args[ArgIdx]) { 4295 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 4296 checkVariadicArgument(Arg, CallType); 4297 } 4298 } 4299 } 4300 4301 if (FDecl || Proto) { 4302 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 4303 4304 // Type safety checking. 4305 if (FDecl) { 4306 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 4307 CheckArgumentWithTypeTag(I, Args, Loc); 4308 } 4309 } 4310 4311 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 4312 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 4313 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 4314 if (!Arg->isValueDependent()) { 4315 Expr::EvalResult Align; 4316 if (Arg->EvaluateAsInt(Align, Context)) { 4317 const llvm::APSInt &I = Align.Val.getInt(); 4318 if (!I.isPowerOf2()) 4319 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 4320 << Arg->getSourceRange(); 4321 4322 if (I > Sema::MaximumAlignment) 4323 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 4324 << Arg->getSourceRange() << Sema::MaximumAlignment; 4325 } 4326 } 4327 } 4328 4329 if (FD) 4330 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 4331 } 4332 4333 /// CheckConstructorCall - Check a constructor call for correctness and safety 4334 /// properties not enforced by the C type system. 4335 void Sema::CheckConstructorCall(FunctionDecl *FDecl, 4336 ArrayRef<const Expr *> Args, 4337 const FunctionProtoType *Proto, 4338 SourceLocation Loc) { 4339 VariadicCallType CallType = 4340 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 4341 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 4342 Loc, SourceRange(), CallType); 4343 } 4344 4345 /// CheckFunctionCall - Check a direct function call for various correctness 4346 /// and safety properties not strictly enforced by the C type system. 4347 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 4348 const FunctionProtoType *Proto) { 4349 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 4350 isa<CXXMethodDecl>(FDecl); 4351 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 4352 IsMemberOperatorCall; 4353 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 4354 TheCall->getCallee()); 4355 Expr** Args = TheCall->getArgs(); 4356 unsigned NumArgs = TheCall->getNumArgs(); 4357 4358 Expr *ImplicitThis = nullptr; 4359 if (IsMemberOperatorCall) { 4360 // If this is a call to a member operator, hide the first argument 4361 // from checkCall. 4362 // FIXME: Our choice of AST representation here is less than ideal. 
4363 ImplicitThis = Args[0]; 4364 ++Args; 4365 --NumArgs; 4366 } else if (IsMemberFunction) 4367 ImplicitThis = 4368 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 4369 4370 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 4371 IsMemberFunction, TheCall->getRParenLoc(), 4372 TheCall->getCallee()->getSourceRange(), CallType); 4373 4374 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 4375 // None of the checks below are needed for functions that don't have 4376 // simple names (e.g., C++ conversion functions). 4377 if (!FnInfo) 4378 return false; 4379 4380 CheckAbsoluteValueFunction(TheCall, FDecl); 4381 CheckMaxUnsignedZero(TheCall, FDecl); 4382 4383 if (getLangOpts().ObjC) 4384 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 4385 4386 unsigned CMId = FDecl->getMemoryFunctionKind(); 4387 if (CMId == 0) 4388 return false; 4389 4390 // Handle memory setting and copying functions. 4391 if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat) 4392 CheckStrlcpycatArguments(TheCall, FnInfo); 4393 else if (CMId == Builtin::BIstrncat) 4394 CheckStrncatArguments(TheCall, FnInfo); 4395 else 4396 CheckMemaccessArguments(TheCall, CMId, FnInfo); 4397 4398 return false; 4399 } 4400 4401 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 4402 ArrayRef<const Expr *> Args) { 4403 VariadicCallType CallType = 4404 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 4405 4406 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 4407 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 4408 CallType); 4409 4410 return false; 4411 } 4412 4413 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 4414 const FunctionProtoType *Proto) { 4415 QualType Ty; 4416 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 4417 Ty = V->getType().getNonReferenceType(); 4418 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 4419 Ty = F->getType().getNonReferenceType(); 4420 else 4421 return false; 4422 4423 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 4424 !Ty->isFunctionProtoType()) 4425 return false; 4426 4427 VariadicCallType CallType; 4428 if (!Proto || !Proto->isVariadic()) { 4429 CallType = VariadicDoesNotApply; 4430 } else if (Ty->isBlockPointerType()) { 4431 CallType = VariadicBlock; 4432 } else { // Ty->isFunctionPointerType() 4433 CallType = VariadicFunction; 4434 } 4435 4436 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 4437 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4438 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4439 TheCall->getCallee()->getSourceRange(), CallType); 4440 4441 return false; 4442 } 4443 4444 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 4445 /// such as function pointers returned from functions. 
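// An illustrative case (editor's sketch, not from the original source): the call
// below goes through CheckOtherCall because its callee is neither a declared
// function nor a named variable or field, only a value of function-pointer type:
//
//   void (*getLogger(void))(const char *Msg);
//   void Use(void) {
//     getLogger()("starting");   // checked against the prototype alone
//   }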
4446 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 4447 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 4448 TheCall->getCallee()); 4449 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 4450 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4451 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4452 TheCall->getCallee()->getSourceRange(), CallType); 4453 4454 return false; 4455 } 4456 4457 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 4458 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 4459 return false; 4460 4461 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 4462 switch (Op) { 4463 case AtomicExpr::AO__c11_atomic_init: 4464 case AtomicExpr::AO__opencl_atomic_init: 4465 llvm_unreachable("There is no ordering argument for an init"); 4466 4467 case AtomicExpr::AO__c11_atomic_load: 4468 case AtomicExpr::AO__opencl_atomic_load: 4469 case AtomicExpr::AO__atomic_load_n: 4470 case AtomicExpr::AO__atomic_load: 4471 return OrderingCABI != llvm::AtomicOrderingCABI::release && 4472 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4473 4474 case AtomicExpr::AO__c11_atomic_store: 4475 case AtomicExpr::AO__opencl_atomic_store: 4476 case AtomicExpr::AO__atomic_store: 4477 case AtomicExpr::AO__atomic_store_n: 4478 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 4479 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 4480 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4481 4482 default: 4483 return true; 4484 } 4485 } 4486 4487 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 4488 AtomicExpr::AtomicOp Op) { 4489 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 4490 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 4491 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 4492 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 4493 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 4494 Op); 4495 } 4496 4497 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 4498 SourceLocation RParenLoc, MultiExprArg Args, 4499 AtomicExpr::AtomicOp Op, 4500 AtomicArgumentOrder ArgOrder) { 4501 // All the non-OpenCL operations take one of the following forms. 4502 // The OpenCL operations take the __c11 forms with one extra argument for 4503 // synchronization scope. 
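  // A few concrete calls matching these forms, for illustration (editor's
  // sketch; the __ATOMIC_* ordering macros are the usual compiler-provided
  // ones and are an assumption of the example, not something this code uses):
  //
  //   _Atomic(int) A; int X, Expected = 0;
  //   __c11_atomic_init(&A, 0);                                 // Init
  //   X = __c11_atomic_load(&A, __ATOMIC_ACQUIRE);              // Load
  //   __atomic_store(&X, &Expected, __ATOMIC_RELEASE);          // Copy
  //   X = __c11_atomic_fetch_add(&A, 1, __ATOMIC_RELAXED);      // Arithmetic
  //   __c11_atomic_compare_exchange_strong(&A, &Expected, 1,
  //                                        __ATOMIC_SEQ_CST,
  //                                        __ATOMIC_RELAXED);   // C11CmpXchg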
4504 enum { 4505 // C __c11_atomic_init(A *, C) 4506 Init, 4507 4508 // C __c11_atomic_load(A *, int) 4509 Load, 4510 4511 // void __atomic_load(A *, CP, int) 4512 LoadCopy, 4513 4514 // void __atomic_store(A *, CP, int) 4515 Copy, 4516 4517 // C __c11_atomic_add(A *, M, int) 4518 Arithmetic, 4519 4520 // C __atomic_exchange_n(A *, CP, int) 4521 Xchg, 4522 4523 // void __atomic_exchange(A *, C *, CP, int) 4524 GNUXchg, 4525 4526 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 4527 C11CmpXchg, 4528 4529 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 4530 GNUCmpXchg 4531 } Form = Init; 4532 4533 const unsigned NumForm = GNUCmpXchg + 1; 4534 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 4535 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 4536 // where: 4537 // C is an appropriate type, 4538 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 4539 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 4540 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 4541 // the int parameters are for orderings. 4542 4543 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 4544 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 4545 "need to update code for modified forms"); 4546 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 4547 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 4548 AtomicExpr::AO__atomic_load, 4549 "need to update code for modified C11 atomics"); 4550 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 4551 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 4552 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 4553 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 4554 IsOpenCL; 4555 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 4556 Op == AtomicExpr::AO__atomic_store_n || 4557 Op == AtomicExpr::AO__atomic_exchange_n || 4558 Op == AtomicExpr::AO__atomic_compare_exchange_n; 4559 bool IsAddSub = false; 4560 4561 switch (Op) { 4562 case AtomicExpr::AO__c11_atomic_init: 4563 case AtomicExpr::AO__opencl_atomic_init: 4564 Form = Init; 4565 break; 4566 4567 case AtomicExpr::AO__c11_atomic_load: 4568 case AtomicExpr::AO__opencl_atomic_load: 4569 case AtomicExpr::AO__atomic_load_n: 4570 Form = Load; 4571 break; 4572 4573 case AtomicExpr::AO__atomic_load: 4574 Form = LoadCopy; 4575 break; 4576 4577 case AtomicExpr::AO__c11_atomic_store: 4578 case AtomicExpr::AO__opencl_atomic_store: 4579 case AtomicExpr::AO__atomic_store: 4580 case AtomicExpr::AO__atomic_store_n: 4581 Form = Copy; 4582 break; 4583 4584 case AtomicExpr::AO__c11_atomic_fetch_add: 4585 case AtomicExpr::AO__c11_atomic_fetch_sub: 4586 case AtomicExpr::AO__opencl_atomic_fetch_add: 4587 case AtomicExpr::AO__opencl_atomic_fetch_sub: 4588 case AtomicExpr::AO__atomic_fetch_add: 4589 case AtomicExpr::AO__atomic_fetch_sub: 4590 case AtomicExpr::AO__atomic_add_fetch: 4591 case AtomicExpr::AO__atomic_sub_fetch: 4592 IsAddSub = true; 4593 LLVM_FALLTHROUGH; 4594 case AtomicExpr::AO__c11_atomic_fetch_and: 4595 case AtomicExpr::AO__c11_atomic_fetch_or: 4596 case AtomicExpr::AO__c11_atomic_fetch_xor: 4597 case AtomicExpr::AO__opencl_atomic_fetch_and: 4598 case AtomicExpr::AO__opencl_atomic_fetch_or: 4599 case AtomicExpr::AO__opencl_atomic_fetch_xor: 4600 case AtomicExpr::AO__atomic_fetch_and: 4601 case AtomicExpr::AO__atomic_fetch_or: 4602 case AtomicExpr::AO__atomic_fetch_xor: 4603 case AtomicExpr::AO__atomic_fetch_nand: 4604 case AtomicExpr::AO__atomic_and_fetch: 4605 case AtomicExpr::AO__atomic_or_fetch: 4606 
case AtomicExpr::AO__atomic_xor_fetch: 4607 case AtomicExpr::AO__atomic_nand_fetch: 4608 case AtomicExpr::AO__c11_atomic_fetch_min: 4609 case AtomicExpr::AO__c11_atomic_fetch_max: 4610 case AtomicExpr::AO__opencl_atomic_fetch_min: 4611 case AtomicExpr::AO__opencl_atomic_fetch_max: 4612 case AtomicExpr::AO__atomic_min_fetch: 4613 case AtomicExpr::AO__atomic_max_fetch: 4614 case AtomicExpr::AO__atomic_fetch_min: 4615 case AtomicExpr::AO__atomic_fetch_max: 4616 Form = Arithmetic; 4617 break; 4618 4619 case AtomicExpr::AO__c11_atomic_exchange: 4620 case AtomicExpr::AO__opencl_atomic_exchange: 4621 case AtomicExpr::AO__atomic_exchange_n: 4622 Form = Xchg; 4623 break; 4624 4625 case AtomicExpr::AO__atomic_exchange: 4626 Form = GNUXchg; 4627 break; 4628 4629 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 4630 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 4631 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 4632 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 4633 Form = C11CmpXchg; 4634 break; 4635 4636 case AtomicExpr::AO__atomic_compare_exchange: 4637 case AtomicExpr::AO__atomic_compare_exchange_n: 4638 Form = GNUCmpXchg; 4639 break; 4640 } 4641 4642 unsigned AdjustedNumArgs = NumArgs[Form]; 4643 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init) 4644 ++AdjustedNumArgs; 4645 // Check we have the right number of arguments. 4646 if (Args.size() < AdjustedNumArgs) { 4647 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 4648 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 4649 << ExprRange; 4650 return ExprError(); 4651 } else if (Args.size() > AdjustedNumArgs) { 4652 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 4653 diag::err_typecheck_call_too_many_args) 4654 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 4655 << ExprRange; 4656 return ExprError(); 4657 } 4658 4659 // Inspect the first argument of the atomic operation. 4660 Expr *Ptr = Args[0]; 4661 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 4662 if (ConvertedPtr.isInvalid()) 4663 return ExprError(); 4664 4665 Ptr = ConvertedPtr.get(); 4666 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 4667 if (!pointerType) { 4668 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 4669 << Ptr->getType() << Ptr->getSourceRange(); 4670 return ExprError(); 4671 } 4672 4673 // For a __c11 builtin, this should be a pointer to an _Atomic type. 4674 QualType AtomTy = pointerType->getPointeeType(); // 'A' 4675 QualType ValType = AtomTy; // 'C' 4676 if (IsC11) { 4677 if (!AtomTy->isAtomicType()) { 4678 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 4679 << Ptr->getType() << Ptr->getSourceRange(); 4680 return ExprError(); 4681 } 4682 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 4683 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 4684 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 4685 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 4686 << Ptr->getSourceRange(); 4687 return ExprError(); 4688 } 4689 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 4690 } else if (Form != Load && Form != LoadCopy) { 4691 if (ValType.isConstQualified()) { 4692 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 4693 << Ptr->getType() << Ptr->getSourceRange(); 4694 return ExprError(); 4695 } 4696 } 4697 4698 // For an arithmetic operation, the implied arithmetic must be well-formed. 
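  // For example (illustrative, not from the original source), given
  //   _Atomic(int *) P;  _Atomic(struct S) Obj;
  // __c11_atomic_fetch_add(&P, 1, __ATOMIC_RELAXED) is accepted (add/sub allow
  // integer or pointer element types), while __c11_atomic_fetch_and(&P, 1, ...)
  // and any fetch_add on &Obj are rejected by the checks below, since the
  // remaining arithmetic operations require an integer element type.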
4699 if (Form == Arithmetic) { 4700 // gcc does not enforce these rules for GNU atomics, but we do so for sanity. 4701 if (IsAddSub && !ValType->isIntegerType() 4702 && !ValType->isPointerType()) { 4703 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4704 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4705 return ExprError(); 4706 } 4707 if (!IsAddSub && !ValType->isIntegerType()) { 4708 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 4709 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4710 return ExprError(); 4711 } 4712 if (IsC11 && ValType->isPointerType() && 4713 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 4714 diag::err_incomplete_type)) { 4715 return ExprError(); 4716 } 4717 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 4718 // For __atomic_*_n operations, the value type must be a scalar integral or 4719 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 4720 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4721 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4722 return ExprError(); 4723 } 4724 4725 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 4726 !AtomTy->isScalarType()) { 4727 // For GNU atomics, require a trivially-copyable type. This is not part of 4728 // the GNU atomics specification, but we enforce it for sanity. 4729 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 4730 << Ptr->getType() << Ptr->getSourceRange(); 4731 return ExprError(); 4732 } 4733 4734 switch (ValType.getObjCLifetime()) { 4735 case Qualifiers::OCL_None: 4736 case Qualifiers::OCL_ExplicitNone: 4737 // okay 4738 break; 4739 4740 case Qualifiers::OCL_Weak: 4741 case Qualifiers::OCL_Strong: 4742 case Qualifiers::OCL_Autoreleasing: 4743 // FIXME: Can this happen? By this point, ValType should be known 4744 // to be trivially copyable. 4745 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 4746 << ValType << Ptr->getSourceRange(); 4747 return ExprError(); 4748 } 4749 4750 // All atomic operations have an overload which takes a pointer to a volatile 4751 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 4752 // into the result or the other operands. Similarly atomic_load takes a 4753 // pointer to a const 'A'. 4754 ValType.removeLocalVolatile(); 4755 ValType.removeLocalConst(); 4756 QualType ResultType = ValType; 4757 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 4758 Form == Init) 4759 ResultType = Context.VoidTy; 4760 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 4761 ResultType = Context.BoolTy; 4762 4763 // The type of a parameter passed 'by value'. In the GNU atomics, such 4764 // arguments are actually passed as pointers. 
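  // Concretely (an illustrative example, not from the original source), for an
  // int object Obj and value V:
  //   __atomic_store_n(&Obj, V, __ATOMIC_RELEASE);  // 'CP' is C: V passed by value
  //   __atomic_store(&Obj, &V, __ATOMIC_RELEASE);   // 'CP' is C *: V passed by address
  // which is why ByValType becomes the pointer type itself for the non-_n GNU
  // forms below.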
4765 QualType ByValType = ValType; // 'CP' 4766 bool IsPassedByAddress = false; 4767 if (!IsC11 && !IsN) { 4768 ByValType = Ptr->getType(); 4769 IsPassedByAddress = true; 4770 } 4771 4772 SmallVector<Expr *, 5> APIOrderedArgs; 4773 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 4774 APIOrderedArgs.push_back(Args[0]); 4775 switch (Form) { 4776 case Init: 4777 case Load: 4778 APIOrderedArgs.push_back(Args[1]); // Val1/Order 4779 break; 4780 case LoadCopy: 4781 case Copy: 4782 case Arithmetic: 4783 case Xchg: 4784 APIOrderedArgs.push_back(Args[2]); // Val1 4785 APIOrderedArgs.push_back(Args[1]); // Order 4786 break; 4787 case GNUXchg: 4788 APIOrderedArgs.push_back(Args[2]); // Val1 4789 APIOrderedArgs.push_back(Args[3]); // Val2 4790 APIOrderedArgs.push_back(Args[1]); // Order 4791 break; 4792 case C11CmpXchg: 4793 APIOrderedArgs.push_back(Args[2]); // Val1 4794 APIOrderedArgs.push_back(Args[4]); // Val2 4795 APIOrderedArgs.push_back(Args[1]); // Order 4796 APIOrderedArgs.push_back(Args[3]); // OrderFail 4797 break; 4798 case GNUCmpXchg: 4799 APIOrderedArgs.push_back(Args[2]); // Val1 4800 APIOrderedArgs.push_back(Args[4]); // Val2 4801 APIOrderedArgs.push_back(Args[5]); // Weak 4802 APIOrderedArgs.push_back(Args[1]); // Order 4803 APIOrderedArgs.push_back(Args[3]); // OrderFail 4804 break; 4805 } 4806 } else 4807 APIOrderedArgs.append(Args.begin(), Args.end()); 4808 4809 // The first argument's non-CV pointer type is used to deduce the type of 4810 // subsequent arguments, except for: 4811 // - weak flag (always converted to bool) 4812 // - memory order (always converted to int) 4813 // - scope (always converted to int) 4814 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 4815 QualType Ty; 4816 if (i < NumVals[Form] + 1) { 4817 switch (i) { 4818 case 0: 4819 // The first argument is always a pointer. It has a fixed type. 4820 // It is always dereferenced, a nullptr is undefined. 4821 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 4822 // Nothing else to do: we already know all we want about this pointer. 4823 continue; 4824 case 1: 4825 // The second argument is the non-atomic operand. For arithmetic, this 4826 // is always passed by value, and for a compare_exchange it is always 4827 // passed by address. For the rest, GNU uses by-address and C11 uses 4828 // by-value. 4829 assert(Form != Load); 4830 if (Form == Init || (Form == Arithmetic && ValType->isIntegerType())) 4831 Ty = ValType; 4832 else if (Form == Copy || Form == Xchg) { 4833 if (IsPassedByAddress) { 4834 // The value pointer is always dereferenced, a nullptr is undefined. 4835 CheckNonNullArgument(*this, APIOrderedArgs[i], 4836 ExprRange.getBegin()); 4837 } 4838 Ty = ByValType; 4839 } else if (Form == Arithmetic) 4840 Ty = Context.getPointerDiffType(); 4841 else { 4842 Expr *ValArg = APIOrderedArgs[i]; 4843 // The value pointer is always dereferenced, a nullptr is undefined. 4844 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 4845 LangAS AS = LangAS::Default; 4846 // Keep address space of non-atomic pointer type. 4847 if (const PointerType *PtrTy = 4848 ValArg->getType()->getAs<PointerType>()) { 4849 AS = PtrTy->getPointeeType().getAddressSpace(); 4850 } 4851 Ty = Context.getPointerType( 4852 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 4853 } 4854 break; 4855 case 2: 4856 // The third argument to compare_exchange / GNU exchange is the desired 4857 // value, either by-value (for the C11 and *_n variant) or as a pointer. 
4858 if (IsPassedByAddress) 4859 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 4860 Ty = ByValType; 4861 break; 4862 case 3: 4863 // The fourth argument to GNU compare_exchange is a 'weak' flag. 4864 Ty = Context.BoolTy; 4865 break; 4866 } 4867 } else { 4868 // The order(s) and scope are always converted to int. 4869 Ty = Context.IntTy; 4870 } 4871 4872 InitializedEntity Entity = 4873 InitializedEntity::InitializeParameter(Context, Ty, false); 4874 ExprResult Arg = APIOrderedArgs[i]; 4875 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 4876 if (Arg.isInvalid()) 4877 return true; 4878 APIOrderedArgs[i] = Arg.get(); 4879 } 4880 4881 // Permute the arguments into a 'consistent' order. 4882 SmallVector<Expr*, 5> SubExprs; 4883 SubExprs.push_back(Ptr); 4884 switch (Form) { 4885 case Init: 4886 // Note, AtomicExpr::getVal1() has a special case for this atomic. 4887 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4888 break; 4889 case Load: 4890 SubExprs.push_back(APIOrderedArgs[1]); // Order 4891 break; 4892 case LoadCopy: 4893 case Copy: 4894 case Arithmetic: 4895 case Xchg: 4896 SubExprs.push_back(APIOrderedArgs[2]); // Order 4897 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4898 break; 4899 case GNUXchg: 4900 // Note, AtomicExpr::getVal2() has a special case for this atomic. 4901 SubExprs.push_back(APIOrderedArgs[3]); // Order 4902 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4903 SubExprs.push_back(APIOrderedArgs[2]); // Val2 4904 break; 4905 case C11CmpXchg: 4906 SubExprs.push_back(APIOrderedArgs[3]); // Order 4907 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4908 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 4909 SubExprs.push_back(APIOrderedArgs[2]); // Val2 4910 break; 4911 case GNUCmpXchg: 4912 SubExprs.push_back(APIOrderedArgs[4]); // Order 4913 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4914 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 4915 SubExprs.push_back(APIOrderedArgs[2]); // Val2 4916 SubExprs.push_back(APIOrderedArgs[3]); // Weak 4917 break; 4918 } 4919 4920 if (SubExprs.size() >= 2 && Form != Init) { 4921 llvm::APSInt Result(32); 4922 if (SubExprs[1]->isIntegerConstantExpr(Result, Context) && 4923 !isValidOrderingForOp(Result.getSExtValue(), Op)) 4924 Diag(SubExprs[1]->getBeginLoc(), 4925 diag::warn_atomic_op_has_invalid_memory_order) 4926 << SubExprs[1]->getSourceRange(); 4927 } 4928 4929 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 4930 auto *Scope = Args[Args.size() - 1]; 4931 llvm::APSInt Result(32); 4932 if (Scope->isIntegerConstantExpr(Result, Context) && 4933 !ScopeModel->isValid(Result.getZExtValue())) { 4934 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 4935 << Scope->getSourceRange(); 4936 } 4937 SubExprs.push_back(Scope); 4938 } 4939 4940 AtomicExpr *AE = new (Context) 4941 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 4942 4943 if ((Op == AtomicExpr::AO__c11_atomic_load || 4944 Op == AtomicExpr::AO__c11_atomic_store || 4945 Op == AtomicExpr::AO__opencl_atomic_load || 4946 Op == AtomicExpr::AO__opencl_atomic_store ) && 4947 Context.AtomicUsesUnsupportedLibcall(AE)) 4948 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 4949 << ((Op == AtomicExpr::AO__c11_atomic_load || 4950 Op == AtomicExpr::AO__opencl_atomic_load) 4951 ? 
0 4952 : 1); 4953 4954 if (ValType->isExtIntType()) { 4955 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit); 4956 return ExprError(); 4957 } 4958 4959 return AE; 4960 } 4961 4962 /// checkBuiltinArgument - Given a call to a builtin function, perform 4963 /// normal type-checking on the given argument, updating the call in 4964 /// place. This is useful when a builtin function requires custom 4965 /// type-checking for some of its arguments but not necessarily all of 4966 /// them. 4967 /// 4968 /// Returns true on error. 4969 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 4970 FunctionDecl *Fn = E->getDirectCallee(); 4971 assert(Fn && "builtin call without direct callee!"); 4972 4973 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 4974 InitializedEntity Entity = 4975 InitializedEntity::InitializeParameter(S.Context, Param); 4976 4977 ExprResult Arg = E->getArg(0); 4978 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 4979 if (Arg.isInvalid()) 4980 return true; 4981 4982 E->setArg(ArgIndex, Arg.get()); 4983 return false; 4984 } 4985 4986 /// We have a call to a function like __sync_fetch_and_add, which is an 4987 /// overloaded function based on the pointer type of its first argument. 4988 /// The main BuildCallExpr routines have already promoted the types of 4989 /// arguments because all of these calls are prototyped as void(...). 4990 /// 4991 /// This function goes through and does final semantic checking for these 4992 /// builtins, as well as generating any warnings. 4993 ExprResult 4994 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 4995 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 4996 Expr *Callee = TheCall->getCallee(); 4997 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 4998 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 4999 5000 // Ensure that we have at least one argument to do type inference from. 5001 if (TheCall->getNumArgs() < 1) { 5002 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5003 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 5004 return ExprError(); 5005 } 5006 5007 // Inspect the first argument of the atomic builtin. This should always be 5008 // a pointer type, whose element is an integral scalar or pointer type. 5009 // Because it is a pointer type, we don't have to worry about any implicit 5010 // casts here. 5011 // FIXME: We don't allow floating point scalars as input. 
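  // Illustrative inputs for the checks that follow (editor's sketch, not part
  // of the original source):
  //
  //   long Counter;    __sync_fetch_and_add(&Counter, 1);   // OK: pointer to integer
  //   void *Slot, *P;  __sync_lock_test_and_set(&Slot, P);  // OK: pointer to pointer
  //   float F;         __sync_fetch_and_add(&F, 1.0f);      // rejected: pointee must
  //                                                         // be integer or pointer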
5012   Expr *FirstArg = TheCall->getArg(0);
5013   ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
5014   if (FirstArgResult.isInvalid())
5015     return ExprError();
5016   FirstArg = FirstArgResult.get();
5017   TheCall->setArg(0, FirstArg);
5018
5019   const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
5020   if (!pointerType) {
5021     Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
5022         << FirstArg->getType() << FirstArg->getSourceRange();
5023     return ExprError();
5024   }
5025
5026   QualType ValType = pointerType->getPointeeType();
5027   if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
5028       !ValType->isBlockPointerType()) {
5029     Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
5030         << FirstArg->getType() << FirstArg->getSourceRange();
5031     return ExprError();
5032   }
5033
5034   if (ValType.isConstQualified()) {
5035     Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
5036         << FirstArg->getType() << FirstArg->getSourceRange();
5037     return ExprError();
5038   }
5039
5040   switch (ValType.getObjCLifetime()) {
5041   case Qualifiers::OCL_None:
5042   case Qualifiers::OCL_ExplicitNone:
5043     // okay
5044     break;
5045
5046   case Qualifiers::OCL_Weak:
5047   case Qualifiers::OCL_Strong:
5048   case Qualifiers::OCL_Autoreleasing:
5049     Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
5050         << ValType << FirstArg->getSourceRange();
5051     return ExprError();
5052   }
5053
5054   // Strip any qualifiers off ValType.
5055   ValType = ValType.getUnqualifiedType();
5056
5057   // The majority of builtins return a value, but a few have special return
5058   // types, so allow them to override appropriately below.
5059   QualType ResultType = ValType;
5060
5061   // We need to figure out which concrete builtin this maps onto. For example,
5062   // __sync_fetch_and_add with a 2 byte object turns into
5063   // __sync_fetch_and_add_2.
5064 #define BUILTIN_ROW(x) \
5065   { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
5066     Builtin::BI##x##_8, Builtin::BI##x##_16 }
5067
5068   static const unsigned BuiltinIndices[][5] = {
5069     BUILTIN_ROW(__sync_fetch_and_add),
5070     BUILTIN_ROW(__sync_fetch_and_sub),
5071     BUILTIN_ROW(__sync_fetch_and_or),
5072     BUILTIN_ROW(__sync_fetch_and_and),
5073     BUILTIN_ROW(__sync_fetch_and_xor),
5074     BUILTIN_ROW(__sync_fetch_and_nand),
5075
5076     BUILTIN_ROW(__sync_add_and_fetch),
5077     BUILTIN_ROW(__sync_sub_and_fetch),
5078     BUILTIN_ROW(__sync_and_and_fetch),
5079     BUILTIN_ROW(__sync_or_and_fetch),
5080     BUILTIN_ROW(__sync_xor_and_fetch),
5081     BUILTIN_ROW(__sync_nand_and_fetch),
5082
5083     BUILTIN_ROW(__sync_val_compare_and_swap),
5084     BUILTIN_ROW(__sync_bool_compare_and_swap),
5085     BUILTIN_ROW(__sync_lock_test_and_set),
5086     BUILTIN_ROW(__sync_lock_release),
5087     BUILTIN_ROW(__sync_swap)
5088   };
5089 #undef BUILTIN_ROW
5090
5091   // Determine the index of the size.
5092   unsigned SizeIndex;
5093   switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
5094   case 1: SizeIndex = 0; break;
5095   case 2: SizeIndex = 1; break;
5096   case 4: SizeIndex = 2; break;
5097   case 8: SizeIndex = 3; break;
5098   case 16: SizeIndex = 4; break;
5099   default:
5100     Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
5101         << FirstArg->getType() << FirstArg->getSourceRange();
5102     return ExprError();
5103   }
5104
5105   // Each of these builtins has one pointer argument, followed by some number
5106   // of values (0, 1 or 2), followed by a potentially empty varargs list of stuff
5107   // that we ignore.
Find out which row of BuiltinIndices to read from as well 5108 // as the number of fixed args. 5109 unsigned BuiltinID = FDecl->getBuiltinID(); 5110 unsigned BuiltinIndex, NumFixed = 1; 5111 bool WarnAboutSemanticsChange = false; 5112 switch (BuiltinID) { 5113 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 5114 case Builtin::BI__sync_fetch_and_add: 5115 case Builtin::BI__sync_fetch_and_add_1: 5116 case Builtin::BI__sync_fetch_and_add_2: 5117 case Builtin::BI__sync_fetch_and_add_4: 5118 case Builtin::BI__sync_fetch_and_add_8: 5119 case Builtin::BI__sync_fetch_and_add_16: 5120 BuiltinIndex = 0; 5121 break; 5122 5123 case Builtin::BI__sync_fetch_and_sub: 5124 case Builtin::BI__sync_fetch_and_sub_1: 5125 case Builtin::BI__sync_fetch_and_sub_2: 5126 case Builtin::BI__sync_fetch_and_sub_4: 5127 case Builtin::BI__sync_fetch_and_sub_8: 5128 case Builtin::BI__sync_fetch_and_sub_16: 5129 BuiltinIndex = 1; 5130 break; 5131 5132 case Builtin::BI__sync_fetch_and_or: 5133 case Builtin::BI__sync_fetch_and_or_1: 5134 case Builtin::BI__sync_fetch_and_or_2: 5135 case Builtin::BI__sync_fetch_and_or_4: 5136 case Builtin::BI__sync_fetch_and_or_8: 5137 case Builtin::BI__sync_fetch_and_or_16: 5138 BuiltinIndex = 2; 5139 break; 5140 5141 case Builtin::BI__sync_fetch_and_and: 5142 case Builtin::BI__sync_fetch_and_and_1: 5143 case Builtin::BI__sync_fetch_and_and_2: 5144 case Builtin::BI__sync_fetch_and_and_4: 5145 case Builtin::BI__sync_fetch_and_and_8: 5146 case Builtin::BI__sync_fetch_and_and_16: 5147 BuiltinIndex = 3; 5148 break; 5149 5150 case Builtin::BI__sync_fetch_and_xor: 5151 case Builtin::BI__sync_fetch_and_xor_1: 5152 case Builtin::BI__sync_fetch_and_xor_2: 5153 case Builtin::BI__sync_fetch_and_xor_4: 5154 case Builtin::BI__sync_fetch_and_xor_8: 5155 case Builtin::BI__sync_fetch_and_xor_16: 5156 BuiltinIndex = 4; 5157 break; 5158 5159 case Builtin::BI__sync_fetch_and_nand: 5160 case Builtin::BI__sync_fetch_and_nand_1: 5161 case Builtin::BI__sync_fetch_and_nand_2: 5162 case Builtin::BI__sync_fetch_and_nand_4: 5163 case Builtin::BI__sync_fetch_and_nand_8: 5164 case Builtin::BI__sync_fetch_and_nand_16: 5165 BuiltinIndex = 5; 5166 WarnAboutSemanticsChange = true; 5167 break; 5168 5169 case Builtin::BI__sync_add_and_fetch: 5170 case Builtin::BI__sync_add_and_fetch_1: 5171 case Builtin::BI__sync_add_and_fetch_2: 5172 case Builtin::BI__sync_add_and_fetch_4: 5173 case Builtin::BI__sync_add_and_fetch_8: 5174 case Builtin::BI__sync_add_and_fetch_16: 5175 BuiltinIndex = 6; 5176 break; 5177 5178 case Builtin::BI__sync_sub_and_fetch: 5179 case Builtin::BI__sync_sub_and_fetch_1: 5180 case Builtin::BI__sync_sub_and_fetch_2: 5181 case Builtin::BI__sync_sub_and_fetch_4: 5182 case Builtin::BI__sync_sub_and_fetch_8: 5183 case Builtin::BI__sync_sub_and_fetch_16: 5184 BuiltinIndex = 7; 5185 break; 5186 5187 case Builtin::BI__sync_and_and_fetch: 5188 case Builtin::BI__sync_and_and_fetch_1: 5189 case Builtin::BI__sync_and_and_fetch_2: 5190 case Builtin::BI__sync_and_and_fetch_4: 5191 case Builtin::BI__sync_and_and_fetch_8: 5192 case Builtin::BI__sync_and_and_fetch_16: 5193 BuiltinIndex = 8; 5194 break; 5195 5196 case Builtin::BI__sync_or_and_fetch: 5197 case Builtin::BI__sync_or_and_fetch_1: 5198 case Builtin::BI__sync_or_and_fetch_2: 5199 case Builtin::BI__sync_or_and_fetch_4: 5200 case Builtin::BI__sync_or_and_fetch_8: 5201 case Builtin::BI__sync_or_and_fetch_16: 5202 BuiltinIndex = 9; 5203 break; 5204 5205 case Builtin::BI__sync_xor_and_fetch: 5206 case Builtin::BI__sync_xor_and_fetch_1: 5207 case 
Builtin::BI__sync_xor_and_fetch_2: 5208 case Builtin::BI__sync_xor_and_fetch_4: 5209 case Builtin::BI__sync_xor_and_fetch_8: 5210 case Builtin::BI__sync_xor_and_fetch_16: 5211 BuiltinIndex = 10; 5212 break; 5213 5214 case Builtin::BI__sync_nand_and_fetch: 5215 case Builtin::BI__sync_nand_and_fetch_1: 5216 case Builtin::BI__sync_nand_and_fetch_2: 5217 case Builtin::BI__sync_nand_and_fetch_4: 5218 case Builtin::BI__sync_nand_and_fetch_8: 5219 case Builtin::BI__sync_nand_and_fetch_16: 5220 BuiltinIndex = 11; 5221 WarnAboutSemanticsChange = true; 5222 break; 5223 5224 case Builtin::BI__sync_val_compare_and_swap: 5225 case Builtin::BI__sync_val_compare_and_swap_1: 5226 case Builtin::BI__sync_val_compare_and_swap_2: 5227 case Builtin::BI__sync_val_compare_and_swap_4: 5228 case Builtin::BI__sync_val_compare_and_swap_8: 5229 case Builtin::BI__sync_val_compare_and_swap_16: 5230 BuiltinIndex = 12; 5231 NumFixed = 2; 5232 break; 5233 5234 case Builtin::BI__sync_bool_compare_and_swap: 5235 case Builtin::BI__sync_bool_compare_and_swap_1: 5236 case Builtin::BI__sync_bool_compare_and_swap_2: 5237 case Builtin::BI__sync_bool_compare_and_swap_4: 5238 case Builtin::BI__sync_bool_compare_and_swap_8: 5239 case Builtin::BI__sync_bool_compare_and_swap_16: 5240 BuiltinIndex = 13; 5241 NumFixed = 2; 5242 ResultType = Context.BoolTy; 5243 break; 5244 5245 case Builtin::BI__sync_lock_test_and_set: 5246 case Builtin::BI__sync_lock_test_and_set_1: 5247 case Builtin::BI__sync_lock_test_and_set_2: 5248 case Builtin::BI__sync_lock_test_and_set_4: 5249 case Builtin::BI__sync_lock_test_and_set_8: 5250 case Builtin::BI__sync_lock_test_and_set_16: 5251 BuiltinIndex = 14; 5252 break; 5253 5254 case Builtin::BI__sync_lock_release: 5255 case Builtin::BI__sync_lock_release_1: 5256 case Builtin::BI__sync_lock_release_2: 5257 case Builtin::BI__sync_lock_release_4: 5258 case Builtin::BI__sync_lock_release_8: 5259 case Builtin::BI__sync_lock_release_16: 5260 BuiltinIndex = 15; 5261 NumFixed = 0; 5262 ResultType = Context.VoidTy; 5263 break; 5264 5265 case Builtin::BI__sync_swap: 5266 case Builtin::BI__sync_swap_1: 5267 case Builtin::BI__sync_swap_2: 5268 case Builtin::BI__sync_swap_4: 5269 case Builtin::BI__sync_swap_8: 5270 case Builtin::BI__sync_swap_16: 5271 BuiltinIndex = 16; 5272 break; 5273 } 5274 5275 // Now that we know how many fixed arguments we expect, first check that we 5276 // have at least that many. 5277 if (TheCall->getNumArgs() < 1+NumFixed) { 5278 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5279 << 0 << 1 + NumFixed << TheCall->getNumArgs() 5280 << Callee->getSourceRange(); 5281 return ExprError(); 5282 } 5283 5284 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 5285 << Callee->getSourceRange(); 5286 5287 if (WarnAboutSemanticsChange) { 5288 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 5289 << Callee->getSourceRange(); 5290 } 5291 5292 // Get the decl for the concrete builtin from this, we can tell what the 5293 // concrete integer type we should convert to is. 5294 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 5295 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 5296 FunctionDecl *NewBuiltinDecl; 5297 if (NewBuiltinID == BuiltinID) 5298 NewBuiltinDecl = FDecl; 5299 else { 5300 // Perform builtin lookup to avoid redeclaring it. 
5301     DeclarationName DN(&Context.Idents.get(NewBuiltinName));
5302     LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
5303     LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
5304     assert(Res.getFoundDecl());
5305     NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
5306     if (!NewBuiltinDecl)
5307       return ExprError();
5308   }
5309
5310   // The first argument --- the pointer --- has a fixed type; we
5311   // deduce the types of the rest of the arguments accordingly. Walk
5312   // the remaining arguments, converting them to the deduced value type.
5313   for (unsigned i = 0; i != NumFixed; ++i) {
5314     ExprResult Arg = TheCall->getArg(i+1);
5315
5316     // GCC does an implicit conversion to the pointer or integer ValType. This
5317     // can fail in some cases (1i -> int**), check for this error case now.
5318     // Initialize the argument.
5319     InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
5320                                                    ValType, /*consume*/ false);
5321     Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
5322     if (Arg.isInvalid())
5323       return ExprError();
5324
5325     // Okay, we have something that *can* be converted to the right type. Check
5326     // to see if there is a potentially weird extension going on here. This can
5327     // happen when you do an atomic operation on something like a char* and
5328     // pass in 42. The 42 gets converted to char. This is even more strange
5329     // for things like 45.123 -> char, etc.
5330     // FIXME: Do this check.
5331     TheCall->setArg(i+1, Arg.get());
5332   }
5333
5334   // Create a new DeclRefExpr to refer to the new decl.
5335   DeclRefExpr *NewDRE = DeclRefExpr::Create(
5336       Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
5337       /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
5338       DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());
5339
5340   // Set the callee in the CallExpr.
5341   // FIXME: This loses syntactic information.
5342   QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
5343   ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
5344                                               CK_BuiltinFnToFnPtr);
5345   TheCall->setCallee(PromotedCall.get());
5346
5347   // Change the result type of the call to match the original value type. This
5348   // is arbitrary, but the codegen for these builtins is designed to handle it
5349   // gracefully.
5350   TheCall->setType(ResultType);
5351
5352   // Prohibit use of _ExtInt with atomic builtins.
5353   // The arguments would have already been converted to the first argument's
5354   // type, so we only need to check the first argument.
5355   const auto *ExtIntValType = ValType->getAs<ExtIntType>();
5356   if (ExtIntValType && !llvm::isPowerOf2_64(ExtIntValType->getNumBits())) {
5357     Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
5358     return ExprError();
5359   }
5360
5361   return TheCallResult;
5362 }
5363
5364 /// SemaBuiltinNontemporalOverloaded - We have a call to
5365 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
5366 /// overloaded function based on the pointer type of its last argument.
5367 ///
5368 /// This function goes through and does final semantic checking for these
5369 /// builtins.
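/// Example usage, for illustration (editor's sketch, not from the original
/// source):
///
///   float Buf[1024];
///   __builtin_nontemporal_store(1.0f, &Buf[I]);      // store: (value, pointer)
///   float X = __builtin_nontemporal_load(&Buf[I]);   // load:  (pointer)
///
/// The pointee type of the last argument determines the type of the memory
/// access, which is what the code below verifies.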
5370 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 5371 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 5372 DeclRefExpr *DRE = 5373 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5374 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5375 unsigned BuiltinID = FDecl->getBuiltinID(); 5376 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 5377 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 5378 "Unexpected nontemporal load/store builtin!"); 5379 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 5380 unsigned numArgs = isStore ? 2 : 1; 5381 5382 // Ensure that we have the proper number of arguments. 5383 if (checkArgCount(*this, TheCall, numArgs)) 5384 return ExprError(); 5385 5386 // Inspect the last argument of the nontemporal builtin. This should always 5387 // be a pointer type, from which we imply the type of the memory access. 5388 // Because it is a pointer type, we don't have to worry about any implicit 5389 // casts here. 5390 Expr *PointerArg = TheCall->getArg(numArgs - 1); 5391 ExprResult PointerArgResult = 5392 DefaultFunctionArrayLvalueConversion(PointerArg); 5393 5394 if (PointerArgResult.isInvalid()) 5395 return ExprError(); 5396 PointerArg = PointerArgResult.get(); 5397 TheCall->setArg(numArgs - 1, PointerArg); 5398 5399 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 5400 if (!pointerType) { 5401 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 5402 << PointerArg->getType() << PointerArg->getSourceRange(); 5403 return ExprError(); 5404 } 5405 5406 QualType ValType = pointerType->getPointeeType(); 5407 5408 // Strip any qualifiers off ValType. 5409 ValType = ValType.getUnqualifiedType(); 5410 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5411 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 5412 !ValType->isVectorType()) { 5413 Diag(DRE->getBeginLoc(), 5414 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 5415 << PointerArg->getType() << PointerArg->getSourceRange(); 5416 return ExprError(); 5417 } 5418 5419 if (!isStore) { 5420 TheCall->setType(ValType); 5421 return TheCallResult; 5422 } 5423 5424 ExprResult ValArg = TheCall->getArg(0); 5425 InitializedEntity Entity = InitializedEntity::InitializeParameter( 5426 Context, ValType, /*consume*/ false); 5427 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 5428 if (ValArg.isInvalid()) 5429 return ExprError(); 5430 5431 TheCall->setArg(0, ValArg.get()); 5432 TheCall->setType(Context.VoidTy); 5433 return TheCallResult; 5434 } 5435 5436 /// CheckObjCString - Checks that the argument to the builtin 5437 /// CFString constructor is correct 5438 /// Note: It might also make sense to do the UTF-16 conversion here (would 5439 /// simplify the backend). 
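/// For illustration (editor's note, not from the original source): this is the
/// check behind constant CFString construction, where CFSTR("...") typically
/// expands to a call to __builtin___CFStringMakeConstantString:
///
///   CFStringRef S = CFSTR("plain ASCII is accepted as-is");
///
/// A literal containing non-ASCII bytes or embedded NULs is run through the
/// UTF-8 to UTF-16 conversion below, and a failed conversion is diagnosed.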
5440 bool Sema::CheckObjCString(Expr *Arg) {
5441   Arg = Arg->IgnoreParenCasts();
5442   StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
5443
5444   if (!Literal || !Literal->isAscii()) {
5445     Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
5446         << Arg->getSourceRange();
5447     return true;
5448   }
5449
5450   if (Literal->containsNonAsciiOrNull()) {
5451     StringRef String = Literal->getString();
5452     unsigned NumBytes = String.size();
5453     SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
5454     const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
5455     llvm::UTF16 *ToPtr = &ToBuf[0];
5456
5457     llvm::ConversionResult Result =
5458         llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
5459                                  ToPtr + NumBytes, llvm::strictConversion);
5460     // Check for conversion failure.
5461     if (Result != llvm::conversionOK)
5462       Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
5463           << Arg->getSourceRange();
5464   }
5465   return false;
5466 }
5467
5468 /// CheckOSLogFormatStringArg - Checks that the format string argument to the
5469 /// os_log() and os_trace() functions is correct, and converts it to const char *.
5470 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
5471   Arg = Arg->IgnoreParenCasts();
5472   auto *Literal = dyn_cast<StringLiteral>(Arg);
5473   if (!Literal) {
5474     if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
5475       Literal = ObjcLiteral->getString();
5476     }
5477   }
5478
5479   if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
5480     return ExprError(
5481         Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
5482         << Arg->getSourceRange());
5483   }
5484
5485   ExprResult Result(Literal);
5486   QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
5487   InitializedEntity Entity =
5488       InitializedEntity::InitializeParameter(Context, ResultTy, false);
5489   Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
5490   return Result;
5491 }
5492
5493 /// Check that the user is calling the appropriate va_start builtin for the
5494 /// target and calling convention.
5495 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
5496   const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
5497   bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
5498   bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
5499                     TT.getArch() == llvm::Triple::aarch64_32);
5500   bool IsWindows = TT.isOSWindows();
5501   bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
5502   if (IsX64 || IsAArch64) {
5503     CallingConv CC = CC_C;
5504     if (const FunctionDecl *FD = S.getCurFunctionDecl())
5505       CC = FD->getType()->castAs<FunctionType>()->getCallConv();
5506     if (IsMSVAStart) {
5507       // Don't allow this in System V ABI functions.
5508       if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
5509         return S.Diag(Fn->getBeginLoc(),
5510                       diag::err_ms_va_start_used_in_sysv_function);
5511     } else {
5512       // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
5513       // On x64 Windows, don't allow this in System V ABI functions.
5514       // (Yes, that means there's no corresponding way to support variadic
5515       // System V ABI functions on Windows.)
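      // Concretely (illustrative, editor's sketch): on x86-64 Linux,
      //   void F(int N, ...)                          { va_start(AP, N); }  // OK
      //   void __attribute__((ms_abi)) G(int N, ...)  { va_start(AP, N); }  // rejected here
      // and such ms_abi functions are expected to use __builtin_ms_va_start with
      // __builtin_ms_va_list instead (the MS variant is in turn rejected inside
      // System V ABI functions above).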
5516 if ((IsWindows && CC == CC_X86_64SysV) || 5517 (!IsWindows && CC == CC_Win64)) 5518 return S.Diag(Fn->getBeginLoc(), 5519 diag::err_va_start_used_in_wrong_abi_function) 5520 << !IsWindows; 5521 } 5522 return false; 5523 } 5524 5525 if (IsMSVAStart) 5526 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 5527 return false; 5528 } 5529 5530 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 5531 ParmVarDecl **LastParam = nullptr) { 5532 // Determine whether the current function, block, or obj-c method is variadic 5533 // and get its parameter list. 5534 bool IsVariadic = false; 5535 ArrayRef<ParmVarDecl *> Params; 5536 DeclContext *Caller = S.CurContext; 5537 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 5538 IsVariadic = Block->isVariadic(); 5539 Params = Block->parameters(); 5540 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 5541 IsVariadic = FD->isVariadic(); 5542 Params = FD->parameters(); 5543 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 5544 IsVariadic = MD->isVariadic(); 5545 // FIXME: This isn't correct for methods (results in bogus warning). 5546 Params = MD->parameters(); 5547 } else if (isa<CapturedDecl>(Caller)) { 5548 // We don't support va_start in a CapturedDecl. 5549 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 5550 return true; 5551 } else { 5552 // This must be some other declcontext that parses exprs. 5553 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 5554 return true; 5555 } 5556 5557 if (!IsVariadic) { 5558 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 5559 return true; 5560 } 5561 5562 if (LastParam) 5563 *LastParam = Params.empty() ? nullptr : Params.back(); 5564 5565 return false; 5566 } 5567 5568 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 5569 /// for validity. Emit an error and return true on failure; return false 5570 /// on success. 5571 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 5572 Expr *Fn = TheCall->getCallee(); 5573 5574 if (checkVAStartABI(*this, BuiltinID, Fn)) 5575 return true; 5576 5577 if (TheCall->getNumArgs() > 2) { 5578 Diag(TheCall->getArg(2)->getBeginLoc(), 5579 diag::err_typecheck_call_too_many_args) 5580 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5581 << Fn->getSourceRange() 5582 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5583 (*(TheCall->arg_end() - 1))->getEndLoc()); 5584 return true; 5585 } 5586 5587 if (TheCall->getNumArgs() < 2) { 5588 return Diag(TheCall->getEndLoc(), 5589 diag::err_typecheck_call_too_few_args_at_least) 5590 << 0 /*function call*/ << 2 << TheCall->getNumArgs(); 5591 } 5592 5593 // Type-check the first argument normally. 5594 if (checkBuiltinArgument(*this, TheCall, 0)) 5595 return true; 5596 5597 // Check that the current function is variadic, and get its last parameter. 5598 ParmVarDecl *LastParam; 5599 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 5600 return true; 5601 5602 // Verify that the second argument to the builtin is the last argument of the 5603 // current function or method. 5604 bool SecondArgIsLastNamedArgument = false; 5605 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 5606 5607 // These are valid if SecondArgIsLastNamedArgument is false after the next 5608 // block. 
5609 QualType Type; 5610 SourceLocation ParamLoc; 5611 bool IsCRegister = false; 5612 5613 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 5614 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 5615 SecondArgIsLastNamedArgument = PV == LastParam; 5616 5617 Type = PV->getType(); 5618 ParamLoc = PV->getLocation(); 5619 IsCRegister = 5620 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 5621 } 5622 } 5623 5624 if (!SecondArgIsLastNamedArgument) 5625 Diag(TheCall->getArg(1)->getBeginLoc(), 5626 diag::warn_second_arg_of_va_start_not_last_named_param); 5627 else if (IsCRegister || Type->isReferenceType() || 5628 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 5629 // Promotable integers are UB, but enumerations need a bit of 5630 // extra checking to see what their promotable type actually is. 5631 if (!Type->isPromotableIntegerType()) 5632 return false; 5633 if (!Type->isEnumeralType()) 5634 return true; 5635 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 5636 return !(ED && 5637 Context.typesAreCompatible(ED->getPromotionType(), Type)); 5638 }()) { 5639 unsigned Reason = 0; 5640 if (Type->isReferenceType()) Reason = 1; 5641 else if (IsCRegister) Reason = 2; 5642 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 5643 Diag(ParamLoc, diag::note_parameter_type) << Type; 5644 } 5645 5646 TheCall->setType(Context.VoidTy); 5647 return false; 5648 } 5649 5650 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 5651 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 5652 // const char *named_addr); 5653 5654 Expr *Func = Call->getCallee(); 5655 5656 if (Call->getNumArgs() < 3) 5657 return Diag(Call->getEndLoc(), 5658 diag::err_typecheck_call_too_few_args_at_least) 5659 << 0 /*function call*/ << 3 << Call->getNumArgs(); 5660 5661 // Type-check the first argument normally. 5662 if (checkBuiltinArgument(*this, Call, 0)) 5663 return true; 5664 5665 // Check that the current function is variadic. 5666 if (checkVAStartIsInVariadicFunction(*this, Func)) 5667 return true; 5668 5669 // __va_start on Windows does not validate the parameter qualifiers 5670 5671 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 5672 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 5673 5674 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 5675 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 5676 5677 const QualType &ConstCharPtrTy = 5678 Context.getPointerType(Context.CharTy.withConst()); 5679 if (!Arg1Ty->isPointerType() || 5680 Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy) 5681 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5682 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 5683 << 0 /* qualifier difference */ 5684 << 3 /* parameter mismatch */ 5685 << 2 << Arg1->getType() << ConstCharPtrTy; 5686 5687 const QualType SizeTy = Context.getSizeType(); 5688 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 5689 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5690 << Arg2->getType() << SizeTy << 1 /* different class */ 5691 << 0 /* qualifier difference */ 5692 << 3 /* parameter mismatch */ 5693 << 3 << Arg2->getType() << SizeTy; 5694 5695 return false; 5696 } 5697 5698 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 5699 /// friends. This is declared to take (...), so we have to check everything. 
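/// For example (illustrative, not from the original source): after the usual
/// arithmetic conversions performed below,
///   __builtin_isgreater(1, 2.0)   // OK: the common type is double
///   __builtin_isgreater(1, 2)     // rejected: the common type int is not a real
///                                 // floating-point type
/// so mixed integer/floating operands are fine, but two integers are not.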
5700 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
5701   if (TheCall->getNumArgs() < 2)
5702     return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
5703            << 0 << 2 << TheCall->getNumArgs() /*function call*/;
5704   if (TheCall->getNumArgs() > 2)
5705     return Diag(TheCall->getArg(2)->getBeginLoc(),
5706                 diag::err_typecheck_call_too_many_args)
5707            << 0 /*function call*/ << 2 << TheCall->getNumArgs()
5708            << SourceRange(TheCall->getArg(2)->getBeginLoc(),
5709                           (*(TheCall->arg_end() - 1))->getEndLoc());
5710
5711   ExprResult OrigArg0 = TheCall->getArg(0);
5712   ExprResult OrigArg1 = TheCall->getArg(1);
5713
5714   // Do standard promotions between the two arguments, returning their common
5715   // type.
5716   QualType Res = UsualArithmeticConversions(
5717       OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
5718   if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
5719     return true;
5720
5721   // Make sure any conversions are pushed back into the call; this is
5722   // type safe since unordered compare builtins are declared as "_Bool
5723   // foo(...)".
5724   TheCall->setArg(0, OrigArg0.get());
5725   TheCall->setArg(1, OrigArg1.get());
5726
5727   if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
5728     return false;
5729
5730   // If the common type isn't a real floating type, then the arguments were
5731   // invalid for this operation.
5732   if (Res.isNull() || !Res->isRealFloatingType())
5733     return Diag(OrigArg0.get()->getBeginLoc(),
5734                 diag::err_typecheck_call_invalid_ordered_compare)
5735            << OrigArg0.get()->getType() << OrigArg1.get()->getType()
5736            << SourceRange(OrigArg0.get()->getBeginLoc(),
5737                           OrigArg1.get()->getEndLoc());
5738
5739   return false;
5740 }
5741
5742 /// SemaBuiltinFPClassification - Handle functions like
5743 /// __builtin_isnan and friends. This is declared to take (...), so we have
5744 /// to check everything. We expect the last argument to be a floating point
5745 /// value.
5746 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
5747   if (TheCall->getNumArgs() < NumArgs)
5748     return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
5749            << 0 << NumArgs << TheCall->getNumArgs() /*function call*/;
5750   if (TheCall->getNumArgs() > NumArgs)
5751     return Diag(TheCall->getArg(NumArgs)->getBeginLoc(),
5752                 diag::err_typecheck_call_too_many_args)
5753            << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
5754            << SourceRange(TheCall->getArg(NumArgs)->getBeginLoc(),
5755                           (*(TheCall->arg_end() - 1))->getEndLoc());
5756
5757   // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
5758   // on all preceding parameters just being int. Try all of those.
5759   for (unsigned i = 0; i < NumArgs - 1; ++i) {
5760     Expr *Arg = TheCall->getArg(i);
5761
5762     if (Arg->isTypeDependent())
5763       return false;
5764
5765     ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);
5766
5767     if (Res.isInvalid())
5768       return true;
5769     TheCall->setArg(i, Res.get());
5770   }
5771
5772   Expr *OrigArg = TheCall->getArg(NumArgs-1);
5773
5774   if (OrigArg->isTypeDependent())
5775     return false;
5776
5777   // Usual Unary Conversions will convert half to float, which we want for
5778   // machines that use fp16 conversion intrinsics. Else, we want to leave the
5779   // type as it is, but do normal L->Rvalue conversions.
5780 if (Context.getTargetInfo().useFP16ConversionIntrinsics()) 5781 OrigArg = UsualUnaryConversions(OrigArg).get(); 5782 else 5783 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get(); 5784 TheCall->setArg(NumArgs - 1, OrigArg); 5785 5786 // This operation requires a non-_Complex floating-point number. 5787 if (!OrigArg->getType()->isRealFloatingType()) 5788 return Diag(OrigArg->getBeginLoc(), 5789 diag::err_typecheck_call_invalid_unary_fp) 5790 << OrigArg->getType() << OrigArg->getSourceRange(); 5791 5792 return false; 5793 } 5794 5795 // Customized Sema Checking for VSX builtins that have the following signature: 5796 // vector [...] builtinName(vector [...], vector [...], const int); 5797 // Which takes the same type of vectors (any legal vector type) for the first 5798 // two arguments and takes compile time constant for the third argument. 5799 // Example builtins are : 5800 // vector double vec_xxpermdi(vector double, vector double, int); 5801 // vector short vec_xxsldwi(vector short, vector short, int); 5802 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 5803 unsigned ExpectedNumArgs = 3; 5804 if (TheCall->getNumArgs() < ExpectedNumArgs) 5805 return Diag(TheCall->getEndLoc(), 5806 diag::err_typecheck_call_too_few_args_at_least) 5807 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs() 5808 << TheCall->getSourceRange(); 5809 5810 if (TheCall->getNumArgs() > ExpectedNumArgs) 5811 return Diag(TheCall->getEndLoc(), 5812 diag::err_typecheck_call_too_many_args_at_most) 5813 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs() 5814 << TheCall->getSourceRange(); 5815 5816 // Check the third argument is a compile time constant 5817 llvm::APSInt Value; 5818 if(!TheCall->getArg(2)->isIntegerConstantExpr(Value, Context)) 5819 return Diag(TheCall->getBeginLoc(), 5820 diag::err_vsx_builtin_nonconstant_argument) 5821 << 3 /* argument index */ << TheCall->getDirectCallee() 5822 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5823 TheCall->getArg(2)->getEndLoc()); 5824 5825 QualType Arg1Ty = TheCall->getArg(0)->getType(); 5826 QualType Arg2Ty = TheCall->getArg(1)->getType(); 5827 5828 // Check the type of argument 1 and argument 2 are vectors. 5829 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 5830 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 5831 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 5832 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 5833 << TheCall->getDirectCallee() 5834 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5835 TheCall->getArg(1)->getEndLoc()); 5836 } 5837 5838 // Check the first two arguments are the same type. 5839 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 5840 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 5841 << TheCall->getDirectCallee() 5842 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5843 TheCall->getArg(1)->getEndLoc()); 5844 } 5845 5846 // When default clang type checking is turned off and the customized type 5847 // checking is used, the returning type of the function must be explicitly 5848 // set. Otherwise it is _Bool by default. 5849 TheCall->setType(Arg1Ty); 5850 5851 return false; 5852 } 5853 5854 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 5855 // This is declared to take (...), so we have to check everything. 
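// Illustrative sketch (not part of the original source) of the two forms
// accepted below:
//   typedef int v4si __attribute__((vector_size(16)));
//   v4si a, b, mask;
//   v4si r1 = __builtin_shufflevector(a, mask);           // unary, vector mask
//   v4si r2 = __builtin_shufflevector(a, b, 0, 4, 1, 5);  // constant indices
// Each constant index must be -1 (undef) or smaller than twice the number of
// elements in the input vectors.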
5856 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 5857 if (TheCall->getNumArgs() < 2) 5858 return ExprError(Diag(TheCall->getEndLoc(), 5859 diag::err_typecheck_call_too_few_args_at_least) 5860 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5861 << TheCall->getSourceRange()); 5862 5863 // Determine which of the following types of shufflevector we're checking: 5864 // 1) unary, vector mask: (lhs, mask) 5865 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 5866 QualType resType = TheCall->getArg(0)->getType(); 5867 unsigned numElements = 0; 5868 5869 if (!TheCall->getArg(0)->isTypeDependent() && 5870 !TheCall->getArg(1)->isTypeDependent()) { 5871 QualType LHSType = TheCall->getArg(0)->getType(); 5872 QualType RHSType = TheCall->getArg(1)->getType(); 5873 5874 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 5875 return ExprError( 5876 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 5877 << TheCall->getDirectCallee() 5878 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5879 TheCall->getArg(1)->getEndLoc())); 5880 5881 numElements = LHSType->castAs<VectorType>()->getNumElements(); 5882 unsigned numResElements = TheCall->getNumArgs() - 2; 5883 5884 // Check to see if we have a call with 2 vector arguments, the unary shuffle 5885 // with mask. If so, verify that RHS is an integer vector type with the 5886 // same number of elts as lhs. 5887 if (TheCall->getNumArgs() == 2) { 5888 if (!RHSType->hasIntegerRepresentation() || 5889 RHSType->castAs<VectorType>()->getNumElements() != numElements) 5890 return ExprError(Diag(TheCall->getBeginLoc(), 5891 diag::err_vec_builtin_incompatible_vector) 5892 << TheCall->getDirectCallee() 5893 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 5894 TheCall->getArg(1)->getEndLoc())); 5895 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 5896 return ExprError(Diag(TheCall->getBeginLoc(), 5897 diag::err_vec_builtin_incompatible_vector) 5898 << TheCall->getDirectCallee() 5899 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5900 TheCall->getArg(1)->getEndLoc())); 5901 } else if (numElements != numResElements) { 5902 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 5903 resType = Context.getVectorType(eltType, numResElements, 5904 VectorType::GenericVector); 5905 } 5906 } 5907 5908 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 5909 if (TheCall->getArg(i)->isTypeDependent() || 5910 TheCall->getArg(i)->isValueDependent()) 5911 continue; 5912 5913 llvm::APSInt Result(32); 5914 if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context)) 5915 return ExprError(Diag(TheCall->getBeginLoc(), 5916 diag::err_shufflevector_nonconstant_argument) 5917 << TheCall->getArg(i)->getSourceRange()); 5918 5919 // Allow -1 which will be translated to undef in the IR. 
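    // (Illustrative example, not from the original source: in
    //  __builtin_shufflevector(a, b, 0, -1, 2, -1) the second and fourth
    //  elements of the result are left undefined.)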
5920 if (Result.isSigned() && Result.isAllOnesValue()) 5921 continue; 5922 5923 if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2) 5924 return ExprError(Diag(TheCall->getBeginLoc(), 5925 diag::err_shufflevector_argument_too_large) 5926 << TheCall->getArg(i)->getSourceRange()); 5927 } 5928 5929 SmallVector<Expr*, 32> exprs; 5930 5931 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 5932 exprs.push_back(TheCall->getArg(i)); 5933 TheCall->setArg(i, nullptr); 5934 } 5935 5936 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 5937 TheCall->getCallee()->getBeginLoc(), 5938 TheCall->getRParenLoc()); 5939 } 5940 5941 /// SemaConvertVectorExpr - Handle __builtin_convertvector 5942 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 5943 SourceLocation BuiltinLoc, 5944 SourceLocation RParenLoc) { 5945 ExprValueKind VK = VK_RValue; 5946 ExprObjectKind OK = OK_Ordinary; 5947 QualType DstTy = TInfo->getType(); 5948 QualType SrcTy = E->getType(); 5949 5950 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 5951 return ExprError(Diag(BuiltinLoc, 5952 diag::err_convertvector_non_vector) 5953 << E->getSourceRange()); 5954 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 5955 return ExprError(Diag(BuiltinLoc, 5956 diag::err_convertvector_non_vector_type)); 5957 5958 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 5959 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 5960 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 5961 if (SrcElts != DstElts) 5962 return ExprError(Diag(BuiltinLoc, 5963 diag::err_convertvector_incompatible_vector) 5964 << E->getSourceRange()); 5965 } 5966 5967 return new (Context) 5968 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 5969 } 5970 5971 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 5972 // This is declared to take (const void*, ...) and can take two 5973 // optional constant int args. 5974 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 5975 unsigned NumArgs = TheCall->getNumArgs(); 5976 5977 if (NumArgs > 3) 5978 return Diag(TheCall->getEndLoc(), 5979 diag::err_typecheck_call_too_many_args_at_most) 5980 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 5981 5982 // Argument 0 is checked for us and the remaining arguments must be 5983 // constant integers. 5984 for (unsigned i = 1; i != NumArgs; ++i) 5985 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 5986 return true; 5987 5988 return false; 5989 } 5990 5991 /// SemaBuiltinAssume - Handle __assume (MS Extension). 5992 // __assume does not evaluate its arguments, and should warn if its argument 5993 // has side effects. 5994 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 5995 Expr *Arg = TheCall->getArg(0); 5996 if (Arg->isInstantiationDependent()) return false; 5997 5998 if (Arg->HasSideEffects(Context)) 5999 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 6000 << Arg->getSourceRange() 6001 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 6002 6003 return false; 6004 } 6005 6006 /// Handle __builtin_alloca_with_align. This is declared 6007 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 6008 /// than 8. 6009 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 6010 // The alignment must be a constant integer. 6011 Expr *Arg = TheCall->getArg(1); 6012 6013 // We can't check the value of a dependent argument. 
6014 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6015 if (const auto *UE = 6016 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 6017 if (UE->getKind() == UETT_AlignOf || 6018 UE->getKind() == UETT_PreferredAlignOf) 6019 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 6020 << Arg->getSourceRange(); 6021 6022 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 6023 6024 if (!Result.isPowerOf2()) 6025 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6026 << Arg->getSourceRange(); 6027 6028 if (Result < Context.getCharWidth()) 6029 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 6030 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 6031 6032 if (Result > std::numeric_limits<int32_t>::max()) 6033 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 6034 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 6035 } 6036 6037 return false; 6038 } 6039 6040 /// Handle __builtin_assume_aligned. This is declared 6041 /// as (const void*, size_t, ...) and can take one optional constant int arg. 6042 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 6043 unsigned NumArgs = TheCall->getNumArgs(); 6044 6045 if (NumArgs > 3) 6046 return Diag(TheCall->getEndLoc(), 6047 diag::err_typecheck_call_too_many_args_at_most) 6048 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6049 6050 // The alignment must be a constant integer. 6051 Expr *Arg = TheCall->getArg(1); 6052 6053 // We can't check the value of a dependent argument. 6054 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6055 llvm::APSInt Result; 6056 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6057 return true; 6058 6059 if (!Result.isPowerOf2()) 6060 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6061 << Arg->getSourceRange(); 6062 6063 if (Result > Sema::MaximumAlignment) 6064 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 6065 << Arg->getSourceRange() << Sema::MaximumAlignment; 6066 } 6067 6068 if (NumArgs > 2) { 6069 ExprResult Arg(TheCall->getArg(2)); 6070 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6071 Context.getSizeType(), false); 6072 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6073 if (Arg.isInvalid()) return true; 6074 TheCall->setArg(2, Arg.get()); 6075 } 6076 6077 return false; 6078 } 6079 6080 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 6081 unsigned BuiltinID = 6082 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 6083 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 6084 6085 unsigned NumArgs = TheCall->getNumArgs(); 6086 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 6087 if (NumArgs < NumRequiredArgs) { 6088 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 6089 << 0 /* function call */ << NumRequiredArgs << NumArgs 6090 << TheCall->getSourceRange(); 6091 } 6092 if (NumArgs >= NumRequiredArgs + 0x100) { 6093 return Diag(TheCall->getEndLoc(), 6094 diag::err_typecheck_call_too_many_args_at_most) 6095 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 6096 << TheCall->getSourceRange(); 6097 } 6098 unsigned i = 0; 6099 6100 // For formatting call, check buffer arg. 
6101 if (!IsSizeCall) { 6102 ExprResult Arg(TheCall->getArg(i)); 6103 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6104 Context, Context.VoidPtrTy, false); 6105 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6106 if (Arg.isInvalid()) 6107 return true; 6108 TheCall->setArg(i, Arg.get()); 6109 i++; 6110 } 6111 6112 // Check string literal arg. 6113 unsigned FormatIdx = i; 6114 { 6115 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 6116 if (Arg.isInvalid()) 6117 return true; 6118 TheCall->setArg(i, Arg.get()); 6119 i++; 6120 } 6121 6122 // Make sure variadic args are scalar. 6123 unsigned FirstDataArg = i; 6124 while (i < NumArgs) { 6125 ExprResult Arg = DefaultVariadicArgumentPromotion( 6126 TheCall->getArg(i), VariadicFunction, nullptr); 6127 if (Arg.isInvalid()) 6128 return true; 6129 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 6130 if (ArgSize.getQuantity() >= 0x100) { 6131 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 6132 << i << (int)ArgSize.getQuantity() << 0xff 6133 << TheCall->getSourceRange(); 6134 } 6135 TheCall->setArg(i, Arg.get()); 6136 i++; 6137 } 6138 6139 // Check formatting specifiers. NOTE: We're only doing this for the non-size 6140 // call to avoid duplicate diagnostics. 6141 if (!IsSizeCall) { 6142 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 6143 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 6144 bool Success = CheckFormatArguments( 6145 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 6146 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 6147 CheckedVarArgs); 6148 if (!Success) 6149 return true; 6150 } 6151 6152 if (IsSizeCall) { 6153 TheCall->setType(Context.getSizeType()); 6154 } else { 6155 TheCall->setType(Context.VoidPtrTy); 6156 } 6157 return false; 6158 } 6159 6160 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 6161 /// TheCall is a constant expression. 6162 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 6163 llvm::APSInt &Result) { 6164 Expr *Arg = TheCall->getArg(ArgNum); 6165 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6166 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6167 6168 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 6169 6170 if (!Arg->isIntegerConstantExpr(Result, Context)) 6171 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 6172 << FDecl->getDeclName() << Arg->getSourceRange(); 6173 6174 return false; 6175 } 6176 6177 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 6178 /// TheCall is a constant expression in the range [Low, High]. 6179 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 6180 int Low, int High, bool RangeIsError) { 6181 if (isConstantEvaluated()) 6182 return false; 6183 llvm::APSInt Result; 6184 6185 // We can't check the value of a dependent argument. 6186 Expr *Arg = TheCall->getArg(ArgNum); 6187 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6188 return false; 6189 6190 // Check constant-ness first. 
6191 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6192 return true; 6193 6194 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 6195 if (RangeIsError) 6196 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 6197 << Result.toString(10) << Low << High << Arg->getSourceRange(); 6198 else 6199 // Defer the warning until we know if the code will be emitted so that 6200 // dead code can ignore this. 6201 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 6202 PDiag(diag::warn_argument_invalid_range) 6203 << Result.toString(10) << Low << High 6204 << Arg->getSourceRange()); 6205 } 6206 6207 return false; 6208 } 6209 6210 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr 6211 /// TheCall is a constant expression is a multiple of Num.. 6212 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, 6213 unsigned Num) { 6214 llvm::APSInt Result; 6215 6216 // We can't check the value of a dependent argument. 6217 Expr *Arg = TheCall->getArg(ArgNum); 6218 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6219 return false; 6220 6221 // Check constant-ness first. 6222 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6223 return true; 6224 6225 if (Result.getSExtValue() % Num != 0) 6226 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple) 6227 << Num << Arg->getSourceRange(); 6228 6229 return false; 6230 } 6231 6232 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a 6233 /// constant expression representing a power of 2. 6234 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) { 6235 llvm::APSInt Result; 6236 6237 // We can't check the value of a dependent argument. 6238 Expr *Arg = TheCall->getArg(ArgNum); 6239 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6240 return false; 6241 6242 // Check constant-ness first. 6243 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6244 return true; 6245 6246 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 6247 // and only if x is a power of 2. 6248 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 6249 return false; 6250 6251 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 6252 << Arg->getSourceRange(); 6253 } 6254 6255 static bool IsShiftedByte(llvm::APSInt Value) { 6256 if (Value.isNegative()) 6257 return false; 6258 6259 // Check if it's a shifted byte, by shifting it down 6260 while (true) { 6261 // If the value fits in the bottom byte, the check passes. 6262 if (Value < 0x100) 6263 return true; 6264 6265 // Otherwise, if the value has _any_ bits in the bottom byte, the check 6266 // fails. 6267 if ((Value & 0xFF) != 0) 6268 return false; 6269 6270 // If the bottom 8 bits are all 0, but something above that is nonzero, 6271 // then shifting the value right by 8 bits won't affect whether it's a 6272 // shifted byte or not. So do that, and go round again. 6273 Value >>= 8; 6274 } 6275 } 6276 6277 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 6278 /// a constant expression representing an arbitrary byte value shifted left by 6279 /// a multiple of 8 bits. 6280 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, 6281 unsigned ArgBits) { 6282 llvm::APSInt Result; 6283 6284 // We can't check the value of a dependent argument. 
6285 Expr *Arg = TheCall->getArg(ArgNum); 6286 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6287 return false; 6288 6289 // Check constant-ness first. 6290 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6291 return true; 6292 6293 // Truncate to the given size. 6294 Result = Result.getLoBits(ArgBits); 6295 Result.setIsUnsigned(true); 6296 6297 if (IsShiftedByte(Result)) 6298 return false; 6299 6300 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 6301 << Arg->getSourceRange(); 6302 } 6303 6304 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 6305 /// TheCall is a constant expression representing either a shifted byte value, 6306 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 6307 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 6308 /// Arm MVE intrinsics. 6309 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 6310 int ArgNum, 6311 unsigned ArgBits) { 6312 llvm::APSInt Result; 6313 6314 // We can't check the value of a dependent argument. 6315 Expr *Arg = TheCall->getArg(ArgNum); 6316 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6317 return false; 6318 6319 // Check constant-ness first. 6320 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6321 return true; 6322 6323 // Truncate to the given size. 6324 Result = Result.getLoBits(ArgBits); 6325 Result.setIsUnsigned(true); 6326 6327 // Check to see if it's in either of the required forms. 6328 if (IsShiftedByte(Result) || 6329 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 6330 return false; 6331 6332 return Diag(TheCall->getBeginLoc(), 6333 diag::err_argument_not_shifted_byte_or_xxff) 6334 << Arg->getSourceRange(); 6335 } 6336 6337 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 6338 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 6339 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 6340 if (checkArgCount(*this, TheCall, 2)) 6341 return true; 6342 Expr *Arg0 = TheCall->getArg(0); 6343 Expr *Arg1 = TheCall->getArg(1); 6344 6345 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6346 if (FirstArg.isInvalid()) 6347 return true; 6348 QualType FirstArgType = FirstArg.get()->getType(); 6349 if (!FirstArgType->isAnyPointerType()) 6350 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6351 << "first" << FirstArgType << Arg0->getSourceRange(); 6352 TheCall->setArg(0, FirstArg.get()); 6353 6354 ExprResult SecArg = DefaultLvalueConversion(Arg1); 6355 if (SecArg.isInvalid()) 6356 return true; 6357 QualType SecArgType = SecArg.get()->getType(); 6358 if (!SecArgType->isIntegerType()) 6359 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 6360 << "second" << SecArgType << Arg1->getSourceRange(); 6361 6362 // Derive the return type from the pointer argument. 
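    // (Hedged illustration, not from the original source: given 'int *p', a
    //  call 'q = __builtin_arm_irg(p, mask)' gets the result type 'int *'
    //  of its first argument from the line below.)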
6363 TheCall->setType(FirstArgType); 6364 return false; 6365 } 6366 6367 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 6368 if (checkArgCount(*this, TheCall, 2)) 6369 return true; 6370 6371 Expr *Arg0 = TheCall->getArg(0); 6372 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6373 if (FirstArg.isInvalid()) 6374 return true; 6375 QualType FirstArgType = FirstArg.get()->getType(); 6376 if (!FirstArgType->isAnyPointerType()) 6377 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6378 << "first" << FirstArgType << Arg0->getSourceRange(); 6379 TheCall->setArg(0, FirstArg.get()); 6380 6381 // Derive the return type from the pointer argument. 6382 TheCall->setType(FirstArgType); 6383 6384 // Second arg must be an constant in range [0,15] 6385 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6386 } 6387 6388 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 6389 if (checkArgCount(*this, TheCall, 2)) 6390 return true; 6391 Expr *Arg0 = TheCall->getArg(0); 6392 Expr *Arg1 = TheCall->getArg(1); 6393 6394 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6395 if (FirstArg.isInvalid()) 6396 return true; 6397 QualType FirstArgType = FirstArg.get()->getType(); 6398 if (!FirstArgType->isAnyPointerType()) 6399 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6400 << "first" << FirstArgType << Arg0->getSourceRange(); 6401 6402 QualType SecArgType = Arg1->getType(); 6403 if (!SecArgType->isIntegerType()) 6404 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 6405 << "second" << SecArgType << Arg1->getSourceRange(); 6406 TheCall->setType(Context.IntTy); 6407 return false; 6408 } 6409 6410 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 6411 BuiltinID == AArch64::BI__builtin_arm_stg) { 6412 if (checkArgCount(*this, TheCall, 1)) 6413 return true; 6414 Expr *Arg0 = TheCall->getArg(0); 6415 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6416 if (FirstArg.isInvalid()) 6417 return true; 6418 6419 QualType FirstArgType = FirstArg.get()->getType(); 6420 if (!FirstArgType->isAnyPointerType()) 6421 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6422 << "first" << FirstArgType << Arg0->getSourceRange(); 6423 TheCall->setArg(0, FirstArg.get()); 6424 6425 // Derive the return type from the pointer argument. 
6426 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 6427 TheCall->setType(FirstArgType); 6428 return false; 6429 } 6430 6431 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 6432 Expr *ArgA = TheCall->getArg(0); 6433 Expr *ArgB = TheCall->getArg(1); 6434 6435 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 6436 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 6437 6438 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 6439 return true; 6440 6441 QualType ArgTypeA = ArgExprA.get()->getType(); 6442 QualType ArgTypeB = ArgExprB.get()->getType(); 6443 6444 auto isNull = [&] (Expr *E) -> bool { 6445 return E->isNullPointerConstant( 6446 Context, Expr::NPC_ValueDependentIsNotNull); }; 6447 6448 // argument should be either a pointer or null 6449 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 6450 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6451 << "first" << ArgTypeA << ArgA->getSourceRange(); 6452 6453 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 6454 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6455 << "second" << ArgTypeB << ArgB->getSourceRange(); 6456 6457 // Ensure Pointee types are compatible 6458 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 6459 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 6460 QualType pointeeA = ArgTypeA->getPointeeType(); 6461 QualType pointeeB = ArgTypeB->getPointeeType(); 6462 if (!Context.typesAreCompatible( 6463 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 6464 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 6465 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 6466 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 6467 << ArgB->getSourceRange(); 6468 } 6469 } 6470 6471 // at least one argument should be pointer type 6472 if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 6473 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 6474 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 6475 6476 if (isNull(ArgA)) // adopt type of the other pointer 6477 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 6478 6479 if (isNull(ArgB)) 6480 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 6481 6482 TheCall->setArg(0, ArgExprA.get()); 6483 TheCall->setArg(1, ArgExprB.get()); 6484 TheCall->setType(Context.LongLongTy); 6485 return false; 6486 } 6487 assert(false && "Unhandled ARM MTE intrinsic"); 6488 return true; 6489 } 6490 6491 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 6492 /// TheCall is an ARM/AArch64 special register string literal. 
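/// Illustrative examples (added here, not from the original source) of the
/// string forms accepted below:
///   __builtin_arm_rsr("cp15:0:c13:c0:3")  // ARM: coprocessor encoding
///   __builtin_arm_rsr64("1:2:3:4:5")      // AArch64: op0:op1:CRn:CRm:op2
///   __builtin_arm_rsr64("tpidr_el0")      // plain register name (AllowName)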
6493 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 6494 int ArgNum, unsigned ExpectedFieldNum, 6495 bool AllowName) { 6496 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 6497 BuiltinID == ARM::BI__builtin_arm_wsr64 || 6498 BuiltinID == ARM::BI__builtin_arm_rsr || 6499 BuiltinID == ARM::BI__builtin_arm_rsrp || 6500 BuiltinID == ARM::BI__builtin_arm_wsr || 6501 BuiltinID == ARM::BI__builtin_arm_wsrp; 6502 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 6503 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 6504 BuiltinID == AArch64::BI__builtin_arm_rsr || 6505 BuiltinID == AArch64::BI__builtin_arm_rsrp || 6506 BuiltinID == AArch64::BI__builtin_arm_wsr || 6507 BuiltinID == AArch64::BI__builtin_arm_wsrp; 6508 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 6509 6510 // We can't check the value of a dependent argument. 6511 Expr *Arg = TheCall->getArg(ArgNum); 6512 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6513 return false; 6514 6515 // Check if the argument is a string literal. 6516 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 6517 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 6518 << Arg->getSourceRange(); 6519 6520 // Check the type of special register given. 6521 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 6522 SmallVector<StringRef, 6> Fields; 6523 Reg.split(Fields, ":"); 6524 6525 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 6526 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 6527 << Arg->getSourceRange(); 6528 6529 // If the string is the name of a register then we cannot check that it is 6530 // valid here but if the string is of one the forms described in ACLE then we 6531 // can check that the supplied fields are integers and within the valid 6532 // ranges. 6533 if (Fields.size() > 1) { 6534 bool FiveFields = Fields.size() == 5; 6535 6536 bool ValidString = true; 6537 if (IsARMBuiltin) { 6538 ValidString &= Fields[0].startswith_lower("cp") || 6539 Fields[0].startswith_lower("p"); 6540 if (ValidString) 6541 Fields[0] = 6542 Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1); 6543 6544 ValidString &= Fields[2].startswith_lower("c"); 6545 if (ValidString) 6546 Fields[2] = Fields[2].drop_front(1); 6547 6548 if (FiveFields) { 6549 ValidString &= Fields[3].startswith_lower("c"); 6550 if (ValidString) 6551 Fields[3] = Fields[3].drop_front(1); 6552 } 6553 } 6554 6555 SmallVector<int, 5> Ranges; 6556 if (FiveFields) 6557 Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7}); 6558 else 6559 Ranges.append({15, 7, 15}); 6560 6561 for (unsigned i=0; i<Fields.size(); ++i) { 6562 int IntField; 6563 ValidString &= !Fields[i].getAsInteger(10, IntField); 6564 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 6565 } 6566 6567 if (!ValidString) 6568 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 6569 << Arg->getSourceRange(); 6570 } else if (IsAArch64Builtin && Fields.size() == 1) { 6571 // If the register name is one of those that appear in the condition below 6572 // and the special register builtin being used is one of the write builtins, 6573 // then we require that the argument provided for writing to the register 6574 // is an integer constant expression. This is because it will be lowered to 6575 // an MSR (immediate) instruction, so we need to know the immediate at 6576 // compile time. 
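    // (Illustrative, not from the original source: __builtin_arm_wsr("spsel", 1)
    //  must be given a constant in [0, 15] because it is emitted as an
    //  MSR-immediate form such as 'msr SPSel, #1'.)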
6577 if (TheCall->getNumArgs() != 2) 6578 return false; 6579 6580 std::string RegLower = Reg.lower(); 6581 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 6582 RegLower != "pan" && RegLower != "uao") 6583 return false; 6584 6585 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6586 } 6587 6588 return false; 6589 } 6590 6591 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 6592 /// This checks that the target supports __builtin_longjmp and 6593 /// that val is a constant 1. 6594 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 6595 if (!Context.getTargetInfo().hasSjLjLowering()) 6596 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 6597 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6598 6599 Expr *Arg = TheCall->getArg(1); 6600 llvm::APSInt Result; 6601 6602 // TODO: This is less than ideal. Overload this to take a value. 6603 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6604 return true; 6605 6606 if (Result != 1) 6607 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 6608 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 6609 6610 return false; 6611 } 6612 6613 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 6614 /// This checks that the target supports __builtin_setjmp. 6615 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 6616 if (!Context.getTargetInfo().hasSjLjLowering()) 6617 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 6618 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6619 return false; 6620 } 6621 6622 namespace { 6623 6624 class UncoveredArgHandler { 6625 enum { Unknown = -1, AllCovered = -2 }; 6626 6627 signed FirstUncoveredArg = Unknown; 6628 SmallVector<const Expr *, 4> DiagnosticExprs; 6629 6630 public: 6631 UncoveredArgHandler() = default; 6632 6633 bool hasUncoveredArg() const { 6634 return (FirstUncoveredArg >= 0); 6635 } 6636 6637 unsigned getUncoveredArg() const { 6638 assert(hasUncoveredArg() && "no uncovered argument"); 6639 return FirstUncoveredArg; 6640 } 6641 6642 void setAllCovered() { 6643 // A string has been found with all arguments covered, so clear out 6644 // the diagnostics. 6645 DiagnosticExprs.clear(); 6646 FirstUncoveredArg = AllCovered; 6647 } 6648 6649 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 6650 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 6651 6652 // Don't update if a previous string covers all arguments. 6653 if (FirstUncoveredArg == AllCovered) 6654 return; 6655 6656 // UncoveredArgHandler tracks the highest uncovered argument index 6657 // and with it all the strings that match this index. 6658 if (NewFirstUncoveredArg == FirstUncoveredArg) 6659 DiagnosticExprs.push_back(StrExpr); 6660 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 6661 DiagnosticExprs.clear(); 6662 DiagnosticExprs.push_back(StrExpr); 6663 FirstUncoveredArg = NewFirstUncoveredArg; 6664 } 6665 } 6666 6667 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 6668 }; 6669 6670 enum StringLiteralCheckType { 6671 SLCT_NotALiteral, 6672 SLCT_UncheckedLiteral, 6673 SLCT_CheckedLiteral 6674 }; 6675 6676 } // namespace 6677 6678 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 6679 BinaryOperatorKind BinOpKind, 6680 bool AddendIsRight) { 6681 unsigned BitWidth = Offset.getBitWidth(); 6682 unsigned AddendBitWidth = Addend.getBitWidth(); 6683 // There might be negative interim results. 
6684 if (Addend.isUnsigned()) { 6685 Addend = Addend.zext(++AddendBitWidth); 6686 Addend.setIsSigned(true); 6687 } 6688 // Adjust the bit width of the APSInts. 6689 if (AddendBitWidth > BitWidth) { 6690 Offset = Offset.sext(AddendBitWidth); 6691 BitWidth = AddendBitWidth; 6692 } else if (BitWidth > AddendBitWidth) { 6693 Addend = Addend.sext(BitWidth); 6694 } 6695 6696 bool Ov = false; 6697 llvm::APSInt ResOffset = Offset; 6698 if (BinOpKind == BO_Add) 6699 ResOffset = Offset.sadd_ov(Addend, Ov); 6700 else { 6701 assert(AddendIsRight && BinOpKind == BO_Sub && 6702 "operator must be add or sub with addend on the right"); 6703 ResOffset = Offset.ssub_ov(Addend, Ov); 6704 } 6705 6706 // We add an offset to a pointer here so we should support an offset as big as 6707 // possible. 6708 if (Ov) { 6709 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 6710 "index (intermediate) result too big"); 6711 Offset = Offset.sext(2 * BitWidth); 6712 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 6713 return; 6714 } 6715 6716 Offset = ResOffset; 6717 } 6718 6719 namespace { 6720 6721 // This is a wrapper class around StringLiteral to support offsetted string 6722 // literals as format strings. It takes the offset into account when returning 6723 // the string and its length or the source locations to display notes correctly. 6724 class FormatStringLiteral { 6725 const StringLiteral *FExpr; 6726 int64_t Offset; 6727 6728 public: 6729 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 6730 : FExpr(fexpr), Offset(Offset) {} 6731 6732 StringRef getString() const { 6733 return FExpr->getString().drop_front(Offset); 6734 } 6735 6736 unsigned getByteLength() const { 6737 return FExpr->getByteLength() - getCharByteWidth() * Offset; 6738 } 6739 6740 unsigned getLength() const { return FExpr->getLength() - Offset; } 6741 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 6742 6743 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 6744 6745 QualType getType() const { return FExpr->getType(); } 6746 6747 bool isAscii() const { return FExpr->isAscii(); } 6748 bool isWide() const { return FExpr->isWide(); } 6749 bool isUTF8() const { return FExpr->isUTF8(); } 6750 bool isUTF16() const { return FExpr->isUTF16(); } 6751 bool isUTF32() const { return FExpr->isUTF32(); } 6752 bool isPascal() const { return FExpr->isPascal(); } 6753 6754 SourceLocation getLocationOfByte( 6755 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 6756 const TargetInfo &Target, unsigned *StartToken = nullptr, 6757 unsigned *StartTokenByteOffset = nullptr) const { 6758 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 6759 StartToken, StartTokenByteOffset); 6760 } 6761 6762 SourceLocation getBeginLoc() const LLVM_READONLY { 6763 return FExpr->getBeginLoc().getLocWithOffset(Offset); 6764 } 6765 6766 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 6767 }; 6768 6769 } // namespace 6770 6771 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 6772 const Expr *OrigFormatExpr, 6773 ArrayRef<const Expr *> Args, 6774 bool HasVAListArg, unsigned format_idx, 6775 unsigned firstDataArg, 6776 Sema::FormatStringType Type, 6777 bool inFunctionCall, 6778 Sema::VariadicCallType CallType, 6779 llvm::SmallBitVector &CheckedVarArgs, 6780 UncoveredArgHandler &UncoveredArg, 6781 bool IgnoreStringsWithoutSpecifiers); 6782 6783 // Determine if an expression is a string literal or constant string. 
6784 // If this function returns false on the arguments to a function expecting a 6785 // format string, we will usually need to emit a warning. 6786 // True string literals are then checked by CheckFormatString. 6787 static StringLiteralCheckType 6788 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 6789 bool HasVAListArg, unsigned format_idx, 6790 unsigned firstDataArg, Sema::FormatStringType Type, 6791 Sema::VariadicCallType CallType, bool InFunctionCall, 6792 llvm::SmallBitVector &CheckedVarArgs, 6793 UncoveredArgHandler &UncoveredArg, 6794 llvm::APSInt Offset, 6795 bool IgnoreStringsWithoutSpecifiers = false) { 6796 if (S.isConstantEvaluated()) 6797 return SLCT_NotALiteral; 6798 tryAgain: 6799 assert(Offset.isSigned() && "invalid offset"); 6800 6801 if (E->isTypeDependent() || E->isValueDependent()) 6802 return SLCT_NotALiteral; 6803 6804 E = E->IgnoreParenCasts(); 6805 6806 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 6807 // Technically -Wformat-nonliteral does not warn about this case. 6808 // The behavior of printf and friends in this case is implementation 6809 // dependent. Ideally if the format string cannot be null then 6810 // it should have a 'nonnull' attribute in the function prototype. 6811 return SLCT_UncheckedLiteral; 6812 6813 switch (E->getStmtClass()) { 6814 case Stmt::BinaryConditionalOperatorClass: 6815 case Stmt::ConditionalOperatorClass: { 6816 // The expression is a literal if both sub-expressions were, and it was 6817 // completely checked only if both sub-expressions were checked. 6818 const AbstractConditionalOperator *C = 6819 cast<AbstractConditionalOperator>(E); 6820 6821 // Determine whether it is necessary to check both sub-expressions, for 6822 // example, because the condition expression is a constant that can be 6823 // evaluated at compile time. 6824 bool CheckLeft = true, CheckRight = true; 6825 6826 bool Cond; 6827 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 6828 S.isConstantEvaluated())) { 6829 if (Cond) 6830 CheckRight = false; 6831 else 6832 CheckLeft = false; 6833 } 6834 6835 // We need to maintain the offsets for the right and the left hand side 6836 // separately to check if every possible indexed expression is a valid 6837 // string literal. They might have different offsets for different string 6838 // literals in the end. 6839 StringLiteralCheckType Left; 6840 if (!CheckLeft) 6841 Left = SLCT_UncheckedLiteral; 6842 else { 6843 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 6844 HasVAListArg, format_idx, firstDataArg, 6845 Type, CallType, InFunctionCall, 6846 CheckedVarArgs, UncoveredArg, Offset, 6847 IgnoreStringsWithoutSpecifiers); 6848 if (Left == SLCT_NotALiteral || !CheckRight) { 6849 return Left; 6850 } 6851 } 6852 6853 StringLiteralCheckType Right = checkFormatStringExpr( 6854 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 6855 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 6856 IgnoreStringsWithoutSpecifiers); 6857 6858 return (CheckLeft && Left < Right) ? 
Left : Right; 6859 } 6860 6861 case Stmt::ImplicitCastExprClass: 6862 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 6863 goto tryAgain; 6864 6865 case Stmt::OpaqueValueExprClass: 6866 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 6867 E = src; 6868 goto tryAgain; 6869 } 6870 return SLCT_NotALiteral; 6871 6872 case Stmt::PredefinedExprClass: 6873 // While __func__, etc., are technically not string literals, they 6874 // cannot contain format specifiers and thus are not a security 6875 // liability. 6876 return SLCT_UncheckedLiteral; 6877 6878 case Stmt::DeclRefExprClass: { 6879 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 6880 6881 // As an exception, do not flag errors for variables binding to 6882 // const string literals. 6883 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 6884 bool isConstant = false; 6885 QualType T = DR->getType(); 6886 6887 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 6888 isConstant = AT->getElementType().isConstant(S.Context); 6889 } else if (const PointerType *PT = T->getAs<PointerType>()) { 6890 isConstant = T.isConstant(S.Context) && 6891 PT->getPointeeType().isConstant(S.Context); 6892 } else if (T->isObjCObjectPointerType()) { 6893 // In ObjC, there is usually no "const ObjectPointer" type, 6894 // so don't check if the pointee type is constant. 6895 isConstant = T.isConstant(S.Context); 6896 } 6897 6898 if (isConstant) { 6899 if (const Expr *Init = VD->getAnyInitializer()) { 6900 // Look through initializers like const char c[] = { "foo" } 6901 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 6902 if (InitList->isStringLiteralInit()) 6903 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 6904 } 6905 return checkFormatStringExpr(S, Init, Args, 6906 HasVAListArg, format_idx, 6907 firstDataArg, Type, CallType, 6908 /*InFunctionCall*/ false, CheckedVarArgs, 6909 UncoveredArg, Offset); 6910 } 6911 } 6912 6913 // For vprintf* functions (i.e., HasVAListArg==true), we add a 6914 // special check to see if the format string is a function parameter 6915 // of the function calling the printf function. If the function 6916 // has an attribute indicating it is a printf-like function, then we 6917 // should suppress warnings concerning non-literals being used in a call 6918 // to a vprintf function. For example: 6919 // 6920 // void 6921 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 6922 // va_list ap; 6923 // va_start(ap, fmt); 6924 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 6925 // ... 6926 // } 6927 if (HasVAListArg) { 6928 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 6929 if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) { 6930 int PVIndex = PV->getFunctionScopeIndex() + 1; 6931 for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) { 6932 // adjust for implicit parameter 6933 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) 6934 if (MD->isInstance()) 6935 ++PVIndex; 6936 // We also check if the formats are compatible. 6937 // We can't pass a 'scanf' string to a 'printf' function. 
6938 if (PVIndex == PVFormat->getFormatIdx() && 6939 Type == S.GetFormatStringType(PVFormat)) 6940 return SLCT_UncheckedLiteral; 6941 } 6942 } 6943 } 6944 } 6945 } 6946 6947 return SLCT_NotALiteral; 6948 } 6949 6950 case Stmt::CallExprClass: 6951 case Stmt::CXXMemberCallExprClass: { 6952 const CallExpr *CE = cast<CallExpr>(E); 6953 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 6954 bool IsFirst = true; 6955 StringLiteralCheckType CommonResult; 6956 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 6957 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 6958 StringLiteralCheckType Result = checkFormatStringExpr( 6959 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 6960 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 6961 IgnoreStringsWithoutSpecifiers); 6962 if (IsFirst) { 6963 CommonResult = Result; 6964 IsFirst = false; 6965 } 6966 } 6967 if (!IsFirst) 6968 return CommonResult; 6969 6970 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 6971 unsigned BuiltinID = FD->getBuiltinID(); 6972 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 6973 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 6974 const Expr *Arg = CE->getArg(0); 6975 return checkFormatStringExpr(S, Arg, Args, 6976 HasVAListArg, format_idx, 6977 firstDataArg, Type, CallType, 6978 InFunctionCall, CheckedVarArgs, 6979 UncoveredArg, Offset, 6980 IgnoreStringsWithoutSpecifiers); 6981 } 6982 } 6983 } 6984 6985 return SLCT_NotALiteral; 6986 } 6987 case Stmt::ObjCMessageExprClass: { 6988 const auto *ME = cast<ObjCMessageExpr>(E); 6989 if (const auto *MD = ME->getMethodDecl()) { 6990 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 6991 // As a special case heuristic, if we're using the method -[NSBundle 6992 // localizedStringForKey:value:table:], ignore any key strings that lack 6993 // format specifiers. The idea is that if the key doesn't have any 6994 // format specifiers then its probably just a key to map to the 6995 // localized strings. If it does have format specifiers though, then its 6996 // likely that the text of the key is the format string in the 6997 // programmer's language, and should be checked. 6998 const ObjCInterfaceDecl *IFace; 6999 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) && 7000 IFace->getIdentifier()->isStr("NSBundle") && 7001 MD->getSelector().isKeywordSelector( 7002 {"localizedStringForKey", "value", "table"})) { 7003 IgnoreStringsWithoutSpecifiers = true; 7004 } 7005 7006 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex()); 7007 return checkFormatStringExpr( 7008 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 7009 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 7010 IgnoreStringsWithoutSpecifiers); 7011 } 7012 } 7013 7014 return SLCT_NotALiteral; 7015 } 7016 case Stmt::ObjCStringLiteralClass: 7017 case Stmt::StringLiteralClass: { 7018 const StringLiteral *StrE = nullptr; 7019 7020 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) 7021 StrE = ObjCFExpr->getString(); 7022 else 7023 StrE = cast<StringLiteral>(E); 7024 7025 if (StrE) { 7026 if (Offset.isNegative() || Offset > StrE->getLength()) { 7027 // TODO: It would be better to have an explicit warning for out of 7028 // bounds literals. 
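        // (Hedged example, not from the original source: in a call like
        //  printf("%d%d" + 10, x, y) the constant offset points past the end
        //  of the literal, so the expression is treated as a non-literal
        //  format string here.)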
7029 return SLCT_NotALiteral; 7030 } 7031 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 7032 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx, 7033 firstDataArg, Type, InFunctionCall, CallType, 7034 CheckedVarArgs, UncoveredArg, 7035 IgnoreStringsWithoutSpecifiers); 7036 return SLCT_CheckedLiteral; 7037 } 7038 7039 return SLCT_NotALiteral; 7040 } 7041 case Stmt::BinaryOperatorClass: { 7042 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 7043 7044 // A string literal + an int offset is still a string literal. 7045 if (BinOp->isAdditiveOp()) { 7046 Expr::EvalResult LResult, RResult; 7047 7048 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 7049 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7050 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 7051 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7052 7053 if (LIsInt != RIsInt) { 7054 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 7055 7056 if (LIsInt) { 7057 if (BinOpKind == BO_Add) { 7058 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 7059 E = BinOp->getRHS(); 7060 goto tryAgain; 7061 } 7062 } else { 7063 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 7064 E = BinOp->getLHS(); 7065 goto tryAgain; 7066 } 7067 } 7068 } 7069 7070 return SLCT_NotALiteral; 7071 } 7072 case Stmt::UnaryOperatorClass: { 7073 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 7074 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 7075 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 7076 Expr::EvalResult IndexResult; 7077 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 7078 Expr::SE_NoSideEffects, 7079 S.isConstantEvaluated())) { 7080 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 7081 /*RHS is int*/ true); 7082 E = ASE->getBase(); 7083 goto tryAgain; 7084 } 7085 } 7086 7087 return SLCT_NotALiteral; 7088 } 7089 7090 default: 7091 return SLCT_NotALiteral; 7092 } 7093 } 7094 7095 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 7096 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 7097 .Case("scanf", FST_Scanf) 7098 .Cases("printf", "printf0", "syslog", FST_Printf) 7099 .Cases("NSString", "CFString", FST_NSString) 7100 .Case("strftime", FST_Strftime) 7101 .Case("strfmon", FST_Strfmon) 7102 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 7103 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 7104 .Case("os_trace", FST_OSLog) 7105 .Case("os_log", FST_OSLog) 7106 .Default(FST_Unknown); 7107 } 7108 7109 /// CheckFormatArguments - Check calls to printf and scanf (and similar 7110 /// functions) for correct use of format strings. 7111 /// Returns true if a format string has been fully checked. 
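/// A minimal sketch of the behaviour (an added note, not from the original
/// source): for printf("%s", 42) a literal is found and fully checked, so
/// this returns true; for printf(fmt, 42) with a non-literal 'fmt' no literal
/// is found, a -Wformat-nonliteral (or -Wformat-security) warning may be
/// emitted, and this returns false.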
7112 bool Sema::CheckFormatArguments(const FormatAttr *Format, 7113 ArrayRef<const Expr *> Args, 7114 bool IsCXXMember, 7115 VariadicCallType CallType, 7116 SourceLocation Loc, SourceRange Range, 7117 llvm::SmallBitVector &CheckedVarArgs) { 7118 FormatStringInfo FSI; 7119 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 7120 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 7121 FSI.FirstDataArg, GetFormatStringType(Format), 7122 CallType, Loc, Range, CheckedVarArgs); 7123 return false; 7124 } 7125 7126 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 7127 bool HasVAListArg, unsigned format_idx, 7128 unsigned firstDataArg, FormatStringType Type, 7129 VariadicCallType CallType, 7130 SourceLocation Loc, SourceRange Range, 7131 llvm::SmallBitVector &CheckedVarArgs) { 7132 // CHECK: printf/scanf-like function is called with no format string. 7133 if (format_idx >= Args.size()) { 7134 Diag(Loc, diag::warn_missing_format_string) << Range; 7135 return false; 7136 } 7137 7138 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 7139 7140 // CHECK: format string is not a string literal. 7141 // 7142 // Dynamically generated format strings are difficult to 7143 // automatically vet at compile time. Requiring that format strings 7144 // are string literals: (1) permits the checking of format strings by 7145 // the compiler and thereby (2) can practically remove the source of 7146 // many format string exploits. 7147 7148 // Format string can be either ObjC string (e.g. @"%d") or 7149 // C string (e.g. "%d") 7150 // ObjC string uses the same format specifiers as C string, so we can use 7151 // the same format string checking logic for both ObjC and C strings. 7152 UncoveredArgHandler UncoveredArg; 7153 StringLiteralCheckType CT = 7154 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 7155 format_idx, firstDataArg, Type, CallType, 7156 /*IsFunctionCall*/ true, CheckedVarArgs, 7157 UncoveredArg, 7158 /*no string offset*/ llvm::APSInt(64, false) = 0); 7159 7160 // Generate a diagnostic where an uncovered argument is detected. 7161 if (UncoveredArg.hasUncoveredArg()) { 7162 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 7163 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 7164 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 7165 } 7166 7167 if (CT != SLCT_NotALiteral) 7168 // Literal format string found, check done! 7169 return CT == SLCT_CheckedLiteral; 7170 7171 // Strftime is particular as it always uses a single 'time' argument, 7172 // so it is safe to pass a non-literal string. 7173 if (Type == FST_Strftime) 7174 return false; 7175 7176 // Do not emit diag when the string param is a macro expansion and the 7177 // format is either NSString or CFString. This is a hack to prevent 7178 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 7179 // which are usually used in place of NS and CF string literals. 7180 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 7181 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 7182 return false; 7183 7184 // If there are no arguments specified, warn with -Wformat-security, otherwise 7185 // warn only with -Wformat-nonliteral. 
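  // (Illustrative, not from the original source: a call printf(s) lands in
  //  the first branch and additionally gets a fix-it rewriting it to
  //  printf("%s", s), while printf(s, x) only reaches the
  //  -Wformat-nonliteral branch below.)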
7186 if (Args.size() == firstDataArg) { 7187 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 7188 << OrigFormatExpr->getSourceRange(); 7189 switch (Type) { 7190 default: 7191 break; 7192 case FST_Kprintf: 7193 case FST_FreeBSDKPrintf: 7194 case FST_Printf: 7195 case FST_Syslog: 7196 Diag(FormatLoc, diag::note_format_security_fixit) 7197 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 7198 break; 7199 case FST_NSString: 7200 Diag(FormatLoc, diag::note_format_security_fixit) 7201 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 7202 break; 7203 } 7204 } else { 7205 Diag(FormatLoc, diag::warn_format_nonliteral) 7206 << OrigFormatExpr->getSourceRange(); 7207 } 7208 return false; 7209 } 7210 7211 namespace { 7212 7213 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 7214 protected: 7215 Sema &S; 7216 const FormatStringLiteral *FExpr; 7217 const Expr *OrigFormatExpr; 7218 const Sema::FormatStringType FSType; 7219 const unsigned FirstDataArg; 7220 const unsigned NumDataArgs; 7221 const char *Beg; // Start of format string. 7222 const bool HasVAListArg; 7223 ArrayRef<const Expr *> Args; 7224 unsigned FormatIdx; 7225 llvm::SmallBitVector CoveredArgs; 7226 bool usesPositionalArgs = false; 7227 bool atFirstArg = true; 7228 bool inFunctionCall; 7229 Sema::VariadicCallType CallType; 7230 llvm::SmallBitVector &CheckedVarArgs; 7231 UncoveredArgHandler &UncoveredArg; 7232 7233 public: 7234 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 7235 const Expr *origFormatExpr, 7236 const Sema::FormatStringType type, unsigned firstDataArg, 7237 unsigned numDataArgs, const char *beg, bool hasVAListArg, 7238 ArrayRef<const Expr *> Args, unsigned formatIdx, 7239 bool inFunctionCall, Sema::VariadicCallType callType, 7240 llvm::SmallBitVector &CheckedVarArgs, 7241 UncoveredArgHandler &UncoveredArg) 7242 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 7243 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 7244 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 7245 inFunctionCall(inFunctionCall), CallType(callType), 7246 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 7247 CoveredArgs.resize(numDataArgs); 7248 CoveredArgs.reset(); 7249 } 7250 7251 void DoneProcessing(); 7252 7253 void HandleIncompleteSpecifier(const char *startSpecifier, 7254 unsigned specifierLen) override; 7255 7256 void HandleInvalidLengthModifier( 7257 const analyze_format_string::FormatSpecifier &FS, 7258 const analyze_format_string::ConversionSpecifier &CS, 7259 const char *startSpecifier, unsigned specifierLen, 7260 unsigned DiagID); 7261 7262 void HandleNonStandardLengthModifier( 7263 const analyze_format_string::FormatSpecifier &FS, 7264 const char *startSpecifier, unsigned specifierLen); 7265 7266 void HandleNonStandardConversionSpecifier( 7267 const analyze_format_string::ConversionSpecifier &CS, 7268 const char *startSpecifier, unsigned specifierLen); 7269 7270 void HandlePosition(const char *startPos, unsigned posLen) override; 7271 7272 void HandleInvalidPosition(const char *startSpecifier, 7273 unsigned specifierLen, 7274 analyze_format_string::PositionContext p) override; 7275 7276 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 7277 7278 void HandleNullChar(const char *nullCharacter) override; 7279 7280 template <typename Range> 7281 static void 7282 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 7283 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 7284 
bool IsStringLocation, Range StringRange, 7285 ArrayRef<FixItHint> Fixit = None); 7286 7287 protected: 7288 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 7289 const char *startSpec, 7290 unsigned specifierLen, 7291 const char *csStart, unsigned csLen); 7292 7293 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 7294 const char *startSpec, 7295 unsigned specifierLen); 7296 7297 SourceRange getFormatStringRange(); 7298 CharSourceRange getSpecifierRange(const char *startSpecifier, 7299 unsigned specifierLen); 7300 SourceLocation getLocationOfByte(const char *x); 7301 7302 const Expr *getDataArg(unsigned i) const; 7303 7304 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 7305 const analyze_format_string::ConversionSpecifier &CS, 7306 const char *startSpecifier, unsigned specifierLen, 7307 unsigned argIndex); 7308 7309 template <typename Range> 7310 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 7311 bool IsStringLocation, Range StringRange, 7312 ArrayRef<FixItHint> Fixit = None); 7313 }; 7314 7315 } // namespace 7316 7317 SourceRange CheckFormatHandler::getFormatStringRange() { 7318 return OrigFormatExpr->getSourceRange(); 7319 } 7320 7321 CharSourceRange CheckFormatHandler:: 7322 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 7323 SourceLocation Start = getLocationOfByte(startSpecifier); 7324 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 7325 7326 // Advance the end SourceLocation by one due to half-open ranges. 7327 End = End.getLocWithOffset(1); 7328 7329 return CharSourceRange::getCharRange(Start, End); 7330 } 7331 7332 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 7333 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 7334 S.getLangOpts(), S.Context.getTargetInfo()); 7335 } 7336 7337 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 7338 unsigned specifierLen){ 7339 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 7340 getLocationOfByte(startSpecifier), 7341 /*IsStringLocation*/true, 7342 getSpecifierRange(startSpecifier, specifierLen)); 7343 } 7344 7345 void CheckFormatHandler::HandleInvalidLengthModifier( 7346 const analyze_format_string::FormatSpecifier &FS, 7347 const analyze_format_string::ConversionSpecifier &CS, 7348 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 7349 using namespace analyze_format_string; 7350 7351 const LengthModifier &LM = FS.getLengthModifier(); 7352 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7353 7354 // See if we know how to fix this length modifier. 
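  // For example (illustrative), a specifier such as "%hf" pairs a length
  // modifier with a conversion it does not apply to.  When a corrected
  // modifier is known we additionally emit a note with a replacement fixit;
  // otherwise, for the nonsensical-length diagnostic, we only offer to remove
  // the modifier.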
7355 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7356 if (FixedLM) { 7357 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7358 getLocationOfByte(LM.getStart()), 7359 /*IsStringLocation*/true, 7360 getSpecifierRange(startSpecifier, specifierLen)); 7361 7362 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7363 << FixedLM->toString() 7364 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7365 7366 } else { 7367 FixItHint Hint; 7368 if (DiagID == diag::warn_format_nonsensical_length) 7369 Hint = FixItHint::CreateRemoval(LMRange); 7370 7371 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7372 getLocationOfByte(LM.getStart()), 7373 /*IsStringLocation*/true, 7374 getSpecifierRange(startSpecifier, specifierLen), 7375 Hint); 7376 } 7377 } 7378 7379 void CheckFormatHandler::HandleNonStandardLengthModifier( 7380 const analyze_format_string::FormatSpecifier &FS, 7381 const char *startSpecifier, unsigned specifierLen) { 7382 using namespace analyze_format_string; 7383 7384 const LengthModifier &LM = FS.getLengthModifier(); 7385 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7386 7387 // See if we know how to fix this length modifier. 7388 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7389 if (FixedLM) { 7390 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7391 << LM.toString() << 0, 7392 getLocationOfByte(LM.getStart()), 7393 /*IsStringLocation*/true, 7394 getSpecifierRange(startSpecifier, specifierLen)); 7395 7396 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7397 << FixedLM->toString() 7398 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7399 7400 } else { 7401 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7402 << LM.toString() << 0, 7403 getLocationOfByte(LM.getStart()), 7404 /*IsStringLocation*/true, 7405 getSpecifierRange(startSpecifier, specifierLen)); 7406 } 7407 } 7408 7409 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 7410 const analyze_format_string::ConversionSpecifier &CS, 7411 const char *startSpecifier, unsigned specifierLen) { 7412 using namespace analyze_format_string; 7413 7414 // See if we know how to fix this conversion specifier. 
7415 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 7416 if (FixedCS) { 7417 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7418 << CS.toString() << /*conversion specifier*/1, 7419 getLocationOfByte(CS.getStart()), 7420 /*IsStringLocation*/true, 7421 getSpecifierRange(startSpecifier, specifierLen)); 7422 7423 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 7424 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 7425 << FixedCS->toString() 7426 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 7427 } else { 7428 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7429 << CS.toString() << /*conversion specifier*/1, 7430 getLocationOfByte(CS.getStart()), 7431 /*IsStringLocation*/true, 7432 getSpecifierRange(startSpecifier, specifierLen)); 7433 } 7434 } 7435 7436 void CheckFormatHandler::HandlePosition(const char *startPos, 7437 unsigned posLen) { 7438 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 7439 getLocationOfByte(startPos), 7440 /*IsStringLocation*/true, 7441 getSpecifierRange(startPos, posLen)); 7442 } 7443 7444 void 7445 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 7446 analyze_format_string::PositionContext p) { 7447 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 7448 << (unsigned) p, 7449 getLocationOfByte(startPos), /*IsStringLocation*/true, 7450 getSpecifierRange(startPos, posLen)); 7451 } 7452 7453 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 7454 unsigned posLen) { 7455 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 7456 getLocationOfByte(startPos), 7457 /*IsStringLocation*/true, 7458 getSpecifierRange(startPos, posLen)); 7459 } 7460 7461 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 7462 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 7463 // The presence of a null character is likely an error. 7464 EmitFormatDiagnostic( 7465 S.PDiag(diag::warn_printf_format_string_contains_null_char), 7466 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 7467 getFormatStringRange()); 7468 } 7469 } 7470 7471 // Note that this may return NULL if there was an error parsing or building 7472 // one of the argument expressions. 7473 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 7474 return Args[FirstDataArg + i]; 7475 } 7476 7477 void CheckFormatHandler::DoneProcessing() { 7478 // Does the number of data arguments exceed the number of 7479 // format conversions in the format string? 7480 if (!HasVAListArg) { 7481 // Find any arguments that weren't covered. 
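    // e.g. (illustrative) printf("%d\n", x, y): 'y' is never consumed by any
    // specifier, so it is recorded here and later reported as an unused data
    // argument.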
7482     CoveredArgs.flip();
7483     signed notCoveredArg = CoveredArgs.find_first();
7484     if (notCoveredArg >= 0) {
7485       assert((unsigned)notCoveredArg < NumDataArgs);
7486       UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
7487     } else {
7488       UncoveredArg.setAllCovered();
7489     }
7490   }
7491 }
7492
7493 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
7494                                    const Expr *ArgExpr) {
7495   assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
7496          "Invalid state");
7497
7498   if (!ArgExpr)
7499     return;
7500
7501   SourceLocation Loc = ArgExpr->getBeginLoc();
7502
7503   if (S.getSourceManager().isInSystemMacro(Loc))
7504     return;
7505
7506   PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
7507   for (auto E : DiagnosticExprs)
7508     PDiag << E->getSourceRange();
7509
7510   CheckFormatHandler::EmitFormatDiagnostic(
7511                                   S, IsFunctionCall, DiagnosticExprs[0],
7512                                   PDiag, Loc, /*IsStringLocation*/false,
7513                                   DiagnosticExprs[0]->getSourceRange());
7514 }
7515
7516 bool
7517 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
7518                                                      SourceLocation Loc,
7519                                                      const char *startSpec,
7520                                                      unsigned specifierLen,
7521                                                      const char *csStart,
7522                                                      unsigned csLen) {
7523   bool keepGoing = true;
7524   if (argIndex < NumDataArgs) {
7525     // Consider the argument covered, even though the specifier doesn't
7526     // make sense.
7527     CoveredArgs.set(argIndex);
7528   }
7529   else {
7530     // If argIndex exceeds the number of data arguments we
7531     // don't issue a warning because that is just a cascade of warnings (and
7532     // they may have intended '%%' anyway). We don't want to continue processing
7533     // the format string after this point, however, as we will likely just get
7534     // gibberish when trying to match arguments.
7535     keepGoing = false;
7536   }
7537
7538   StringRef Specifier(csStart, csLen);
7539
7540   // If the specifier is non-printable, it could be the first byte of a UTF-8
7541   // sequence. In that case, print the UTF-8 code point. If not, print the byte
7542   // hex value.
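  // For example (illustrative), a stray '€' (U+20AC) after '%' is shown as
  // "\u20ac" rather than as raw bytes, while an unrecognized non-printable
  // byte is shown as "\xNN".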
7543 std::string CodePointStr; 7544 if (!llvm::sys::locale::isPrint(*csStart)) { 7545 llvm::UTF32 CodePoint; 7546 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 7547 const llvm::UTF8 *E = 7548 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 7549 llvm::ConversionResult Result = 7550 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 7551 7552 if (Result != llvm::conversionOK) { 7553 unsigned char FirstChar = *csStart; 7554 CodePoint = (llvm::UTF32)FirstChar; 7555 } 7556 7557 llvm::raw_string_ostream OS(CodePointStr); 7558 if (CodePoint < 256) 7559 OS << "\\x" << llvm::format("%02x", CodePoint); 7560 else if (CodePoint <= 0xFFFF) 7561 OS << "\\u" << llvm::format("%04x", CodePoint); 7562 else 7563 OS << "\\U" << llvm::format("%08x", CodePoint); 7564 OS.flush(); 7565 Specifier = CodePointStr; 7566 } 7567 7568 EmitFormatDiagnostic( 7569 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 7570 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 7571 7572 return keepGoing; 7573 } 7574 7575 void 7576 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 7577 const char *startSpec, 7578 unsigned specifierLen) { 7579 EmitFormatDiagnostic( 7580 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 7581 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 7582 } 7583 7584 bool 7585 CheckFormatHandler::CheckNumArgs( 7586 const analyze_format_string::FormatSpecifier &FS, 7587 const analyze_format_string::ConversionSpecifier &CS, 7588 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 7589 7590 if (argIndex >= NumDataArgs) { 7591 PartialDiagnostic PDiag = FS.usesPositionalArg() 7592 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 7593 << (argIndex+1) << NumDataArgs) 7594 : S.PDiag(diag::warn_printf_insufficient_data_args); 7595 EmitFormatDiagnostic( 7596 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 7597 getSpecifierRange(startSpecifier, specifierLen)); 7598 7599 // Since more arguments than conversion tokens are given, by extension 7600 // all arguments are covered, so mark this as so. 7601 UncoveredArg.setAllCovered(); 7602 return false; 7603 } 7604 return true; 7605 } 7606 7607 template<typename Range> 7608 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 7609 SourceLocation Loc, 7610 bool IsStringLocation, 7611 Range StringRange, 7612 ArrayRef<FixItHint> FixIt) { 7613 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 7614 Loc, IsStringLocation, StringRange, FixIt); 7615 } 7616 7617 /// If the format string is not within the function call, emit a note 7618 /// so that the function call and string are in diagnostic messages. 7619 /// 7620 /// \param InFunctionCall if true, the format string is within the function 7621 /// call and only one diagnostic message will be produced. Otherwise, an 7622 /// extra note will be emitted pointing to location of the format string. 7623 /// 7624 /// \param ArgumentExpr the expression that is passed as the format string 7625 /// argument in the function call. Used for getting locations when two 7626 /// diagnostics are emitted. 7627 /// 7628 /// \param PDiag the callee should already have provided any strings for the 7629 /// diagnostic message. This function only adds locations and fixits 7630 /// to diagnostics. 7631 /// 7632 /// \param Loc primary location for diagnostic. 
If two diagnostics are 7633 /// required, one will be at Loc and a new SourceLocation will be created for 7634 /// the other one. 7635 /// 7636 /// \param IsStringLocation if true, Loc points to the format string should be 7637 /// used for the note. Otherwise, Loc points to the argument list and will 7638 /// be used with PDiag. 7639 /// 7640 /// \param StringRange some or all of the string to highlight. This is 7641 /// templated so it can accept either a CharSourceRange or a SourceRange. 7642 /// 7643 /// \param FixIt optional fix it hint for the format string. 7644 template <typename Range> 7645 void CheckFormatHandler::EmitFormatDiagnostic( 7646 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 7647 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 7648 Range StringRange, ArrayRef<FixItHint> FixIt) { 7649 if (InFunctionCall) { 7650 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 7651 D << StringRange; 7652 D << FixIt; 7653 } else { 7654 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 7655 << ArgumentExpr->getSourceRange(); 7656 7657 const Sema::SemaDiagnosticBuilder &Note = 7658 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), 7659 diag::note_format_string_defined); 7660 7661 Note << StringRange; 7662 Note << FixIt; 7663 } 7664 } 7665 7666 //===--- CHECK: Printf format string checking ------------------------------===// 7667 7668 namespace { 7669 7670 class CheckPrintfHandler : public CheckFormatHandler { 7671 public: 7672 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 7673 const Expr *origFormatExpr, 7674 const Sema::FormatStringType type, unsigned firstDataArg, 7675 unsigned numDataArgs, bool isObjC, const char *beg, 7676 bool hasVAListArg, ArrayRef<const Expr *> Args, 7677 unsigned formatIdx, bool inFunctionCall, 7678 Sema::VariadicCallType CallType, 7679 llvm::SmallBitVector &CheckedVarArgs, 7680 UncoveredArgHandler &UncoveredArg) 7681 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 7682 numDataArgs, beg, hasVAListArg, Args, formatIdx, 7683 inFunctionCall, CallType, CheckedVarArgs, 7684 UncoveredArg) {} 7685 7686 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 7687 7688 /// Returns true if '%@' specifiers are allowed in the format string. 
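  /// For example (illustrative), "%@" is meaningful in NSString-style formats
  /// such as [NSString stringWithFormat:@"%@", obj] and in os_log/os_trace
  /// format strings, but not in plain printf format strings.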
7689 bool allowsObjCArg() const { 7690 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 7691 FSType == Sema::FST_OSTrace; 7692 } 7693 7694 bool HandleInvalidPrintfConversionSpecifier( 7695 const analyze_printf::PrintfSpecifier &FS, 7696 const char *startSpecifier, 7697 unsigned specifierLen) override; 7698 7699 void handleInvalidMaskType(StringRef MaskType) override; 7700 7701 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 7702 const char *startSpecifier, 7703 unsigned specifierLen) override; 7704 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 7705 const char *StartSpecifier, 7706 unsigned SpecifierLen, 7707 const Expr *E); 7708 7709 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 7710 const char *startSpecifier, unsigned specifierLen); 7711 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 7712 const analyze_printf::OptionalAmount &Amt, 7713 unsigned type, 7714 const char *startSpecifier, unsigned specifierLen); 7715 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7716 const analyze_printf::OptionalFlag &flag, 7717 const char *startSpecifier, unsigned specifierLen); 7718 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 7719 const analyze_printf::OptionalFlag &ignoredFlag, 7720 const analyze_printf::OptionalFlag &flag, 7721 const char *startSpecifier, unsigned specifierLen); 7722 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 7723 const Expr *E); 7724 7725 void HandleEmptyObjCModifierFlag(const char *startFlag, 7726 unsigned flagLen) override; 7727 7728 void HandleInvalidObjCModifierFlag(const char *startFlag, 7729 unsigned flagLen) override; 7730 7731 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 7732 const char *flagsEnd, 7733 const char *conversionPosition) 7734 override; 7735 }; 7736 7737 } // namespace 7738 7739 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 7740 const analyze_printf::PrintfSpecifier &FS, 7741 const char *startSpecifier, 7742 unsigned specifierLen) { 7743 const analyze_printf::PrintfConversionSpecifier &CS = 7744 FS.getConversionSpecifier(); 7745 7746 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 7747 getLocationOfByte(CS.getStart()), 7748 startSpecifier, specifierLen, 7749 CS.getStart(), CS.getLength()); 7750 } 7751 7752 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 7753 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 7754 } 7755 7756 bool CheckPrintfHandler::HandleAmount( 7757 const analyze_format_string::OptionalAmount &Amt, 7758 unsigned k, const char *startSpecifier, 7759 unsigned specifierLen) { 7760 if (Amt.hasDataArgument()) { 7761 if (!HasVAListArg) { 7762 unsigned argIndex = Amt.getArgIndex(); 7763 if (argIndex >= NumDataArgs) { 7764 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 7765 << k, 7766 getLocationOfByte(Amt.getStart()), 7767 /*IsStringLocation*/true, 7768 getSpecifierRange(startSpecifier, specifierLen)); 7769 // Don't do any more checking. We will just emit 7770 // spurious errors. 7771 return false; 7772 } 7773 7774 // Type check the data argument. It should be an 'int'. 7775 // Although not in conformance with C99, we also allow the argument to be 7776 // an 'unsigned int' as that is a reasonably safe case. GCC also 7777 // doesn't emit a warning for that case. 
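      // e.g. (illustrative) printf("%*d", width, value): 'width' is the data
      // argument checked here; 'int' is expected and 'unsigned int' is
      // tolerated.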
7778 CoveredArgs.set(argIndex); 7779 const Expr *Arg = getDataArg(argIndex); 7780 if (!Arg) 7781 return false; 7782 7783 QualType T = Arg->getType(); 7784 7785 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 7786 assert(AT.isValid()); 7787 7788 if (!AT.matchesType(S.Context, T)) { 7789 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 7790 << k << AT.getRepresentativeTypeName(S.Context) 7791 << T << Arg->getSourceRange(), 7792 getLocationOfByte(Amt.getStart()), 7793 /*IsStringLocation*/true, 7794 getSpecifierRange(startSpecifier, specifierLen)); 7795 // Don't do any more checking. We will just emit 7796 // spurious errors. 7797 return false; 7798 } 7799 } 7800 } 7801 return true; 7802 } 7803 7804 void CheckPrintfHandler::HandleInvalidAmount( 7805 const analyze_printf::PrintfSpecifier &FS, 7806 const analyze_printf::OptionalAmount &Amt, 7807 unsigned type, 7808 const char *startSpecifier, 7809 unsigned specifierLen) { 7810 const analyze_printf::PrintfConversionSpecifier &CS = 7811 FS.getConversionSpecifier(); 7812 7813 FixItHint fixit = 7814 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 7815 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 7816 Amt.getConstantLength())) 7817 : FixItHint(); 7818 7819 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 7820 << type << CS.toString(), 7821 getLocationOfByte(Amt.getStart()), 7822 /*IsStringLocation*/true, 7823 getSpecifierRange(startSpecifier, specifierLen), 7824 fixit); 7825 } 7826 7827 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7828 const analyze_printf::OptionalFlag &flag, 7829 const char *startSpecifier, 7830 unsigned specifierLen) { 7831 // Warn about pointless flag with a fixit removal. 7832 const analyze_printf::PrintfConversionSpecifier &CS = 7833 FS.getConversionSpecifier(); 7834 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 7835 << flag.toString() << CS.toString(), 7836 getLocationOfByte(flag.getPosition()), 7837 /*IsStringLocation*/true, 7838 getSpecifierRange(startSpecifier, specifierLen), 7839 FixItHint::CreateRemoval( 7840 getSpecifierRange(flag.getPosition(), 1))); 7841 } 7842 7843 void CheckPrintfHandler::HandleIgnoredFlag( 7844 const analyze_printf::PrintfSpecifier &FS, 7845 const analyze_printf::OptionalFlag &ignoredFlag, 7846 const analyze_printf::OptionalFlag &flag, 7847 const char *startSpecifier, 7848 unsigned specifierLen) { 7849 // Warn about ignored flag with a fixit removal. 7850 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 7851 << ignoredFlag.toString() << flag.toString(), 7852 getLocationOfByte(ignoredFlag.getPosition()), 7853 /*IsStringLocation*/true, 7854 getSpecifierRange(startSpecifier, specifierLen), 7855 FixItHint::CreateRemoval( 7856 getSpecifierRange(ignoredFlag.getPosition(), 1))); 7857 } 7858 7859 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 7860 unsigned flagLen) { 7861 // Warn about an empty flag. 7862 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 7863 getLocationOfByte(startFlag), 7864 /*IsStringLocation*/true, 7865 getSpecifierRange(startFlag, flagLen)); 7866 } 7867 7868 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 7869 unsigned flagLen) { 7870 // Warn about an invalid flag. 
7871 auto Range = getSpecifierRange(startFlag, flagLen); 7872 StringRef flag(startFlag, flagLen); 7873 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 7874 getLocationOfByte(startFlag), 7875 /*IsStringLocation*/true, 7876 Range, FixItHint::CreateRemoval(Range)); 7877 } 7878 7879 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 7880 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 7881 // Warn about using '[...]' without a '@' conversion. 7882 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 7883 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 7884 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 7885 getLocationOfByte(conversionPosition), 7886 /*IsStringLocation*/true, 7887 Range, FixItHint::CreateRemoval(Range)); 7888 } 7889 7890 // Determines if the specified is a C++ class or struct containing 7891 // a member with the specified name and kind (e.g. a CXXMethodDecl named 7892 // "c_str()"). 7893 template<typename MemberKind> 7894 static llvm::SmallPtrSet<MemberKind*, 1> 7895 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 7896 const RecordType *RT = Ty->getAs<RecordType>(); 7897 llvm::SmallPtrSet<MemberKind*, 1> Results; 7898 7899 if (!RT) 7900 return Results; 7901 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 7902 if (!RD || !RD->getDefinition()) 7903 return Results; 7904 7905 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 7906 Sema::LookupMemberName); 7907 R.suppressDiagnostics(); 7908 7909 // We just need to include all members of the right kind turned up by the 7910 // filter, at this point. 7911 if (S.LookupQualifiedName(R, RT->getDecl())) 7912 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 7913 NamedDecl *decl = (*I)->getUnderlyingDecl(); 7914 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 7915 Results.insert(FK); 7916 } 7917 return Results; 7918 } 7919 7920 /// Check if we could call '.c_str()' on an object. 7921 /// 7922 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 7923 /// allow the call, or if it would be ambiguous). 7924 bool Sema::hasCStrMethod(const Expr *E) { 7925 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 7926 7927 MethodSet Results = 7928 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 7929 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 7930 MI != ME; ++MI) 7931 if ((*MI)->getMinRequiredArguments() == 0) 7932 return true; 7933 return false; 7934 } 7935 7936 // Check if a (w)string was passed when a (w)char* was needed, and offer a 7937 // better diagnostic if so. AT is assumed to be valid. 7938 // Returns true when a c_str() conversion method is found. 7939 bool CheckPrintfHandler::checkForCStrMembers( 7940 const analyze_printf::ArgType &AT, const Expr *E) { 7941 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 7942 7943 MethodSet Results = 7944 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 7945 7946 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 7947 MI != ME; ++MI) { 7948 const CXXMethodDecl *Method = *MI; 7949 if (Method->getMinRequiredArguments() == 0 && 7950 AT.matchesType(S.Context, Method->getReturnType())) { 7951 // FIXME: Suggest parens if the expression needs them. 
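      // e.g. (illustrative) printf("%s", s) where 's' is a std::string:
      // suggest appending ".c_str()" so the argument matches the expected
      // char pointer type.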
7952 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 7953 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 7954 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 7955 return true; 7956 } 7957 } 7958 7959 return false; 7960 } 7961 7962 bool 7963 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 7964 &FS, 7965 const char *startSpecifier, 7966 unsigned specifierLen) { 7967 using namespace analyze_format_string; 7968 using namespace analyze_printf; 7969 7970 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 7971 7972 if (FS.consumesDataArgument()) { 7973 if (atFirstArg) { 7974 atFirstArg = false; 7975 usesPositionalArgs = FS.usesPositionalArg(); 7976 } 7977 else if (usesPositionalArgs != FS.usesPositionalArg()) { 7978 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 7979 startSpecifier, specifierLen); 7980 return false; 7981 } 7982 } 7983 7984 // First check if the field width, precision, and conversion specifier 7985 // have matching data arguments. 7986 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 7987 startSpecifier, specifierLen)) { 7988 return false; 7989 } 7990 7991 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 7992 startSpecifier, specifierLen)) { 7993 return false; 7994 } 7995 7996 if (!CS.consumesDataArgument()) { 7997 // FIXME: Technically specifying a precision or field width here 7998 // makes no sense. Worth issuing a warning at some point. 7999 return true; 8000 } 8001 8002 // Consume the argument. 8003 unsigned argIndex = FS.getArgIndex(); 8004 if (argIndex < NumDataArgs) { 8005 // The check to see if the argIndex is valid will come later. 8006 // We set the bit here because we may exit early from this 8007 // function if we encounter some other error. 8008 CoveredArgs.set(argIndex); 8009 } 8010 8011 // FreeBSD kernel extensions. 8012 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 8013 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 8014 // We need at least two arguments. 8015 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 8016 return false; 8017 8018 // Claim the second argument. 
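    // For illustration: the FreeBSD kernel's "%b" takes a value plus a
    // bit-description string, and "%D" takes a pointer plus a char*
    // separator, so each of these specifiers covers two data arguments.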
8019 CoveredArgs.set(argIndex + 1); 8020 8021 const Expr *Ex = getDataArg(argIndex); 8022 if (CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 8023 // Type check the first argument (pointer for %D) 8024 const analyze_printf::ArgType &AT = ArgType::CPointerTy; 8025 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 8026 EmitFormatDiagnostic( 8027 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8028 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 8029 << false << Ex->getSourceRange(), 8030 Ex->getBeginLoc(), /*IsStringLocation*/false, 8031 getSpecifierRange(startSpecifier, specifierLen)); 8032 } else { 8033 // Check the length modifier for %b 8034 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 8035 S.getLangOpts())) 8036 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8037 diag::warn_format_nonsensical_length); 8038 else if (!FS.hasStandardLengthModifier()) 8039 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 8040 else if (!FS.hasStandardLengthConversionCombination()) 8041 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8042 diag::warn_format_non_standard_conversion_spec); 8043 8044 // Type check the first argument of %b 8045 if (!checkFormatExpr(FS, startSpecifier, specifierLen, Ex)) 8046 return false; 8047 } 8048 8049 // Type check the second argument (char * for both %b and %D) 8050 Ex = getDataArg(argIndex + 1); 8051 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 8052 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 8053 EmitFormatDiagnostic( 8054 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8055 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 8056 << false << Ex->getSourceRange(), 8057 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8058 getSpecifierRange(startSpecifier, specifierLen)); 8059 8060 return true; 8061 } 8062 8063 // Check for using an Objective-C specific conversion specifier 8064 // in a non-ObjC literal. 8065 if (!allowsObjCArg() && CS.isObjCArg()) { 8066 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8067 specifierLen); 8068 } 8069 8070 // %P can only be used with os_log. 8071 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 8072 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8073 specifierLen); 8074 } 8075 8076 // %n is not allowed with os_log. 8077 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 8078 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 8079 getLocationOfByte(CS.getStart()), 8080 /*IsStringLocation*/ false, 8081 getSpecifierRange(startSpecifier, specifierLen)); 8082 8083 return true; 8084 } 8085 8086 // %n is not allowed anywhere 8087 if (CS.getKind() == ConversionSpecifier::nArg) { 8088 EmitFormatDiagnostic(S.PDiag(diag::warn_format_narg), 8089 getLocationOfByte(CS.getStart()), 8090 /*IsStringLocation*/ false, 8091 getSpecifierRange(startSpecifier, specifierLen)); 8092 return true; 8093 } 8094 8095 // Only scalars are allowed for os_trace. 8096 if (FSType == Sema::FST_OSTrace && 8097 (CS.getKind() == ConversionSpecifier::PArg || 8098 CS.getKind() == ConversionSpecifier::sArg || 8099 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 8100 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8101 specifierLen); 8102 } 8103 8104 // Check for use of public/private annotation outside of os_log(). 
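  // e.g. (illustrative) "%{public}s" or "%{private}s" in an ordinary printf
  // format string: these annotations only have meaning for os_log().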
8105 if (FSType != Sema::FST_OSLog) { 8106 if (FS.isPublic().isSet()) { 8107 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 8108 << "public", 8109 getLocationOfByte(FS.isPublic().getPosition()), 8110 /*IsStringLocation*/ false, 8111 getSpecifierRange(startSpecifier, specifierLen)); 8112 } 8113 if (FS.isPrivate().isSet()) { 8114 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 8115 << "private", 8116 getLocationOfByte(FS.isPrivate().getPosition()), 8117 /*IsStringLocation*/ false, 8118 getSpecifierRange(startSpecifier, specifierLen)); 8119 } 8120 } 8121 8122 // Check for invalid use of field width 8123 if (!FS.hasValidFieldWidth()) { 8124 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 8125 startSpecifier, specifierLen); 8126 } 8127 8128 // Check for invalid use of precision 8129 if (!FS.hasValidPrecision()) { 8130 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 8131 startSpecifier, specifierLen); 8132 } 8133 8134 // Precision is mandatory for %P specifier. 8135 if (CS.getKind() == ConversionSpecifier::PArg && 8136 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 8137 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 8138 getLocationOfByte(startSpecifier), 8139 /*IsStringLocation*/ false, 8140 getSpecifierRange(startSpecifier, specifierLen)); 8141 } 8142 8143 // Check each flag does not conflict with any other component. 8144 if (!FS.hasValidThousandsGroupingPrefix()) 8145 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 8146 if (!FS.hasValidLeadingZeros()) 8147 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 8148 if (!FS.hasValidPlusPrefix()) 8149 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 8150 if (!FS.hasValidSpacePrefix()) 8151 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 8152 if (!FS.hasValidAlternativeForm()) 8153 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 8154 if (!FS.hasValidLeftJustified()) 8155 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 8156 8157 // Check that flags are not ignored by another flag 8158 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 8159 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 8160 startSpecifier, specifierLen); 8161 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 8162 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 8163 startSpecifier, specifierLen); 8164 8165 // Check the length modifier is valid with the given conversion specifier. 8166 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 8167 S.getLangOpts())) 8168 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8169 diag::warn_format_nonsensical_length); 8170 else if (!FS.hasStandardLengthModifier()) 8171 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 8172 else if (!FS.hasStandardLengthConversionCombination()) 8173 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8174 diag::warn_format_non_standard_conversion_spec); 8175 8176 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 8177 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 8178 8179 // The remaining checks depend on the data arguments. 
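  // For vprintf-style callees the arguments are hidden behind a va_list, so
  // per-argument type checking is not possible and we stop here.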
8180 if (HasVAListArg) 8181 return true; 8182 8183 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 8184 return false; 8185 8186 const Expr *Arg = getDataArg(argIndex); 8187 if (!Arg) 8188 return true; 8189 8190 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 8191 } 8192 8193 static bool requiresParensToAddCast(const Expr *E) { 8194 // FIXME: We should have a general way to reason about operator 8195 // precedence and whether parens are actually needed here. 8196 // Take care of a few common cases where they aren't. 8197 const Expr *Inside = E->IgnoreImpCasts(); 8198 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 8199 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 8200 8201 switch (Inside->getStmtClass()) { 8202 case Stmt::ArraySubscriptExprClass: 8203 case Stmt::CallExprClass: 8204 case Stmt::CharacterLiteralClass: 8205 case Stmt::CXXBoolLiteralExprClass: 8206 case Stmt::DeclRefExprClass: 8207 case Stmt::FloatingLiteralClass: 8208 case Stmt::IntegerLiteralClass: 8209 case Stmt::MemberExprClass: 8210 case Stmt::ObjCArrayLiteralClass: 8211 case Stmt::ObjCBoolLiteralExprClass: 8212 case Stmt::ObjCBoxedExprClass: 8213 case Stmt::ObjCDictionaryLiteralClass: 8214 case Stmt::ObjCEncodeExprClass: 8215 case Stmt::ObjCIvarRefExprClass: 8216 case Stmt::ObjCMessageExprClass: 8217 case Stmt::ObjCPropertyRefExprClass: 8218 case Stmt::ObjCStringLiteralClass: 8219 case Stmt::ObjCSubscriptRefExprClass: 8220 case Stmt::ParenExprClass: 8221 case Stmt::StringLiteralClass: 8222 case Stmt::UnaryOperatorClass: 8223 return false; 8224 default: 8225 return true; 8226 } 8227 } 8228 8229 static std::pair<QualType, StringRef> 8230 shouldNotPrintDirectly(const ASTContext &Context, 8231 QualType IntendedTy, 8232 const Expr *E) { 8233 // Use a 'while' to peel off layers of typedefs. 8234 QualType TyTy = IntendedTy; 8235 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 8236 StringRef Name = UserTy->getDecl()->getName(); 8237 QualType CastTy = llvm::StringSwitch<QualType>(Name) 8238 .Case("CFIndex", Context.getNSIntegerType()) 8239 .Case("NSInteger", Context.getNSIntegerType()) 8240 .Case("NSUInteger", Context.getNSUIntegerType()) 8241 .Case("SInt32", Context.IntTy) 8242 .Case("UInt32", Context.UnsignedIntTy) 8243 .Default(QualType()); 8244 8245 if (!CastTy.isNull()) 8246 return std::make_pair(CastTy, Name); 8247 8248 TyTy = UserTy->desugar(); 8249 } 8250 8251 // Strip parens if necessary. 8252 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 8253 return shouldNotPrintDirectly(Context, 8254 PE->getSubExpr()->getType(), 8255 PE->getSubExpr()); 8256 8257 // If this is a conditional expression, then its result type is constructed 8258 // via usual arithmetic conversions and thus there might be no necessary 8259 // typedef sugar there. Recurse to operands to check for NSInteger & 8260 // Co. usage condition. 
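  // e.g. (illustrative) 'cond ? lengthA : lengthB' where both operands are
  // NSUInteger: the '?:' result has lost the typedef sugar, so inspect both
  // branches instead.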
8261 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 8262 QualType TrueTy, FalseTy; 8263 StringRef TrueName, FalseName; 8264 8265 std::tie(TrueTy, TrueName) = 8266 shouldNotPrintDirectly(Context, 8267 CO->getTrueExpr()->getType(), 8268 CO->getTrueExpr()); 8269 std::tie(FalseTy, FalseName) = 8270 shouldNotPrintDirectly(Context, 8271 CO->getFalseExpr()->getType(), 8272 CO->getFalseExpr()); 8273 8274 if (TrueTy == FalseTy) 8275 return std::make_pair(TrueTy, TrueName); 8276 else if (TrueTy.isNull()) 8277 return std::make_pair(FalseTy, FalseName); 8278 else if (FalseTy.isNull()) 8279 return std::make_pair(TrueTy, TrueName); 8280 } 8281 8282 return std::make_pair(QualType(), StringRef()); 8283 } 8284 8285 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 8286 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 8287 /// type do not count. 8288 static bool 8289 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 8290 QualType From = ICE->getSubExpr()->getType(); 8291 QualType To = ICE->getType(); 8292 // It's an integer promotion if the destination type is the promoted 8293 // source type. 8294 if (ICE->getCastKind() == CK_IntegralCast && 8295 From->isPromotableIntegerType() && 8296 S.Context.getPromotedIntegerType(From) == To) 8297 return true; 8298 // Look through vector types, since we do default argument promotion for 8299 // those in OpenCL. 8300 if (const auto *VecTy = From->getAs<ExtVectorType>()) 8301 From = VecTy->getElementType(); 8302 if (const auto *VecTy = To->getAs<ExtVectorType>()) 8303 To = VecTy->getElementType(); 8304 // It's a floating promotion if the source type is a lower rank. 8305 return ICE->getCastKind() == CK_FloatingCast && 8306 S.Context.getFloatingTypeOrder(From, To) < 0; 8307 } 8308 8309 bool 8310 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8311 const char *StartSpecifier, 8312 unsigned SpecifierLen, 8313 const Expr *E) { 8314 using namespace analyze_format_string; 8315 using namespace analyze_printf; 8316 8317 // Now type check the data expression that matches the 8318 // format specifier. 8319 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 8320 if (!AT.isValid()) 8321 return true; 8322 8323 QualType ExprTy = E->getType(); 8324 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 8325 ExprTy = TET->getUnderlyingExpr()->getType(); 8326 } 8327 8328 // Diagnose attempts to print a boolean value as a character. Unlike other 8329 // -Wformat diagnostics, this is fine from a type perspective, but it still 8330 // doesn't make sense. 8331 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 8332 E->isKnownToHaveBooleanValue()) { 8333 const CharSourceRange &CSR = 8334 getSpecifierRange(StartSpecifier, SpecifierLen); 8335 SmallString<4> FSString; 8336 llvm::raw_svector_ostream os(FSString); 8337 FS.toString(os); 8338 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 8339 << FSString, 8340 E->getExprLoc(), false, CSR); 8341 return true; 8342 } 8343 8344 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 8345 if (Match == analyze_printf::ArgType::Match) 8346 return true; 8347 8348 // Look through argument promotions for our error message's reported type. 
8349 // This includes the integral and floating promotions, but excludes array 8350 // and function pointer decay (seeing that an argument intended to be a 8351 // string has type 'char [6]' is probably more confusing than 'char *') and 8352 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 8353 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 8354 if (isArithmeticArgumentPromotion(S, ICE)) { 8355 E = ICE->getSubExpr(); 8356 ExprTy = E->getType(); 8357 8358 // Check if we didn't match because of an implicit cast from a 'char' 8359 // or 'short' to an 'int'. This is done because printf is a varargs 8360 // function. 8361 if (ICE->getType() == S.Context.IntTy || 8362 ICE->getType() == S.Context.UnsignedIntTy) { 8363 // All further checking is done on the subexpression 8364 const analyze_printf::ArgType::MatchKind ImplicitMatch = 8365 AT.matchesType(S.Context, ExprTy); 8366 if (ImplicitMatch == analyze_printf::ArgType::Match) 8367 return true; 8368 if (ImplicitMatch == ArgType::NoMatchPedantic || 8369 ImplicitMatch == ArgType::NoMatchTypeConfusion) 8370 Match = ImplicitMatch; 8371 } 8372 } 8373 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 8374 // Special case for 'a', which has type 'int' in C. 8375 // Note, however, that we do /not/ want to treat multibyte constants like 8376 // 'MooV' as characters! This form is deprecated but still exists. 8377 if (ExprTy == S.Context.IntTy) 8378 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 8379 ExprTy = S.Context.CharTy; 8380 } 8381 8382 // Look through enums to their underlying type. 8383 bool IsEnum = false; 8384 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 8385 ExprTy = EnumTy->getDecl()->getIntegerType(); 8386 IsEnum = true; 8387 } 8388 8389 // %C in an Objective-C context prints a unichar, not a wchar_t. 8390 // If the argument is an integer of some kind, believe the %C and suggest 8391 // a cast instead of changing the conversion specifier. 8392 QualType IntendedTy = ExprTy; 8393 if (isObjCContext() && 8394 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 8395 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 8396 !ExprTy->isCharType()) { 8397 // 'unichar' is defined as a typedef of unsigned short, but we should 8398 // prefer using the typedef if it is visible. 8399 IntendedTy = S.Context.UnsignedShortTy; 8400 8401 // While we are here, check if the value is an IntegerLiteral that happens 8402 // to be within the valid range. 8403 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 8404 const llvm::APInt &V = IL->getValue(); 8405 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 8406 return true; 8407 } 8408 8409 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 8410 Sema::LookupOrdinaryName); 8411 if (S.LookupName(Result, S.getCurScope())) { 8412 NamedDecl *ND = Result.getFoundDecl(); 8413 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 8414 if (TD->getUnderlyingType() == IntendedTy) 8415 IntendedTy = S.Context.getTypedefType(TD); 8416 } 8417 } 8418 } 8419 8420 // Special-case some of Darwin's platform-independence types by suggesting 8421 // casts to primitive types that are known to be large enough. 
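  // e.g. (illustrative) printing an NSInteger with "%d": rather than making
  // the specifier architecture-dependent, suggest casting the argument to
  // 'long' and using "%ld", keeping the format string portable.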
8422 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 8423 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 8424 QualType CastTy; 8425 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 8426 if (!CastTy.isNull()) { 8427 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 8428 // (long in ASTContext). Only complain to pedants. 8429 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 8430 (AT.isSizeT() || AT.isPtrdiffT()) && 8431 AT.matchesType(S.Context, CastTy)) 8432 Match = ArgType::NoMatchPedantic; 8433 IntendedTy = CastTy; 8434 ShouldNotPrintDirectly = true; 8435 } 8436 } 8437 8438 // We may be able to offer a FixItHint if it is a supported type. 8439 PrintfSpecifier fixedFS = FS; 8440 bool Success = 8441 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 8442 8443 if (Success) { 8444 // Get the fix string from the fixed format specifier 8445 SmallString<16> buf; 8446 llvm::raw_svector_ostream os(buf); 8447 fixedFS.toString(os); 8448 8449 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 8450 8451 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 8452 unsigned Diag; 8453 switch (Match) { 8454 case ArgType::Match: llvm_unreachable("expected non-matching"); 8455 case ArgType::NoMatchPedantic: 8456 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 8457 break; 8458 case ArgType::NoMatchTypeConfusion: 8459 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 8460 break; 8461 case ArgType::NoMatch: 8462 Diag = diag::warn_format_conversion_argument_type_mismatch; 8463 break; 8464 } 8465 8466 // In this case, the specifier is wrong and should be changed to match 8467 // the argument. 8468 EmitFormatDiagnostic(S.PDiag(Diag) 8469 << AT.getRepresentativeTypeName(S.Context) 8470 << IntendedTy << IsEnum << E->getSourceRange(), 8471 E->getBeginLoc(), 8472 /*IsStringLocation*/ false, SpecRange, 8473 FixItHint::CreateReplacement(SpecRange, os.str())); 8474 } else { 8475 // The canonical type for formatting this value is different from the 8476 // actual type of the expression. (This occurs, for example, with Darwin's 8477 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 8478 // should be printed as 'long' for 64-bit compatibility.) 8479 // Rather than emitting a normal format/argument mismatch, we want to 8480 // add a cast to the recommended type (and correct the format string 8481 // if necessary). 8482 SmallString<16> CastBuf; 8483 llvm::raw_svector_ostream CastFix(CastBuf); 8484 CastFix << "("; 8485 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 8486 CastFix << ")"; 8487 8488 SmallVector<FixItHint,4> Hints; 8489 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 8490 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 8491 8492 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 8493 // If there's already a cast present, just replace it. 8494 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 8495 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 8496 8497 } else if (!requiresParensToAddCast(E)) { 8498 // If the expression has high enough precedence, 8499 // just write the C-style cast. 8500 Hints.push_back( 8501 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8502 } else { 8503 // Otherwise, add parens around the expression as well as the cast. 
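      // e.g. (illustrative) for 'x + y' the hints produce '(long)(x + y)'
      // rather than '(long)x + y', so the cast applies to the whole
      // expression.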
8504 CastFix << "("; 8505 Hints.push_back( 8506 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8507 8508 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 8509 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 8510 } 8511 8512 if (ShouldNotPrintDirectly) { 8513 // The expression has a type that should not be printed directly. 8514 // We extract the name from the typedef because we don't want to show 8515 // the underlying type in the diagnostic. 8516 StringRef Name; 8517 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 8518 Name = TypedefTy->getDecl()->getName(); 8519 else 8520 Name = CastTyName; 8521 unsigned Diag = Match == ArgType::NoMatchPedantic 8522 ? diag::warn_format_argument_needs_cast_pedantic 8523 : diag::warn_format_argument_needs_cast; 8524 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 8525 << E->getSourceRange(), 8526 E->getBeginLoc(), /*IsStringLocation=*/false, 8527 SpecRange, Hints); 8528 } else { 8529 // In this case, the expression could be printed using a different 8530 // specifier, but we've decided that the specifier is probably correct 8531 // and we should cast instead. Just use the normal warning message. 8532 EmitFormatDiagnostic( 8533 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8534 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 8535 << E->getSourceRange(), 8536 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 8537 } 8538 } 8539 } else { 8540 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 8541 SpecifierLen); 8542 // Since the warning for passing non-POD types to variadic functions 8543 // was deferred until now, we emit a warning for non-POD 8544 // arguments here. 8545 switch (S.isValidVarArgType(ExprTy)) { 8546 case Sema::VAK_Valid: 8547 case Sema::VAK_ValidInCXX11: { 8548 unsigned Diag; 8549 switch (Match) { 8550 case ArgType::Match: llvm_unreachable("expected non-matching"); 8551 case ArgType::NoMatchPedantic: 8552 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 8553 break; 8554 case ArgType::NoMatchTypeConfusion: 8555 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 8556 break; 8557 case ArgType::NoMatch: 8558 Diag = diag::warn_format_conversion_argument_type_mismatch; 8559 break; 8560 } 8561 8562 EmitFormatDiagnostic( 8563 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 8564 << IsEnum << CSR << E->getSourceRange(), 8565 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8566 break; 8567 } 8568 case Sema::VAK_Undefined: 8569 case Sema::VAK_MSVCUndefined: 8570 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 8571 << S.getLangOpts().CPlusPlus11 << ExprTy 8572 << CallType 8573 << AT.getRepresentativeTypeName(S.Context) << CSR 8574 << E->getSourceRange(), 8575 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8576 checkForCStrMembers(AT, E); 8577 break; 8578 8579 case Sema::VAK_Invalid: 8580 if (ExprTy->isObjCObjectType()) 8581 EmitFormatDiagnostic( 8582 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 8583 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 8584 << AT.getRepresentativeTypeName(S.Context) << CSR 8585 << E->getSourceRange(), 8586 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8587 else 8588 // FIXME: If this is an initializer list, suggest removing the braces 8589 // or inserting a cast to the target type. 
8590 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 8591 << isa<InitListExpr>(E) << ExprTy << CallType 8592 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 8593 break; 8594 } 8595 8596 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 8597 "format string specifier index out of range"); 8598 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 8599 } 8600 8601 return true; 8602 } 8603 8604 //===--- CHECK: Scanf format string checking ------------------------------===// 8605 8606 namespace { 8607 8608 class CheckScanfHandler : public CheckFormatHandler { 8609 public: 8610 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 8611 const Expr *origFormatExpr, Sema::FormatStringType type, 8612 unsigned firstDataArg, unsigned numDataArgs, 8613 const char *beg, bool hasVAListArg, 8614 ArrayRef<const Expr *> Args, unsigned formatIdx, 8615 bool inFunctionCall, Sema::VariadicCallType CallType, 8616 llvm::SmallBitVector &CheckedVarArgs, 8617 UncoveredArgHandler &UncoveredArg) 8618 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 8619 numDataArgs, beg, hasVAListArg, Args, formatIdx, 8620 inFunctionCall, CallType, CheckedVarArgs, 8621 UncoveredArg) {} 8622 8623 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 8624 const char *startSpecifier, 8625 unsigned specifierLen) override; 8626 8627 bool HandleInvalidScanfConversionSpecifier( 8628 const analyze_scanf::ScanfSpecifier &FS, 8629 const char *startSpecifier, 8630 unsigned specifierLen) override; 8631 8632 void HandleIncompleteScanList(const char *start, const char *end) override; 8633 }; 8634 8635 } // namespace 8636 8637 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 8638 const char *end) { 8639 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 8640 getLocationOfByte(end), /*IsStringLocation*/true, 8641 getSpecifierRange(start, end - start)); 8642 } 8643 8644 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 8645 const analyze_scanf::ScanfSpecifier &FS, 8646 const char *startSpecifier, 8647 unsigned specifierLen) { 8648 const analyze_scanf::ScanfConversionSpecifier &CS = 8649 FS.getConversionSpecifier(); 8650 8651 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8652 getLocationOfByte(CS.getStart()), 8653 startSpecifier, specifierLen, 8654 CS.getStart(), CS.getLength()); 8655 } 8656 8657 bool CheckScanfHandler::HandleScanfSpecifier( 8658 const analyze_scanf::ScanfSpecifier &FS, 8659 const char *startSpecifier, 8660 unsigned specifierLen) { 8661 using namespace analyze_scanf; 8662 using namespace analyze_format_string; 8663 8664 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier(); 8665 8666 // Handle case where '%' and '*' don't consume an argument. These shouldn't 8667 // be used to decide if we are using positional arguments consistently. 8668 if (FS.consumesDataArgument()) { 8669 if (atFirstArg) { 8670 atFirstArg = false; 8671 usesPositionalArgs = FS.usesPositionalArg(); 8672 } 8673 else if (usesPositionalArgs != FS.usesPositionalArg()) { 8674 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 8675 startSpecifier, specifierLen); 8676 return false; 8677 } 8678 } 8679 8680 // Check if the field with is non-zero. 
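  // e.g. (illustrative) scanf("%0d", &n): a zero field width is meaningless,
  // so warn and offer a fixit that removes it.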
8681 const OptionalAmount &Amt = FS.getFieldWidth(); 8682 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 8683 if (Amt.getConstantAmount() == 0) { 8684 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 8685 Amt.getConstantLength()); 8686 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 8687 getLocationOfByte(Amt.getStart()), 8688 /*IsStringLocation*/true, R, 8689 FixItHint::CreateRemoval(R)); 8690 } 8691 } 8692 8693 if (!FS.consumesDataArgument()) { 8694 // FIXME: Technically specifying a precision or field width here 8695 // makes no sense. Worth issuing a warning at some point. 8696 return true; 8697 } 8698 8699 // Consume the argument. 8700 unsigned argIndex = FS.getArgIndex(); 8701 if (argIndex < NumDataArgs) { 8702 // The check to see if the argIndex is valid will come later. 8703 // We set the bit here because we may exit early from this 8704 // function if we encounter some other error. 8705 CoveredArgs.set(argIndex); 8706 } 8707 8708 // Check the length modifier is valid with the given conversion specifier. 8709 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 8710 S.getLangOpts())) 8711 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8712 diag::warn_format_nonsensical_length); 8713 else if (!FS.hasStandardLengthModifier()) 8714 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 8715 else if (!FS.hasStandardLengthConversionCombination()) 8716 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8717 diag::warn_format_non_standard_conversion_spec); 8718 8719 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 8720 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 8721 8722 // The remaining checks depend on the data arguments. 8723 if (HasVAListArg) 8724 return true; 8725 8726 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 8727 return false; 8728 8729 // Check that the argument type matches the format specifier. 8730 const Expr *Ex = getDataArg(argIndex); 8731 if (!Ex) 8732 return true; 8733 8734 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 8735 8736 if (!AT.isValid()) { 8737 return true; 8738 } 8739 8740 analyze_format_string::ArgType::MatchKind Match = 8741 AT.matchesType(S.Context, Ex->getType()); 8742 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 8743 if (Match == analyze_format_string::ArgType::Match) 8744 return true; 8745 8746 ScanfSpecifier fixedFS = FS; 8747 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 8748 S.getLangOpts(), S.Context); 8749 8750 unsigned Diag = 8751 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 8752 : diag::warn_format_conversion_argument_type_mismatch; 8753 8754 if (Success) { 8755 // Get the fix string from the fixed format specifier. 
8756 SmallString<128> buf; 8757 llvm::raw_svector_ostream os(buf); 8758 fixedFS.toString(os); 8759 8760 EmitFormatDiagnostic( 8761 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 8762 << Ex->getType() << false << Ex->getSourceRange(), 8763 Ex->getBeginLoc(), 8764 /*IsStringLocation*/ false, 8765 getSpecifierRange(startSpecifier, specifierLen), 8766 FixItHint::CreateReplacement( 8767 getSpecifierRange(startSpecifier, specifierLen), os.str())); 8768 } else { 8769 EmitFormatDiagnostic(S.PDiag(Diag) 8770 << AT.getRepresentativeTypeName(S.Context) 8771 << Ex->getType() << false << Ex->getSourceRange(), 8772 Ex->getBeginLoc(), 8773 /*IsStringLocation*/ false, 8774 getSpecifierRange(startSpecifier, specifierLen)); 8775 } 8776 8777 return true; 8778 } 8779 8780 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 8781 const Expr *OrigFormatExpr, 8782 ArrayRef<const Expr *> Args, 8783 bool HasVAListArg, unsigned format_idx, 8784 unsigned firstDataArg, 8785 Sema::FormatStringType Type, 8786 bool inFunctionCall, 8787 Sema::VariadicCallType CallType, 8788 llvm::SmallBitVector &CheckedVarArgs, 8789 UncoveredArgHandler &UncoveredArg, 8790 bool IgnoreStringsWithoutSpecifiers) { 8791 // CHECK: is the format string a wide literal? 8792 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 8793 CheckFormatHandler::EmitFormatDiagnostic( 8794 S, inFunctionCall, Args[format_idx], 8795 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 8796 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 8797 return; 8798 } 8799 8800 // Str - The format string. NOTE: this is NOT null-terminated! 8801 StringRef StrRef = FExpr->getString(); 8802 const char *Str = StrRef.data(); 8803 // Account for cases where the string literal is truncated in a declaration. 8804 const ConstantArrayType *T = 8805 S.Context.getAsConstantArrayType(FExpr->getType()); 8806 assert(T && "String literal not of constant array type!"); 8807 size_t TypeSize = T->getSize().getZExtValue(); 8808 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 8809 const unsigned numDataArgs = Args.size() - firstDataArg; 8810 8811 if (IgnoreStringsWithoutSpecifiers && 8812 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 8813 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 8814 return; 8815 8816 // Emit a warning if the string literal is truncated and does not contain an 8817 // embedded null character. 8818 if (TypeSize <= StrRef.size() && 8819 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) { 8820 CheckFormatHandler::EmitFormatDiagnostic( 8821 S, inFunctionCall, Args[format_idx], 8822 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 8823 FExpr->getBeginLoc(), 8824 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 8825 return; 8826 } 8827 8828 // CHECK: empty format string? 
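// For example (illustrative), a call such as printf("", x) supplies a data
// argument that an empty format string can never consume.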
8829 if (StrLen == 0 && numDataArgs > 0) {
8830 CheckFormatHandler::EmitFormatDiagnostic(
8831 S, inFunctionCall, Args[format_idx],
8832 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
8833 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
8834 return;
8835 }
8836
8837 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
8838 Type == Sema::FST_Kprintf || Type == Sema::FST_FreeBSDKPrintf ||
8839 Type == Sema::FST_OSLog || Type == Sema::FST_OSTrace ||
8840 Type == Sema::FST_Syslog) {
8841 CheckPrintfHandler H(
8842 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
8843 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
8844 HasVAListArg, Args, format_idx, inFunctionCall, CallType,
8845 CheckedVarArgs, UncoveredArg);
8846
8847 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
8848 S.getLangOpts(),
8849 S.Context.getTargetInfo(),
8850 Type == Sema::FST_Kprintf || Type == Sema::FST_FreeBSDKPrintf))
8851 H.DoneProcessing();
8852 } else if (Type == Sema::FST_Scanf) {
8853 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
8854 numDataArgs, Str, HasVAListArg, Args, format_idx,
8855 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
8856
8857 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
8858 S.getLangOpts(),
8859 S.Context.getTargetInfo()))
8860 H.DoneProcessing();
8861 } // TODO: handle other formats
8862 }
8863
8864 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
8865 // Str - The format string. NOTE: this is NOT null-terminated!
8866 StringRef StrRef = FExpr->getString();
8867 const char *Str = StrRef.data();
8868 // Account for cases where the string literal is truncated in a declaration.
8869 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
8870 assert(T && "String literal not of constant array type!");
8871 size_t TypeSize = T->getSize().getZExtValue();
8872 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
8873 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
8874 getLangOpts(),
8875 Context.getTargetInfo());
8876 }
8877
8878 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
8879
8880 // Returns the related absolute value function that is larger, or 0 if one
8881 // does not exist.
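// For example, the next-larger function after 'abs' is 'labs', and after
// 'labs' it is 'llabs'; 'llabs' itself has no larger counterpart, so it
// maps to 0.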
8882 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 8883 switch (AbsFunction) { 8884 default: 8885 return 0; 8886 8887 case Builtin::BI__builtin_abs: 8888 return Builtin::BI__builtin_labs; 8889 case Builtin::BI__builtin_labs: 8890 return Builtin::BI__builtin_llabs; 8891 case Builtin::BI__builtin_llabs: 8892 return 0; 8893 8894 case Builtin::BI__builtin_fabsf: 8895 return Builtin::BI__builtin_fabs; 8896 case Builtin::BI__builtin_fabs: 8897 return Builtin::BI__builtin_fabsl; 8898 case Builtin::BI__builtin_fabsl: 8899 return 0; 8900 8901 case Builtin::BI__builtin_cabsf: 8902 return Builtin::BI__builtin_cabs; 8903 case Builtin::BI__builtin_cabs: 8904 return Builtin::BI__builtin_cabsl; 8905 case Builtin::BI__builtin_cabsl: 8906 return 0; 8907 8908 case Builtin::BIabs: 8909 return Builtin::BIlabs; 8910 case Builtin::BIlabs: 8911 return Builtin::BIllabs; 8912 case Builtin::BIllabs: 8913 return 0; 8914 8915 case Builtin::BIfabsf: 8916 return Builtin::BIfabs; 8917 case Builtin::BIfabs: 8918 return Builtin::BIfabsl; 8919 case Builtin::BIfabsl: 8920 return 0; 8921 8922 case Builtin::BIcabsf: 8923 return Builtin::BIcabs; 8924 case Builtin::BIcabs: 8925 return Builtin::BIcabsl; 8926 case Builtin::BIcabsl: 8927 return 0; 8928 } 8929 } 8930 8931 // Returns the argument type of the absolute value function. 8932 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 8933 unsigned AbsType) { 8934 if (AbsType == 0) 8935 return QualType(); 8936 8937 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 8938 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 8939 if (Error != ASTContext::GE_None) 8940 return QualType(); 8941 8942 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 8943 if (!FT) 8944 return QualType(); 8945 8946 if (FT->getNumParams() != 1) 8947 return QualType(); 8948 8949 return FT->getParamType(0); 8950 } 8951 8952 // Returns the best absolute value function, or zero, based on type and 8953 // current absolute value function. 8954 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 8955 unsigned AbsFunctionKind) { 8956 unsigned BestKind = 0; 8957 uint64_t ArgSize = Context.getTypeSize(ArgType); 8958 for (unsigned Kind = AbsFunctionKind; Kind != 0; 8959 Kind = getLargerAbsoluteValueFunction(Kind)) { 8960 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 8961 if (Context.getTypeSize(ParamType) >= ArgSize) { 8962 if (BestKind == 0) 8963 BestKind = Kind; 8964 else if (Context.hasSameType(ParamType, ArgType)) { 8965 BestKind = Kind; 8966 break; 8967 } 8968 } 8969 } 8970 return BestKind; 8971 } 8972 8973 enum AbsoluteValueKind { 8974 AVK_Integer, 8975 AVK_Floating, 8976 AVK_Complex 8977 }; 8978 8979 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 8980 if (T->isIntegralOrEnumerationType()) 8981 return AVK_Integer; 8982 if (T->isRealFloatingType()) 8983 return AVK_Floating; 8984 if (T->isAnyComplexType()) 8985 return AVK_Complex; 8986 8987 llvm_unreachable("Type not integer, floating, or complex"); 8988 } 8989 8990 // Changes the absolute value function to a different type. Preserves whether 8991 // the function is a builtin. 
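// For example, when an integer absolute value is needed, 'fabs' maps to
// 'abs' and '__builtin_fabs' maps to '__builtin_abs'.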
8992 static unsigned changeAbsFunction(unsigned AbsKind, 8993 AbsoluteValueKind ValueKind) { 8994 switch (ValueKind) { 8995 case AVK_Integer: 8996 switch (AbsKind) { 8997 default: 8998 return 0; 8999 case Builtin::BI__builtin_fabsf: 9000 case Builtin::BI__builtin_fabs: 9001 case Builtin::BI__builtin_fabsl: 9002 case Builtin::BI__builtin_cabsf: 9003 case Builtin::BI__builtin_cabs: 9004 case Builtin::BI__builtin_cabsl: 9005 return Builtin::BI__builtin_abs; 9006 case Builtin::BIfabsf: 9007 case Builtin::BIfabs: 9008 case Builtin::BIfabsl: 9009 case Builtin::BIcabsf: 9010 case Builtin::BIcabs: 9011 case Builtin::BIcabsl: 9012 return Builtin::BIabs; 9013 } 9014 case AVK_Floating: 9015 switch (AbsKind) { 9016 default: 9017 return 0; 9018 case Builtin::BI__builtin_abs: 9019 case Builtin::BI__builtin_labs: 9020 case Builtin::BI__builtin_llabs: 9021 case Builtin::BI__builtin_cabsf: 9022 case Builtin::BI__builtin_cabs: 9023 case Builtin::BI__builtin_cabsl: 9024 return Builtin::BI__builtin_fabsf; 9025 case Builtin::BIabs: 9026 case Builtin::BIlabs: 9027 case Builtin::BIllabs: 9028 case Builtin::BIcabsf: 9029 case Builtin::BIcabs: 9030 case Builtin::BIcabsl: 9031 return Builtin::BIfabsf; 9032 } 9033 case AVK_Complex: 9034 switch (AbsKind) { 9035 default: 9036 return 0; 9037 case Builtin::BI__builtin_abs: 9038 case Builtin::BI__builtin_labs: 9039 case Builtin::BI__builtin_llabs: 9040 case Builtin::BI__builtin_fabsf: 9041 case Builtin::BI__builtin_fabs: 9042 case Builtin::BI__builtin_fabsl: 9043 return Builtin::BI__builtin_cabsf; 9044 case Builtin::BIabs: 9045 case Builtin::BIlabs: 9046 case Builtin::BIllabs: 9047 case Builtin::BIfabsf: 9048 case Builtin::BIfabs: 9049 case Builtin::BIfabsl: 9050 return Builtin::BIcabsf; 9051 } 9052 } 9053 llvm_unreachable("Unable to convert function"); 9054 } 9055 9056 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 9057 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 9058 if (!FnInfo) 9059 return 0; 9060 9061 switch (FDecl->getBuiltinID()) { 9062 default: 9063 return 0; 9064 case Builtin::BI__builtin_abs: 9065 case Builtin::BI__builtin_fabs: 9066 case Builtin::BI__builtin_fabsf: 9067 case Builtin::BI__builtin_fabsl: 9068 case Builtin::BI__builtin_labs: 9069 case Builtin::BI__builtin_llabs: 9070 case Builtin::BI__builtin_cabs: 9071 case Builtin::BI__builtin_cabsf: 9072 case Builtin::BI__builtin_cabsl: 9073 case Builtin::BIabs: 9074 case Builtin::BIlabs: 9075 case Builtin::BIllabs: 9076 case Builtin::BIfabs: 9077 case Builtin::BIfabsf: 9078 case Builtin::BIfabsl: 9079 case Builtin::BIcabs: 9080 case Builtin::BIcabsf: 9081 case Builtin::BIcabsl: 9082 return FDecl->getBuiltinID(); 9083 } 9084 llvm_unreachable("Unknown Builtin type"); 9085 } 9086 9087 // If the replacement is valid, emit a note with replacement function. 9088 // Additionally, suggest including the proper header if not already included. 
9089 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 9090 unsigned AbsKind, QualType ArgType) { 9091 bool EmitHeaderHint = true; 9092 const char *HeaderName = nullptr; 9093 const char *FunctionName = nullptr; 9094 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 9095 FunctionName = "std::abs"; 9096 if (ArgType->isIntegralOrEnumerationType()) { 9097 HeaderName = "cstdlib"; 9098 } else if (ArgType->isRealFloatingType()) { 9099 HeaderName = "cmath"; 9100 } else { 9101 llvm_unreachable("Invalid Type"); 9102 } 9103 9104 // Lookup all std::abs 9105 if (NamespaceDecl *Std = S.getStdNamespace()) { 9106 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 9107 R.suppressDiagnostics(); 9108 S.LookupQualifiedName(R, Std); 9109 9110 for (const auto *I : R) { 9111 const FunctionDecl *FDecl = nullptr; 9112 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 9113 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 9114 } else { 9115 FDecl = dyn_cast<FunctionDecl>(I); 9116 } 9117 if (!FDecl) 9118 continue; 9119 9120 // Found std::abs(), check that they are the right ones. 9121 if (FDecl->getNumParams() != 1) 9122 continue; 9123 9124 // Check that the parameter type can handle the argument. 9125 QualType ParamType = FDecl->getParamDecl(0)->getType(); 9126 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 9127 S.Context.getTypeSize(ArgType) <= 9128 S.Context.getTypeSize(ParamType)) { 9129 // Found a function, don't need the header hint. 9130 EmitHeaderHint = false; 9131 break; 9132 } 9133 } 9134 } 9135 } else { 9136 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 9137 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 9138 9139 if (HeaderName) { 9140 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 9141 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 9142 R.suppressDiagnostics(); 9143 S.LookupName(R, S.getCurScope()); 9144 9145 if (R.isSingleResult()) { 9146 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 9147 if (FD && FD->getBuiltinID() == AbsKind) { 9148 EmitHeaderHint = false; 9149 } else { 9150 return; 9151 } 9152 } else if (!R.empty()) { 9153 return; 9154 } 9155 } 9156 } 9157 9158 S.Diag(Loc, diag::note_replace_abs_function) 9159 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 9160 9161 if (!HeaderName) 9162 return; 9163 9164 if (!EmitHeaderHint) 9165 return; 9166 9167 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 9168 << FunctionName; 9169 } 9170 9171 template <std::size_t StrLen> 9172 static bool IsStdFunction(const FunctionDecl *FDecl, 9173 const char (&Str)[StrLen]) { 9174 if (!FDecl) 9175 return false; 9176 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 9177 return false; 9178 if (!FDecl->isInStdNamespace()) 9179 return false; 9180 9181 return true; 9182 } 9183 9184 // Warn when using the wrong abs() function. 9185 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 9186 const FunctionDecl *FDecl) { 9187 if (Call->getNumArgs() != 1) 9188 return; 9189 9190 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 9191 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 9192 if (AbsKind == 0 && !IsStdAbs) 9193 return; 9194 9195 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9196 QualType ParamType = Call->getArg(0)->getType(); 9197 9198 // Unsigned types cannot be negative. Suggest removing the absolute value 9199 // function call. 
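// For example (illustrative), given 'unsigned int u;', a call such as
//   abs(u)
// can never change the value, so the note below suggests removing the
// call, leaving just '(u)'.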
9200 if (ArgType->isUnsignedIntegerType()) {
9201 const char *FunctionName =
9202 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
9203 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
9204 Diag(Call->getExprLoc(), diag::note_remove_abs)
9205 << FunctionName
9206 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
9207 return;
9208 }
9209
9210 // Taking the absolute value of a pointer is very suspicious; the user probably
9211 // wanted to index into an array, dereference a pointer, call a function, etc.
9212 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
9213 unsigned DiagType = 0;
9214 if (ArgType->isFunctionType())
9215 DiagType = 1;
9216 else if (ArgType->isArrayType())
9217 DiagType = 2;
9218
9219 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
9220 return;
9221 }
9222
9223 // std::abs has overloads which prevent most of the absolute value problems
9224 // from occurring.
9225 if (IsStdAbs)
9226 return;
9227
9228 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
9229 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);
9230
9231 // The argument and parameter are the same kind. Check if they are the right
9232 // size.
9233 if (ArgValueKind == ParamValueKind) {
9234 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
9235 return;
9236
9237 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
9238 Diag(Call->getExprLoc(), diag::warn_abs_too_small)
9239 << FDecl << ArgType << ParamType;
9240
9241 if (NewAbsKind == 0)
9242 return;
9243
9244 emitReplacement(*this, Call->getExprLoc(),
9245 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
9246 return;
9247 }
9248
9249 // ArgValueKind != ParamValueKind
9250 // The wrong type of absolute value function was used. Attempt to find the
9251 // proper one.
9252 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
9253 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
9254 if (NewAbsKind == 0)
9255 return;
9256
9257 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
9258 << FDecl << ParamValueKind << ArgValueKind;
9259
9260 emitReplacement(*this, Call->getExprLoc(),
9261 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
9262 }
9263
9264 //===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
9265 void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
9266 const FunctionDecl *FDecl) {
9267 if (!Call || !FDecl) return;
9268
9269 // Ignore template specializations and macros.
9270 if (inTemplateInstantiation()) return;
9271 if (Call->getExprLoc().isMacroID()) return;
9272
9273 // Only care about the one-template-argument, two-parameter form of std::max.
9274 if (Call->getNumArgs() != 2) return;
9275 if (!IsStdFunction(FDecl, "max")) return;
9276 const auto * ArgList = FDecl->getTemplateSpecializationArgs();
9277 if (!ArgList) return;
9278 if (ArgList->size() != 1) return;
9279
9280 // Check that the template type argument is an unsigned integer.
9281 const auto& TA = ArgList->get(0);
9282 if (TA.getKind() != TemplateArgument::Type) return;
9283 QualType ArgType = TA.getAsType();
9284 if (!ArgType->isUnsignedIntegerType()) return;
9285
9286 // See if either argument is a literal zero.
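// For example (illustrative), 'std::max(0u, n)' with unsigned 'n' always
// yields 'n', so the note emitted below suggests reducing the call to
// just '(n)'.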
9287 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 9288 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 9289 if (!MTE) return false; 9290 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 9291 if (!Num) return false; 9292 if (Num->getValue() != 0) return false; 9293 return true; 9294 }; 9295 9296 const Expr *FirstArg = Call->getArg(0); 9297 const Expr *SecondArg = Call->getArg(1); 9298 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 9299 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 9300 9301 // Only warn when exactly one argument is zero. 9302 if (IsFirstArgZero == IsSecondArgZero) return; 9303 9304 SourceRange FirstRange = FirstArg->getSourceRange(); 9305 SourceRange SecondRange = SecondArg->getSourceRange(); 9306 9307 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 9308 9309 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 9310 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 9311 9312 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 9313 SourceRange RemovalRange; 9314 if (IsFirstArgZero) { 9315 RemovalRange = SourceRange(FirstRange.getBegin(), 9316 SecondRange.getBegin().getLocWithOffset(-1)); 9317 } else { 9318 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 9319 SecondRange.getEnd()); 9320 } 9321 9322 Diag(Call->getExprLoc(), diag::note_remove_max_call) 9323 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 9324 << FixItHint::CreateRemoval(RemovalRange); 9325 } 9326 9327 //===--- CHECK: Standard memory functions ---------------------------------===// 9328 9329 /// Takes the expression passed to the size_t parameter of functions 9330 /// such as memcmp, strncat, etc and warns if it's a comparison. 9331 /// 9332 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 9333 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 9334 IdentifierInfo *FnName, 9335 SourceLocation FnLoc, 9336 SourceLocation RParenLoc) { 9337 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 9338 if (!Size) 9339 return false; 9340 9341 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 9342 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 9343 return false; 9344 9345 SourceRange SizeRange = Size->getSourceRange(); 9346 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 9347 << SizeRange << FnName; 9348 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 9349 << FnName 9350 << FixItHint::CreateInsertion( 9351 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 9352 << FixItHint::CreateRemoval(RParenLoc); 9353 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 9354 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 9355 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 9356 ")"); 9357 9358 return true; 9359 } 9360 9361 /// Determine whether the given type is or contains a dynamic class type 9362 /// (e.g., whether it has a vtable). 9363 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 9364 bool &IsContained) { 9365 // Look through array types while ignoring qualifiers. 9366 const Type *Ty = T->getBaseElementTypeUnsafe(); 9367 IsContained = false; 9368 9369 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 9370 RD = RD ? RD->getDefinition() : nullptr; 9371 if (!RD || RD->isInvalidDecl()) 9372 return nullptr; 9373 9374 if (RD->isDynamicClass()) 9375 return RD; 9376 9377 // Check all the fields. 
If any bases were dynamic, the class is dynamic. 9378 // It's impossible for a class to transitively contain itself by value, so 9379 // infinite recursion is impossible. 9380 for (auto *FD : RD->fields()) { 9381 bool SubContained; 9382 if (const CXXRecordDecl *ContainedRD = 9383 getContainedDynamicClass(FD->getType(), SubContained)) { 9384 IsContained = true; 9385 return ContainedRD; 9386 } 9387 } 9388 9389 return nullptr; 9390 } 9391 9392 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 9393 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 9394 if (Unary->getKind() == UETT_SizeOf) 9395 return Unary; 9396 return nullptr; 9397 } 9398 9399 /// If E is a sizeof expression, returns its argument expression, 9400 /// otherwise returns NULL. 9401 static const Expr *getSizeOfExprArg(const Expr *E) { 9402 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9403 if (!SizeOf->isArgumentType()) 9404 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 9405 return nullptr; 9406 } 9407 9408 /// If E is a sizeof expression, returns its argument type. 9409 static QualType getSizeOfArgType(const Expr *E) { 9410 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9411 return SizeOf->getTypeOfArgument(); 9412 return QualType(); 9413 } 9414 9415 namespace { 9416 9417 struct SearchNonTrivialToInitializeField 9418 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 9419 using Super = 9420 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 9421 9422 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 9423 9424 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 9425 SourceLocation SL) { 9426 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9427 asDerived().visitArray(PDIK, AT, SL); 9428 return; 9429 } 9430 9431 Super::visitWithKind(PDIK, FT, SL); 9432 } 9433 9434 void visitARCStrong(QualType FT, SourceLocation SL) { 9435 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9436 } 9437 void visitARCWeak(QualType FT, SourceLocation SL) { 9438 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9439 } 9440 void visitStruct(QualType FT, SourceLocation SL) { 9441 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9442 visit(FD->getType(), FD->getLocation()); 9443 } 9444 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 9445 const ArrayType *AT, SourceLocation SL) { 9446 visit(getContext().getBaseElementType(AT), SL); 9447 } 9448 void visitTrivial(QualType FT, SourceLocation SL) {} 9449 9450 static void diag(QualType RT, const Expr *E, Sema &S) { 9451 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 9452 } 9453 9454 ASTContext &getContext() { return S.getASTContext(); } 9455 9456 const Expr *E; 9457 Sema &S; 9458 }; 9459 9460 struct SearchNonTrivialToCopyField 9461 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 9462 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 9463 9464 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 9465 9466 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 9467 SourceLocation SL) { 9468 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9469 asDerived().visitArray(PCK, AT, SL); 9470 return; 9471 } 9472 9473 Super::visitWithKind(PCK, FT, SL); 9474 } 9475 9476 void visitARCStrong(QualType FT, SourceLocation SL) { 9477 S.DiagRuntimeBehavior(SL, E, 
S.PDiag(diag::note_nontrivial_field) << 0); 9478 } 9479 void visitARCWeak(QualType FT, SourceLocation SL) { 9480 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 9481 } 9482 void visitStruct(QualType FT, SourceLocation SL) { 9483 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9484 visit(FD->getType(), FD->getLocation()); 9485 } 9486 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 9487 SourceLocation SL) { 9488 visit(getContext().getBaseElementType(AT), SL); 9489 } 9490 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 9491 SourceLocation SL) {} 9492 void visitTrivial(QualType FT, SourceLocation SL) {} 9493 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 9494 9495 static void diag(QualType RT, const Expr *E, Sema &S) { 9496 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 9497 } 9498 9499 ASTContext &getContext() { return S.getASTContext(); } 9500 9501 const Expr *E; 9502 Sema &S; 9503 }; 9504 9505 } 9506 9507 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 9508 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 9509 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 9510 9511 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 9512 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 9513 return false; 9514 9515 return doesExprLikelyComputeSize(BO->getLHS()) || 9516 doesExprLikelyComputeSize(BO->getRHS()); 9517 } 9518 9519 return getAsSizeOfExpr(SizeofExpr) != nullptr; 9520 } 9521 9522 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 9523 /// 9524 /// \code 9525 /// #define MACRO 0 9526 /// foo(MACRO); 9527 /// foo(0); 9528 /// \endcode 9529 /// 9530 /// This should return true for the first call to foo, but not for the second 9531 /// (regardless of whether foo is a macro or function). 9532 static bool isArgumentExpandedFromMacro(SourceManager &SM, 9533 SourceLocation CallLoc, 9534 SourceLocation ArgLoc) { 9535 if (!CallLoc.isMacroID()) 9536 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 9537 9538 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 9539 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 9540 } 9541 9542 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 9543 /// last two arguments transposed. 9544 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 9545 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 9546 return; 9547 9548 const Expr *SizeArg = 9549 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 9550 9551 auto isLiteralZero = [](const Expr *E) { 9552 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 9553 }; 9554 9555 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 9556 SourceLocation CallLoc = Call->getRParenLoc(); 9557 SourceManager &SM = S.getSourceManager(); 9558 if (isLiteralZero(SizeArg) && 9559 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 9560 9561 SourceLocation DiagLoc = SizeArg->getExprLoc(); 9562 9563 // Some platforms #define bzero to __builtin_memset. See if this is the 9564 // case, and if so, emit a better diagnostic. 
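// Such a platform might define, roughly (illustrative, not verified):
//   #define bzero(s, n) __builtin_memset(s, 0, n)
// in which case CallLoc ends up inside the 'bzero' macro expansion.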
9565 if (BId == Builtin::BIbzero || 9566 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 9567 CallLoc, SM, S.getLangOpts()) == "bzero")) { 9568 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 9569 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 9570 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 9571 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 9572 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 9573 } 9574 return; 9575 } 9576 9577 // If the second argument to a memset is a sizeof expression and the third 9578 // isn't, this is also likely an error. This should catch 9579 // 'memset(buf, sizeof(buf), 0xff)'. 9580 if (BId == Builtin::BImemset && 9581 doesExprLikelyComputeSize(Call->getArg(1)) && 9582 !doesExprLikelyComputeSize(Call->getArg(2))) { 9583 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 9584 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 9585 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 9586 return; 9587 } 9588 } 9589 9590 /// Check for dangerous or invalid arguments to memset(). 9591 /// 9592 /// This issues warnings on known problematic, dangerous or unspecified 9593 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 9594 /// function calls. 9595 /// 9596 /// \param Call The call expression to diagnose. 9597 void Sema::CheckMemaccessArguments(const CallExpr *Call, 9598 unsigned BId, 9599 IdentifierInfo *FnName) { 9600 assert(BId != 0); 9601 9602 // It is possible to have a non-standard definition of memset. Validate 9603 // we have enough arguments, and if not, abort further checking. 9604 unsigned ExpectedNumArgs = 9605 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 9606 if (Call->getNumArgs() < ExpectedNumArgs) 9607 return; 9608 9609 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 9610 BId == Builtin::BIstrndup ? 1 : 2); 9611 unsigned LenArg = 9612 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 9613 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 9614 9615 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 9616 Call->getBeginLoc(), Call->getRParenLoc())) 9617 return; 9618 9619 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 9620 CheckMemaccessSize(*this, BId, Call); 9621 9622 // We have special checking when the length is a sizeof expression. 9623 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 9624 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 9625 llvm::FoldingSetNodeID SizeOfArgID; 9626 9627 // Although widely used, 'bzero' is not a standard function. Be more strict 9628 // with the argument types before allowing diagnostics and only allow the 9629 // form bzero(ptr, sizeof(...)). 9630 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9631 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 9632 return; 9633 9634 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 9635 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 9636 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 9637 9638 QualType DestTy = Dest->getType(); 9639 QualType PointeeTy; 9640 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 9641 PointeeTy = DestPtrTy->getPointeeType(); 9642 9643 // Never warn about void type pointers. This can be used to suppress 9644 // false positives. 
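// For example (illustrative), writing 'memset((void *)&obj, 0, sizeof(obj))'
// rather than 'memset(&obj, 0, sizeof(obj))' silences these diagnostics,
// which is also what the '(void*)' fix-it note at the end suggests.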
9645 if (PointeeTy->isVoidType()) 9646 continue; 9647 9648 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 9649 // actually comparing the expressions for equality. Because computing the 9650 // expression IDs can be expensive, we only do this if the diagnostic is 9651 // enabled. 9652 if (SizeOfArg && 9653 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 9654 SizeOfArg->getExprLoc())) { 9655 // We only compute IDs for expressions if the warning is enabled, and 9656 // cache the sizeof arg's ID. 9657 if (SizeOfArgID == llvm::FoldingSetNodeID()) 9658 SizeOfArg->Profile(SizeOfArgID, Context, true); 9659 llvm::FoldingSetNodeID DestID; 9660 Dest->Profile(DestID, Context, true); 9661 if (DestID == SizeOfArgID) { 9662 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 9663 // over sizeof(src) as well. 9664 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 9665 StringRef ReadableName = FnName->getName(); 9666 9667 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 9668 if (UnaryOp->getOpcode() == UO_AddrOf) 9669 ActionIdx = 1; // If its an address-of operator, just remove it. 9670 if (!PointeeTy->isIncompleteType() && 9671 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 9672 ActionIdx = 2; // If the pointee's size is sizeof(char), 9673 // suggest an explicit length. 9674 9675 // If the function is defined as a builtin macro, do not show macro 9676 // expansion. 9677 SourceLocation SL = SizeOfArg->getExprLoc(); 9678 SourceRange DSR = Dest->getSourceRange(); 9679 SourceRange SSR = SizeOfArg->getSourceRange(); 9680 SourceManager &SM = getSourceManager(); 9681 9682 if (SM.isMacroArgExpansion(SL)) { 9683 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 9684 SL = SM.getSpellingLoc(SL); 9685 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 9686 SM.getSpellingLoc(DSR.getEnd())); 9687 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 9688 SM.getSpellingLoc(SSR.getEnd())); 9689 } 9690 9691 DiagRuntimeBehavior(SL, SizeOfArg, 9692 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 9693 << ReadableName 9694 << PointeeTy 9695 << DestTy 9696 << DSR 9697 << SSR); 9698 DiagRuntimeBehavior(SL, SizeOfArg, 9699 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 9700 << ActionIdx 9701 << SSR); 9702 9703 break; 9704 } 9705 } 9706 9707 // Also check for cases where the sizeof argument is the exact same 9708 // type as the memory argument, and where it points to a user-defined 9709 // record type. 9710 if (SizeOfArgTy != QualType()) { 9711 if (PointeeTy->isRecordType() && 9712 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 9713 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 9714 PDiag(diag::warn_sizeof_pointer_type_memaccess) 9715 << FnName << SizeOfArgTy << ArgIdx 9716 << PointeeTy << Dest->getSourceRange() 9717 << LenExpr->getSourceRange()); 9718 break; 9719 } 9720 } 9721 } else if (DestTy->isArrayType()) { 9722 PointeeTy = DestTy; 9723 } 9724 9725 if (PointeeTy == QualType()) 9726 continue; 9727 9728 // Always complain about dynamic classes. 9729 bool IsContained; 9730 if (const CXXRecordDecl *ContainedRD = 9731 getContainedDynamicClass(PointeeTy, IsContained)) { 9732 9733 unsigned OperationType = 0; 9734 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 9735 // "overwritten" if we're warning about the destination for any call 9736 // but memcmp; otherwise a verb appropriate to the call. 
9737 if (ArgIdx != 0 || IsCmp) { 9738 if (BId == Builtin::BImemcpy) 9739 OperationType = 1; 9740 else if(BId == Builtin::BImemmove) 9741 OperationType = 2; 9742 else if (IsCmp) 9743 OperationType = 3; 9744 } 9745 9746 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9747 PDiag(diag::warn_dyn_class_memaccess) 9748 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName 9749 << IsContained << ContainedRD << OperationType 9750 << Call->getCallee()->getSourceRange()); 9751 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 9752 BId != Builtin::BImemset) 9753 DiagRuntimeBehavior( 9754 Dest->getExprLoc(), Dest, 9755 PDiag(diag::warn_arc_object_memaccess) 9756 << ArgIdx << FnName << PointeeTy 9757 << Call->getCallee()->getSourceRange()); 9758 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 9759 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 9760 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 9761 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9762 PDiag(diag::warn_cstruct_memaccess) 9763 << ArgIdx << FnName << PointeeTy << 0); 9764 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 9765 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 9766 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 9767 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9768 PDiag(diag::warn_cstruct_memaccess) 9769 << ArgIdx << FnName << PointeeTy << 1); 9770 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 9771 } else { 9772 continue; 9773 } 9774 } else 9775 continue; 9776 9777 DiagRuntimeBehavior( 9778 Dest->getExprLoc(), Dest, 9779 PDiag(diag::note_bad_memaccess_silence) 9780 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 9781 break; 9782 } 9783 } 9784 9785 // A little helper routine: ignore addition and subtraction of integer literals. 9786 // This intentionally does not ignore all integer constant expressions because 9787 // we don't want to remove sizeof(). 9788 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 9789 Ex = Ex->IgnoreParenCasts(); 9790 9791 while (true) { 9792 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 9793 if (!BO || !BO->isAdditiveOp()) 9794 break; 9795 9796 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 9797 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 9798 9799 if (isa<IntegerLiteral>(RHS)) 9800 Ex = LHS; 9801 else if (isa<IntegerLiteral>(LHS)) 9802 Ex = RHS; 9803 else 9804 break; 9805 } 9806 9807 return Ex; 9808 } 9809 9810 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 9811 ASTContext &Context) { 9812 // Only handle constant-sized or VLAs, but not flexible members. 9813 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 9814 // Only issue the FIXIT for arrays of size > 1. 9815 if (CAT->getSize().getSExtValue() <= 1) 9816 return false; 9817 } else if (!Ty->isVariableArrayType()) { 9818 return false; 9819 } 9820 return true; 9821 } 9822 9823 // Warn if the user has made the 'size' argument to strlcpy or strlcat 9824 // be the size of the source, instead of the destination. 
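// For example (illustrative):
//   strlcpy(dst, src, sizeof(src));   // almost certainly meant sizeof(dst)
//   strlcpy(dst, src, strlen(src));   // same mistake, spelled with strlen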
9825 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 9826 IdentifierInfo *FnName) { 9827 9828 // Don't crash if the user has the wrong number of arguments 9829 unsigned NumArgs = Call->getNumArgs(); 9830 if ((NumArgs != 3) && (NumArgs != 4)) 9831 return; 9832 9833 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 9834 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 9835 const Expr *CompareWithSrc = nullptr; 9836 9837 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 9838 Call->getBeginLoc(), Call->getRParenLoc())) 9839 return; 9840 9841 // Look for 'strlcpy(dst, x, sizeof(x))' 9842 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 9843 CompareWithSrc = Ex; 9844 else { 9845 // Look for 'strlcpy(dst, x, strlen(x))' 9846 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 9847 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 9848 SizeCall->getNumArgs() == 1) 9849 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 9850 } 9851 } 9852 9853 if (!CompareWithSrc) 9854 return; 9855 9856 // Determine if the argument to sizeof/strlen is equal to the source 9857 // argument. In principle there's all kinds of things you could do 9858 // here, for instance creating an == expression and evaluating it with 9859 // EvaluateAsBooleanCondition, but this uses a more direct technique: 9860 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 9861 if (!SrcArgDRE) 9862 return; 9863 9864 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 9865 if (!CompareWithSrcDRE || 9866 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 9867 return; 9868 9869 const Expr *OriginalSizeArg = Call->getArg(2); 9870 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 9871 << OriginalSizeArg->getSourceRange() << FnName; 9872 9873 // Output a FIXIT hint if the destination is an array (rather than a 9874 // pointer to an array). This could be enhanced to handle some 9875 // pointers if we know the actual size, like if DstArg is 'array+2' 9876 // we could say 'sizeof(array)-2'. 9877 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 9878 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 9879 return; 9880 9881 SmallString<128> sizeString; 9882 llvm::raw_svector_ostream OS(sizeString); 9883 OS << "sizeof("; 9884 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9885 OS << ")"; 9886 9887 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 9888 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 9889 OS.str()); 9890 } 9891 9892 /// Check if two expressions refer to the same declaration. 9893 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 9894 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 9895 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 9896 return D1->getDecl() == D2->getDecl(); 9897 return false; 9898 } 9899 9900 static const Expr *getStrlenExprArg(const Expr *E) { 9901 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 9902 const FunctionDecl *FD = CE->getDirectCallee(); 9903 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 9904 return nullptr; 9905 return CE->getArg(0)->IgnoreParenCasts(); 9906 } 9907 return nullptr; 9908 } 9909 9910 // Warn on anti-patterns as the 'size' argument to strncat. 
9911 // The correct size argument should look like following: 9912 // strncat(dst, src, sizeof(dst) - strlen(dest) - 1); 9913 void Sema::CheckStrncatArguments(const CallExpr *CE, 9914 IdentifierInfo *FnName) { 9915 // Don't crash if the user has the wrong number of arguments. 9916 if (CE->getNumArgs() < 3) 9917 return; 9918 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts(); 9919 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts(); 9920 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts(); 9921 9922 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(), 9923 CE->getRParenLoc())) 9924 return; 9925 9926 // Identify common expressions, which are wrongly used as the size argument 9927 // to strncat and may lead to buffer overflows. 9928 unsigned PatternType = 0; 9929 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) { 9930 // - sizeof(dst) 9931 if (referToTheSameDecl(SizeOfArg, DstArg)) 9932 PatternType = 1; 9933 // - sizeof(src) 9934 else if (referToTheSameDecl(SizeOfArg, SrcArg)) 9935 PatternType = 2; 9936 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) { 9937 if (BE->getOpcode() == BO_Sub) { 9938 const Expr *L = BE->getLHS()->IgnoreParenCasts(); 9939 const Expr *R = BE->getRHS()->IgnoreParenCasts(); 9940 // - sizeof(dst) - strlen(dst) 9941 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) && 9942 referToTheSameDecl(DstArg, getStrlenExprArg(R))) 9943 PatternType = 1; 9944 // - sizeof(src) - (anything) 9945 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L))) 9946 PatternType = 2; 9947 } 9948 } 9949 9950 if (PatternType == 0) 9951 return; 9952 9953 // Generate the diagnostic. 9954 SourceLocation SL = LenArg->getBeginLoc(); 9955 SourceRange SR = LenArg->getSourceRange(); 9956 SourceManager &SM = getSourceManager(); 9957 9958 // If the function is defined as a builtin macro, do not show macro expansion. 9959 if (SM.isMacroArgExpansion(SL)) { 9960 SL = SM.getSpellingLoc(SL); 9961 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()), 9962 SM.getSpellingLoc(SR.getEnd())); 9963 } 9964 9965 // Check if the destination is an array (rather than a pointer to an array). 9966 QualType DstTy = DstArg->getType(); 9967 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 9968 Context); 9969 if (!isKnownSizeArray) { 9970 if (PatternType == 1) 9971 Diag(SL, diag::warn_strncat_wrong_size) << SR; 9972 else 9973 Diag(SL, diag::warn_strncat_src_size) << SR; 9974 return; 9975 } 9976 9977 if (PatternType == 1) 9978 Diag(SL, diag::warn_strncat_large_size) << SR; 9979 else 9980 Diag(SL, diag::warn_strncat_src_size) << SR; 9981 9982 SmallString<128> sizeString; 9983 llvm::raw_svector_ostream OS(sizeString); 9984 OS << "sizeof("; 9985 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9986 OS << ") - "; 9987 OS << "strlen("; 9988 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9989 OS << ") - 1"; 9990 9991 Diag(SL, diag::note_strncat_wrong_size) 9992 << FixItHint::CreateReplacement(SR, OS.str()); 9993 } 9994 9995 void 9996 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 9997 SourceLocation ReturnLoc, 9998 bool isObjCMethod, 9999 const AttrVec *Attrs, 10000 const FunctionDecl *FD) { 10001 // Check if the return value is null but should not be. 10002 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 10003 (!isObjCMethod && isNonNullType(Context, lhsType))) && 10004 CheckNonNullExpr(*this, RetValExp)) 10005 Diag(ReturnLoc, diag::warn_null_ret) 10006 << (isObjCMethod ? 
1 : 0) << RetValExp->getSourceRange(); 10007 10008 // C++11 [basic.stc.dynamic.allocation]p4: 10009 // If an allocation function declared with a non-throwing 10010 // exception-specification fails to allocate storage, it shall return 10011 // a null pointer. Any other allocation function that fails to allocate 10012 // storage shall indicate failure only by throwing an exception [...] 10013 if (FD) { 10014 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 10015 if (Op == OO_New || Op == OO_Array_New) { 10016 const FunctionProtoType *Proto 10017 = FD->getType()->castAs<FunctionProtoType>(); 10018 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 10019 CheckNonNullExpr(*this, RetValExp)) 10020 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 10021 << FD << getLangOpts().CPlusPlus11; 10022 } 10023 } 10024 } 10025 10026 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===// 10027 10028 /// Check for comparisons of floating point operands using != and ==. 10029 /// Issue a warning if these are no self-comparisons, as they are not likely 10030 /// to do what the programmer intended. 10031 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) { 10032 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts(); 10033 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts(); 10034 10035 // Special case: check for x == x (which is OK). 10036 // Do not emit warnings for such cases. 10037 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen)) 10038 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen)) 10039 if (DRL->getDecl() == DRR->getDecl()) 10040 return; 10041 10042 // Special case: check for comparisons against literals that can be exactly 10043 // represented by APFloat. In such cases, do not emit a warning. This 10044 // is a heuristic: often comparison against such literals are used to 10045 // detect if a value in a variable has not changed. This clearly can 10046 // lead to false negatives. 10047 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { 10048 if (FLL->isExact()) 10049 return; 10050 } else 10051 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) 10052 if (FLR->isExact()) 10053 return; 10054 10055 // Check for comparisons with builtin types. 10056 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) 10057 if (CL->getBuiltinCallee()) 10058 return; 10059 10060 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) 10061 if (CR->getBuiltinCallee()) 10062 return; 10063 10064 // Emit the diagnostic. 10065 Diag(Loc, diag::warn_floatingpoint_eq) 10066 << LHS->getSourceRange() << RHS->getSourceRange(); 10067 } 10068 10069 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// 10070 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// 10071 10072 namespace { 10073 10074 /// Structure recording the 'active' range of an integer-valued 10075 /// expression. 10076 struct IntRange { 10077 /// The number of bits active in the int. 10078 unsigned Width; 10079 10080 /// True if the int is known not to have negative values. 10081 bool NonNegative; 10082 10083 IntRange(unsigned Width, bool NonNegative) 10084 : Width(Width), NonNegative(NonNegative) {} 10085 10086 /// Returns the range of the bool type. 10087 static IntRange forBoolType() { 10088 return IntRange(1, true); 10089 } 10090 10091 /// Returns the range of an opaque value of the given integral type. 
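///
/// For example (illustrative, assuming a typical target where plain 'char'
/// is 8 bits and 'int' is 32 bits): 'unsigned char' yields
/// {Width = 8, NonNegative = true} and 'int' yields
/// {Width = 32, NonNegative = false}.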
10092 static IntRange forValueOfType(ASTContext &C, QualType T) { 10093 return forValueOfCanonicalType(C, 10094 T->getCanonicalTypeInternal().getTypePtr()); 10095 } 10096 10097 /// Returns the range of an opaque value of a canonical integral type. 10098 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 10099 assert(T->isCanonicalUnqualified()); 10100 10101 if (const VectorType *VT = dyn_cast<VectorType>(T)) 10102 T = VT->getElementType().getTypePtr(); 10103 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 10104 T = CT->getElementType().getTypePtr(); 10105 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 10106 T = AT->getValueType().getTypePtr(); 10107 10108 if (!C.getLangOpts().CPlusPlus) { 10109 // For enum types in C code, use the underlying datatype. 10110 if (const EnumType *ET = dyn_cast<EnumType>(T)) 10111 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 10112 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 10113 // For enum types in C++, use the known bit width of the enumerators. 10114 EnumDecl *Enum = ET->getDecl(); 10115 // In C++11, enums can have a fixed underlying type. Use this type to 10116 // compute the range. 10117 if (Enum->isFixed()) { 10118 return IntRange(C.getIntWidth(QualType(T, 0)), 10119 !ET->isSignedIntegerOrEnumerationType()); 10120 } 10121 10122 unsigned NumPositive = Enum->getNumPositiveBits(); 10123 unsigned NumNegative = Enum->getNumNegativeBits(); 10124 10125 if (NumNegative == 0) 10126 return IntRange(NumPositive, true/*NonNegative*/); 10127 else 10128 return IntRange(std::max(NumPositive + 1, NumNegative), 10129 false/*NonNegative*/); 10130 } 10131 10132 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 10133 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 10134 10135 const BuiltinType *BT = cast<BuiltinType>(T); 10136 assert(BT->isInteger()); 10137 10138 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 10139 } 10140 10141 /// Returns the "target" range of a canonical integral type, i.e. 10142 /// the range of values expressible in the type. 10143 /// 10144 /// This matches forValueOfCanonicalType except that enums have the 10145 /// full range of their type, not the range of their enumerators. 10146 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 10147 assert(T->isCanonicalUnqualified()); 10148 10149 if (const VectorType *VT = dyn_cast<VectorType>(T)) 10150 T = VT->getElementType().getTypePtr(); 10151 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 10152 T = CT->getElementType().getTypePtr(); 10153 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 10154 T = AT->getValueType().getTypePtr(); 10155 if (const EnumType *ET = dyn_cast<EnumType>(T)) 10156 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 10157 10158 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 10159 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 10160 10161 const BuiltinType *BT = cast<BuiltinType>(T); 10162 assert(BT->isInteger()); 10163 10164 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 10165 } 10166 10167 /// Returns the supremum of two ranges: i.e. their conservative merge. 10168 static IntRange join(IntRange L, IntRange R) { 10169 return IntRange(std::max(L.Width, R.Width), 10170 L.NonNegative && R.NonNegative); 10171 } 10172 10173 /// Returns the infinum of two ranges: i.e. their aggressive merge. 
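///
/// For example, meet({Width = 32, NonNegative = false},
/// {Width = 8, NonNegative = true}) is {Width = 8, NonNegative = true}:
/// the narrower width wins, and the result is non-negative if either
/// input is.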
10174 static IntRange meet(IntRange L, IntRange R) { 10175 return IntRange(std::min(L.Width, R.Width), 10176 L.NonNegative || R.NonNegative); 10177 } 10178 }; 10179 10180 } // namespace 10181 10182 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 10183 unsigned MaxWidth) { 10184 if (value.isSigned() && value.isNegative()) 10185 return IntRange(value.getMinSignedBits(), false); 10186 10187 if (value.getBitWidth() > MaxWidth) 10188 value = value.trunc(MaxWidth); 10189 10190 // isNonNegative() just checks the sign bit without considering 10191 // signedness. 10192 return IntRange(value.getActiveBits(), true); 10193 } 10194 10195 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 10196 unsigned MaxWidth) { 10197 if (result.isInt()) 10198 return GetValueRange(C, result.getInt(), MaxWidth); 10199 10200 if (result.isVector()) { 10201 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 10202 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 10203 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 10204 R = IntRange::join(R, El); 10205 } 10206 return R; 10207 } 10208 10209 if (result.isComplexInt()) { 10210 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 10211 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 10212 return IntRange::join(R, I); 10213 } 10214 10215 // This can happen with lossless casts to intptr_t of "based" lvalues. 10216 // Assume it might use arbitrary bits. 10217 // FIXME: The only reason we need to pass the type in here is to get 10218 // the sign right on this one case. It would be nice if APValue 10219 // preserved this. 10220 assert(result.isLValue() || result.isAddrLabelDiff()); 10221 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 10222 } 10223 10224 static QualType GetExprType(const Expr *E) { 10225 QualType Ty = E->getType(); 10226 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 10227 Ty = AtomicRHS->getValueType(); 10228 return Ty; 10229 } 10230 10231 /// Pseudo-evaluate the given integer expression, estimating the 10232 /// range of values it might take. 10233 /// 10234 /// \param MaxWidth - the width to which the value will be truncated 10235 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 10236 bool InConstantContext) { 10237 E = E->IgnoreParens(); 10238 10239 // Try a full evaluation first. 10240 Expr::EvalResult result; 10241 if (E->EvaluateAsRValue(result, C, InConstantContext)) 10242 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 10243 10244 // I think we only want to look through implicit casts here; if the 10245 // user has an explicit widening cast, we should treat the value as 10246 // being of the new, wider type. 10247 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 10248 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 10249 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext); 10250 10251 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 10252 10253 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 10254 CE->getCastKind() == CK_BooleanToSignedIntegral; 10255 10256 // Assume that non-integer casts can span the full range of the type. 
10257 if (!isIntegerCast) 10258 return OutputTypeRange; 10259 10260 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 10261 std::min(MaxWidth, OutputTypeRange.Width), 10262 InConstantContext); 10263 10264 // Bail out if the subexpr's range is as wide as the cast type. 10265 if (SubRange.Width >= OutputTypeRange.Width) 10266 return OutputTypeRange; 10267 10268 // Otherwise, we take the smaller width, and we're non-negative if 10269 // either the output type or the subexpr is. 10270 return IntRange(SubRange.Width, 10271 SubRange.NonNegative || OutputTypeRange.NonNegative); 10272 } 10273 10274 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 10275 // If we can fold the condition, just take that operand. 10276 bool CondResult; 10277 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 10278 return GetExprRange(C, 10279 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 10280 MaxWidth, InConstantContext); 10281 10282 // Otherwise, conservatively merge. 10283 IntRange L = 10284 GetExprRange(C, CO->getTrueExpr(), MaxWidth, InConstantContext); 10285 IntRange R = 10286 GetExprRange(C, CO->getFalseExpr(), MaxWidth, InConstantContext); 10287 return IntRange::join(L, R); 10288 } 10289 10290 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 10291 switch (BO->getOpcode()) { 10292 case BO_Cmp: 10293 llvm_unreachable("builtin <=> should have class type"); 10294 10295 // Boolean-valued operations are single-bit and positive. 10296 case BO_LAnd: 10297 case BO_LOr: 10298 case BO_LT: 10299 case BO_GT: 10300 case BO_LE: 10301 case BO_GE: 10302 case BO_EQ: 10303 case BO_NE: 10304 return IntRange::forBoolType(); 10305 10306 // The type of the assignments is the type of the LHS, so the RHS 10307 // is not necessarily the same type. 10308 case BO_MulAssign: 10309 case BO_DivAssign: 10310 case BO_RemAssign: 10311 case BO_AddAssign: 10312 case BO_SubAssign: 10313 case BO_XorAssign: 10314 case BO_OrAssign: 10315 // TODO: bitfields? 10316 return IntRange::forValueOfType(C, GetExprType(E)); 10317 10318 // Simple assignments just pass through the RHS, which will have 10319 // been coerced to the LHS type. 10320 case BO_Assign: 10321 // TODO: bitfields? 10322 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10323 10324 // Operations with opaque sources are black-listed. 10325 case BO_PtrMemD: 10326 case BO_PtrMemI: 10327 return IntRange::forValueOfType(C, GetExprType(E)); 10328 10329 // Bitwise-and uses the *infinum* of the two source ranges. 10330 case BO_And: 10331 case BO_AndAssign: 10332 return IntRange::meet( 10333 GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext), 10334 GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext)); 10335 10336 // Left shift gets black-listed based on a judgement call. 10337 case BO_Shl: 10338 // ...except that we want to treat '1 << (blah)' as logically 10339 // positive. It's an important idiom. 10340 if (IntegerLiteral *I 10341 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 10342 if (I->getValue() == 1) { 10343 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 10344 return IntRange(R.Width, /*NonNegative*/ true); 10345 } 10346 } 10347 LLVM_FALLTHROUGH; 10348 10349 case BO_ShlAssign: 10350 return IntRange::forValueOfType(C, GetExprType(E)); 10351 10352 // Right shift by a constant can narrow its left argument. 
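// For example, if the left operand's range is 32 bits wide, 'x >> 24'
// needs at most 8 bits.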
10353 case BO_Shr: 10354 case BO_ShrAssign: { 10355 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext); 10356 10357 // If the shift amount is a positive constant, drop the width by 10358 // that much. 10359 llvm::APSInt shift; 10360 if (BO->getRHS()->isIntegerConstantExpr(shift, C) && 10361 shift.isNonNegative()) { 10362 unsigned zext = shift.getZExtValue(); 10363 if (zext >= L.Width) 10364 L.Width = (L.NonNegative ? 0 : 1); 10365 else 10366 L.Width -= zext; 10367 } 10368 10369 return L; 10370 } 10371 10372 // Comma acts as its right operand. 10373 case BO_Comma: 10374 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10375 10376 // Black-list pointer subtractions. 10377 case BO_Sub: 10378 if (BO->getLHS()->getType()->isPointerType()) 10379 return IntRange::forValueOfType(C, GetExprType(E)); 10380 break; 10381 10382 // The width of a division result is mostly determined by the size 10383 // of the LHS. 10384 case BO_Div: { 10385 // Don't 'pre-truncate' the operands. 10386 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10387 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext); 10388 10389 // If the divisor is constant, use that. 10390 llvm::APSInt divisor; 10391 if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) { 10392 unsigned log2 = divisor.logBase2(); // floor(log_2(divisor)) 10393 if (log2 >= L.Width) 10394 L.Width = (L.NonNegative ? 0 : 1); 10395 else 10396 L.Width = std::min(L.Width - log2, MaxWidth); 10397 return L; 10398 } 10399 10400 // Otherwise, just use the LHS's width. 10401 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext); 10402 return IntRange(L.Width, L.NonNegative && R.NonNegative); 10403 } 10404 10405 // The result of a remainder can't be larger than the result of 10406 // either side. 10407 case BO_Rem: { 10408 // Don't 'pre-truncate' the operands. 10409 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10410 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext); 10411 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext); 10412 10413 IntRange meet = IntRange::meet(L, R); 10414 meet.Width = std::min(meet.Width, MaxWidth); 10415 return meet; 10416 } 10417 10418 // The default behavior is okay for these. 10419 case BO_Mul: 10420 case BO_Add: 10421 case BO_Xor: 10422 case BO_Or: 10423 break; 10424 } 10425 10426 // The default case is to treat the operation as if it were closed 10427 // on the narrowest type that encompasses both operands. 10428 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext); 10429 IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10430 return IntRange::join(L, R); 10431 } 10432 10433 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 10434 switch (UO->getOpcode()) { 10435 // Boolean-valued operations are white-listed. 10436 case UO_LNot: 10437 return IntRange::forBoolType(); 10438 10439 // Operations with opaque sources are black-listed. 
10440 case UO_Deref: 10441 case UO_AddrOf: // should be impossible 10442 return IntRange::forValueOfType(C, GetExprType(E)); 10443 10444 default: 10445 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext); 10446 } 10447 } 10448 10449 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 10450 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext); 10451 10452 if (const auto *BitField = E->getSourceBitField()) 10453 return IntRange(BitField->getBitWidthValue(C), 10454 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 10455 10456 return IntRange::forValueOfType(C, GetExprType(E)); 10457 } 10458 10459 static IntRange GetExprRange(ASTContext &C, const Expr *E, 10460 bool InConstantContext) { 10461 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext); 10462 } 10463 10464 /// Checks whether the given value, which currently has the given 10465 /// source semantics, has the same value when coerced through the 10466 /// target semantics. 10467 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 10468 const llvm::fltSemantics &Src, 10469 const llvm::fltSemantics &Tgt) { 10470 llvm::APFloat truncated = value; 10471 10472 bool ignored; 10473 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 10474 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 10475 10476 return truncated.bitwiseIsEqual(value); 10477 } 10478 10479 /// Checks whether the given value, which currently has the given 10480 /// source semantics, has the same value when coerced through the 10481 /// target semantics. 10482 /// 10483 /// The value might be a vector of floats (or a complex number). 10484 static bool IsSameFloatAfterCast(const APValue &value, 10485 const llvm::fltSemantics &Src, 10486 const llvm::fltSemantics &Tgt) { 10487 if (value.isFloat()) 10488 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 10489 10490 if (value.isVector()) { 10491 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 10492 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 10493 return false; 10494 return true; 10495 } 10496 10497 assert(value.isComplexFloat()); 10498 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 10499 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 10500 } 10501 10502 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 10503 bool IsListInit = false); 10504 10505 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 10506 // Suppress cases where we are comparing against an enum constant. 10507 if (const DeclRefExpr *DR = 10508 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 10509 if (isa<EnumConstantDecl>(DR->getDecl())) 10510 return true; 10511 10512 // Suppress cases where the value is expanded from a macro, unless that macro 10513 // is how a language represents a boolean literal. This is the case in both C 10514 // and Objective-C. 
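// For illustration (hypothetical user code): a constant spelled through a
// project macro such as '#define MAX_RETRIES 10' is treated as macro-derived
// here, which lets the caller suppress the in-range tautology warning, while
// 'true', 'false', 'YES' and 'NO' are still treated as ordinary boolean
// literals and remain eligible for diagnosis.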
10515 SourceLocation BeginLoc = E->getBeginLoc(); 10516 if (BeginLoc.isMacroID()) { 10517 StringRef MacroName = Lexer::getImmediateMacroName( 10518 BeginLoc, S.getSourceManager(), S.getLangOpts()); 10519 return MacroName != "YES" && MacroName != "NO" && 10520 MacroName != "true" && MacroName != "false"; 10521 } 10522 10523 return false; 10524 } 10525 10526 static bool isKnownToHaveUnsignedValue(Expr *E) { 10527 return E->getType()->isIntegerType() && 10528 (!E->getType()->isSignedIntegerType() || 10529 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 10530 } 10531 10532 namespace { 10533 /// The promoted range of values of a type. In general this has the 10534 /// following structure: 10535 /// 10536 /// |-----------| . . . |-----------| 10537 /// ^ ^ ^ ^ 10538 /// Min HoleMin HoleMax Max 10539 /// 10540 /// ... where there is only a hole if a signed type is promoted to unsigned 10541 /// (in which case Min and Max are the smallest and largest representable 10542 /// values). 10543 struct PromotedRange { 10544 // Min, or HoleMax if there is a hole. 10545 llvm::APSInt PromotedMin; 10546 // Max, or HoleMin if there is a hole. 10547 llvm::APSInt PromotedMax; 10548 10549 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 10550 if (R.Width == 0) 10551 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 10552 else if (R.Width >= BitWidth && !Unsigned) { 10553 // Promotion made the type *narrower*. This happens when promoting 10554 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 10555 // Treat all values of 'signed int' as being in range for now. 10556 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 10557 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 10558 } else { 10559 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 10560 .extOrTrunc(BitWidth); 10561 PromotedMin.setIsUnsigned(Unsigned); 10562 10563 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 10564 .extOrTrunc(BitWidth); 10565 PromotedMax.setIsUnsigned(Unsigned); 10566 } 10567 } 10568 10569 // Determine whether this range is contiguous (has no hole). 10570 bool isContiguous() const { return PromotedMin <= PromotedMax; } 10571 10572 // Where a constant value is within the range. 10573 enum ComparisonResult { 10574 LT = 0x1, 10575 LE = 0x2, 10576 GT = 0x4, 10577 GE = 0x8, 10578 EQ = 0x10, 10579 NE = 0x20, 10580 InRangeFlag = 0x40, 10581 10582 Less = LE | LT | NE, 10583 Min = LE | InRangeFlag, 10584 InRange = InRangeFlag, 10585 Max = GE | InRangeFlag, 10586 Greater = GE | GT | NE, 10587 10588 OnlyValue = LE | GE | EQ | InRangeFlag, 10589 InHole = NE 10590 }; 10591 10592 ComparisonResult compare(const llvm::APSInt &Value) const { 10593 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 10594 Value.isUnsigned() == PromotedMin.isUnsigned()); 10595 if (!isContiguous()) { 10596 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 10597 if (Value.isMinValue()) return Min; 10598 if (Value.isMaxValue()) return Max; 10599 if (Value >= PromotedMin) return InRange; 10600 if (Value <= PromotedMax) return InRange; 10601 return InHole; 10602 } 10603 10604 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 10605 case -1: return Less; 10606 case 0: return PromotedMin == PromotedMax ? 
OnlyValue : Min; 10607 case 1: 10608 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 10609 case -1: return InRange; 10610 case 0: return Max; 10611 case 1: return Greater; 10612 } 10613 } 10614 10615 llvm_unreachable("impossible compare result"); 10616 } 10617 10618 static llvm::Optional<StringRef> 10619 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 10620 if (Op == BO_Cmp) { 10621 ComparisonResult LTFlag = LT, GTFlag = GT; 10622 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 10623 10624 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 10625 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 10626 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 10627 return llvm::None; 10628 } 10629 10630 ComparisonResult TrueFlag, FalseFlag; 10631 if (Op == BO_EQ) { 10632 TrueFlag = EQ; 10633 FalseFlag = NE; 10634 } else if (Op == BO_NE) { 10635 TrueFlag = NE; 10636 FalseFlag = EQ; 10637 } else { 10638 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 10639 TrueFlag = LT; 10640 FalseFlag = GE; 10641 } else { 10642 TrueFlag = GT; 10643 FalseFlag = LE; 10644 } 10645 if (Op == BO_GE || Op == BO_LE) 10646 std::swap(TrueFlag, FalseFlag); 10647 } 10648 if (R & TrueFlag) 10649 return StringRef("true"); 10650 if (R & FalseFlag) 10651 return StringRef("false"); 10652 return llvm::None; 10653 } 10654 }; 10655 } 10656 10657 static bool HasEnumType(Expr *E) { 10658 // Strip off implicit integral promotions. 10659 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 10660 if (ICE->getCastKind() != CK_IntegralCast && 10661 ICE->getCastKind() != CK_NoOp) 10662 break; 10663 E = ICE->getSubExpr(); 10664 } 10665 10666 return E->getType()->isEnumeralType(); 10667 } 10668 10669 static int classifyConstantValue(Expr *Constant) { 10670 // The values of this enumeration are used in the diagnostics 10671 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 10672 enum ConstantValueKind { 10673 Miscellaneous = 0, 10674 LiteralTrue, 10675 LiteralFalse 10676 }; 10677 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 10678 return BL->getValue() ? ConstantValueKind::LiteralTrue 10679 : ConstantValueKind::LiteralFalse; 10680 return ConstantValueKind::Miscellaneous; 10681 } 10682 10683 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E, 10684 Expr *Constant, Expr *Other, 10685 const llvm::APSInt &Value, 10686 bool RhsConstant) { 10687 if (S.inTemplateInstantiation()) 10688 return false; 10689 10690 Expr *OriginalOther = Other; 10691 10692 Constant = Constant->IgnoreParenImpCasts(); 10693 Other = Other->IgnoreParenImpCasts(); 10694 10695 // Suppress warnings on tautological comparisons between values of the same 10696 // enumeration type. There are only two ways we could warn on this: 10697 // - If the constant is outside the range of representable values of 10698 // the enumeration. In such a case, we should warn about the cast 10699 // to enumeration type, not about the comparison. 10700 // - If the constant is the maximum / minimum in-range value. For an 10701 // enumeration type, such comparisons can be meaningful and useful. 10702 if (Constant->getType()->isEnumeralType() && 10703 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType())) 10704 return false; 10705 10706 // TODO: Investigate using GetExprRange() to get tighter bounds 10707 // on the bit ranges.
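// For illustration (hypothetical user code): given 'unsigned char c', the
// comparison 'c > 300' promotes 'c' to 'int' with a promoted range of
// [0, 255], so the constant compares as Greater and the expression is
// always false; the code below turns that into an out-of-range warning.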
10708 QualType OtherT = Other->getType(); 10709 if (const auto *AT = OtherT->getAs<AtomicType>()) 10710 OtherT = AT->getValueType(); 10711 IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT); 10712 10713 // Special case for ObjC BOOL on targets where it's a typedef for a signed char 10714 // (namely, macOS). 10715 bool IsObjCSignedCharBool = S.getLangOpts().ObjC && 10716 S.NSAPIObj->isObjCBOOLType(OtherT) && 10717 OtherT->isSpecificBuiltinType(BuiltinType::SChar); 10718 10719 // Whether we're treating Other as being a bool because of the form of 10720 // expression despite it having another type (typically 'int' in C). 10721 bool OtherIsBooleanDespiteType = 10722 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue(); 10723 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool) 10724 OtherRange = IntRange::forBoolType(); 10725 10726 // Determine the promoted range of the other type and see if a comparison of 10727 // the constant against that range is tautological. 10728 PromotedRange OtherPromotedRange(OtherRange, Value.getBitWidth(), 10729 Value.isUnsigned()); 10730 auto Cmp = OtherPromotedRange.compare(Value); 10731 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant); 10732 if (!Result) 10733 return false; 10734 10735 // Suppress the diagnostic for an in-range comparison if the constant comes 10736 // from a macro or enumerator. We don't want to diagnose 10737 // 10738 // some_long_value <= INT_MAX 10739 // 10740 // when sizeof(int) == sizeof(long). 10741 bool InRange = Cmp & PromotedRange::InRangeFlag; 10742 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 10743 return false; 10744 10745 // If this is a comparison to an enum constant, include that 10746 // constant in the diagnostic. 10747 const EnumConstantDecl *ED = nullptr; 10748 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 10749 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 10750 10751 // Should be enough for uint128 (39 decimal digits) 10752 SmallString<64> PrettySourceValue; 10753 llvm::raw_svector_ostream OS(PrettySourceValue); 10754 if (ED) { 10755 OS << '\'' << *ED << "' (" << Value << ")"; 10756 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 10757 Constant->IgnoreParenImpCasts())) { 10758 OS << (BL->getValue() ? "YES" : "NO"); 10759 } else { 10760 OS << Value; 10761 } 10762 10763 if (IsObjCSignedCharBool) { 10764 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 10765 S.PDiag(diag::warn_tautological_compare_objc_bool) 10766 << OS.str() << *Result); 10767 return true; 10768 } 10769 10770 // FIXME: We use a somewhat different formatting for the in-range cases and 10771 // cases involving boolean values for historical reasons. We should pick a 10772 // consistent way of presenting these diagnostics. 10773 if (!InRange || Other->isKnownToHaveBooleanValue()) { 10774 10775 S.DiagRuntimeBehavior( 10776 E->getOperatorLoc(), E, 10777 S.PDiag(!InRange ? diag::warn_out_of_range_compare 10778 : diag::warn_tautological_bool_compare) 10779 << OS.str() << classifyConstantValue(Constant) << OtherT 10780 << OtherIsBooleanDespiteType << *Result 10781 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 10782 } else { 10783 unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 10784 ? (HasEnumType(OriginalOther) 10785 ?
diag::warn_unsigned_enum_always_true_comparison 10786 : diag::warn_unsigned_always_true_comparison) 10787 : diag::warn_tautological_constant_compare; 10788 10789 S.Diag(E->getOperatorLoc(), Diag) 10790 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 10791 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 10792 } 10793 10794 return true; 10795 } 10796 10797 /// Analyze the operands of the given comparison. Implements the 10798 /// fallback case from AnalyzeComparison. 10799 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 10800 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 10801 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 10802 } 10803 10804 /// Implements -Wsign-compare. 10805 /// 10806 /// \param E the binary operator to check for warnings 10807 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 10808 // The type the comparison is being performed in. 10809 QualType T = E->getLHS()->getType(); 10810 10811 // Only analyze comparison operators where both sides have been converted to 10812 // the same type. 10813 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 10814 return AnalyzeImpConvsInComparison(S, E); 10815 10816 // Don't analyze value-dependent comparisons directly. 10817 if (E->isValueDependent()) 10818 return AnalyzeImpConvsInComparison(S, E); 10819 10820 Expr *LHS = E->getLHS(); 10821 Expr *RHS = E->getRHS(); 10822 10823 if (T->isIntegralType(S.Context)) { 10824 llvm::APSInt RHSValue; 10825 llvm::APSInt LHSValue; 10826 10827 bool IsRHSIntegralLiteral = RHS->isIntegerConstantExpr(RHSValue, S.Context); 10828 bool IsLHSIntegralLiteral = LHS->isIntegerConstantExpr(LHSValue, S.Context); 10829 10830 // We don't care about expressions whose result is a constant. 10831 if (IsRHSIntegralLiteral && IsLHSIntegralLiteral) 10832 return AnalyzeImpConvsInComparison(S, E); 10833 10834 // We only care about expressions where just one side is literal 10835 if (IsRHSIntegralLiteral ^ IsLHSIntegralLiteral) { 10836 // Is the constant on the RHS or LHS? 10837 const bool RhsConstant = IsRHSIntegralLiteral; 10838 Expr *Const = RhsConstant ? RHS : LHS; 10839 Expr *Other = RhsConstant ? LHS : RHS; 10840 const llvm::APSInt &Value = RhsConstant ? RHSValue : LHSValue; 10841 10842 // Check whether an integer constant comparison results in a value 10843 // of 'true' or 'false'. 10844 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 10845 return AnalyzeImpConvsInComparison(S, E); 10846 } 10847 } 10848 10849 if (!T->hasUnsignedIntegerRepresentation()) { 10850 // We don't do anything special if this isn't an unsigned integral 10851 // comparison: we're only interested in integral comparisons, and 10852 // signed comparisons only happen in cases we don't care to warn about. 10853 return AnalyzeImpConvsInComparison(S, E); 10854 } 10855 10856 LHS = LHS->IgnoreParenImpCasts(); 10857 RHS = RHS->IgnoreParenImpCasts(); 10858 10859 if (!S.getLangOpts().CPlusPlus) { 10860 // Avoid warning about comparison of integers with different signs when 10861 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 10862 // the type of `E`. 
10863 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 10864 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 10865 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 10866 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 10867 } 10868 10869 // Check to see if one of the (unmodified) operands is of different 10870 // signedness. 10871 Expr *signedOperand, *unsignedOperand; 10872 if (LHS->getType()->hasSignedIntegerRepresentation()) { 10873 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 10874 "unsigned comparison between two signed integer expressions?"); 10875 signedOperand = LHS; 10876 unsignedOperand = RHS; 10877 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 10878 signedOperand = RHS; 10879 unsignedOperand = LHS; 10880 } else { 10881 return AnalyzeImpConvsInComparison(S, E); 10882 } 10883 10884 // Otherwise, calculate the effective range of the signed operand. 10885 IntRange signedRange = 10886 GetExprRange(S.Context, signedOperand, S.isConstantEvaluated()); 10887 10888 // Go ahead and analyze implicit conversions in the operands. Note 10889 // that we skip the implicit conversions on both sides. 10890 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); 10891 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); 10892 10893 // If the signed range is non-negative, -Wsign-compare won't fire. 10894 if (signedRange.NonNegative) 10895 return; 10896 10897 // For (in)equality comparisons, if the unsigned operand is a 10898 // constant which cannot collide with an overflowed signed operand, 10899 // then reinterpreting the signed operand as unsigned will not 10900 // change the result of the comparison. 10901 if (E->isEqualityOp()) { 10902 unsigned comparisonWidth = S.Context.getIntWidth(T); 10903 IntRange unsignedRange = 10904 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated()); 10905 10906 // We should never be unable to prove that the unsigned operand is 10907 // non-negative. 10908 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 10909 10910 if (unsignedRange.Width < comparisonWidth) 10911 return; 10912 } 10913 10914 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 10915 S.PDiag(diag::warn_mixed_sign_comparison) 10916 << LHS->getType() << RHS->getType() 10917 << LHS->getSourceRange() << RHS->getSourceRange()); 10918 } 10919 10920 /// Analyzes an attempt to assign the given value to a bitfield. 10921 /// 10922 /// Returns true if there was something fishy about the attempt. 10923 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, 10924 SourceLocation InitLoc) { 10925 assert(Bitfield->isBitField()); 10926 if (Bitfield->isInvalidDecl()) 10927 return false; 10928 10929 // White-list bool bitfields. 10930 QualType BitfieldType = Bitfield->getType(); 10931 if (BitfieldType->isBooleanType()) 10932 return false; 10933 10934 if (BitfieldType->isEnumeralType()) { 10935 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl(); 10936 // If the underlying enum type was not explicitly specified as an unsigned 10937 // type and the enum contains only positive values, MSVC++ will cause an 10938 // inconsistency by storing this as a signed type.
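// For illustration (hypothetical user code): with 'enum State { Idle, Busy };'
// and a member declared as 'State S : 1;', MSVC treats the bit-field as
// signed, so storing 'Busy' can read back as -1; the C++11 warning below
// suggests giving the enum an explicit underlying type.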
10939 if (S.getLangOpts().CPlusPlus11 && 10940 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 10941 BitfieldEnumDecl->getNumPositiveBits() > 0 && 10942 BitfieldEnumDecl->getNumNegativeBits() == 0) { 10943 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 10944 << BitfieldEnumDecl->getNameAsString(); 10945 } 10946 } 10947 10948 if (Bitfield->getType()->isBooleanType()) 10949 return false; 10950 10951 // Ignore value- or type-dependent expressions. 10952 if (Bitfield->getBitWidth()->isValueDependent() || 10953 Bitfield->getBitWidth()->isTypeDependent() || 10954 Init->isValueDependent() || 10955 Init->isTypeDependent()) 10956 return false; 10957 10958 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 10959 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 10960 10961 Expr::EvalResult Result; 10962 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 10963 Expr::SE_AllowSideEffects)) { 10964 // The RHS is not constant. If the RHS has an enum type, make sure the 10965 // bitfield is wide enough to hold all the values of the enum without 10966 // truncation. 10967 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 10968 EnumDecl *ED = EnumTy->getDecl(); 10969 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 10970 10971 // Enum types are implicitly signed on Windows, so check if there are any 10972 // negative enumerators to see if the enum was intended to be signed or 10973 // not. 10974 bool SignedEnum = ED->getNumNegativeBits() > 0; 10975 10976 // Check for surprising sign changes when assigning enum values to a 10977 // bitfield of different signedness. If the bitfield is signed and we 10978 // have exactly the right number of bits to store this unsigned enum, 10979 // suggest changing the enum to an unsigned type. This typically happens 10980 // on Windows where unfixed enums always use an underlying type of 'int'. 10981 unsigned DiagID = 0; 10982 if (SignedEnum && !SignedBitfield) { 10983 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 10984 } else if (SignedBitfield && !SignedEnum && 10985 ED->getNumPositiveBits() == FieldWidth) { 10986 DiagID = diag::warn_signed_bitfield_enum_conversion; 10987 } 10988 10989 if (DiagID) { 10990 S.Diag(InitLoc, DiagID) << Bitfield << ED; 10991 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 10992 SourceRange TypeRange = 10993 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 10994 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 10995 << SignedEnum << TypeRange; 10996 } 10997 10998 // Compute the required bitwidth. If the enum has negative values, we need 10999 // one more bit than the normal number of positive bits to represent the 11000 // sign bit. 11001 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 11002 ED->getNumNegativeBits()) 11003 : ED->getNumPositiveBits(); 11004 11005 // Check the bitwidth. 
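// For illustration (hypothetical user code): an enum whose largest
// enumerator is 5 needs three value bits (one more if any enumerator is
// negative, for the sign), so assigning it to a bit-field declared as
// 'unsigned E : 2' triggers the "bit-field too small" diagnostic below.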
11006 if (BitsNeeded > FieldWidth) { 11007 Expr *WidthExpr = Bitfield->getBitWidth(); 11008 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 11009 << Bitfield << ED; 11010 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 11011 << BitsNeeded << ED << WidthExpr->getSourceRange(); 11012 } 11013 } 11014 11015 return false; 11016 } 11017 11018 llvm::APSInt Value = Result.Val.getInt(); 11019 11020 unsigned OriginalWidth = Value.getBitWidth(); 11021 11022 if (!Value.isSigned() || Value.isNegative()) 11023 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 11024 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 11025 OriginalWidth = Value.getMinSignedBits(); 11026 11027 if (OriginalWidth <= FieldWidth) 11028 return false; 11029 11030 // Compute the value which the bitfield will contain. 11031 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 11032 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 11033 11034 // Check whether the stored value is equal to the original value. 11035 TruncatedValue = TruncatedValue.extend(OriginalWidth); 11036 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 11037 return false; 11038 11039 // Special-case bitfields of width 1: booleans are naturally 0/1, and 11040 // therefore don't strictly fit into a signed bitfield of width 1. 11041 if (FieldWidth == 1 && Value == 1) 11042 return false; 11043 11044 std::string PrettyValue = Value.toString(10); 11045 std::string PrettyTrunc = TruncatedValue.toString(10); 11046 11047 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 11048 << PrettyValue << PrettyTrunc << OriginalInit->getType() 11049 << Init->getSourceRange(); 11050 11051 return true; 11052 } 11053 11054 /// Analyze the given simple or compound assignment for warning-worthy 11055 /// operations. 11056 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 11057 // Just recurse on the LHS. 11058 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11059 11060 // We want to recurse on the RHS as normal unless we're assigning to 11061 // a bitfield. 11062 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 11063 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 11064 E->getOperatorLoc())) { 11065 // Recurse, ignoring any implicit conversions on the RHS. 11066 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 11067 E->getOperatorLoc()); 11068 } 11069 } 11070 11071 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11072 11073 // Diagnose implicitly sequentially-consistent atomic assignment. 11074 if (E->getLHS()->getType()->isAtomicType()) 11075 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 11076 } 11077 11078 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 11079 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 11080 SourceLocation CContext, unsigned diag, 11081 bool pruneControlFlow = false) { 11082 if (pruneControlFlow) { 11083 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11084 S.PDiag(diag) 11085 << SourceType << T << E->getSourceRange() 11086 << SourceRange(CContext)); 11087 return; 11088 } 11089 S.Diag(E->getExprLoc(), diag) 11090 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 11091 } 11092 11093 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 
11094 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 11095 SourceLocation CContext, 11096 unsigned diag, bool pruneControlFlow = false) { 11097 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 11098 } 11099 11100 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 11101 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 11102 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 11103 } 11104 11105 static void adornObjCBoolConversionDiagWithTernaryFixit( 11106 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 11107 Expr *Ignored = SourceExpr->IgnoreImplicit(); 11108 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 11109 Ignored = OVE->getSourceExpr(); 11110 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 11111 isa<BinaryOperator>(Ignored) || 11112 isa<CXXOperatorCallExpr>(Ignored); 11113 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 11114 if (NeedsParens) 11115 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 11116 << FixItHint::CreateInsertion(EndLoc, ")"); 11117 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 11118 } 11119 11120 /// Diagnose an implicit cast from a floating point value to an integer value. 11121 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 11122 SourceLocation CContext) { 11123 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 11124 const bool PruneWarnings = S.inTemplateInstantiation(); 11125 11126 Expr *InnerE = E->IgnoreParenImpCasts(); 11127 // We also want to warn on, e.g., "int i = -1.234" 11128 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 11129 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 11130 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 11131 11132 const bool IsLiteral = 11133 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 11134 11135 llvm::APFloat Value(0.0); 11136 bool IsConstant = 11137 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 11138 if (!IsConstant) { 11139 if (isObjCSignedCharBool(S, T)) { 11140 return adornObjCBoolConversionDiagWithTernaryFixit( 11141 S, E, 11142 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 11143 << E->getType()); 11144 } 11145 11146 return DiagnoseImpCast(S, E, T, CContext, 11147 diag::warn_impcast_float_integer, PruneWarnings); 11148 } 11149 11150 bool isExact = false; 11151 11152 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 11153 T->hasUnsignedIntegerRepresentation()); 11154 llvm::APFloat::opStatus Result = Value.convertToInteger( 11155 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 11156 11157 // FIXME: Force the precision of the source value down so we don't print 11158 // digits which are usually useless (we don't really care here if we 11159 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 11160 // would automatically print the shortest representation, but it's a bit 11161 // tricky to implement. 
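// For illustration (hypothetical user code): 'int i = 2.5;' converts
// inexactly, so the literal path below reports the change from "2.5" to
// "2", whereas 'int i = 2.0;' converts exactly and the literal is not
// diagnosed at all.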
11162 SmallString<16> PrettySourceValue; 11163 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 11164 precision = (precision * 59 + 195) / 196; 11165 Value.toString(PrettySourceValue, precision); 11166 11167 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 11168 return adornObjCBoolConversionDiagWithTernaryFixit( 11169 S, E, 11170 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 11171 << PrettySourceValue); 11172 } 11173 11174 if (Result == llvm::APFloat::opOK && isExact) { 11175 if (IsLiteral) return; 11176 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 11177 PruneWarnings); 11178 } 11179 11180 // Conversion of a floating-point value to a non-bool integer where the 11181 // integral part cannot be represented by the integer type is undefined. 11182 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 11183 return DiagnoseImpCast( 11184 S, E, T, CContext, 11185 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 11186 : diag::warn_impcast_float_to_integer_out_of_range, 11187 PruneWarnings); 11188 11189 unsigned DiagID = 0; 11190 if (IsLiteral) { 11191 // Warn on floating point literal to integer. 11192 DiagID = diag::warn_impcast_literal_float_to_integer; 11193 } else if (IntegerValue == 0) { 11194 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 11195 return DiagnoseImpCast(S, E, T, CContext, 11196 diag::warn_impcast_float_integer, PruneWarnings); 11197 } 11198 // Warn on non-zero to zero conversion. 11199 DiagID = diag::warn_impcast_float_to_integer_zero; 11200 } else { 11201 if (IntegerValue.isUnsigned()) { 11202 if (!IntegerValue.isMaxValue()) { 11203 return DiagnoseImpCast(S, E, T, CContext, 11204 diag::warn_impcast_float_integer, PruneWarnings); 11205 } 11206 } else { // IntegerValue.isSigned() 11207 if (!IntegerValue.isMaxSignedValue() && 11208 !IntegerValue.isMinSignedValue()) { 11209 return DiagnoseImpCast(S, E, T, CContext, 11210 diag::warn_impcast_float_integer, PruneWarnings); 11211 } 11212 } 11213 // Warn on evaluatable floating point expression to integer conversion. 11214 DiagID = diag::warn_impcast_float_to_integer; 11215 } 11216 11217 SmallString<16> PrettyTargetValue; 11218 if (IsBool) 11219 PrettyTargetValue = Value.isZero() ? "false" : "true"; 11220 else 11221 IntegerValue.toString(PrettyTargetValue); 11222 11223 if (PruneWarnings) { 11224 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11225 S.PDiag(DiagID) 11226 << E->getType() << T.getUnqualifiedType() 11227 << PrettySourceValue << PrettyTargetValue 11228 << E->getSourceRange() << SourceRange(CContext)); 11229 } else { 11230 S.Diag(E->getExprLoc(), DiagID) 11231 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 11232 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 11233 } 11234 } 11235 11236 /// Analyze the given compound assignment for the possible losing of 11237 /// floating-point precision. 
11238 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 11239 assert(isa<CompoundAssignOperator>(E) && 11240 "Must be compound assignment operation"); 11241 // Recurse on the LHS and RHS in here 11242 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11243 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11244 11245 if (E->getLHS()->getType()->isAtomicType()) 11246 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 11247 11248 // Now check the outermost expression 11249 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 11250 const auto *RBT = cast<CompoundAssignOperator>(E) 11251 ->getComputationResultType() 11252 ->getAs<BuiltinType>(); 11253 11254 // The below checks assume source is floating point. 11255 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 11256 11257 // If source is floating point but target is an integer. 11258 if (ResultBT->isInteger()) 11259 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 11260 E->getExprLoc(), diag::warn_impcast_float_integer); 11261 11262 if (!ResultBT->isFloatingPoint()) 11263 return; 11264 11265 // If both source and target are floating points, warn about losing precision. 11266 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 11267 QualType(ResultBT, 0), QualType(RBT, 0)); 11268 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 11269 // warn about dropping FP rank. 11270 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 11271 diag::warn_impcast_float_result_precision); 11272 } 11273 11274 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 11275 IntRange Range) { 11276 if (!Range.Width) return "0"; 11277 11278 llvm::APSInt ValueInRange = Value; 11279 ValueInRange.setIsSigned(!Range.NonNegative); 11280 ValueInRange = ValueInRange.trunc(Range.Width); 11281 return ValueInRange.toString(10); 11282 } 11283 11284 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 11285 if (!isa<ImplicitCastExpr>(Ex)) 11286 return false; 11287 11288 Expr *InnerE = Ex->IgnoreParenImpCasts(); 11289 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 11290 const Type *Source = 11291 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 11292 if (Target->isDependentType()) 11293 return false; 11294 11295 const BuiltinType *FloatCandidateBT = 11296 dyn_cast<BuiltinType>(ToBool ? Source : Target); 11297 const Type *BoolCandidateType = ToBool ? Target : Source; 11298 11299 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 11300 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 11301 } 11302 11303 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 11304 SourceLocation CC) { 11305 unsigned NumArgs = TheCall->getNumArgs(); 11306 for (unsigned i = 0; i < NumArgs; ++i) { 11307 Expr *CurrA = TheCall->getArg(i); 11308 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 11309 continue; 11310 11311 bool IsSwapped = ((i > 0) && 11312 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 11313 IsSwapped |= ((i < (NumArgs - 1)) && 11314 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 11315 if (IsSwapped) { 11316 // Warn on this floating-point to bool conversion. 
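// For illustration (hypothetical user code): given 'void f(bool, double);',
// a call such as 'f(0.5, x > 1.0)' converts a floating-point argument to
// bool right next to a boolean argument converted to floating point, which
// usually means the two arguments were swapped.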
11317 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 11318 CurrA->getType(), CC, 11319 diag::warn_impcast_floating_point_to_bool); 11320 } 11321 } 11322 } 11323 11324 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 11325 SourceLocation CC) { 11326 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 11327 E->getExprLoc())) 11328 return; 11329 11330 // Don't warn on functions which have return type nullptr_t. 11331 if (isa<CallExpr>(E)) 11332 return; 11333 11334 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 11335 const Expr::NullPointerConstantKind NullKind = 11336 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 11337 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 11338 return; 11339 11340 // Return if target type is a safe conversion. 11341 if (T->isAnyPointerType() || T->isBlockPointerType() || 11342 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 11343 return; 11344 11345 SourceLocation Loc = E->getSourceRange().getBegin(); 11346 11347 // Venture through the macro stacks to get to the source of macro arguments. 11348 // The new location is a better location than the complete location that was 11349 // passed in. 11350 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 11351 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 11352 11353 // __null is usually wrapped in a macro. Go up a macro if that is the case. 11354 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 11355 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 11356 Loc, S.SourceMgr, S.getLangOpts()); 11357 if (MacroName == "NULL") 11358 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 11359 } 11360 11361 // Only warn if the null and context location are in the same macro expansion. 11362 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 11363 return; 11364 11365 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 11366 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 11367 << FixItHint::CreateReplacement(Loc, 11368 S.getFixItZeroLiteralForType(T, Loc)); 11369 } 11370 11371 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11372 ObjCArrayLiteral *ArrayLiteral); 11373 11374 static void 11375 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 11376 ObjCDictionaryLiteral *DictionaryLiteral); 11377 11378 /// Check a single element within a collection literal against the 11379 /// target element type. 11380 static void checkObjCCollectionLiteralElement(Sema &S, 11381 QualType TargetElementType, 11382 Expr *Element, 11383 unsigned ElementKind) { 11384 // Skip a bitcast to 'id' or qualified 'id'. 
11385 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 11386 if (ICE->getCastKind() == CK_BitCast && 11387 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 11388 Element = ICE->getSubExpr(); 11389 } 11390 11391 QualType ElementType = Element->getType(); 11392 ExprResult ElementResult(Element); 11393 if (ElementType->getAs<ObjCObjectPointerType>() && 11394 S.CheckSingleAssignmentConstraints(TargetElementType, 11395 ElementResult, 11396 false, false) 11397 != Sema::Compatible) { 11398 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 11399 << ElementType << ElementKind << TargetElementType 11400 << Element->getSourceRange(); 11401 } 11402 11403 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 11404 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 11405 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 11406 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 11407 } 11408 11409 /// Check an Objective-C array literal being converted to the given 11410 /// target type. 11411 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11412 ObjCArrayLiteral *ArrayLiteral) { 11413 if (!S.NSArrayDecl) 11414 return; 11415 11416 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 11417 if (!TargetObjCPtr) 11418 return; 11419 11420 if (TargetObjCPtr->isUnspecialized() || 11421 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 11422 != S.NSArrayDecl->getCanonicalDecl()) 11423 return; 11424 11425 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 11426 if (TypeArgs.size() != 1) 11427 return; 11428 11429 QualType TargetElementType = TypeArgs[0]; 11430 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 11431 checkObjCCollectionLiteralElement(S, TargetElementType, 11432 ArrayLiteral->getElement(I), 11433 0); 11434 } 11435 } 11436 11437 /// Check an Objective-C dictionary literal being converted to the given 11438 /// target type. 11439 static void 11440 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 11441 ObjCDictionaryLiteral *DictionaryLiteral) { 11442 if (!S.NSDictionaryDecl) 11443 return; 11444 11445 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 11446 if (!TargetObjCPtr) 11447 return; 11448 11449 if (TargetObjCPtr->isUnspecialized() || 11450 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 11451 != S.NSDictionaryDecl->getCanonicalDecl()) 11452 return; 11453 11454 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 11455 if (TypeArgs.size() != 2) 11456 return; 11457 11458 QualType TargetKeyType = TypeArgs[0]; 11459 QualType TargetObjectType = TypeArgs[1]; 11460 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 11461 auto Element = DictionaryLiteral->getKeyValueElement(I); 11462 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 11463 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 11464 } 11465 } 11466 11467 // Helper function to filter out cases for constant width constant conversion. 11468 // Don't warn on char array initialization or for non-decimal values. 11469 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 11470 SourceLocation CC) { 11471 // If initializing from a constant, and the constant starts with '0', 11472 // then it is a binary, octal, or hexadecimal. Allow these constants 11473 // to fill all the bits, even if there is a sign change. 
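// For illustration (hypothetical user code): 'signed char c = 0xFF;' is
// excluded from the constant-value diagnostic here because the literal is
// written in hex, while 'signed char c = 255;' is reported as a constant
// that changes value (255 becomes -1) in the signed 8-bit type.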
11474 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) { 11475 const char FirstLiteralCharacter = 11476 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0]; 11477 if (FirstLiteralCharacter == '0') 11478 return false; 11479 } 11480 11481 // If the CC location points to a '{', and the type is char, then assume 11482 // it is an array initialization. 11483 if (CC.isValid() && T->isCharType()) { 11484 const char FirstContextCharacter = 11485 S.getSourceManager().getCharacterData(CC)[0]; 11486 if (FirstContextCharacter == '{') 11487 return false; 11488 } 11489 11490 return true; 11491 } 11492 11493 static const IntegerLiteral *getIntegerLiteral(Expr *E) { 11494 const auto *IL = dyn_cast<IntegerLiteral>(E); 11495 if (!IL) { 11496 if (auto *UO = dyn_cast<UnaryOperator>(E)) { 11497 if (UO->getOpcode() == UO_Minus) 11498 return dyn_cast<IntegerLiteral>(UO->getSubExpr()); 11499 } 11500 } 11501 11502 return IL; 11503 } 11504 11505 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) { 11506 E = E->IgnoreParenImpCasts(); 11507 SourceLocation ExprLoc = E->getExprLoc(); 11508 11509 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 11510 BinaryOperator::Opcode Opc = BO->getOpcode(); 11511 Expr::EvalResult Result; 11512 // Do not diagnose unsigned shifts. 11513 if (Opc == BO_Shl) { 11514 const auto *LHS = getIntegerLiteral(BO->getLHS()); 11515 const auto *RHS = getIntegerLiteral(BO->getRHS()); 11516 if (LHS && LHS->getValue() == 0) 11517 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0; 11518 else if (!E->isValueDependent() && LHS && RHS && 11519 RHS->getValue().isNonNegative() && 11520 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) 11521 S.Diag(ExprLoc, diag::warn_left_shift_always) 11522 << (Result.Val.getInt() != 0); 11523 else if (E->getType()->isSignedIntegerType()) 11524 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E; 11525 } 11526 } 11527 11528 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 11529 const auto *LHS = getIntegerLiteral(CO->getTrueExpr()); 11530 const auto *RHS = getIntegerLiteral(CO->getFalseExpr()); 11531 if (!LHS || !RHS) 11532 return; 11533 if ((LHS->getValue() == 0 || LHS->getValue() == 1) && 11534 (RHS->getValue() == 0 || RHS->getValue() == 1)) 11535 // Do not diagnose common idioms. 11536 return; 11537 if (LHS->getValue() != 0 && RHS->getValue() != 0) 11538 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true); 11539 } 11540 } 11541 11542 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T, 11543 SourceLocation CC, 11544 bool *ICContext = nullptr, 11545 bool IsListInit = false) { 11546 if (E->isTypeDependent() || E->isValueDependent()) return; 11547 11548 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 11549 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 11550 if (Source == Target) return; 11551 if (Target->isDependentType()) return; 11552 11553 // If the conversion context location is invalid don't complain. We also 11554 // don't want to emit a warning if the issue occurs from the expansion of 11555 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 11556 // delay this check as long as possible. Once we detect we are in that 11557 // scenario, we just return. 11558 if (CC.isInvalid()) 11559 return; 11560 11561 if (Source->isAtomicType()) 11562 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 11563 11564 // Diagnose implicit casts to bool.
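// A couple of illustrative cases (hypothetical user code): 'bool b = "yes";'
// hits the string-literal-to-bool warning below, and an Objective-C literal
// such as '@[]' or '@"str"' used as a bool hits the Objective-C literal
// warning, since such literals always yield non-nil objects.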
11565 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 11566 if (isa<StringLiteral>(E)) 11567 // Warn on string literal to bool. Checks for string literals in logical 11568 // and expressions, for instance, assert(0 && "error here"), are 11569 // prevented by a check in AnalyzeImplicitConversions(). 11570 return DiagnoseImpCast(S, E, T, CC, 11571 diag::warn_impcast_string_literal_to_bool); 11572 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 11573 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 11574 // This covers the literal expressions that evaluate to Objective-C 11575 // objects. 11576 return DiagnoseImpCast(S, E, T, CC, 11577 diag::warn_impcast_objective_c_literal_to_bool); 11578 } 11579 if (Source->isPointerType() || Source->canDecayToPointerType()) { 11580 // Warn on pointer to bool conversion that is always true. 11581 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 11582 SourceRange(CC)); 11583 } 11584 } 11585 11586 // If we're converting a constant to an ObjC BOOL on a platform where BOOL 11587 // is a typedef for signed char (macOS), then that constant value has to be 1 11588 // or 0. 11589 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 11590 Expr::EvalResult Result; 11591 if (E->EvaluateAsInt(Result, S.getASTContext(), 11592 Expr::SE_AllowSideEffects)) { 11593 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 11594 adornObjCBoolConversionDiagWithTernaryFixit( 11595 S, E, 11596 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 11597 << Result.Val.getInt().toString(10)); 11598 } 11599 return; 11600 } 11601 } 11602 11603 // Check implicit casts from Objective-C collection literals to specialized 11604 // collection types, e.g., NSArray<NSString *> *. 11605 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 11606 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 11607 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 11608 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 11609 11610 // Strip vector types. 11611 if (isa<VectorType>(Source)) { 11612 if (!isa<VectorType>(Target)) { 11613 if (S.SourceMgr.isInSystemMacro(CC)) 11614 return; 11615 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 11616 } 11617 11618 // If the vector cast is a cast between two vectors of the same size, it is 11619 // a bitcast, not a conversion. 11620 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 11621 return; 11622 11623 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 11624 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 11625 } 11626 if (auto VecTy = dyn_cast<VectorType>(Target)) 11627 Target = VecTy->getElementType().getTypePtr(); 11628 11629 // Strip complex types. 11630 if (isa<ComplexType>(Source)) { 11631 if (!isa<ComplexType>(Target)) { 11632 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 11633 return; 11634 11635 return DiagnoseImpCast(S, E, T, CC, 11636 S.getLangOpts().CPlusPlus 11637 ? diag::err_impcast_complex_scalar 11638 : diag::warn_impcast_complex_scalar); 11639 } 11640 11641 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 11642 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 11643 } 11644 11645 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 11646 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 11647 11648 // If the source is floating point...
11649 if (SourceBT && SourceBT->isFloatingPoint()) { 11650 // ...and the target is floating point... 11651 if (TargetBT && TargetBT->isFloatingPoint()) { 11652 // ...then warn if we're dropping FP rank. 11653 11654 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 11655 QualType(SourceBT, 0), QualType(TargetBT, 0)); 11656 if (Order > 0) { 11657 // Don't warn about float constants that are precisely 11658 // representable in the target type. 11659 Expr::EvalResult result; 11660 if (E->EvaluateAsRValue(result, S.Context)) { 11661 // Value might be a float, a float vector, or a float complex. 11662 if (IsSameFloatAfterCast(result.Val, 11663 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 11664 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 11665 return; 11666 } 11667 11668 if (S.SourceMgr.isInSystemMacro(CC)) 11669 return; 11670 11671 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 11672 } 11673 // ... or possibly if we're increasing rank, too 11674 else if (Order < 0) { 11675 if (S.SourceMgr.isInSystemMacro(CC)) 11676 return; 11677 11678 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 11679 } 11680 return; 11681 } 11682 11683 // If the target is integral, always warn. 11684 if (TargetBT && TargetBT->isInteger()) { 11685 if (S.SourceMgr.isInSystemMacro(CC)) 11686 return; 11687 11688 DiagnoseFloatingImpCast(S, E, T, CC); 11689 } 11690 11691 // Detect the case where a call result is converted from floating-point 11692 // to bool, and the final argument to the call is converted from bool, to 11693 // discover this typo: 11694 // 11695 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 11696 // 11697 // FIXME: This is an incredibly special case; is there some more general 11698 // way to detect this class of misplaced-parentheses bug? 11699 if (Target->isBooleanType() && isa<CallExpr>(E)) { 11700 // Check last argument of function call to see if it is an 11701 // implicit cast from a type matching the type the result 11702 // is being cast to. 11703 CallExpr *CEx = cast<CallExpr>(E); 11704 if (unsigned NumArgs = CEx->getNumArgs()) { 11705 Expr *LastA = CEx->getArg(NumArgs - 1); 11706 Expr *InnerE = LastA->IgnoreParenImpCasts(); 11707 if (isa<ImplicitCastExpr>(LastA) && 11708 InnerE->getType()->isBooleanType()) { 11709 // Warn on this floating-point to bool conversion 11710 DiagnoseImpCast(S, E, T, CC, 11711 diag::warn_impcast_floating_point_to_bool); 11712 } 11713 } 11714 } 11715 return; 11716 } 11717 11718 // Valid casts involving fixed point types should be accounted for here.
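// For illustration (hypothetical user code, assuming fixed-point types are
// enabled): initializing a 'short _Fract', whose values lie in [-1, 1), from
// the integer constant 2 overflows, so the range checks below emit the
// fixed-point range warning with the out-of-range value.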
11719 if (Source->isFixedPointType()) { 11720 if (Target->isUnsaturatedFixedPointType()) { 11721 Expr::EvalResult Result; 11722 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 11723 S.isConstantEvaluated())) { 11724 APFixedPoint Value = Result.Val.getFixedPoint(); 11725 APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 11726 APFixedPoint MinVal = S.Context.getFixedPointMin(T); 11727 if (Value > MaxVal || Value < MinVal) { 11728 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11729 S.PDiag(diag::warn_impcast_fixed_point_range) 11730 << Value.toString() << T 11731 << E->getSourceRange() 11732 << clang::SourceRange(CC)); 11733 return; 11734 } 11735 } 11736 } else if (Target->isIntegerType()) { 11737 Expr::EvalResult Result; 11738 if (!S.isConstantEvaluated() && 11739 E->EvaluateAsFixedPoint(Result, S.Context, 11740 Expr::SE_AllowSideEffects)) { 11741 APFixedPoint FXResult = Result.Val.getFixedPoint(); 11742 11743 bool Overflowed; 11744 llvm::APSInt IntResult = FXResult.convertToInt( 11745 S.Context.getIntWidth(T), 11746 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 11747 11748 if (Overflowed) { 11749 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11750 S.PDiag(diag::warn_impcast_fixed_point_range) 11751 << FXResult.toString() << T 11752 << E->getSourceRange() 11753 << clang::SourceRange(CC)); 11754 return; 11755 } 11756 } 11757 } 11758 } else if (Target->isUnsaturatedFixedPointType()) { 11759 if (Source->isIntegerType()) { 11760 Expr::EvalResult Result; 11761 if (!S.isConstantEvaluated() && 11762 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 11763 llvm::APSInt Value = Result.Val.getInt(); 11764 11765 bool Overflowed; 11766 APFixedPoint IntResult = APFixedPoint::getFromIntValue( 11767 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 11768 11769 if (Overflowed) { 11770 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11771 S.PDiag(diag::warn_impcast_fixed_point_range) 11772 << Value.toString(/*Radix=*/10) << T 11773 << E->getSourceRange() 11774 << clang::SourceRange(CC)); 11775 return; 11776 } 11777 } 11778 } 11779 } 11780 11781 // If we are casting an integer type to a floating point type without 11782 // initialization-list syntax, we might lose accuracy if the floating 11783 // point type has a narrower significand than the integer type. 11784 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 11785 TargetBT->isFloatingType() && !IsListInit) { 11786 // Determine the number of precision bits in the source integer type. 11787 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated()); 11788 unsigned int SourcePrecision = SourceRange.Width; 11789 11790 // Determine the number of precision bits in the 11791 // target floating point type. 11792 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 11793 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 11794 11795 if (SourcePrecision > 0 && TargetPrecision > 0 && 11796 SourcePrecision > TargetPrecision) { 11797 11798 llvm::APSInt SourceInt; 11799 if (E->isIntegerConstantExpr(SourceInt, S.Context)) { 11800 // If the source integer is a constant, convert it to the target 11801 // floating point type. Issue a warning if the value changes 11802 // during the whole conversion. 
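// For illustration (hypothetical user code): 'float f = 16777217;' (2^24 + 1)
// has more significant bits than a 'float' significand can hold, so the
// constant path below reports the original and the rounded value; 16777216
// converts exactly and is not diagnosed.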
11803 llvm::APFloat TargetFloatValue( 11804 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 11805 llvm::APFloat::opStatus ConversionStatus = 11806 TargetFloatValue.convertFromAPInt( 11807 SourceInt, SourceBT->isSignedInteger(), 11808 llvm::APFloat::rmNearestTiesToEven); 11809 11810 if (ConversionStatus != llvm::APFloat::opOK) { 11811 std::string PrettySourceValue = SourceInt.toString(10); 11812 SmallString<32> PrettyTargetValue; 11813 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 11814 11815 S.DiagRuntimeBehavior( 11816 E->getExprLoc(), E, 11817 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 11818 << PrettySourceValue << PrettyTargetValue << E->getType() << T 11819 << E->getSourceRange() << clang::SourceRange(CC)); 11820 } 11821 } else { 11822 // Otherwise, the implicit conversion may lose precision. 11823 DiagnoseImpCast(S, E, T, CC, 11824 diag::warn_impcast_integer_float_precision); 11825 } 11826 } 11827 } 11828 11829 DiagnoseNullConversion(S, E, T, CC); 11830 11831 S.DiscardMisalignedMemberAddress(Target, E); 11832 11833 if (Target->isBooleanType()) 11834 DiagnoseIntInBoolContext(S, E); 11835 11836 if (!Source->isIntegerType() || !Target->isIntegerType()) 11837 return; 11838 11839 // TODO: remove this early return once the false positives for constant->bool 11840 // in templates, macros, etc, are reduced or removed. 11841 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 11842 return; 11843 11844 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 11845 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 11846 return adornObjCBoolConversionDiagWithTernaryFixit( 11847 S, E, 11848 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 11849 << E->getType()); 11850 } 11851 11852 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated()); 11853 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 11854 11855 if (SourceRange.Width > TargetRange.Width) { 11856 // If the source is a constant, use a default-on diagnostic. 11857 // TODO: this should happen for bitfield stores, too. 11858 Expr::EvalResult Result; 11859 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 11860 S.isConstantEvaluated())) { 11861 llvm::APSInt Value(32); 11862 Value = Result.Val.getInt(); 11863 11864 if (S.SourceMgr.isInSystemMacro(CC)) 11865 return; 11866 11867 std::string PrettySourceValue = Value.toString(10); 11868 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 11869 11870 S.DiagRuntimeBehavior( 11871 E->getExprLoc(), E, 11872 S.PDiag(diag::warn_impcast_integer_precision_constant) 11873 << PrettySourceValue << PrettyTargetValue << E->getType() << T 11874 << E->getSourceRange() << clang::SourceRange(CC)); 11875 return; 11876 } 11877 11878 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
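// For example (a sketch, assuming an LP64 target with a 64-bit 'long' and a
// 32-bit 'int'):
//
//   int i = someLong;   // -Wshorten-64-to-32
//
// Other narrowings, e.g. 'short s = someInt;', fall under the generic
// -Wconversion precision diagnostic emitted just below.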
11879 if (S.SourceMgr.isInSystemMacro(CC))
11880 return;
11881
11882 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
11883 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
11884 /* pruneControlFlow */ true);
11885 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
11886 }
11887
11888 if (TargetRange.Width > SourceRange.Width) {
11889 if (auto *UO = dyn_cast<UnaryOperator>(E))
11890 if (UO->getOpcode() == UO_Minus)
11891 if (Source->isUnsignedIntegerType()) {
11892 if (Target->isUnsignedIntegerType())
11893 return DiagnoseImpCast(S, E, T, CC,
11894 diag::warn_impcast_high_order_zero_bits);
11895 if (Target->isSignedIntegerType())
11896 return DiagnoseImpCast(S, E, T, CC,
11897 diag::warn_impcast_nonnegative_result);
11898 }
11899 }
11900
11901 if (TargetRange.Width == SourceRange.Width && !TargetRange.NonNegative &&
11902 SourceRange.NonNegative && Source->isSignedIntegerType()) {
11903 // Warn when doing a signed-to-signed conversion if the positive source
11904 // value is exactly the width of the target type, which will
11905 // cause a negative value to be stored.
11906
11907 Expr::EvalResult Result;
11908 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
11909 !S.SourceMgr.isInSystemMacro(CC)) {
11910 llvm::APSInt Value = Result.Val.getInt();
11911 if (isSameWidthConstantConversion(S, E, T, CC)) {
11912 std::string PrettySourceValue = Value.toString(10);
11913 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
11914
11915 S.DiagRuntimeBehavior(
11916 E->getExprLoc(), E,
11917 S.PDiag(diag::warn_impcast_integer_precision_constant)
11918 << PrettySourceValue << PrettyTargetValue << E->getType() << T
11919 << E->getSourceRange() << clang::SourceRange(CC));
11920 return;
11921 }
11922 }
11923
11924 // Fall through for non-constants to give a sign conversion warning.
11925 }
11926
11927 if ((TargetRange.NonNegative && !SourceRange.NonNegative) ||
11928 (!TargetRange.NonNegative && SourceRange.NonNegative &&
11929 SourceRange.Width == TargetRange.Width)) {
11930 if (S.SourceMgr.isInSystemMacro(CC))
11931 return;
11932
11933 unsigned DiagID = diag::warn_impcast_integer_sign;
11934
11935 // Traditionally, gcc has warned about this under -Wsign-compare.
11936 // We also want to warn about it in -Wconversion.
11937 // So if -Wconversion is off, use a completely identical diagnostic
11938 // in the sign-compare group.
11939 // The conditional-checking code (below) uses ICContext to detect this case.
11940 if (ICContext) {
11941 DiagID = diag::warn_impcast_integer_sign_conditional;
11942 *ICContext = true;
11943 }
11944
11945 return DiagnoseImpCast(S, E, T, CC, DiagID);
11946 }
11947
11948 // Diagnose conversions between different enumeration types.
11949 // In C, we pretend that the type of an EnumConstantDecl is its enumeration
11950 // type, to give us better diagnostics.
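// For example (a sketch):
//
//   enum Fruit { Apple };
//   enum Pet { Dog };
//   enum Pet p = Apple;   // diag::warn_impcast_different_enum_types
//
// In C the constant 'Apple' would otherwise have type 'int', so the
// EnumConstantDecl special case below is what lets the check fire.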
11951 QualType SourceType = E->getType(); 11952 if (!S.getLangOpts().CPlusPlus) { 11953 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 11954 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 11955 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 11956 SourceType = S.Context.getTypeDeclType(Enum); 11957 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 11958 } 11959 } 11960 11961 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 11962 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 11963 if (SourceEnum->getDecl()->hasNameForLinkage() && 11964 TargetEnum->getDecl()->hasNameForLinkage() && 11965 SourceEnum != TargetEnum) { 11966 if (S.SourceMgr.isInSystemMacro(CC)) 11967 return; 11968 11969 return DiagnoseImpCast(S, E, SourceType, T, CC, 11970 diag::warn_impcast_different_enum_types); 11971 } 11972 } 11973 11974 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 11975 SourceLocation CC, QualType T); 11976 11977 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 11978 SourceLocation CC, bool &ICContext) { 11979 E = E->IgnoreParenImpCasts(); 11980 11981 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 11982 return CheckConditionalOperator(S, CO, CC, T); 11983 11984 AnalyzeImplicitConversions(S, E, CC); 11985 if (E->getType() != T) 11986 return CheckImplicitConversion(S, E, T, CC, &ICContext); 11987 } 11988 11989 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 11990 SourceLocation CC, QualType T) { 11991 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 11992 11993 Expr *TrueExpr = E->getTrueExpr(); 11994 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 11995 TrueExpr = BCO->getCommon(); 11996 11997 bool Suspicious = false; 11998 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 11999 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 12000 12001 if (T->isBooleanType()) 12002 DiagnoseIntInBoolContext(S, E); 12003 12004 // If -Wconversion would have warned about either of the candidates 12005 // for a signedness conversion to the context type... 12006 if (!Suspicious) return; 12007 12008 // ...but it's currently ignored... 12009 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 12010 return; 12011 12012 // ...then check whether it would have warned about either of the 12013 // candidates for a signedness conversion to the condition type. 12014 if (E->getType() == T) return; 12015 12016 Suspicious = false; 12017 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 12018 E->getType(), CC, &Suspicious); 12019 if (!Suspicious) 12020 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 12021 E->getType(), CC, &Suspicious); 12022 } 12023 12024 /// Check conversion of given expression to boolean. 12025 /// Input argument E is a logical expression. 12026 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 12027 if (S.getLangOpts().Bool) 12028 return; 12029 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 12030 return; 12031 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 12032 } 12033 12034 namespace { 12035 struct AnalyzeImplicitConversionsWorkItem { 12036 Expr *E; 12037 SourceLocation CC; 12038 bool IsListInit; 12039 }; 12040 } 12041 12042 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 12043 /// that should be visited are added to WorkList. 
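/// The non-recursive AnalyzeImplicitConversions overload further below seeds
/// the WorkList with the full expression and drains it in a loop, so deeply
/// nested expressions are handled iteratively instead of recursing down the
/// call stack.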
12044 static void AnalyzeImplicitConversions( 12045 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 12046 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 12047 Expr *OrigE = Item.E; 12048 SourceLocation CC = Item.CC; 12049 12050 QualType T = OrigE->getType(); 12051 Expr *E = OrigE->IgnoreParenImpCasts(); 12052 12053 // Propagate whether we are in a C++ list initialization expression. 12054 // If so, we do not issue warnings for implicit int-float conversion 12055 // precision loss, because C++11 narrowing already handles it. 12056 bool IsListInit = Item.IsListInit || 12057 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 12058 12059 if (E->isTypeDependent() || E->isValueDependent()) 12060 return; 12061 12062 Expr *SourceExpr = E; 12063 // Examine, but don't traverse into the source expression of an 12064 // OpaqueValueExpr, since it may have multiple parents and we don't want to 12065 // emit duplicate diagnostics. Its fine to examine the form or attempt to 12066 // evaluate it in the context of checking the specific conversion to T though. 12067 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 12068 if (auto *Src = OVE->getSourceExpr()) 12069 SourceExpr = Src; 12070 12071 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 12072 if (UO->getOpcode() == UO_Not && 12073 UO->getSubExpr()->isKnownToHaveBooleanValue()) 12074 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 12075 << OrigE->getSourceRange() << T->isBooleanType() 12076 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 12077 12078 // For conditional operators, we analyze the arguments as if they 12079 // were being fed directly into the output. 12080 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 12081 CheckConditionalOperator(S, CO, CC, T); 12082 return; 12083 } 12084 12085 // Check implicit argument conversions for function calls. 12086 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 12087 CheckImplicitArgumentConversions(S, Call, CC); 12088 12089 // Go ahead and check any implicit conversions we might have skipped. 12090 // The non-canonical typecheck is just an optimization; 12091 // CheckImplicitConversion will filter out dead implicit conversions. 12092 if (SourceExpr->getType() != T) 12093 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 12094 12095 // Now continue drilling into this expression. 12096 12097 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 12098 // The bound subexpressions in a PseudoObjectExpr are not reachable 12099 // as transitive children. 12100 // FIXME: Use a more uniform representation for this. 12101 for (auto *SE : POE->semantics()) 12102 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 12103 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 12104 } 12105 12106 // Skip past explicit casts. 12107 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 12108 E = CE->getSubExpr()->IgnoreParenImpCasts(); 12109 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 12110 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12111 WorkList.push_back({E, CC, IsListInit}); 12112 return; 12113 } 12114 12115 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 12116 // Do a somewhat different check with comparison operators. 12117 if (BO->isComparisonOp()) 12118 return AnalyzeComparison(S, BO); 12119 12120 // And with simple assignments. 12121 if (BO->getOpcode() == BO_Assign) 12122 return AnalyzeAssignment(S, BO); 12123 // And with compound assignments. 
12124 if (BO->isAssignmentOp()) 12125 return AnalyzeCompoundAssignment(S, BO); 12126 } 12127 12128 // These break the otherwise-useful invariant below. Fortunately, 12129 // we don't really need to recurse into them, because any internal 12130 // expressions should have been analyzed already when they were 12131 // built into statements. 12132 if (isa<StmtExpr>(E)) return; 12133 12134 // Don't descend into unevaluated contexts. 12135 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 12136 12137 // Now just recurse over the expression's children. 12138 CC = E->getExprLoc(); 12139 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 12140 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 12141 for (Stmt *SubStmt : E->children()) { 12142 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 12143 if (!ChildExpr) 12144 continue; 12145 12146 if (IsLogicalAndOperator && 12147 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 12148 // Ignore checking string literals that are in logical and operators. 12149 // This is a common pattern for asserts. 12150 continue; 12151 WorkList.push_back({ChildExpr, CC, IsListInit}); 12152 } 12153 12154 if (BO && BO->isLogicalOp()) { 12155 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 12156 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 12157 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 12158 12159 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 12160 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 12161 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 12162 } 12163 12164 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 12165 if (U->getOpcode() == UO_LNot) { 12166 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 12167 } else if (U->getOpcode() != UO_AddrOf) { 12168 if (U->getSubExpr()->getType()->isAtomicType()) 12169 S.Diag(U->getSubExpr()->getBeginLoc(), 12170 diag::warn_atomic_implicit_seq_cst); 12171 } 12172 } 12173 } 12174 12175 /// AnalyzeImplicitConversions - Find and report any interesting 12176 /// implicit conversions in the given expression. There are a couple 12177 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 12178 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC, 12179 bool IsListInit/*= false*/) { 12180 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList; 12181 WorkList.push_back({OrigE, CC, IsListInit}); 12182 while (!WorkList.empty()) 12183 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList); 12184 } 12185 12186 /// Diagnose integer type and any valid implicit conversion to it. 12187 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) { 12188 // Taking into account implicit conversions, 12189 // allow any integer. 12190 if (!E->getType()->isIntegerType()) { 12191 S.Diag(E->getBeginLoc(), 12192 diag::err_opencl_enqueue_kernel_invalid_local_size_type); 12193 return true; 12194 } 12195 // Potentially emit standard warnings for implicit conversions if enabled 12196 // using -Wconversion. 12197 CheckImplicitConversion(S, E, IntT, E->getBeginLoc()); 12198 return false; 12199 } 12200 12201 // Helper function for Sema::DiagnoseAlwaysNonNullPointer. 12202 // Returns true when emitting a warning about taking the address of a reference. 
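// For example (a sketch):
//
//   void f(int &r) {
//     if (&r) { /* ... */ }   // the address of a reference is never null
//   }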
12203 static bool CheckForReference(Sema &SemaRef, const Expr *E,
12204 const PartialDiagnostic &PD) {
12205 E = E->IgnoreParenImpCasts();
12206
12207 const FunctionDecl *FD = nullptr;
12208
12209 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
12210 if (!DRE->getDecl()->getType()->isReferenceType())
12211 return false;
12212 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
12213 if (!M->getMemberDecl()->getType()->isReferenceType())
12214 return false;
12215 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
12216 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
12217 return false;
12218 FD = Call->getDirectCallee();
12219 } else {
12220 return false;
12221 }
12222
12223 SemaRef.Diag(E->getExprLoc(), PD);
12224
12225 // If possible, point to location of function.
12226 if (FD) {
12227 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
12228 }
12229
12230 return true;
12231 }
12232
12233 // Returns true if the SourceLocation is expanded from any macro body.
12234 // Returns false if the SourceLocation is invalid, is not in a macro
12235 // expansion, or is expanded from a top-level macro argument.
12236 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
12237 if (Loc.isInvalid())
12238 return false;
12239
12240 while (Loc.isMacroID()) {
12241 if (SM.isMacroBodyExpansion(Loc))
12242 return true;
12243 Loc = SM.getImmediateMacroCallerLoc(Loc);
12244 }
12245
12246 return false;
12247 }
12248
12249 /// Diagnose pointers that are always non-null.
12250 /// \param E the expression containing the pointer
12251 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
12252 /// compared to a null pointer
12253 /// \param IsEqual True when the comparison is equal to a null pointer
12254 /// \param Range Extra SourceRange to highlight in the diagnostic
12255 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
12256 Expr::NullPointerConstantKind NullKind,
12257 bool IsEqual, SourceRange Range) {
12258 if (!E)
12259 return;
12260
12261 // Don't warn inside macros.
12262 if (E->getExprLoc().isMacroID()) {
12263 const SourceManager &SM = getSourceManager();
12264 if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
12265 IsInAnyMacroBody(SM, Range.getBegin()))
12266 return;
12267 }
12268 E = E->IgnoreImpCasts();
12269
12270 const bool IsCompare = NullKind != Expr::NPCK_NotNull;
12271
12272 if (isa<CXXThisExpr>(E)) {
12273 unsigned DiagID = IsCompare ? diag::warn_this_null_compare
12274 : diag::warn_this_bool_conversion;
12275 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
12276 return;
12277 }
12278
12279 bool IsAddressOf = false;
12280
12281 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
12282 if (UO->getOpcode() != UO_AddrOf)
12283 return;
12284 IsAddressOf = true;
12285 E = UO->getSubExpr();
12286 }
12287
12288 if (IsAddressOf) {
12289 unsigned DiagID = IsCompare
12290 ? diag::warn_address_of_reference_null_compare
12291 : diag::warn_address_of_reference_bool_conversion;
12292 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
12293 << IsEqual;
12294 if (CheckForReference(*this, E, PD)) {
12295 return;
12296 }
12297 }
12298
12299 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
12300 bool IsParam = isa<NonNullAttr>(NonnullAttr);
12301 std::string Str;
12302 llvm::raw_string_ostream S(Str);
12303 E->printPretty(S, nullptr, getPrintingPolicy());
12304 unsigned DiagID = IsCompare ?
diag::warn_nonnull_expr_compare 12305 : diag::warn_cast_nonnull_to_bool; 12306 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 12307 << E->getSourceRange() << Range << IsEqual; 12308 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 12309 }; 12310 12311 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 12312 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 12313 if (auto *Callee = Call->getDirectCallee()) { 12314 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 12315 ComplainAboutNonnullParamOrCall(A); 12316 return; 12317 } 12318 } 12319 } 12320 12321 // Expect to find a single Decl. Skip anything more complicated. 12322 ValueDecl *D = nullptr; 12323 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 12324 D = R->getDecl(); 12325 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 12326 D = M->getMemberDecl(); 12327 } 12328 12329 // Weak Decls can be null. 12330 if (!D || D->isWeak()) 12331 return; 12332 12333 // Check for parameter decl with nonnull attribute 12334 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 12335 if (getCurFunction() && 12336 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 12337 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 12338 ComplainAboutNonnullParamOrCall(A); 12339 return; 12340 } 12341 12342 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 12343 // Skip function template not specialized yet. 12344 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 12345 return; 12346 auto ParamIter = llvm::find(FD->parameters(), PV); 12347 assert(ParamIter != FD->param_end()); 12348 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter); 12349 12350 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) { 12351 if (!NonNull->args_size()) { 12352 ComplainAboutNonnullParamOrCall(NonNull); 12353 return; 12354 } 12355 12356 for (const ParamIdx &ArgNo : NonNull->args()) { 12357 if (ArgNo.getASTIndex() == ParamNo) { 12358 ComplainAboutNonnullParamOrCall(NonNull); 12359 return; 12360 } 12361 } 12362 } 12363 } 12364 } 12365 } 12366 12367 QualType T = D->getType(); 12368 const bool IsArray = T->isArrayType(); 12369 const bool IsFunction = T->isFunctionType(); 12370 12371 // Address of function is used to silence the function warning. 12372 if (IsAddressOf && IsFunction) { 12373 return; 12374 } 12375 12376 // Found nothing. 12377 if (!IsAddressOf && !IsFunction && !IsArray) 12378 return; 12379 12380 // Pretty print the expression for the diagnostic. 12381 std::string Str; 12382 llvm::raw_string_ostream S(Str); 12383 E->printPretty(S, nullptr, getPrintingPolicy()); 12384 12385 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare 12386 : diag::warn_impcast_pointer_to_bool; 12387 enum { 12388 AddressOf, 12389 FunctionPointer, 12390 ArrayPointer 12391 } DiagType; 12392 if (IsAddressOf) 12393 DiagType = AddressOf; 12394 else if (IsFunction) 12395 DiagType = FunctionPointer; 12396 else if (IsArray) 12397 DiagType = ArrayPointer; 12398 else 12399 llvm_unreachable("Could not determine diagnostic."); 12400 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange() 12401 << Range << IsEqual; 12402 12403 if (!IsFunction) 12404 return; 12405 12406 // Suggest '&' to silence the function warning. 12407 Diag(E->getExprLoc(), diag::note_function_warning_silence) 12408 << FixItHint::CreateInsertion(E->getBeginLoc(), "&"); 12409 12410 // Check to see if '()' fixit should be emitted. 
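// For example (a sketch): given 'bool ready();', writing 'if (ready)' tests
// the function's address, which is always non-null; the note below suggests
// appending '()' so the function is actually called.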
12411 QualType ReturnType;
12412 UnresolvedSet<4> NonTemplateOverloads;
12413 tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
12414 if (ReturnType.isNull())
12415 return;
12416
12417 if (IsCompare) {
12418 // There are two cases here. If the null is a null pointer constant, only
12419 // suggest the fixit for a pointer return type. If the null is 0, then
12420 // suggest it if the return type is a pointer or an integer type.
12421 if (!ReturnType->isPointerType()) {
12422 if (NullKind == Expr::NPCK_ZeroExpression ||
12423 NullKind == Expr::NPCK_ZeroLiteral) {
12424 if (!ReturnType->isIntegerType())
12425 return;
12426 } else {
12427 return;
12428 }
12429 }
12430 } else { // !IsCompare
12431 // For function to bool, only suggest if the function pointer has bool
12432 // return type.
12433 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
12434 return;
12435 }
12436 Diag(E->getExprLoc(), diag::note_function_to_function_call)
12437 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
12438 }
12439
12440 /// Diagnoses "dangerous" implicit conversions within the given
12441 /// expression (which is a full expression). Implements -Wconversion
12442 /// and -Wsign-compare.
12443 ///
12444 /// \param CC the "context" location of the implicit conversion, i.e.
12445 /// the location of the syntactic entity requiring the implicit
12446 /// conversion
12447 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
12448 // Don't diagnose in unevaluated contexts.
12449 if (isUnevaluatedContext())
12450 return;
12451
12452 // Don't diagnose for value- or type-dependent expressions.
12453 if (E->isTypeDependent() || E->isValueDependent())
12454 return;
12455
12456 // Check for array bounds violations in cases where the check isn't triggered
12457 // elsewhere for other Expr types (like BinaryOperators), e.g. when an
12458 // ArraySubscriptExpr is on the RHS of a variable initialization.
12459 CheckArrayAccess(E);
12460
12461 // This is not the right CC for (e.g.) a variable initialization.
12462 AnalyzeImplicitConversions(*this, E, CC);
12463 }
12464
12465 /// CheckBoolLikeConversion - Check conversion of the given expression to boolean.
12466 /// Input argument E is a logical expression.
12467 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
12468 ::CheckBoolLikeConversion(*this, E, CC);
12469 }
12470
12471 /// Diagnose when an expression is an integer constant expression and its
12472 /// evaluation results in integer overflow.
12473 void Sema::CheckForIntOverflow (Expr *E) {
12474 // Use a work list to deal with nested struct initializers.
12475 SmallVector<Expr *, 2> Exprs(1, E);
12476
12477 do {
12478 Expr *OriginalE = Exprs.pop_back_val();
12479 Expr *E = OriginalE->IgnoreParenCasts();
12480
12481 if (isa<BinaryOperator>(E)) {
12482 E->EvaluateForOverflow(Context);
12483 continue;
12484 }
12485
12486 if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
12487 Exprs.append(InitList->inits().begin(), InitList->inits().end());
12488 else if (isa<ObjCBoxedExpr>(OriginalE))
12489 E->EvaluateForOverflow(Context);
12490 else if (auto Call = dyn_cast<CallExpr>(E))
12491 Exprs.append(Call->arg_begin(), Call->arg_end());
12492 else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
12493 Exprs.append(Message->arg_begin(), Message->arg_end());
12494 } while (!Exprs.empty());
12495 }
12496
12497 namespace {
12498
12499 /// Visitor for expressions which looks for unsequenced operations on the
12500 /// same object.
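/// For example (a sketch): before C++17, 'i = i++ + 1;' modifies 'i' twice
/// without sequencing and is reported under -Wunsequenced via the
/// warn_unsequenced_mod_* diagnostics used below.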
12501 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> { 12502 using Base = ConstEvaluatedExprVisitor<SequenceChecker>; 12503 12504 /// A tree of sequenced regions within an expression. Two regions are 12505 /// unsequenced if one is an ancestor or a descendent of the other. When we 12506 /// finish processing an expression with sequencing, such as a comma 12507 /// expression, we fold its tree nodes into its parent, since they are 12508 /// unsequenced with respect to nodes we will visit later. 12509 class SequenceTree { 12510 struct Value { 12511 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 12512 unsigned Parent : 31; 12513 unsigned Merged : 1; 12514 }; 12515 SmallVector<Value, 8> Values; 12516 12517 public: 12518 /// A region within an expression which may be sequenced with respect 12519 /// to some other region. 12520 class Seq { 12521 friend class SequenceTree; 12522 12523 unsigned Index; 12524 12525 explicit Seq(unsigned N) : Index(N) {} 12526 12527 public: 12528 Seq() : Index(0) {} 12529 }; 12530 12531 SequenceTree() { Values.push_back(Value(0)); } 12532 Seq root() const { return Seq(0); } 12533 12534 /// Create a new sequence of operations, which is an unsequenced 12535 /// subset of \p Parent. This sequence of operations is sequenced with 12536 /// respect to other children of \p Parent. 12537 Seq allocate(Seq Parent) { 12538 Values.push_back(Value(Parent.Index)); 12539 return Seq(Values.size() - 1); 12540 } 12541 12542 /// Merge a sequence of operations into its parent. 12543 void merge(Seq S) { 12544 Values[S.Index].Merged = true; 12545 } 12546 12547 /// Determine whether two operations are unsequenced. This operation 12548 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 12549 /// should have been merged into its parent as appropriate. 12550 bool isUnsequenced(Seq Cur, Seq Old) { 12551 unsigned C = representative(Cur.Index); 12552 unsigned Target = representative(Old.Index); 12553 while (C >= Target) { 12554 if (C == Target) 12555 return true; 12556 C = Values[C].Parent; 12557 } 12558 return false; 12559 } 12560 12561 private: 12562 /// Pick a representative for a sequence. 12563 unsigned representative(unsigned K) { 12564 if (Values[K].Merged) 12565 // Perform path compression as we go. 12566 return Values[K].Parent = representative(Values[K].Parent); 12567 return K; 12568 } 12569 }; 12570 12571 /// An object for which we can track unsequenced uses. 12572 using Object = const NamedDecl *; 12573 12574 /// Different flavors of object usage which we track. We only track the 12575 /// least-sequenced usage of each kind. 12576 enum UsageKind { 12577 /// A read of an object. Multiple unsequenced reads are OK. 12578 UK_Use, 12579 12580 /// A modification of an object which is sequenced before the value 12581 /// computation of the expression, such as ++n in C++. 12582 UK_ModAsValue, 12583 12584 /// A modification of an object which is not sequenced before the value 12585 /// computation of the expression, such as n++. 12586 UK_ModAsSideEffect, 12587 12588 UK_Count = UK_ModAsSideEffect + 1 12589 }; 12590 12591 /// Bundle together a sequencing region and the expression corresponding 12592 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 
12593 struct Usage { 12594 const Expr *UsageExpr; 12595 SequenceTree::Seq Seq; 12596 12597 Usage() : UsageExpr(nullptr), Seq() {} 12598 }; 12599 12600 struct UsageInfo { 12601 Usage Uses[UK_Count]; 12602 12603 /// Have we issued a diagnostic for this object already? 12604 bool Diagnosed; 12605 12606 UsageInfo() : Uses(), Diagnosed(false) {} 12607 }; 12608 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 12609 12610 Sema &SemaRef; 12611 12612 /// Sequenced regions within the expression. 12613 SequenceTree Tree; 12614 12615 /// Declaration modifications and references which we have seen. 12616 UsageInfoMap UsageMap; 12617 12618 /// The region we are currently within. 12619 SequenceTree::Seq Region; 12620 12621 /// Filled in with declarations which were modified as a side-effect 12622 /// (that is, post-increment operations). 12623 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 12624 12625 /// Expressions to check later. We defer checking these to reduce 12626 /// stack usage. 12627 SmallVectorImpl<const Expr *> &WorkList; 12628 12629 /// RAII object wrapping the visitation of a sequenced subexpression of an 12630 /// expression. At the end of this process, the side-effects of the evaluation 12631 /// become sequenced with respect to the value computation of the result, so 12632 /// we downgrade any UK_ModAsSideEffect within the evaluation to 12633 /// UK_ModAsValue. 12634 struct SequencedSubexpression { 12635 SequencedSubexpression(SequenceChecker &Self) 12636 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 12637 Self.ModAsSideEffect = &ModAsSideEffect; 12638 } 12639 12640 ~SequencedSubexpression() { 12641 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 12642 // Add a new usage with usage kind UK_ModAsValue, and then restore 12643 // the previous usage with UK_ModAsSideEffect (thus clearing it if 12644 // the previous one was empty). 12645 UsageInfo &UI = Self.UsageMap[M.first]; 12646 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 12647 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 12648 SideEffectUsage = M.second; 12649 } 12650 Self.ModAsSideEffect = OldModAsSideEffect; 12651 } 12652 12653 SequenceChecker &Self; 12654 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 12655 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 12656 }; 12657 12658 /// RAII object wrapping the visitation of a subexpression which we might 12659 /// choose to evaluate as a constant. If any subexpression is evaluated and 12660 /// found to be non-constant, this allows us to suppress the evaluation of 12661 /// the outer expression. 12662 class EvaluationTracker { 12663 public: 12664 EvaluationTracker(SequenceChecker &Self) 12665 : Self(Self), Prev(Self.EvalTracker) { 12666 Self.EvalTracker = this; 12667 } 12668 12669 ~EvaluationTracker() { 12670 Self.EvalTracker = Prev; 12671 if (Prev) 12672 Prev->EvalOK &= EvalOK; 12673 } 12674 12675 bool evaluate(const Expr *E, bool &Result) { 12676 if (!EvalOK || E->isValueDependent()) 12677 return false; 12678 EvalOK = E->EvaluateAsBooleanCondition( 12679 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 12680 return EvalOK; 12681 } 12682 12683 private: 12684 SequenceChecker &Self; 12685 EvaluationTracker *Prev; 12686 bool EvalOK = true; 12687 } *EvalTracker = nullptr; 12688 12689 /// Find the object which is produced by the specified expression, 12690 /// if any. 
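/// For example, for 'x' or '++x' (with \p Mod set) this yields the
/// declaration of 'x'; expressions that do not resolve to a trackable
/// declaration yield a null Object.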
12691 Object getObject(const Expr *E, bool Mod) const {
12692 E = E->IgnoreParenCasts();
12693 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
12694 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
12695 return getObject(UO->getSubExpr(), Mod);
12696 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
12697 if (BO->getOpcode() == BO_Comma)
12698 return getObject(BO->getRHS(), Mod);
12699 if (Mod && BO->isAssignmentOp())
12700 return getObject(BO->getLHS(), Mod);
12701 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
12702 // FIXME: Check for more interesting cases, like "x.n = ++x.n".
12703 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
12704 return ME->getMemberDecl();
12705 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
12706 // FIXME: If this is a reference, map through to its value.
12707 return DRE->getDecl();
12708 return nullptr;
12709 }
12710
12711 /// Note that an object \p O was modified or used by an expression
12712 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
12713 /// the object \p O as obtained via the \p UsageMap.
12714 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
12715 // Get the old usage for the given object and usage kind.
12716 Usage &U = UI.Uses[UK];
12717 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
12718 // If we have a modification as side effect and are in a sequenced
12719 // subexpression, save the old Usage so that we can restore it later
12720 // in SequencedSubexpression::~SequencedSubexpression.
12721 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
12722 ModAsSideEffect->push_back(std::make_pair(O, U));
12723 // Then record the new usage with the current sequencing region.
12724 U.UsageExpr = UsageExpr;
12725 U.Seq = Region;
12726 }
12727 }
12728
12729 /// Check whether a modification or use of an object \p O in an expression
12730 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
12731 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
12732 /// \p IsModMod is true when we are checking for a mod-mod unsequenced
12733 /// usage and false when we are checking for a mod-use unsequenced usage.
12734 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
12735 UsageKind OtherKind, bool IsModMod) {
12736 if (UI.Diagnosed)
12737 return;
12738
12739 const Usage &U = UI.Uses[OtherKind];
12740 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
12741 return;
12742
12743 const Expr *Mod = U.UsageExpr;
12744 const Expr *ModOrUse = UsageExpr;
12745 if (OtherKind == UK_Use)
12746 std::swap(Mod, ModOrUse);
12747
12748 SemaRef.DiagRuntimeBehavior(
12749 Mod->getExprLoc(), {Mod, ModOrUse},
12750 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
12751 : diag::warn_unsequenced_mod_use)
12752 << O << SourceRange(ModOrUse->getExprLoc()));
12753 UI.Diagnosed = true;
12754 }
12755
12756 // A note on note{Pre, Post}{Use, Mod}:
12757 //
12758 // (It helps to follow the algorithm with an expression such as
12759 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
12760 // operations before C++17 and both are well-defined in C++17).
12761 //
12762 // When visiting a node which uses/modifies an object we first call notePreUse
12763 // or notePreMod before visiting its sub-expression(s). At this point the
12764 // children of the current node have not yet been visited and so the eventual
12765 // uses/modifications resulting from the children of the current node have not
12766 // been recorded yet.
12767 //
12768 // We then visit the children of the current node. After that notePostUse or
12769 // notePostMod is called. These will 1) detect an unsequenced modification
12770 // as side effect (as in "k++ + k") and 2) add a new usage with the
12771 // appropriate usage kind.
12772 //
12773 // We also have to be careful that some operations sequence modifications as
12774 // side effects as well (for example: || or ,). To account for this we wrap
12775 // the visitation of such a sub-expression (for example: the LHS of || or ,)
12776 // with SequencedSubexpression. SequencedSubexpression is an RAII object
12777 // which records usages which are modifications as side effects, and then
12778 // downgrades them (or more accurately restores the previous usage which was a
12779 // modification as side effect) when exiting the scope of the sequenced
12780 // subexpression.
12781
12782 void notePreUse(Object O, const Expr *UseExpr) {
12783 UsageInfo &UI = UsageMap[O];
12784 // Uses conflict with other modifications.
12785 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
12786 }
12787
12788 void notePostUse(Object O, const Expr *UseExpr) {
12789 UsageInfo &UI = UsageMap[O];
12790 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
12791 /*IsModMod=*/false);
12792 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use);
12793 }
12794
12795 void notePreMod(Object O, const Expr *ModExpr) {
12796 UsageInfo &UI = UsageMap[O];
12797 // Modifications conflict with other modifications and with uses.
12798 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
12799 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
12800 }
12801
12802 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
12803 UsageInfo &UI = UsageMap[O];
12804 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
12805 /*IsModMod=*/true);
12806 addUsage(O, UI, ModExpr, /*UsageKind=*/UK);
12807 }
12808
12809 public:
12810 SequenceChecker(Sema &S, const Expr *E,
12811 SmallVectorImpl<const Expr *> &WorkList)
12812 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
12813 Visit(E);
12814 // Silence a -Wunused-private-field since WorkList is now unused.
12815 // TODO: Evaluate if it can be used, and if not remove it.
12816 (void)this->WorkList;
12817 }
12818
12819 void VisitStmt(const Stmt *S) {
12820 // Skip all statements which aren't expressions for now.
12821 }
12822
12823 void VisitExpr(const Expr *E) {
12824 // By default, just recurse to evaluated subexpressions.
12825 Base::VisitStmt(E); 12826 } 12827 12828 void VisitCastExpr(const CastExpr *E) { 12829 Object O = Object(); 12830 if (E->getCastKind() == CK_LValueToRValue) 12831 O = getObject(E->getSubExpr(), false); 12832 12833 if (O) 12834 notePreUse(O, E); 12835 VisitExpr(E); 12836 if (O) 12837 notePostUse(O, E); 12838 } 12839 12840 void VisitSequencedExpressions(const Expr *SequencedBefore, 12841 const Expr *SequencedAfter) { 12842 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 12843 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 12844 SequenceTree::Seq OldRegion = Region; 12845 12846 { 12847 SequencedSubexpression SeqBefore(*this); 12848 Region = BeforeRegion; 12849 Visit(SequencedBefore); 12850 } 12851 12852 Region = AfterRegion; 12853 Visit(SequencedAfter); 12854 12855 Region = OldRegion; 12856 12857 Tree.merge(BeforeRegion); 12858 Tree.merge(AfterRegion); 12859 } 12860 12861 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 12862 // C++17 [expr.sub]p1: 12863 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 12864 // expression E1 is sequenced before the expression E2. 12865 if (SemaRef.getLangOpts().CPlusPlus17) 12866 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 12867 else { 12868 Visit(ASE->getLHS()); 12869 Visit(ASE->getRHS()); 12870 } 12871 } 12872 12873 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 12874 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 12875 void VisitBinPtrMem(const BinaryOperator *BO) { 12876 // C++17 [expr.mptr.oper]p4: 12877 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 12878 // the expression E1 is sequenced before the expression E2. 12879 if (SemaRef.getLangOpts().CPlusPlus17) 12880 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 12881 else { 12882 Visit(BO->getLHS()); 12883 Visit(BO->getRHS()); 12884 } 12885 } 12886 12887 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 12888 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 12889 void VisitBinShlShr(const BinaryOperator *BO) { 12890 // C++17 [expr.shift]p4: 12891 // The expression E1 is sequenced before the expression E2. 12892 if (SemaRef.getLangOpts().CPlusPlus17) 12893 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 12894 else { 12895 Visit(BO->getLHS()); 12896 Visit(BO->getRHS()); 12897 } 12898 } 12899 12900 void VisitBinComma(const BinaryOperator *BO) { 12901 // C++11 [expr.comma]p1: 12902 // Every value computation and side effect associated with the left 12903 // expression is sequenced before every value computation and side 12904 // effect associated with the right expression. 12905 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 12906 } 12907 12908 void VisitBinAssign(const BinaryOperator *BO) { 12909 SequenceTree::Seq RHSRegion; 12910 SequenceTree::Seq LHSRegion; 12911 if (SemaRef.getLangOpts().CPlusPlus17) { 12912 RHSRegion = Tree.allocate(Region); 12913 LHSRegion = Tree.allocate(Region); 12914 } else { 12915 RHSRegion = Region; 12916 LHSRegion = Region; 12917 } 12918 SequenceTree::Seq OldRegion = Region; 12919 12920 // C++11 [expr.ass]p1: 12921 // [...] the assignment is sequenced after the value computation 12922 // of the right and left operands, [...] 12923 // 12924 // so check it before inspecting the operands and update the 12925 // map afterwards. 
12926 Object O = getObject(BO->getLHS(), /*Mod=*/true); 12927 if (O) 12928 notePreMod(O, BO); 12929 12930 if (SemaRef.getLangOpts().CPlusPlus17) { 12931 // C++17 [expr.ass]p1: 12932 // [...] The right operand is sequenced before the left operand. [...] 12933 { 12934 SequencedSubexpression SeqBefore(*this); 12935 Region = RHSRegion; 12936 Visit(BO->getRHS()); 12937 } 12938 12939 Region = LHSRegion; 12940 Visit(BO->getLHS()); 12941 12942 if (O && isa<CompoundAssignOperator>(BO)) 12943 notePostUse(O, BO); 12944 12945 } else { 12946 // C++11 does not specify any sequencing between the LHS and RHS. 12947 Region = LHSRegion; 12948 Visit(BO->getLHS()); 12949 12950 if (O && isa<CompoundAssignOperator>(BO)) 12951 notePostUse(O, BO); 12952 12953 Region = RHSRegion; 12954 Visit(BO->getRHS()); 12955 } 12956 12957 // C++11 [expr.ass]p1: 12958 // the assignment is sequenced [...] before the value computation of the 12959 // assignment expression. 12960 // C11 6.5.16/3 has no such rule. 12961 Region = OldRegion; 12962 if (O) 12963 notePostMod(O, BO, 12964 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 12965 : UK_ModAsSideEffect); 12966 if (SemaRef.getLangOpts().CPlusPlus17) { 12967 Tree.merge(RHSRegion); 12968 Tree.merge(LHSRegion); 12969 } 12970 } 12971 12972 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 12973 VisitBinAssign(CAO); 12974 } 12975 12976 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 12977 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 12978 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 12979 Object O = getObject(UO->getSubExpr(), true); 12980 if (!O) 12981 return VisitExpr(UO); 12982 12983 notePreMod(O, UO); 12984 Visit(UO->getSubExpr()); 12985 // C++11 [expr.pre.incr]p1: 12986 // the expression ++x is equivalent to x+=1 12987 notePostMod(O, UO, 12988 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 12989 : UK_ModAsSideEffect); 12990 } 12991 12992 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 12993 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 12994 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 12995 Object O = getObject(UO->getSubExpr(), true); 12996 if (!O) 12997 return VisitExpr(UO); 12998 12999 notePreMod(O, UO); 13000 Visit(UO->getSubExpr()); 13001 notePostMod(O, UO, UK_ModAsSideEffect); 13002 } 13003 13004 void VisitBinLOr(const BinaryOperator *BO) { 13005 // C++11 [expr.log.or]p2: 13006 // If the second expression is evaluated, every value computation and 13007 // side effect associated with the first expression is sequenced before 13008 // every value computation and side effect associated with the 13009 // second expression. 13010 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 13011 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 13012 SequenceTree::Seq OldRegion = Region; 13013 13014 EvaluationTracker Eval(*this); 13015 { 13016 SequencedSubexpression Sequenced(*this); 13017 Region = LHSRegion; 13018 Visit(BO->getLHS()); 13019 } 13020 13021 // C++11 [expr.log.or]p1: 13022 // [...] the second operand is not evaluated if the first operand 13023 // evaluates to true. 
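// So if the LHS folds to a true constant we skip the RHS entirely: in, say,
// 'true || (i++ + i++)' the right operand never runs and should not be
// diagnosed. The '&&' and conditional-operator visitors below follow the
// same pattern.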
13024 bool EvalResult = false;
13025 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
13026 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult);
13027 if (ShouldVisitRHS) {
13028 Region = RHSRegion;
13029 Visit(BO->getRHS());
13030 }
13031
13032 Region = OldRegion;
13033 Tree.merge(LHSRegion);
13034 Tree.merge(RHSRegion);
13035 }
13036
13037 void VisitBinLAnd(const BinaryOperator *BO) {
13038 // C++11 [expr.log.and]p2:
13039 // If the second expression is evaluated, every value computation and
13040 // side effect associated with the first expression is sequenced before
13041 // every value computation and side effect associated with the
13042 // second expression.
13043 SequenceTree::Seq LHSRegion = Tree.allocate(Region);
13044 SequenceTree::Seq RHSRegion = Tree.allocate(Region);
13045 SequenceTree::Seq OldRegion = Region;
13046
13047 EvaluationTracker Eval(*this);
13048 {
13049 SequencedSubexpression Sequenced(*this);
13050 Region = LHSRegion;
13051 Visit(BO->getLHS());
13052 }
13053
13054 // C++11 [expr.log.and]p1:
13055 // [...] the second operand is not evaluated if the first operand is false.
13056 bool EvalResult = false;
13057 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
13058 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
13059 if (ShouldVisitRHS) {
13060 Region = RHSRegion;
13061 Visit(BO->getRHS());
13062 }
13063
13064 Region = OldRegion;
13065 Tree.merge(LHSRegion);
13066 Tree.merge(RHSRegion);
13067 }
13068
13069 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
13070 // C++11 [expr.cond]p1:
13071 // [...] Every value computation and side effect associated with the first
13072 // expression is sequenced before every value computation and side effect
13073 // associated with the second or third expression.
13074 SequenceTree::Seq ConditionRegion = Tree.allocate(Region);
13075
13076 // No sequencing is specified between the true and false expressions.
13077 // However, since exactly one of them is going to be evaluated, we can
13078 // consider them to be sequenced. This is needed to avoid warning on
13079 // something like "x ? y += 1 : y += 2;" in the case where we will visit
13080 // both the true and false expressions because we can't evaluate x.
13081 // This will still allow us to detect an expression like (pre C++17)
13082 // "(x ? y += 1 : y += 2) = y".
13083 //
13084 // We don't wrap the visitation of the true and false expression with
13085 // SequencedSubexpression because we don't want to downgrade modifications
13086 // as side effect in the true and false expressions after the visitation
13087 // is done. (For example, in the expression "(x ? y++ : y++) + y" we should
13088 // not warn between the two "y++", but we should warn between the "y++"
13089 // and the "y".)
13090 SequenceTree::Seq TrueRegion = Tree.allocate(Region);
13091 SequenceTree::Seq FalseRegion = Tree.allocate(Region);
13092 SequenceTree::Seq OldRegion = Region;
13093
13094 EvaluationTracker Eval(*this);
13095 {
13096 SequencedSubexpression Sequenced(*this);
13097 Region = ConditionRegion;
13098 Visit(CO->getCond());
13099 }
13100
13101 // C++11 [expr.cond]p1:
13102 // [...] The first expression is contextually converted to bool (Clause 4).
13103 // It is evaluated and if it is true, the result of the conditional
13104 // expression is the value of the second expression, otherwise that of the
13105 // third expression. Only one of the second and third expressions is
13106 // evaluated. [...]
13107 bool EvalResult = false; 13108 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult); 13109 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult); 13110 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult); 13111 if (ShouldVisitTrueExpr) { 13112 Region = TrueRegion; 13113 Visit(CO->getTrueExpr()); 13114 } 13115 if (ShouldVisitFalseExpr) { 13116 Region = FalseRegion; 13117 Visit(CO->getFalseExpr()); 13118 } 13119 13120 Region = OldRegion; 13121 Tree.merge(ConditionRegion); 13122 Tree.merge(TrueRegion); 13123 Tree.merge(FalseRegion); 13124 } 13125 13126 void VisitCallExpr(const CallExpr *CE) { 13127 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions. 13128 13129 if (CE->isUnevaluatedBuiltinCall(Context)) 13130 return; 13131 13132 // C++11 [intro.execution]p15: 13133 // When calling a function [...], every value computation and side effect 13134 // associated with any argument expression, or with the postfix expression 13135 // designating the called function, is sequenced before execution of every 13136 // expression or statement in the body of the function [and thus before 13137 // the value computation of its result]. 13138 SequencedSubexpression Sequenced(*this); 13139 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 13140 // C++17 [expr.call]p5 13141 // The postfix-expression is sequenced before each expression in the 13142 // expression-list and any default argument. [...] 13143 SequenceTree::Seq CalleeRegion; 13144 SequenceTree::Seq OtherRegion; 13145 if (SemaRef.getLangOpts().CPlusPlus17) { 13146 CalleeRegion = Tree.allocate(Region); 13147 OtherRegion = Tree.allocate(Region); 13148 } else { 13149 CalleeRegion = Region; 13150 OtherRegion = Region; 13151 } 13152 SequenceTree::Seq OldRegion = Region; 13153 13154 // Visit the callee expression first. 13155 Region = CalleeRegion; 13156 if (SemaRef.getLangOpts().CPlusPlus17) { 13157 SequencedSubexpression Sequenced(*this); 13158 Visit(CE->getCallee()); 13159 } else { 13160 Visit(CE->getCallee()); 13161 } 13162 13163 // Then visit the argument expressions. 13164 Region = OtherRegion; 13165 for (const Expr *Argument : CE->arguments()) 13166 Visit(Argument); 13167 13168 Region = OldRegion; 13169 if (SemaRef.getLangOpts().CPlusPlus17) { 13170 Tree.merge(CalleeRegion); 13171 Tree.merge(OtherRegion); 13172 } 13173 }); 13174 } 13175 13176 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 13177 // C++17 [over.match.oper]p2: 13178 // [...] the operator notation is first transformed to the equivalent 13179 // function-call notation as summarized in Table 12 (where @ denotes one 13180 // of the operators covered in the specified subclause). However, the 13181 // operands are sequenced in the order prescribed for the built-in 13182 // operator (Clause 8). 13183 // 13184 // From the above only overloaded binary operators and overloaded call 13185 // operators have sequencing rules in C++17 that we need to handle 13186 // separately. 
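// For example (a sketch): in C++17, 'os << i++ << i++' with an overloaded
// operator<< evaluates the left operand before the right one, so the two
// increments are sequenced and must not be reported. The switch below maps
// each overloaded operator to the sequencing its built-in form prescribes.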
13187 if (!SemaRef.getLangOpts().CPlusPlus17 || 13188 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 13189 return VisitCallExpr(CXXOCE); 13190 13191 enum { 13192 NoSequencing, 13193 LHSBeforeRHS, 13194 RHSBeforeLHS, 13195 LHSBeforeRest 13196 } SequencingKind; 13197 switch (CXXOCE->getOperator()) { 13198 case OO_Equal: 13199 case OO_PlusEqual: 13200 case OO_MinusEqual: 13201 case OO_StarEqual: 13202 case OO_SlashEqual: 13203 case OO_PercentEqual: 13204 case OO_CaretEqual: 13205 case OO_AmpEqual: 13206 case OO_PipeEqual: 13207 case OO_LessLessEqual: 13208 case OO_GreaterGreaterEqual: 13209 SequencingKind = RHSBeforeLHS; 13210 break; 13211 13212 case OO_LessLess: 13213 case OO_GreaterGreater: 13214 case OO_AmpAmp: 13215 case OO_PipePipe: 13216 case OO_Comma: 13217 case OO_ArrowStar: 13218 case OO_Subscript: 13219 SequencingKind = LHSBeforeRHS; 13220 break; 13221 13222 case OO_Call: 13223 SequencingKind = LHSBeforeRest; 13224 break; 13225 13226 default: 13227 SequencingKind = NoSequencing; 13228 break; 13229 } 13230 13231 if (SequencingKind == NoSequencing) 13232 return VisitCallExpr(CXXOCE); 13233 13234 // This is a call, so all subexpressions are sequenced before the result. 13235 SequencedSubexpression Sequenced(*this); 13236 13237 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 13238 assert(SemaRef.getLangOpts().CPlusPlus17 && 13239 "Should only get there with C++17 and above!"); 13240 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 13241 "Should only get there with an overloaded binary operator" 13242 " or an overloaded call operator!"); 13243 13244 if (SequencingKind == LHSBeforeRest) { 13245 assert(CXXOCE->getOperator() == OO_Call && 13246 "We should only have an overloaded call operator here!"); 13247 13248 // This is very similar to VisitCallExpr, except that we only have the 13249 // C++17 case. The postfix-expression is the first argument of the 13250 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 13251 // are in the following arguments. 13252 // 13253 // Note that we intentionally do not visit the callee expression since 13254 // it is just a decayed reference to a function. 13255 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 13256 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 13257 SequenceTree::Seq OldRegion = Region; 13258 13259 assert(CXXOCE->getNumArgs() >= 1 && 13260 "An overloaded call operator must have at least one argument" 13261 " for the postfix-expression!"); 13262 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 13263 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 13264 CXXOCE->getNumArgs() - 1); 13265 13266 // Visit the postfix-expression first. 13267 { 13268 Region = PostfixExprRegion; 13269 SequencedSubexpression Sequenced(*this); 13270 Visit(PostfixExpr); 13271 } 13272 13273 // Then visit the argument expressions. 13274 Region = ArgsRegion; 13275 for (const Expr *Arg : Args) 13276 Visit(Arg); 13277 13278 Region = OldRegion; 13279 Tree.merge(PostfixExprRegion); 13280 Tree.merge(ArgsRegion); 13281 } else { 13282 assert(CXXOCE->getNumArgs() == 2 && 13283 "Should only have two arguments here!"); 13284 assert((SequencingKind == LHSBeforeRHS || 13285 SequencingKind == RHSBeforeLHS) && 13286 "Unexpected sequencing kind!"); 13287 13288 // We do not visit the callee expression since it is just a decayed 13289 // reference to a function. 
13290 const Expr *E1 = CXXOCE->getArg(0); 13291 const Expr *E2 = CXXOCE->getArg(1); 13292 if (SequencingKind == RHSBeforeLHS) 13293 std::swap(E1, E2); 13294 13295 return VisitSequencedExpressions(E1, E2); 13296 } 13297 }); 13298 } 13299 13300 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 13301 // This is a call, so all subexpressions are sequenced before the result. 13302 SequencedSubexpression Sequenced(*this); 13303 13304 if (!CCE->isListInitialization()) 13305 return VisitExpr(CCE); 13306 13307 // In C++11, list initializations are sequenced. 13308 SmallVector<SequenceTree::Seq, 32> Elts; 13309 SequenceTree::Seq Parent = Region; 13310 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 13311 E = CCE->arg_end(); 13312 I != E; ++I) { 13313 Region = Tree.allocate(Parent); 13314 Elts.push_back(Region); 13315 Visit(*I); 13316 } 13317 13318 // Forget that the initializers are sequenced. 13319 Region = Parent; 13320 for (unsigned I = 0; I < Elts.size(); ++I) 13321 Tree.merge(Elts[I]); 13322 } 13323 13324 void VisitInitListExpr(const InitListExpr *ILE) { 13325 if (!SemaRef.getLangOpts().CPlusPlus11) 13326 return VisitExpr(ILE); 13327 13328 // In C++11, list initializations are sequenced. 13329 SmallVector<SequenceTree::Seq, 32> Elts; 13330 SequenceTree::Seq Parent = Region; 13331 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 13332 const Expr *E = ILE->getInit(I); 13333 if (!E) 13334 continue; 13335 Region = Tree.allocate(Parent); 13336 Elts.push_back(Region); 13337 Visit(E); 13338 } 13339 13340 // Forget that the initializers are sequenced. 13341 Region = Parent; 13342 for (unsigned I = 0; I < Elts.size(); ++I) 13343 Tree.merge(Elts[I]); 13344 } 13345 }; 13346 13347 } // namespace 13348 13349 void Sema::CheckUnsequencedOperations(const Expr *E) { 13350 SmallVector<const Expr *, 8> WorkList; 13351 WorkList.push_back(E); 13352 while (!WorkList.empty()) { 13353 const Expr *Item = WorkList.pop_back_val(); 13354 SequenceChecker(*this, Item, WorkList); 13355 } 13356 } 13357 13358 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 13359 bool IsConstexpr) { 13360 llvm::SaveAndRestore<bool> ConstantContext( 13361 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 13362 CheckImplicitConversions(E, CheckLoc); 13363 if (!E->isInstantiationDependent()) 13364 CheckUnsequencedOperations(E); 13365 if (!IsConstexpr && !E->isValueDependent()) 13366 CheckForIntOverflow(E); 13367 DiagnoseMisalignedMembers(); 13368 } 13369 13370 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 13371 FieldDecl *BitField, 13372 Expr *Init) { 13373 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 13374 } 13375 13376 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 13377 SourceLocation Loc) { 13378 if (!PType->isVariablyModifiedType()) 13379 return; 13380 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 13381 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 13382 return; 13383 } 13384 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 13385 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 13386 return; 13387 } 13388 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 13389 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 13390 return; 13391 } 13392 13393 const ArrayType *AT = S.Context.getAsArrayType(PType); 13394 if (!AT) 13395 return; 13396 13397 if (AT->getSizeModifier() != ArrayType::Star) { 13398 diagnoseArrayStarInParamType(S, 
AT->getElementType(), Loc); 13399 return; 13400 } 13401 13402 S.Diag(Loc, diag::err_array_star_in_function_definition); 13403 } 13404 13405 /// CheckParmsForFunctionDef - Check that the parameters of the given 13406 /// function are appropriate for the definition of a function. This 13407 /// takes care of any checks that cannot be performed on the 13408 /// declaration itself, e.g., that the types of each of the function 13409 /// parameters are complete. 13410 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 13411 bool CheckParameterNames) { 13412 bool HasInvalidParm = false; 13413 for (ParmVarDecl *Param : Parameters) { 13414 // C99 6.7.5.3p4: the parameters in a parameter type list in a 13415 // function declarator that is part of a function definition of 13416 // that function shall not have incomplete type. 13417 // 13418 // This is also C++ [dcl.fct]p6. 13419 if (!Param->isInvalidDecl() && 13420 RequireCompleteType(Param->getLocation(), Param->getType(), 13421 diag::err_typecheck_decl_incomplete_type)) { 13422 Param->setInvalidDecl(); 13423 HasInvalidParm = true; 13424 } 13425 13426 // C99 6.9.1p5: If the declarator includes a parameter type list, the 13427 // declaration of each parameter shall include an identifier. 13428 if (CheckParameterNames && Param->getIdentifier() == nullptr && 13429 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 13430 // Diagnose this as an extension in C17 and earlier. 13431 if (!getLangOpts().C2x) 13432 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 13433 } 13434 13435 // C99 6.7.5.3p12: 13436 // If the function declarator is not part of a definition of that 13437 // function, parameters may have incomplete type and may use the [*] 13438 // notation in their sequences of declarator specifiers to specify 13439 // variable length array types. 13440 QualType PType = Param->getOriginalType(); 13441 // FIXME: This diagnostic should point the '[*]' if source-location 13442 // information is added for it. 13443 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 13444 13445 // If the parameter is a c++ class type and it has to be destructed in the 13446 // callee function, declare the destructor so that it can be called by the 13447 // callee function. Do not perform any direct access check on the dtor here. 13448 if (!Param->isInvalidDecl()) { 13449 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 13450 if (!ClassDecl->isInvalidDecl() && 13451 !ClassDecl->hasIrrelevantDestructor() && 13452 !ClassDecl->isDependentContext() && 13453 ClassDecl->isParamDestroyedInCallee()) { 13454 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 13455 MarkFunctionReferenced(Param->getLocation(), Destructor); 13456 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 13457 } 13458 } 13459 } 13460 13461 // Parameters with the pass_object_size attribute only need to be marked 13462 // constant at function definitions. Because we lack information about 13463 // whether we're on a declaration or definition when we're instantiating the 13464 // attribute, we need to check for constness here. 13465 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 13466 if (!Param->getType().isConstQualified()) 13467 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 13468 << Attr->getSpelling() << 1; 13469 13470 // Check for parameter names shadowing fields from the class. 
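    // Illustrative example (not from the original source; the names are
    // hypothetical): the check below is aimed at member function parameters
    // that reuse the name of an inherited field, roughly:
    //   struct Base { int Width; };
    //   struct Derived : Base { void resize(int Width); };  // parameter shadows Base::Width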
13471 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 13472 // The owning context for the parameter should be the function, but we 13473 // want to see if this function's declaration context is a record. 13474 DeclContext *DC = Param->getDeclContext(); 13475 if (DC && DC->isFunctionOrMethod()) { 13476 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 13477 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 13478 RD, /*DeclIsField*/ false); 13479 } 13480 } 13481 } 13482 13483 return HasInvalidParm; 13484 } 13485 13486 Optional<std::pair<CharUnits, CharUnits>> 13487 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 13488 13489 /// Compute the alignment and offset of the base class object given the 13490 /// derived-to-base cast expression and the alignment and offset of the derived 13491 /// class object. 13492 static std::pair<CharUnits, CharUnits> 13493 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 13494 CharUnits BaseAlignment, CharUnits Offset, 13495 ASTContext &Ctx) { 13496 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 13497 ++PathI) { 13498 const CXXBaseSpecifier *Base = *PathI; 13499 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 13500 if (Base->isVirtual()) { 13501 // The complete object may have a lower alignment than the non-virtual 13502 // alignment of the base, in which case the base may be misaligned. Choose 13503 // the smaller of the non-virtual alignment and BaseAlignment, which is a 13504 // conservative lower bound of the complete object alignment. 13505 CharUnits NonVirtualAlignment = 13506 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 13507 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 13508 Offset = CharUnits::Zero(); 13509 } else { 13510 const ASTRecordLayout &RL = 13511 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 13512 Offset += RL.getBaseClassOffset(BaseDecl); 13513 } 13514 DerivedType = Base->getType(); 13515 } 13516 13517 return std::make_pair(BaseAlignment, Offset); 13518 } 13519 13520 /// Compute the alignment and offset of a binary additive operator. 13521 static Optional<std::pair<CharUnits, CharUnits>> 13522 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 13523 bool IsSub, ASTContext &Ctx) { 13524 QualType PointeeType = PtrE->getType()->getPointeeType(); 13525 13526 if (!PointeeType->isConstantSizeType()) 13527 return llvm::None; 13528 13529 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 13530 13531 if (!P) 13532 return llvm::None; 13533 13534 llvm::APSInt IdxRes; 13535 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 13536 if (IntE->isIntegerConstantExpr(IdxRes, Ctx)) { 13537 CharUnits Offset = EltSize * IdxRes.getExtValue(); 13538 if (IsSub) 13539 Offset = -Offset; 13540 return std::make_pair(P->first, P->second + Offset); 13541 } 13542 13543 // If the integer expression isn't a constant expression, compute the lower 13544 // bound of the alignment using the alignment and offset of the pointer 13545 // expression and the element size. 13546 return std::make_pair( 13547 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 13548 CharUnits::Zero()); 13549 } 13550 13551 /// This helper function takes an lvalue expression and returns the alignment of 13552 /// a VarDecl and a constant offset from the VarDecl. 
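/// As an illustrative example (not part of the original comment; 'Arr' is a
/// hypothetical name), for
///   int Arr[8];
/// the lvalue 'Arr[3]' resolves through the ArraySubscriptExpr case below to
/// the alignment of 'Arr' plus a constant offset of 3 * sizeof(int) bytes.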
13553 Optional<std::pair<CharUnits, CharUnits>> 13554 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 13555 E = E->IgnoreParens(); 13556 switch (E->getStmtClass()) { 13557 default: 13558 break; 13559 case Stmt::CStyleCastExprClass: 13560 case Stmt::CXXStaticCastExprClass: 13561 case Stmt::ImplicitCastExprClass: { 13562 auto *CE = cast<CastExpr>(E); 13563 const Expr *From = CE->getSubExpr(); 13564 switch (CE->getCastKind()) { 13565 default: 13566 break; 13567 case CK_NoOp: 13568 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 13569 case CK_UncheckedDerivedToBase: 13570 case CK_DerivedToBase: { 13571 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 13572 if (!P) 13573 break; 13574 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 13575 P->second, Ctx); 13576 } 13577 } 13578 break; 13579 } 13580 case Stmt::ArraySubscriptExprClass: { 13581 auto *ASE = cast<ArraySubscriptExpr>(E); 13582 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 13583 false, Ctx); 13584 } 13585 case Stmt::DeclRefExprClass: { 13586 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 13587 // FIXME: If VD is captured by copy or is an escaping __block variable, 13588 // use the alignment of VD's type. 13589 if (!VD->getType()->isReferenceType()) 13590 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 13591 if (VD->hasInit()) 13592 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 13593 } 13594 break; 13595 } 13596 case Stmt::MemberExprClass: { 13597 auto *ME = cast<MemberExpr>(E); 13598 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 13599 if (!FD || FD->getType()->isReferenceType()) 13600 break; 13601 Optional<std::pair<CharUnits, CharUnits>> P; 13602 if (ME->isArrow()) 13603 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 13604 else 13605 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 13606 if (!P) 13607 break; 13608 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 13609 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 13610 return std::make_pair(P->first, 13611 P->second + CharUnits::fromQuantity(Offset)); 13612 } 13613 case Stmt::UnaryOperatorClass: { 13614 auto *UO = cast<UnaryOperator>(E); 13615 switch (UO->getOpcode()) { 13616 default: 13617 break; 13618 case UO_Deref: 13619 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 13620 } 13621 break; 13622 } 13623 case Stmt::BinaryOperatorClass: { 13624 auto *BO = cast<BinaryOperator>(E); 13625 auto Opcode = BO->getOpcode(); 13626 switch (Opcode) { 13627 default: 13628 break; 13629 case BO_Comma: 13630 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 13631 } 13632 break; 13633 } 13634 } 13635 return llvm::None; 13636 } 13637 13638 /// This helper function takes a pointer expression and returns the alignment of 13639 /// a VarDecl and a constant offset from the VarDecl. 
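/// As an illustrative example (not part of the original comment; 'S' and
/// 'Obj' are hypothetical names), for
///   struct S { char c; int i; } Obj;
/// the pointer expression '&Obj.i' resolves through the UO_AddrOf and
/// MemberExpr cases to the alignment of 'Obj' plus the byte offset of 'i'
/// within 'S'.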
13640 Optional<std::pair<CharUnits, CharUnits>> 13641 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 13642 E = E->IgnoreParens(); 13643 switch (E->getStmtClass()) { 13644 default: 13645 break; 13646 case Stmt::CStyleCastExprClass: 13647 case Stmt::CXXStaticCastExprClass: 13648 case Stmt::ImplicitCastExprClass: { 13649 auto *CE = cast<CastExpr>(E); 13650 const Expr *From = CE->getSubExpr(); 13651 switch (CE->getCastKind()) { 13652 default: 13653 break; 13654 case CK_NoOp: 13655 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 13656 case CK_ArrayToPointerDecay: 13657 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 13658 case CK_UncheckedDerivedToBase: 13659 case CK_DerivedToBase: { 13660 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 13661 if (!P) 13662 break; 13663 return getDerivedToBaseAlignmentAndOffset( 13664 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 13665 } 13666 } 13667 break; 13668 } 13669 case Stmt::CXXThisExprClass: { 13670 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 13671 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 13672 return std::make_pair(Alignment, CharUnits::Zero()); 13673 } 13674 case Stmt::UnaryOperatorClass: { 13675 auto *UO = cast<UnaryOperator>(E); 13676 if (UO->getOpcode() == UO_AddrOf) 13677 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 13678 break; 13679 } 13680 case Stmt::BinaryOperatorClass: { 13681 auto *BO = cast<BinaryOperator>(E); 13682 auto Opcode = BO->getOpcode(); 13683 switch (Opcode) { 13684 default: 13685 break; 13686 case BO_Add: 13687 case BO_Sub: { 13688 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 13689 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 13690 std::swap(LHS, RHS); 13691 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 13692 Ctx); 13693 } 13694 case BO_Comma: 13695 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 13696 } 13697 break; 13698 } 13699 } 13700 return llvm::None; 13701 } 13702 13703 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 13704 // See if we can compute the alignment of a VarDecl and an offset from it. 13705 Optional<std::pair<CharUnits, CharUnits>> P = 13706 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 13707 13708 if (P) 13709 return P->first.alignmentAtOffset(P->second); 13710 13711 // If that failed, return the type's alignment. 13712 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 13713 } 13714 13715 /// CheckCastAlign - Implements -Wcast-align, which warns when a 13716 /// pointer cast increases the alignment requirements. 13717 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 13718 // This is actually a lot of work to potentially be doing on every 13719 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 13720 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 13721 return; 13722 13723 // Ignore dependent types. 13724 if (T->isDependentType() || Op->getType()->isDependentType()) 13725 return; 13726 13727 // Require that the destination be a pointer type. 13728 const PointerType *DestPtr = T->getAs<PointerType>(); 13729 if (!DestPtr) return; 13730 13731 // If the destination has alignment 1, we're done. 
13732 QualType DestPointee = DestPtr->getPointeeType(); 13733 if (DestPointee->isIncompleteType()) return; 13734 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 13735 if (DestAlign.isOne()) return; 13736 13737 // Require that the source be a pointer type. 13738 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 13739 if (!SrcPtr) return; 13740 QualType SrcPointee = SrcPtr->getPointeeType(); 13741 13742 // Explicitly allow casts from cv void*. We already implicitly 13743 // allowed casts to cv void*, since they have alignment 1. 13744 // Also allow casts involving incomplete types, which implicitly 13745 // includes 'void'. 13746 if (SrcPointee->isIncompleteType()) return; 13747 13748 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 13749 13750 if (SrcAlign >= DestAlign) return; 13751 13752 Diag(TRange.getBegin(), diag::warn_cast_align) 13753 << Op->getType() << T 13754 << static_cast<unsigned>(SrcAlign.getQuantity()) 13755 << static_cast<unsigned>(DestAlign.getQuantity()) 13756 << TRange << Op->getSourceRange(); 13757 } 13758 13759 /// Check whether this array fits the idiom of a size-one tail padded 13760 /// array member of a struct. 13761 /// 13762 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 13763 /// commonly used to emulate flexible arrays in C89 code. 13764 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 13765 const NamedDecl *ND) { 13766 if (Size != 1 || !ND) return false; 13767 13768 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 13769 if (!FD) return false; 13770 13771 // Don't consider sizes resulting from macro expansions or template argument 13772 // substitution to form C89 tail-padded arrays. 13773 13774 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 13775 while (TInfo) { 13776 TypeLoc TL = TInfo->getTypeLoc(); 13777 // Look through typedefs. 13778 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 13779 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 13780 TInfo = TDL->getTypeSourceInfo(); 13781 continue; 13782 } 13783 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 13784 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 13785 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 13786 return false; 13787 } 13788 break; 13789 } 13790 13791 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 13792 if (!RD) return false; 13793 if (RD->isUnion()) return false; 13794 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 13795 if (!CRD->isStandardLayout()) return false; 13796 } 13797 13798 // See if this is the last field decl in the record. 13799 const Decl *D = FD; 13800 while ((D = D->getNextDeclInContext())) 13801 if (isa<FieldDecl>(D)) 13802 return false; 13803 return true; 13804 } 13805 13806 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 13807 const ArraySubscriptExpr *ASE, 13808 bool AllowOnePastEnd, bool IndexNegated) { 13809 // Already diagnosed by the constant evaluator. 
13810 if (isConstantEvaluated()) 13811 return; 13812 13813 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 13814 if (IndexExpr->isValueDependent()) 13815 return; 13816 13817 const Type *EffectiveType = 13818 BaseExpr->getType()->getPointeeOrArrayElementType(); 13819 BaseExpr = BaseExpr->IgnoreParenCasts(); 13820 const ConstantArrayType *ArrayTy = 13821 Context.getAsConstantArrayType(BaseExpr->getType()); 13822 13823 if (!ArrayTy) 13824 return; 13825 13826 const Type *BaseType = ArrayTy->getElementType().getTypePtr(); 13827 if (EffectiveType->isDependentType() || BaseType->isDependentType()) 13828 return; 13829 13830 Expr::EvalResult Result; 13831 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 13832 return; 13833 13834 llvm::APSInt index = Result.Val.getInt(); 13835 if (IndexNegated) 13836 index = -index; 13837 13838 const NamedDecl *ND = nullptr; 13839 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 13840 ND = DRE->getDecl(); 13841 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 13842 ND = ME->getMemberDecl(); 13843 13844 if (index.isUnsigned() || !index.isNegative()) { 13845 // It is possible that the type of the base expression after 13846 // IgnoreParenCasts is incomplete, even though the type of the base 13847 // expression before IgnoreParenCasts is complete (see PR39746 for an 13848 // example). In this case we have no information about whether the array 13849 // access exceeds the array bounds. However we can still diagnose an array 13850 // access which precedes the array bounds. 13851 if (BaseType->isIncompleteType()) 13852 return; 13853 13854 llvm::APInt size = ArrayTy->getSize(); 13855 if (!size.isStrictlyPositive()) 13856 return; 13857 13858 if (BaseType != EffectiveType) { 13859 // Make sure we're comparing apples to apples when comparing index to size 13860 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 13861 uint64_t array_typesize = Context.getTypeSize(BaseType); 13862 // Handle ptrarith_typesize being zero, such as when casting to void* 13863 if (!ptrarith_typesize) ptrarith_typesize = 1; 13864 if (ptrarith_typesize != array_typesize) { 13865 // There's a cast to a different size type involved 13866 uint64_t ratio = array_typesize / ptrarith_typesize; 13867 // TODO: Be smarter about handling cases where array_typesize is not a 13868 // multiple of ptrarith_typesize 13869 if (ptrarith_typesize * ratio == array_typesize) 13870 size *= llvm::APInt(size.getBitWidth(), ratio); 13871 } 13872 } 13873 13874 if (size.getBitWidth() > index.getBitWidth()) 13875 index = index.zext(size.getBitWidth()); 13876 else if (size.getBitWidth() < index.getBitWidth()) 13877 size = size.zext(index.getBitWidth()); 13878 13879 // For array subscripting the index must be less than size, but for pointer 13880 // arithmetic also allow the index (offset) to be equal to size since 13881 // computing the next address after the end of the array is legal and 13882 // commonly done e.g. in C++ iterators and range-based for loops. 13883 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 13884 return; 13885 13886 // Also don't warn for arrays of size 1 which are members of some 13887 // structure. These are often used to approximate flexible arrays in C89 13888 // code. 13889 if (IsTailPaddedMemberArray(*this, size, ND)) 13890 return; 13891 13892 // Suppress the warning if the subscript expression (as identified by the 13893 // ']' location) and the index expression are both from macro expansions 13894 // within a system header. 
13895 if (ASE) { 13896 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 13897 ASE->getRBracketLoc()); 13898 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 13899 SourceLocation IndexLoc = 13900 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 13901 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 13902 return; 13903 } 13904 } 13905 13906 unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds; 13907 if (ASE) 13908 DiagID = diag::warn_array_index_exceeds_bounds; 13909 13910 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 13911 PDiag(DiagID) << index.toString(10, true) 13912 << size.toString(10, true) 13913 << (unsigned)size.getLimitedValue(~0U) 13914 << IndexExpr->getSourceRange()); 13915 } else { 13916 unsigned DiagID = diag::warn_array_index_precedes_bounds; 13917 if (!ASE) { 13918 DiagID = diag::warn_ptr_arith_precedes_bounds; 13919 if (index.isNegative()) index = -index; 13920 } 13921 13922 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 13923 PDiag(DiagID) << index.toString(10, true) 13924 << IndexExpr->getSourceRange()); 13925 } 13926 13927 if (!ND) { 13928 // Try harder to find a NamedDecl to point at in the note. 13929 while (const ArraySubscriptExpr *ASE = 13930 dyn_cast<ArraySubscriptExpr>(BaseExpr)) 13931 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 13932 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 13933 ND = DRE->getDecl(); 13934 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 13935 ND = ME->getMemberDecl(); 13936 } 13937 13938 if (ND) 13939 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 13940 PDiag(diag::note_array_declared_here) 13941 << ND->getDeclName()); 13942 } 13943 13944 void Sema::CheckArrayAccess(const Expr *expr) { 13945 int AllowOnePastEnd = 0; 13946 while (expr) { 13947 expr = expr->IgnoreParenImpCasts(); 13948 switch (expr->getStmtClass()) { 13949 case Stmt::ArraySubscriptExprClass: { 13950 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 13951 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 13952 AllowOnePastEnd > 0); 13953 expr = ASE->getBase(); 13954 break; 13955 } 13956 case Stmt::MemberExprClass: { 13957 expr = cast<MemberExpr>(expr)->getBase(); 13958 break; 13959 } 13960 case Stmt::OMPArraySectionExprClass: { 13961 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 13962 if (ASE->getLowerBound()) 13963 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 13964 /*ASE=*/nullptr, AllowOnePastEnd > 0); 13965 return; 13966 } 13967 case Stmt::UnaryOperatorClass: { 13968 // Only unwrap the * and & unary operators 13969 const UnaryOperator *UO = cast<UnaryOperator>(expr); 13970 expr = UO->getSubExpr(); 13971 switch (UO->getOpcode()) { 13972 case UO_AddrOf: 13973 AllowOnePastEnd++; 13974 break; 13975 case UO_Deref: 13976 AllowOnePastEnd--; 13977 break; 13978 default: 13979 return; 13980 } 13981 break; 13982 } 13983 case Stmt::ConditionalOperatorClass: { 13984 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 13985 if (const Expr *lhs = cond->getLHS()) 13986 CheckArrayAccess(lhs); 13987 if (const Expr *rhs = cond->getRHS()) 13988 CheckArrayAccess(rhs); 13989 return; 13990 } 13991 case Stmt::CXXOperatorCallExprClass: { 13992 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 13993 for (const auto *Arg : OCE->arguments()) 13994 CheckArrayAccess(Arg); 13995 return; 13996 } 13997 default: 13998 return; 13999 } 14000 } 14001 } 14002 14003 //===--- CHECK: Objective-C retain cycles ----------------------------------// 14004 14005 namespace { 14006 14007 struct 
RetainCycleOwner { 14008 VarDecl *Variable = nullptr; 14009 SourceRange Range; 14010 SourceLocation Loc; 14011 bool Indirect = false; 14012 14013 RetainCycleOwner() = default; 14014 14015 void setLocsFrom(Expr *e) { 14016 Loc = e->getExprLoc(); 14017 Range = e->getSourceRange(); 14018 } 14019 }; 14020 14021 } // namespace 14022 14023 /// Consider whether capturing the given variable can possibly lead to 14024 /// a retain cycle. 14025 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 14026 // In ARC, it's captured strongly iff the variable has __strong 14027 // lifetime. In MRR, it's captured strongly if the variable is 14028 // __block and has an appropriate type. 14029 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 14030 return false; 14031 14032 owner.Variable = var; 14033 if (ref) 14034 owner.setLocsFrom(ref); 14035 return true; 14036 } 14037 14038 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 14039 while (true) { 14040 e = e->IgnoreParens(); 14041 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 14042 switch (cast->getCastKind()) { 14043 case CK_BitCast: 14044 case CK_LValueBitCast: 14045 case CK_LValueToRValue: 14046 case CK_ARCReclaimReturnedObject: 14047 e = cast->getSubExpr(); 14048 continue; 14049 14050 default: 14051 return false; 14052 } 14053 } 14054 14055 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 14056 ObjCIvarDecl *ivar = ref->getDecl(); 14057 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 14058 return false; 14059 14060 // Try to find a retain cycle in the base. 14061 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 14062 return false; 14063 14064 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 14065 owner.Indirect = true; 14066 return true; 14067 } 14068 14069 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 14070 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 14071 if (!var) return false; 14072 return considerVariable(var, ref, owner); 14073 } 14074 14075 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 14076 if (member->isArrow()) return false; 14077 14078 // Don't count this as an indirect ownership. 14079 e = member->getBase(); 14080 continue; 14081 } 14082 14083 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 14084 // Only pay attention to pseudo-objects on property references. 14085 ObjCPropertyRefExpr *pre 14086 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 14087 ->IgnoreParens()); 14088 if (!pre) return false; 14089 if (pre->isImplicitProperty()) return false; 14090 ObjCPropertyDecl *property = pre->getExplicitProperty(); 14091 if (!property->isRetaining() && 14092 !(property->getPropertyIvarDecl() && 14093 property->getPropertyIvarDecl()->getType() 14094 .getObjCLifetime() == Qualifiers::OCL_Strong)) 14095 return false; 14096 14097 owner.Indirect = true; 14098 if (pre->isSuperReceiver()) { 14099 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 14100 if (!owner.Variable) 14101 return false; 14102 owner.Loc = pre->getLocation(); 14103 owner.Range = pre->getSourceRange(); 14104 return true; 14105 } 14106 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 14107 ->getSourceExpr()); 14108 continue; 14109 } 14110 14111 // Array ivars? 
14112 14113 return false; 14114 } 14115 } 14116 14117 namespace { 14118 14119 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 14120 ASTContext &Context; 14121 VarDecl *Variable; 14122 Expr *Capturer = nullptr; 14123 bool VarWillBeReased = false; 14124 14125 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 14126 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 14127 Context(Context), Variable(variable) {} 14128 14129 void VisitDeclRefExpr(DeclRefExpr *ref) { 14130 if (ref->getDecl() == Variable && !Capturer) 14131 Capturer = ref; 14132 } 14133 14134 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 14135 if (Capturer) return; 14136 Visit(ref->getBase()); 14137 if (Capturer && ref->isFreeIvar()) 14138 Capturer = ref; 14139 } 14140 14141 void VisitBlockExpr(BlockExpr *block) { 14142 // Look inside nested blocks 14143 if (block->getBlockDecl()->capturesVariable(Variable)) 14144 Visit(block->getBlockDecl()->getBody()); 14145 } 14146 14147 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 14148 if (Capturer) return; 14149 if (OVE->getSourceExpr()) 14150 Visit(OVE->getSourceExpr()); 14151 } 14152 14153 void VisitBinaryOperator(BinaryOperator *BinOp) { 14154 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 14155 return; 14156 Expr *LHS = BinOp->getLHS(); 14157 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 14158 if (DRE->getDecl() != Variable) 14159 return; 14160 if (Expr *RHS = BinOp->getRHS()) { 14161 RHS = RHS->IgnoreParenCasts(); 14162 llvm::APSInt Value; 14163 VarWillBeReased = 14164 (RHS && RHS->isIntegerConstantExpr(Value, Context) && Value == 0); 14165 } 14166 } 14167 } 14168 }; 14169 14170 } // namespace 14171 14172 /// Check whether the given argument is a block which captures a 14173 /// variable. 14174 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 14175 assert(owner.Variable && owner.Loc.isValid()); 14176 14177 e = e->IgnoreParenCasts(); 14178 14179 // Look through [^{...} copy] and Block_copy(^{...}). 14180 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 14181 Selector Cmd = ME->getSelector(); 14182 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 14183 e = ME->getInstanceReceiver(); 14184 if (!e) 14185 return nullptr; 14186 e = e->IgnoreParenCasts(); 14187 } 14188 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 14189 if (CE->getNumArgs() == 1) { 14190 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 14191 if (Fn) { 14192 const IdentifierInfo *FnI = Fn->getIdentifier(); 14193 if (FnI && FnI->isStr("_Block_copy")) { 14194 e = CE->getArg(0)->IgnoreParenCasts(); 14195 } 14196 } 14197 } 14198 } 14199 14200 BlockExpr *block = dyn_cast<BlockExpr>(e); 14201 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 14202 return nullptr; 14203 14204 FindCaptureVisitor visitor(S.Context, owner.Variable); 14205 visitor.Visit(block->getBlockDecl()->getBody()); 14206 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 14207 } 14208 14209 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 14210 RetainCycleOwner &owner) { 14211 assert(capturer); 14212 assert(owner.Variable && owner.Loc.isValid()); 14213 14214 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 14215 << owner.Variable << capturer->getSourceRange(); 14216 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 14217 << owner.Indirect << owner.Range; 14218 } 14219 14220 /// Check for a keyword selector that starts with the word 'add' or 14221 /// 'set'. 14222 static bool isSetterLikeSelector(Selector sel) { 14223 if (sel.isUnarySelector()) return false; 14224 14225 StringRef str = sel.getNameForSlot(0); 14226 while (!str.empty() && str.front() == '_') str = str.substr(1); 14227 if (str.startswith("set")) 14228 str = str.substr(3); 14229 else if (str.startswith("add")) { 14230 // Specially allow 'addOperationWithBlock:'. 14231 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 14232 return false; 14233 str = str.substr(3); 14234 } 14235 else 14236 return false; 14237 14238 if (str.empty()) return true; 14239 return !isLowercase(str.front()); 14240 } 14241 14242 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 14243 ObjCMessageExpr *Message) { 14244 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 14245 Message->getReceiverInterface(), 14246 NSAPI::ClassId_NSMutableArray); 14247 if (!IsMutableArray) { 14248 return None; 14249 } 14250 14251 Selector Sel = Message->getSelector(); 14252 14253 Optional<NSAPI::NSArrayMethodKind> MKOpt = 14254 S.NSAPIObj->getNSArrayMethodKind(Sel); 14255 if (!MKOpt) { 14256 return None; 14257 } 14258 14259 NSAPI::NSArrayMethodKind MK = *MKOpt; 14260 14261 switch (MK) { 14262 case NSAPI::NSMutableArr_addObject: 14263 case NSAPI::NSMutableArr_insertObjectAtIndex: 14264 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 14265 return 0; 14266 case NSAPI::NSMutableArr_replaceObjectAtIndex: 14267 return 1; 14268 14269 default: 14270 return None; 14271 } 14272 14273 return None; 14274 } 14275 14276 static 14277 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 14278 ObjCMessageExpr *Message) { 14279 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 14280 Message->getReceiverInterface(), 14281 NSAPI::ClassId_NSMutableDictionary); 14282 if (!IsMutableDictionary) { 14283 return None; 14284 } 14285 14286 Selector Sel = Message->getSelector(); 14287 14288 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 14289 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 14290 if (!MKOpt) { 14291 return None; 14292 } 14293 14294 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 14295 14296 switch (MK) { 14297 case NSAPI::NSMutableDict_setObjectForKey: 14298 case NSAPI::NSMutableDict_setValueForKey: 14299 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 14300 return 0; 14301 14302 default: 14303 return None; 14304 } 14305 14306 return None; 14307 } 14308 14309 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 14310 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 14311 Message->getReceiverInterface(), 14312 NSAPI::ClassId_NSMutableSet); 14313 14314 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 14315 Message->getReceiverInterface(), 14316 NSAPI::ClassId_NSMutableOrderedSet); 14317 if (!IsMutableSet && !IsMutableOrderedSet) { 14318 return None; 14319 } 14320 14321 Selector Sel = Message->getSelector(); 14322 14323 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 14324 if (!MKOpt) 
{ 14325 return None; 14326 } 14327 14328 NSAPI::NSSetMethodKind MK = *MKOpt; 14329 14330 switch (MK) { 14331 case NSAPI::NSMutableSet_addObject: 14332 case NSAPI::NSOrderedSet_setObjectAtIndex: 14333 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 14334 case NSAPI::NSOrderedSet_insertObjectAtIndex: 14335 return 0; 14336 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 14337 return 1; 14338 } 14339 14340 return None; 14341 } 14342 14343 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 14344 if (!Message->isInstanceMessage()) { 14345 return; 14346 } 14347 14348 Optional<int> ArgOpt; 14349 14350 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 14351 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 14352 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 14353 return; 14354 } 14355 14356 int ArgIndex = *ArgOpt; 14357 14358 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 14359 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 14360 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 14361 } 14362 14363 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 14364 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 14365 if (ArgRE->isObjCSelfExpr()) { 14366 Diag(Message->getSourceRange().getBegin(), 14367 diag::warn_objc_circular_container) 14368 << ArgRE->getDecl() << StringRef("'super'"); 14369 } 14370 } 14371 } else { 14372 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 14373 14374 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 14375 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 14376 } 14377 14378 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 14379 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 14380 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 14381 ValueDecl *Decl = ReceiverRE->getDecl(); 14382 Diag(Message->getSourceRange().getBegin(), 14383 diag::warn_objc_circular_container) 14384 << Decl << Decl; 14385 if (!ArgRE->isObjCSelfExpr()) { 14386 Diag(Decl->getLocation(), 14387 diag::note_objc_circular_container_declared_here) 14388 << Decl; 14389 } 14390 } 14391 } 14392 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 14393 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 14394 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 14395 ObjCIvarDecl *Decl = IvarRE->getDecl(); 14396 Diag(Message->getSourceRange().getBegin(), 14397 diag::warn_objc_circular_container) 14398 << Decl << Decl; 14399 Diag(Decl->getLocation(), 14400 diag::note_objc_circular_container_declared_here) 14401 << Decl; 14402 } 14403 } 14404 } 14405 } 14406 } 14407 14408 /// Check a message send to see if it's likely to cause a retain cycle. 14409 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 14410 // Only check instance methods whose selector looks like a setter. 14411 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 14412 return; 14413 14414 // Try to find a variable that the receiver is strongly owned by. 
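  // Illustrative shape of the pattern being diagnosed (not from the original
  // source; the class, selector, and method names are hypothetical):
  //   MyController *controller = ...;
  //   [controller setCompletionHandler:^{ [controller doWork]; }];
  // Here the receiver is strongly owned and the block argument captures it.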
14415 RetainCycleOwner owner; 14416 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 14417 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 14418 return; 14419 } else { 14420 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 14421 owner.Variable = getCurMethodDecl()->getSelfDecl(); 14422 owner.Loc = msg->getSuperLoc(); 14423 owner.Range = msg->getSuperLoc(); 14424 } 14425 14426 // Check whether the receiver is captured by any of the arguments. 14427 const ObjCMethodDecl *MD = msg->getMethodDecl(); 14428 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 14429 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 14430 // noescape blocks should not be retained by the method. 14431 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 14432 continue; 14433 return diagnoseRetainCycle(*this, capturer, owner); 14434 } 14435 } 14436 } 14437 14438 /// Check a property assign to see if it's likely to cause a retain cycle. 14439 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 14440 RetainCycleOwner owner; 14441 if (!findRetainCycleOwner(*this, receiver, owner)) 14442 return; 14443 14444 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 14445 diagnoseRetainCycle(*this, capturer, owner); 14446 } 14447 14448 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 14449 RetainCycleOwner Owner; 14450 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 14451 return; 14452 14453 // Because we don't have an expression for the variable, we have to set the 14454 // location explicitly here. 14455 Owner.Loc = Var->getLocation(); 14456 Owner.Range = Var->getSourceRange(); 14457 14458 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 14459 diagnoseRetainCycle(*this, Capturer, Owner); 14460 } 14461 14462 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 14463 Expr *RHS, bool isProperty) { 14464 // Check if RHS is an Objective-C object literal, which also can get 14465 // immediately zapped in a weak reference. Note that we explicitly 14466 // allow ObjCStringLiterals, since those are designed to never really die. 14467 RHS = RHS->IgnoreParenImpCasts(); 14468 14469 // This enum needs to match with the 'select' in 14470 // warn_objc_arc_literal_assign (off-by-1). 14471 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 14472 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 14473 return false; 14474 14475 S.Diag(Loc, diag::warn_arc_literal_assign) 14476 << (unsigned) Kind 14477 << (isProperty ? 0 : 1) 14478 << RHS->getSourceRange(); 14479 14480 return true; 14481 } 14482 14483 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 14484 Qualifiers::ObjCLifetime LT, 14485 Expr *RHS, bool isProperty) { 14486 // Strip off any implicit cast added to get to the one ARC-specific. 14487 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 14488 if (cast->getCastKind() == CK_ARCConsumeObject) { 14489 S.Diag(Loc, diag::warn_arc_retained_assign) 14490 << (LT == Qualifiers::OCL_ExplicitNone) 14491 << (isProperty ? 
0 : 1) 14492 << RHS->getSourceRange(); 14493 return true; 14494 } 14495 RHS = cast->getSubExpr(); 14496 } 14497 14498 if (LT == Qualifiers::OCL_Weak && 14499 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 14500 return true; 14501 14502 return false; 14503 } 14504 14505 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 14506 QualType LHS, Expr *RHS) { 14507 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 14508 14509 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 14510 return false; 14511 14512 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 14513 return true; 14514 14515 return false; 14516 } 14517 14518 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 14519 Expr *LHS, Expr *RHS) { 14520 QualType LHSType; 14521 // PropertyRef on LHS type need be directly obtained from 14522 // its declaration as it has a PseudoType. 14523 ObjCPropertyRefExpr *PRE 14524 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 14525 if (PRE && !PRE->isImplicitProperty()) { 14526 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 14527 if (PD) 14528 LHSType = PD->getType(); 14529 } 14530 14531 if (LHSType.isNull()) 14532 LHSType = LHS->getType(); 14533 14534 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 14535 14536 if (LT == Qualifiers::OCL_Weak) { 14537 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 14538 getCurFunction()->markSafeWeakUse(LHS); 14539 } 14540 14541 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 14542 return; 14543 14544 // FIXME. Check for other life times. 14545 if (LT != Qualifiers::OCL_None) 14546 return; 14547 14548 if (PRE) { 14549 if (PRE->isImplicitProperty()) 14550 return; 14551 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 14552 if (!PD) 14553 return; 14554 14555 unsigned Attributes = PD->getPropertyAttributes(); 14556 if (Attributes & ObjCPropertyAttribute::kind_assign) { 14557 // when 'assign' attribute was not explicitly specified 14558 // by user, ignore it and rely on property type itself 14559 // for lifetime info. 14560 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 14561 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 14562 LHSType->isObjCRetainableType()) 14563 return; 14564 14565 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 14566 if (cast->getCastKind() == CK_ARCConsumeObject) { 14567 Diag(Loc, diag::warn_arc_retained_property_assign) 14568 << RHS->getSourceRange(); 14569 return; 14570 } 14571 RHS = cast->getSubExpr(); 14572 } 14573 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 14574 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 14575 return; 14576 } 14577 } 14578 } 14579 14580 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 14581 14582 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 14583 SourceLocation StmtLoc, 14584 const NullStmt *Body) { 14585 // Do not warn if the body is a macro that expands to nothing, e.g: 14586 // 14587 // #define CALL(x) 14588 // if (condition) 14589 // CALL(0); 14590 if (Body->hasLeadingEmptyMacro()) 14591 return false; 14592 14593 // Get line numbers of statement and body. 
14594 bool StmtLineInvalid; 14595 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 14596 &StmtLineInvalid); 14597 if (StmtLineInvalid) 14598 return false; 14599 14600 bool BodyLineInvalid; 14601 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 14602 &BodyLineInvalid); 14603 if (BodyLineInvalid) 14604 return false; 14605 14606 // Warn if null statement and body are on the same line. 14607 if (StmtLine != BodyLine) 14608 return false; 14609 14610 return true; 14611 } 14612 14613 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 14614 const Stmt *Body, 14615 unsigned DiagID) { 14616 // Since this is a syntactic check, don't emit diagnostic for template 14617 // instantiations, this just adds noise. 14618 if (CurrentInstantiationScope) 14619 return; 14620 14621 // The body should be a null statement. 14622 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 14623 if (!NBody) 14624 return; 14625 14626 // Do the usual checks. 14627 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 14628 return; 14629 14630 Diag(NBody->getSemiLoc(), DiagID); 14631 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 14632 } 14633 14634 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 14635 const Stmt *PossibleBody) { 14636 assert(!CurrentInstantiationScope); // Ensured by caller 14637 14638 SourceLocation StmtLoc; 14639 const Stmt *Body; 14640 unsigned DiagID; 14641 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 14642 StmtLoc = FS->getRParenLoc(); 14643 Body = FS->getBody(); 14644 DiagID = diag::warn_empty_for_body; 14645 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 14646 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 14647 Body = WS->getBody(); 14648 DiagID = diag::warn_empty_while_body; 14649 } else 14650 return; // Neither `for' nor `while'. 14651 14652 // The body should be a null statement. 14653 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 14654 if (!NBody) 14655 return; 14656 14657 // Skip expensive checks if diagnostic is disabled. 14658 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 14659 return; 14660 14661 // Do the usual checks. 14662 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 14663 return; 14664 14665 // `for(...);' and `while(...);' are popular idioms, so in order to keep 14666 // noise level low, emit diagnostics only if for/while is followed by a 14667 // CompoundStmt, e.g.: 14668 // for (int i = 0; i < n; i++); 14669 // { 14670 // a(i); 14671 // } 14672 // or if for/while is followed by a statement with more indentation 14673 // than for/while itself: 14674 // for (int i = 0; i < n; i++); 14675 // a(i); 14676 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 14677 if (!ProbableTypo) { 14678 bool BodyColInvalid; 14679 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 14680 PossibleBody->getBeginLoc(), &BodyColInvalid); 14681 if (BodyColInvalid) 14682 return; 14683 14684 bool StmtColInvalid; 14685 unsigned StmtCol = 14686 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 14687 if (StmtColInvalid) 14688 return; 14689 14690 if (BodyCol > StmtCol) 14691 ProbableTypo = true; 14692 } 14693 14694 if (ProbableTypo) { 14695 Diag(NBody->getSemiLoc(), DiagID); 14696 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 14697 } 14698 } 14699 14700 //===--- CHECK: Warn on self move with std::move. -------------------------===// 14701 14702 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
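/// A minimal illustrative case (not part of the original comment):
///   std::vector<int> v;
///   v = std::move(v);   // diagnosed: the value is moved to itself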
14703 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
14704 SourceLocation OpLoc) {
14705 if (Diags.isIgnored(diag::warn_self_move, OpLoc))
14706 return;
14707
14708 if (inTemplateInstantiation())
14709 return;
14710
14711 // Strip parens and casts away.
14712 LHSExpr = LHSExpr->IgnoreParenImpCasts();
14713 RHSExpr = RHSExpr->IgnoreParenImpCasts();
14714
14715 // Check for a call expression
14716 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
14717 if (!CE || CE->getNumArgs() != 1)
14718 return;
14719
14720 // Check for a call to std::move
14721 if (!CE->isCallToStdMove())
14722 return;
14723
14724 // Get argument from std::move
14725 RHSExpr = CE->getArg(0);
14726
14727 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
14728 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
14729
14730 // Two DeclRefExpr's, check that the decls are the same.
14731 if (LHSDeclRef && RHSDeclRef) {
14732 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
14733 return;
14734 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
14735 RHSDeclRef->getDecl()->getCanonicalDecl())
14736 return;
14737
14738 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
14739 << LHSExpr->getSourceRange()
14740 << RHSExpr->getSourceRange();
14741 return;
14742 }
14743
14744 // Member variables require a different approach to check for self moves.
14745 // MemberExpr's are the same if every nested MemberExpr refers to the same
14746 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or
14747 // the base Expr's are CXXThisExpr's.
14748 const Expr *LHSBase = LHSExpr;
14749 const Expr *RHSBase = RHSExpr;
14750 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
14751 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
14752 if (!LHSME || !RHSME)
14753 return;
14754
14755 while (LHSME && RHSME) {
14756 if (LHSME->getMemberDecl()->getCanonicalDecl() !=
14757 RHSME->getMemberDecl()->getCanonicalDecl())
14758 return;
14759
14760 LHSBase = LHSME->getBase();
14761 RHSBase = RHSME->getBase();
14762 LHSME = dyn_cast<MemberExpr>(LHSBase);
14763 RHSME = dyn_cast<MemberExpr>(RHSBase);
14764 }
14765
14766 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
14767 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
14768 if (LHSDeclRef && RHSDeclRef) {
14769 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
14770 return;
14771 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
14772 RHSDeclRef->getDecl()->getCanonicalDecl())
14773 return;
14774
14775 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
14776 << LHSExpr->getSourceRange()
14777 << RHSExpr->getSourceRange();
14778 return;
14779 }
14780
14781 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
14782 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
14783 << LHSExpr->getSourceRange()
14784 << RHSExpr->getSourceRange();
14785 }
14786
14787 //===--- Layout compatibility ----------------------------------------------//
14788
14789 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
14790
14791 /// Check if two enumeration types are layout-compatible.
14792 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
14793 // C++11 [dcl.enum] p8:
14794 // Two enumeration types are layout-compatible if they have the same
14795 // underlying type.
14796 return ED1->isComplete() && ED2->isComplete() &&
14797 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
14798 }
14799
14800 /// Check if two fields are layout-compatible.
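/// For illustration (not part of the original comment): the bit-field width
/// check below means 'unsigned a : 3;' and 'unsigned b : 3;' can match, while
/// 'unsigned a : 3;' and 'unsigned b : 4;' cannot.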
14801 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 14802 FieldDecl *Field2) { 14803 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 14804 return false; 14805 14806 if (Field1->isBitField() != Field2->isBitField()) 14807 return false; 14808 14809 if (Field1->isBitField()) { 14810 // Make sure that the bit-fields are the same length. 14811 unsigned Bits1 = Field1->getBitWidthValue(C); 14812 unsigned Bits2 = Field2->getBitWidthValue(C); 14813 14814 if (Bits1 != Bits2) 14815 return false; 14816 } 14817 14818 return true; 14819 } 14820 14821 /// Check if two standard-layout structs are layout-compatible. 14822 /// (C++11 [class.mem] p17) 14823 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 14824 RecordDecl *RD2) { 14825 // If both records are C++ classes, check that base classes match. 14826 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 14827 // If one of records is a CXXRecordDecl we are in C++ mode, 14828 // thus the other one is a CXXRecordDecl, too. 14829 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 14830 // Check number of base classes. 14831 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 14832 return false; 14833 14834 // Check the base classes. 14835 for (CXXRecordDecl::base_class_const_iterator 14836 Base1 = D1CXX->bases_begin(), 14837 BaseEnd1 = D1CXX->bases_end(), 14838 Base2 = D2CXX->bases_begin(); 14839 Base1 != BaseEnd1; 14840 ++Base1, ++Base2) { 14841 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 14842 return false; 14843 } 14844 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 14845 // If only RD2 is a C++ class, it should have zero base classes. 14846 if (D2CXX->getNumBases() > 0) 14847 return false; 14848 } 14849 14850 // Check the fields. 14851 RecordDecl::field_iterator Field2 = RD2->field_begin(), 14852 Field2End = RD2->field_end(), 14853 Field1 = RD1->field_begin(), 14854 Field1End = RD1->field_end(); 14855 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 14856 if (!isLayoutCompatible(C, *Field1, *Field2)) 14857 return false; 14858 } 14859 if (Field1 != Field1End || Field2 != Field2End) 14860 return false; 14861 14862 return true; 14863 } 14864 14865 /// Check if two standard-layout unions are layout-compatible. 14866 /// (C++11 [class.mem] p18) 14867 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 14868 RecordDecl *RD2) { 14869 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 14870 for (auto *Field2 : RD2->fields()) 14871 UnmatchedFields.insert(Field2); 14872 14873 for (auto *Field1 : RD1->fields()) { 14874 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 14875 I = UnmatchedFields.begin(), 14876 E = UnmatchedFields.end(); 14877 14878 for ( ; I != E; ++I) { 14879 if (isLayoutCompatible(C, Field1, *I)) { 14880 bool Result = UnmatchedFields.erase(*I); 14881 (void) Result; 14882 assert(Result); 14883 break; 14884 } 14885 } 14886 if (I == E) 14887 return false; 14888 } 14889 14890 return UnmatchedFields.empty(); 14891 } 14892 14893 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 14894 RecordDecl *RD2) { 14895 if (RD1->isUnion() != RD2->isUnion()) 14896 return false; 14897 14898 if (RD1->isUnion()) 14899 return isLayoutCompatibleUnion(C, RD1, RD2); 14900 else 14901 return isLayoutCompatibleStruct(C, RD1, RD2); 14902 } 14903 14904 /// Check if two types are layout-compatible in C++11 sense. 
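/// For illustration (not part of the original comment): two enums over the
/// same underlying type compare as layout-compatible, as do two
/// standard-layout records whose corresponding fields are layout-compatible;
/// an enum and a record (different type classes) never match.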
14905 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
14906 if (T1.isNull() || T2.isNull())
14907 return false;
14908
14909 // C++11 [basic.types] p11:
14910 // If two types T1 and T2 are the same type, then T1 and T2 are
14911 // layout-compatible types.
14912 if (C.hasSameType(T1, T2))
14913 return true;
14914
14915 T1 = T1.getCanonicalType().getUnqualifiedType();
14916 T2 = T2.getCanonicalType().getUnqualifiedType();
14917
14918 const Type::TypeClass TC1 = T1->getTypeClass();
14919 const Type::TypeClass TC2 = T2->getTypeClass();
14920
14921 if (TC1 != TC2)
14922 return false;
14923
14924 if (TC1 == Type::Enum) {
14925 return isLayoutCompatible(C,
14926 cast<EnumType>(T1)->getDecl(),
14927 cast<EnumType>(T2)->getDecl());
14928 } else if (TC1 == Type::Record) {
14929 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
14930 return false;
14931
14932 return isLayoutCompatible(C,
14933 cast<RecordType>(T1)->getDecl(),
14934 cast<RecordType>(T2)->getDecl());
14935 }
14936
14937 return false;
14938 }
14939
14940 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
14941
14942 /// Given a type tag expression find the type tag itself.
14943 ///
14944 /// \param TypeExpr Type tag expression, as it appears in user's code.
14945 ///
14946 /// \param VD Declaration of an identifier that appears in a type tag.
14947 ///
14948 /// \param MagicValue Type tag magic value.
14949 ///
14950 /// \param isConstantEvaluated whether the evaluation should be performed in
14951 /// constant context.
14953 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
14954 const ValueDecl **VD, uint64_t *MagicValue,
14955 bool isConstantEvaluated) {
14956 while(true) {
14957 if (!TypeExpr)
14958 return false;
14959
14960 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();
14961
14962 switch (TypeExpr->getStmtClass()) {
14963 case Stmt::UnaryOperatorClass: {
14964 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
14965 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
14966 TypeExpr = UO->getSubExpr();
14967 continue;
14968 }
14969 return false;
14970 }
14971
14972 case Stmt::DeclRefExprClass: {
14973 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
14974 *VD = DRE->getDecl();
14975 return true;
14976 }
14977
14978 case Stmt::IntegerLiteralClass: {
14979 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
14980 llvm::APInt MagicValueAPInt = IL->getValue();
14981 if (MagicValueAPInt.getActiveBits() <= 64) {
14982 *MagicValue = MagicValueAPInt.getZExtValue();
14983 return true;
14984 } else
14985 return false;
14986 }
14987
14988 case Stmt::BinaryConditionalOperatorClass:
14989 case Stmt::ConditionalOperatorClass: {
14990 const AbstractConditionalOperator *ACO =
14991 cast<AbstractConditionalOperator>(TypeExpr);
14992 bool Result;
14993 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
14994 isConstantEvaluated)) {
14995 if (Result)
14996 TypeExpr = ACO->getTrueExpr();
14997 else
14998 TypeExpr = ACO->getFalseExpr();
14999 continue;
15000 }
15001 return false;
15002 }
15003
15004 case Stmt::BinaryOperatorClass: {
15005 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
15006 if (BO->getOpcode() == BO_Comma) {
15007 TypeExpr = BO->getRHS();
15008 continue;
15009 }
15010 return false;
15011 }
15012
15013 default:
15014 return false;
15015 }
15016 }
15017 }
15018
15019 /// Retrieve the C type corresponding to type tag TypeExpr.
15020 ///
15021 /// \param TypeExpr Expression that specifies a type tag.
15022 ///
15023 /// \param MagicValues Registered magic values.
15024 ///
15025 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
15026 /// kind.
15027 ///
15028 /// \param TypeInfo Information about the corresponding C type.
15029 ///
15030 /// \param isConstantEvaluated whether the evaluation should be performed in
15031 /// constant context.
15032 ///
15033 /// \returns true if the corresponding C type was found.
15034 static bool GetMatchingCType(
15035 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
15036 const ASTContext &Ctx,
15037 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
15038 *MagicValues,
15039 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
15040 bool isConstantEvaluated) {
15041 FoundWrongKind = false;
15042
15043 // Variable declaration that has type_tag_for_datatype attribute.
15044 const ValueDecl *VD = nullptr;
15045
15046 uint64_t MagicValue;
15047
15048 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
15049 return false;
15050
15051 if (VD) {
15052 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
15053 if (I->getArgumentKind() != ArgumentKind) {
15054 FoundWrongKind = true;
15055 return false;
15056 }
15057 TypeInfo.Type = I->getMatchingCType();
15058 TypeInfo.LayoutCompatible = I->getLayoutCompatible();
15059 TypeInfo.MustBeNull = I->getMustBeNull();
15060 return true;
15061 }
15062 return false;
15063 }
15064
15065 if (!MagicValues)
15066 return false;
15067
15068 llvm::DenseMap<Sema::TypeTagMagicValue,
15069 Sema::TypeTagData>::const_iterator I =
15070 MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
15071 if (I == MagicValues->end())
15072 return false;
15073
15074 TypeInfo = I->second;
15075 return true;
15076 }
15077
15078 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
15079 uint64_t MagicValue, QualType Type,
15080 bool LayoutCompatible,
15081 bool MustBeNull) {
15082 if (!TypeTagForDatatypeMagicValues)
15083 TypeTagForDatatypeMagicValues.reset(
15084 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);
15085
15086 TypeTagMagicValue Magic(ArgumentKind, MagicValue);
15087 (*TypeTagForDatatypeMagicValues)[Magic] =
15088 TypeTagData(Type, LayoutCompatible, MustBeNull);
15089 }
15090
15091 static bool IsSameCharType(QualType T1, QualType T2) {
15092 const BuiltinType *BT1 = T1->getAs<BuiltinType>();
15093 if (!BT1)
15094 return false;
15095
15096 const BuiltinType *BT2 = T2->getAs<BuiltinType>();
15097 if (!BT2)
15098 return false;
15099
15100 BuiltinType::Kind T1Kind = BT1->getKind();
15101 BuiltinType::Kind T2Kind = BT2->getKind();
15102
15103 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
15104 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
15105 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
15106 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
15107 }
15108
15109 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
15110 const ArrayRef<const Expr *> ExprArgs,
15111 SourceLocation CallSiteLoc) {
15112 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
15113 bool IsPointerAttr = Attr->getIsPointer();
15114
15115 // Retrieve the argument representing the 'type_tag'.
  unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
  if (TypeTagIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 0 << Attr->getTypeTagIdx().getSourceIndex();
    return;
  }
  const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
  bool FoundWrongKind;
  TypeTagData TypeInfo;
  if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
                        TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
                        TypeInfo, isConstantEvaluated())) {
    if (FoundWrongKind)
      Diag(TypeTagExpr->getExprLoc(),
           diag::warn_type_tag_for_datatype_wrong_kind)
          << TypeTagExpr->getSourceRange();
    return;
  }

  // Retrieve the argument representing the 'arg_idx'.
  unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
  if (ArgumentIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 1 << Attr->getArgumentIdx().getSourceIndex();
    return;
  }
  const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
  if (IsPointerAttr) {
    // Skip implicit cast of pointer to `void *' (as a function argument).
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
      if (ICE->getType()->isVoidPointerType() &&
          ICE->getCastKind() == CK_BitCast)
        ArgumentExpr = ICE->getSubExpr();
  }
  QualType ArgumentType = ArgumentExpr->getType();

  // Passing a `void*' pointer shouldn't trigger a warning.
  if (IsPointerAttr && ArgumentType->isVoidPointerType())
    return;

  if (TypeInfo.MustBeNull) {
    // Type tag with matching void type requires a null pointer.
    if (!ArgumentExpr->isNullPointerConstant(
            Context, Expr::NPC_ValueDependentIsNotNull)) {
      Diag(ArgumentExpr->getExprLoc(),
           diag::warn_type_safety_null_pointer_required)
          << ArgumentKind->getName()
          << ArgumentExpr->getSourceRange()
          << TypeTagExpr->getSourceRange();
    }
    return;
  }

  QualType RequiredType = TypeInfo.Type;
  if (IsPointerAttr)
    RequiredType = Context.getPointerType(RequiredType);

  bool mismatch = false;
  if (!TypeInfo.LayoutCompatible) {
    mismatch = !Context.hasSameType(ArgumentType, RequiredType);

    // C++11 [basic.fundamental] p1:
    //   Plain char, signed char, and unsigned char are three distinct types.
    //
    // But we treat plain `char' as equivalent to `signed char' or `unsigned
    // char' depending on the current char signedness mode.
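    // For instance (an illustrative sketch, not from this file, reusing the
    // placeholder MPI-style names from the comment above), with -fsigned-char
    // a tag registered for 'signed char' should also accept a plain 'char *'
    // argument:
    //
    //   static const MPI_Datatype mpi_datatype_char
    //       __attribute__((type_tag_for_datatype(mpi, signed char)));
    //   // MPI_CHAR defined analogously to MPI_INT above.
    //   char buf[4];
    //   MPI_Send(buf, 4, MPI_CHAR);   // no warning while 'char' is signed
    //
    // The IsSameCharType check below clears the mismatch in that case.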
    if (mismatch)
      if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
                                           RequiredType->getPointeeType())) ||
          (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
        mismatch = false;
  } else
    if (IsPointerAttr)
      mismatch = !isLayoutCompatible(Context,
                                     ArgumentType->getPointeeType(),
                                     RequiredType->getPointeeType());
    else
      mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);

  if (mismatch)
    Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
        << ArgumentType << ArgumentKind
        << TypeInfo.LayoutCompatible << RequiredType
        << ArgumentExpr->getSourceRange()
        << TypeTagExpr->getSourceRange();
}

void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD,
                                         ValueDecl *MD, CharUnits Alignment) {
  MisalignedMembers.emplace_back(E, RD, MD, Alignment);
}

void Sema::DiagnoseMisalignedMembers() {
  for (MisalignedMember &m : MisalignedMembers) {
    const NamedDecl *ND = m.RD;
    if (ND->getName().empty()) {
      if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
        ND = TD;
    }
    Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
        << m.MD << ND << m.E->getSourceRange();
  }
  MisalignedMembers.clear();
}

void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
  E = E->IgnoreParens();
  if (!T->isPointerType() && !T->isIntegerType())
    return;
  if (isa<UnaryOperator>(E) &&
      cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
    auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
    if (isa<MemberExpr>(Op)) {
      auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
      if (MA != MisalignedMembers.end() &&
          (T->isIntegerType() ||
           (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
                                   Context.getTypeAlignInChars(
                                       T->getPointeeType()) <= MA->Alignment))))
        MisalignedMembers.erase(MA);
    }
  }
}

void Sema::RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action) {
  const auto *ME = dyn_cast<MemberExpr>(E);
  if (!ME)
    return;

  // No need to check expressions with an __unaligned-qualified type.
  if (E->getType().getQualifiers().hasUnaligned())
    return;

  // For a chain of MemberExpr like "a.b.c.d" this list
  // will keep FieldDecl's like [d, c, b].
  SmallVector<FieldDecl *, 4> ReverseMemberChain;
  const MemberExpr *TopME = nullptr;
  bool AnyIsPacked = false;
  do {
    QualType BaseType = ME->getBase()->getType();
    if (BaseType->isDependentType())
      return;
    if (ME->isArrow())
      BaseType = BaseType->getPointeeType();
    RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
    if (RD->isInvalidDecl())
      return;

    ValueDecl *MD = ME->getMemberDecl();
    auto *FD = dyn_cast<FieldDecl>(MD);
    // We do not care about non-data members.
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not within the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize offset of the whole access.
  CharUnits Offset;
  for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
       I++) {
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
  }

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // The offset may fulfill the alignment, but the effective alignment of
      // the complete object may still be lower than the expected expression
      // alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit.
    // Intuitively, walking the chain of member expressions from right to
    // left, we start with the alignment required by the field type, but some
    // packed attribute in that chain has reduced it. Another packed structure
    // may increase it again, but if we got here any such increase has not
    // been enough. So pointing at the first FieldDecl that is packed, or
    // whose RecordDecl is, seems reasonable.
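    // For example (an illustrative sketch, not from this file):
    //
    //   struct __attribute__((packed)) Inner { char c; int i; };
    //   struct Outer { char c; struct Inner in; } o;
    //   int *p = &o.in.i;   // 'i' ends up at offset 2, below int alignment
    //
    // The member chain here is [i, in]; 'i' is the first FieldDecl whose
    // parent record is packed, so it is selected as the culprit below.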
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}

void Sema::CheckAddressOfPackedMember(Expr *rhs) {
  using namespace std::placeholders;

  RefersToMemberWithReducedAlignment(
      rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
                     _2, _3, _4));
}

ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
                                            ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 1))
    return ExprError();

  ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
  if (MatrixArg.isInvalid())
    return MatrixArg;
  Expr *Matrix = MatrixArg.get();

  auto *MType = Matrix->getType()->getAs<ConstantMatrixType>();
  if (!MType) {
    Diag(Matrix->getBeginLoc(), diag::err_builtin_matrix_arg);
    return ExprError();
  }

  // Create returned matrix type by swapping rows and columns of the argument
  // matrix type.
  QualType ResultType = Context.getConstantMatrixType(
      MType->getElementType(), MType->getNumColumns(), MType->getNumRows());

  // Change the return type to the type of the returned matrix.
  TheCall->setType(ResultType);

  // Update call argument to use the possibly converted matrix argument.
  TheCall->setArg(0, Matrix);
  return CallResult;
}

// Get and verify the matrix dimensions.
static llvm::Optional<unsigned>
getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
  llvm::APSInt Value(64);
  SourceLocation ErrorPos;
  if (!Expr->isIntegerConstantExpr(Value, S.Context, &ErrorPos)) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
        << Name;
    return {};
  }
  uint64_t Dim = Value.getZExtValue();
  if (!ConstantMatrixType::isDimensionValid(Dim)) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
        << Name << ConstantMatrixType::getMaxElementsPerDimension();
    return {};
  }
  return Dim;
}

ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
                                                  ExprResult CallResult) {
  if (!getLangOpts().MatrixTypes) {
    Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
    return ExprError();
  }

  if (checkArgCount(*this, TheCall, 4))
    return ExprError();

  unsigned PtrArgIdx = 0;
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *RowsExpr = TheCall->getArg(1);
  Expr *ColumnsExpr = TheCall->getArg(2);
  Expr *StrideExpr = TheCall->getArg(3);

  bool ArgError = false;

  // Check pointer argument.
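  // The pointee type of the pointer argument determines the element type of
  // the resulting matrix. For example (illustrative), a 'float *' argument
  // yields a matrix of 'float', while a pointer to a type that is not a valid
  // matrix element type (e.g. a struct pointer) is diagnosed below.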
  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(0, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  QualType ElementTy;
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
        << PtrArgIdx + 1;
    ArgError = true;
  } else {
    ElementTy = PtrTy->getPointeeType().getUnqualifiedType();

    if (!ConstantMatrixType::isValidElementType(ElementTy)) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
          << PtrArgIdx + 1;
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the expression to size_t.
  auto ApplyArgumentConversions = [this](Expr *E) {
    ExprResult Conv = DefaultLvalueConversion(E);
    if (Conv.isInvalid())
      return Conv;

    return tryConvertExprToType(Conv.get(), Context.getSizeType());
  };

  // Apply conversion to row and column expressions.
  ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
  if (!RowsConv.isInvalid()) {
    RowsExpr = RowsConv.get();
    TheCall->setArg(1, RowsExpr);
  } else
    RowsExpr = nullptr;

  ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
  if (!ColumnsConv.isInvalid()) {
    ColumnsExpr = ColumnsConv.get();
    TheCall->setArg(2, ColumnsExpr);
  } else
    ColumnsExpr = nullptr;

  // If any part of the result matrix type is still pending, just use
  // Context.DependentTy until all parts are resolved.
  if ((RowsExpr && RowsExpr->isTypeDependent()) ||
      (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
    TheCall->setType(Context.DependentTy);
    return CallResult;
  }

  // Check row and column dimensions.
  llvm::Optional<unsigned> MaybeRows;
  if (RowsExpr)
    MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);

  llvm::Optional<unsigned> MaybeColumns;
  if (ColumnsExpr)
    MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);

  // Check stride argument.
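  // The stride is measured in elements and gives the distance between the
  // starts of consecutive columns in memory, so it must be at least the
  // number of rows. As a usage sketch (illustrative, following the documented
  // builtin form, not code from this file):
  //
  //   typedef float m4x3_t __attribute__((matrix_type(4, 3)));
  //   m4x3_t M = __builtin_matrix_column_major_load(Ptr, 4, 3, Stride);
  //
  // Here a constant Stride smaller than 4 is rejected below.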
  ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
  if (StrideConv.isInvalid())
    return ExprError();
  StrideExpr = StrideConv.get();
  TheCall->setArg(3, StrideExpr);

  llvm::APSInt Value(64);
  if (MaybeRows && StrideExpr->isIntegerConstantExpr(Value, Context)) {
    uint64_t Stride = Value.getZExtValue();
    if (Stride < *MaybeRows) {
      Diag(StrideExpr->getBeginLoc(),
           diag::err_builtin_matrix_stride_too_small);
      ArgError = true;
    }
  }

  if (ArgError || !MaybeRows || !MaybeColumns)
    return ExprError();

  TheCall->setType(
      Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
  return CallResult;
}

ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
                                                   ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 3))
    return ExprError();

  unsigned PtrArgIdx = 1;
  Expr *MatrixExpr = TheCall->getArg(0);
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *StrideExpr = TheCall->getArg(2);

  bool ArgError = false;

  {
    ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
    if (MatrixConv.isInvalid())
      return MatrixConv;
    MatrixExpr = MatrixConv.get();
    TheCall->setArg(0, MatrixExpr);
  }
  if (MatrixExpr->isTypeDependent()) {
    TheCall->setType(Context.DependentTy);
    return TheCall;
  }

  auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
  if (!MatrixTy) {
    Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_matrix_arg) << 0;
    ArgError = true;
  }

  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(1, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  // Check pointer argument.
  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
        << PtrArgIdx + 1;
    ArgError = true;
  } else {
    QualType ElementTy = PtrTy->getPointeeType();
    if (ElementTy.isConstQualified()) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
      ArgError = true;
    }
    ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
    if (MatrixTy &&
        !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
      Diag(PtrExpr->getBeginLoc(),
           diag::err_builtin_matrix_pointer_arg_mismatch)
          << ElementTy << MatrixTy->getElementType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the stride expression to
  // size_t.
  {
    ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
    if (StrideConv.isInvalid())
      return StrideConv;

    StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
    if (StrideConv.isInvalid())
      return StrideConv;
    StrideExpr = StrideConv.get();
    TheCall->setArg(2, StrideExpr);
  }

  // Check stride argument.
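  // As with the load builtin, the stride is measured in elements and must be
  // at least the number of rows of the stored matrix. A usage sketch
  // (illustrative, not code from this file):
  //
  //   typedef float m4x3_t __attribute__((matrix_type(4, 3)));
  //   __builtin_matrix_column_major_store(M, Ptr, Stride);  // Stride >= 4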
  llvm::APSInt Value(64);
  if (MatrixTy && StrideExpr->isIntegerConstantExpr(Value, Context)) {
    uint64_t Stride = Value.getZExtValue();
    if (Stride < MatrixTy->getNumRows()) {
      Diag(StrideExpr->getBeginLoc(),
           diag::err_builtin_matrix_stride_too_small);
      ArgError = true;
    }
  }

  if (ArgError)
    return ExprError();

  return CallResult;
}
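
// A combined usage sketch of the matrix builtins checked above (illustrative
// user code under the documented forms, not part of this file's logic):
//
//   typedef float m4x3_t __attribute__((matrix_type(4, 3)));
//   typedef float m3x4_t __attribute__((matrix_type(3, 4)));
//
//   m3x4_t transpose_to(float *In, float *Out) {
//     m4x3_t M = __builtin_matrix_column_major_load(In, 4, 3, 4);
//     m3x4_t T = __builtin_matrix_transpose(M);       // 4x3 becomes 3x4
//     __builtin_matrix_column_major_store(T, Out, 3); // stride >= 3 rows
//     return T;
//   }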