1 //===- SemaChecking.cpp - Extra Semantic Checking -------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements extra semantic analysis beyond what is enforced 10 // by the C type system. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "clang/AST/APValue.h" 15 #include "clang/AST/ASTContext.h" 16 #include "clang/AST/Attr.h" 17 #include "clang/AST/AttrIterator.h" 18 #include "clang/AST/CharUnits.h" 19 #include "clang/AST/Decl.h" 20 #include "clang/AST/DeclBase.h" 21 #include "clang/AST/DeclCXX.h" 22 #include "clang/AST/DeclObjC.h" 23 #include "clang/AST/DeclarationName.h" 24 #include "clang/AST/EvaluatedExprVisitor.h" 25 #include "clang/AST/Expr.h" 26 #include "clang/AST/ExprCXX.h" 27 #include "clang/AST/ExprObjC.h" 28 #include "clang/AST/ExprOpenMP.h" 29 #include "clang/AST/FormatString.h" 30 #include "clang/AST/NSAPI.h" 31 #include "clang/AST/NonTrivialTypeVisitor.h" 32 #include "clang/AST/OperationKinds.h" 33 #include "clang/AST/RecordLayout.h" 34 #include "clang/AST/Stmt.h" 35 #include "clang/AST/TemplateBase.h" 36 #include "clang/AST/Type.h" 37 #include "clang/AST/TypeLoc.h" 38 #include "clang/AST/UnresolvedSet.h" 39 #include "clang/Basic/AddressSpaces.h" 40 #include "clang/Basic/CharInfo.h" 41 #include "clang/Basic/Diagnostic.h" 42 #include "clang/Basic/IdentifierTable.h" 43 #include "clang/Basic/LLVM.h" 44 #include "clang/Basic/LangOptions.h" 45 #include "clang/Basic/OpenCLOptions.h" 46 #include "clang/Basic/OperatorKinds.h" 47 #include "clang/Basic/PartialDiagnostic.h" 48 #include "clang/Basic/SourceLocation.h" 49 #include "clang/Basic/SourceManager.h" 50 #include "clang/Basic/Specifiers.h" 51 #include 
"clang/Basic/SyncScope.h" 52 #include "clang/Basic/TargetBuiltins.h" 53 #include "clang/Basic/TargetCXXABI.h" 54 #include "clang/Basic/TargetInfo.h" 55 #include "clang/Basic/TypeTraits.h" 56 #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering. 57 #include "clang/Sema/Initialization.h" 58 #include "clang/Sema/Lookup.h" 59 #include "clang/Sema/Ownership.h" 60 #include "clang/Sema/Scope.h" 61 #include "clang/Sema/ScopeInfo.h" 62 #include "clang/Sema/Sema.h" 63 #include "clang/Sema/SemaInternal.h" 64 #include "llvm/ADT/APFloat.h" 65 #include "llvm/ADT/APInt.h" 66 #include "llvm/ADT/APSInt.h" 67 #include "llvm/ADT/ArrayRef.h" 68 #include "llvm/ADT/DenseMap.h" 69 #include "llvm/ADT/FoldingSet.h" 70 #include "llvm/ADT/STLExtras.h" 71 #include "llvm/ADT/SmallBitVector.h" 72 #include "llvm/ADT/SmallPtrSet.h" 73 #include "llvm/ADT/SmallString.h" 74 #include "llvm/ADT/SmallVector.h" 75 #include "llvm/ADT/StringExtras.h" 76 #include "llvm/ADT/StringRef.h" 77 #include "llvm/ADT/StringSet.h" 78 #include "llvm/ADT/StringSwitch.h" 79 #include "llvm/Support/AtomicOrdering.h" 80 #include "llvm/Support/Casting.h" 81 #include "llvm/Support/Compiler.h" 82 #include "llvm/Support/ConvertUTF.h" 83 #include "llvm/Support/ErrorHandling.h" 84 #include "llvm/Support/Format.h" 85 #include "llvm/Support/Locale.h" 86 #include "llvm/Support/MathExtras.h" 87 #include "llvm/Support/SaveAndRestore.h" 88 #include "llvm/Support/raw_ostream.h" 89 #include "llvm/TargetParser/Triple.h" 90 #include <algorithm> 91 #include <bitset> 92 #include <cassert> 93 #include <cctype> 94 #include <cstddef> 95 #include <cstdint> 96 #include <functional> 97 #include <limits> 98 #include <optional> 99 #include <string> 100 #include <tuple> 101 #include <utility> 102 103 using namespace clang; 104 using namespace sema; 105 106 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 107 unsigned ByteNo) const { 108 return SL->getLocationOfByte(ByteNo, getSourceManager(), 
LangOpts, 109 Context.getTargetInfo()); 110 } 111 112 static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A, 113 Sema::FormatArgumentPassingKind B) { 114 return (A << 8) | B; 115 } 116 117 /// Checks that a call expression's argument count is at least the desired 118 /// number. This is useful when doing custom type-checking on a variadic 119 /// function. Returns true on error. 120 static bool checkArgCountAtLeast(Sema &S, CallExpr *Call, 121 unsigned MinArgCount) { 122 unsigned ArgCount = Call->getNumArgs(); 123 if (ArgCount >= MinArgCount) 124 return false; 125 126 return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args) 127 << 0 /*function call*/ << MinArgCount << ArgCount 128 << Call->getSourceRange(); 129 } 130 131 /// Checks that a call expression's argument count is at most the desired 132 /// number. This is useful when doing custom type-checking on a variadic 133 /// function. Returns true on error. 134 static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) { 135 unsigned ArgCount = Call->getNumArgs(); 136 if (ArgCount <= MaxArgCount) 137 return false; 138 return S.Diag(Call->getEndLoc(), 139 diag::err_typecheck_call_too_many_args_at_most) 140 << 0 /*function call*/ << MaxArgCount << ArgCount 141 << Call->getSourceRange(); 142 } 143 144 /// Checks that a call expression's argument count is in the desired range. This 145 /// is useful when doing custom type-checking on a variadic function. Returns 146 /// true on error. 147 static bool checkArgCountRange(Sema &S, CallExpr *Call, unsigned MinArgCount, 148 unsigned MaxArgCount) { 149 return checkArgCountAtLeast(S, Call, MinArgCount) || 150 checkArgCountAtMost(S, Call, MaxArgCount); 151 } 152 153 /// Checks that a call expression's argument count is the desired number. 154 /// This is useful when doing custom type-checking. Returns true on error. 
155 static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) { 156 unsigned ArgCount = Call->getNumArgs(); 157 if (ArgCount == DesiredArgCount) 158 return false; 159 160 if (checkArgCountAtLeast(S, Call, DesiredArgCount)) 161 return true; 162 assert(ArgCount > DesiredArgCount && "should have diagnosed this"); 163 164 // Highlight all the excess arguments. 165 SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(), 166 Call->getArg(ArgCount - 1)->getEndLoc()); 167 168 return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args) 169 << 0 /*function call*/ << DesiredArgCount << ArgCount 170 << Call->getArg(1)->getSourceRange(); 171 } 172 173 static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) { 174 if (Value->isTypeDependent()) 175 return false; 176 177 InitializedEntity Entity = 178 InitializedEntity::InitializeParameter(S.Context, Ty, false); 179 ExprResult Result = 180 S.PerformCopyInitialization(Entity, SourceLocation(), Value); 181 if (Result.isInvalid()) 182 return true; 183 Value = Result.get(); 184 return false; 185 } 186 187 /// Check that the first argument to __builtin_annotation is an integer 188 /// and the second argument is a non-wide string literal. 189 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { 190 if (checkArgCount(S, TheCall, 2)) 191 return true; 192 193 // First argument should be an integer. 194 Expr *ValArg = TheCall->getArg(0); 195 QualType Ty = ValArg->getType(); 196 if (!Ty->isIntegerType()) { 197 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg) 198 << ValArg->getSourceRange(); 199 return true; 200 } 201 202 // Second argument should be a constant string. 
203 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts(); 204 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg); 205 if (!Literal || !Literal->isOrdinary()) { 206 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg) 207 << StrArg->getSourceRange(); 208 return true; 209 } 210 211 TheCall->setType(Ty); 212 return false; 213 } 214 215 static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) { 216 // We need at least one argument. 217 if (TheCall->getNumArgs() < 1) { 218 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 219 << 0 << 1 << TheCall->getNumArgs() 220 << TheCall->getCallee()->getSourceRange(); 221 return true; 222 } 223 224 // All arguments should be wide string literals. 225 for (Expr *Arg : TheCall->arguments()) { 226 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts()); 227 if (!Literal || !Literal->isWide()) { 228 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str) 229 << Arg->getSourceRange(); 230 return true; 231 } 232 } 233 234 return false; 235 } 236 237 /// Check that the argument to __builtin_addressof is a glvalue, and set the 238 /// result type to the corresponding pointer type. 239 static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) { 240 if (checkArgCount(S, TheCall, 1)) 241 return true; 242 243 ExprResult Arg(TheCall->getArg(0)); 244 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc()); 245 if (ResultType.isNull()) 246 return true; 247 248 TheCall->setArg(0, Arg.get()); 249 TheCall->setType(ResultType); 250 return false; 251 } 252 253 /// Check that the argument to __builtin_function_start is a function. 
254 static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) { 255 if (checkArgCount(S, TheCall, 1)) 256 return true; 257 258 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 259 if (Arg.isInvalid()) 260 return true; 261 262 TheCall->setArg(0, Arg.get()); 263 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>( 264 Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext())); 265 266 if (!FD) { 267 S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type) 268 << TheCall->getSourceRange(); 269 return true; 270 } 271 272 return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, 273 TheCall->getBeginLoc()); 274 } 275 276 /// Check the number of arguments and set the result type to 277 /// the argument type. 278 static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) { 279 if (checkArgCount(S, TheCall, 1)) 280 return true; 281 282 TheCall->setType(TheCall->getArg(0)->getType()); 283 return false; 284 } 285 286 /// Check that the value argument for __builtin_is_aligned(value, alignment) and 287 /// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer 288 /// type (but not a function pointer) and that the alignment is a power-of-two. 289 static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) { 290 if (checkArgCount(S, TheCall, 2)) 291 return true; 292 293 clang::Expr *Source = TheCall->getArg(0); 294 bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned; 295 296 auto IsValidIntegerType = [](QualType Ty) { 297 return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType(); 298 }; 299 QualType SrcTy = Source->getType(); 300 // We should also be able to use it with arrays (but not functions!). 
301 if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) { 302 SrcTy = S.Context.getDecayedType(SrcTy); 303 } 304 if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) || 305 SrcTy->isFunctionPointerType()) { 306 // FIXME: this is not quite the right error message since we don't allow 307 // floating point types, or member pointers. 308 S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand) 309 << SrcTy; 310 return true; 311 } 312 313 clang::Expr *AlignOp = TheCall->getArg(1); 314 if (!IsValidIntegerType(AlignOp->getType())) { 315 S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int) 316 << AlignOp->getType(); 317 return true; 318 } 319 Expr::EvalResult AlignResult; 320 unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1; 321 // We can't check validity of alignment if it is value dependent. 322 if (!AlignOp->isValueDependent() && 323 AlignOp->EvaluateAsInt(AlignResult, S.Context, 324 Expr::SE_AllowSideEffects)) { 325 llvm::APSInt AlignValue = AlignResult.Val.getInt(); 326 llvm::APSInt MaxValue( 327 llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits)); 328 if (AlignValue < 1) { 329 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1; 330 return true; 331 } 332 if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) { 333 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big) 334 << toString(MaxValue, 10); 335 return true; 336 } 337 if (!AlignValue.isPowerOf2()) { 338 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two); 339 return true; 340 } 341 if (AlignValue == 1) { 342 S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless) 343 << IsBooleanAlignBuiltin; 344 } 345 } 346 347 ExprResult SrcArg = S.PerformCopyInitialization( 348 InitializedEntity::InitializeParameter(S.Context, SrcTy, false), 349 SourceLocation(), Source); 350 if (SrcArg.isInvalid()) 351 return true; 352 TheCall->setArg(0, SrcArg.get()); 353 ExprResult AlignArg = 354 
S.PerformCopyInitialization(InitializedEntity::InitializeParameter( 355 S.Context, AlignOp->getType(), false), 356 SourceLocation(), AlignOp); 357 if (AlignArg.isInvalid()) 358 return true; 359 TheCall->setArg(1, AlignArg.get()); 360 // For align_up/align_down, the return type is the same as the (potentially 361 // decayed) argument type including qualifiers. For is_aligned(), the result 362 // is always bool. 363 TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy); 364 return false; 365 } 366 367 static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall, 368 unsigned BuiltinID) { 369 if (checkArgCount(S, TheCall, 3)) 370 return true; 371 372 // First two arguments should be integers. 373 for (unsigned I = 0; I < 2; ++I) { 374 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I)); 375 if (Arg.isInvalid()) return true; 376 TheCall->setArg(I, Arg.get()); 377 378 QualType Ty = Arg.get()->getType(); 379 if (!Ty->isIntegerType()) { 380 S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int) 381 << Ty << Arg.get()->getSourceRange(); 382 return true; 383 } 384 } 385 386 // Third argument should be a pointer to a non-const integer. 387 // IRGen correctly handles volatile, restrict, and address spaces, and 388 // the other qualifiers aren't possible. 389 { 390 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2)); 391 if (Arg.isInvalid()) return true; 392 TheCall->setArg(2, Arg.get()); 393 394 QualType Ty = Arg.get()->getType(); 395 const auto *PtrTy = Ty->getAs<PointerType>(); 396 if (!PtrTy || 397 !PtrTy->getPointeeType()->isIntegerType() || 398 PtrTy->getPointeeType().isConstQualified()) { 399 S.Diag(Arg.get()->getBeginLoc(), 400 diag::err_overflow_builtin_must_be_ptr_int) 401 << Ty << Arg.get()->getSourceRange(); 402 return true; 403 } 404 } 405 406 // Disallow signed bit-precise integer args larger than 128 bits to mul 407 // function until we improve backend support. 
408 if (BuiltinID == Builtin::BI__builtin_mul_overflow) { 409 for (unsigned I = 0; I < 3; ++I) { 410 const auto Arg = TheCall->getArg(I); 411 // Third argument will be a pointer. 412 auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType(); 413 if (Ty->isBitIntType() && Ty->isSignedIntegerType() && 414 S.getASTContext().getIntWidth(Ty) > 128) 415 return S.Diag(Arg->getBeginLoc(), 416 diag::err_overflow_builtin_bit_int_max_size) 417 << 128; 418 } 419 } 420 421 return false; 422 } 423 424 namespace { 425 struct BuiltinDumpStructGenerator { 426 Sema &S; 427 CallExpr *TheCall; 428 SourceLocation Loc = TheCall->getBeginLoc(); 429 SmallVector<Expr *, 32> Actions; 430 DiagnosticErrorTrap ErrorTracker; 431 PrintingPolicy Policy; 432 433 BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall) 434 : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()), 435 Policy(S.Context.getPrintingPolicy()) { 436 Policy.AnonymousTagLocations = false; 437 } 438 439 Expr *makeOpaqueValueExpr(Expr *Inner) { 440 auto *OVE = new (S.Context) 441 OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(), 442 Inner->getObjectKind(), Inner); 443 Actions.push_back(OVE); 444 return OVE; 445 } 446 447 Expr *getStringLiteral(llvm::StringRef Str) { 448 Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Str); 449 // Wrap the literal in parentheses to attach a source location. 450 return new (S.Context) ParenExpr(Loc, Loc, Lit); 451 } 452 453 bool callPrintFunction(llvm::StringRef Format, 454 llvm::ArrayRef<Expr *> Exprs = {}) { 455 SmallVector<Expr *, 8> Args; 456 assert(TheCall->getNumArgs() >= 2); 457 Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size()); 458 Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end()); 459 Args.push_back(getStringLiteral(Format)); 460 Args.insert(Args.end(), Exprs.begin(), Exprs.end()); 461 462 // Register a note to explain why we're performing the call. 
463 Sema::CodeSynthesisContext Ctx; 464 Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall; 465 Ctx.PointOfInstantiation = Loc; 466 Ctx.CallArgs = Args.data(); 467 Ctx.NumCallArgs = Args.size(); 468 S.pushCodeSynthesisContext(Ctx); 469 470 ExprResult RealCall = 471 S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1), 472 TheCall->getBeginLoc(), Args, TheCall->getRParenLoc()); 473 474 S.popCodeSynthesisContext(); 475 if (!RealCall.isInvalid()) 476 Actions.push_back(RealCall.get()); 477 // Bail out if we've hit any errors, even if we managed to build the 478 // call. We don't want to produce more than one error. 479 return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred(); 480 } 481 482 Expr *getIndentString(unsigned Depth) { 483 if (!Depth) 484 return nullptr; 485 486 llvm::SmallString<32> Indent; 487 Indent.resize(Depth * Policy.Indentation, ' '); 488 return getStringLiteral(Indent); 489 } 490 491 Expr *getTypeString(QualType T) { 492 return getStringLiteral(T.getAsString(Policy)); 493 } 494 495 bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) { 496 llvm::raw_svector_ostream OS(Str); 497 498 // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather 499 // than trying to print a single character. 500 if (auto *BT = T->getAs<BuiltinType>()) { 501 switch (BT->getKind()) { 502 case BuiltinType::Bool: 503 OS << "%d"; 504 return true; 505 case BuiltinType::Char_U: 506 case BuiltinType::UChar: 507 OS << "%hhu"; 508 return true; 509 case BuiltinType::Char_S: 510 case BuiltinType::SChar: 511 OS << "%hhd"; 512 return true; 513 default: 514 break; 515 } 516 } 517 518 analyze_printf::PrintfSpecifier Specifier; 519 if (Specifier.fixType(T, S.getLangOpts(), S.Context, /*IsObjCLiteral=*/false)) { 520 // We were able to guess how to format this. 
521 if (Specifier.getConversionSpecifier().getKind() == 522 analyze_printf::PrintfConversionSpecifier::sArg) { 523 // Wrap double-quotes around a '%s' specifier and limit its maximum 524 // length. Ideally we'd also somehow escape special characters in the 525 // contents but printf doesn't support that. 526 // FIXME: '%s' formatting is not safe in general. 527 OS << '"'; 528 Specifier.setPrecision(analyze_printf::OptionalAmount(32u)); 529 Specifier.toString(OS); 530 OS << '"'; 531 // FIXME: It would be nice to include a '...' if the string doesn't fit 532 // in the length limit. 533 } else { 534 Specifier.toString(OS); 535 } 536 return true; 537 } 538 539 if (T->isPointerType()) { 540 // Format all pointers with '%p'. 541 OS << "%p"; 542 return true; 543 } 544 545 return false; 546 } 547 548 bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) { 549 Expr *IndentLit = getIndentString(Depth); 550 Expr *TypeLit = getTypeString(S.Context.getRecordType(RD)); 551 if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit}) 552 : callPrintFunction("%s", {TypeLit})) 553 return true; 554 555 return dumpRecordValue(RD, E, IndentLit, Depth); 556 } 557 558 // Dump a record value. E should be a pointer or lvalue referring to an RD. 559 bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent, 560 unsigned Depth) { 561 // FIXME: Decide what to do if RD is a union. At least we should probably 562 // turn off printing `const char*` members with `%s`, because that is very 563 // likely to crash if that's not the active member. Whatever we decide, we 564 // should document it. 565 566 // Build an OpaqueValueExpr so we can refer to E more than once without 567 // triggering re-evaluation. 568 Expr *RecordArg = makeOpaqueValueExpr(E); 569 bool RecordArgIsPtr = RecordArg->getType()->isPointerType(); 570 571 if (callPrintFunction(" {\n")) 572 return true; 573 574 // Dump each base class, regardless of whether they're aggregates. 
575 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 576 for (const auto &Base : CXXRD->bases()) { 577 QualType BaseType = 578 RecordArgIsPtr ? S.Context.getPointerType(Base.getType()) 579 : S.Context.getLValueReferenceType(Base.getType()); 580 ExprResult BasePtr = S.BuildCStyleCastExpr( 581 Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc, 582 RecordArg); 583 if (BasePtr.isInvalid() || 584 dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(), 585 Depth + 1)) 586 return true; 587 } 588 } 589 590 Expr *FieldIndentArg = getIndentString(Depth + 1); 591 592 // Dump each field. 593 for (auto *D : RD->decls()) { 594 auto *IFD = dyn_cast<IndirectFieldDecl>(D); 595 auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D); 596 if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion()) 597 continue; 598 599 llvm::SmallString<20> Format = llvm::StringRef("%s%s %s "); 600 llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg, 601 getTypeString(FD->getType()), 602 getStringLiteral(FD->getName())}; 603 604 if (FD->isBitField()) { 605 Format += ": %zu "; 606 QualType SizeT = S.Context.getSizeType(); 607 llvm::APInt BitWidth(S.Context.getIntWidth(SizeT), 608 FD->getBitWidthValue(S.Context)); 609 Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc)); 610 } 611 612 Format += "="; 613 614 ExprResult Field = 615 IFD ? S.BuildAnonymousStructUnionMemberReference( 616 CXXScopeSpec(), Loc, IFD, 617 DeclAccessPair::make(IFD, AS_public), RecordArg, Loc) 618 : S.BuildFieldReferenceExpr( 619 RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD, 620 DeclAccessPair::make(FD, AS_public), 621 DeclarationNameInfo(FD->getDeclName(), Loc)); 622 if (Field.isInvalid()) 623 return true; 624 625 auto *InnerRD = FD->getType()->getAsRecordDecl(); 626 auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD); 627 if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) { 628 // Recursively print the values of members of aggregate record type. 
629 if (callPrintFunction(Format, Args) || 630 dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1)) 631 return true; 632 } else { 633 Format += " "; 634 if (appendFormatSpecifier(FD->getType(), Format)) { 635 // We know how to print this field. 636 Args.push_back(Field.get()); 637 } else { 638 // We don't know how to print this field. Print out its address 639 // with a format specifier that a smart tool will be able to 640 // recognize and treat specially. 641 Format += "*%p"; 642 ExprResult FieldAddr = 643 S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get()); 644 if (FieldAddr.isInvalid()) 645 return true; 646 Args.push_back(FieldAddr.get()); 647 } 648 Format += "\n"; 649 if (callPrintFunction(Format, Args)) 650 return true; 651 } 652 } 653 654 return RecordIndent ? callPrintFunction("%s}\n", RecordIndent) 655 : callPrintFunction("}\n"); 656 } 657 658 Expr *buildWrapper() { 659 auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions, 660 PseudoObjectExpr::NoResult); 661 TheCall->setType(Wrapper->getType()); 662 TheCall->setValueKind(Wrapper->getValueKind()); 663 return Wrapper; 664 } 665 }; 666 } // namespace 667 668 static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) { 669 if (checkArgCountAtLeast(S, TheCall, 2)) 670 return ExprError(); 671 672 ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0)); 673 if (PtrArgResult.isInvalid()) 674 return ExprError(); 675 TheCall->setArg(0, PtrArgResult.get()); 676 677 // First argument should be a pointer to a struct. 
678 QualType PtrArgType = PtrArgResult.get()->getType(); 679 if (!PtrArgType->isPointerType() || 680 !PtrArgType->getPointeeType()->isRecordType()) { 681 S.Diag(PtrArgResult.get()->getBeginLoc(), 682 diag::err_expected_struct_pointer_argument) 683 << 1 << TheCall->getDirectCallee() << PtrArgType; 684 return ExprError(); 685 } 686 const RecordDecl *RD = PtrArgType->getPointeeType()->getAsRecordDecl(); 687 688 // Second argument is a callable, but we can't fully validate it until we try 689 // calling it. 690 QualType FnArgType = TheCall->getArg(1)->getType(); 691 if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() && 692 !FnArgType->isBlockPointerType() && 693 !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) { 694 auto *BT = FnArgType->getAs<BuiltinType>(); 695 switch (BT ? BT->getKind() : BuiltinType::Void) { 696 case BuiltinType::Dependent: 697 case BuiltinType::Overload: 698 case BuiltinType::BoundMember: 699 case BuiltinType::PseudoObject: 700 case BuiltinType::UnknownAny: 701 case BuiltinType::BuiltinFn: 702 // This might be a callable. 703 break; 704 705 default: 706 S.Diag(TheCall->getArg(1)->getBeginLoc(), 707 diag::err_expected_callable_argument) 708 << 2 << TheCall->getDirectCallee() << FnArgType; 709 return ExprError(); 710 } 711 } 712 713 BuiltinDumpStructGenerator Generator(S, TheCall); 714 715 // Wrap parentheses around the given pointer. This is not necessary for 716 // correct code generation, but it means that when we pretty-print the call 717 // arguments in our diagnostics we will produce '(&s)->n' instead of the 718 // incorrect '&s->n'. 
719 Expr *PtrArg = PtrArgResult.get(); 720 PtrArg = new (S.Context) 721 ParenExpr(PtrArg->getBeginLoc(), 722 S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg); 723 if (Generator.dumpUnnamedRecord(RD, PtrArg, 0)) 724 return ExprError(); 725 726 return Generator.buildWrapper(); 727 } 728 729 static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) { 730 if (checkArgCount(S, BuiltinCall, 2)) 731 return true; 732 733 SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc(); 734 Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts(); 735 Expr *Call = BuiltinCall->getArg(0); 736 Expr *Chain = BuiltinCall->getArg(1); 737 738 if (Call->getStmtClass() != Stmt::CallExprClass) { 739 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call) 740 << Call->getSourceRange(); 741 return true; 742 } 743 744 auto CE = cast<CallExpr>(Call); 745 if (CE->getCallee()->getType()->isBlockPointerType()) { 746 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call) 747 << Call->getSourceRange(); 748 return true; 749 } 750 751 const Decl *TargetDecl = CE->getCalleeDecl(); 752 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) 753 if (FD->getBuiltinID()) { 754 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call) 755 << Call->getSourceRange(); 756 return true; 757 } 758 759 if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) { 760 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call) 761 << Call->getSourceRange(); 762 return true; 763 } 764 765 ExprResult ChainResult = S.UsualUnaryConversions(Chain); 766 if (ChainResult.isInvalid()) 767 return true; 768 if (!ChainResult.get()->getType()->isPointerType()) { 769 S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer) 770 << Chain->getSourceRange(); 771 return true; 772 } 773 774 QualType ReturnTy = CE->getCallReturnType(S.Context); 775 QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() }; 776 QualType BuiltinTy = 
S.Context.getFunctionType( 777 ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo()); 778 QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy); 779 780 Builtin = 781 S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get(); 782 783 BuiltinCall->setType(CE->getType()); 784 BuiltinCall->setValueKind(CE->getValueKind()); 785 BuiltinCall->setObjectKind(CE->getObjectKind()); 786 BuiltinCall->setCallee(Builtin); 787 BuiltinCall->setArg(1, ChainResult.get()); 788 789 return false; 790 } 791 792 namespace { 793 794 class ScanfDiagnosticFormatHandler 795 : public analyze_format_string::FormatStringHandler { 796 // Accepts the argument index (relative to the first destination index) of the 797 // argument whose size we want. 798 using ComputeSizeFunction = 799 llvm::function_ref<std::optional<llvm::APSInt>(unsigned)>; 800 801 // Accepts the argument index (relative to the first destination index), the 802 // destination size, and the source size). 803 using DiagnoseFunction = 804 llvm::function_ref<void(unsigned, unsigned, unsigned)>; 805 806 ComputeSizeFunction ComputeSizeArgument; 807 DiagnoseFunction Diagnose; 808 809 public: 810 ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument, 811 DiagnoseFunction Diagnose) 812 : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {} 813 814 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 815 const char *StartSpecifier, 816 unsigned specifierLen) override { 817 if (!FS.consumesDataArgument()) 818 return true; 819 820 unsigned NulByte = 0; 821 switch ((FS.getConversionSpecifier().getKind())) { 822 default: 823 return true; 824 case analyze_format_string::ConversionSpecifier::sArg: 825 case analyze_format_string::ConversionSpecifier::ScanListArg: 826 NulByte = 1; 827 break; 828 case analyze_format_string::ConversionSpecifier::cArg: 829 break; 830 } 831 832 analyze_format_string::OptionalAmount FW = FS.getFieldWidth(); 833 if (FW.getHowSpecified() != 834 
        analyze_format_string::OptionalAmount::HowSpecified::Constant)
      return true;

    // Bytes this scanf directive will write: the constant field width plus
    // the null terminator (NulByte).
    unsigned SourceSize = FW.getConstantAmount() + NulByte;

    std::optional<llvm::APSInt> DestSizeAPS =
        ComputeSizeArgument(FS.getArgIndex());
    if (!DestSizeAPS)
      return true;

    unsigned DestSize = DestSizeAPS->getZExtValue();

    // Only warn when the destination provably cannot hold the matched data.
    if (DestSize < SourceSize)
      Diagnose(FS.getArgIndex(), DestSize, SourceSize);

    return true;
  }
};

/// Computes a lower bound (see getSizeLowerBound()) on the number of bytes a
/// printf-style call will write, so the caller can warn when even that bound
/// exceeds the destination buffer size.
class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size; // Running byte estimate, including the trailing null byte.

public:
  // Seed the estimate with the literal text of the format string (up to the
  // first embedded null, if any) plus the null byte sprintf always writes.
  // The bytes of each conversion specifier are subtracted back out in
  // HandlePrintfSpecifier, which adds the size of the expansion instead.
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen,
                             const TargetInfo &) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    // A '+' or ' ' flag reserves exactly one extra byte for the sign slot.
    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    // The specifier's own characters were counted as literal text when Size
    // was seeded from the format string; they are not written to the output,
    // so remove them now that the expansion size has been added.
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  // Returns the constant field width if one was written, 0 otherwise
  // (non-constant widths such as '*' contribute nothing to the bound).
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  // Returns the precision to use for the bound: the constant precision if
  // one was written, otherwise the specifier's default precision.
  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // Object sizes cannot be computed in dependent or constant-evaluated
  // contexts, so there is nothing to check.
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  bool UseDABAttr = false;
  const FunctionDecl *UseDecl = FD;

  // A diagnose_as_builtin attribute redirects checking to another builtin.
  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
  if (DABAttr) {
    UseDecl = DABAttr->getFunction();
    assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
    UseDABAttr = true;
  }

  unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);

  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  // All byte counts below are normalized to the target's size_t width.
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  // Maps an argument index of the attribute's builtin onto the corresponding
  // argument index of the call actually being checked.
  auto TranslateIndex = [&](unsigned Index) -> std::optional<unsigned> {
    // If we refer to a diagnose_as_builtin attribute, we need to change the
    // argument index to refer to the arguments of the called function. Unless
    // the index is out of bounds, which presumably means it's a variadic
    // function.
    if (!UseDABAttr)
      return Index;
    unsigned DABIndices = DABAttr->argIndices_size();
    unsigned NewIndex = Index < DABIndices
                            ? DABAttr->argIndices_begin()[Index]
                            : Index - DABIndices + FD->getNumParams();
    if (NewIndex >= TheCall->getNumArgs())
      return std::nullopt;
    return NewIndex;
  };

  // Evaluates an explicit size argument (e.g. the bound passed to a _chk
  // variant) as an unsigned integer constant, if possible.
  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(NewIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return std::nullopt;
    llvm::APSInt Integer = Result.Val.getInt();
    Integer.setIsUnsigned(true);
    return Integer;
  };

  // Computes the size of the object a pointer argument points to, using the
  // __builtin_object_size machinery.
  auto ComputeSizeArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    // This check can fail for variadic functions.
    if (Index < FD->getNumParams()) {
      if (const auto *POS =
              FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
        BOSType = POS->getType();
    }

    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;

    if (NewIndex >= TheCall->getNumArgs())
      return std::nullopt;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return std::nullopt;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  // Computes the compile-time strlen of a string argument, plus one for the
  // terminating null byte.
  auto ComputeStrLenArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return std::nullopt;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  std::optional<llvm::APSInt> SourceSize;
  std::optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  auto GetFunctionName = [&]() {
    StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
    // Skim off the details of whichever builtin was called to produce a better
    // diagnostic, as it's unlikely that the user wrote the __builtin
    // explicitly.
    if (IsChkVariant) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
      FunctionName = FunctionName.drop_back(std::strlen("_chk"));
    } else if (FunctionName.startswith("__builtin_")) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
    }
    return FunctionName;
  };

  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIscanf:
  case Builtin::BIfscanf:
  case Builtin::BIsscanf: {
    unsigned FormatIndex = 1;
    unsigned DataIndex = 2;
    // Plain scanf has no stream/buffer argument before the format string.
    if (BuiltinID == Builtin::BIscanf) {
      FormatIndex = 0;
      DataIndex = 1;
    }

    const auto *FormatExpr =
        TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
    if (!Format)
      return;

    if (!Format->isOrdinary() && !Format->isUTF8())
      return;

    auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
                        unsigned SourceSize) {
      DiagID = diag::warn_fortify_scanf_overflow;
      unsigned Index = ArgIndex + DataIndex;
      StringRef FunctionName = GetFunctionName();
      DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
                          PDiag(DiagID) << FunctionName << (Index + 1)
                                        << DestSize << SourceSize);
    };

    StringRef FormatStrRef = Format->getString();
    // Format-string argument indices are relative to the first data argument.
    auto ShiftedComputeSizeArgument = [&](unsigned Index) {
      return ComputeSizeArgument(Index + DataIndex);
    };
    ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
    const char *FormatBytes = FormatStrRef.data();
    const ConstantArrayType *T =
        Context.getAsConstantArrayType(Format->getType());
    assert(T && "String literal not of constant array type!");
    size_t TypeSize = T->getSize().getZExtValue();

    // In case there's a null byte somewhere.
    size_t StrLen =
        std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));

    analyze_format_string::ParseScanfString(H, FormatBytes,
                                            FormatBytes + StrLen, getLangOpts(),
                                            Context.getTargetInfo());

    // Unlike the other cases, in this one we have already issued the diagnostic
    // here, so no need to continue (because unlike the other cases, here the
    // diagnostic refers to the argument number).
    return;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isOrdinary() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        // The handler's estimate is a lower bound on the bytes written.
        SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                         .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          DestinationSize = ComputeExplicitObjectSizeArgument(2);
          IsChkVariant = true;
        } else {
          DestinationSize = ComputeSizeArgument(0);
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    // _chk variants carry the copy size and the object size as their last
    // two arguments, in that order.
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
    DestinationSize =
        ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    IsChkVariant = true;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(3);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  }

  // Warn only when both sizes are known and the source strictly exceeds the
  // destination.
  if (!SourceSize || !DestinationSize ||
      llvm::APSInt::compareValues(*SourceSize, *DestinationSize) <= 0)
    return;

  StringRef FunctionName = GetFunctionName();

  SmallString<16> DestinationStr;
  SmallString<16> SourceStr;
  DestinationSize->toString(DestinationStr, /*Radix=*/10);
  SourceSize->toString(SourceStr, /*Radix=*/10);
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << DestinationStr << SourceStr);
}

/// Checks that a builtin call appears inside an SEH __except scope whose
/// flags include NeededScopeFlags; emits DiagID naming the builtin and
/// returns true otherwise.
static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

/// Returns true if the argument's type is a block pointer.
static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

/// Returns true (and diagnoses) if neither the cl_khr_subgroups extension
/// nor the __opencl_c_subgroups feature is available.
static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  // OpenCL device can support extension but not the feature as extension
  // requires subgroup independent forward progress, but subgroup independent
  // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature.
  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) &&
      !S.getOpenCLOptions().isSupported("__opencl_c_subgroups",
                                        S.getLangOpts())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee()
        << "cl_khr_subgroups or __opencl_c_subgroups";
    return true;
  }
  return false;
}

/// Checks an OpenCL builtin taking exactly (ndrange_t, block): validates the
/// argument count, the subgroup extension requirement, and both argument
/// types, plus the block's parameters.
static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  // ndrange_t is a program-defined typedef, so it is matched by name.
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

/// Checks that call arguments [Start, End] are integers convertible to
/// size_t, diagnosing each one that is not.
static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameter of passed block.
1479 static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall, 1480 Expr *BlockArg, 1481 unsigned NumNonVarArgs) { 1482 const BlockPointerType *BPT = 1483 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 1484 unsigned NumBlockParams = 1485 BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams(); 1486 unsigned TotalNumArgs = TheCall->getNumArgs(); 1487 1488 // For each argument passed to the block, a corresponding uint needs to 1489 // be passed to describe the size of the local memory. 1490 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) { 1491 S.Diag(TheCall->getBeginLoc(), 1492 diag::err_opencl_enqueue_kernel_local_size_args); 1493 return true; 1494 } 1495 1496 // Check that the sizes of the local memory are specified by integers. 1497 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs, 1498 TotalNumArgs - 1); 1499 } 1500 1501 /// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different 1502 /// overload formats specified in Table 6.13.17.1. 1503 /// int enqueue_kernel(queue_t queue, 1504 /// kernel_enqueue_flags_t flags, 1505 /// const ndrange_t ndrange, 1506 /// void (^block)(void)) 1507 /// int enqueue_kernel(queue_t queue, 1508 /// kernel_enqueue_flags_t flags, 1509 /// const ndrange_t ndrange, 1510 /// uint num_events_in_wait_list, 1511 /// clk_event_t *event_wait_list, 1512 /// clk_event_t *event_ret, 1513 /// void (^block)(void)) 1514 /// int enqueue_kernel(queue_t queue, 1515 /// kernel_enqueue_flags_t flags, 1516 /// const ndrange_t ndrange, 1517 /// void (^block)(local void*, ...), 1518 /// uint size0, ...) 1519 /// int enqueue_kernel(queue_t queue, 1520 /// kernel_enqueue_flags_t flags, 1521 /// const ndrange_t ndrange, 1522 /// uint num_events_in_wait_list, 1523 /// clk_event_t *event_wait_list, 1524 /// clk_event_t *event_ret, 1525 /// void (^block)(local void*, ...), 1526 /// uint size0, ...) 
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  // All overloads share the first four arguments.
  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific case has been detected, give generic error
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true (and diagnoses) if the first argument is not a pipe, or if
/// its access qualifier is incompatible with the called pipe builtin.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    // Read builtins accept a read_only pipe, or an unqualified one (which
    // defaults to read_only).
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    // Write builtins require an explicit write_only qualifier.
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    // Any other arity is not a valid read/write_pipe overload.
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since return type of reserve_read/write_pipe built-in function is
  // reserve_id_t, which is not defined in the builtin def file , we used int
  // as return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
1818 if (!Call->getArg(1)->getType()->isReserveIDT()) { 1819 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1820 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 1821 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1822 return true; 1823 } 1824 1825 return false; 1826 } 1827 1828 // Performs a semantic analysis on the call to built-in Pipe 1829 // Query Functions. 1830 // \param S Reference to the semantic analyzer. 1831 // \param Call The call to the builtin function to be analyzed. 1832 // \return True if a semantic error was found, false otherwise. 1833 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) { 1834 if (checkArgCount(S, Call, 1)) 1835 return true; 1836 1837 if (!Call->getArg(0)->getType()->isPipeType()) { 1838 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg) 1839 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange(); 1840 return true; 1841 } 1842 1843 return false; 1844 } 1845 1846 // OpenCL v2.0 s6.13.9 - Address space qualifier functions. 1847 // Performs semantic analysis for the to_global/local/private call. 1848 // \param S Reference to the semantic analyzer. 1849 // \param BuiltinID ID of the builtin function. 1850 // \param Call A pointer to the builtin call. 1851 // \return True if a semantic error has been found, false otherwise. 
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  // The argument must be a pointer, and to_global/local/private is never
  // valid on a pointer into the constant address space.
  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() || RT->getPointeeType()
      .getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  // Warn (but do not error) when the pointee is not in the generic address
  // space, since these builtins are intended to take generic pointers.
  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  // Rewrite the call's result type: same pointee type as the argument, but
  // qualified with the concrete address space selected by the builtin.
  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

// Performs semantic analysis for __builtin_launder: computes the result type
// from the argument, rejects invalid operand categories, and rewrites the
// argument via copy-initialization.
// \param S Reference to the semantic analyzer.
// \param TheCall The call to the builtin to be analyzed.
// \return The (possibly updated) call, or ExprError() on failure.
static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  // Select a diagnostic index for invalid operands: 0 = not a pointer,
  // 1 = function pointer, 2 = void pointer; nullopt means the type is OK.
  auto DiagSelect = [&]() -> std::optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return std::optional<unsigned>{};
  }();
  if (DiagSelect) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << *DiagSelect << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  // Coerce the argument to the computed parameter type as if it were passed
  // to a function taking ParamTy.
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current object format type is in the
// list of unsupported types.
1955 static bool CheckBuiltinTargetNotInUnsupported( 1956 Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1957 ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) { 1958 llvm::Triple::ObjectFormatType CurObjFormat = 1959 S.getASTContext().getTargetInfo().getTriple().getObjectFormat(); 1960 if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) { 1961 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1962 << TheCall->getSourceRange(); 1963 return true; 1964 } 1965 return false; 1966 } 1967 1968 // Emit an error and return true if the current architecture is not in the list 1969 // of supported architectures. 1970 static bool 1971 CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1972 ArrayRef<llvm::Triple::ArchType> SupportedArchs) { 1973 llvm::Triple::ArchType CurArch = 1974 S.getASTContext().getTargetInfo().getTriple().getArch(); 1975 if (llvm::is_contained(SupportedArchs, CurArch)) 1976 return false; 1977 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1978 << TheCall->getSourceRange(); 1979 return true; 1980 } 1981 1982 static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr, 1983 SourceLocation CallSiteLoc); 1984 1985 bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 1986 CallExpr *TheCall) { 1987 switch (TI.getTriple().getArch()) { 1988 default: 1989 // Some builtins don't require additional checking, so just consider these 1990 // acceptable. 
1991 return false; 1992 case llvm::Triple::arm: 1993 case llvm::Triple::armeb: 1994 case llvm::Triple::thumb: 1995 case llvm::Triple::thumbeb: 1996 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall); 1997 case llvm::Triple::aarch64: 1998 case llvm::Triple::aarch64_32: 1999 case llvm::Triple::aarch64_be: 2000 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall); 2001 case llvm::Triple::bpfeb: 2002 case llvm::Triple::bpfel: 2003 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall); 2004 case llvm::Triple::hexagon: 2005 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall); 2006 case llvm::Triple::mips: 2007 case llvm::Triple::mipsel: 2008 case llvm::Triple::mips64: 2009 case llvm::Triple::mips64el: 2010 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall); 2011 case llvm::Triple::systemz: 2012 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall); 2013 case llvm::Triple::x86: 2014 case llvm::Triple::x86_64: 2015 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall); 2016 case llvm::Triple::ppc: 2017 case llvm::Triple::ppcle: 2018 case llvm::Triple::ppc64: 2019 case llvm::Triple::ppc64le: 2020 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall); 2021 case llvm::Triple::amdgcn: 2022 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall); 2023 case llvm::Triple::riscv32: 2024 case llvm::Triple::riscv64: 2025 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall); 2026 case llvm::Triple::loongarch32: 2027 case llvm::Triple::loongarch64: 2028 return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall); 2029 case llvm::Triple::wasm32: 2030 case llvm::Triple::wasm64: 2031 return CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall); 2032 case llvm::Triple::nvptx: 2033 case llvm::Triple::nvptx64: 2034 return CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall); 2035 } 2036 } 2037 2038 // Check if \p Ty is a valid type for the elementwise math builtins. 
// If it is
// not a valid type, emit an error message and return true. Otherwise return
// false.
static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
                                        QualType Ty) {
  // Accepted: any vector type, or any scalar that is a valid constant-matrix
  // element type.
  if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) {
    return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
           << 1 << /* vector, integer or float ty*/ 0 << Ty;
  }

  return false;
}

// Check that \p ArgTy is a (vector of) real floating-point type; \p ArgIndex
// is the 1-based argument number used in the diagnostic. Returns true and
// emits an error on failure.
static bool checkFPMathBuiltinElementType(Sema &S, SourceLocation Loc,
                                          QualType ArgTy, int ArgIndex) {
  // For vectors, validate the element type rather than the vector itself.
  QualType EltTy = ArgTy;
  if (auto *VecTy = EltTy->getAs<VectorType>())
    EltTy = VecTy->getElementType();

  if (!EltTy->isRealFloatingType()) {
    return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
           << ArgIndex << /* vector or float ty*/ 5 << ArgTy;
  }

  return false;
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    // If we don't have enough arguments, continue so we can issue better
    // diagnostic in checkArgCount(...)
2085 if (ArgNo < TheCall->getNumArgs() && 2086 SemaBuiltinConstantArg(TheCall, ArgNo, Result)) 2087 return true; 2088 ICEArguments &= ~(1 << ArgNo); 2089 } 2090 2091 switch (BuiltinID) { 2092 case Builtin::BI__builtin___CFStringMakeConstantString: 2093 // CFStringMakeConstantString is currently not implemented for GOFF (i.e., 2094 // on z/OS) and for XCOFF (i.e., on AIX). Emit unsupported 2095 if (CheckBuiltinTargetNotInUnsupported( 2096 *this, BuiltinID, TheCall, 2097 {llvm::Triple::GOFF, llvm::Triple::XCOFF})) 2098 return ExprError(); 2099 assert(TheCall->getNumArgs() == 1 && 2100 "Wrong # arguments to builtin CFStringMakeConstantString"); 2101 if (CheckObjCString(TheCall->getArg(0))) 2102 return ExprError(); 2103 break; 2104 case Builtin::BI__builtin_ms_va_start: 2105 case Builtin::BI__builtin_stdarg_start: 2106 case Builtin::BI__builtin_va_start: 2107 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 2108 return ExprError(); 2109 break; 2110 case Builtin::BI__va_start: { 2111 switch (Context.getTargetInfo().getTriple().getArch()) { 2112 case llvm::Triple::aarch64: 2113 case llvm::Triple::arm: 2114 case llvm::Triple::thumb: 2115 if (SemaBuiltinVAStartARMMicrosoft(TheCall)) 2116 return ExprError(); 2117 break; 2118 default: 2119 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 2120 return ExprError(); 2121 break; 2122 } 2123 break; 2124 } 2125 2126 // The acquire, release, and no fence variants are ARM and AArch64 only. 
2127 case Builtin::BI_interlockedbittestandset_acq: 2128 case Builtin::BI_interlockedbittestandset_rel: 2129 case Builtin::BI_interlockedbittestandset_nf: 2130 case Builtin::BI_interlockedbittestandreset_acq: 2131 case Builtin::BI_interlockedbittestandreset_rel: 2132 case Builtin::BI_interlockedbittestandreset_nf: 2133 if (CheckBuiltinTargetInSupported( 2134 *this, BuiltinID, TheCall, 2135 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64})) 2136 return ExprError(); 2137 break; 2138 2139 // The 64-bit bittest variants are x64, ARM, and AArch64 only. 2140 case Builtin::BI_bittest64: 2141 case Builtin::BI_bittestandcomplement64: 2142 case Builtin::BI_bittestandreset64: 2143 case Builtin::BI_bittestandset64: 2144 case Builtin::BI_interlockedbittestandreset64: 2145 case Builtin::BI_interlockedbittestandset64: 2146 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall, 2147 {llvm::Triple::x86_64, llvm::Triple::arm, 2148 llvm::Triple::thumb, 2149 llvm::Triple::aarch64})) 2150 return ExprError(); 2151 break; 2152 2153 case Builtin::BI__builtin_set_flt_rounds: 2154 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall, 2155 {llvm::Triple::x86, llvm::Triple::x86_64, 2156 llvm::Triple::arm, llvm::Triple::thumb, 2157 llvm::Triple::aarch64})) 2158 return ExprError(); 2159 break; 2160 2161 case Builtin::BI__builtin_isgreater: 2162 case Builtin::BI__builtin_isgreaterequal: 2163 case Builtin::BI__builtin_isless: 2164 case Builtin::BI__builtin_islessequal: 2165 case Builtin::BI__builtin_islessgreater: 2166 case Builtin::BI__builtin_isunordered: 2167 if (SemaBuiltinUnorderedCompare(TheCall)) 2168 return ExprError(); 2169 break; 2170 case Builtin::BI__builtin_fpclassify: 2171 if (SemaBuiltinFPClassification(TheCall, 6)) 2172 return ExprError(); 2173 break; 2174 case Builtin::BI__builtin_isfpclass: 2175 if (SemaBuiltinFPClassification(TheCall, 2)) 2176 return ExprError(); 2177 break; 2178 case Builtin::BI__builtin_isfinite: 2179 case 
Builtin::BI__builtin_isinf: 2180 case Builtin::BI__builtin_isinf_sign: 2181 case Builtin::BI__builtin_isnan: 2182 case Builtin::BI__builtin_isnormal: 2183 case Builtin::BI__builtin_signbit: 2184 case Builtin::BI__builtin_signbitf: 2185 case Builtin::BI__builtin_signbitl: 2186 if (SemaBuiltinFPClassification(TheCall, 1)) 2187 return ExprError(); 2188 break; 2189 case Builtin::BI__builtin_shufflevector: 2190 return SemaBuiltinShuffleVector(TheCall); 2191 // TheCall will be freed by the smart pointer here, but that's fine, since 2192 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 2193 case Builtin::BI__builtin_prefetch: 2194 if (SemaBuiltinPrefetch(TheCall)) 2195 return ExprError(); 2196 break; 2197 case Builtin::BI__builtin_alloca_with_align: 2198 case Builtin::BI__builtin_alloca_with_align_uninitialized: 2199 if (SemaBuiltinAllocaWithAlign(TheCall)) 2200 return ExprError(); 2201 [[fallthrough]]; 2202 case Builtin::BI__builtin_alloca: 2203 case Builtin::BI__builtin_alloca_uninitialized: 2204 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 2205 << TheCall->getDirectCallee(); 2206 break; 2207 case Builtin::BI__arithmetic_fence: 2208 if (SemaBuiltinArithmeticFence(TheCall)) 2209 return ExprError(); 2210 break; 2211 case Builtin::BI__assume: 2212 case Builtin::BI__builtin_assume: 2213 if (SemaBuiltinAssume(TheCall)) 2214 return ExprError(); 2215 break; 2216 case Builtin::BI__builtin_assume_aligned: 2217 if (SemaBuiltinAssumeAligned(TheCall)) 2218 return ExprError(); 2219 break; 2220 case Builtin::BI__builtin_dynamic_object_size: 2221 case Builtin::BI__builtin_object_size: 2222 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 2223 return ExprError(); 2224 break; 2225 case Builtin::BI__builtin_longjmp: 2226 if (SemaBuiltinLongjmp(TheCall)) 2227 return ExprError(); 2228 break; 2229 case Builtin::BI__builtin_setjmp: 2230 if (SemaBuiltinSetjmp(TheCall)) 2231 return ExprError(); 2232 break; 2233 case Builtin::BI__builtin_classify_type: 2234 if 
(checkArgCount(*this, TheCall, 1)) return true; 2235 TheCall->setType(Context.IntTy); 2236 break; 2237 case Builtin::BI__builtin_complex: 2238 if (SemaBuiltinComplex(TheCall)) 2239 return ExprError(); 2240 break; 2241 case Builtin::BI__builtin_constant_p: { 2242 if (checkArgCount(*this, TheCall, 1)) return true; 2243 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 2244 if (Arg.isInvalid()) return true; 2245 TheCall->setArg(0, Arg.get()); 2246 TheCall->setType(Context.IntTy); 2247 break; 2248 } 2249 case Builtin::BI__builtin_launder: 2250 return SemaBuiltinLaunder(*this, TheCall); 2251 case Builtin::BI__sync_fetch_and_add: 2252 case Builtin::BI__sync_fetch_and_add_1: 2253 case Builtin::BI__sync_fetch_and_add_2: 2254 case Builtin::BI__sync_fetch_and_add_4: 2255 case Builtin::BI__sync_fetch_and_add_8: 2256 case Builtin::BI__sync_fetch_and_add_16: 2257 case Builtin::BI__sync_fetch_and_sub: 2258 case Builtin::BI__sync_fetch_and_sub_1: 2259 case Builtin::BI__sync_fetch_and_sub_2: 2260 case Builtin::BI__sync_fetch_and_sub_4: 2261 case Builtin::BI__sync_fetch_and_sub_8: 2262 case Builtin::BI__sync_fetch_and_sub_16: 2263 case Builtin::BI__sync_fetch_and_or: 2264 case Builtin::BI__sync_fetch_and_or_1: 2265 case Builtin::BI__sync_fetch_and_or_2: 2266 case Builtin::BI__sync_fetch_and_or_4: 2267 case Builtin::BI__sync_fetch_and_or_8: 2268 case Builtin::BI__sync_fetch_and_or_16: 2269 case Builtin::BI__sync_fetch_and_and: 2270 case Builtin::BI__sync_fetch_and_and_1: 2271 case Builtin::BI__sync_fetch_and_and_2: 2272 case Builtin::BI__sync_fetch_and_and_4: 2273 case Builtin::BI__sync_fetch_and_and_8: 2274 case Builtin::BI__sync_fetch_and_and_16: 2275 case Builtin::BI__sync_fetch_and_xor: 2276 case Builtin::BI__sync_fetch_and_xor_1: 2277 case Builtin::BI__sync_fetch_and_xor_2: 2278 case Builtin::BI__sync_fetch_and_xor_4: 2279 case Builtin::BI__sync_fetch_and_xor_8: 2280 case Builtin::BI__sync_fetch_and_xor_16: 2281 case Builtin::BI__sync_fetch_and_nand: 
2282 case Builtin::BI__sync_fetch_and_nand_1: 2283 case Builtin::BI__sync_fetch_and_nand_2: 2284 case Builtin::BI__sync_fetch_and_nand_4: 2285 case Builtin::BI__sync_fetch_and_nand_8: 2286 case Builtin::BI__sync_fetch_and_nand_16: 2287 case Builtin::BI__sync_add_and_fetch: 2288 case Builtin::BI__sync_add_and_fetch_1: 2289 case Builtin::BI__sync_add_and_fetch_2: 2290 case Builtin::BI__sync_add_and_fetch_4: 2291 case Builtin::BI__sync_add_and_fetch_8: 2292 case Builtin::BI__sync_add_and_fetch_16: 2293 case Builtin::BI__sync_sub_and_fetch: 2294 case Builtin::BI__sync_sub_and_fetch_1: 2295 case Builtin::BI__sync_sub_and_fetch_2: 2296 case Builtin::BI__sync_sub_and_fetch_4: 2297 case Builtin::BI__sync_sub_and_fetch_8: 2298 case Builtin::BI__sync_sub_and_fetch_16: 2299 case Builtin::BI__sync_and_and_fetch: 2300 case Builtin::BI__sync_and_and_fetch_1: 2301 case Builtin::BI__sync_and_and_fetch_2: 2302 case Builtin::BI__sync_and_and_fetch_4: 2303 case Builtin::BI__sync_and_and_fetch_8: 2304 case Builtin::BI__sync_and_and_fetch_16: 2305 case Builtin::BI__sync_or_and_fetch: 2306 case Builtin::BI__sync_or_and_fetch_1: 2307 case Builtin::BI__sync_or_and_fetch_2: 2308 case Builtin::BI__sync_or_and_fetch_4: 2309 case Builtin::BI__sync_or_and_fetch_8: 2310 case Builtin::BI__sync_or_and_fetch_16: 2311 case Builtin::BI__sync_xor_and_fetch: 2312 case Builtin::BI__sync_xor_and_fetch_1: 2313 case Builtin::BI__sync_xor_and_fetch_2: 2314 case Builtin::BI__sync_xor_and_fetch_4: 2315 case Builtin::BI__sync_xor_and_fetch_8: 2316 case Builtin::BI__sync_xor_and_fetch_16: 2317 case Builtin::BI__sync_nand_and_fetch: 2318 case Builtin::BI__sync_nand_and_fetch_1: 2319 case Builtin::BI__sync_nand_and_fetch_2: 2320 case Builtin::BI__sync_nand_and_fetch_4: 2321 case Builtin::BI__sync_nand_and_fetch_8: 2322 case Builtin::BI__sync_nand_and_fetch_16: 2323 case Builtin::BI__sync_val_compare_and_swap: 2324 case Builtin::BI__sync_val_compare_and_swap_1: 2325 case Builtin::BI__sync_val_compare_and_swap_2: 
2326 case Builtin::BI__sync_val_compare_and_swap_4: 2327 case Builtin::BI__sync_val_compare_and_swap_8: 2328 case Builtin::BI__sync_val_compare_and_swap_16: 2329 case Builtin::BI__sync_bool_compare_and_swap: 2330 case Builtin::BI__sync_bool_compare_and_swap_1: 2331 case Builtin::BI__sync_bool_compare_and_swap_2: 2332 case Builtin::BI__sync_bool_compare_and_swap_4: 2333 case Builtin::BI__sync_bool_compare_and_swap_8: 2334 case Builtin::BI__sync_bool_compare_and_swap_16: 2335 case Builtin::BI__sync_lock_test_and_set: 2336 case Builtin::BI__sync_lock_test_and_set_1: 2337 case Builtin::BI__sync_lock_test_and_set_2: 2338 case Builtin::BI__sync_lock_test_and_set_4: 2339 case Builtin::BI__sync_lock_test_and_set_8: 2340 case Builtin::BI__sync_lock_test_and_set_16: 2341 case Builtin::BI__sync_lock_release: 2342 case Builtin::BI__sync_lock_release_1: 2343 case Builtin::BI__sync_lock_release_2: 2344 case Builtin::BI__sync_lock_release_4: 2345 case Builtin::BI__sync_lock_release_8: 2346 case Builtin::BI__sync_lock_release_16: 2347 case Builtin::BI__sync_swap: 2348 case Builtin::BI__sync_swap_1: 2349 case Builtin::BI__sync_swap_2: 2350 case Builtin::BI__sync_swap_4: 2351 case Builtin::BI__sync_swap_8: 2352 case Builtin::BI__sync_swap_16: 2353 return SemaBuiltinAtomicOverloaded(TheCallResult); 2354 case Builtin::BI__sync_synchronize: 2355 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 2356 << TheCall->getCallee()->getSourceRange(); 2357 break; 2358 case Builtin::BI__builtin_nontemporal_load: 2359 case Builtin::BI__builtin_nontemporal_store: 2360 return SemaBuiltinNontemporalOverloaded(TheCallResult); 2361 case Builtin::BI__builtin_memcpy_inline: { 2362 clang::Expr *SizeOp = TheCall->getArg(2); 2363 // We warn about copying to or from `nullptr` pointers when `size` is 2364 // greater than 0. When `size` is value dependent we cannot evaluate its 2365 // value so we bail out. 
2366 if (SizeOp->isValueDependent()) 2367 break; 2368 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) { 2369 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 2370 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 2371 } 2372 break; 2373 } 2374 case Builtin::BI__builtin_memset_inline: { 2375 clang::Expr *SizeOp = TheCall->getArg(2); 2376 // We warn about filling to `nullptr` pointers when `size` is greater than 2377 // 0. When `size` is value dependent we cannot evaluate its value so we bail 2378 // out. 2379 if (SizeOp->isValueDependent()) 2380 break; 2381 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) 2382 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 2383 break; 2384 } 2385 #define BUILTIN(ID, TYPE, ATTRS) 2386 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 2387 case Builtin::BI##ID: \ 2388 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 2389 #include "clang/Basic/Builtins.def" 2390 case Builtin::BI__annotation: 2391 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 2392 return ExprError(); 2393 break; 2394 case Builtin::BI__builtin_annotation: 2395 if (SemaBuiltinAnnotation(*this, TheCall)) 2396 return ExprError(); 2397 break; 2398 case Builtin::BI__builtin_addressof: 2399 if (SemaBuiltinAddressof(*this, TheCall)) 2400 return ExprError(); 2401 break; 2402 case Builtin::BI__builtin_function_start: 2403 if (SemaBuiltinFunctionStart(*this, TheCall)) 2404 return ExprError(); 2405 break; 2406 case Builtin::BI__builtin_is_aligned: 2407 case Builtin::BI__builtin_align_up: 2408 case Builtin::BI__builtin_align_down: 2409 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 2410 return ExprError(); 2411 break; 2412 case Builtin::BI__builtin_add_overflow: 2413 case Builtin::BI__builtin_sub_overflow: 2414 case Builtin::BI__builtin_mul_overflow: 2415 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 2416 return ExprError(); 2417 break; 2418 case Builtin::BI__builtin_operator_new: 
2419 case Builtin::BI__builtin_operator_delete: { 2420 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 2421 ExprResult Res = 2422 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 2423 if (Res.isInvalid()) 2424 CorrectDelayedTyposInExpr(TheCallResult.get()); 2425 return Res; 2426 } 2427 case Builtin::BI__builtin_dump_struct: 2428 return SemaBuiltinDumpStruct(*this, TheCall); 2429 case Builtin::BI__builtin_expect_with_probability: { 2430 // We first want to ensure we are called with 3 arguments 2431 if (checkArgCount(*this, TheCall, 3)) 2432 return ExprError(); 2433 // then check probability is constant float in range [0.0, 1.0] 2434 const Expr *ProbArg = TheCall->getArg(2); 2435 SmallVector<PartialDiagnosticAt, 8> Notes; 2436 Expr::EvalResult Eval; 2437 Eval.Diag = &Notes; 2438 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 2439 !Eval.Val.isFloat()) { 2440 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 2441 << ProbArg->getSourceRange(); 2442 for (const PartialDiagnosticAt &PDiag : Notes) 2443 Diag(PDiag.first, PDiag.second); 2444 return ExprError(); 2445 } 2446 llvm::APFloat Probability = Eval.Val.getFloat(); 2447 bool LoseInfo = false; 2448 Probability.convert(llvm::APFloat::IEEEdouble(), 2449 llvm::RoundingMode::Dynamic, &LoseInfo); 2450 if (!(Probability >= llvm::APFloat(0.0) && 2451 Probability <= llvm::APFloat(1.0))) { 2452 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 2453 << ProbArg->getSourceRange(); 2454 return ExprError(); 2455 } 2456 break; 2457 } 2458 case Builtin::BI__builtin_preserve_access_index: 2459 if (SemaBuiltinPreserveAI(*this, TheCall)) 2460 return ExprError(); 2461 break; 2462 case Builtin::BI__builtin_call_with_static_chain: 2463 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 2464 return ExprError(); 2465 break; 2466 case Builtin::BI__exception_code: 2467 case Builtin::BI_exception_code: 2468 if (SemaBuiltinSEHScopeCheck(*this, TheCall, 
Scope::SEHExceptScope, 2469 diag::err_seh___except_block)) 2470 return ExprError(); 2471 break; 2472 case Builtin::BI__exception_info: 2473 case Builtin::BI_exception_info: 2474 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 2475 diag::err_seh___except_filter)) 2476 return ExprError(); 2477 break; 2478 case Builtin::BI__GetExceptionInfo: 2479 if (checkArgCount(*this, TheCall, 1)) 2480 return ExprError(); 2481 2482 if (CheckCXXThrowOperand( 2483 TheCall->getBeginLoc(), 2484 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 2485 TheCall)) 2486 return ExprError(); 2487 2488 TheCall->setType(Context.VoidPtrTy); 2489 break; 2490 case Builtin::BIaddressof: 2491 case Builtin::BI__addressof: 2492 case Builtin::BIforward: 2493 case Builtin::BIforward_like: 2494 case Builtin::BImove: 2495 case Builtin::BImove_if_noexcept: 2496 case Builtin::BIas_const: { 2497 // These are all expected to be of the form 2498 // T &/&&/* f(U &/&&) 2499 // where T and U only differ in qualification. 2500 if (checkArgCount(*this, TheCall, 1)) 2501 return ExprError(); 2502 QualType Param = FDecl->getParamDecl(0)->getType(); 2503 QualType Result = FDecl->getReturnType(); 2504 bool ReturnsPointer = BuiltinID == Builtin::BIaddressof || 2505 BuiltinID == Builtin::BI__addressof; 2506 if (!(Param->isReferenceType() && 2507 (ReturnsPointer ? Result->isAnyPointerType() 2508 : Result->isReferenceType()) && 2509 Context.hasSameUnqualifiedType(Param->getPointeeType(), 2510 Result->getPointeeType()))) { 2511 Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported) 2512 << FDecl; 2513 return ExprError(); 2514 } 2515 break; 2516 } 2517 // OpenCL v2.0, s6.13.16 - Pipe functions 2518 case Builtin::BIread_pipe: 2519 case Builtin::BIwrite_pipe: 2520 // Since those two functions are declared with var args, we need a semantic 2521 // check for the argument. 
2522 if (SemaBuiltinRWPipe(*this, TheCall)) 2523 return ExprError(); 2524 break; 2525 case Builtin::BIreserve_read_pipe: 2526 case Builtin::BIreserve_write_pipe: 2527 case Builtin::BIwork_group_reserve_read_pipe: 2528 case Builtin::BIwork_group_reserve_write_pipe: 2529 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 2530 return ExprError(); 2531 break; 2532 case Builtin::BIsub_group_reserve_read_pipe: 2533 case Builtin::BIsub_group_reserve_write_pipe: 2534 if (checkOpenCLSubgroupExt(*this, TheCall) || 2535 SemaBuiltinReserveRWPipe(*this, TheCall)) 2536 return ExprError(); 2537 break; 2538 case Builtin::BIcommit_read_pipe: 2539 case Builtin::BIcommit_write_pipe: 2540 case Builtin::BIwork_group_commit_read_pipe: 2541 case Builtin::BIwork_group_commit_write_pipe: 2542 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 2543 return ExprError(); 2544 break; 2545 case Builtin::BIsub_group_commit_read_pipe: 2546 case Builtin::BIsub_group_commit_write_pipe: 2547 if (checkOpenCLSubgroupExt(*this, TheCall) || 2548 SemaBuiltinCommitRWPipe(*this, TheCall)) 2549 return ExprError(); 2550 break; 2551 case Builtin::BIget_pipe_num_packets: 2552 case Builtin::BIget_pipe_max_packets: 2553 if (SemaBuiltinPipePackets(*this, TheCall)) 2554 return ExprError(); 2555 break; 2556 case Builtin::BIto_global: 2557 case Builtin::BIto_local: 2558 case Builtin::BIto_private: 2559 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 2560 return ExprError(); 2561 break; 2562 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 
  // OpenCL v2.0 device-side-enqueue builtins: delegate argument validation to
  // the dedicated OpenCL helpers.
  case Builtin::BIenqueue_kernel:
    if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_work_group_size:
  case Builtin::BIget_kernel_preferred_work_group_size_multiple:
    if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
  case Builtin::BIget_kernel_sub_group_count_for_ndrange:
    if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_os_log_format:
    // The format-string variant may create temporaries that need cleanup;
    // the buffer-size variant shares the same format checking below.
    Cleanup.setExprNeedsCleanups(true);
    [[fallthrough]];
  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (SemaBuiltinOSLogFormat(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_frame_address:
  case Builtin::BI__builtin_return_address: {
    // The level argument must be a constant in [0, 0xFFFF].
    if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
      return ExprError();

    // -Wframe-address warning if non-zero passed to builtin
    // return/frame address.
    Expr::EvalResult Result;
    if (!TheCall->getArg(0)->isValueDependent() &&
        TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
        Result.Val.getInt() != 0)
      Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
          << ((BuiltinID == Builtin::BI__builtin_return_address)
                  ? "__builtin_return_address"
                  : "__builtin_frame_address")
          << TheCall->getSourceRange();
    break;
  }

  case Builtin::BI__builtin_nondeterministic_value: {
    if (SemaBuiltinNonDeterministicValue(TheCall))
      return ExprError();
    break;
  }

  // __builtin_elementwise_abs restricts the element type to signed integers or
  // floating point types only.
  case Builtin::BI__builtin_elementwise_abs: {
    if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
      return ExprError();

    QualType ArgTy = TheCall->getArg(0)->getType();
    QualType EltTy = ArgTy;

    // For vector arguments, the restriction applies to the element type.
    if (auto *VecTy = EltTy->getAs<VectorType>())
      EltTy = VecTy->getElementType();
    if (EltTy->isUnsignedIntegerType()) {
      Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_builtin_invalid_arg_type)
          << 1 << /* signed integer or float ty*/ 3 << ArgTy;
      return ExprError();
    }
    break;
  }

  // These builtins restrict the element type to floating point
  // types only.
  case Builtin::BI__builtin_elementwise_ceil:
  case Builtin::BI__builtin_elementwise_cos:
  case Builtin::BI__builtin_elementwise_exp:
  case Builtin::BI__builtin_elementwise_exp2:
  case Builtin::BI__builtin_elementwise_floor:
  case Builtin::BI__builtin_elementwise_log:
  case Builtin::BI__builtin_elementwise_log2:
  case Builtin::BI__builtin_elementwise_log10:
  case Builtin::BI__builtin_elementwise_roundeven:
  case Builtin::BI__builtin_elementwise_round:
  case Builtin::BI__builtin_elementwise_rint:
  case Builtin::BI__builtin_elementwise_nearbyint:
  case Builtin::BI__builtin_elementwise_sin:
  case Builtin::BI__builtin_elementwise_trunc:
  case Builtin::BI__builtin_elementwise_canonicalize: {
    if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
      return ExprError();

    QualType ArgTy = TheCall->getArg(0)->getType();
    if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
                                      ArgTy, 1))
      return ExprError();
    break;
  }
  case Builtin::BI__builtin_elementwise_fma: {
    if (SemaBuiltinElementwiseTernaryMath(TheCall))
      return ExprError();
    break;
  }

  // These builtins restrict the element type to floating point
  // types only, and take in two arguments.
  case Builtin::BI__builtin_elementwise_pow: {
    if (SemaBuiltinElementwiseMath(TheCall))
      return ExprError();

    QualType ArgTy = TheCall->getArg(0)->getType();
    if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
                                      ArgTy, 1) ||
        checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(),
                                      ArgTy, 2))
      return ExprError();
    break;
  }

  // These builtins restrict the element type to integer
  // types only.
  case Builtin::BI__builtin_elementwise_add_sat:
  case Builtin::BI__builtin_elementwise_sub_sat: {
    if (SemaBuiltinElementwiseMath(TheCall))
      return ExprError();

    const Expr *Arg = TheCall->getArg(0);
    QualType ArgTy = Arg->getType();
    QualType EltTy = ArgTy;

    if (auto *VecTy = EltTy->getAs<VectorType>())
      EltTy = VecTy->getElementType();

    if (!EltTy->isIntegerType()) {
      Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << 1 << /* integer ty */ 6 << ArgTy;
      return ExprError();
    }
    break;
  }

  case Builtin::BI__builtin_elementwise_min:
  case Builtin::BI__builtin_elementwise_max:
    if (SemaBuiltinElementwiseMath(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_elementwise_copysign: {
    if (checkArgCount(*this, TheCall, 2))
      return ExprError();

    ExprResult Magnitude = UsualUnaryConversions(TheCall->getArg(0));
    ExprResult Sign = UsualUnaryConversions(TheCall->getArg(1));
    if (Magnitude.isInvalid() || Sign.isInvalid())
      return ExprError();

    // Both operands must be floating point (scalar or vector element), and
    // their canonical types must match exactly.
    QualType MagnitudeTy = Magnitude.get()->getType();
    QualType SignTy = Sign.get()->getType();
    if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
                                      MagnitudeTy, 1) ||
        checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(),
                                      SignTy, 2)) {
      return ExprError();
    }

    if (MagnitudeTy.getCanonicalType() != SignTy.getCanonicalType()) {
      return Diag(Sign.get()->getBeginLoc(),
                  diag::err_typecheck_call_different_arg_types)
             << MagnitudeTy << SignTy;
    }

    // Write back the converted operands; the call's type follows the
    // magnitude operand.
    TheCall->setArg(0, Magnitude.get());
    TheCall->setArg(1, Sign.get());
    TheCall->setType(Magnitude.get()->getType());
    break;
  }
  case Builtin::BI__builtin_reduce_max:
  case Builtin::BI__builtin_reduce_min: {
    if (PrepareBuiltinReduceMathOneArgCall(TheCall))
      return ExprError();

    // Reductions require a vector argument; the result is a scalar of the
    // vector's element type.
    const Expr *Arg = TheCall->getArg(0);
    const auto *TyA = Arg->getType()->getAs<VectorType>();
    if (!TyA) {
      Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << 1 << /* vector ty*/ 4 << Arg->getType();
      return ExprError();
    }

    TheCall->setType(TyA->getElementType());
    break;
  }

  // These builtins support vectors of integers only.
  // TODO: ADD/MUL should support floating-point types.
  case Builtin::BI__builtin_reduce_add:
  case Builtin::BI__builtin_reduce_mul:
  case Builtin::BI__builtin_reduce_xor:
  case Builtin::BI__builtin_reduce_or:
  case Builtin::BI__builtin_reduce_and: {
    if (PrepareBuiltinReduceMathOneArgCall(TheCall))
      return ExprError();

    const Expr *Arg = TheCall->getArg(0);
    const auto *TyA = Arg->getType()->getAs<VectorType>();
    if (!TyA || !TyA->getElementType()->isIntegerType()) {
      Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << 1 << /* vector of integers */ 6 << Arg->getType();
      return ExprError();
    }
    TheCall->setType(TyA->getElementType());
    break;
  }

  case Builtin::BI__builtin_matrix_transpose:
    return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);

  case Builtin::BI__builtin_matrix_column_major_load:
    return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);

  case Builtin::BI__builtin_matrix_column_major_store:
    return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);

  case Builtin::BI__builtin_get_device_side_mangled_name: {
    // The single argument must be a direct reference to a function or
    // variable carrying a CUDA/HIP device-side attribute.
    auto Check = [](CallExpr *TheCall) {
      if (TheCall->getNumArgs() != 1)
        return false;
      auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
      if (!DRE)
        return false;
      auto *D = DRE->getDecl();
      if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
        return false;
      return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
             D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
    };
    if (!Check(TheCall)) {
      Diag(TheCall->getBeginLoc(),
           diag::err_hip_invalid_args_builtin_mangled_name);
      return ExprError();
    }
  }
  }

  // Since the target specific builtins for each arch overlap, only check those
  // of the arch we are compiling for.
  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
    if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
      assert(Context.getAuxTargetInfo() &&
             "Aux Target Builtin, but not an aux target?");

      if (CheckTSBuiltinFunctionCall(
              *Context.getAuxTargetInfo(),
              Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
        return ExprError();
    } else {
      if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
                                     TheCall))
        return ExprError();
    }
  }

  return TheCallResult;
}

// Get the valid immediate range for the specified NEON type code.
static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
  NeonTypeFlags Type(t);
  // Quad (128-bit) vectors double the lane count, hence the extra shift below.
  int IsQuad = ForceQuad ? true : Type.isQuad();
  switch (Type.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    // shift: max shift amount; otherwise: max lane index.
    return shift ? 7 : (8 << IsQuad) - 1;
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
    return shift ? 15 : (4 << IsQuad) - 1;
  case NeonTypeFlags::Int32:
    return shift ?
                   31 : (2 << IsQuad) - 1;
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return shift ? 63 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Poly128:
    return shift ? 127 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Float16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  case NeonTypeFlags::Float32:
    assert(!shift && "cannot shift float types!");
    return (2 << IsQuad) - 1;
  case NeonTypeFlags::Float64:
    assert(!shift && "cannot shift float types!");
    return (1 << IsQuad) - 1;
  case NeonTypeFlags::BFloat16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

/// getNeonEltType - Return the QualType corresponding to the elements of
/// the vector type specified by the NeonTypeFlags. This is used to check
/// the pointer arguments for Neon load/store intrinsics.
static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
                               bool IsPolyUnsigned, bool IsInt64Long) {
  switch (Flags.getEltType()) {
  case NeonTypeFlags::Int8:
    return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Int16:
    return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Int32:
    return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
  case NeonTypeFlags::Int64:
    // IsInt64Long selects between 'long' and 'long long' for 64-bit lanes,
    // matching the target's int64_t definition.
    if (IsInt64Long)
      return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
    else
      return Flags.isUnsigned() ? Context.UnsignedLongLongTy
                                : Context.LongLongTy;
  case NeonTypeFlags::Poly8:
    return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Poly16:
    return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Poly64:
    if (IsInt64Long)
      return Context.UnsignedLongTy;
    else
      return Context.UnsignedLongLongTy;
  case NeonTypeFlags::Poly128:
    // No element type for poly128; fall through to the unreachable below.
    break;
  case NeonTypeFlags::Float16:
    return Context.HalfTy;
  case NeonTypeFlags::Float32:
    return Context.FloatTy;
  case NeonTypeFlags::Float64:
    return Context.DoubleTy;
  case NeonTypeFlags::BFloat16:
    return Context.BFloat16Ty;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  // Range check SVE intrinsics that take immediate values.
  // Each tuple is (argument number, check kind, element size in bits); the
  // entries are populated by the generated include files below.
  SmallVector<std::tuple<int,int,int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    return false;
#define GET_SVE_IMMEDIATE_CHECK
#include "clang/Basic/arm_sve_sema_rangechecks.inc"
#undef GET_SVE_IMMEDIATE_CHECK
#define GET_SME_IMMEDIATE_CHECK
#include "clang/Basic/arm_sme_sema_rangechecks.inc"
#undef GET_SME_IMMEDIATE_CHECK
  }

  // Perform all the immediate checks for this builtin call.
  bool HasError = false;
  for (auto &I : ImmChecks) {
    int ArgNum, CheckTy, ElementSizeInBits;
    std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;

    typedef bool(*OptionSetCheckFnTy)(int64_t Value);

    // Function that checks whether the operand (ArgNum) is an immediate
    // that is one of the predefined values.
    auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
                                   int ErrDiag) -> bool {
      // We can't check the value of a dependent argument.
      Expr *Arg = TheCall->getArg(ArgNum);
      if (Arg->isTypeDependent() || Arg->isValueDependent())
        return false;

      // Check constant-ness first.
      llvm::APSInt Imm;
      if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
        return true;

      // Then check membership in the allowed value set.
      if (!CheckImm(Imm.getSExtValue()))
        return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
      return false;
    };

    // Dispatch on the kind of immediate check requested by the .inc tables.
    switch ((SVETypeFlags::ImmCheckType)CheckTy) {
    case SVETypeFlags::ImmCheck0_31:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_13:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck1_16:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_7:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckExtract:
      // Extract index range scales with the element size (2048-bit window).
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (2048 / ElementSizeInBits) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRight:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRightNarrow:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
                                      ElementSizeInBits / 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftLeft:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      ElementSizeInBits - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndex:
      // Lane indices are bounded by the number of lanes in a 128-bit segment;
      // the divisor's factor reflects how many elements one lane consumes.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (1 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexCompRotate:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (2 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexDot:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (4 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRot90_270:
      if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
                              diag::err_rotation_argument_to_cadd))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRotAll90:
      if (CheckImmediateInSet(
              [](int64_t V) {
                return V == 0 || V == 90 || V == 180 || V == 270;
              },
              diag::err_rotation_argument_to_cmla))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_1:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_2:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_3:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_0:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 0))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_15:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 15))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_255:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 255))
        HasError = true;
      break;
    }
  }

  return HasError;
}

bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  llvm::APSInt Result;
  // 'mask' encodes the set of legal NEON type codes for an overloaded
  // builtin; the generated include files fill these in per builtin.
  uint64_t mask = 0;
  unsigned TV = 0;
  int PtrArgNum = -1;
  bool HasConstPtr = false;
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  // The type-selecting immediate is always the last argument.
  unsigned ImmArg = TheCall->getNumArgs()-1;
  if (mask) {
    if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
      return true;

    TV = Result.getLimitedValue(64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
             << TheCall->getArg(ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
    QualType RHSTy = RHS.get()->getType();

    // Polynomial types are unsigned on AArch64 targets; 64-bit ints may be
    // 'long' or 'long long' depending on the target's int64_t.
    llvm::Triple::ArchType Arch = TI.getTriple().getArch();
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_32 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
    QualType EltTy =
        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = Context.getPointerType(EltTy);
    AssignConvertType ConvTy;
    ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
                                 RHS.get(), AA_Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_IMMEDIATE_CHECK
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}

bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  // All MVE-specific checks are table-generated into the include below.
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_mve_builtin_sema.inc"
  }
}

bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  bool Err = false;
  // Table-generated per-builtin checks; non-CDE builtins return early.
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_cde_builtin_sema.inc"
  }

  if (Err)
    return true;

  // CDE builtins take the coprocessor number as their first argument; it must
  // name a coprocessor configured for CDE.
  return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
}

bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
                                        const Expr *CoprocArg, bool WantCDE) {
  if (isConstantEvaluated())
    return false;

  // We can't check the value of a dependent argument.
  if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
    return false;

  llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
  int64_t CoprocNo = CoprocNoAP.getExtValue();
  assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");

  // A coprocessor is "CDE" when its bit is set in the target's CDE mask
  // (only coprocessors 0-7 can be CDE). Diagnose a mismatch with what the
  // calling builtin expects.
  uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
  bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));

  if (IsCDECoproc != WantCDE)
    return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
           << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();

  return false;
}

bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                        unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ARC-managed pointee types cannot be exclusively loaded/stored.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}

bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  // Exclusive load/store builtins: 32-bit ARM allows at most 64-bit accesses.
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
  }

  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
  }

  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);

  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Delegate NEON/MVE/CDE builtins to their dedicated checkers.
  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;
  if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;
  if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
  switch (BuiltinID) {
  default: return false;
  case ARM::BI__builtin_arm_ssat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
  case ARM::BI__builtin_arm_usat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__builtin_arm_dbg:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
  case ARM::BI__builtin_arm_cdp:
  case ARM::BI__builtin_arm_cdp2:
  case ARM::BI__builtin_arm_mcr:
  case ARM::BI__builtin_arm_mcr2:
  case ARM::BI__builtin_arm_mrc:
  case ARM::BI__builtin_arm_mrc2:
  case ARM::BI__builtin_arm_mcrr:
  case ARM::BI__builtin_arm_mcrr2:
  case ARM::BI__builtin_arm_mrrc:
  case ARM::BI__builtin_arm_mrrc2:
  case ARM::BI__builtin_arm_ldc:
  case ARM::BI__builtin_arm_ldcl:
  case ARM::BI__builtin_arm_ldc2:
  case ARM::BI__builtin_arm_ldc2l:
  case ARM::BI__builtin_arm_stc:
  case ARM::BI__builtin_arm_stcl:
  case ARM::BI__builtin_arm_stc2:
  case ARM::BI__builtin_arm_stc2l:
    // Generic coprocessor instructions: the coprocessor number must be a
    // constant in [0, 15] and must NOT name a CDE coprocessor.
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
           CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
                                        /*WantCDE*/ false);
  }
}

bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  // AArch64 exclusive load/store allows up to 128-bit accesses.
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr128)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);

  if (BuiltinID == AArch64::BI__getReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);

  if (BuiltinID == AArch64::BI__break)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
  case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}

static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
  if (Arg->getType()->getAsPlaceholderType())
    return false;

  // The first argument needs to be a record field access.
  // If it is an array element access, we delay decision
  // to BPF backend to check whether the access is a
  // field access or not.
  return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
          isa<MemberExpr>(Arg->IgnoreParens()) ||
          isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
}

static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
  QualType ArgType = Arg->getType();
  if (ArgType->getAsPlaceholderType())
    return false;

  // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type
  // format:
  //   1. __builtin_preserve_type_info(*(<type> *)0, flag);
  //   2.
<type> var;
  //      __builtin_preserve_type_info(var, flag);
  //
  // So the argument must directly name a declaration (DeclRefExpr) or be a
  // dereference of one (UnaryOperator); anything else cannot name a type.
  if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
      !isa<UnaryOperator>(Arg->IgnoreParens()))
    return false;

  // Typedef type.
  if (ArgType->getAs<TypedefType>())
    return true;

  // Record type or Enum type. Only named records/enums are accepted;
  // anonymous ones cannot be identified in a relocation.
  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
  if (const auto *RT = Ty->getAs<RecordType>()) {
    if (!RT->getDecl()->getDeclName().isEmpty())
      return true;
  } else if (const auto *ET = Ty->getAs<EnumType>()) {
    if (!ET->getDecl()->getDeclName().isEmpty())
      return true;
  }

  return false;
}

/// Returns true if \p Arg is an acceptable first argument of BPF's
/// __builtin_preserve_enum_value: an enumerator constant cast to a pointer
/// to its own enum type and then dereferenced.
static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
  QualType ArgType = Arg->getType();
  // Bail out if the expression's type is still a placeholder.
  if (ArgType->getAsPlaceholderType())
    return false;

  // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
  // format:
  //   __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
  //                                 flag);
  const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
  if (!UO)
    return false;

  const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
  if (!CE)
    return false;
  // The cast must turn the integer enumerator value into a pointer; an
  // enumerator with value 0 appears as a null-to-pointer cast.
  if (CE->getCastKind() != CK_IntegralToPointer &&
      CE->getCastKind() != CK_NullToPointer)
    return false;

  // The integer must be from an EnumConstantDecl.
  const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
  if (!DR)
    return false;

  const EnumConstantDecl *Enumerator =
      dyn_cast<EnumConstantDecl>(DR->getDecl());
  if (!Enumerator)
    return false;

  // The type must be EnumType.
  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
  const auto *ET = Ty->getAs<EnumType>();
  if (!ET)
    return false;

  // The enum value must be supported, i.e. the enumerator must actually
  // belong to the enum type being cast to.
  return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
}

/// Type-check a call to one of the BPF CO-RE builtins. Returns true on
/// error. On success the call expression's result type is adjusted to the
/// type the builtin actually produces.
bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
                                       CallExpr *TheCall) {
  assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
          BuiltinID == BPF::BI__builtin_btf_type_id ||
          BuiltinID == BPF::BI__builtin_preserve_type_info ||
          BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
         "unexpected BPF builtin");

  if (checkArgCount(*this, TheCall, 2))
    return true;

  // The second argument needs to be a constant int
  Expr *Arg = TheCall->getArg(1);
  std::optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
  diag::kind kind;
  if (!Value) {
    // Select the builtin-specific "argument is not constant" diagnostic.
    if (BuiltinID == BPF::BI__builtin_preserve_field_info)
      kind = diag::err_preserve_field_info_not_const;
    else if (BuiltinID == BPF::BI__builtin_btf_type_id)
      kind = diag::err_btf_type_id_not_const;
    else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
      kind = diag::err_preserve_type_info_not_const;
    else
      kind = diag::err_preserve_enum_value_not_const;
    Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
    return true;
  }

  // The first argument
  Arg = TheCall->getArg(0);
  bool InvalidArg = false;
  bool ReturnUnsignedInt = true;
  if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
    if (!isValidBPFPreserveFieldInfoArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_field_info_not_field;
    }
  } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
    if (!isValidBPFPreserveTypeInfoArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_type_info_invalid;
    }
  } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
    if (!isValidBPFPreserveEnumValueArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_enum_value_invalid;
    }
    ReturnUnsignedInt = false;
  } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
    ReturnUnsignedInt = false;
  }

  if (InvalidArg) {
    Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
    return true;
  }

  // __builtin_preserve_enum_value and __builtin_btf_type_id produce an
  // unsigned long; the other two builtins produce an unsigned int.
  if (ReturnUnsignedInt)
    TheCall->setType(Context.UnsignedIntTy);
  else
    TheCall->setType(Context.UnsignedLongTy);
  return false;
}

/// Validate the immediate-operand constraints (operand index, signedness,
/// bit width and required power-of-two alignment) of a Hexagon builtin
/// call against the table below. Returns true on error.
bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // Describes one constrained operand: which argument it is, whether the
  // immediate is signed, how many bits it may occupy, and the power-of-two
  // scale (shift amount) it must be a multiple of.
  struct ArgInfo {
    uint8_t OpNum;
    bool IsSigned;
    uint8_t BitWidth;
    uint8_t Align;
  };
  // A builtin plus up to two constrained operands; unused slots have
  // BitWidth == 0 and are skipped below.
  struct BuiltinInfo {
    unsigned BuiltinID;
    ArgInfo Infos[2];
  };

  static BuiltinInfo Infos[] = {
    // Circular load/store builtins.
    { Hexagon::BI__builtin_circ_ldd,       {{ 3, true, 4, 3 }} },
    { Hexagon::BI__builtin_circ_ldw,       {{ 3, true, 4, 2 }} },
    { Hexagon::BI__builtin_circ_ldh,       {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_lduh,      {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_ldb,       {{ 3, true, 4, 0 }} },
    { Hexagon::BI__builtin_circ_ldub,      {{ 3, true, 4, 0 }} },
    { Hexagon::BI__builtin_circ_std,       {{ 3, true, 4, 3 }} },
    { Hexagon::BI__builtin_circ_stw,       {{ 3, true, 4, 2 }} },
    { Hexagon::BI__builtin_circ_sth,       {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_sthhi,     {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_stb,       {{ 3, true, 4, 0 }} },

    // Post-increment circular load/store intrinsics.
    { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci,  {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci,   {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci,  {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci,   {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci,   {{ 1, true, 4, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci,   {{ 1, true, 4, 3 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci,  {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci,  {{ 1, true, 4, 1 }} },
    {
Hexagon::BI__builtin_HEXAGON_S2_storerf_pci,  {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci,  {{ 1, true, 4, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci,  {{ 1, true, 4, 3 }} },

    // Scalar-core intrinsics with immediate operands.
    { Hexagon::BI__builtin_HEXAGON_A2_combineii,    {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrih,        {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfril,        {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrpi,        {{ 0, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_bitspliti,    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi,      {{ 1, false, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti,      {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cround_ri,    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi,     {{ 1, false, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti,     {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui,    {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi,     {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti,     {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui,    {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi,     {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti,     {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui,    {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_bitsclri,     {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_muxii,        {{ 2, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri,    {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfclass,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n,      {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p,      {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfclass,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n,      {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p,      {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri,  {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p,      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat,  {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh,     {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p,      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
                                                    {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd,  {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
                                                    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd,  {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh,     {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractu,     {{ 1, false, 5, 0 },
                                                     { 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractup,    {{ 1, false, 6, 0 },
                                                     { 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insert,       {{ 2, false, 5, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insertp,      {{ 2, false, 6, 0 },
                                                     { 3, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p,      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh,     {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_setbit_i,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
                                                    {{ 2, false, 4, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
                                                    {{ 2, false, 4, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
                                                    {{ 2, false, 4, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
                                                    {{ 2, false, 4, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i,  {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_valignib,     {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_vspliceib,    {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbaddi,      {{ 1, true , 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi,     {{ 1, true, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extract,      {{ 1, false, 5, 0 },
                                                     { 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extractp,     {{ 1, false, 6, 0 },
                                                     { 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_lsli,         {{ 0, true, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i,    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate,    {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
                                                    {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat,   {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
                                                    {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p,      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
    // HVX (vector) intrinsics, including the 128-byte-mode variants.
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi,     {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi,    {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi,    {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
                                                    {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi,     {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
                                                    {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi,     {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
                                                    {{ 3, false, 1, 0 }} },

    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10,  {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_128B,
                                                    {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx,
                                                    {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B,
                                                    {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10,  {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_128B,
                                                    {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx,
                                                    {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B,
                                                    {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi,     {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {{ 3, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B,
                                                    {{ 3, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi,
A.BitWidth - 1 : A.BitWidth)) - 1;
    if (!A.Align) {
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
    } else {
      // An aligned immediate must be a multiple of 2^Align, so scale the
      // allowed range accordingly and also enforce the multiple constraint.
      unsigned M = 1 << A.Align;
      Min *= M;
      Max *= M;
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
      Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
    }
  }
  return Error;
}

bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}

/// Perform target-specific semantic checks on a LoongArch builtin call:
/// verify the required "32bit"/"64bit" target feature and the ranges of
/// constant immediate operands. Returns true on error.
bool Sema::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
                                             unsigned BuiltinID,
                                             CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    break;
  case LoongArch::BI__builtin_loongarch_cacop_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    [[fallthrough]];
  case LoongArch::BI__builtin_loongarch_cacop_w: {
    if (BuiltinID == LoongArch::BI__builtin_loongarch_cacop_w &&
        !TI.hasFeature("32bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la32)
             << TheCall->getSourceRange();
    // cacop: operand 0 is a 5-bit unsigned code, operand 2 a signed
    // 12-bit displacement.
    SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(5));
    SemaBuiltinConstantArgRange(TheCall, 2, llvm::minIntN(12),
                                llvm::maxIntN(12));
    break;
  }
  // The following builtins are only available on LA64.
  case LoongArch::BI__builtin_loongarch_crc_w_b_w:
  case LoongArch::BI__builtin_loongarch_crc_w_h_w:
  case LoongArch::BI__builtin_loongarch_crc_w_w_w:
  case LoongArch::BI__builtin_loongarch_crc_w_d_w:
  case LoongArch::BI__builtin_loongarch_crcc_w_b_w:
  case LoongArch::BI__builtin_loongarch_crcc_w_h_w:
  case LoongArch::BI__builtin_loongarch_crcc_w_w_w:
  case LoongArch::BI__builtin_loongarch_crcc_w_d_w:
  case LoongArch::BI__builtin_loongarch_iocsrrd_d:
  case LoongArch::BI__builtin_loongarch_iocsrwr_d:
  case LoongArch::BI__builtin_loongarch_asrtle_d:
  case LoongArch::BI__builtin_loongarch_asrtgt_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    break;
  case LoongArch::BI__builtin_loongarch_break:
  case LoongArch::BI__builtin_loongarch_dbar:
  case LoongArch::BI__builtin_loongarch_ibar:
  case LoongArch::BI__builtin_loongarch_syscall:
    // Check if immediate is in [0, 32767].
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 32767);
  // CSR accesses take a 14-bit unsigned CSR number.
  case LoongArch::BI__builtin_loongarch_csrrd_w:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrwr_w:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrxchg_w:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrrd_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrwr_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrxchg_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 16383);
  case LoongArch::BI__builtin_loongarch_lddir_d:
  case LoongArch::BI__builtin_loongarch_ldpte_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case LoongArch::BI__builtin_loongarch_movfcsr2gr:
  case LoongArch::BI__builtin_loongarch_movgr2fcsr:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(2));
  }

  return false;
}

bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
         CheckMipsBuiltinArgument(BuiltinID, TheCall);
}

/// Verify that the target provides the CPU feature (DSP, DSPr2 or MSA)
/// required by the given MIPS builtin, diagnosing and returning true if it
/// does not. Builtin membership is determined by ID range.
bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
                               CallExpr *TheCall) {

  if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_lwx) {
    if (!TI.hasFeature("dsp"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
  }

  if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
    if (!TI.hasFeature("dspr2"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_mips_builtin_requires_dspr2);
  }

  if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_msa_xori_b) {
    if (!TI.hasFeature("msa"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
  }

  return false;
}

// CheckMipsBuiltinArgument - Checks the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
// by the underlying instruction i.e., df/m, df/n and then by size.
//
// FIXME: The size tests here should instead be tablegen'd along with the
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics, we should
// be too.
bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // i: index of the argument that must be a constant; [l, u]: its allowed
  // range; m: required multiple (0 means no multiple-of constraint).
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually have an unsigned 5 bit field,
  // not a df/n field.
  case Mips::BI__builtin_msa_cfcmsa:
  case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_clei_u_b:
  case Mips::BI__builtin_msa_clei_u_h:
  case Mips::BI__builtin_msa_clei_u_w:
  case Mips::BI__builtin_msa_clei_u_d:
  case Mips::BI__builtin_msa_clti_u_b:
  case Mips::BI__builtin_msa_clti_u_h:
  case Mips::BI__builtin_msa_clti_u_w:
  case Mips::BI__builtin_msa_clti_u_d:
  case Mips::BI__builtin_msa_maxi_u_b:
  case Mips::BI__builtin_msa_maxi_u_h:
  case Mips::BI__builtin_msa_maxi_u_w:
  case Mips::BI__builtin_msa_maxi_u_d:
  case Mips::BI__builtin_msa_mini_u_b:
  case Mips::BI__builtin_msa_mini_u_h:
  case Mips::BI__builtin_msa_mini_u_w:
  case Mips::BI__builtin_msa_mini_u_d:
  case Mips::BI__builtin_msa_addvi_b:
  case Mips::BI__builtin_msa_addvi_h:
  case Mips::BI__builtin_msa_addvi_w:
  case Mips::BI__builtin_msa_addvi_d:
  case Mips::BI__builtin_msa_bclri_w:
  case Mips::BI__builtin_msa_bnegi_w:
  case Mips::BI__builtin_msa_bseti_w:
  case Mips::BI__builtin_msa_sat_s_w:
  case Mips::BI__builtin_msa_sat_u_w:
  case Mips::BI__builtin_msa_slli_w:
  case Mips::BI__builtin_msa_srai_w:
  case Mips::BI__builtin_msa_srari_w:
  case Mips::BI__builtin_msa_srli_w:
  case Mips::BI__builtin_msa_srlri_w:
  case Mips::BI__builtin_msa_subvi_b:
  case Mips::BI__builtin_msa_subvi_h:
  case Mips::BI__builtin_msa_subvi_w:
  case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_binsli_w:
  case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
  // These intrinsics take an unsigned 6 bit immediate.
  case Mips::BI__builtin_msa_bclri_d:
  case Mips::BI__builtin_msa_bnegi_d:
  case Mips::BI__builtin_msa_bseti_d:
  case Mips::BI__builtin_msa_sat_s_d:
  case Mips::BI__builtin_msa_sat_u_d:
  case Mips::BI__builtin_msa_slli_d:
  case Mips::BI__builtin_msa_srai_d:
  case Mips::BI__builtin_msa_srari_d:
  case Mips::BI__builtin_msa_srli_d:
  case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_msa_binsli_d:
  case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
  // These intrinsics take a signed 5 bit immediate.
  case Mips::BI__builtin_msa_ceqi_b:
  case Mips::BI__builtin_msa_ceqi_h:
  case Mips::BI__builtin_msa_ceqi_w:
  case Mips::BI__builtin_msa_ceqi_d:
  case Mips::BI__builtin_msa_clti_s_b:
  case Mips::BI__builtin_msa_clti_s_h:
  case Mips::BI__builtin_msa_clti_s_w:
  case Mips::BI__builtin_msa_clti_s_d:
  case Mips::BI__builtin_msa_clei_s_b:
  case Mips::BI__builtin_msa_clei_s_h:
  case Mips::BI__builtin_msa_clei_s_w:
  case Mips::BI__builtin_msa_clei_s_d:
  case Mips::BI__builtin_msa_maxi_s_b:
  case Mips::BI__builtin_msa_maxi_s_h:
  case Mips::BI__builtin_msa_maxi_s_w:
  case Mips::BI__builtin_msa_maxi_s_d:
  case Mips::BI__builtin_msa_mini_s_b:
  case Mips::BI__builtin_msa_mini_s_h:
  case Mips::BI__builtin_msa_mini_s_w:
  case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
  // These intrinsics take an unsigned 8 bit immediate.
  case Mips::BI__builtin_msa_andi_b:
  case Mips::BI__builtin_msa_nori_b:
  case Mips::BI__builtin_msa_ori_b:
  case Mips::BI__builtin_msa_shf_b:
  case Mips::BI__builtin_msa_shf_h:
  case Mips::BI__builtin_msa_shf_w:
  case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
  case Mips::BI__builtin_msa_bseli_b:
  case Mips::BI__builtin_msa_bmnzi_b:
  case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
  // df/n format
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_copy_s_b:
  case Mips::BI__builtin_msa_copy_u_b:
  case Mips::BI__builtin_msa_insve_b:
  case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_copy_s_h:
  case Mips::BI__builtin_msa_copy_u_h:
  case Mips::BI__builtin_msa_insve_h:
  case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 2 bit immediate.
  case Mips::BI__builtin_msa_copy_s_w:
  case Mips::BI__builtin_msa_copy_u_w:
  case Mips::BI__builtin_msa_insve_w:
  case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
  case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
  // These intrinsics take an unsigned 1 bit immediate.
  case Mips::BI__builtin_msa_copy_s_d:
  case Mips::BI__builtin_msa_copy_u_d:
  case Mips::BI__builtin_msa_insve_d:
  case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
  case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
  // Memory offsets and immediate loads.
  // These intrinsics take a signed 10 bit immediate.
  case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
  case Mips::BI__builtin_msa_ldi_h:
  case Mips::BI__builtin_msa_ldi_w:
  case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
  // Memory offsets must additionally be a multiple of the element size (m).
  case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
  }

  if (!m)
    return SemaBuiltinConstantArgRange(TheCall, i, l, u);

  return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
         SemaBuiltinConstantArgMultiple(TheCall, i, m);
}

/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
/// advancing the pointer over the consumed characters. The decoded type is
/// returned. If the decoded type represents a constant integer with a
/// constraint on its value then Mask is set to that value. The type descriptors
/// used in Str are specific to PPC MMA builtins and are documented in the file
/// defining the PPC builtins.
static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
                                        unsigned &Mask) {
  bool RequireICE = false;
  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  switch (*Str++) {
  case 'V':
    // 'V': an AltiVec vector of 16 unsigned chars.
    return Context.getVectorType(Context.UnsignedCharTy, 16,
                                 VectorType::VectorKind::AltiVecVector);
  case 'i': {
    // 'i<n>': int argument whose constant-value constraint <n> is
    // reported back through Mask.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing constant parameter constraint");
    Str = End;
    Mask = size;
    return Context.IntTy;
  }
  case 'W': {
    // 'W<bits>': a PowerPC MMA vector type of the given bit width,
    // optionally followed by '*' (pointer) and/or 'C' (const) suffixes.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing PowerPC MMA type size");
    Str = End;
    QualType Type;
    switch (size) {
#define PPC_VECTOR_TYPE(typeName, Id, size) \
  case size: Type = Context.Id##Ty; break;
#include "clang/Basic/PPCTypes.def"
    default: llvm_unreachable("Invalid PowerPC MMA vector type");
    }
    bool CheckVectorArgs = false;
    while (!CheckVectorArgs) {
      switch (*Str++) {
      case '*':
        Type = Context.getPointerType(Type);
        break;
      case 'C':
        Type = Type.withConst();
        break;
      default:
        // Not a qualifier: put the character back and stop consuming.
        CheckVectorArgs = true;
        --Str;
        break;
      }
    }
    return Type;
  }
  default:
    // Anything else is handled by the generic builtin type decoder.
    return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
  }
}

/// Returns true if the given builtin is only available on 64-bit PowerPC
/// targets.
static bool isPPC_64Builtin(unsigned BuiltinID) {
  // These builtins only work on PPC 64bit targets.
  switch (BuiltinID) {
  case PPC::BI__builtin_divde:
  case PPC::BI__builtin_divdeu:
  case PPC::BI__builtin_bpermd:
  case PPC::BI__builtin_pdepd:
  case PPC::BI__builtin_pextd:
  case PPC::BI__builtin_ppc_ldarx:
  case PPC::BI__builtin_ppc_stdcx:
  case PPC::BI__builtin_ppc_tdw:
  case PPC::BI__builtin_ppc_trapd:
  case PPC::BI__builtin_ppc_cmpeqb:
  case PPC::BI__builtin_ppc_setb:
  case PPC::BI__builtin_ppc_mulhd:
  case PPC::BI__builtin_ppc_mulhdu:
  case PPC::BI__builtin_ppc_maddhd:
  case PPC::BI__builtin_ppc_maddhdu:
  case PPC::BI__builtin_ppc_maddld:
  case PPC::BI__builtin_ppc_load8r:
  case PPC::BI__builtin_ppc_store8r:
  case PPC::BI__builtin_ppc_insert_exp:
  case PPC::BI__builtin_ppc_extract_sig:
  case PPC::BI__builtin_ppc_addex:
  case PPC::BI__builtin_darn:
  case PPC::BI__builtin_darn_raw:
  case PPC::BI__builtin_ppc_compare_and_swaplp:
  case PPC::BI__builtin_ppc_fetch_and_addlp:
  case PPC::BI__builtin_ppc_fetch_and_andlp:
  case PPC::BI__builtin_ppc_fetch_and_orlp:
  case PPC::BI__builtin_ppc_fetch_and_swaplp:
    return true;
  }
  return false;
}

/// Returns true if the argument consists of one contiguous run of 1s with any
/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
/// since all 1s are not contiguous.
bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
  llvm::APSInt Result;
  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
  if (Result.isShiftedMask() || (~Result).isShiftedMask())
    return false;

  return Diag(TheCall->getBeginLoc(),
              diag::err_argument_not_contiguous_bit_field)
         << ArgNum << Arg->getSourceRange();
}

/// Perform PowerPC target-specific semantic checks on a builtin call:
/// reject 64-bit-only builtins on 32-bit targets and validate immediate
/// operand ranges. Returns true on error.
bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  unsigned i = 0, l = 0, u = 0;
  bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
  llvm::APSInt Result;

  if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
    return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
           << TheCall->getSourceRange();

  switch (BuiltinID) {
  default: return false;
  case PPC::BI__builtin_altivec_crypto_vshasigmaw:
  case PPC::BI__builtin_altivec_crypto_vshasigmad:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case PPC::BI__builtin_altivec_dss:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
  case PPC::BI__builtin_tbegin:
  case PPC::BI__builtin_tend:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
  case PPC::BI__builtin_tsr:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7);
  case PPC::BI__builtin_tabortwc:
  case PPC::BI__builtin_tabortdc:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
  case PPC::BI__builtin_tabortwci:
  case PPC::BI__builtin_tabortdci:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
  // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
  // __builtin_(un)pack_longdouble are available only if long double uses IBM
  // extended double representation.
  case PPC::BI__builtin_unpack_longdouble:
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1))
      return true;
    [[fallthrough]];
  case PPC::BI__builtin_pack_longdouble:
    if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble())
      return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi)
             << "ibmlongdouble";
    return false;
  case PPC::BI__builtin_altivec_dst:
  case PPC::BI__builtin_altivec_dstt:
  case PPC::BI__builtin_altivec_dstst:
  case PPC::BI__builtin_altivec_dststt:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
  case PPC::BI__builtin_vsx_xxpermdi:
  case PPC::BI__builtin_vsx_xxsldwi:
    return SemaBuiltinVSX(TheCall);
  case PPC::BI__builtin_unpack_vector_int128:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case PPC::BI__builtin_altivec_vgnb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
  case PPC::BI__builtin_vsx_xxeval:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
  case PPC::BI__builtin_altivec_vsldbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_altivec_vsrdbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_vsx_xxpermx:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
  case PPC::BI__builtin_ppc_tw:
  case PPC::BI__builtin_ppc_tdw:
    return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
  case PPC::BI__builtin_ppc_cmprb:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
  // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
  // be a constant that represents a contiguous bit field.
4310 case PPC::BI__builtin_ppc_rlwnm: 4311 return SemaValueIsRunOfOnes(TheCall, 2); 4312 case PPC::BI__builtin_ppc_rlwimi: 4313 case PPC::BI__builtin_ppc_rldimi: 4314 return SemaBuiltinConstantArg(TheCall, 2, Result) || 4315 SemaValueIsRunOfOnes(TheCall, 3); 4316 case PPC::BI__builtin_ppc_addex: { 4317 if (SemaBuiltinConstantArgRange(TheCall, 2, 0, 3)) 4318 return true; 4319 // Output warning for reserved values 1 to 3. 4320 int ArgValue = 4321 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue(); 4322 if (ArgValue != 0) 4323 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour) 4324 << ArgValue; 4325 return false; 4326 } 4327 case PPC::BI__builtin_ppc_mtfsb0: 4328 case PPC::BI__builtin_ppc_mtfsb1: 4329 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 4330 case PPC::BI__builtin_ppc_mtfsf: 4331 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255); 4332 case PPC::BI__builtin_ppc_mtfsfi: 4333 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 4334 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 4335 case PPC::BI__builtin_ppc_alignx: 4336 return SemaBuiltinConstantArgPower2(TheCall, 0); 4337 case PPC::BI__builtin_ppc_rdlam: 4338 return SemaValueIsRunOfOnes(TheCall, 2); 4339 case PPC::BI__builtin_vsx_ldrmb: 4340 case PPC::BI__builtin_vsx_strmb: 4341 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 4342 case PPC::BI__builtin_altivec_vcntmbb: 4343 case PPC::BI__builtin_altivec_vcntmbh: 4344 case PPC::BI__builtin_altivec_vcntmbw: 4345 case PPC::BI__builtin_altivec_vcntmbd: 4346 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4347 case PPC::BI__builtin_vsx_xxgenpcvbm: 4348 case PPC::BI__builtin_vsx_xxgenpcvhm: 4349 case PPC::BI__builtin_vsx_xxgenpcvwm: 4350 case PPC::BI__builtin_vsx_xxgenpcvdm: 4351 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 4352 case PPC::BI__builtin_ppc_test_data_class: { 4353 // Check if the first argument of the __builtin_ppc_test_data_class call is 4354 // valid. 
The argument must be 'float' or 'double' or '__float128'. 4355 QualType ArgType = TheCall->getArg(0)->getType(); 4356 if (ArgType != QualType(Context.FloatTy) && 4357 ArgType != QualType(Context.DoubleTy) && 4358 ArgType != QualType(Context.Float128Ty)) 4359 return Diag(TheCall->getBeginLoc(), 4360 diag::err_ppc_invalid_test_data_class_type); 4361 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 127); 4362 } 4363 case PPC::BI__builtin_ppc_maxfe: 4364 case PPC::BI__builtin_ppc_minfe: 4365 case PPC::BI__builtin_ppc_maxfl: 4366 case PPC::BI__builtin_ppc_minfl: 4367 case PPC::BI__builtin_ppc_maxfs: 4368 case PPC::BI__builtin_ppc_minfs: { 4369 if (Context.getTargetInfo().getTriple().isOSAIX() && 4370 (BuiltinID == PPC::BI__builtin_ppc_maxfe || 4371 BuiltinID == PPC::BI__builtin_ppc_minfe)) 4372 return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type) 4373 << "builtin" << true << 128 << QualType(Context.LongDoubleTy) 4374 << false << Context.getTargetInfo().getTriple().str(); 4375 // Argument type should be exact. 
4376 QualType ArgType = QualType(Context.LongDoubleTy); 4377 if (BuiltinID == PPC::BI__builtin_ppc_maxfl || 4378 BuiltinID == PPC::BI__builtin_ppc_minfl) 4379 ArgType = QualType(Context.DoubleTy); 4380 else if (BuiltinID == PPC::BI__builtin_ppc_maxfs || 4381 BuiltinID == PPC::BI__builtin_ppc_minfs) 4382 ArgType = QualType(Context.FloatTy); 4383 for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I) 4384 if (TheCall->getArg(I)->getType() != ArgType) 4385 return Diag(TheCall->getBeginLoc(), 4386 diag::err_typecheck_convert_incompatible) 4387 << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0; 4388 return false; 4389 } 4390 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \ 4391 case PPC::BI__builtin_##Name: \ 4392 return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types); 4393 #include "clang/Basic/BuiltinsPPC.def" 4394 } 4395 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4396 } 4397 4398 // Check if the given type is a non-pointer PPC MMA type. This function is used 4399 // in Sema to prevent invalid uses of restricted PPC MMA types. 
bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
  // Pointers to (and arrays of) MMA types are permitted; only direct uses of
  // the restricted types themselves are diagnosed.
  if (Type->isPointerType() || Type->isArrayType())
    return false;

  QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
  // Expand PPCTypes.def into a chain of "|| CoreType == Context.<Id>Ty"
  // comparisons appended to the "false" seed below.
#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
  if (false
#include "clang/Basic/PPCTypes.def"
     ) {
    Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
    return true;
  }
  return false;
}

/// Validate the memory-order and synchronization-scope arguments of the
/// AMDGCN atomic inc/dec and fence builtins. Returns true if an error was
/// diagnosed, false otherwise.
bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // position of memory order and scope arguments in the builtin
  unsigned OrderIndex, ScopeIndex;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  default:
    return false;
  }

  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  // The memory-order argument must evaluate to an integer constant.
  if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check validity of memory ordering as per C11 / C++11's memory model.
  // Only fence needs check. Atomic dec/inc allow all memory orders.
  if (!llvm::isValidAtomicOrderingCABI(Ord))
    return Diag(ArgExpr->getBeginLoc(),
                diag::warn_atomic_op_has_invalid_memory_order)
           << ArgExpr->getSourceRange();
  switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
  case llvm::AtomicOrderingCABI::relaxed:
  case llvm::AtomicOrderingCABI::consume:
    // relaxed/consume are meaningless for a fence; reject them there only.
    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
      return Diag(ArgExpr->getBeginLoc(),
                  diag::warn_atomic_op_has_invalid_memory_order)
             << ArgExpr->getSourceRange();
    break;
  case llvm::AtomicOrderingCABI::acquire:
  case llvm::AtomicOrderingCABI::release:
  case llvm::AtomicOrderingCABI::acq_rel:
  case llvm::AtomicOrderingCABI::seq_cst:
    break;
  }

  Arg = TheCall->getArg(ScopeIndex);
  ArgExpr = Arg.get();
  Expr::EvalResult ArgResult1;
  // Check that sync scope is a constant literal
  if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
           << ArgExpr->getType();

  return false;
}

/// Check that the argument at \p ArgNum is a valid RVV LMUL constant.
/// Accepted encoded values are 0-3 and 5-7; 4 is rejected. Returns true if
/// an error was diagnosed, false otherwise.
bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  int64_t Val = Result.getSExtValue();
  if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
         << Arg->getSourceRange();
}

bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
                                         unsigned BuiltinID,
                                         CallExpr *TheCall) {
  // CodeGenFunction can also detect this, but this gives a better error
  // message.
4501 bool FeatureMissing = false; 4502 SmallVector<StringRef> ReqFeatures; 4503 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 4504 Features.split(ReqFeatures, ',', -1, false); 4505 4506 // Check if each required feature is included 4507 for (StringRef F : ReqFeatures) { 4508 SmallVector<StringRef> ReqOpFeatures; 4509 F.split(ReqOpFeatures, '|'); 4510 4511 if (llvm::none_of(ReqOpFeatures, 4512 [&TI](StringRef OF) { return TI.hasFeature(OF); })) { 4513 std::string FeatureStrs; 4514 bool IsExtension = true; 4515 for (StringRef OF : ReqOpFeatures) { 4516 // If the feature is 64bit, alter the string so it will print better in 4517 // the diagnostic. 4518 if (OF == "64bit") { 4519 assert(ReqOpFeatures.size() == 1 && "Expected '64bit' to be alone"); 4520 OF = "RV64"; 4521 IsExtension = false; 4522 } 4523 if (OF == "32bit") { 4524 assert(ReqOpFeatures.size() == 1 && "Expected '32bit' to be alone"); 4525 OF = "RV32"; 4526 IsExtension = false; 4527 } 4528 4529 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 4530 OF.consume_front("experimental-"); 4531 std::string FeatureStr = OF.str(); 4532 FeatureStr[0] = std::toupper(FeatureStr[0]); 4533 // Combine strings. 4534 FeatureStrs += FeatureStrs.empty() ? "" : ", "; 4535 FeatureStrs += "'"; 4536 FeatureStrs += FeatureStr; 4537 FeatureStrs += "'"; 4538 } 4539 // Error message 4540 FeatureMissing = true; 4541 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 4542 << IsExtension 4543 << TheCall->getSourceRange() << StringRef(FeatureStrs); 4544 } 4545 } 4546 4547 if (FeatureMissing) 4548 return true; 4549 4550 // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx, 4551 // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*. 
4552 switch (BuiltinID) { 4553 default: 4554 break; 4555 case RISCVVector::BI__builtin_rvv_vmulhsu_vv: 4556 case RISCVVector::BI__builtin_rvv_vmulhsu_vx: 4557 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu: 4558 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu: 4559 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m: 4560 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m: 4561 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu: 4562 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu: 4563 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum: 4564 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum: 4565 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu: 4566 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu: 4567 case RISCVVector::BI__builtin_rvv_vmulhu_vv: 4568 case RISCVVector::BI__builtin_rvv_vmulhu_vx: 4569 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu: 4570 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu: 4571 case RISCVVector::BI__builtin_rvv_vmulhu_vv_m: 4572 case RISCVVector::BI__builtin_rvv_vmulhu_vx_m: 4573 case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu: 4574 case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu: 4575 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum: 4576 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum: 4577 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu: 4578 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu: 4579 case RISCVVector::BI__builtin_rvv_vmulh_vv: 4580 case RISCVVector::BI__builtin_rvv_vmulh_vx: 4581 case RISCVVector::BI__builtin_rvv_vmulh_vv_tu: 4582 case RISCVVector::BI__builtin_rvv_vmulh_vx_tu: 4583 case RISCVVector::BI__builtin_rvv_vmulh_vv_m: 4584 case RISCVVector::BI__builtin_rvv_vmulh_vx_m: 4585 case RISCVVector::BI__builtin_rvv_vmulh_vv_mu: 4586 case RISCVVector::BI__builtin_rvv_vmulh_vx_mu: 4587 case RISCVVector::BI__builtin_rvv_vmulh_vv_tum: 4588 case RISCVVector::BI__builtin_rvv_vmulh_vx_tum: 4589 case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu: 4590 case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu: 4591 case 
RISCVVector::BI__builtin_rvv_vsmul_vv: 4592 case RISCVVector::BI__builtin_rvv_vsmul_vx: 4593 case RISCVVector::BI__builtin_rvv_vsmul_vv_tu: 4594 case RISCVVector::BI__builtin_rvv_vsmul_vx_tu: 4595 case RISCVVector::BI__builtin_rvv_vsmul_vv_m: 4596 case RISCVVector::BI__builtin_rvv_vsmul_vx_m: 4597 case RISCVVector::BI__builtin_rvv_vsmul_vv_mu: 4598 case RISCVVector::BI__builtin_rvv_vsmul_vx_mu: 4599 case RISCVVector::BI__builtin_rvv_vsmul_vv_tum: 4600 case RISCVVector::BI__builtin_rvv_vsmul_vx_tum: 4601 case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu: 4602 case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: { 4603 bool RequireV = false; 4604 for (unsigned ArgNum = 0; ArgNum < TheCall->getNumArgs(); ++ArgNum) 4605 RequireV |= TheCall->getArg(ArgNum)->getType()->isRVVType( 4606 /* Bitwidth */ 64, /* IsFloat */ false); 4607 4608 if (RequireV && !TI.hasFeature("v")) 4609 return Diag(TheCall->getBeginLoc(), 4610 diag::err_riscv_builtin_requires_extension) 4611 << /* IsExtension */ false << TheCall->getSourceRange() << "v"; 4612 4613 break; 4614 } 4615 } 4616 4617 switch (BuiltinID) { 4618 case RISCVVector::BI__builtin_rvv_vsetvli: 4619 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 4620 CheckRISCVLMUL(TheCall, 2); 4621 case RISCVVector::BI__builtin_rvv_vsetvlimax: 4622 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4623 CheckRISCVLMUL(TheCall, 1); 4624 case RISCVVector::BI__builtin_rvv_vget_v: { 4625 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 4626 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4627 TheCall->getType().getCanonicalType().getTypePtr())); 4628 ASTContext::BuiltinVectorTypeInfo VecInfo = 4629 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4630 TheCall->getArg(0)->getType().getCanonicalType().getTypePtr())); 4631 unsigned MaxIndex; 4632 if (VecInfo.NumVectors != 1) // vget for tuple type 4633 MaxIndex = VecInfo.NumVectors; 4634 else // vget for non-tuple type 4635 MaxIndex = (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) / 
4636 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors); 4637 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 4638 } 4639 case RISCVVector::BI__builtin_rvv_vset_v: { 4640 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 4641 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4642 TheCall->getType().getCanonicalType().getTypePtr())); 4643 ASTContext::BuiltinVectorTypeInfo VecInfo = 4644 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4645 TheCall->getArg(2)->getType().getCanonicalType().getTypePtr())); 4646 unsigned MaxIndex; 4647 if (ResVecInfo.NumVectors != 1) // vset for tuple type 4648 MaxIndex = ResVecInfo.NumVectors; 4649 else // vset fo non-tuple type 4650 MaxIndex = (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) / 4651 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors); 4652 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 4653 } 4654 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf8: 4655 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf4: 4656 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf2: 4657 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m1: 4658 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m2: 4659 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m4: 4660 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m8: 4661 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf4: 4662 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf2: 4663 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m1: 4664 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m2: 4665 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m4: 4666 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m8: 4667 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32mf2: 4668 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m1: 4669 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m2: 4670 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m4: 4671 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m8: 4672 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m1: 4673 case 
RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m2: 4674 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m4: 4675 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m8: 4676 // bit_27_26, bit_24_20, bit_11_7, simm5 4677 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4678 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) || 4679 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31) || 4680 SemaBuiltinConstantArgRange(TheCall, 3, -16, 15); 4681 case RISCVVector::BI__builtin_rvv_sf_vc_iv_se: 4682 // bit_27_26, bit_11_7, vs2, simm5 4683 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4684 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) || 4685 SemaBuiltinConstantArgRange(TheCall, 3, -16, 15); 4686 case RISCVVector::BI__builtin_rvv_sf_vc_v_i: 4687 case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se: 4688 // bit_27_26, bit_24_20, simm5 4689 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4690 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) || 4691 SemaBuiltinConstantArgRange(TheCall, 2, -16, 15); 4692 case RISCVVector::BI__builtin_rvv_sf_vc_v_iv: 4693 case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se: 4694 // bit_27_26, vs2, simm5 4695 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4696 SemaBuiltinConstantArgRange(TheCall, 2, -16, 15); 4697 case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se: 4698 case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se: 4699 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv: 4700 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw: 4701 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se: 4702 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se: 4703 // bit_27_26, vd, vs2, simm5 4704 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4705 SemaBuiltinConstantArgRange(TheCall, 3, -16, 15); 4706 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf8: 4707 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf4: 4708 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf2: 4709 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m1: 4710 case 
RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m2: 4711 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m4: 4712 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m8: 4713 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf4: 4714 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf2: 4715 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m1: 4716 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m2: 4717 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m4: 4718 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m8: 4719 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32mf2: 4720 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m1: 4721 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m2: 4722 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m4: 4723 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m8: 4724 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m1: 4725 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m2: 4726 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m4: 4727 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m8: 4728 // bit_27_26, bit_24_20, bit_11_7, xs1 4729 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4730 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) || 4731 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 4732 case RISCVVector::BI__builtin_rvv_sf_vc_xv_se: 4733 case RISCVVector::BI__builtin_rvv_sf_vc_vv_se: 4734 // bit_27_26, bit_11_7, vs2, xs1/vs1 4735 case RISCVVector::BI__builtin_rvv_sf_vc_v_x: 4736 case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se: 4737 // bit_27_26, bit_24-20, xs1 4738 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4739 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 4740 case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se: 4741 case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se: 4742 case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se: 4743 case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se: 4744 // bit_27_26, vd, vs2, xs1 4745 case RISCVVector::BI__builtin_rvv_sf_vc_v_xv: 4746 case RISCVVector::BI__builtin_rvv_sf_vc_v_vv: 4747 case 
RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se: 4748 case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se: 4749 // bit_27_26, vs2, xs1/vs1 4750 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv: 4751 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv: 4752 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw: 4753 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw: 4754 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se: 4755 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se: 4756 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se: 4757 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se: 4758 // bit_27_26, vd, vs2, xs1/vs1 4759 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 4760 case RISCVVector::BI__builtin_rvv_sf_vc_fv_se: 4761 // bit_26, bit_11_7, vs2, fs1 4762 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) || 4763 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 4764 case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se: 4765 case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se: 4766 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv: 4767 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw: 4768 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se: 4769 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se: 4770 // bit_26, vd, vs2, fs1 4771 case RISCVVector::BI__builtin_rvv_sf_vc_v_fv: 4772 case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se: 4773 // bit_26, vs2, fs1 4774 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 4775 // Check if byteselect is in [0, 3] 4776 case RISCV::BI__builtin_riscv_aes32dsi: 4777 case RISCV::BI__builtin_riscv_aes32dsmi: 4778 case RISCV::BI__builtin_riscv_aes32esi: 4779 case RISCV::BI__builtin_riscv_aes32esmi: 4780 case RISCV::BI__builtin_riscv_sm4ks: 4781 case RISCV::BI__builtin_riscv_sm4ed: 4782 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4783 // Check if rnum is in [0, 10] 4784 case RISCV::BI__builtin_riscv_aes64ks1i: 4785 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10); 4786 // Check if value range for vxrm is in [0, 3] 4787 case 
RISCVVector::BI__builtin_rvv_vaaddu_vv: 4788 case RISCVVector::BI__builtin_rvv_vaaddu_vx: 4789 case RISCVVector::BI__builtin_rvv_vaadd_vv: 4790 case RISCVVector::BI__builtin_rvv_vaadd_vx: 4791 case RISCVVector::BI__builtin_rvv_vasubu_vv: 4792 case RISCVVector::BI__builtin_rvv_vasubu_vx: 4793 case RISCVVector::BI__builtin_rvv_vasub_vv: 4794 case RISCVVector::BI__builtin_rvv_vasub_vx: 4795 case RISCVVector::BI__builtin_rvv_vsmul_vv: 4796 case RISCVVector::BI__builtin_rvv_vsmul_vx: 4797 case RISCVVector::BI__builtin_rvv_vssra_vv: 4798 case RISCVVector::BI__builtin_rvv_vssra_vx: 4799 case RISCVVector::BI__builtin_rvv_vssrl_vv: 4800 case RISCVVector::BI__builtin_rvv_vssrl_vx: 4801 case RISCVVector::BI__builtin_rvv_vnclip_wv: 4802 case RISCVVector::BI__builtin_rvv_vnclip_wx: 4803 case RISCVVector::BI__builtin_rvv_vnclipu_wv: 4804 case RISCVVector::BI__builtin_rvv_vnclipu_wx: 4805 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4806 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu: 4807 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu: 4808 case RISCVVector::BI__builtin_rvv_vaadd_vv_tu: 4809 case RISCVVector::BI__builtin_rvv_vaadd_vx_tu: 4810 case RISCVVector::BI__builtin_rvv_vasubu_vv_tu: 4811 case RISCVVector::BI__builtin_rvv_vasubu_vx_tu: 4812 case RISCVVector::BI__builtin_rvv_vasub_vv_tu: 4813 case RISCVVector::BI__builtin_rvv_vasub_vx_tu: 4814 case RISCVVector::BI__builtin_rvv_vsmul_vv_tu: 4815 case RISCVVector::BI__builtin_rvv_vsmul_vx_tu: 4816 case RISCVVector::BI__builtin_rvv_vssra_vv_tu: 4817 case RISCVVector::BI__builtin_rvv_vssra_vx_tu: 4818 case RISCVVector::BI__builtin_rvv_vssrl_vv_tu: 4819 case RISCVVector::BI__builtin_rvv_vssrl_vx_tu: 4820 case RISCVVector::BI__builtin_rvv_vnclip_wv_tu: 4821 case RISCVVector::BI__builtin_rvv_vnclip_wx_tu: 4822 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu: 4823 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu: 4824 case RISCVVector::BI__builtin_rvv_vaaddu_vv_m: 4825 case RISCVVector::BI__builtin_rvv_vaaddu_vx_m: 4826 
case RISCVVector::BI__builtin_rvv_vaadd_vv_m: 4827 case RISCVVector::BI__builtin_rvv_vaadd_vx_m: 4828 case RISCVVector::BI__builtin_rvv_vasubu_vv_m: 4829 case RISCVVector::BI__builtin_rvv_vasubu_vx_m: 4830 case RISCVVector::BI__builtin_rvv_vasub_vv_m: 4831 case RISCVVector::BI__builtin_rvv_vasub_vx_m: 4832 case RISCVVector::BI__builtin_rvv_vsmul_vv_m: 4833 case RISCVVector::BI__builtin_rvv_vsmul_vx_m: 4834 case RISCVVector::BI__builtin_rvv_vssra_vv_m: 4835 case RISCVVector::BI__builtin_rvv_vssra_vx_m: 4836 case RISCVVector::BI__builtin_rvv_vssrl_vv_m: 4837 case RISCVVector::BI__builtin_rvv_vssrl_vx_m: 4838 case RISCVVector::BI__builtin_rvv_vnclip_wv_m: 4839 case RISCVVector::BI__builtin_rvv_vnclip_wx_m: 4840 case RISCVVector::BI__builtin_rvv_vnclipu_wv_m: 4841 case RISCVVector::BI__builtin_rvv_vnclipu_wx_m: 4842 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 3); 4843 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum: 4844 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu: 4845 case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu: 4846 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum: 4847 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu: 4848 case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu: 4849 case RISCVVector::BI__builtin_rvv_vaadd_vv_tum: 4850 case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu: 4851 case RISCVVector::BI__builtin_rvv_vaadd_vv_mu: 4852 case RISCVVector::BI__builtin_rvv_vaadd_vx_tum: 4853 case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu: 4854 case RISCVVector::BI__builtin_rvv_vaadd_vx_mu: 4855 case RISCVVector::BI__builtin_rvv_vasubu_vv_tum: 4856 case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu: 4857 case RISCVVector::BI__builtin_rvv_vasubu_vv_mu: 4858 case RISCVVector::BI__builtin_rvv_vasubu_vx_tum: 4859 case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu: 4860 case RISCVVector::BI__builtin_rvv_vasubu_vx_mu: 4861 case RISCVVector::BI__builtin_rvv_vasub_vv_tum: 4862 case RISCVVector::BI__builtin_rvv_vasub_vv_tumu: 4863 case 
RISCVVector::BI__builtin_rvv_vasub_vv_mu: 4864 case RISCVVector::BI__builtin_rvv_vasub_vx_tum: 4865 case RISCVVector::BI__builtin_rvv_vasub_vx_tumu: 4866 case RISCVVector::BI__builtin_rvv_vasub_vx_mu: 4867 case RISCVVector::BI__builtin_rvv_vsmul_vv_mu: 4868 case RISCVVector::BI__builtin_rvv_vsmul_vx_mu: 4869 case RISCVVector::BI__builtin_rvv_vssra_vv_mu: 4870 case RISCVVector::BI__builtin_rvv_vssra_vx_mu: 4871 case RISCVVector::BI__builtin_rvv_vssrl_vv_mu: 4872 case RISCVVector::BI__builtin_rvv_vssrl_vx_mu: 4873 case RISCVVector::BI__builtin_rvv_vnclip_wv_mu: 4874 case RISCVVector::BI__builtin_rvv_vnclip_wx_mu: 4875 case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu: 4876 case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu: 4877 case RISCVVector::BI__builtin_rvv_vsmul_vv_tum: 4878 case RISCVVector::BI__builtin_rvv_vsmul_vx_tum: 4879 case RISCVVector::BI__builtin_rvv_vssra_vv_tum: 4880 case RISCVVector::BI__builtin_rvv_vssra_vx_tum: 4881 case RISCVVector::BI__builtin_rvv_vssrl_vv_tum: 4882 case RISCVVector::BI__builtin_rvv_vssrl_vx_tum: 4883 case RISCVVector::BI__builtin_rvv_vnclip_wv_tum: 4884 case RISCVVector::BI__builtin_rvv_vnclip_wx_tum: 4885 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum: 4886 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum: 4887 case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu: 4888 case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: 4889 case RISCVVector::BI__builtin_rvv_vssra_vv_tumu: 4890 case RISCVVector::BI__builtin_rvv_vssra_vx_tumu: 4891 case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu: 4892 case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu: 4893 case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu: 4894 case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu: 4895 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu: 4896 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu: 4897 return SemaBuiltinConstantArgRange(TheCall, 4, 0, 3); 4898 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm: 4899 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm: 4900 case 
RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm: 4901 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm: 4902 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm: 4903 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm: 4904 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm: 4905 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm: 4906 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm: 4907 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm: 4908 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm: 4909 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm: 4910 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm: 4911 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 4); 4912 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm: 4913 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm: 4914 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm: 4915 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm: 4916 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm: 4917 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm: 4918 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm: 4919 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm: 4920 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm: 4921 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm: 4922 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm: 4923 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm: 4924 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm: 4925 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm: 4926 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm: 4927 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm: 4928 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm: 4929 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm: 4930 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm: 4931 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm: 4932 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm: 4933 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm: 4934 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm: 4935 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm: 4936 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu: 
4937 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu: 4938 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu: 4939 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu: 4940 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu: 4941 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu: 4942 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu: 4943 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu: 4944 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu: 4945 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu: 4946 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu: 4947 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu: 4948 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu: 4949 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m: 4950 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m: 4951 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m: 4952 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m: 4953 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m: 4954 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m: 4955 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m: 4956 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m: 4957 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m: 4958 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m: 4959 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m: 4960 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m: 4961 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m: 4962 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 4); 4963 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu: 4964 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu: 4965 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu: 4966 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu: 4967 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu: 4968 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu: 4969 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu: 4970 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu: 4971 case 
RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu: 4972 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu: 4973 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu: 4974 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu: 4975 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu: 4976 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu: 4977 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu: 4978 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu: 4979 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu: 4980 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu: 4981 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu: 4982 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu: 4983 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu: 4984 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu: 4985 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu: 4986 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu: 4987 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm: 4988 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm: 4989 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm: 4990 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm: 4991 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm: 4992 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm: 4993 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm: 4994 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm: 4995 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm: 4996 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm: 4997 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm: 4998 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm: 4999 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm: 5000 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm: 5001 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm: 5002 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm: 5003 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm: 5004 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm: 5005 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm: 5006 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm: 5007 case 
RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm: 5008 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm: 5009 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm: 5010 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm: 5011 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu: 5012 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu: 5013 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu: 5014 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu: 5015 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu: 5016 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu: 5017 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu: 5018 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu: 5019 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu: 5020 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu: 5021 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu: 5022 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu: 5023 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu: 5024 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu: 5025 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu: 5026 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu: 5027 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu: 5028 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu: 5029 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu: 5030 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu: 5031 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu: 5032 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu: 5033 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu: 5034 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu: 5035 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m: 5036 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m: 5037 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m: 5038 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m: 5039 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m: 5040 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m: 5041 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m: 5042 case 
RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m: 5043 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m: 5044 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m: 5045 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m: 5046 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m: 5047 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m: 5048 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m: 5049 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m: 5050 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m: 5051 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m: 5052 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m: 5053 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m: 5054 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m: 5055 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m: 5056 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m: 5057 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m: 5058 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m: 5059 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum: 5060 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum: 5061 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum: 5062 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum: 5063 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum: 5064 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum: 5065 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum: 5066 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum: 5067 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum: 5068 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum: 5069 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum: 5070 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum: 5071 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum: 5072 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu: 5073 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu: 5074 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu: 5075 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu: 5076 case 
RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu: 5077 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu: 5078 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu: 5079 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu: 5080 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu: 5081 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu: 5082 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu: 5083 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu: 5084 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu: 5085 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu: 5086 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu: 5087 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu: 5088 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu: 5089 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu: 5090 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu: 5091 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu: 5092 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu: 5093 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu: 5094 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu: 5095 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu: 5096 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu: 5097 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu: 5098 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 4); 5099 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m: 5100 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m: 5101 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m: 5102 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m: 5103 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m: 5104 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m: 5105 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m: 5106 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m: 5107 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m: 5108 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m: 5109 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m: 5110 case 
RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m: 5111 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m: 5112 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m: 5113 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m: 5114 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m: 5115 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m: 5116 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m: 5117 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m: 5118 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m: 5119 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m: 5120 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m: 5121 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m: 5122 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m: 5123 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum: 5124 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum: 5125 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum: 5126 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum: 5127 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum: 5128 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum: 5129 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum: 5130 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum: 5131 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum: 5132 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum: 5133 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum: 5134 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum: 5135 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum: 5136 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum: 5137 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum: 5138 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum: 5139 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum: 5140 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum: 5141 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum: 5142 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum: 5143 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum: 5144 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum: 5145 case 
RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum: 5146 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum: 5147 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum: 5148 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum: 5149 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum: 5150 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum: 5151 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum: 5152 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum: 5153 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum: 5154 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum: 5155 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum: 5156 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum: 5157 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum: 5158 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum: 5159 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum: 5160 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum: 5161 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum: 5162 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum: 5163 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum: 5164 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum: 5165 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum: 5166 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum: 5167 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum: 5168 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum: 5169 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum: 5170 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum: 5171 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu: 5172 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu: 5173 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu: 5174 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu: 5175 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu: 5176 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu: 5177 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu: 5178 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu: 5179 case 
RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu: 5180 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu: 5181 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu: 5182 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu: 5183 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu: 5184 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu: 5185 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu: 5186 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu: 5187 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu: 5188 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu: 5189 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu: 5190 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu: 5191 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu: 5192 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu: 5193 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu: 5194 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu: 5195 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu: 5196 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu: 5197 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu: 5198 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu: 5199 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu: 5200 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu: 5201 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu: 5202 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu: 5203 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu: 5204 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu: 5205 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu: 5206 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu: 5207 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu: 5208 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu: 5209 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu: 5210 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu: 5211 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu: 5212 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu: 5213 case 
RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu: 5214 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu: 5215 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu: 5216 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu: 5217 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu: 5218 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu: 5219 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu: 5220 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu: 5221 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu: 5222 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu: 5223 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu: 5224 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu: 5225 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu: 5226 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu: 5227 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu: 5228 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu: 5229 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu: 5230 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu: 5231 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu: 5232 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu: 5233 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu: 5234 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu: 5235 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu: 5236 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu: 5237 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu: 5238 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu: 5239 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu: 5240 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu: 5241 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu: 5242 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu: 5243 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu: 5244 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu: 5245 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu: 5246 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu: 5247 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu: 5248 case 
RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu: 5249 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu: 5250 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu: 5251 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu: 5252 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu: 5253 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu: 5254 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu: 5255 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu: 5256 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu: 5257 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu: 5258 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu: 5259 return SemaBuiltinConstantArgRange(TheCall, 4, 0, 4); 5260 case RISCV::BI__builtin_riscv_ntl_load: 5261 case RISCV::BI__builtin_riscv_ntl_store: 5262 DeclRefExpr *DRE = 5263 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5264 assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store || 5265 BuiltinID == RISCV::BI__builtin_riscv_ntl_load) && 5266 "Unexpected RISC-V nontemporal load/store builtin!"); 5267 bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store; 5268 unsigned NumArgs = IsStore ? 3 : 2; 5269 5270 if (checkArgCount(*this, TheCall, NumArgs)) 5271 return true; 5272 5273 // Domain value should be compile-time constant. 
// NOTE(review): this span continues the RISC-V nontemporal load/store
// (__builtin_riscv_ntl_load/store) checking that begins earlier in
// Sema::CheckRISCVBuiltinFunctionCall; the function closes below.
  // The domain operand (the last argument) must be a compile-time constant:
  // 2 <= domain <= 5
  if (SemaBuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5))
    return true;

  // Apply the usual lvalue conversions to the pointer operand before
  // inspecting its type.
  Expr *PointerArg = TheCall->getArg(0);
  ExprResult PointerArgResult =
      DefaultFunctionArrayLvalueConversion(PointerArg);

  if (PointerArgResult.isInvalid())
    return true;
  PointerArg = PointerArgResult.get();

  // The first argument must have pointer type.
  const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>();
  if (!PtrType) {
    Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // The pointee must be an integer, pointer, block-pointer, floating-point,
  // vector, or RVV type; qualifiers are dropped for the value type.
  QualType ValType = PtrType->getPointeeType();
  ValType = ValType.getUnqualifiedType();
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
      !ValType->isVectorType() && !ValType->isRVVType()) {
    Diag(DRE->getBeginLoc(),
         diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // A nontemporal load yields the pointee type.
  if (!IsStore) {
    TheCall->setType(ValType);
    return false;
  }

  // A nontemporal store: copy-initialize the value operand to the pointee
  // type, and the call itself has type void.
  ExprResult ValArg = TheCall->getArg(1);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;

  TheCall->setArg(1, ValArg.get());
  TheCall->setType(Context.VoidTy);
  return false;
  }

  return false;
}

/// Perform semantic checking of SystemZ target builtins: validates the
/// __builtin_tabort abort code and range-checks the immediate operands of
/// the z/Architecture vector builtins.
bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (BuiltinID == SystemZ::BI__builtin_tabort) {
    Expr *Arg = TheCall->getArg(0);
    // A constant abort code in [0, 255] is diagnosed as invalid; only
    // constant codes are checkable here.
    if (std::optional<llvm::APSInt> AbortCode =
            Arg->getIntegerConstantExpr(Context))
      if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
        return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
               << Arg->getSourceRange();
  }

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // i = index of the immediate argument, [l, u] = its allowed range.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_verimb:
  case SystemZ::BI__builtin_s390_verimh:
  case SystemZ::BI__builtin_s390_verimf:
  case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
  case SystemZ::BI__builtin_s390_vfaeb:
  case SystemZ::BI__builtin_s390_vfaeh:
  case SystemZ::BI__builtin_s390_vfaef:
  case SystemZ::BI__builtin_s390_vfaebs:
  case SystemZ::BI__builtin_s390_vfaehs:
  case SystemZ::BI__builtin_s390_vfaefs:
  case SystemZ::BI__builtin_s390_vfaezb:
  case SystemZ::BI__builtin_s390_vfaezh:
  case SystemZ::BI__builtin_s390_vfaezf:
  case SystemZ::BI__builtin_s390_vfaezbs:
  case SystemZ::BI__builtin_s390_vfaezhs:
  case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfisb:
  case SystemZ::BI__builtin_s390_vfidb:
    // These take two immediates; check both here instead of using i/l/u.
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case SystemZ::BI__builtin_s390_vftcisb:
  case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
  case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vstrcb:
  case SystemZ::BI__builtin_s390_vstrch:
  case SystemZ::BI__builtin_s390_vstrcf:
  case SystemZ::BI__builtin_s390_vstrczb:
  case SystemZ::BI__builtin_s390_vstrczh:
  case SystemZ::BI__builtin_s390_vstrczf:
  case SystemZ::BI__builtin_s390_vstrcbs:
  case SystemZ::BI__builtin_s390_vstrchs:
  case SystemZ::BI__builtin_s390_vstrcfs:
  case SystemZ::BI__builtin_s390_vstrczbs:
  case SystemZ::BI__builtin_s390_vstrczhs:
  case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfminsb:
  case SystemZ::BI__builtin_s390_vfmaxsb:
  case SystemZ::BI__builtin_s390_vfmindb:
  case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vclfnhs:
  case SystemZ::BI__builtin_s390_vclfnls:
  case SystemZ::BI__builtin_s390_vcfn:
  case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}

/// Perform semantic checking of WebAssembly target builtins: each reference
/// type / table builtin is delegated to its dedicated checker.
bool Sema::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
                                               unsigned BuiltinID,
                                               CallExpr *TheCall) {
  switch (BuiltinID) {
  case WebAssembly::BI__builtin_wasm_ref_null_extern:
    return BuiltinWasmRefNullExtern(TheCall);
  case WebAssembly::BI__builtin_wasm_ref_null_func:
    return BuiltinWasmRefNullFunc(TheCall);
  case WebAssembly::BI__builtin_wasm_table_get:
    return BuiltinWasmTableGet(TheCall);
  case WebAssembly::BI__builtin_wasm_table_set:
    return BuiltinWasmTableSet(TheCall);
  case WebAssembly::BI__builtin_wasm_table_size:
    return BuiltinWasmTableSize(TheCall);
  case WebAssembly::BI__builtin_wasm_table_grow:
    return BuiltinWasmTableGrow(TheCall);
  case WebAssembly::BI__builtin_wasm_table_fill:
    return BuiltinWasmTableFill(TheCall);
  case WebAssembly::BI__builtin_wasm_table_copy:
    return BuiltinWasmTableCopy(TheCall);
  }

  return false;
}

void
Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
  // Diagnose use of an RVV (RISC-V Vector) type when the target lacks the
  // vector sub-extension that type requires.  (Return type `void` appears
  // immediately before this span.)
  const TargetInfo &TI = Context.getTargetInfo();
  // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) requires at
  // least zve64x
  if ((Ty->isRVVType(/* Bitwidth */ 64, /* IsFloat */ false) ||
       Ty->isRVVType(/* ElementCount */ 1)) &&
      !TI.hasFeature("zve64x"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
  // 16-bit float elements require zvfh.
  if (Ty->isRVVType(/* Bitwidth */ 16, /* IsFloat */ true) &&
      !TI.hasFeature("zvfh"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfh";
  // 32-bit float elements require zve32f.
  if (Ty->isRVVType(/* Bitwidth */ 32, /* IsFloat */ true) &&
      !TI.hasFeature("zve32f"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
  // 64-bit float elements require zve64d.
  if (Ty->isRVVType(/* Bitwidth */ 64, /* IsFloat */ true) &&
      !TI.hasFeature("zve64d"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
  // Given that caller already checked isRVVType() before calling this function,
  // if we don't have at least zve32x supported, then we need to emit error.
  if (!TI.hasFeature("zve32x"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
}

/// Perform semantic checking of NVPTX target builtins.  The cp.async
/// shared<-global copy builtins accept at most 3 arguments (the third is
/// optional — TODO confirm against the builtin definitions).
bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
                                         unsigned BuiltinID,
                                         CallExpr *TheCall) {
  switch (BuiltinID) {
  case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
  case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
  case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
  case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
    return checkArgCountAtMost(*this, TheCall, 3);
  }

  return false;
}

/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
/// This checks that the target supports __builtin_cpu_supports and
/// that the string argument is constant and valid.
static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI,
                                   CallExpr *TheCall) {
  Expr *Arg = TheCall->getArg(0);

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the contents of the string.  The target decides which feature
  // names are valid for __builtin_cpu_supports.
  StringRef Feature =
      cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  if (!TI.validateCpuSupports(Feature))
    return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
           << Arg->getSourceRange();
  return false;
}

/// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
/// This checks that the target supports __builtin_cpu_is and
/// that the string argument is constant and valid.
static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) {
  Expr *Arg = TheCall->getArg(0);

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the contents of the string.  The target decides which CPU names
  // are valid for __builtin_cpu_is.
  StringRef Feature =
      cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  if (!TI.validateCpuIs(Feature))
    return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
           << Arg->getSourceRange();
  return false;
}

// Check if the rounding mode is legal.
bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
  // Indicates if this instruction has rounding control or just SAE.
5500 bool HasRC = false; 5501 5502 unsigned ArgNum = 0; 5503 switch (BuiltinID) { 5504 default: 5505 return false; 5506 case X86::BI__builtin_ia32_vcvttsd2si32: 5507 case X86::BI__builtin_ia32_vcvttsd2si64: 5508 case X86::BI__builtin_ia32_vcvttsd2usi32: 5509 case X86::BI__builtin_ia32_vcvttsd2usi64: 5510 case X86::BI__builtin_ia32_vcvttss2si32: 5511 case X86::BI__builtin_ia32_vcvttss2si64: 5512 case X86::BI__builtin_ia32_vcvttss2usi32: 5513 case X86::BI__builtin_ia32_vcvttss2usi64: 5514 case X86::BI__builtin_ia32_vcvttsh2si32: 5515 case X86::BI__builtin_ia32_vcvttsh2si64: 5516 case X86::BI__builtin_ia32_vcvttsh2usi32: 5517 case X86::BI__builtin_ia32_vcvttsh2usi64: 5518 ArgNum = 1; 5519 break; 5520 case X86::BI__builtin_ia32_maxpd512: 5521 case X86::BI__builtin_ia32_maxps512: 5522 case X86::BI__builtin_ia32_minpd512: 5523 case X86::BI__builtin_ia32_minps512: 5524 case X86::BI__builtin_ia32_maxph512: 5525 case X86::BI__builtin_ia32_minph512: 5526 ArgNum = 2; 5527 break; 5528 case X86::BI__builtin_ia32_vcvtph2pd512_mask: 5529 case X86::BI__builtin_ia32_vcvtph2psx512_mask: 5530 case X86::BI__builtin_ia32_cvtps2pd512_mask: 5531 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 5532 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 5533 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 5534 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 5535 case X86::BI__builtin_ia32_cvttps2dq512_mask: 5536 case X86::BI__builtin_ia32_cvttps2qq512_mask: 5537 case X86::BI__builtin_ia32_cvttps2udq512_mask: 5538 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 5539 case X86::BI__builtin_ia32_vcvttph2w512_mask: 5540 case X86::BI__builtin_ia32_vcvttph2uw512_mask: 5541 case X86::BI__builtin_ia32_vcvttph2dq512_mask: 5542 case X86::BI__builtin_ia32_vcvttph2udq512_mask: 5543 case X86::BI__builtin_ia32_vcvttph2qq512_mask: 5544 case X86::BI__builtin_ia32_vcvttph2uqq512_mask: 5545 case X86::BI__builtin_ia32_exp2pd_mask: 5546 case X86::BI__builtin_ia32_exp2ps_mask: 5547 case 
X86::BI__builtin_ia32_getexppd512_mask: 5548 case X86::BI__builtin_ia32_getexpps512_mask: 5549 case X86::BI__builtin_ia32_getexpph512_mask: 5550 case X86::BI__builtin_ia32_rcp28pd_mask: 5551 case X86::BI__builtin_ia32_rcp28ps_mask: 5552 case X86::BI__builtin_ia32_rsqrt28pd_mask: 5553 case X86::BI__builtin_ia32_rsqrt28ps_mask: 5554 case X86::BI__builtin_ia32_vcomisd: 5555 case X86::BI__builtin_ia32_vcomiss: 5556 case X86::BI__builtin_ia32_vcomish: 5557 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 5558 ArgNum = 3; 5559 break; 5560 case X86::BI__builtin_ia32_cmppd512_mask: 5561 case X86::BI__builtin_ia32_cmpps512_mask: 5562 case X86::BI__builtin_ia32_cmpsd_mask: 5563 case X86::BI__builtin_ia32_cmpss_mask: 5564 case X86::BI__builtin_ia32_cmpsh_mask: 5565 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask: 5566 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask: 5567 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 5568 case X86::BI__builtin_ia32_getexpsd128_round_mask: 5569 case X86::BI__builtin_ia32_getexpss128_round_mask: 5570 case X86::BI__builtin_ia32_getexpsh128_round_mask: 5571 case X86::BI__builtin_ia32_getmantpd512_mask: 5572 case X86::BI__builtin_ia32_getmantps512_mask: 5573 case X86::BI__builtin_ia32_getmantph512_mask: 5574 case X86::BI__builtin_ia32_maxsd_round_mask: 5575 case X86::BI__builtin_ia32_maxss_round_mask: 5576 case X86::BI__builtin_ia32_maxsh_round_mask: 5577 case X86::BI__builtin_ia32_minsd_round_mask: 5578 case X86::BI__builtin_ia32_minss_round_mask: 5579 case X86::BI__builtin_ia32_minsh_round_mask: 5580 case X86::BI__builtin_ia32_rcp28sd_round_mask: 5581 case X86::BI__builtin_ia32_rcp28ss_round_mask: 5582 case X86::BI__builtin_ia32_reducepd512_mask: 5583 case X86::BI__builtin_ia32_reduceps512_mask: 5584 case X86::BI__builtin_ia32_reduceph512_mask: 5585 case X86::BI__builtin_ia32_rndscalepd_mask: 5586 case X86::BI__builtin_ia32_rndscaleps_mask: 5587 case X86::BI__builtin_ia32_rndscaleph_mask: 5588 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 5589 
case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 5590 ArgNum = 4; 5591 break; 5592 case X86::BI__builtin_ia32_fixupimmpd512_mask: 5593 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 5594 case X86::BI__builtin_ia32_fixupimmps512_mask: 5595 case X86::BI__builtin_ia32_fixupimmps512_maskz: 5596 case X86::BI__builtin_ia32_fixupimmsd_mask: 5597 case X86::BI__builtin_ia32_fixupimmsd_maskz: 5598 case X86::BI__builtin_ia32_fixupimmss_mask: 5599 case X86::BI__builtin_ia32_fixupimmss_maskz: 5600 case X86::BI__builtin_ia32_getmantsd_round_mask: 5601 case X86::BI__builtin_ia32_getmantss_round_mask: 5602 case X86::BI__builtin_ia32_getmantsh_round_mask: 5603 case X86::BI__builtin_ia32_rangepd512_mask: 5604 case X86::BI__builtin_ia32_rangeps512_mask: 5605 case X86::BI__builtin_ia32_rangesd128_round_mask: 5606 case X86::BI__builtin_ia32_rangess128_round_mask: 5607 case X86::BI__builtin_ia32_reducesd_mask: 5608 case X86::BI__builtin_ia32_reducess_mask: 5609 case X86::BI__builtin_ia32_reducesh_mask: 5610 case X86::BI__builtin_ia32_rndscalesd_round_mask: 5611 case X86::BI__builtin_ia32_rndscaless_round_mask: 5612 case X86::BI__builtin_ia32_rndscalesh_round_mask: 5613 ArgNum = 5; 5614 break; 5615 case X86::BI__builtin_ia32_vcvtsd2si64: 5616 case X86::BI__builtin_ia32_vcvtsd2si32: 5617 case X86::BI__builtin_ia32_vcvtsd2usi32: 5618 case X86::BI__builtin_ia32_vcvtsd2usi64: 5619 case X86::BI__builtin_ia32_vcvtss2si32: 5620 case X86::BI__builtin_ia32_vcvtss2si64: 5621 case X86::BI__builtin_ia32_vcvtss2usi32: 5622 case X86::BI__builtin_ia32_vcvtss2usi64: 5623 case X86::BI__builtin_ia32_vcvtsh2si32: 5624 case X86::BI__builtin_ia32_vcvtsh2si64: 5625 case X86::BI__builtin_ia32_vcvtsh2usi32: 5626 case X86::BI__builtin_ia32_vcvtsh2usi64: 5627 case X86::BI__builtin_ia32_sqrtpd512: 5628 case X86::BI__builtin_ia32_sqrtps512: 5629 case X86::BI__builtin_ia32_sqrtph512: 5630 ArgNum = 1; 5631 HasRC = true; 5632 break; 5633 case X86::BI__builtin_ia32_addph512: 5634 case X86::BI__builtin_ia32_divph512: 
5635 case X86::BI__builtin_ia32_mulph512: 5636 case X86::BI__builtin_ia32_subph512: 5637 case X86::BI__builtin_ia32_addpd512: 5638 case X86::BI__builtin_ia32_addps512: 5639 case X86::BI__builtin_ia32_divpd512: 5640 case X86::BI__builtin_ia32_divps512: 5641 case X86::BI__builtin_ia32_mulpd512: 5642 case X86::BI__builtin_ia32_mulps512: 5643 case X86::BI__builtin_ia32_subpd512: 5644 case X86::BI__builtin_ia32_subps512: 5645 case X86::BI__builtin_ia32_cvtsi2sd64: 5646 case X86::BI__builtin_ia32_cvtsi2ss32: 5647 case X86::BI__builtin_ia32_cvtsi2ss64: 5648 case X86::BI__builtin_ia32_cvtusi2sd64: 5649 case X86::BI__builtin_ia32_cvtusi2ss32: 5650 case X86::BI__builtin_ia32_cvtusi2ss64: 5651 case X86::BI__builtin_ia32_vcvtusi2sh: 5652 case X86::BI__builtin_ia32_vcvtusi642sh: 5653 case X86::BI__builtin_ia32_vcvtsi2sh: 5654 case X86::BI__builtin_ia32_vcvtsi642sh: 5655 ArgNum = 2; 5656 HasRC = true; 5657 break; 5658 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 5659 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 5660 case X86::BI__builtin_ia32_vcvtpd2ph512_mask: 5661 case X86::BI__builtin_ia32_vcvtps2phx512_mask: 5662 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 5663 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 5664 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 5665 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 5666 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 5667 case X86::BI__builtin_ia32_cvtps2dq512_mask: 5668 case X86::BI__builtin_ia32_cvtps2qq512_mask: 5669 case X86::BI__builtin_ia32_cvtps2udq512_mask: 5670 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 5671 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 5672 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 5673 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 5674 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 5675 case X86::BI__builtin_ia32_vcvtdq2ph512_mask: 5676 case X86::BI__builtin_ia32_vcvtudq2ph512_mask: 5677 case X86::BI__builtin_ia32_vcvtw2ph512_mask: 5678 case X86::BI__builtin_ia32_vcvtuw2ph512_mask: 5679 case 
X86::BI__builtin_ia32_vcvtph2w512_mask: 5680 case X86::BI__builtin_ia32_vcvtph2uw512_mask: 5681 case X86::BI__builtin_ia32_vcvtph2dq512_mask: 5682 case X86::BI__builtin_ia32_vcvtph2udq512_mask: 5683 case X86::BI__builtin_ia32_vcvtph2qq512_mask: 5684 case X86::BI__builtin_ia32_vcvtph2uqq512_mask: 5685 case X86::BI__builtin_ia32_vcvtqq2ph512_mask: 5686 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: 5687 ArgNum = 3; 5688 HasRC = true; 5689 break; 5690 case X86::BI__builtin_ia32_addsh_round_mask: 5691 case X86::BI__builtin_ia32_addss_round_mask: 5692 case X86::BI__builtin_ia32_addsd_round_mask: 5693 case X86::BI__builtin_ia32_divsh_round_mask: 5694 case X86::BI__builtin_ia32_divss_round_mask: 5695 case X86::BI__builtin_ia32_divsd_round_mask: 5696 case X86::BI__builtin_ia32_mulsh_round_mask: 5697 case X86::BI__builtin_ia32_mulss_round_mask: 5698 case X86::BI__builtin_ia32_mulsd_round_mask: 5699 case X86::BI__builtin_ia32_subsh_round_mask: 5700 case X86::BI__builtin_ia32_subss_round_mask: 5701 case X86::BI__builtin_ia32_subsd_round_mask: 5702 case X86::BI__builtin_ia32_scalefph512_mask: 5703 case X86::BI__builtin_ia32_scalefpd512_mask: 5704 case X86::BI__builtin_ia32_scalefps512_mask: 5705 case X86::BI__builtin_ia32_scalefsd_round_mask: 5706 case X86::BI__builtin_ia32_scalefss_round_mask: 5707 case X86::BI__builtin_ia32_scalefsh_round_mask: 5708 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 5709 case X86::BI__builtin_ia32_vcvtss2sh_round_mask: 5710 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask: 5711 case X86::BI__builtin_ia32_sqrtsd_round_mask: 5712 case X86::BI__builtin_ia32_sqrtss_round_mask: 5713 case X86::BI__builtin_ia32_sqrtsh_round_mask: 5714 case X86::BI__builtin_ia32_vfmaddsd3_mask: 5715 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 5716 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 5717 case X86::BI__builtin_ia32_vfmaddss3_mask: 5718 case X86::BI__builtin_ia32_vfmaddss3_maskz: 5719 case X86::BI__builtin_ia32_vfmaddss3_mask3: 5720 case 
X86::BI__builtin_ia32_vfmaddsh3_mask: 5721 case X86::BI__builtin_ia32_vfmaddsh3_maskz: 5722 case X86::BI__builtin_ia32_vfmaddsh3_mask3: 5723 case X86::BI__builtin_ia32_vfmaddpd512_mask: 5724 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 5725 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 5726 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 5727 case X86::BI__builtin_ia32_vfmaddps512_mask: 5728 case X86::BI__builtin_ia32_vfmaddps512_maskz: 5729 case X86::BI__builtin_ia32_vfmaddps512_mask3: 5730 case X86::BI__builtin_ia32_vfmsubps512_mask3: 5731 case X86::BI__builtin_ia32_vfmaddph512_mask: 5732 case X86::BI__builtin_ia32_vfmaddph512_maskz: 5733 case X86::BI__builtin_ia32_vfmaddph512_mask3: 5734 case X86::BI__builtin_ia32_vfmsubph512_mask3: 5735 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 5736 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 5737 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 5738 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 5739 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 5740 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 5741 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 5742 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 5743 case X86::BI__builtin_ia32_vfmaddsubph512_mask: 5744 case X86::BI__builtin_ia32_vfmaddsubph512_maskz: 5745 case X86::BI__builtin_ia32_vfmaddsubph512_mask3: 5746 case X86::BI__builtin_ia32_vfmsubaddph512_mask3: 5747 case X86::BI__builtin_ia32_vfmaddcsh_mask: 5748 case X86::BI__builtin_ia32_vfmaddcsh_round_mask: 5749 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: 5750 case X86::BI__builtin_ia32_vfmaddcph512_mask: 5751 case X86::BI__builtin_ia32_vfmaddcph512_maskz: 5752 case X86::BI__builtin_ia32_vfmaddcph512_mask3: 5753 case X86::BI__builtin_ia32_vfcmaddcsh_mask: 5754 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask: 5755 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3: 5756 case X86::BI__builtin_ia32_vfcmaddcph512_mask: 5757 case X86::BI__builtin_ia32_vfcmaddcph512_maskz: 5758 case 
X86::BI__builtin_ia32_vfcmaddcph512_mask3: 5759 case X86::BI__builtin_ia32_vfmulcsh_mask: 5760 case X86::BI__builtin_ia32_vfmulcph512_mask: 5761 case X86::BI__builtin_ia32_vfcmulcsh_mask: 5762 case X86::BI__builtin_ia32_vfcmulcph512_mask: 5763 ArgNum = 4; 5764 HasRC = true; 5765 break; 5766 } 5767 5768 llvm::APSInt Result; 5769 5770 // We can't check the value of a dependent argument. 5771 Expr *Arg = TheCall->getArg(ArgNum); 5772 if (Arg->isTypeDependent() || Arg->isValueDependent()) 5773 return false; 5774 5775 // Check constant-ness first. 5776 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 5777 return true; 5778 5779 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 5780 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 5781 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding 5782 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 5783 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 5784 Result == 8/*ROUND_NO_EXC*/ || 5785 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) || 5786 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 5787 return false; 5788 5789 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 5790 << Arg->getSourceRange(); 5791 } 5792 5793 // Check if the gather/scatter scale is legal. 
5794 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 5795 CallExpr *TheCall) { 5796 unsigned ArgNum = 0; 5797 switch (BuiltinID) { 5798 default: 5799 return false; 5800 case X86::BI__builtin_ia32_gatherpfdpd: 5801 case X86::BI__builtin_ia32_gatherpfdps: 5802 case X86::BI__builtin_ia32_gatherpfqpd: 5803 case X86::BI__builtin_ia32_gatherpfqps: 5804 case X86::BI__builtin_ia32_scatterpfdpd: 5805 case X86::BI__builtin_ia32_scatterpfdps: 5806 case X86::BI__builtin_ia32_scatterpfqpd: 5807 case X86::BI__builtin_ia32_scatterpfqps: 5808 ArgNum = 3; 5809 break; 5810 case X86::BI__builtin_ia32_gatherd_pd: 5811 case X86::BI__builtin_ia32_gatherd_pd256: 5812 case X86::BI__builtin_ia32_gatherq_pd: 5813 case X86::BI__builtin_ia32_gatherq_pd256: 5814 case X86::BI__builtin_ia32_gatherd_ps: 5815 case X86::BI__builtin_ia32_gatherd_ps256: 5816 case X86::BI__builtin_ia32_gatherq_ps: 5817 case X86::BI__builtin_ia32_gatherq_ps256: 5818 case X86::BI__builtin_ia32_gatherd_q: 5819 case X86::BI__builtin_ia32_gatherd_q256: 5820 case X86::BI__builtin_ia32_gatherq_q: 5821 case X86::BI__builtin_ia32_gatherq_q256: 5822 case X86::BI__builtin_ia32_gatherd_d: 5823 case X86::BI__builtin_ia32_gatherd_d256: 5824 case X86::BI__builtin_ia32_gatherq_d: 5825 case X86::BI__builtin_ia32_gatherq_d256: 5826 case X86::BI__builtin_ia32_gather3div2df: 5827 case X86::BI__builtin_ia32_gather3div2di: 5828 case X86::BI__builtin_ia32_gather3div4df: 5829 case X86::BI__builtin_ia32_gather3div4di: 5830 case X86::BI__builtin_ia32_gather3div4sf: 5831 case X86::BI__builtin_ia32_gather3div4si: 5832 case X86::BI__builtin_ia32_gather3div8sf: 5833 case X86::BI__builtin_ia32_gather3div8si: 5834 case X86::BI__builtin_ia32_gather3siv2df: 5835 case X86::BI__builtin_ia32_gather3siv2di: 5836 case X86::BI__builtin_ia32_gather3siv4df: 5837 case X86::BI__builtin_ia32_gather3siv4di: 5838 case X86::BI__builtin_ia32_gather3siv4sf: 5839 case X86::BI__builtin_ia32_gather3siv4si: 5840 case X86::BI__builtin_ia32_gather3siv8sf: 
5841 case X86::BI__builtin_ia32_gather3siv8si: 5842 case X86::BI__builtin_ia32_gathersiv8df: 5843 case X86::BI__builtin_ia32_gathersiv16sf: 5844 case X86::BI__builtin_ia32_gatherdiv8df: 5845 case X86::BI__builtin_ia32_gatherdiv16sf: 5846 case X86::BI__builtin_ia32_gathersiv8di: 5847 case X86::BI__builtin_ia32_gathersiv16si: 5848 case X86::BI__builtin_ia32_gatherdiv8di: 5849 case X86::BI__builtin_ia32_gatherdiv16si: 5850 case X86::BI__builtin_ia32_scatterdiv2df: 5851 case X86::BI__builtin_ia32_scatterdiv2di: 5852 case X86::BI__builtin_ia32_scatterdiv4df: 5853 case X86::BI__builtin_ia32_scatterdiv4di: 5854 case X86::BI__builtin_ia32_scatterdiv4sf: 5855 case X86::BI__builtin_ia32_scatterdiv4si: 5856 case X86::BI__builtin_ia32_scatterdiv8sf: 5857 case X86::BI__builtin_ia32_scatterdiv8si: 5858 case X86::BI__builtin_ia32_scattersiv2df: 5859 case X86::BI__builtin_ia32_scattersiv2di: 5860 case X86::BI__builtin_ia32_scattersiv4df: 5861 case X86::BI__builtin_ia32_scattersiv4di: 5862 case X86::BI__builtin_ia32_scattersiv4sf: 5863 case X86::BI__builtin_ia32_scattersiv4si: 5864 case X86::BI__builtin_ia32_scattersiv8sf: 5865 case X86::BI__builtin_ia32_scattersiv8si: 5866 case X86::BI__builtin_ia32_scattersiv8df: 5867 case X86::BI__builtin_ia32_scattersiv16sf: 5868 case X86::BI__builtin_ia32_scatterdiv8df: 5869 case X86::BI__builtin_ia32_scatterdiv16sf: 5870 case X86::BI__builtin_ia32_scattersiv8di: 5871 case X86::BI__builtin_ia32_scattersiv16si: 5872 case X86::BI__builtin_ia32_scatterdiv8di: 5873 case X86::BI__builtin_ia32_scatterdiv16si: 5874 ArgNum = 4; 5875 break; 5876 } 5877 5878 llvm::APSInt Result; 5879 5880 // We can't check the value of a dependent argument. 5881 Expr *Arg = TheCall->getArg(ArgNum); 5882 if (Arg->isTypeDependent() || Arg->isValueDependent()) 5883 return false; 5884 5885 // Check constant-ness first. 
5886 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 5887 return true; 5888 5889 if (Result == 1 || Result == 2 || Result == 4 || Result == 8) 5890 return false; 5891 5892 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale) 5893 << Arg->getSourceRange(); 5894 } 5895 5896 enum { TileRegLow = 0, TileRegHigh = 7 }; 5897 5898 bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, 5899 ArrayRef<int> ArgNums) { 5900 for (int ArgNum : ArgNums) { 5901 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh)) 5902 return true; 5903 } 5904 return false; 5905 } 5906 5907 bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall, 5908 ArrayRef<int> ArgNums) { 5909 // Because the max number of tile register is TileRegHigh + 1, so here we use 5910 // each bit to represent the usage of them in bitset. 5911 std::bitset<TileRegHigh + 1> ArgValues; 5912 for (int ArgNum : ArgNums) { 5913 Expr *Arg = TheCall->getArg(ArgNum); 5914 if (Arg->isTypeDependent() || Arg->isValueDependent()) 5915 continue; 5916 5917 llvm::APSInt Result; 5918 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 5919 return true; 5920 int ArgExtValue = Result.getExtValue(); 5921 assert((ArgExtValue >= TileRegLow || ArgExtValue <= TileRegHigh) && 5922 "Incorrect tile register num."); 5923 if (ArgValues.test(ArgExtValue)) 5924 return Diag(TheCall->getBeginLoc(), 5925 diag::err_x86_builtin_tile_arg_duplicate) 5926 << TheCall->getArg(ArgNum)->getSourceRange(); 5927 ArgValues.set(ArgExtValue); 5928 } 5929 return false; 5930 } 5931 5932 bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, 5933 ArrayRef<int> ArgNums) { 5934 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) || 5935 CheckX86BuiltinTileDuplicate(TheCall, ArgNums); 5936 } 5937 5938 bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) { 5939 switch (BuiltinID) { 5940 default: 5941 return false; 5942 case X86::BI__builtin_ia32_tileloadd64: 5943 case 
X86::BI__builtin_ia32_tileloaddt164: 5944 case X86::BI__builtin_ia32_tilestored64: 5945 case X86::BI__builtin_ia32_tilezero: 5946 return CheckX86BuiltinTileArgumentsRange(TheCall, 0); 5947 case X86::BI__builtin_ia32_tdpbssd: 5948 case X86::BI__builtin_ia32_tdpbsud: 5949 case X86::BI__builtin_ia32_tdpbusd: 5950 case X86::BI__builtin_ia32_tdpbuud: 5951 case X86::BI__builtin_ia32_tdpbf16ps: 5952 case X86::BI__builtin_ia32_tdpfp16ps: 5953 case X86::BI__builtin_ia32_tcmmimfp16ps: 5954 case X86::BI__builtin_ia32_tcmmrlfp16ps: 5955 return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2}); 5956 } 5957 } 5958 static bool isX86_32Builtin(unsigned BuiltinID) { 5959 // These builtins only work on x86-32 targets. 5960 switch (BuiltinID) { 5961 case X86::BI__builtin_ia32_readeflags_u32: 5962 case X86::BI__builtin_ia32_writeeflags_u32: 5963 return true; 5964 } 5965 5966 return false; 5967 } 5968 5969 bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 5970 CallExpr *TheCall) { 5971 if (BuiltinID == X86::BI__builtin_cpu_supports) 5972 return SemaBuiltinCpuSupports(*this, TI, TheCall); 5973 5974 if (BuiltinID == X86::BI__builtin_cpu_is) 5975 return SemaBuiltinCpuIs(*this, TI, TheCall); 5976 5977 // Check for 32-bit only builtins on a 64-bit target. 5978 const llvm::Triple &TT = TI.getTriple(); 5979 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID)) 5980 return Diag(TheCall->getCallee()->getBeginLoc(), 5981 diag::err_32_bit_builtin_64_bit_tgt); 5982 5983 // If the intrinsic has rounding or SAE make sure its valid. 5984 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) 5985 return true; 5986 5987 // If the intrinsic has a gather/scatter scale immediate make sure its valid. 5988 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) 5989 return true; 5990 5991 // If the intrinsic has a tile arguments, make sure they are valid. 
5992 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall)) 5993 return true; 5994 5995 // For intrinsics which take an immediate value as part of the instruction, 5996 // range check them here. 5997 int i = 0, l = 0, u = 0; 5998 switch (BuiltinID) { 5999 default: 6000 return false; 6001 case X86::BI__builtin_ia32_vec_ext_v2si: 6002 case X86::BI__builtin_ia32_vec_ext_v2di: 6003 case X86::BI__builtin_ia32_vextractf128_pd256: 6004 case X86::BI__builtin_ia32_vextractf128_ps256: 6005 case X86::BI__builtin_ia32_vextractf128_si256: 6006 case X86::BI__builtin_ia32_extract128i256: 6007 case X86::BI__builtin_ia32_extractf64x4_mask: 6008 case X86::BI__builtin_ia32_extracti64x4_mask: 6009 case X86::BI__builtin_ia32_extractf32x8_mask: 6010 case X86::BI__builtin_ia32_extracti32x8_mask: 6011 case X86::BI__builtin_ia32_extractf64x2_256_mask: 6012 case X86::BI__builtin_ia32_extracti64x2_256_mask: 6013 case X86::BI__builtin_ia32_extractf32x4_256_mask: 6014 case X86::BI__builtin_ia32_extracti32x4_256_mask: 6015 i = 1; l = 0; u = 1; 6016 break; 6017 case X86::BI__builtin_ia32_vec_set_v2di: 6018 case X86::BI__builtin_ia32_vinsertf128_pd256: 6019 case X86::BI__builtin_ia32_vinsertf128_ps256: 6020 case X86::BI__builtin_ia32_vinsertf128_si256: 6021 case X86::BI__builtin_ia32_insert128i256: 6022 case X86::BI__builtin_ia32_insertf32x8: 6023 case X86::BI__builtin_ia32_inserti32x8: 6024 case X86::BI__builtin_ia32_insertf64x4: 6025 case X86::BI__builtin_ia32_inserti64x4: 6026 case X86::BI__builtin_ia32_insertf64x2_256: 6027 case X86::BI__builtin_ia32_inserti64x2_256: 6028 case X86::BI__builtin_ia32_insertf32x4_256: 6029 case X86::BI__builtin_ia32_inserti32x4_256: 6030 i = 2; l = 0; u = 1; 6031 break; 6032 case X86::BI__builtin_ia32_vpermilpd: 6033 case X86::BI__builtin_ia32_vec_ext_v4hi: 6034 case X86::BI__builtin_ia32_vec_ext_v4si: 6035 case X86::BI__builtin_ia32_vec_ext_v4sf: 6036 case X86::BI__builtin_ia32_vec_ext_v4di: 6037 case X86::BI__builtin_ia32_extractf32x4_mask: 6038 case 
X86::BI__builtin_ia32_extracti32x4_mask: 6039 case X86::BI__builtin_ia32_extractf64x2_512_mask: 6040 case X86::BI__builtin_ia32_extracti64x2_512_mask: 6041 i = 1; l = 0; u = 3; 6042 break; 6043 case X86::BI_mm_prefetch: 6044 case X86::BI__builtin_ia32_vec_ext_v8hi: 6045 case X86::BI__builtin_ia32_vec_ext_v8si: 6046 i = 1; l = 0; u = 7; 6047 break; 6048 case X86::BI__builtin_ia32_sha1rnds4: 6049 case X86::BI__builtin_ia32_blendpd: 6050 case X86::BI__builtin_ia32_shufpd: 6051 case X86::BI__builtin_ia32_vec_set_v4hi: 6052 case X86::BI__builtin_ia32_vec_set_v4si: 6053 case X86::BI__builtin_ia32_vec_set_v4di: 6054 case X86::BI__builtin_ia32_shuf_f32x4_256: 6055 case X86::BI__builtin_ia32_shuf_f64x2_256: 6056 case X86::BI__builtin_ia32_shuf_i32x4_256: 6057 case X86::BI__builtin_ia32_shuf_i64x2_256: 6058 case X86::BI__builtin_ia32_insertf64x2_512: 6059 case X86::BI__builtin_ia32_inserti64x2_512: 6060 case X86::BI__builtin_ia32_insertf32x4: 6061 case X86::BI__builtin_ia32_inserti32x4: 6062 i = 2; l = 0; u = 3; 6063 break; 6064 case X86::BI__builtin_ia32_vpermil2pd: 6065 case X86::BI__builtin_ia32_vpermil2pd256: 6066 case X86::BI__builtin_ia32_vpermil2ps: 6067 case X86::BI__builtin_ia32_vpermil2ps256: 6068 i = 3; l = 0; u = 3; 6069 break; 6070 case X86::BI__builtin_ia32_cmpb128_mask: 6071 case X86::BI__builtin_ia32_cmpw128_mask: 6072 case X86::BI__builtin_ia32_cmpd128_mask: 6073 case X86::BI__builtin_ia32_cmpq128_mask: 6074 case X86::BI__builtin_ia32_cmpb256_mask: 6075 case X86::BI__builtin_ia32_cmpw256_mask: 6076 case X86::BI__builtin_ia32_cmpd256_mask: 6077 case X86::BI__builtin_ia32_cmpq256_mask: 6078 case X86::BI__builtin_ia32_cmpb512_mask: 6079 case X86::BI__builtin_ia32_cmpw512_mask: 6080 case X86::BI__builtin_ia32_cmpd512_mask: 6081 case X86::BI__builtin_ia32_cmpq512_mask: 6082 case X86::BI__builtin_ia32_ucmpb128_mask: 6083 case X86::BI__builtin_ia32_ucmpw128_mask: 6084 case X86::BI__builtin_ia32_ucmpd128_mask: 6085 case X86::BI__builtin_ia32_ucmpq128_mask: 6086 case 
X86::BI__builtin_ia32_ucmpb256_mask: 6087 case X86::BI__builtin_ia32_ucmpw256_mask: 6088 case X86::BI__builtin_ia32_ucmpd256_mask: 6089 case X86::BI__builtin_ia32_ucmpq256_mask: 6090 case X86::BI__builtin_ia32_ucmpb512_mask: 6091 case X86::BI__builtin_ia32_ucmpw512_mask: 6092 case X86::BI__builtin_ia32_ucmpd512_mask: 6093 case X86::BI__builtin_ia32_ucmpq512_mask: 6094 case X86::BI__builtin_ia32_vpcomub: 6095 case X86::BI__builtin_ia32_vpcomuw: 6096 case X86::BI__builtin_ia32_vpcomud: 6097 case X86::BI__builtin_ia32_vpcomuq: 6098 case X86::BI__builtin_ia32_vpcomb: 6099 case X86::BI__builtin_ia32_vpcomw: 6100 case X86::BI__builtin_ia32_vpcomd: 6101 case X86::BI__builtin_ia32_vpcomq: 6102 case X86::BI__builtin_ia32_vec_set_v8hi: 6103 case X86::BI__builtin_ia32_vec_set_v8si: 6104 i = 2; l = 0; u = 7; 6105 break; 6106 case X86::BI__builtin_ia32_vpermilpd256: 6107 case X86::BI__builtin_ia32_roundps: 6108 case X86::BI__builtin_ia32_roundpd: 6109 case X86::BI__builtin_ia32_roundps256: 6110 case X86::BI__builtin_ia32_roundpd256: 6111 case X86::BI__builtin_ia32_getmantpd128_mask: 6112 case X86::BI__builtin_ia32_getmantpd256_mask: 6113 case X86::BI__builtin_ia32_getmantps128_mask: 6114 case X86::BI__builtin_ia32_getmantps256_mask: 6115 case X86::BI__builtin_ia32_getmantpd512_mask: 6116 case X86::BI__builtin_ia32_getmantps512_mask: 6117 case X86::BI__builtin_ia32_getmantph128_mask: 6118 case X86::BI__builtin_ia32_getmantph256_mask: 6119 case X86::BI__builtin_ia32_getmantph512_mask: 6120 case X86::BI__builtin_ia32_vec_ext_v16qi: 6121 case X86::BI__builtin_ia32_vec_ext_v16hi: 6122 i = 1; l = 0; u = 15; 6123 break; 6124 case X86::BI__builtin_ia32_pblendd128: 6125 case X86::BI__builtin_ia32_blendps: 6126 case X86::BI__builtin_ia32_blendpd256: 6127 case X86::BI__builtin_ia32_shufpd256: 6128 case X86::BI__builtin_ia32_roundss: 6129 case X86::BI__builtin_ia32_roundsd: 6130 case X86::BI__builtin_ia32_rangepd128_mask: 6131 case X86::BI__builtin_ia32_rangepd256_mask: 6132 case 
X86::BI__builtin_ia32_rangepd512_mask: 6133 case X86::BI__builtin_ia32_rangeps128_mask: 6134 case X86::BI__builtin_ia32_rangeps256_mask: 6135 case X86::BI__builtin_ia32_rangeps512_mask: 6136 case X86::BI__builtin_ia32_getmantsd_round_mask: 6137 case X86::BI__builtin_ia32_getmantss_round_mask: 6138 case X86::BI__builtin_ia32_getmantsh_round_mask: 6139 case X86::BI__builtin_ia32_vec_set_v16qi: 6140 case X86::BI__builtin_ia32_vec_set_v16hi: 6141 i = 2; l = 0; u = 15; 6142 break; 6143 case X86::BI__builtin_ia32_vec_ext_v32qi: 6144 i = 1; l = 0; u = 31; 6145 break; 6146 case X86::BI__builtin_ia32_cmpps: 6147 case X86::BI__builtin_ia32_cmpss: 6148 case X86::BI__builtin_ia32_cmppd: 6149 case X86::BI__builtin_ia32_cmpsd: 6150 case X86::BI__builtin_ia32_cmpps256: 6151 case X86::BI__builtin_ia32_cmppd256: 6152 case X86::BI__builtin_ia32_cmpps128_mask: 6153 case X86::BI__builtin_ia32_cmppd128_mask: 6154 case X86::BI__builtin_ia32_cmpps256_mask: 6155 case X86::BI__builtin_ia32_cmppd256_mask: 6156 case X86::BI__builtin_ia32_cmpps512_mask: 6157 case X86::BI__builtin_ia32_cmppd512_mask: 6158 case X86::BI__builtin_ia32_cmpsd_mask: 6159 case X86::BI__builtin_ia32_cmpss_mask: 6160 case X86::BI__builtin_ia32_vec_set_v32qi: 6161 i = 2; l = 0; u = 31; 6162 break; 6163 case X86::BI__builtin_ia32_permdf256: 6164 case X86::BI__builtin_ia32_permdi256: 6165 case X86::BI__builtin_ia32_permdf512: 6166 case X86::BI__builtin_ia32_permdi512: 6167 case X86::BI__builtin_ia32_vpermilps: 6168 case X86::BI__builtin_ia32_vpermilps256: 6169 case X86::BI__builtin_ia32_vpermilpd512: 6170 case X86::BI__builtin_ia32_vpermilps512: 6171 case X86::BI__builtin_ia32_pshufd: 6172 case X86::BI__builtin_ia32_pshufd256: 6173 case X86::BI__builtin_ia32_pshufd512: 6174 case X86::BI__builtin_ia32_pshufhw: 6175 case X86::BI__builtin_ia32_pshufhw256: 6176 case X86::BI__builtin_ia32_pshufhw512: 6177 case X86::BI__builtin_ia32_pshuflw: 6178 case X86::BI__builtin_ia32_pshuflw256: 6179 case X86::BI__builtin_ia32_pshuflw512: 
6180 case X86::BI__builtin_ia32_vcvtps2ph: 6181 case X86::BI__builtin_ia32_vcvtps2ph_mask: 6182 case X86::BI__builtin_ia32_vcvtps2ph256: 6183 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 6184 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 6185 case X86::BI__builtin_ia32_rndscaleps_128_mask: 6186 case X86::BI__builtin_ia32_rndscalepd_128_mask: 6187 case X86::BI__builtin_ia32_rndscaleps_256_mask: 6188 case X86::BI__builtin_ia32_rndscalepd_256_mask: 6189 case X86::BI__builtin_ia32_rndscaleps_mask: 6190 case X86::BI__builtin_ia32_rndscalepd_mask: 6191 case X86::BI__builtin_ia32_rndscaleph_mask: 6192 case X86::BI__builtin_ia32_reducepd128_mask: 6193 case X86::BI__builtin_ia32_reducepd256_mask: 6194 case X86::BI__builtin_ia32_reducepd512_mask: 6195 case X86::BI__builtin_ia32_reduceps128_mask: 6196 case X86::BI__builtin_ia32_reduceps256_mask: 6197 case X86::BI__builtin_ia32_reduceps512_mask: 6198 case X86::BI__builtin_ia32_reduceph128_mask: 6199 case X86::BI__builtin_ia32_reduceph256_mask: 6200 case X86::BI__builtin_ia32_reduceph512_mask: 6201 case X86::BI__builtin_ia32_prold512: 6202 case X86::BI__builtin_ia32_prolq512: 6203 case X86::BI__builtin_ia32_prold128: 6204 case X86::BI__builtin_ia32_prold256: 6205 case X86::BI__builtin_ia32_prolq128: 6206 case X86::BI__builtin_ia32_prolq256: 6207 case X86::BI__builtin_ia32_prord512: 6208 case X86::BI__builtin_ia32_prorq512: 6209 case X86::BI__builtin_ia32_prord128: 6210 case X86::BI__builtin_ia32_prord256: 6211 case X86::BI__builtin_ia32_prorq128: 6212 case X86::BI__builtin_ia32_prorq256: 6213 case X86::BI__builtin_ia32_fpclasspd128_mask: 6214 case X86::BI__builtin_ia32_fpclasspd256_mask: 6215 case X86::BI__builtin_ia32_fpclassps128_mask: 6216 case X86::BI__builtin_ia32_fpclassps256_mask: 6217 case X86::BI__builtin_ia32_fpclassps512_mask: 6218 case X86::BI__builtin_ia32_fpclasspd512_mask: 6219 case X86::BI__builtin_ia32_fpclassph128_mask: 6220 case X86::BI__builtin_ia32_fpclassph256_mask: 6221 case 
X86::BI__builtin_ia32_fpclassph512_mask: 6222 case X86::BI__builtin_ia32_fpclasssd_mask: 6223 case X86::BI__builtin_ia32_fpclassss_mask: 6224 case X86::BI__builtin_ia32_fpclasssh_mask: 6225 case X86::BI__builtin_ia32_pslldqi128_byteshift: 6226 case X86::BI__builtin_ia32_pslldqi256_byteshift: 6227 case X86::BI__builtin_ia32_pslldqi512_byteshift: 6228 case X86::BI__builtin_ia32_psrldqi128_byteshift: 6229 case X86::BI__builtin_ia32_psrldqi256_byteshift: 6230 case X86::BI__builtin_ia32_psrldqi512_byteshift: 6231 case X86::BI__builtin_ia32_kshiftliqi: 6232 case X86::BI__builtin_ia32_kshiftlihi: 6233 case X86::BI__builtin_ia32_kshiftlisi: 6234 case X86::BI__builtin_ia32_kshiftlidi: 6235 case X86::BI__builtin_ia32_kshiftriqi: 6236 case X86::BI__builtin_ia32_kshiftrihi: 6237 case X86::BI__builtin_ia32_kshiftrisi: 6238 case X86::BI__builtin_ia32_kshiftridi: 6239 i = 1; l = 0; u = 255; 6240 break; 6241 case X86::BI__builtin_ia32_vperm2f128_pd256: 6242 case X86::BI__builtin_ia32_vperm2f128_ps256: 6243 case X86::BI__builtin_ia32_vperm2f128_si256: 6244 case X86::BI__builtin_ia32_permti256: 6245 case X86::BI__builtin_ia32_pblendw128: 6246 case X86::BI__builtin_ia32_pblendw256: 6247 case X86::BI__builtin_ia32_blendps256: 6248 case X86::BI__builtin_ia32_pblendd256: 6249 case X86::BI__builtin_ia32_palignr128: 6250 case X86::BI__builtin_ia32_palignr256: 6251 case X86::BI__builtin_ia32_palignr512: 6252 case X86::BI__builtin_ia32_alignq512: 6253 case X86::BI__builtin_ia32_alignd512: 6254 case X86::BI__builtin_ia32_alignd128: 6255 case X86::BI__builtin_ia32_alignd256: 6256 case X86::BI__builtin_ia32_alignq128: 6257 case X86::BI__builtin_ia32_alignq256: 6258 case X86::BI__builtin_ia32_vcomisd: 6259 case X86::BI__builtin_ia32_vcomiss: 6260 case X86::BI__builtin_ia32_shuf_f32x4: 6261 case X86::BI__builtin_ia32_shuf_f64x2: 6262 case X86::BI__builtin_ia32_shuf_i32x4: 6263 case X86::BI__builtin_ia32_shuf_i64x2: 6264 case X86::BI__builtin_ia32_shufpd512: 6265 case 
X86::BI__builtin_ia32_shufps: 6266 case X86::BI__builtin_ia32_shufps256: 6267 case X86::BI__builtin_ia32_shufps512: 6268 case X86::BI__builtin_ia32_dbpsadbw128: 6269 case X86::BI__builtin_ia32_dbpsadbw256: 6270 case X86::BI__builtin_ia32_dbpsadbw512: 6271 case X86::BI__builtin_ia32_vpshldd128: 6272 case X86::BI__builtin_ia32_vpshldd256: 6273 case X86::BI__builtin_ia32_vpshldd512: 6274 case X86::BI__builtin_ia32_vpshldq128: 6275 case X86::BI__builtin_ia32_vpshldq256: 6276 case X86::BI__builtin_ia32_vpshldq512: 6277 case X86::BI__builtin_ia32_vpshldw128: 6278 case X86::BI__builtin_ia32_vpshldw256: 6279 case X86::BI__builtin_ia32_vpshldw512: 6280 case X86::BI__builtin_ia32_vpshrdd128: 6281 case X86::BI__builtin_ia32_vpshrdd256: 6282 case X86::BI__builtin_ia32_vpshrdd512: 6283 case X86::BI__builtin_ia32_vpshrdq128: 6284 case X86::BI__builtin_ia32_vpshrdq256: 6285 case X86::BI__builtin_ia32_vpshrdq512: 6286 case X86::BI__builtin_ia32_vpshrdw128: 6287 case X86::BI__builtin_ia32_vpshrdw256: 6288 case X86::BI__builtin_ia32_vpshrdw512: 6289 i = 2; l = 0; u = 255; 6290 break; 6291 case X86::BI__builtin_ia32_fixupimmpd512_mask: 6292 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 6293 case X86::BI__builtin_ia32_fixupimmps512_mask: 6294 case X86::BI__builtin_ia32_fixupimmps512_maskz: 6295 case X86::BI__builtin_ia32_fixupimmsd_mask: 6296 case X86::BI__builtin_ia32_fixupimmsd_maskz: 6297 case X86::BI__builtin_ia32_fixupimmss_mask: 6298 case X86::BI__builtin_ia32_fixupimmss_maskz: 6299 case X86::BI__builtin_ia32_fixupimmpd128_mask: 6300 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 6301 case X86::BI__builtin_ia32_fixupimmpd256_mask: 6302 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 6303 case X86::BI__builtin_ia32_fixupimmps128_mask: 6304 case X86::BI__builtin_ia32_fixupimmps128_maskz: 6305 case X86::BI__builtin_ia32_fixupimmps256_mask: 6306 case X86::BI__builtin_ia32_fixupimmps256_maskz: 6307 case X86::BI__builtin_ia32_pternlogd512_mask: 6308 case 
X86::BI__builtin_ia32_pternlogd512_maskz: 6309 case X86::BI__builtin_ia32_pternlogq512_mask: 6310 case X86::BI__builtin_ia32_pternlogq512_maskz: 6311 case X86::BI__builtin_ia32_pternlogd128_mask: 6312 case X86::BI__builtin_ia32_pternlogd128_maskz: 6313 case X86::BI__builtin_ia32_pternlogd256_mask: 6314 case X86::BI__builtin_ia32_pternlogd256_maskz: 6315 case X86::BI__builtin_ia32_pternlogq128_mask: 6316 case X86::BI__builtin_ia32_pternlogq128_maskz: 6317 case X86::BI__builtin_ia32_pternlogq256_mask: 6318 case X86::BI__builtin_ia32_pternlogq256_maskz: 6319 case X86::BI__builtin_ia32_vsm3rnds2: 6320 i = 3; l = 0; u = 255; 6321 break; 6322 case X86::BI__builtin_ia32_gatherpfdpd: 6323 case X86::BI__builtin_ia32_gatherpfdps: 6324 case X86::BI__builtin_ia32_gatherpfqpd: 6325 case X86::BI__builtin_ia32_gatherpfqps: 6326 case X86::BI__builtin_ia32_scatterpfdpd: 6327 case X86::BI__builtin_ia32_scatterpfdps: 6328 case X86::BI__builtin_ia32_scatterpfqpd: 6329 case X86::BI__builtin_ia32_scatterpfqps: 6330 i = 4; l = 2; u = 3; 6331 break; 6332 case X86::BI__builtin_ia32_reducesd_mask: 6333 case X86::BI__builtin_ia32_reducess_mask: 6334 case X86::BI__builtin_ia32_rndscalesd_round_mask: 6335 case X86::BI__builtin_ia32_rndscaless_round_mask: 6336 case X86::BI__builtin_ia32_rndscalesh_round_mask: 6337 case X86::BI__builtin_ia32_reducesh_mask: 6338 i = 4; l = 0; u = 255; 6339 break; 6340 case X86::BI__builtin_ia32_cmpccxadd32: 6341 case X86::BI__builtin_ia32_cmpccxadd64: 6342 i = 3; l = 0; u = 15; 6343 break; 6344 } 6345 6346 // Note that we don't force a hard error on the range check here, allowing 6347 // template-generated or macro-generated dead code to potentially have out-of- 6348 // range values. These need to code generate, but don't need to necessarily 6349 // make any sense. We use a warning that defaults to an error. 
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}

/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
/// parameter with the FormatAttr's correct format_idx and firstDataArg.
/// Returns true when the format fits the function and the FormatStringInfo has
/// been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               bool IsVariadic, FormatStringInfo *FSI) {
  // Classify how the data arguments are passed: a first-arg of 0 means the
  // callee takes a va_list rather than real trailing arguments.
  if (Format->getFirstArg() == 0)
    FSI->ArgPassingKind = FAPK_VAList;
  else if (IsVariadic)
    FSI->ArgPassingKind = FAPK_Variadic;
  else
    FSI->ArgPassingKind = FAPK_Fixed;
  // Attribute indices are 1-based; convert to the 0-based indices used
  // internally.
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg =
      FSI->ArgPassingKind == FAPK_VAList ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    // An index of 0 would name the implicit 'this', which we cannot check.
    if(FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability = Expr->IgnoreImplicit()->getType()->getNullability()) {
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
          dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
            dyn_cast<InitListExpr>(CLE->getInitializer()))
          // Check the first member of the transparent union instead.
          Expr = ILE->getInit(0);
  }

  // A non-value-dependent expression that constant-folds to false (i.e. a
  // null pointer constant) is considered null.
  bool Result;
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
          !Result);
}

/// Emit warn_null_arg (as runtime behavior, so dead code is not diagnosed)
/// when \p ArgExpr evaluates to null.
static void CheckNonNullArgument(Sema &S,
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  if (CheckNonNullExpr(S, ArgExpr))
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                            << ArgExpr->getSourceRange());
}

/// If \p Format describes an NSString format function, store the 0-based
/// index of its format-string argument in \p Idx and return true.
bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
  FormatStringInfo FSI;
  if ((GetFormatStringType(Format) == FST_NSString) &&
      getFormatStringInfo(Format, false, true, &FSI)) {
    Idx = FSI.FormatIdx;
    return true;
  }
  return false;
}

/// Diagnose use of %s directive in an NSString which is being passed
/// as formatting string to formatting method.
static void
DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
                                      const NamedDecl *FDecl,
                                      Expr **Args,
                                      unsigned NumArgs) {
  unsigned Idx = 0;
  bool Format = false;
  // CFString-family functions take the format string as the third argument;
  // otherwise look for a format attribute naming an NSString format index.
  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
    Idx = 2;
    Format = true;
  }
  else
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      if (S.GetFormatNSStringIdx(I, Idx)) {
        Format = true;
        break;
      }
    }
  if (!Format || NumArgs <= Idx)
    return;
  const Expr *FormatExpr = Args[Idx];
  // Look through an explicit C-style cast (e.g. (CFStringRef)@"...").
  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
    FormatExpr = CSCE->getSubExpr();
  // The format argument may be an ObjC string literal (@"...") or a plain
  // C string literal; anything else is not checkable.
  const StringLiteral *FormatString;
  if (const ObjCStringLiteral *OSL =
      dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
    FormatString = OSL->getString();
  else
    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
  if (!FormatString)
    return;
  // %s in an NSString/CFString format is encoding-hazardous; warn and point
  // at the callee's declaration.
  if (S.FormatStringHasSArg(FormatString)) {
    S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
      << "%s" << 1 << 1;
    S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
      << FDecl->getDeclName();
  }
}

/// Determine whether the given type has a non-null nullability annotation.
static bool isNonNullType(QualType type) {
  if (auto nullability = type->getNullability())
    return *nullability == NullabilityKind::NonNull;

  return false;
}

/// Warn about null values passed to arguments that are annotated nonnull,
/// gathering nonnull-ness from (in order) nonnull attributes on the callee,
/// nonnull attributes / _Nonnull types on its parameters, and, failing a
/// usable declaration, the prototype's parameter nullability.
static void CheckNonNullArguments(Sema &S,
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  ArrayRef<const Expr *> Args,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Already checked by constant evaluator.
  if (S.isConstantEvaluated())
    return;
  // Check the attributes attached to the method/function itself.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        // NOTE: this returns without consulting per-parameter attributes or
        // prototype nullability; the blanket attribute covers everything.
        return;
      }

      // Record each explicitly-listed argument index, ignoring indices
      // beyond the actual argument count (possible in variadic/K&R calls).
      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        if (IdxAST >= Args.size())
          continue;
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
    ArrayRef<ParmVarDecl*> parms;
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
      parms = FD->parameters();
    else
      parms = cast<ObjCMethodDecl>(FDecl)->parameters();

    unsigned ParamIndex = 0;
    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
         I != E; ++I, ++ParamIndex) {
      const ParmVarDecl *PVD = *I;
      // Either an explicit nonnull attribute or a _Nonnull-qualified type
      // marks the parameter.
      if (PVD->hasAttr<NonNullAttr>() || isNonNullType(PVD->getType())) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(ParamIndex);
      }
    }
  } else {
    // If we have a non-function, non-method declaration but no
    // function prototype, try to dig out the function prototype.
    if (!Proto) {
      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
        QualType type = VD->getType().getNonReferenceType();
        if (auto pointerType = type->getAs<PointerType>())
          type = pointerType->getPointeeType();
        else if (auto blockType = type->getAs<BlockPointerType>())
          type = blockType->getPointeeType();
        // FIXME: data member pointers?

        // Dig out the function prototype, if there is one.
        Proto = type->getAs<FunctionProtoType>();
      }
    }

    // Fill in non-null argument information from the nullability
    // information on the parameter types (if we have them).
    if (Proto) {
      unsigned Index = 0;
      for (auto paramType : Proto->getParamTypes()) {
        if (isNonNullType(paramType)) {
          if (NonNullArgs.empty())
            NonNullArgs.resize(Args.size());

          NonNullArgs.set(Index);
        }

        ++Index;
      }
    }
  }

  // Check for non-null arguments.
  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
       ArgIndex != ArgIndexEnd; ++ArgIndex) {
    if (NonNullArgs[ArgIndex])
      CheckNonNullArgument(S, Args[ArgIndex], Args[ArgIndex]->getExprLoc());
  }
}

// 16 byte ByVal alignment not due to a vector member is not honoured by XL
// on AIX. Emit a warning here that users are generating binary incompatible
// code to be safe.
// Here we try to get information about the alignment of the struct member
// from the struct passed to the caller function. We only warn when the struct
// is passed byval, hence the series of checks and early returns if we are a not
// passing a struct byval.
6581 void Sema::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) { 6582 const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens()); 6583 if (!ICE) 6584 return; 6585 6586 const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr()); 6587 if (!DR) 6588 return; 6589 6590 const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl()); 6591 if (!PD || !PD->getType()->isRecordType()) 6592 return; 6593 6594 QualType ArgType = Arg->getType(); 6595 for (const FieldDecl *FD : 6596 ArgType->castAs<RecordType>()->getDecl()->fields()) { 6597 if (const auto *AA = FD->getAttr<AlignedAttr>()) { 6598 CharUnits Alignment = 6599 Context.toCharUnitsFromBits(AA->getAlignment(Context)); 6600 if (Alignment.getQuantity() == 16) { 6601 Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD; 6602 Diag(Loc, diag::note_misaligned_member_used_here) << PD; 6603 } 6604 } 6605 } 6606 } 6607 6608 /// Warn if a pointer or reference argument passed to a function points to an 6609 /// object that is less aligned than the parameter. This can happen when 6610 /// creating a typedef with a lower alignment than the original type and then 6611 /// calling functions defined in terms of the original type. 6612 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, 6613 StringRef ParamName, QualType ArgTy, 6614 QualType ParamTy) { 6615 6616 // If a function accepts a pointer or reference type 6617 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType()) 6618 return; 6619 6620 // If the parameter is a pointer type, get the pointee type for the 6621 // argument too. If the parameter is a reference type, don't try to get 6622 // the pointee type for the argument. 6623 if (ParamTy->isPointerType()) 6624 ArgTy = ArgTy->getPointeeType(); 6625 6626 // Remove reference or pointer 6627 ParamTy = ParamTy->getPointeeType(); 6628 6629 // Find expected alignment, and the actual alignment of the passed object. 
6630 // getTypeAlignInChars requires complete types 6631 if (ArgTy.isNull() || ParamTy->isDependentType() || 6632 ParamTy->isIncompleteType() || ArgTy->isIncompleteType() || 6633 ParamTy->isUndeducedType() || ArgTy->isUndeducedType()) 6634 return; 6635 6636 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy); 6637 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy); 6638 6639 // If the argument is less aligned than the parameter, there is a 6640 // potential alignment issue. 6641 if (ArgAlign < ParamAlign) 6642 Diag(Loc, diag::warn_param_mismatched_alignment) 6643 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity() 6644 << ParamName << (FDecl != nullptr) << FDecl; 6645 } 6646 6647 /// Handles the checks for format strings, non-POD arguments to vararg 6648 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 6649 /// attributes. 6650 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 6651 const Expr *ThisArg, ArrayRef<const Expr *> Args, 6652 bool IsMemberFunction, SourceLocation Loc, 6653 SourceRange Range, VariadicCallType CallType) { 6654 // FIXME: We should check as much as we can in the template definition. 6655 if (CurContext->isDependentContext()) 6656 return; 6657 6658 // Printf and scanf checking. 6659 llvm::SmallBitVector CheckedVarArgs; 6660 if (FDecl) { 6661 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 6662 // Only create vector if there are format attributes. 6663 CheckedVarArgs.resize(Args.size()); 6664 6665 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 6666 CheckedVarArgs); 6667 } 6668 } 6669 6670 // Refuse POD arguments that weren't caught by the format string 6671 // checks above. 6672 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 6673 if (CallType != VariadicDoesNotApply && 6674 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 6675 unsigned NumParams = Proto ? Proto->getNumParams() 6676 : FDecl && isa<FunctionDecl>(FDecl) 6677 ? 
cast<FunctionDecl>(FDecl)->getNumParams() 6678 : FDecl && isa<ObjCMethodDecl>(FDecl) 6679 ? cast<ObjCMethodDecl>(FDecl)->param_size() 6680 : 0; 6681 6682 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 6683 // Args[ArgIdx] can be null in malformed code. 6684 if (const Expr *Arg = Args[ArgIdx]) { 6685 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 6686 checkVariadicArgument(Arg, CallType); 6687 } 6688 } 6689 } 6690 6691 if (FDecl || Proto) { 6692 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 6693 6694 // Type safety checking. 6695 if (FDecl) { 6696 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 6697 CheckArgumentWithTypeTag(I, Args, Loc); 6698 } 6699 } 6700 6701 // Check that passed arguments match the alignment of original arguments. 6702 // Try to get the missing prototype from the declaration. 6703 if (!Proto && FDecl) { 6704 const auto *FT = FDecl->getFunctionType(); 6705 if (isa_and_nonnull<FunctionProtoType>(FT)) 6706 Proto = cast<FunctionProtoType>(FDecl->getFunctionType()); 6707 } 6708 if (Proto) { 6709 // For variadic functions, we may have more args than parameters. 6710 // For some K&R functions, we may have less args than parameters. 6711 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size()); 6712 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) { 6713 // Args[ArgIdx] can be null in malformed code. 
6714 if (const Expr *Arg = Args[ArgIdx]) { 6715 if (Arg->containsErrors()) 6716 continue; 6717 6718 if (Context.getTargetInfo().getTriple().isOSAIX() && FDecl && Arg && 6719 FDecl->hasLinkage() && 6720 FDecl->getFormalLinkage() != InternalLinkage && 6721 CallType == VariadicDoesNotApply) 6722 checkAIXMemberAlignment((Arg->getExprLoc()), Arg); 6723 6724 QualType ParamTy = Proto->getParamType(ArgIdx); 6725 QualType ArgTy = Arg->getType(); 6726 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1), 6727 ArgTy, ParamTy); 6728 } 6729 } 6730 } 6731 6732 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 6733 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 6734 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 6735 if (!Arg->isValueDependent()) { 6736 Expr::EvalResult Align; 6737 if (Arg->EvaluateAsInt(Align, Context)) { 6738 const llvm::APSInt &I = Align.Val.getInt(); 6739 if (!I.isPowerOf2()) 6740 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 6741 << Arg->getSourceRange(); 6742 6743 if (I > Sema::MaximumAlignment) 6744 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 6745 << Arg->getSourceRange() << Sema::MaximumAlignment; 6746 } 6747 } 6748 } 6749 6750 if (FD) 6751 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 6752 } 6753 6754 /// CheckConstructorCall - Check a constructor call for correctness and safety 6755 /// properties not enforced by the C type system. 6756 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, 6757 ArrayRef<const Expr *> Args, 6758 const FunctionProtoType *Proto, 6759 SourceLocation Loc) { 6760 VariadicCallType CallType = 6761 Proto->isVariadic() ? 
          VariadicConstructor : VariadicDoesNotApply;

  // Constructors always have an implicit object; check its alignment against
  // the declared 'this' type before running the common call checks.
  auto *Ctor = cast<CXXConstructorDecl>(FDecl);
  CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType),
                    Context.getPointerType(Ctor->getThisObjectType()));

  checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
            Loc, SourceRange(), CallType);
}

/// CheckFunctionCall - Check a direct function call for various correctness
/// and safety properties not strictly enforced by the C type system.
bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                             const FunctionProtoType *Proto) {
  bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
                              isa<CXXMethodDecl>(FDecl);
  bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
                          IsMemberOperatorCall;
  VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
                                                  TheCall->getCallee());
  Expr** Args = TheCall->getArgs();
  unsigned NumArgs = TheCall->getNumArgs();

  Expr *ImplicitThis = nullptr;
  if (IsMemberOperatorCall && !FDecl->isStatic()) {
    // If this is a call to a non-static member operator, hide the first
    // argument from checkCall.
    // FIXME: Our choice of AST representation here is less than ideal.
    ImplicitThis = Args[0];
    ++Args;
    --NumArgs;
  } else if (IsMemberFunction && !FDecl->isStatic())
    ImplicitThis =
        cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();

  if (ImplicitThis) {
    // ImplicitThis may or may not be a pointer, depending on whether . or -> is
    // used.
    QualType ThisType = ImplicitThis->getType();
    if (!ThisType->isPointerType()) {
      assert(!ThisType->isReferenceType());
      ThisType = Context.getPointerType(ThisType);
    }

    QualType ThisTypeFromDecl =
        Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType());

    CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
                      ThisTypeFromDecl);
  }

  checkCall(FDecl, Proto, ImplicitThis, llvm::ArrayRef(Args, NumArgs),
            IsMemberFunction, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  IdentifierInfo *FnInfo = FDecl->getIdentifier();
  // None of the checks below are needed for functions that don't have
  // simple names (e.g., C++ conversion functions).
  if (!FnInfo)
    return false;

  // Enforce TCB except for builtin calls, which are always allowed.
  if (FDecl->getBuiltinID() == 0)
    CheckTCBEnforcement(TheCall->getExprLoc(), FDecl);

  CheckAbsoluteValueFunction(TheCall, FDecl);
  CheckMaxUnsignedZero(TheCall, FDecl);

  if (getLangOpts().ObjC)
    DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);

  unsigned CMId = FDecl->getMemoryFunctionKind();

  // Handle memory setting and copying functions.
  switch (CMId) {
  case 0:
    // Not a known memory/string function; nothing more to do.
    return false;
  case Builtin::BIstrlcpy: // fallthrough
  case Builtin::BIstrlcat:
    CheckStrlcpycatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIstrncat:
    CheckStrncatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIfree:
    CheckFreeArguments(TheCall);
    break;
  default:
    CheckMemaccessArguments(TheCall, CMId, FnInfo);
  }

  return false;
}

/// Check an Objective-C message send for the same correctness properties
/// as a function call (via checkCall), plus TCB enforcement.
bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
                               ArrayRef<const Expr *> Args) {
  VariadicCallType CallType =
      Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;

  checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
            /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
            CallType);

  CheckTCBEnforcement(lbrac, Method);

  return false;
}

/// Check a call through a block or function pointer whose pointee declaration
/// (a variable or field) is known, running the common call checks.
bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
                            const FunctionProtoType *Proto) {
  QualType Ty;
  if (const auto *V = dyn_cast<VarDecl>(NDecl))
    Ty = V->getType().getNonReferenceType();
  else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
    Ty = F->getType().getNonReferenceType();
  else
    return false;

  // Only block pointers, function pointers, and functions are checkable here.
  if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
      !Ty->isFunctionProtoType())
    return false;

  VariadicCallType CallType;
  if (!Proto || !Proto->isVariadic()) {
    CallType = VariadicDoesNotApply;
  } else if (Ty->isBlockPointerType()) {
    CallType = VariadicBlock;
  } else { // Ty->isFunctionPointerType()
    CallType = VariadicFunction;
  }

  checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
            llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  return false;
}

/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
/// such as function pointers returned from functions.
6902 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 6903 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 6904 TheCall->getCallee()); 6905 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 6906 llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 6907 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 6908 TheCall->getCallee()->getSourceRange(), CallType); 6909 6910 return false; 6911 } 6912 6913 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 6914 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 6915 return false; 6916 6917 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 6918 switch (Op) { 6919 case AtomicExpr::AO__c11_atomic_init: 6920 case AtomicExpr::AO__opencl_atomic_init: 6921 llvm_unreachable("There is no ordering argument for an init"); 6922 6923 case AtomicExpr::AO__c11_atomic_load: 6924 case AtomicExpr::AO__opencl_atomic_load: 6925 case AtomicExpr::AO__hip_atomic_load: 6926 case AtomicExpr::AO__atomic_load_n: 6927 case AtomicExpr::AO__atomic_load: 6928 return OrderingCABI != llvm::AtomicOrderingCABI::release && 6929 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 6930 6931 case AtomicExpr::AO__c11_atomic_store: 6932 case AtomicExpr::AO__opencl_atomic_store: 6933 case AtomicExpr::AO__hip_atomic_store: 6934 case AtomicExpr::AO__atomic_store: 6935 case AtomicExpr::AO__atomic_store_n: 6936 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 6937 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 6938 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 6939 6940 default: 6941 return true; 6942 } 6943 } 6944 6945 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 6946 AtomicExpr::AtomicOp Op) { 6947 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 6948 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6949 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 6950 return 
BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 6951 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 6952 Op); 6953 } 6954 6955 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 6956 SourceLocation RParenLoc, MultiExprArg Args, 6957 AtomicExpr::AtomicOp Op, 6958 AtomicArgumentOrder ArgOrder) { 6959 // All the non-OpenCL operations take one of the following forms. 6960 // The OpenCL operations take the __c11 forms with one extra argument for 6961 // synchronization scope. 6962 enum { 6963 // C __c11_atomic_init(A *, C) 6964 Init, 6965 6966 // C __c11_atomic_load(A *, int) 6967 Load, 6968 6969 // void __atomic_load(A *, CP, int) 6970 LoadCopy, 6971 6972 // void __atomic_store(A *, CP, int) 6973 Copy, 6974 6975 // C __c11_atomic_add(A *, M, int) 6976 Arithmetic, 6977 6978 // C __atomic_exchange_n(A *, CP, int) 6979 Xchg, 6980 6981 // void __atomic_exchange(A *, C *, CP, int) 6982 GNUXchg, 6983 6984 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 6985 C11CmpXchg, 6986 6987 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 6988 GNUCmpXchg 6989 } Form = Init; 6990 6991 const unsigned NumForm = GNUCmpXchg + 1; 6992 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 6993 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 6994 // where: 6995 // C is an appropriate type, 6996 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 6997 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 6998 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 6999 // the int parameters are for orderings. 
7000 7001 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 7002 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 7003 "need to update code for modified forms"); 7004 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 7005 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 7006 AtomicExpr::AO__atomic_load, 7007 "need to update code for modified C11 atomics"); 7008 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 7009 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 7010 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load && 7011 Op <= AtomicExpr::AO__hip_atomic_fetch_max; 7012 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 7013 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 7014 IsOpenCL; 7015 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 7016 Op == AtomicExpr::AO__atomic_store_n || 7017 Op == AtomicExpr::AO__atomic_exchange_n || 7018 Op == AtomicExpr::AO__atomic_compare_exchange_n; 7019 // Bit mask for extra allowed value types other than integers for atomic 7020 // arithmetic operations. Add/sub allow pointer and floating point. Min/max 7021 // allow floating point. 
7022 enum ArithOpExtraValueType { 7023 AOEVT_None = 0, 7024 AOEVT_Pointer = 1, 7025 AOEVT_FP = 2, 7026 }; 7027 unsigned ArithAllows = AOEVT_None; 7028 7029 switch (Op) { 7030 case AtomicExpr::AO__c11_atomic_init: 7031 case AtomicExpr::AO__opencl_atomic_init: 7032 Form = Init; 7033 break; 7034 7035 case AtomicExpr::AO__c11_atomic_load: 7036 case AtomicExpr::AO__opencl_atomic_load: 7037 case AtomicExpr::AO__hip_atomic_load: 7038 case AtomicExpr::AO__atomic_load_n: 7039 Form = Load; 7040 break; 7041 7042 case AtomicExpr::AO__atomic_load: 7043 Form = LoadCopy; 7044 break; 7045 7046 case AtomicExpr::AO__c11_atomic_store: 7047 case AtomicExpr::AO__opencl_atomic_store: 7048 case AtomicExpr::AO__hip_atomic_store: 7049 case AtomicExpr::AO__atomic_store: 7050 case AtomicExpr::AO__atomic_store_n: 7051 Form = Copy; 7052 break; 7053 case AtomicExpr::AO__atomic_fetch_add: 7054 case AtomicExpr::AO__atomic_fetch_sub: 7055 case AtomicExpr::AO__atomic_add_fetch: 7056 case AtomicExpr::AO__atomic_sub_fetch: 7057 case AtomicExpr::AO__c11_atomic_fetch_add: 7058 case AtomicExpr::AO__c11_atomic_fetch_sub: 7059 case AtomicExpr::AO__opencl_atomic_fetch_add: 7060 case AtomicExpr::AO__opencl_atomic_fetch_sub: 7061 case AtomicExpr::AO__hip_atomic_fetch_add: 7062 case AtomicExpr::AO__hip_atomic_fetch_sub: 7063 ArithAllows = AOEVT_Pointer | AOEVT_FP; 7064 Form = Arithmetic; 7065 break; 7066 case AtomicExpr::AO__atomic_fetch_max: 7067 case AtomicExpr::AO__atomic_fetch_min: 7068 case AtomicExpr::AO__atomic_max_fetch: 7069 case AtomicExpr::AO__atomic_min_fetch: 7070 case AtomicExpr::AO__c11_atomic_fetch_max: 7071 case AtomicExpr::AO__c11_atomic_fetch_min: 7072 case AtomicExpr::AO__opencl_atomic_fetch_max: 7073 case AtomicExpr::AO__opencl_atomic_fetch_min: 7074 case AtomicExpr::AO__hip_atomic_fetch_max: 7075 case AtomicExpr::AO__hip_atomic_fetch_min: 7076 ArithAllows = AOEVT_FP; 7077 Form = Arithmetic; 7078 break; 7079 case AtomicExpr::AO__c11_atomic_fetch_and: 7080 case 
AtomicExpr::AO__c11_atomic_fetch_or: 7081 case AtomicExpr::AO__c11_atomic_fetch_xor: 7082 case AtomicExpr::AO__hip_atomic_fetch_and: 7083 case AtomicExpr::AO__hip_atomic_fetch_or: 7084 case AtomicExpr::AO__hip_atomic_fetch_xor: 7085 case AtomicExpr::AO__c11_atomic_fetch_nand: 7086 case AtomicExpr::AO__opencl_atomic_fetch_and: 7087 case AtomicExpr::AO__opencl_atomic_fetch_or: 7088 case AtomicExpr::AO__opencl_atomic_fetch_xor: 7089 case AtomicExpr::AO__atomic_fetch_and: 7090 case AtomicExpr::AO__atomic_fetch_or: 7091 case AtomicExpr::AO__atomic_fetch_xor: 7092 case AtomicExpr::AO__atomic_fetch_nand: 7093 case AtomicExpr::AO__atomic_and_fetch: 7094 case AtomicExpr::AO__atomic_or_fetch: 7095 case AtomicExpr::AO__atomic_xor_fetch: 7096 case AtomicExpr::AO__atomic_nand_fetch: 7097 Form = Arithmetic; 7098 break; 7099 7100 case AtomicExpr::AO__c11_atomic_exchange: 7101 case AtomicExpr::AO__hip_atomic_exchange: 7102 case AtomicExpr::AO__opencl_atomic_exchange: 7103 case AtomicExpr::AO__atomic_exchange_n: 7104 Form = Xchg; 7105 break; 7106 7107 case AtomicExpr::AO__atomic_exchange: 7108 Form = GNUXchg; 7109 break; 7110 7111 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 7112 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 7113 case AtomicExpr::AO__hip_atomic_compare_exchange_strong: 7114 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 7115 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 7116 case AtomicExpr::AO__hip_atomic_compare_exchange_weak: 7117 Form = C11CmpXchg; 7118 break; 7119 7120 case AtomicExpr::AO__atomic_compare_exchange: 7121 case AtomicExpr::AO__atomic_compare_exchange_n: 7122 Form = GNUCmpXchg; 7123 break; 7124 } 7125 7126 unsigned AdjustedNumArgs = NumArgs[Form]; 7127 if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init) 7128 ++AdjustedNumArgs; 7129 // Check we have the right number of arguments. 
7130 if (Args.size() < AdjustedNumArgs) { 7131 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 7132 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 7133 << ExprRange; 7134 return ExprError(); 7135 } else if (Args.size() > AdjustedNumArgs) { 7136 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 7137 diag::err_typecheck_call_too_many_args) 7138 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 7139 << ExprRange; 7140 return ExprError(); 7141 } 7142 7143 // Inspect the first argument of the atomic operation. 7144 Expr *Ptr = Args[0]; 7145 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 7146 if (ConvertedPtr.isInvalid()) 7147 return ExprError(); 7148 7149 Ptr = ConvertedPtr.get(); 7150 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 7151 if (!pointerType) { 7152 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 7153 << Ptr->getType() << Ptr->getSourceRange(); 7154 return ExprError(); 7155 } 7156 7157 // For a __c11 builtin, this should be a pointer to an _Atomic type. 7158 QualType AtomTy = pointerType->getPointeeType(); // 'A' 7159 QualType ValType = AtomTy; // 'C' 7160 if (IsC11) { 7161 if (!AtomTy->isAtomicType()) { 7162 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 7163 << Ptr->getType() << Ptr->getSourceRange(); 7164 return ExprError(); 7165 } 7166 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 7167 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 7168 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 7169 << (AtomTy.isConstQualified() ? 
0 : 1) << Ptr->getType() 7170 << Ptr->getSourceRange(); 7171 return ExprError(); 7172 } 7173 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 7174 } else if (Form != Load && Form != LoadCopy) { 7175 if (ValType.isConstQualified()) { 7176 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 7177 << Ptr->getType() << Ptr->getSourceRange(); 7178 return ExprError(); 7179 } 7180 } 7181 7182 // For an arithmetic operation, the implied arithmetic must be well-formed. 7183 if (Form == Arithmetic) { 7184 // GCC does not enforce these rules for GNU atomics, but we do to help catch 7185 // trivial type errors. 7186 auto IsAllowedValueType = [&](QualType ValType, 7187 unsigned AllowedType) -> bool { 7188 if (ValType->isIntegerType()) 7189 return true; 7190 if (ValType->isPointerType()) 7191 return AllowedType & AOEVT_Pointer; 7192 if (!(ValType->isFloatingType() && (AllowedType & AOEVT_FP))) 7193 return false; 7194 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 7195 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 7196 &Context.getTargetInfo().getLongDoubleFormat() == 7197 &llvm::APFloat::x87DoubleExtended()) 7198 return false; 7199 return true; 7200 }; 7201 if (!IsAllowedValueType(ValType, ArithAllows)) { 7202 auto DID = ArithAllows & AOEVT_FP 7203 ? (ArithAllows & AOEVT_Pointer 7204 ? 
diag::err_atomic_op_needs_atomic_int_ptr_or_fp 7205 : diag::err_atomic_op_needs_atomic_int_or_fp) 7206 : diag::err_atomic_op_needs_atomic_int; 7207 Diag(ExprRange.getBegin(), DID) 7208 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 7209 return ExprError(); 7210 } 7211 if (IsC11 && ValType->isPointerType() && 7212 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 7213 diag::err_incomplete_type)) { 7214 return ExprError(); 7215 } 7216 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 7217 // For __atomic_*_n operations, the value type must be a scalar integral or 7218 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 7219 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 7220 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 7221 return ExprError(); 7222 } 7223 7224 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 7225 !AtomTy->isScalarType()) { 7226 // For GNU atomics, require a trivially-copyable type. This is not part of 7227 // the GNU atomics specification but we enforce it for consistency with 7228 // other atomics which generally all require a trivially-copyable type. This 7229 // is because atomics just copy bits. 7230 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 7231 << Ptr->getType() << Ptr->getSourceRange(); 7232 return ExprError(); 7233 } 7234 7235 switch (ValType.getObjCLifetime()) { 7236 case Qualifiers::OCL_None: 7237 case Qualifiers::OCL_ExplicitNone: 7238 // okay 7239 break; 7240 7241 case Qualifiers::OCL_Weak: 7242 case Qualifiers::OCL_Strong: 7243 case Qualifiers::OCL_Autoreleasing: 7244 // FIXME: Can this happen? By this point, ValType should be known 7245 // to be trivially copyable. 7246 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 7247 << ValType << Ptr->getSourceRange(); 7248 return ExprError(); 7249 } 7250 7251 // All atomic operations have an overload which takes a pointer to a volatile 7252 // 'A'. 
We shouldn't let the volatile-ness of the pointee-type inject itself 7253 // into the result or the other operands. Similarly atomic_load takes a 7254 // pointer to a const 'A'. 7255 ValType.removeLocalVolatile(); 7256 ValType.removeLocalConst(); 7257 QualType ResultType = ValType; 7258 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 7259 Form == Init) 7260 ResultType = Context.VoidTy; 7261 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 7262 ResultType = Context.BoolTy; 7263 7264 // The type of a parameter passed 'by value'. In the GNU atomics, such 7265 // arguments are actually passed as pointers. 7266 QualType ByValType = ValType; // 'CP' 7267 bool IsPassedByAddress = false; 7268 if (!IsC11 && !IsHIP && !IsN) { 7269 ByValType = Ptr->getType(); 7270 IsPassedByAddress = true; 7271 } 7272 7273 SmallVector<Expr *, 5> APIOrderedArgs; 7274 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 7275 APIOrderedArgs.push_back(Args[0]); 7276 switch (Form) { 7277 case Init: 7278 case Load: 7279 APIOrderedArgs.push_back(Args[1]); // Val1/Order 7280 break; 7281 case LoadCopy: 7282 case Copy: 7283 case Arithmetic: 7284 case Xchg: 7285 APIOrderedArgs.push_back(Args[2]); // Val1 7286 APIOrderedArgs.push_back(Args[1]); // Order 7287 break; 7288 case GNUXchg: 7289 APIOrderedArgs.push_back(Args[2]); // Val1 7290 APIOrderedArgs.push_back(Args[3]); // Val2 7291 APIOrderedArgs.push_back(Args[1]); // Order 7292 break; 7293 case C11CmpXchg: 7294 APIOrderedArgs.push_back(Args[2]); // Val1 7295 APIOrderedArgs.push_back(Args[4]); // Val2 7296 APIOrderedArgs.push_back(Args[1]); // Order 7297 APIOrderedArgs.push_back(Args[3]); // OrderFail 7298 break; 7299 case GNUCmpXchg: 7300 APIOrderedArgs.push_back(Args[2]); // Val1 7301 APIOrderedArgs.push_back(Args[4]); // Val2 7302 APIOrderedArgs.push_back(Args[5]); // Weak 7303 APIOrderedArgs.push_back(Args[1]); // Order 7304 APIOrderedArgs.push_back(Args[3]); // OrderFail 7305 break; 7306 } 7307 } else 7308 
APIOrderedArgs.append(Args.begin(), Args.end()); 7309 7310 // The first argument's non-CV pointer type is used to deduce the type of 7311 // subsequent arguments, except for: 7312 // - weak flag (always converted to bool) 7313 // - memory order (always converted to int) 7314 // - scope (always converted to int) 7315 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 7316 QualType Ty; 7317 if (i < NumVals[Form] + 1) { 7318 switch (i) { 7319 case 0: 7320 // The first argument is always a pointer. It has a fixed type. 7321 // It is always dereferenced, a nullptr is undefined. 7322 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 7323 // Nothing else to do: we already know all we want about this pointer. 7324 continue; 7325 case 1: 7326 // The second argument is the non-atomic operand. For arithmetic, this 7327 // is always passed by value, and for a compare_exchange it is always 7328 // passed by address. For the rest, GNU uses by-address and C11 uses 7329 // by-value. 7330 assert(Form != Load); 7331 if (Form == Arithmetic && ValType->isPointerType()) 7332 Ty = Context.getPointerDiffType(); 7333 else if (Form == Init || Form == Arithmetic) 7334 Ty = ValType; 7335 else if (Form == Copy || Form == Xchg) { 7336 if (IsPassedByAddress) { 7337 // The value pointer is always dereferenced, a nullptr is undefined. 7338 CheckNonNullArgument(*this, APIOrderedArgs[i], 7339 ExprRange.getBegin()); 7340 } 7341 Ty = ByValType; 7342 } else { 7343 Expr *ValArg = APIOrderedArgs[i]; 7344 // The value pointer is always dereferenced, a nullptr is undefined. 7345 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 7346 LangAS AS = LangAS::Default; 7347 // Keep address space of non-atomic pointer type. 
7348 if (const PointerType *PtrTy = 7349 ValArg->getType()->getAs<PointerType>()) { 7350 AS = PtrTy->getPointeeType().getAddressSpace(); 7351 } 7352 Ty = Context.getPointerType( 7353 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 7354 } 7355 break; 7356 case 2: 7357 // The third argument to compare_exchange / GNU exchange is the desired 7358 // value, either by-value (for the C11 and *_n variant) or as a pointer. 7359 if (IsPassedByAddress) 7360 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 7361 Ty = ByValType; 7362 break; 7363 case 3: 7364 // The fourth argument to GNU compare_exchange is a 'weak' flag. 7365 Ty = Context.BoolTy; 7366 break; 7367 } 7368 } else { 7369 // The order(s) and scope are always converted to int. 7370 Ty = Context.IntTy; 7371 } 7372 7373 InitializedEntity Entity = 7374 InitializedEntity::InitializeParameter(Context, Ty, false); 7375 ExprResult Arg = APIOrderedArgs[i]; 7376 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7377 if (Arg.isInvalid()) 7378 return true; 7379 APIOrderedArgs[i] = Arg.get(); 7380 } 7381 7382 // Permute the arguments into a 'consistent' order. 7383 SmallVector<Expr*, 5> SubExprs; 7384 SubExprs.push_back(Ptr); 7385 switch (Form) { 7386 case Init: 7387 // Note, AtomicExpr::getVal1() has a special case for this atomic. 7388 SubExprs.push_back(APIOrderedArgs[1]); // Val1 7389 break; 7390 case Load: 7391 SubExprs.push_back(APIOrderedArgs[1]); // Order 7392 break; 7393 case LoadCopy: 7394 case Copy: 7395 case Arithmetic: 7396 case Xchg: 7397 SubExprs.push_back(APIOrderedArgs[2]); // Order 7398 SubExprs.push_back(APIOrderedArgs[1]); // Val1 7399 break; 7400 case GNUXchg: 7401 // Note, AtomicExpr::getVal2() has a special case for this atomic. 
7402 SubExprs.push_back(APIOrderedArgs[3]); // Order 7403 SubExprs.push_back(APIOrderedArgs[1]); // Val1 7404 SubExprs.push_back(APIOrderedArgs[2]); // Val2 7405 break; 7406 case C11CmpXchg: 7407 SubExprs.push_back(APIOrderedArgs[3]); // Order 7408 SubExprs.push_back(APIOrderedArgs[1]); // Val1 7409 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 7410 SubExprs.push_back(APIOrderedArgs[2]); // Val2 7411 break; 7412 case GNUCmpXchg: 7413 SubExprs.push_back(APIOrderedArgs[4]); // Order 7414 SubExprs.push_back(APIOrderedArgs[1]); // Val1 7415 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 7416 SubExprs.push_back(APIOrderedArgs[2]); // Val2 7417 SubExprs.push_back(APIOrderedArgs[3]); // Weak 7418 break; 7419 } 7420 7421 if (SubExprs.size() >= 2 && Form != Init) { 7422 if (std::optional<llvm::APSInt> Result = 7423 SubExprs[1]->getIntegerConstantExpr(Context)) 7424 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 7425 Diag(SubExprs[1]->getBeginLoc(), 7426 diag::warn_atomic_op_has_invalid_memory_order) 7427 << SubExprs[1]->getSourceRange(); 7428 } 7429 7430 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 7431 auto *Scope = Args[Args.size() - 1]; 7432 if (std::optional<llvm::APSInt> Result = 7433 Scope->getIntegerConstantExpr(Context)) { 7434 if (!ScopeModel->isValid(Result->getZExtValue())) 7435 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 7436 << Scope->getSourceRange(); 7437 } 7438 SubExprs.push_back(Scope); 7439 } 7440 7441 AtomicExpr *AE = new (Context) 7442 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 7443 7444 if ((Op == AtomicExpr::AO__c11_atomic_load || 7445 Op == AtomicExpr::AO__c11_atomic_store || 7446 Op == AtomicExpr::AO__opencl_atomic_load || 7447 Op == AtomicExpr::AO__hip_atomic_load || 7448 Op == AtomicExpr::AO__opencl_atomic_store || 7449 Op == AtomicExpr::AO__hip_atomic_store) && 7450 Context.AtomicUsesUnsupportedLibcall(AE)) 7451 Diag(AE->getBeginLoc(), 
        diag::err_atomic_load_store_uses_lib)
        << ((Op == AtomicExpr::AO__c11_atomic_load ||
             Op == AtomicExpr::AO__opencl_atomic_load ||
             Op == AtomicExpr::AO__hip_atomic_load)
                ? 0
                : 1);

  // Atomic operations on _BitInt are not supported by any of the backends.
  if (ValType->isBitIntType()) {
    Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit);
    return ExprError();
  }

  return AE;
}

/// checkBuiltinArgument - Given a call to a builtin function, perform
/// normal type-checking on the given argument, updating the call in
/// place. This is useful when a builtin function requires custom
/// type-checking for some of its arguments but not necessarily all of
/// them.
///
/// The argument is converted as if it initialized the corresponding
/// parameter of the builtin's declaration, and the converted expression
/// replaces the original argument in the call.
///
/// Returns true on error.
static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
  FunctionDecl *Fn = E->getDirectCallee();
  assert(Fn && "builtin call without direct callee!");

  ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
  InitializedEntity Entity =
    InitializedEntity::InitializeParameter(S.Context, Param);

  ExprResult Arg = E->getArg(ArgIndex);
  Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
  if (Arg.isInvalid())
    return true;

  // Write the converted argument back into the call.
  E->setArg(ArgIndex, Arg.get());
  return false;
}

/// Type-check a call to __builtin_wasm_ref_null_extern: it takes no
/// arguments and produces a WebAssembly externref value.
/// Returns true on error.
bool Sema::BuiltinWasmRefNullExtern(CallExpr *TheCall) {
  // NOTE(review): unlike BuiltinWasmRefNullFunc below, no diagnostic is
  // emitted for a bad argument count here — presumably the caller reports
  // the error; verify against the call site.
  if (TheCall->getNumArgs() != 0)
    return true;

  TheCall->setType(Context.getWebAssemblyExternrefType());

  return false;
}

/// Type-check a call to __builtin_wasm_ref_null_func: it takes no arguments
/// and produces a null WebAssembly funcref. Returns true on error.
bool Sema::BuiltinWasmRefNullFunc(CallExpr *TheCall) {
  if (TheCall->getNumArgs() != 0) {
    Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_many_args)
        << 0 /*function call*/ << 0 << TheCall->getNumArgs();
    return true;
  }

  // This custom type checking code ensures that the nodes are as expected
  // in order to later on generate the necessary builtin.
  // Build the result type: a WebAssemblyFuncref-attributed pointer whose
  // pointee carries the wasm_funcref address space.
  QualType Pointee = Context.getFunctionType(Context.VoidTy, {}, {});
  QualType Type = Context.getPointerType(Pointee);
  Pointee = Context.getAddrSpaceQualType(Pointee, LangAS::wasm_funcref);
  Type = Context.getAttributedType(attr::WebAssemblyFuncref, Type,
                                   Context.getPointerType(Pointee));
  TheCall->setType(Type);

  return false;
}

/// We have a call to a function like __sync_fetch_and_add, which is an
/// overloaded function based on the pointer type of its first argument.
/// The main BuildCallExpr routines have already promoted the types of
/// arguments because all of these calls are prototyped as void(...).
///
/// This function goes through and does final semantic checking for these
/// builtins, as well as generating any warnings.
ExprResult
Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
  Expr *Callee = TheCall->getCallee();
  DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // Ensure that we have at least one argument to do type inference from.
  if (TheCall->getNumArgs() < 1) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange();
    return ExprError();
  }

  // Inspect the first argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  // FIXME: We don't allow floating point scalars as input.
  Expr *FirstArg = TheCall->getArg(0);
  ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
  if (FirstArgResult.isInvalid())
    return ExprError();
  FirstArg = FirstArgResult.get();
  TheCall->setArg(0, FirstArg);

  const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // The pointee must be an integer, (block) pointer, and non-const.
  QualType ValType = pointerType->getPointeeType();
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  if (ValType.isConstQualified()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Reject ObjC lifetime-qualified pointees; atomics just copy bits and
  // cannot honor ARC ownership semantics.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << FirstArg->getSourceRange();
    return ExprError();
  }

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();

  // The majority of builtins return a value, but a few have special return
  // types, so allow them to override appropriately below.
  QualType ResultType = ValType;

  // We need to figure out which concrete builtin this maps onto. For example,
  // __sync_fetch_and_add with a 2 byte object turns into
  // __sync_fetch_and_add_2.
  // Each row maps one overloaded builtin to its five sized variants
  // (1, 2, 4, 8, and 16 bytes), indexed by SizeIndex below.
#define BUILTIN_ROW(x) \
  { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
    Builtin::BI##x##_8, Builtin::BI##x##_16 }

  static const unsigned BuiltinIndices[][5] = {
    BUILTIN_ROW(__sync_fetch_and_add),
    BUILTIN_ROW(__sync_fetch_and_sub),
    BUILTIN_ROW(__sync_fetch_and_or),
    BUILTIN_ROW(__sync_fetch_and_and),
    BUILTIN_ROW(__sync_fetch_and_xor),
    BUILTIN_ROW(__sync_fetch_and_nand),

    BUILTIN_ROW(__sync_add_and_fetch),
    BUILTIN_ROW(__sync_sub_and_fetch),
    BUILTIN_ROW(__sync_and_and_fetch),
    BUILTIN_ROW(__sync_or_and_fetch),
    BUILTIN_ROW(__sync_xor_and_fetch),
    BUILTIN_ROW(__sync_nand_and_fetch),

    BUILTIN_ROW(__sync_val_compare_and_swap),
    BUILTIN_ROW(__sync_bool_compare_and_swap),
    BUILTIN_ROW(__sync_lock_test_and_set),
    BUILTIN_ROW(__sync_lock_release),
    BUILTIN_ROW(__sync_swap)
  };
#undef BUILTIN_ROW

  // Determine the index of the size.
  unsigned SizeIndex;
  switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
  case 1: SizeIndex = 0; break;
  case 2: SizeIndex = 1; break;
  case 4: SizeIndex = 2; break;
  case 8: SizeIndex = 3; break;
  case 16: SizeIndex = 4; break;
  default:
    // No sized variant exists for this pointee size.
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Each of these builtins has one pointer argument, followed by some number of
  // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
  // that we ignore. Find out which row of BuiltinIndices to read from as well
  // as the number of fixed args.
  unsigned BuiltinID = FDecl->getBuiltinID();
  unsigned BuiltinIndex, NumFixed = 1;
  bool WarnAboutSemanticsChange = false;
  switch (BuiltinID) {
  default: llvm_unreachable("Unknown overloaded atomic builtin!");
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    BuiltinIndex = 0;
    break;

  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    BuiltinIndex = 1;
    break;

  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    BuiltinIndex = 2;
    break;

  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    BuiltinIndex = 3;
    break;

  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    BuiltinIndex = 4;
    break;

  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    BuiltinIndex = 5;
    // The nand builtins changed meaning (see
    // warn_sync_fetch_and_nand_semantics_change); warn the user below.
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    BuiltinIndex = 6;
    break;

  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    BuiltinIndex = 7;
    break;

  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    BuiltinIndex = 8;
    break;

  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    BuiltinIndex = 9;
    break;

  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    BuiltinIndex = 10;
    break;

  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    BuiltinIndex = 11;
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
    BuiltinIndex = 12;
    // Compare-and-swap takes two value operands: expected and desired.
    NumFixed = 2;
    break;

  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
    BuiltinIndex = 13;
    NumFixed = 2;
    // The bool variant reports success rather than the old value.
    ResultType = Context.BoolTy;
    break;

  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    BuiltinIndex = 14;
    break;

  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
    BuiltinIndex = 15;
    // lock_release takes only the pointer and returns nothing.
    NumFixed = 0;
    ResultType = Context.VoidTy;
    break;

  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    BuiltinIndex = 16;
    break;
  }

  // Now that we know how many fixed arguments we expect, first check that we
  // have at least that many.
  if (TheCall->getNumArgs() < 1+NumFixed) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 + NumFixed << TheCall->getNumArgs()
        << Callee->getSourceRange();
    return ExprError();
  }

  // The __sync_* builtins always imply sequentially-consistent ordering.
  Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst)
      << Callee->getSourceRange();

  if (WarnAboutSemanticsChange) {
    Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change)
        << Callee->getSourceRange();
  }

  // Get the decl for the concrete builtin from this, we can tell what the
  // concrete integer type we should convert to is.
  unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
  StringRef NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
  FunctionDecl *NewBuiltinDecl;
  if (NewBuiltinID == BuiltinID)
    NewBuiltinDecl = FDecl;
  else {
    // Perform builtin lookup to avoid redeclaring it.
    DeclarationName DN(&Context.Idents.get(NewBuiltinName));
    LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
    LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
    assert(Res.getFoundDecl());
    NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
    if (!NewBuiltinDecl)
      return ExprError();
  }

  // The first argument --- the pointer --- has a fixed type; we
  // deduce the types of the rest of the arguments accordingly. Walk
  // the remaining arguments, converting them to the deduced value type.
  for (unsigned i = 0; i != NumFixed; ++i) {
    ExprResult Arg = TheCall->getArg(i+1);

    // GCC does an implicit conversion to the pointer or integer ValType. This
    // can fail in some cases (1i -> int**), check for this error case now.
    // Initialize the argument.
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                   ValType, /*consume*/ false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();

    // Okay, we have something that *can* be converted to the right type. Check
    // to see if there is a potentially weird extension going on here. This can
    // happen when you do an atomic operation on something like a char* and
    // pass in 42. The 42 gets converted to char. This is even more strange
    // for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    TheCall->setArg(i+1, Arg.get());
  }

  // Create a new DeclRefExpr to refer to the new decl.
  DeclRefExpr *NewDRE = DeclRefExpr::Create(
      Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
      /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
      DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());

  // Set the callee in the CallExpr.
  // FIXME: This loses syntactic information.
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  // Prohibit problematic uses of bit-precise integer types with atomic
  // builtins. The arguments would have already been converted to the first
  // argument's type, so only need to check the first argument.
  const auto *BitIntValType = ValType->getAs<BitIntType>();
  if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) {
    Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
    return ExprError();
  }

  return TheCallResult;
}

/// SemaBuiltinNontemporalOverloaded - We have a call to
/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
/// overloaded function based on the pointer type of its last argument.
///
/// This function goes through and does final semantic checking for these
/// builtins.
ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
  unsigned BuiltinID = FDecl->getBuiltinID();
  assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
          BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
         "Unexpected nontemporal load/store builtin!");
  bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
  // Stores take (value, pointer); loads take only (pointer).
  unsigned numArgs = isStore ? 2 : 1;

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, numArgs))
    return ExprError();

  // Inspect the last argument of the nontemporal builtin. This should always
  // be a pointer type, from which we imply the type of the memory access.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(numArgs - 1);
  ExprResult PointerArgResult =
      DefaultFunctionArrayLvalueConversion(PointerArg);

  if (PointerArgResult.isInvalid())
    return ExprError();
  PointerArg = PointerArgResult.get();
  TheCall->setArg(numArgs - 1, PointerArg);

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return ExprError();
  }

  QualType ValType = pointerType->getPointeeType();

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();
  // The access type must be integer, pointer, floating-point, or vector.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
      !ValType->isVectorType()) {
    Diag(DRE->getBeginLoc(),
         diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return ExprError();
  }

  // A load produces the pointee type; nothing further to convert.
  if (!isStore) {
    TheCall->setType(ValType);
    return TheCallResult;
  }

  // For a store, convert the value operand to the pointee type.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return ExprError();

  TheCall->setArg(0, ValArg.get());
  TheCall->setType(Context.VoidTy);
  return TheCallResult;
}

/// CheckObjCString - Checks that the argument to the builtin
/// CFString constructor is correct
/// Note: It might also make sense to do the UTF-16 conversion here (would
/// simplify the backend).
7972 bool Sema::CheckObjCString(Expr *Arg) { 7973 Arg = Arg->IgnoreParenCasts(); 7974 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 7975 7976 if (!Literal || !Literal->isOrdinary()) { 7977 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 7978 << Arg->getSourceRange(); 7979 return true; 7980 } 7981 7982 if (Literal->containsNonAsciiOrNull()) { 7983 StringRef String = Literal->getString(); 7984 unsigned NumBytes = String.size(); 7985 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 7986 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 7987 llvm::UTF16 *ToPtr = &ToBuf[0]; 7988 7989 llvm::ConversionResult Result = 7990 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 7991 ToPtr + NumBytes, llvm::strictConversion); 7992 // Check for conversion failure. 7993 if (Result != llvm::conversionOK) 7994 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 7995 << Arg->getSourceRange(); 7996 } 7997 return false; 7998 } 7999 8000 /// CheckObjCString - Checks that the format string argument to the os_log() 8001 /// and os_trace() functions is correct, and converts it to const char *. 
ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  auto *Literal = dyn_cast<StringLiteral>(Arg);
  if (!Literal) {
    // An ObjC string literal (@"...") is acceptable; use its underlying
    // C string literal.
    if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
      Literal = ObjcLiteral->getString();
    }
  }

  // The format string must be an ordinary or UTF-8 string literal.
  if (!Literal || (!Literal->isOrdinary() && !Literal->isUTF8())) {
    return ExprError(
        Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
        << Arg->getSourceRange());
  }

  // Convert the literal to 'const char *' as if passed to a parameter of
  // that type.
  ExprResult Result(Literal);
  QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(Context, ResultTy, false);
  Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
  return Result;
}

/// Check that the user is calling the appropriate va_start builtin for the
/// target and calling convention.
static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
  const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
  bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
  bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
                    TT.getArch() == llvm::Triple::aarch64_32);
  bool IsWindows = TT.isOSWindows();
  bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
  if (IsX64 || IsAArch64) {
    // Determine the calling convention of the enclosing function, defaulting
    // to the C convention when there is no current function declaration.
    CallingConv CC = CC_C;
    if (const FunctionDecl *FD = S.getCurFunctionDecl())
      CC = FD->getType()->castAs<FunctionType>()->getCallConv();
    if (IsMSVAStart) {
      // Don't allow this in System V ABI functions.
      if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_ms_va_start_used_in_sysv_function);
    } else {
      // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
      // On x64 Windows, don't allow this in System V ABI functions.
      // (Yes, that means there's no corresponding way to support variadic
      // System V ABI functions on Windows.)
      if ((IsWindows && CC == CC_X86_64SysV) ||
          (!IsWindows && CC == CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_va_start_used_in_wrong_abi_function)
               << !IsWindows;
    }
    return false;
  }

  // __builtin_ms_va_start is only supported on x86-64 and AArch64 targets.
  if (IsMSVAStart)
    return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
  return false;
}

/// Check that a va_start call appears inside a variadic function, block, or
/// ObjC method. On success, optionally reports the last named parameter via
/// \p LastParam (null when the parameter list is empty). Returns true and
/// emits a diagnostic on failure.
static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
                                             ParmVarDecl **LastParam = nullptr) {
  // Determine whether the current function, block, or obj-c method is variadic
  // and get its parameter list.
  bool IsVariadic = false;
  ArrayRef<ParmVarDecl *> Params;
  DeclContext *Caller = S.CurContext;
  if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
    IsVariadic = Block->isVariadic();
    Params = Block->parameters();
  } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
    IsVariadic = FD->isVariadic();
    Params = FD->parameters();
  } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
    IsVariadic = MD->isVariadic();
    // FIXME: This isn't correct for methods (results in bogus warning).
    Params = MD->parameters();
  } else if (isa<CapturedDecl>(Caller)) {
    // We don't support va_start in a CapturedDecl.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
    return true;
  } else {
    // This must be some other declcontext that parses exprs.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
    return true;
  }

  if (!IsVariadic) {
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
    return true;
  }

  if (LastParam)
    *LastParam = Params.empty() ? nullptr : Params.back();

  return false;
}

/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
/// for validity. Emit an error and return true on failure; return false
/// on success.
bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
  Expr *Fn = TheCall->getCallee();

  if (checkVAStartABI(*this, BuiltinID, Fn))
    return true;

  // In C2x mode, va_start only needs one argument. However, the builtin still
  // requires two arguments (which matches the behavior of the GCC builtin),
  // <stdarg.h> passes `0` as the second argument in C2x mode.
  if (checkArgCount(*this, TheCall, 2))
    return true;

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, TheCall, 0))
    return true;

  // Check that the current function is variadic, and get its last parameter.
  ParmVarDecl *LastParam;
  if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
    return true;

  // Verify that the second argument to the builtin is the last argument of the
  // current function or method. In C2x mode, if the second argument is an
  // integer constant expression with value 0, then we don't bother with this
  // check.
  bool SecondArgIsLastNamedArgument = false;
  const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
  if (std::optional<llvm::APSInt> Val =
          TheCall->getArg(1)->getIntegerConstantExpr(Context);
      Val && LangOpts.C2x && *Val == 0)
    return false;

  // These are valid if SecondArgIsLastNamedArgument is false after the next
  // block.
  QualType Type;
  SourceLocation ParamLoc;
  bool IsCRegister = false;

  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
    if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
      SecondArgIsLastNamedArgument = PV == LastParam;

      Type = PV->getType();
      ParamLoc = PV->getLocation();
      // 'register' parameters in C have undefined behavior with va_start.
      IsCRegister =
          PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
    }
  }

  if (!SecondArgIsLastNamedArgument)
    Diag(TheCall->getArg(1)->getBeginLoc(),
         diag::warn_second_arg_of_va_start_not_last_named_param);
  else if (IsCRegister || Type->isReferenceType() ||
           Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
             // Promotable integers are UB, but enumerations need a bit of
             // extra checking to see what their promotable type actually is.
             if (!Context.isPromotableIntegerType(Type))
               return false;
             if (!Type->isEnumeralType())
               return true;
             const EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
             return !(ED &&
                      Context.typesAreCompatible(ED->getPromotionType(), Type));
           }()) {
    unsigned Reason = 0;
    if (Type->isReferenceType()) Reason = 1;
    else if (IsCRegister)       Reason = 2;
    Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
    Diag(ParamLoc, diag::note_parameter_type) << Type;
  }

  return false;
}

bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
  auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool {
    const LangOptions &LO = getLangOpts();

    if (LO.CPlusPlus)
      return Arg->getType()
                 .getCanonicalType()
                 .getTypePtr()
                 ->getPointeeType()
                 .withoutLocalFastQualifiers() == Context.CharTy;

    // In C, allow aliasing through `char *`, this is required for AArch64 at
    // least.
8190 return true; 8191 }; 8192 8193 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 8194 // const char *named_addr); 8195 8196 Expr *Func = Call->getCallee(); 8197 8198 if (Call->getNumArgs() < 3) 8199 return Diag(Call->getEndLoc(), 8200 diag::err_typecheck_call_too_few_args_at_least) 8201 << 0 /*function call*/ << 3 << Call->getNumArgs(); 8202 8203 // Type-check the first argument normally. 8204 if (checkBuiltinArgument(*this, Call, 0)) 8205 return true; 8206 8207 // Check that the current function is variadic. 8208 if (checkVAStartIsInVariadicFunction(*this, Func)) 8209 return true; 8210 8211 // __va_start on Windows does not validate the parameter qualifiers 8212 8213 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 8214 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 8215 8216 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 8217 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 8218 8219 const QualType &ConstCharPtrTy = 8220 Context.getPointerType(Context.CharTy.withConst()); 8221 if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1)) 8222 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 8223 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 8224 << 0 /* qualifier difference */ 8225 << 3 /* parameter mismatch */ 8226 << 2 << Arg1->getType() << ConstCharPtrTy; 8227 8228 const QualType SizeTy = Context.getSizeType(); 8229 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 8230 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 8231 << Arg2->getType() << SizeTy << 1 /* different class */ 8232 << 0 /* qualifier difference */ 8233 << 3 /* parameter mismatch */ 8234 << 3 << Arg2->getType() << SizeTy; 8235 8236 return false; 8237 } 8238 8239 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 8240 /// friends. This is declared to take (...), so we have to check everything. 
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(
      OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  // Defer checking until instantiation if either operand is type-dependent.
  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}

/// SemaBuiltinFPClassification - Handle functions like
/// __builtin_isnan and friends. This is declared to take (...), so we have
/// to check everything. We expect the last argument to be a floating point
/// value.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  if (checkArgCount(*this, TheCall, NumArgs))
    return true;

  // Find out position of floating-point argument.
  unsigned FPArgNo = (NumArgs == 2) ? 0 : NumArgs - 1;

  // We can count on all parameters preceding the floating-point just being int.
  // Try all of those.
  for (unsigned i = 0; i < FPArgNo; ++i) {
    Expr *Arg = TheCall->getArg(i);

    if (Arg->isTypeDependent())
      return false;

    ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);

    if (Res.isInvalid())
      return true;
    TheCall->setArg(i, Res.get());
  }

  Expr *OrigArg = TheCall->getArg(FPArgNo);

  if (OrigArg->isTypeDependent())
    return false;

  // Usual Unary Conversions will convert half to float, which we want for
  // machines that use fp16 conversion intrinsics. Else, we want to leave the
  // type how it is, but do normal L->Rvalue conversions.
  if (Context.getTargetInfo().useFP16ConversionIntrinsics())
    OrigArg = UsualUnaryConversions(OrigArg).get();
  else
    OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
  TheCall->setArg(FPArgNo, OrigArg);

  // This operation requires a non-_Complex floating-point number.
  if (!OrigArg->getType()->isRealFloatingType())
    return Diag(OrigArg->getBeginLoc(),
                diag::err_typecheck_call_invalid_unary_fp)
           << OrigArg->getType() << OrigArg->getSourceRange();

  // __builtin_isfpclass has integer parameter that specify test mask. It is
  // passed in (...), so it should be analyzed completely here.
  if (NumArgs == 2)
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, llvm::fcAllFlags))
      return true;

  return false;
}

/// Perform semantic analysis for a call to __builtin_complex.
8332 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) { 8333 if (checkArgCount(*this, TheCall, 2)) 8334 return true; 8335 8336 bool Dependent = false; 8337 for (unsigned I = 0; I != 2; ++I) { 8338 Expr *Arg = TheCall->getArg(I); 8339 QualType T = Arg->getType(); 8340 if (T->isDependentType()) { 8341 Dependent = true; 8342 continue; 8343 } 8344 8345 // Despite supporting _Complex int, GCC requires a real floating point type 8346 // for the operands of __builtin_complex. 8347 if (!T->isRealFloatingType()) { 8348 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 8349 << Arg->getType() << Arg->getSourceRange(); 8350 } 8351 8352 ExprResult Converted = DefaultLvalueConversion(Arg); 8353 if (Converted.isInvalid()) 8354 return true; 8355 TheCall->setArg(I, Converted.get()); 8356 } 8357 8358 if (Dependent) { 8359 TheCall->setType(Context.DependentTy); 8360 return false; 8361 } 8362 8363 Expr *Real = TheCall->getArg(0); 8364 Expr *Imag = TheCall->getArg(1); 8365 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 8366 return Diag(Real->getBeginLoc(), 8367 diag::err_typecheck_call_different_arg_types) 8368 << Real->getType() << Imag->getType() 8369 << Real->getSourceRange() << Imag->getSourceRange(); 8370 } 8371 8372 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 8373 // don't allow this builtin to form those types either. 8374 // FIXME: Should we allow these types? 8375 if (Real->getType()->isFloat16Type()) 8376 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 8377 << "_Float16"; 8378 if (Real->getType()->isHalfType()) 8379 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 8380 << "half"; 8381 8382 TheCall->setType(Context.getComplexType(Real->getType())); 8383 return false; 8384 } 8385 8386 // Customized Sema Checking for VSX builtins that have the following signature: 8387 // vector [...] 
// builtinName(vector [...], vector [...], const int);
// Which takes the same type of vectors (any legal vector type) for the first
// two arguments and takes compile time constant for the third argument.
// Example builtins are :
// vector double vec_xxpermdi(vector double, vector double, int);
// vector short vec_xxsldwi(vector short, vector short, int);
bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
  unsigned ExpectedNumArgs = 3;
  if (checkArgCount(*this, TheCall, ExpectedNumArgs))
    return true;

  // Check the third argument is a compile time constant
  if (!TheCall->getArg(2)->isIntegerConstantExpr(Context))
    return Diag(TheCall->getBeginLoc(),
                diag::err_vsx_builtin_nonconstant_argument)
           << 3 /* argument index */ << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(2)->getBeginLoc(),
                          TheCall->getArg(2)->getEndLoc());

  QualType Arg1Ty = TheCall->getArg(0)->getType();
  QualType Arg2Ty = TheCall->getArg(1)->getType();

  // Check the type of argument 1 and argument 2 are vectors.
  SourceLocation BuiltinLoc = TheCall->getBeginLoc();
  if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
      (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
    return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
           << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                          TheCall->getArg(1)->getEndLoc());
  }

  // Check the first two arguments are the same type.
  if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
    return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
           << TheCall->getDirectCallee()
           << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                          TheCall->getArg(1)->getEndLoc());
  }

  // When default clang type checking is turned off and the customized type
  // checking is used, the returning type of the function must be explicitly
  // set. Otherwise it is _Bool by default.
  TheCall->setType(Arg1Ty);

  return false;
}

/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
// This is declared to take (...), so we have to check everything.
ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
  if (TheCall->getNumArgs() < 2)
    return ExprError(Diag(TheCall->getEndLoc(),
                          diag::err_typecheck_call_too_few_args_at_least)
                     << 0 /*function call*/ << 2 << TheCall->getNumArgs()
                     << TheCall->getSourceRange());

  // Determine which of the following types of shufflevector we're checking:
  // 1) unary, vector mask: (lhs, mask)
  // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
  QualType resType = TheCall->getArg(0)->getType();
  unsigned numElements = 0;

  if (!TheCall->getArg(0)->isTypeDependent() &&
      !TheCall->getArg(1)->isTypeDependent()) {
    QualType LHSType = TheCall->getArg(0)->getType();
    QualType RHSType = TheCall->getArg(1)->getType();

    if (!LHSType->isVectorType() || !RHSType->isVectorType())
      return ExprError(
          Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
          << TheCall->getDirectCallee()
          << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                         TheCall->getArg(1)->getEndLoc()));

    numElements = LHSType->castAs<VectorType>()->getNumElements();
    unsigned numResElements = TheCall->getNumArgs() - 2;

    // Check to see if we have a call with 2 vector arguments, the unary shuffle
    // with mask. If so, verify that RHS is an integer vector type with the
    // same number of elts as lhs.
    if (TheCall->getNumArgs() == 2) {
      if (!RHSType->hasIntegerRepresentation() ||
          RHSType->castAs<VectorType>()->getNumElements() != numElements)
        return ExprError(Diag(TheCall->getBeginLoc(),
                              diag::err_vec_builtin_incompatible_vector)
                         << TheCall->getDirectCallee()
                         << SourceRange(TheCall->getArg(1)->getBeginLoc(),
                                        TheCall->getArg(1)->getEndLoc()));
    } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_vec_builtin_incompatible_vector)
                       << TheCall->getDirectCallee()
                       << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                                      TheCall->getArg(1)->getEndLoc()));
    } else if (numElements != numResElements) {
      // The result vector has as many elements as there are indices; it may
      // differ in length from the operands.
      QualType eltType = LHSType->castAs<VectorType>()->getElementType();
      resType = Context.getVectorType(eltType, numResElements,
                                      VectorType::GenericVector);
    }
  }

  for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
    if (TheCall->getArg(i)->isTypeDependent() ||
        TheCall->getArg(i)->isValueDependent())
      continue;

    std::optional<llvm::APSInt> Result;
    if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context)))
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_nonconstant_argument)
                       << TheCall->getArg(i)->getSourceRange());

    // Allow -1 which will be translated to undef in the IR.
    if (Result->isSigned() && Result->isAllOnes())
      continue;

    // Indices select from the concatenation of both operands, hence the
    // numElements * 2 upper bound.
    if (Result->getActiveBits() > 64 ||
        Result->getZExtValue() >= numElements * 2)
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_argument_too_large)
                       << TheCall->getArg(i)->getSourceRange());
  }

  SmallVector<Expr*, 32> exprs;

  // Transfer ownership of all arguments into the ShuffleVectorExpr node.
  for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
    exprs.push_back(TheCall->getArg(i));
    TheCall->setArg(i, nullptr);
  }

  return new (Context) ShuffleVectorExpr(Context, exprs, resType,
                                         TheCall->getCallee()->getBeginLoc(),
                                         TheCall->getRParenLoc());
}

/// SemaConvertVectorExpr - Handle __builtin_convertvector
ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
                                       SourceLocation BuiltinLoc,
                                       SourceLocation RParenLoc) {
  ExprValueKind VK = VK_PRValue;
  ExprObjectKind OK = OK_Ordinary;
  QualType DstTy = TInfo->getType();
  QualType SrcTy = E->getType();

  if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
    return ExprError(Diag(BuiltinLoc,
                          diag::err_convertvector_non_vector)
                     << E->getSourceRange());
  if (!DstTy->isVectorType() && !DstTy->isDependentType())
    return ExprError(Diag(BuiltinLoc,
                          diag::err_convertvector_non_vector_type));

  // Source and destination vectors must have the same element count.
  if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
    unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements();
    unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements();
    if (SrcElts != DstElts)
      return ExprError(Diag(BuiltinLoc,
                            diag::err_convertvector_incompatible_vector)
                       << E->getSourceRange());
  }

  return new (Context)
      ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
}

/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
// This is declared to take (const void*, ...) and can take two
// optional constant int args.
8556 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 8557 unsigned NumArgs = TheCall->getNumArgs(); 8558 8559 if (NumArgs > 3) 8560 return Diag(TheCall->getEndLoc(), 8561 diag::err_typecheck_call_too_many_args_at_most) 8562 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 8563 8564 // Argument 0 is checked for us and the remaining arguments must be 8565 // constant integers. 8566 for (unsigned i = 1; i != NumArgs; ++i) 8567 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 8568 return true; 8569 8570 return false; 8571 } 8572 8573 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 8574 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 8575 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 8576 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 8577 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 8578 if (checkArgCount(*this, TheCall, 1)) 8579 return true; 8580 Expr *Arg = TheCall->getArg(0); 8581 if (Arg->isInstantiationDependent()) 8582 return false; 8583 8584 QualType ArgTy = Arg->getType(); 8585 if (!ArgTy->hasFloatingRepresentation()) 8586 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 8587 << ArgTy; 8588 if (Arg->isLValue()) { 8589 ExprResult FirstArg = DefaultLvalueConversion(Arg); 8590 TheCall->setArg(0, FirstArg.get()); 8591 } 8592 TheCall->setType(TheCall->getArg(0)->getType()); 8593 return false; 8594 } 8595 8596 /// SemaBuiltinAssume - Handle __assume (MS Extension). 8597 // __assume does not evaluate its arguments, and should warn if its argument 8598 // has side effects. 
8599 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 8600 Expr *Arg = TheCall->getArg(0); 8601 if (Arg->isInstantiationDependent()) return false; 8602 8603 if (Arg->HasSideEffects(Context)) 8604 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 8605 << Arg->getSourceRange() 8606 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 8607 8608 return false; 8609 } 8610 8611 /// Handle __builtin_alloca_with_align. This is declared 8612 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 8613 /// than 8. 8614 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 8615 // The alignment must be a constant integer. 8616 Expr *Arg = TheCall->getArg(1); 8617 8618 // We can't check the value of a dependent argument. 8619 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 8620 if (const auto *UE = 8621 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 8622 if (UE->getKind() == UETT_AlignOf || 8623 UE->getKind() == UETT_PreferredAlignOf) 8624 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 8625 << Arg->getSourceRange(); 8626 8627 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 8628 8629 if (!Result.isPowerOf2()) 8630 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 8631 << Arg->getSourceRange(); 8632 8633 if (Result < Context.getCharWidth()) 8634 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 8635 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 8636 8637 if (Result > std::numeric_limits<int32_t>::max()) 8638 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 8639 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 8640 } 8641 8642 return false; 8643 } 8644 8645 /// Handle __builtin_assume_aligned. This is declared 8646 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
  if (checkArgCountRange(*this, TheCall, 2, 3))
    return true;

  unsigned NumArgs = TheCall->getNumArgs();
  Expr *FirstArg = TheCall->getArg(0);

  {
    ExprResult FirstArgResult =
        DefaultFunctionArrayLvalueConversion(FirstArg);
    if (checkBuiltinArgument(*this, TheCall, 0))
      return true;
    // The in-place update of the argument made by checkBuiltinArgument is
    // deliberately discarded; the conversion computed above is stored instead.
    TheCall->setArg(0, FirstArgResult.get());
  }

  // The alignment must be a constant integer.
  Expr *SecondArg = TheCall->getArg(1);

  // We can't check the value of a dependent argument.
  if (!SecondArg->isValueDependent()) {
    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, 1, Result))
      return true;

    if (!Result.isPowerOf2())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
             << SecondArg->getSourceRange();

    // Alignments above the supported maximum only warn; they are not errors.
    if (Result > Sema::MaximumAlignment)
      Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
          << SecondArg->getSourceRange() << Sema::MaximumAlignment;
  }

  // Optional third argument: the misalignment offset, converted to size_t.
  if (NumArgs > 2) {
    Expr *ThirdArg = TheCall->getArg(2);
    if (convertArgumentToType(*this, ThirdArg, Context.getSizeType()))
      return true;
    TheCall->setArg(2, ThirdArg);
  }

  return false;
}

/// Check calls to __builtin_os_log_format and
/// __builtin_os_log_format_buffer_size: validate the optional buffer
/// argument, the format string, and all variadic data arguments, then set
/// the call's result type.
bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
  unsigned BuiltinID =
      cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
  bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;

  // The size variant takes (format, ...); the format variant (buffer,
  // format, ...).
  unsigned NumArgs = TheCall->getNumArgs();
  unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
  if (NumArgs < NumRequiredArgs) {
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /* function call */ << NumRequiredArgs << NumArgs
           << TheCall->getSourceRange();
  }
  // At most 255 data arguments are supported.
  if (NumArgs >= NumRequiredArgs + 0x100) {
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
           << TheCall->getSourceRange();
  }
  unsigned i = 0;

  // For formatting call, check buffer arg.
  if (!IsSizeCall) {
    ExprResult Arg(TheCall->getArg(i));
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        Context, Context.VoidPtrTy, false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check string literal arg.
  unsigned FormatIdx = i;
  {
    ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Make sure variadic args are scalar.
  unsigned FirstDataArg = i;
  while (i < NumArgs) {
    ExprResult Arg = DefaultVariadicArgumentPromotion(
        TheCall->getArg(i), VariadicFunction, nullptr);
    if (Arg.isInvalid())
      return true;
    // Each argument must fit in one byte of encoded size.
    CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
    if (ArgSize.getQuantity() >= 0x100) {
      return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
             << i << (int)ArgSize.getQuantity() << 0xff
             << TheCall->getSourceRange();
    }
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check formatting specifiers. NOTE: We're only doing this for the non-size
  // call to avoid duplicate diagnostics.
  if (!IsSizeCall) {
    llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
    ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
    bool Success = CheckFormatArguments(
        Args, FAPK_Variadic, FormatIdx, FirstDataArg, FST_OSLog,
        VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
        CheckedVarArgs);
    if (!Success)
      return true;
  }

  if (IsSizeCall) {
    TheCall->setType(Context.getSizeType());
  } else {
    TheCall->setType(Context.VoidPtrTy);
  }
  return false;
}

/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression.
bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                                  llvm::APSInt &Result) {
  Expr *Arg = TheCall->getArg(ArgNum);
  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // We can't evaluate dependent arguments; defer to instantiation.
  if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;

  std::optional<llvm::APSInt> R;
  if (!(R = Arg->getIntegerConstantExpr(Context)))
    return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
           << FDecl->getDeclName() << Arg->getSourceRange();
  Result = *R;
  return false;
}

/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression in the range [Low, High].
bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                       int Low, int High, bool RangeIsError) {
  if (isConstantEvaluated())
    return false;
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
    if (RangeIsError)
      return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
             << toString(Result, 10) << Low << High << Arg->getSourceRange();
    else
      // Defer the warning until we know if the code will be emitted so that
      // dead code can ignore this.
      DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                          PDiag(diag::warn_argument_invalid_range)
                              << toString(Result, 10) << Low << High
                              << Arg->getSourceRange());
  }

  return false;
}

/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of
/// CallExpr TheCall is a constant expression that is a multiple of Num.
bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
                                          unsigned Num) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() % Num != 0)
    return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
           << Num << Arg->getSourceRange();

  return false;
}

/// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
/// constant expression representing a power of 2.
bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
  // and only if x is a power of 2.
  if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
         << Arg->getSourceRange();
}

/// Return true if \p Value is a non-negative byte value shifted left by a
/// multiple of 8 bits (i.e. at most one contiguous byte of set bits).
static bool IsShiftedByte(llvm::APSInt Value) {
  if (Value.isNegative())
    return false;

  // Check if it's a shifted byte, by shifting it down
  while (true) {
    // If the value fits in the bottom byte, the check passes.
    if (Value < 0x100)
      return true;

    // Otherwise, if the value has _any_ bits in the bottom byte, the check
    // fails.
    if ((Value & 0xFF) != 0)
      return false;

    // If the bottom 8 bits are all 0, but something above that is nonzero,
    // then shifting the value right by 8 bits won't affect whether it's a
    // shifted byte or not. So do that, and go round again.
    Value >>= 8;
  }
}

/// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
/// a constant expression representing an arbitrary byte value shifted left by
/// a multiple of 8 bits.
bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
                                             unsigned ArgBits) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Truncate to the given size.
  Result = Result.getLoBits(ArgBits);
  Result.setIsUnsigned(true);

  if (IsShiftedByte(Result))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte)
         << Arg->getSourceRange();
}

/// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of
/// TheCall is a constant expression representing either a shifted byte value,
/// or a value of the form 0x??FF (i.e. a member of the arithmetic progression
/// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
/// Arm MVE intrinsics.
bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
                                                   int ArgNum,
                                                   unsigned ArgBits) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Truncate to the given size.
  Result = Result.getLoBits(ArgBits);
  Result.setIsUnsigned(true);

  // Check to see if it's in either of the required forms.
  if (IsShiftedByte(Result) ||
      (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF))
    return false;

  return Diag(TheCall->getBeginLoc(),
              diag::err_argument_not_shifted_byte_or_xxff)
         << Arg->getSourceRange();
}

/// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
  if (BuiltinID == AArch64::BI__builtin_arm_irg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    ExprResult SecArg = DefaultLvalueConversion(Arg1);
    if (SecArg.isInvalid())
      return true;
    QualType SecArgType = SecArg.get()->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();

    // Derive the return type from the pointer argument.
8975 TheCall->setType(FirstArgType); 8976 return false; 8977 } 8978 8979 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 8980 if (checkArgCount(*this, TheCall, 2)) 8981 return true; 8982 8983 Expr *Arg0 = TheCall->getArg(0); 8984 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 8985 if (FirstArg.isInvalid()) 8986 return true; 8987 QualType FirstArgType = FirstArg.get()->getType(); 8988 if (!FirstArgType->isAnyPointerType()) 8989 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 8990 << "first" << FirstArgType << Arg0->getSourceRange(); 8991 TheCall->setArg(0, FirstArg.get()); 8992 8993 // Derive the return type from the pointer argument. 8994 TheCall->setType(FirstArgType); 8995 8996 // Second arg must be an constant in range [0,15] 8997 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 8998 } 8999 9000 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 9001 if (checkArgCount(*this, TheCall, 2)) 9002 return true; 9003 Expr *Arg0 = TheCall->getArg(0); 9004 Expr *Arg1 = TheCall->getArg(1); 9005 9006 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 9007 if (FirstArg.isInvalid()) 9008 return true; 9009 QualType FirstArgType = FirstArg.get()->getType(); 9010 if (!FirstArgType->isAnyPointerType()) 9011 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 9012 << "first" << FirstArgType << Arg0->getSourceRange(); 9013 9014 QualType SecArgType = Arg1->getType(); 9015 if (!SecArgType->isIntegerType()) 9016 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 9017 << "second" << SecArgType << Arg1->getSourceRange(); 9018 TheCall->setType(Context.IntTy); 9019 return false; 9020 } 9021 9022 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 9023 BuiltinID == AArch64::BI__builtin_arm_stg) { 9024 if (checkArgCount(*this, TheCall, 1)) 9025 return true; 9026 Expr *Arg0 = TheCall->getArg(0); 9027 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 9028 if 
(FirstArg.isInvalid()) 9029 return true; 9030 9031 QualType FirstArgType = FirstArg.get()->getType(); 9032 if (!FirstArgType->isAnyPointerType()) 9033 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 9034 << "first" << FirstArgType << Arg0->getSourceRange(); 9035 TheCall->setArg(0, FirstArg.get()); 9036 9037 // Derive the return type from the pointer argument. 9038 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 9039 TheCall->setType(FirstArgType); 9040 return false; 9041 } 9042 9043 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 9044 Expr *ArgA = TheCall->getArg(0); 9045 Expr *ArgB = TheCall->getArg(1); 9046 9047 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 9048 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 9049 9050 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 9051 return true; 9052 9053 QualType ArgTypeA = ArgExprA.get()->getType(); 9054 QualType ArgTypeB = ArgExprB.get()->getType(); 9055 9056 auto isNull = [&] (Expr *E) -> bool { 9057 return E->isNullPointerConstant( 9058 Context, Expr::NPC_ValueDependentIsNotNull); }; 9059 9060 // argument should be either a pointer or null 9061 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 9062 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 9063 << "first" << ArgTypeA << ArgA->getSourceRange(); 9064 9065 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 9066 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 9067 << "second" << ArgTypeB << ArgB->getSourceRange(); 9068 9069 // Ensure Pointee types are compatible 9070 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 9071 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 9072 QualType pointeeA = ArgTypeA->getPointeeType(); 9073 QualType pointeeB = ArgTypeB->getPointeeType(); 9074 if (!Context.typesAreCompatible( 9075 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 9076 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 9077 
return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 9078 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 9079 << ArgB->getSourceRange(); 9080 } 9081 } 9082 9083 // at least one argument should be pointer type 9084 if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 9085 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 9086 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 9087 9088 if (isNull(ArgA)) // adopt type of the other pointer 9089 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 9090 9091 if (isNull(ArgB)) 9092 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 9093 9094 TheCall->setArg(0, ArgExprA.get()); 9095 TheCall->setArg(1, ArgExprB.get()); 9096 TheCall->setType(Context.LongLongTy); 9097 return false; 9098 } 9099 assert(false && "Unhandled ARM MTE intrinsic"); 9100 return true; 9101 } 9102 9103 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 9104 /// TheCall is an ARM/AArch64 special register string literal. 
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                    int ArgNum, unsigned ExpectedFieldNum,
                                    bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given. Colon-separated fields encode
  // an explicit (co)processor register specification; a single field is a
  // register name.
  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(Fields, ":");

  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one of the forms described in ACLE then
  // we can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      // On ARM, fields carry textual prefixes ("cp"/"p" for the coprocessor,
      // "c" for register numbers); strip them before the numeric range check.
      ValidString &= Fields[0].starts_with_insensitive("cp") ||
                     Fields[0].starts_with_insensitive("p");
      if (ValidString)
        Fields[0] = Fields[0].drop_front(
            Fields[0].starts_with_insensitive("cp") ? 2 : 1);

      ValidString &= Fields[2].starts_with_insensitive("c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(1);

      if (FiveFields) {
        ValidString &= Fields[3].starts_with_insensitive("c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(1);
      }
    }

    // Upper bounds for each field (all lower bounds are 0). The first field's
    // bound differs between AArch64 (op0, max 1 beyond the implicit bit) and
    // ARM (coprocessor number, max 15).
    SmallVector<int, 5> Ranges;
    if (FiveFields)
      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
    else
      Ranges.append({15, 7, 15});

    for (unsigned i=0; i<Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(10, IntField);
      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
    }

    if (!ValidString)
      return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // This code validates writes to PSTATE registers.

    // Not a write.
    if (TheCall->getNumArgs() != 2)
      return false;

    // The 128-bit system register accesses do not touch PSTATE.
    if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
        BuiltinID == AArch64::BI__builtin_arm_wsr128)
      return false;

    // These are the named PSTATE accesses using "MSR (immediate)" instructions,
    // along with the upper limit on the immediates allowed.
    auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
      .CaseLower("spsel", 15)
      .CaseLower("daifclr", 15)
      .CaseLower("daifset", 15)
      .CaseLower("pan", 15)
      .CaseLower("uao", 15)
      .CaseLower("dit", 15)
      .CaseLower("ssbs", 15)
      .CaseLower("tco", 15)
      .CaseLower("allint", 1)
      .CaseLower("pm", 1)
      .Default(std::nullopt);

    // If this is not a named PSTATE, just continue without validating, as this
    // will be lowered to an "MSR (register)" instruction directly
    if (!MaxLimit)
      return false;

    // Here we only allow constants in the range for that pstate, as required by
    // the ACLE.
    //
    // While clang also accepts the names of system registers in its ACLE
    // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
    // as the value written via a register is different to the value used as an
    // immediate to have the same effect. e.g., for the instruction `msr tco,
    // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
    // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
    //
    // If a programmer wants to codegen the MSR (register) form of `msr tco,
    // xN`, they can still do so by specifying the register using five
    // colon-separated numbers in a string.
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, *MaxLimit);
  }

  return false;
}

/// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
/// Emit an error and return true on failure; return false on success.
/// TypeStr is a string containing the type descriptor of the value returned by
/// the builtin and the descriptors of the expected type of the arguments.
bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
                                 const char *TypeStr) {

  assert((TypeStr[0] != '\0') &&
         "Invalid types in PPC MMA builtin declaration");

  unsigned Mask = 0;
  unsigned ArgNum = 0;

  // The first type in TypeStr is the type of the value returned by the
  // builtin. So we first read that type and change the type of TheCall.
  QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
  TheCall->setType(type);

  // Walk the remaining descriptors in lock-step with the call's arguments.
  while (*TypeStr != '\0') {
    Mask = 0;
    QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    // Too few arguments supplied: stop and let the trailing count check below
    // produce the diagnostic.
    if (ArgNum >= TheCall->getNumArgs()) {
      ArgNum++;
      break;
    }

    Expr *Arg = TheCall->getArg(ArgNum);
    QualType PassedType = Arg->getType();
    QualType StrippedRVType = PassedType.getCanonicalType();

    // Strip Restrict/Volatile qualifiers.
    // NOTE(review): getUnqualifiedType() drops all qualifiers, including
    // const, not just restrict/volatile — presumably intended; confirm.
    if (StrippedRVType.isRestrictQualified() ||
        StrippedRVType.isVolatileQualified())
      StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();

    // The only case where the argument type and expected type are allowed to
    // mismatch is if the argument type is a non-void pointer (or array) and
    // expected type is a void pointer.
    if (StrippedRVType != ExpectedType)
      if (!(ExpectedType->isVoidPointerType() &&
            (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
        return Diag(Arg->getBeginLoc(),
                    diag::err_typecheck_convert_incompatible)
               << PassedType << ExpectedType << 1 << 0 << 0;

    // If the value of the Mask is not 0, we have a constraint in the size of
    // the integer argument so here we ensure the argument is a constant that
    // is in the valid range.
    if (Mask != 0 &&
        SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
      return true;

    ArgNum++;
  }

  // In case we exited early from the previous loop, there are other types to
  // read from TypeStr. So we need to read them all to ensure we have the right
  // number of arguments in TheCall and if it is not the case, to display a
  // better error message.
  while (*TypeStr != '\0') {
    (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    ArgNum++;
  }
  if (checkArgCount(*this, TheCall, ArgNum))
    return true;

  return false;
}

/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
/// This checks that the target supports __builtin_longjmp and
/// that val is a constant 1.
bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());

  Expr *Arg = TheCall->getArg(1);
  llvm::APSInt Result;

  // TODO: This is less than ideal. Overload this to take a value.
  if (SemaBuiltinConstantArg(TheCall, 1, Result))
    return true;

  if (Result != 1)
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val)
           << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc());

  return false;
}

/// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
/// This checks that the target supports __builtin_setjmp.
bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
  return false;
}

namespace {

// Tracks, across all candidate format strings of one call, the highest data
// argument index not consumed by any format specifier, together with the
// format-string expressions that exhibit it, for later diagnosis.
class UncoveredArgHandler {
  // Sentinels for FirstUncoveredArg: Unknown = nothing recorded yet,
  // AllCovered = some string covered every argument (suppresses diagnostics).
  enum { Unknown = -1, AllCovered = -2 };

  signed FirstUncoveredArg = Unknown;
  SmallVector<const Expr *, 4> DiagnosticExprs;

public:
  UncoveredArgHandler() = default;

  bool hasUncoveredArg() const {
    return (FirstUncoveredArg >= 0);
  }

  unsigned getUncoveredArg() const {
    assert(hasUncoveredArg() && "no uncovered argument");
    return FirstUncoveredArg;
  }

  void setAllCovered() {
    // A string has been found with all arguments covered, so clear out
    // the diagnostics.
    DiagnosticExprs.clear();
    FirstUncoveredArg = AllCovered;
  }

  void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
    assert(NewFirstUncoveredArg >= 0 && "Outside range");

    // Don't update if a previous string covers all arguments.
    if (FirstUncoveredArg == AllCovered)
      return;

    // UncoveredArgHandler tracks the highest uncovered argument index
    // and with it all the strings that match this index.
    if (NewFirstUncoveredArg == FirstUncoveredArg)
      DiagnosticExprs.push_back(StrExpr);
    else if (NewFirstUncoveredArg > FirstUncoveredArg) {
      // A strictly higher uncovered index supersedes all previously
      // collected strings.
      DiagnosticExprs.clear();
      DiagnosticExprs.push_back(StrExpr);
      FirstUncoveredArg = NewFirstUncoveredArg;
    }
  }

  void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr);
};

// Result of classifying a candidate format-string expression; ordered so that
// a "worse" result compares lower (NotALiteral < Unchecked < Checked).
enum StringLiteralCheckType {
  SLCT_NotALiteral,
  SLCT_UncheckedLiteral,
  SLCT_CheckedLiteral
};

} // namespace

// Accumulate Addend into Offset (Offset += Addend or Offset -= Addend),
// widening both APSInts as needed so the signed arithmetic cannot silently
// wrap; on overflow, doubles Offset's bit width and retries recursively.
static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
                       BinaryOperatorKind BinOpKind,
                       bool AddendIsRight) {
  unsigned BitWidth = Offset.getBitWidth();
  unsigned AddendBitWidth = Addend.getBitWidth();
  // There might be negative interim results.
  if (Addend.isUnsigned()) {
    Addend = Addend.zext(++AddendBitWidth);
    Addend.setIsSigned(true);
  }
  // Adjust the bit width of the APSInts.
  if (AddendBitWidth > BitWidth) {
    Offset = Offset.sext(AddendBitWidth);
    BitWidth = AddendBitWidth;
  } else if (BitWidth > AddendBitWidth) {
    Addend = Addend.sext(BitWidth);
  }

  bool Ov = false;
  llvm::APSInt ResOffset = Offset;
  if (BinOpKind == BO_Add)
    ResOffset = Offset.sadd_ov(Addend, Ov);
  else {
    assert(AddendIsRight && BinOpKind == BO_Sub &&
           "operator must be add or sub with addend on the right");
    ResOffset = Offset.ssub_ov(Addend, Ov);
  }

  // We add an offset to a pointer here so we should support an offset as big as
  // possible.
  if (Ov) {
    assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
           "index (intermediate) result too big");
    Offset = Offset.sext(2 * BitWidth);
    sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
    return;
  }

  Offset = ResOffset;
}

namespace {

// This is a wrapper class around StringLiteral to support offsetted string
// literals as format strings. It takes the offset into account when returning
// the string and its length or the source locations to display notes correctly.
class FormatStringLiteral {
  const StringLiteral *FExpr;
  int64_t Offset;

 public:
  FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
      : FExpr(fexpr), Offset(Offset) {}

  // The literal's contents starting at Offset characters in.
  StringRef getString() const {
    return FExpr->getString().drop_front(Offset);
  }

  // Byte length of the remaining contents; Offset is scaled by the character
  // width since it counts characters, not bytes.
  unsigned getByteLength() const {
    return FExpr->getByteLength() - getCharByteWidth() * Offset;
  }

  unsigned getLength() const { return FExpr->getLength() - Offset; }
  unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }

  StringLiteral::StringKind getKind() const { return FExpr->getKind(); }

  QualType getType() const { return FExpr->getType(); }

  bool isAscii() const { return FExpr->isOrdinary(); }
  bool isWide() const { return FExpr->isWide(); }
  bool isUTF8() const { return FExpr->isUTF8(); }
  bool isUTF16() const { return FExpr->isUTF16(); }
  bool isUTF32() const { return FExpr->isUTF32(); }
  bool isPascal() const { return FExpr->isPascal(); }

  // Source location of byte ByteNo of the offsetted string, i.e. byte
  // ByteNo + Offset of the underlying literal.
  SourceLocation getLocationOfByte(
      unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
      const TargetInfo &Target, unsigned *StartToken = nullptr,
      unsigned *StartTokenByteOffset = nullptr) const {
    return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
                                    StartToken, StartTokenByteOffset);
  }

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return FExpr->getBeginLoc().getLocWithOffset(Offset);
  }

  SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
};

} // namespace

static void CheckFormatString(
    Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
    ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
    unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
    bool inFunctionCall, Sema::VariadicCallType CallType,
    llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
    bool IgnoreStringsWithoutSpecifiers);

static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
                                               const Expr *E);

// Determine if an expression is a string literal or constant string.
// If this function returns false on the arguments to a function expecting a
// format string, we will usually need to emit a warning.
// True string literals are then checked by CheckFormatString.
static StringLiteralCheckType
checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
                      Sema::FormatArgumentPassingKind APK, unsigned format_idx,
                      unsigned firstDataArg, Sema::FormatStringType Type,
                      Sema::VariadicCallType CallType, bool InFunctionCall,
                      llvm::SmallBitVector &CheckedVarArgs,
                      UncoveredArgHandler &UncoveredArg, llvm::APSInt Offset,
                      bool IgnoreStringsWithoutSpecifiers = false) {
  if (S.isConstantEvaluated())
    return SLCT_NotALiteral;
// Re-entry point: several cases below peel one layer off E (casts, additive
// offsets, address-of-subscript) and loop back here.
tryAgain:
  assert(Offset.isSigned() && "invalid offset");

  if (E->isTypeDependent() || E->isValueDependent())
    return SLCT_NotALiteral;

  E = E->IgnoreParenCasts();

  if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
    // Technically -Wformat-nonliteral does not warn about this case.
    // The behavior of printf and friends in this case is implementation
    // dependent. Ideally if the format string cannot be null then
    // it should have a 'nonnull' attribute in the function prototype.
    return SLCT_UncheckedLiteral;

  switch (E->getStmtClass()) {
  case Stmt::InitListExprClass:
    // Handle expressions like {"foobar"}.
    if (const clang::Expr *SLE = maybeConstEvalStringLiteral(S.Context, E)) {
      return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg,
                                   Type, CallType, /*InFunctionCall*/ false,
                                   CheckedVarArgs, UncoveredArg, Offset,
                                   IgnoreStringsWithoutSpecifiers);
    }
    return SLCT_NotALiteral;
  case Stmt::BinaryConditionalOperatorClass:
  case Stmt::ConditionalOperatorClass: {
    // The expression is a literal if both sub-expressions were, and it was
    // completely checked only if both sub-expressions were checked.
    const AbstractConditionalOperator *C =
        cast<AbstractConditionalOperator>(E);

    // Determine whether it is necessary to check both sub-expressions, for
    // example, because the condition expression is a constant that can be
    // evaluated at compile time.
    bool CheckLeft = true, CheckRight = true;

    bool Cond;
    if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(),
                                                 S.isConstantEvaluated())) {
      if (Cond)
        CheckRight = false;
      else
        CheckLeft = false;
    }

    // We need to maintain the offsets for the right and the left hand side
    // separately to check if every possible indexed expression is a valid
    // string literal. They might have different offsets for different string
    // literals in the end.
    StringLiteralCheckType Left;
    if (!CheckLeft)
      Left = SLCT_UncheckedLiteral;
    else {
      Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, APK, format_idx,
                                   firstDataArg, Type, CallType, InFunctionCall,
                                   CheckedVarArgs, UncoveredArg, Offset,
                                   IgnoreStringsWithoutSpecifiers);
      if (Left == SLCT_NotALiteral || !CheckRight) {
        return Left;
      }
    }

    StringLiteralCheckType Right = checkFormatStringExpr(
        S, C->getFalseExpr(), Args, APK, format_idx, firstDataArg, Type,
        CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
        IgnoreStringsWithoutSpecifiers);

    // Enum ordering makes the minimum the "weaker" of the two results.
    return (CheckLeft && Left < Right) ? Left : Right;
  }

  case Stmt::ImplicitCastExprClass:
    E = cast<ImplicitCastExpr>(E)->getSubExpr();
    goto tryAgain;

  case Stmt::OpaqueValueExprClass:
    if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
      E = src;
      goto tryAgain;
    }
    return SLCT_NotALiteral;

  case Stmt::PredefinedExprClass:
    // While __func__, etc., are technically not string literals, they
    // cannot contain format specifiers and thus are not a security
    // liability.
    return SLCT_UncheckedLiteral;

  case Stmt::DeclRefExprClass: {
    const DeclRefExpr *DR = cast<DeclRefExpr>(E);

    // As an exception, do not flag errors for variables binding to
    // const string literals.
    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
      bool isConstant = false;
      QualType T = DR->getType();

      if (const ArrayType *AT = S.Context.getAsArrayType(T)) {
        isConstant = AT->getElementType().isConstant(S.Context);
      } else if (const PointerType *PT = T->getAs<PointerType>()) {
        isConstant = T.isConstant(S.Context) &&
                     PT->getPointeeType().isConstant(S.Context);
      } else if (T->isObjCObjectPointerType()) {
        // In ObjC, there is usually no "const ObjectPointer" type,
        // so don't check if the pointee type is constant.
        isConstant = T.isConstant(S.Context);
      }

      if (isConstant) {
        if (const Expr *Init = VD->getAnyInitializer()) {
          // Look through initializers like const char c[] = { "foo" }
          if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
            if (InitList->isStringLiteralInit())
              Init = InitList->getInit(0)->IgnoreParenImpCasts();
          }
          return checkFormatStringExpr(
              S, Init, Args, APK, format_idx, firstDataArg, Type, CallType,
              /*InFunctionCall*/ false, CheckedVarArgs, UncoveredArg, Offset);
        }
      }

      // When the format argument is an argument of this function, and this
      // function also has the format attribute, there are several interactions
      // for which there shouldn't be a warning. For instance, when calling
      // v*printf from a function that has the printf format attribute, we
      // should not emit a warning about using `fmt`, even though it's not
      // constant, because the arguments have already been checked for the
      // caller of `logmessage`:
      //
      //  __attribute__((format(printf, 1, 2)))
      //  void logmessage(char const *fmt, ...) {
      //    va_list ap;
      //    va_start(ap, fmt);
      //    vprintf(fmt, ap);  /* do not emit a warning about "fmt" */
      //    ...
      // }
      //
      // Another interaction that we need to support is calling a variadic
      // format function from a format function that has fixed arguments. For
      // instance:
      //
      //  __attribute__((format(printf, 1, 2)))
      //  void logstring(char const *fmt, char const *str) {
      //    printf(fmt, str);  /* do not emit a warning about "fmt" */
      //  }
      //
      // Same (and perhaps more relatably) for the variadic template case:
      //
      //  template<typename... Args>
      //  __attribute__((format(printf, 1, 2)))
      //  void log(const char *fmt, Args&&... args) {
      //    printf(fmt, forward<Args>(args)...);
      //    /* do not emit a warning about "fmt" */
      //  }
      //
      // Due to implementation difficulty, we only check the format, not the
      // format arguments, in all cases.
      //
      if (const auto *PV = dyn_cast<ParmVarDecl>(VD)) {
        if (const auto *D = dyn_cast<Decl>(PV->getDeclContext())) {
          for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) {
            bool IsCXXMember = false;
            if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
              IsCXXMember = MD->isInstance();

            bool IsVariadic = false;
            if (const FunctionType *FnTy = D->getFunctionType())
              IsVariadic = cast<FunctionProtoType>(FnTy)->isVariadic();
            else if (const auto *BD = dyn_cast<BlockDecl>(D))
              IsVariadic = BD->isVariadic();
            else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(D))
              IsVariadic = OMD->isVariadic();

            Sema::FormatStringInfo CallerFSI;
            if (Sema::getFormatStringInfo(PVFormat, IsCXXMember, IsVariadic,
                                          &CallerFSI)) {
              // We also check if the formats are compatible.
              // We can't pass a 'scanf' string to a 'printf' function.
              if (PV->getFunctionScopeIndex() == CallerFSI.FormatIdx &&
                  Type == S.GetFormatStringType(PVFormat)) {
                // Lastly, check that argument passing kinds transition in a
                // way that makes sense:
                // from a caller with FAPK_VAList, allow FAPK_VAList
                // from a caller with FAPK_Fixed, allow FAPK_Fixed
                // from a caller with FAPK_Fixed, allow FAPK_Variadic
                // from a caller with FAPK_Variadic, allow FAPK_VAList
                switch (combineFAPK(CallerFSI.ArgPassingKind, APK)) {
                case combineFAPK(Sema::FAPK_VAList, Sema::FAPK_VAList):
                case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Fixed):
                case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Variadic):
                case combineFAPK(Sema::FAPK_Variadic, Sema::FAPK_VAList):
                  return SLCT_UncheckedLiteral;
                }
              }
            }
          }
        }
      }
    }

    return SLCT_NotALiteral;
  }

  case Stmt::CallExprClass:
  case Stmt::CXXMemberCallExprClass: {
    const CallExpr *CE = cast<CallExpr>(E);
    if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
      bool IsFirst = true;
      StringLiteralCheckType CommonResult;
      // Follow format_arg attributes: the callee forwards one of its
      // arguments as the format string; check that argument instead.
      for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
        const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
        StringLiteralCheckType Result = checkFormatStringExpr(
            S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
            InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
            IgnoreStringsWithoutSpecifiers);
        if (IsFirst) {
          CommonResult = Result;
          IsFirst = false;
        }
      }
      if (!IsFirst)
        return CommonResult;

      if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
        unsigned BuiltinID = FD->getBuiltinID();
        if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
            BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
          const Expr *Arg = CE->getArg(0);
          return checkFormatStringExpr(
              S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
              InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
              IgnoreStringsWithoutSpecifiers);
        }
      }
    }
    if (const Expr *SLE = maybeConstEvalStringLiteral(S.Context, E))
      return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg,
                                   Type, CallType, /*InFunctionCall*/ false,
                                   CheckedVarArgs, UncoveredArg, Offset,
                                   IgnoreStringsWithoutSpecifiers);
    return SLCT_NotALiteral;
  }
  case Stmt::ObjCMessageExprClass: {
    const auto *ME = cast<ObjCMessageExpr>(E);
    if (const auto *MD = ME->getMethodDecl()) {
      if (const auto *FA = MD->getAttr<FormatArgAttr>()) {
        // As a special case heuristic, if we're using the method -[NSBundle
        // localizedStringForKey:value:table:], ignore any key strings that lack
        // format specifiers. The idea is that if the key doesn't have any
        // format specifiers then it's probably just a key to map to the
        // localized strings. If it does have format specifiers though, then
        // it's likely that the text of the key is the format string in the
        // programmer's language, and should be checked.
        const ObjCInterfaceDecl *IFace;
        if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
            IFace->getIdentifier()->isStr("NSBundle") &&
            MD->getSelector().isKeywordSelector(
                {"localizedStringForKey", "value", "table"})) {
          IgnoreStringsWithoutSpecifiers = true;
        }

        const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
        return checkFormatStringExpr(
            S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
            InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
            IgnoreStringsWithoutSpecifiers);
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::ObjCStringLiteralClass:
  case Stmt::StringLiteralClass: {
    const StringLiteral *StrE = nullptr;

    if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
      StrE = ObjCFExpr->getString();
    else
      StrE = cast<StringLiteral>(E);

    if (StrE) {
      if (Offset.isNegative() || Offset > StrE->getLength()) {
        // TODO: It would be better to have an explicit warning for out of
        // bounds literals.
        return SLCT_NotALiteral;
      }
      FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
      CheckFormatString(S, &FStr, E, Args, APK, format_idx, firstDataArg, Type,
                        InFunctionCall, CallType, CheckedVarArgs, UncoveredArg,
                        IgnoreStringsWithoutSpecifiers);
      return SLCT_CheckedLiteral;
    }

    return SLCT_NotALiteral;
  }
  case Stmt::BinaryOperatorClass: {
    const BinaryOperator *BinOp = cast<BinaryOperator>(E);

    // A string literal + an int offset is still a string literal.
    if (BinOp->isAdditiveOp()) {
      Expr::EvalResult LResult, RResult;

      bool LIsInt = BinOp->getLHS()->EvaluateAsInt(
          LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());
      bool RIsInt = BinOp->getRHS()->EvaluateAsInt(
          RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());

      // Exactly one side must be a constant integer; fold it into Offset and
      // recurse on the other side. (int - ptr is not a string expression.)
      if (LIsInt != RIsInt) {
        BinaryOperatorKind BinOpKind = BinOp->getOpcode();

        if (LIsInt) {
          if (BinOpKind == BO_Add) {
            sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt);
            E = BinOp->getRHS();
            goto tryAgain;
          }
        } else {
          sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt);
          E = BinOp->getLHS();
          goto tryAgain;
        }
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::UnaryOperatorClass: {
    const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
    auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
    // &str[i] is equivalent to str + i: fold the constant index into Offset.
    if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
      Expr::EvalResult IndexResult;
      if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context,
                                       Expr::SE_NoSideEffects,
                                       S.isConstantEvaluated())) {
        sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add,
                   /*RHS is int*/ true);
        E = ASE->getBase();
        goto tryAgain;
      }
    }

    return SLCT_NotALiteral;
  }

  default:
    return SLCT_NotALiteral;
  }
}

// If this expression can be evaluated at compile-time,
// check if the result is a StringLiteral and return it
// otherwise return nullptr
static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
                                               const Expr *E) {
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, Context) && Result.Val.isLValue()) {
    const auto *LVE = Result.Val.getLValueBase().dyn_cast<const Expr *>();
    if (isa_and_nonnull<StringLiteral>(LVE))
      return LVE;
  }
  return nullptr;
}

Sema::FormatStringType
Sema::GetFormatStringType(const FormatAttr *Format) {
  // Map the format attribute's archetype name to the corresponding
  // FormatStringType. Note that "os_trace" is deliberately checked with the
  // os_log rules.
  return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
      .Case("scanf", FST_Scanf)
      .Cases("printf", "printf0", FST_Printf)
      .Cases("NSString", "CFString", FST_NSString)
      .Case("strftime", FST_Strftime)
      .Case("strfmon", FST_Strfmon)
      .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
      .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
      .Case("os_trace", FST_OSLog)
      .Case("os_log", FST_OSLog)
      .Default(FST_Unknown);
}

/// CheckFormatArguments - Check calls to printf and scanf (and similar
/// functions) for correct use of format strings.
/// Returns true if a format string has been fully checked.
bool Sema::CheckFormatArguments(const FormatAttr *Format,
                                ArrayRef<const Expr *> Args, bool IsCXXMember,
                                VariadicCallType CallType, SourceLocation Loc,
                                SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  FormatStringInfo FSI;
  // Translate the attribute's 1-based indices into AST argument indices,
  // then delegate to the index-based overload below.
  if (getFormatStringInfo(Format, IsCXXMember, CallType != VariadicDoesNotApply,
                          &FSI))
    return CheckFormatArguments(Args, FSI.ArgPassingKind, FSI.FormatIdx,
                                FSI.FirstDataArg, GetFormatStringType(Format),
                                CallType, Loc, Range, CheckedVarArgs);
  return false;
}

bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
                                Sema::FormatArgumentPassingKind APK,
                                unsigned format_idx, unsigned firstDataArg,
                                FormatStringType Type,
                                VariadicCallType CallType, SourceLocation Loc,
                                SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  // CHECK: printf/scanf-like function is called with no format string.
  if (format_idx >= Args.size()) {
    Diag(Loc, diag::warn_missing_format_string) << Range;
    return false;
  }

  const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();

  // CHECK: format string is not a string literal.
  //
  // Dynamically generated format strings are difficult to
  // automatically vet at compile time. Requiring that format strings
  // are string literals: (1) permits the checking of format strings by
  // the compiler and thereby (2) can practically remove the source of
  // many format string exploits.

  // Format string can be either ObjC string (e.g. @"%d") or
  // C string (e.g. "%d")
  // ObjC string uses the same format specifiers as C string, so we can use
  // the same format string checking logic for both ObjC and C strings.
  UncoveredArgHandler UncoveredArg;
  StringLiteralCheckType CT = checkFormatStringExpr(
      *this, OrigFormatExpr, Args, APK, format_idx, firstDataArg, Type,
      CallType,
      /*IsFunctionCall*/ true, CheckedVarArgs, UncoveredArg,
      /*no string offset*/ llvm::APSInt(64, false) = 0);

  // Generate a diagnostic where an uncovered argument is detected.
  if (UncoveredArg.hasUncoveredArg()) {
    unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg;
    assert(ArgIdx < Args.size() && "ArgIdx outside bounds");
    UncoveredArg.Diagnose(*this, /*IsFunctionCall*/ true, Args[ArgIdx]);
  }

  if (CT != SLCT_NotALiteral)
    // Literal format string found, check done!
    return CT == SLCT_CheckedLiteral;

  // Strftime is particular as it always uses a single 'time' argument,
  // so it is safe to pass a non-literal string.
  if (Type == FST_Strftime)
    return false;

  // Do not emit diag when the string param is a macro expansion and the
  // format is either NSString or CFString. This is a hack to prevent
  // diag when using the NSLocalizedString and CFCopyLocalizedString macros
  // which are usually used in place of NS and CF string literals.
  SourceLocation FormatLoc = Args[format_idx]->getBeginLoc();
  if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc))
    return false;

  // If there are no arguments specified, warn with -Wformat-security,
  // otherwise warn only with -Wformat-nonliteral.
  if (Args.size() == firstDataArg) {
    Diag(FormatLoc, diag::warn_format_nonliteral_noargs)
        << OrigFormatExpr->getSourceRange();
    switch (Type) {
    default:
      break;
    case FST_Kprintf:
    case FST_FreeBSDKPrintf:
    case FST_Printf:
      // Suggest passing the would-be format string as data: printf("%s", x).
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
      break;
    case FST_NSString:
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", ");
      break;
    }
  } else {
    Diag(FormatLoc, diag::warn_format_nonliteral)
        << OrigFormatExpr->getSourceRange();
  }
  return false;
}

namespace {

/// Shared driver for format-string diagnostics; subclassed per format family
/// (printf, scanf, ...). Receives callbacks from the format-string parser.
class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
protected:
  Sema &S;
  const FormatStringLiteral *FExpr;
  const Expr *OrigFormatExpr;
  const Sema::FormatStringType FSType;
  const unsigned FirstDataArg;
  const unsigned NumDataArgs;
  const char *Beg; // Start of format string.
  const Sema::FormatArgumentPassingKind ArgPassingKind;
  ArrayRef<const Expr *> Args;   // All arguments of the call being checked.
  unsigned FormatIdx;            // Index of the format-string argument.
  // One bit per data argument; set once a specifier consumes that argument.
  llvm::SmallBitVector CoveredArgs;
  bool usesPositionalArgs = false;
  bool atFirstArg = true;
  bool inFunctionCall;
  Sema::VariadicCallType CallType;
  llvm::SmallBitVector &CheckedVarArgs;
  UncoveredArgHandler &UncoveredArg;

public:
  CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, const char *beg,
                     Sema::FormatArgumentPassingKind APK,
                     ArrayRef<const Expr *> Args, unsigned formatIdx,
                     bool inFunctionCall, Sema::VariadicCallType callType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
        FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
        ArgPassingKind(APK), Args(Args), FormatIdx(formatIdx),
        inFunctionCall(inFunctionCall), CallType(callType),
        CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
    // Start with no data arguments covered.
    CoveredArgs.resize(numDataArgs);
    CoveredArgs.reset();
  }

  void DoneProcessing();

  void HandleIncompleteSpecifier(const char *startSpecifier,
                                 unsigned specifierLen) override;

  void HandleInvalidLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen,
      unsigned DiagID);

  void HandleNonStandardLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const char *startSpecifier, unsigned specifierLen);

  void HandleNonStandardConversionSpecifier(
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen);

  void HandlePosition(const char *startPos, unsigned posLen) override;

  void HandleInvalidPosition(const char *startSpecifier,
                             unsigned specifierLen,
                             analyze_format_string::PositionContext p) override;

  void HandleZeroPosition(const char *startPos, unsigned posLen) override;

  void HandleNullChar(const char *nullCharacter) override;

  template <typename Range>
  static void
  EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr,
                       const PartialDiagnostic &PDiag, SourceLocation StringLoc,
                       bool IsStringLocation, Range StringRange,
                       ArrayRef<FixItHint> Fixit = std::nullopt);

protected:
  bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
                                        const char *startSpec,
                                        unsigned specifierLen,
                                        const char *csStart, unsigned csLen);

  void HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                         const char *startSpec,
                                         unsigned specifierLen);

  SourceRange getFormatStringRange();
  CharSourceRange getSpecifierRange(const char *startSpecifier,
                                    unsigned specifierLen);
  SourceLocation getLocationOfByte(const char *x);

  const Expr *getDataArg(unsigned i) const;

  bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS,
                    const analyze_format_string::ConversionSpecifier &CS,
                    const char *startSpecifier, unsigned specifierLen,
                    unsigned argIndex);

  template <typename Range>
  void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
                            bool IsStringLocation, Range StringRange,
                            ArrayRef<FixItHint> Fixit = std::nullopt);
};

} // namespace

SourceRange CheckFormatHandler::getFormatStringRange() {
  return OrigFormatExpr->getSourceRange();
}

CharSourceRange CheckFormatHandler::
getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
  SourceLocation Start = getLocationOfByte(startSpecifier);
  SourceLocation End   = getLocationOfByte(startSpecifier + specifierLen - 1);

  // Advance the end SourceLocation by one due to half-open ranges.
  End = End.getLocWithOffset(1);

  return CharSourceRange::getCharRange(Start, End);
}

// Translate a pointer into the format-string buffer (relative to Beg) back
// into a SourceLocation inside the original literal.
SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
  return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(),
                                  S.getLangOpts(), S.Context.getTargetInfo());
}

void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
                                                   unsigned specifierLen){
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
                       getLocationOfByte(startSpecifier),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen));
}

void CheckFormatHandler::HandleInvalidLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
  if (FixedLM) {
    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    // Offer the corrected length modifier as a fix-it note.
    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());

  } else {
    // No known correction: for a nonsensical modifier suggest removing it.
    FixItHint Hint;
    if (DiagID == diag::warn_format_nonsensical_length)
      Hint = FixItHint::CreateRemoval(LMRange);

    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen),
                         Hint);
  }
}

void CheckFormatHandler::HandleNonStandardLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
  if (FixedLM) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());

  } else {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
  }
}

void CheckFormatHandler::HandleNonStandardConversionSpecifier(
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  // See if we know how to fix this conversion specifier.
  std::optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
  if (FixedCS) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                          << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    // Offer the standard replacement specifier as a fix-it note.
    CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength());
    S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
      << FixedCS->toString()
      << FixItHint::CreateReplacement(CSRange, FixedCS->toString());
  } else {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                          << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
  }
}

void CheckFormatHandler::HandlePosition(const char *startPos,
                                        unsigned posLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
                               getLocationOfByte(startPos),
                               /*IsStringLocation*/true,
                               getSpecifierRange(startPos, posLen));
}

void CheckFormatHandler::HandleInvalidPosition(
    const char *startSpecifier, unsigned specifierLen,
    analyze_format_string::PositionContext p) {
  EmitFormatDiagnostic(
      S.PDiag(diag::warn_format_invalid_positional_specifier) << (unsigned)p,
      getLocationOfByte(startSpecifier), /*IsStringLocation*/ true,
      getSpecifierRange(startSpecifier, specifierLen));
}

void CheckFormatHandler::HandleZeroPosition(const char *startPos,
                                            unsigned posLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
                               getLocationOfByte(startPos),
                               /*IsStringLocation*/true,
                               getSpecifierRange(startPos, posLen));
}

void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
  if
      (!isa<ObjCStringLiteral>(OrigFormatExpr)) {
    // The presence of a null character is likely an error.
    EmitFormatDiagnostic(
        S.PDiag(diag::warn_printf_format_string_contains_null_char),
        getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
        getFormatStringRange());
  }
}

// Note that this may return NULL if there was an error parsing or building
// one of the argument expressions.
const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
  return Args[FirstDataArg + i];
}

void CheckFormatHandler::DoneProcessing() {
  // Does the number of data arguments exceed the number of
  // format conversions in the format string?
  if (ArgPassingKind != Sema::FAPK_VAList) {
    // Find any arguments that weren't covered.
    CoveredArgs.flip();
    signed notCoveredArg = CoveredArgs.find_first();
    if (notCoveredArg >= 0) {
      assert((unsigned)notCoveredArg < NumDataArgs);
      UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
    } else {
      UncoveredArg.setAllCovered();
    }
  }
}

void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
                                   const Expr *ArgExpr) {
  assert(hasUncoveredArg() && !DiagnosticExprs.empty() &&
         "Invalid state");

  if (!ArgExpr)
    return;

  SourceLocation Loc = ArgExpr->getBeginLoc();

  // Stay silent for arguments originating in system macros.
  if (S.getSourceManager().isInSystemMacro(Loc))
    return;

  PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
  for (auto E : DiagnosticExprs)
    PDiag << E->getSourceRange();

  CheckFormatHandler::EmitFormatDiagnostic(
                                  S, IsFunctionCall, DiagnosticExprs[0],
                                  PDiag, Loc, /*IsStringLocation*/false,
                                  DiagnosticExprs[0]->getSourceRange());
}

bool
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     SourceLocation Loc,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
                                                     unsigned csLen) {
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // make sense.
    CoveredArgs.set(argIndex);
  }
  else {
    // If argIndex exceeds the number of data arguments we
    // don't issue a warning because that is just a cascade of warnings (and
    // they may have intended '%%' anyway). We don't want to continue processing
    // the format string after this point, however, as we will likely just get
    // gibberish when trying to match arguments.
    keepGoing = false;
  }

  StringRef Specifier(csStart, csLen);

  // If the specifier is non-printable, it could be the first byte of a UTF-8
  // sequence. In that case, print the UTF-8 code point. If not, print the byte
  // hex value.
  std::string CodePointStr;
  if (!llvm::sys::locale::isPrint(*csStart)) {
    llvm::UTF32 CodePoint;
    const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
    const llvm::UTF8 *E =
        reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
    llvm::ConversionResult Result =
        llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion);

    // Not valid UTF-8: fall back to the raw byte value.
    if (Result != llvm::conversionOK) {
      unsigned char FirstChar = *csStart;
      CodePoint = (llvm::UTF32)FirstChar;
    }

    llvm::raw_string_ostream OS(CodePointStr);
    if (CodePoint < 256)
      OS << "\\x" << llvm::format("%02x", CodePoint);
    else if (CodePoint <= 0xFFFF)
      OS << "\\u" << llvm::format("%04x", CodePoint);
    else
      OS << "\\U" << llvm::format("%08x", CodePoint);
    OS.flush();
    Specifier = CodePointStr;
  }

  EmitFormatDiagnostic(
      S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
      /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));

  return keepGoing;
}

void
CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                                      const char *startSpec,
                                                      unsigned specifierLen) {
  EmitFormatDiagnostic(
    S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
    Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
}

bool
CheckFormatHandler::CheckNumArgs(
  const analyze_format_string::FormatSpecifier &FS,
  const analyze_format_string::ConversionSpecifier &CS,
  const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {

  if (argIndex >= NumDataArgs) {
    PartialDiagnostic PDiag = FS.usesPositionalArg()
      ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
           << (argIndex+1) << NumDataArgs)
      : S.PDiag(diag::warn_printf_insufficient_data_args);
    EmitFormatDiagnostic(
      PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
      getSpecifierRange(startSpecifier, specifierLen));

    // Since more arguments than conversion tokens are given, by extension
    // all arguments are covered, so mark this as so.
    UncoveredArg.setAllCovered();
    return false;
  }
  return true;
}

template<typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
                                              SourceLocation Loc,
                                              bool IsStringLocation,
                                              Range StringRange,
                                              ArrayRef<FixItHint> FixIt) {
  EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
                       Loc, IsStringLocation, StringRange, FixIt);
}

/// If the format string is not within the function call, emit a note
/// so that the function call and string are in diagnostic messages.
///
/// \param InFunctionCall if true, the format string is within the function
/// call and only one diagnostic message will be produced. Otherwise, an
/// extra note will be emitted pointing to location of the format string.
///
/// \param ArgumentExpr the expression that is passed as the format string
/// argument in the function call. Used for getting locations when two
/// diagnostics are emitted.
///
/// \param PDiag the callee should already have provided any strings for the
/// diagnostic message. This function only adds locations and fixits
/// to diagnostics.
///
/// \param Loc primary location for diagnostic. If two diagnostics are
/// required, one will be at Loc and a new SourceLocation will be created for
/// the other one.
///
/// \param IsStringLocation if true, Loc points to the format string and
/// should be used for the note. Otherwise, Loc points to the argument list
/// and will be used with PDiag.
///
/// \param StringRange some or all of the string to highlight. This is
/// templated so it can accept either a CharSourceRange or a SourceRange.
///
/// \param FixIt optional fix it hint for the format string.
template <typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(
    Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
    const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
    Range StringRange, ArrayRef<FixItHint> FixIt) {
  if (InFunctionCall) {
    const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
    D << StringRange;
    D << FixIt;
  } else {
    S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
      << ArgumentExpr->getSourceRange();

    const Sema::SemaDiagnosticBuilder &Note =
        S.Diag(IsStringLocation ?
                       Loc : StringRange.getBegin(),
               diag::note_format_string_defined);

    Note << StringRange;
    Note << FixIt;
  }
}

//===--- CHECK: Printf format string checking ------------------------------===//

namespace {

/// Format-string checker specialized for the printf family (including
/// NSString/CFString, os_log/os_trace, and kernel printf variants).
class CheckPrintfHandler : public CheckFormatHandler {
public:
  CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, bool isObjC, const char *beg,
                     Sema::FormatArgumentPassingKind APK,
                     ArrayRef<const Expr *> Args, unsigned formatIdx,
                     bool inFunctionCall, Sema::VariadicCallType CallType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, APK, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  bool isObjCContext() const { return FSType == Sema::FST_NSString; }

  /// Returns true if '%@' specifiers are allowed in the format string.
  bool allowsObjCArg() const {
    return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog ||
           FSType == Sema::FST_OSTrace;
  }

  bool HandleInvalidPrintfConversionSpecifier(
      const analyze_printf::PrintfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  void handleInvalidMaskType(StringRef MaskType) override;

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *startSpecifier, unsigned specifierLen,
                             const TargetInfo &Target) override;
  bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
                       const char *StartSpecifier,
                       unsigned SpecifierLen,
                       const Expr *E);

  bool HandleAmount(const analyze_format_string::OptionalAmount &Amt,
                    unsigned k, const char *startSpecifier,
                    unsigned specifierLen);
  void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
                           const analyze_printf::OptionalAmount &Amt,
                           unsigned type,
                           const char *startSpecifier, unsigned specifierLen);
  void HandleFlag(const analyze_printf::PrintfSpecifier &FS,
                  const analyze_printf::OptionalFlag &flag,
                  const char *startSpecifier, unsigned specifierLen);
  void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS,
                         const analyze_printf::OptionalFlag &ignoredFlag,
                         const analyze_printf::OptionalFlag &flag,
                         const char *startSpecifier, unsigned specifierLen);
  bool checkForCStrMembers(const analyze_printf::ArgType &AT,
                           const Expr *E);

  void HandleEmptyObjCModifierFlag(const char *startFlag,
                                   unsigned flagLen) override;

  void HandleInvalidObjCModifierFlag(const char *startFlag,
                                     unsigned flagLen) override;

  void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart,
                                            const char *flagsEnd,
                                            const char *conversionPosition)
                                            override;
};

} // namespace

bool
CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
                                      const analyze_printf::PrintfSpecifier &FS,
                                      const char *startSpecifier,
                                      unsigned specifierLen) {
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();

  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
  S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
}

// Validate a '*' width/precision amount: the consumed data argument must
// exist and be an (unsigned) int. Returns false if checking should stop.
bool CheckPrintfHandler::HandleAmount(
    const analyze_format_string::OptionalAmount &Amt, unsigned k,
    const char *startSpecifier, unsigned specifierLen) {
  if (Amt.hasDataArgument()) {
    if (ArgPassingKind != Sema::FAPK_VAList) {
      unsigned argIndex = Amt.getArgIndex();
      if (argIndex >= NumDataArgs) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
                                 << k,
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/ true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking.  We will just emit
        // spurious errors.
        return false;
      }

      // Type check the data argument.  It should be an 'int'.
      // Although not in conformance with C99, we also allow the argument to be
      // an 'unsigned int' as that is a reasonably safe case.  GCC also
      // doesn't emit a warning for that case.
      CoveredArgs.set(argIndex);
      const Expr *Arg = getDataArg(argIndex);
      if (!Arg)
        return false;

      QualType T = Arg->getType();

      const analyze_printf::ArgType &AT = Amt.getArgType(S.Context);
      assert(AT.isValid());

      if (!AT.matchesType(S.Context, T)) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
                               << k << AT.getRepresentativeTypeName(S.Context)
                               << T << Arg->getSourceRange(),
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking.  We will just emit
        // spurious errors.
        return false;
      }
    }
  }
  return true;
}

void CheckPrintfHandler::HandleInvalidAmount(
                                      const analyze_printf::PrintfSpecifier &FS,
                                      const analyze_printf::OptionalAmount &Amt,
                                      unsigned type,
                                      const char *startSpecifier,
                                      unsigned specifierLen) {
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();

  // Only a literal constant amount can be removed with a fix-it.
  FixItHint fixit =
    Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
      ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(),
                                 Amt.getConstantLength()))
      : FixItHint();

  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
                         << type << CS.toString(),
                       getLocationOfByte(Amt.getStart()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       fixit);
}

void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
                                    const analyze_printf::OptionalFlag &flag,
                                    const char *startSpecifier,
                                    unsigned specifierLen) {
  // Warn about pointless flag with a fixit removal.
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
                         << flag.toString() << CS.toString(),
                       getLocationOfByte(flag.getPosition()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       FixItHint::CreateRemoval(
                         getSpecifierRange(flag.getPosition(), 1)));
}

void CheckPrintfHandler::HandleIgnoredFlag(
    const analyze_printf::PrintfSpecifier &FS,
    const analyze_printf::OptionalFlag &ignoredFlag,
    const analyze_printf::OptionalFlag &flag,
    const char *startSpecifier,
    unsigned specifierLen) {
  // Warn about ignored flag with a fixit removal.
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
                         << ignoredFlag.toString() << flag.toString(),
                       getLocationOfByte(ignoredFlag.getPosition()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       FixItHint::CreateRemoval(
                         getSpecifierRange(ignoredFlag.getPosition(), 1)));
}

void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag,
                                                     unsigned flagLen) {
  // Warn about an empty flag.
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag),
                       getLocationOfByte(startFlag),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startFlag, flagLen));
}

void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag,
                                                       unsigned flagLen) {
  // Warn about an invalid flag.
  auto Range = getSpecifierRange(startFlag, flagLen);
  StringRef flag(startFlag, flagLen);
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
                      getLocationOfByte(startFlag),
                      /*IsStringLocation*/true,
                      Range, FixItHint::CreateRemoval(Range));
}

void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
    const char *flagsStart, const char *flagsEnd,
    const char *conversionPosition) {
  // Warn about using '[...]' without a '@' conversion.
  auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
  auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
  EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
                       getLocationOfByte(conversionPosition),
                       /*IsStringLocation*/true,
                       Range, FixItHint::CreateRemoval(Range));
}

// Determines if the specified type is a C++ class or struct containing
// a member with the specified name and kind (e.g. a CXXMethodDecl named
// "c_str()").
template<typename MemberKind>
static llvm::SmallPtrSet<MemberKind*, 1>
CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  llvm::SmallPtrSet<MemberKind*, 1> Results;

  if (!RT)
    return Results;
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD || !RD->getDefinition())
    return Results;

  LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
                 Sema::LookupMemberName);
  R.suppressDiagnostics();

  // We just need to include all members of the right kind turned up by the
  // filter, at this point.
  if (S.LookupQualifiedName(R, RT->getDecl()))
    for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
      NamedDecl *decl = (*I)->getUnderlyingDecl();
      if (MemberKind *FK = dyn_cast<MemberKind>(decl))
        Results.insert(FK);
    }
  return Results;
}

/// Check if we could call '.c_str()' on an object.
///
/// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
/// allow the call, or if it would be ambiguous).
bool Sema::hasCStrMethod(const Expr *E) {
  using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;

  MethodSet Results =
      CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
  // Any zero-argument c_str() overload counts.
  for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
       MI != ME; ++MI)
    if ((*MI)->getMinRequiredArguments() == 0)
      return true;
  return false;
}

// Check if a (w)string was passed when a (w)char* was needed, and offer a
// better diagnostic if so. AT is assumed to be valid.
// Returns true when a c_str() conversion method is found.
bool CheckPrintfHandler::checkForCStrMembers(
    const analyze_printf::ArgType &AT, const Expr *E) {
  using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;

  MethodSet Results =
      CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());

  for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
       MI != ME; ++MI) {
    const CXXMethodDecl *Method = *MI;
    if (Method->getMinRequiredArguments() == 0 &&
        AT.matchesType(S.Context, Method->getReturnType())) {
      // FIXME: Suggest parens if the expression needs them.
10728 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 10729 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 10730 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 10731 return true; 10732 } 10733 } 10734 10735 return false; 10736 } 10737 10738 bool CheckPrintfHandler::HandlePrintfSpecifier( 10739 const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier, 10740 unsigned specifierLen, const TargetInfo &Target) { 10741 using namespace analyze_format_string; 10742 using namespace analyze_printf; 10743 10744 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 10745 10746 if (FS.consumesDataArgument()) { 10747 if (atFirstArg) { 10748 atFirstArg = false; 10749 usesPositionalArgs = FS.usesPositionalArg(); 10750 } 10751 else if (usesPositionalArgs != FS.usesPositionalArg()) { 10752 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 10753 startSpecifier, specifierLen); 10754 return false; 10755 } 10756 } 10757 10758 // First check if the field width, precision, and conversion specifier 10759 // have matching data arguments. 10760 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 10761 startSpecifier, specifierLen)) { 10762 return false; 10763 } 10764 10765 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 10766 startSpecifier, specifierLen)) { 10767 return false; 10768 } 10769 10770 if (!CS.consumesDataArgument()) { 10771 // FIXME: Technically specifying a precision or field width here 10772 // makes no sense. Worth issuing a warning at some point. 10773 return true; 10774 } 10775 10776 // Consume the argument. 10777 unsigned argIndex = FS.getArgIndex(); 10778 if (argIndex < NumDataArgs) { 10779 // The check to see if the argIndex is valid will come later. 10780 // We set the bit here because we may exit early from this 10781 // function if we encounter some other error. 10782 CoveredArgs.set(argIndex); 10783 } 10784 10785 // FreeBSD kernel extensions. 
10786 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 10787 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 10788 // We need at least two arguments. 10789 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 10790 return false; 10791 10792 // Claim the second argument. 10793 CoveredArgs.set(argIndex + 1); 10794 10795 // Type check the first argument (int for %b, pointer for %D) 10796 const Expr *Ex = getDataArg(argIndex); 10797 const analyze_printf::ArgType &AT = 10798 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 10799 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 10800 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 10801 EmitFormatDiagnostic( 10802 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 10803 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 10804 << false << Ex->getSourceRange(), 10805 Ex->getBeginLoc(), /*IsStringLocation*/ false, 10806 getSpecifierRange(startSpecifier, specifierLen)); 10807 10808 // Type check the second argument (char * for both %b and %D) 10809 Ex = getDataArg(argIndex + 1); 10810 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 10811 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 10812 EmitFormatDiagnostic( 10813 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 10814 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 10815 << false << Ex->getSourceRange(), 10816 Ex->getBeginLoc(), /*IsStringLocation*/ false, 10817 getSpecifierRange(startSpecifier, specifierLen)); 10818 10819 return true; 10820 } 10821 10822 // Check for using an Objective-C specific conversion specifier 10823 // in a non-ObjC literal. 10824 if (!allowsObjCArg() && CS.isObjCArg()) { 10825 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 10826 specifierLen); 10827 } 10828 10829 // %P can only be used with os_log. 
10830 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 10831 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 10832 specifierLen); 10833 } 10834 10835 // %n is not allowed with os_log. 10836 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 10837 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 10838 getLocationOfByte(CS.getStart()), 10839 /*IsStringLocation*/ false, 10840 getSpecifierRange(startSpecifier, specifierLen)); 10841 10842 return true; 10843 } 10844 10845 // Only scalars are allowed for os_trace. 10846 if (FSType == Sema::FST_OSTrace && 10847 (CS.getKind() == ConversionSpecifier::PArg || 10848 CS.getKind() == ConversionSpecifier::sArg || 10849 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 10850 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 10851 specifierLen); 10852 } 10853 10854 // Check for use of public/private annotation outside of os_log(). 10855 if (FSType != Sema::FST_OSLog) { 10856 if (FS.isPublic().isSet()) { 10857 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 10858 << "public", 10859 getLocationOfByte(FS.isPublic().getPosition()), 10860 /*IsStringLocation*/ false, 10861 getSpecifierRange(startSpecifier, specifierLen)); 10862 } 10863 if (FS.isPrivate().isSet()) { 10864 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 10865 << "private", 10866 getLocationOfByte(FS.isPrivate().getPosition()), 10867 /*IsStringLocation*/ false, 10868 getSpecifierRange(startSpecifier, specifierLen)); 10869 } 10870 } 10871 10872 const llvm::Triple &Triple = Target.getTriple(); 10873 if (CS.getKind() == ConversionSpecifier::nArg && 10874 (Triple.isAndroid() || Triple.isOSFuchsia())) { 10875 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported), 10876 getLocationOfByte(CS.getStart()), 10877 /*IsStringLocation*/ false, 10878 getSpecifierRange(startSpecifier, specifierLen)); 10879 } 10880 10881 // Check for 
invalid use of field width 10882 if (!FS.hasValidFieldWidth()) { 10883 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 10884 startSpecifier, specifierLen); 10885 } 10886 10887 // Check for invalid use of precision 10888 if (!FS.hasValidPrecision()) { 10889 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 10890 startSpecifier, specifierLen); 10891 } 10892 10893 // Precision is mandatory for %P specifier. 10894 if (CS.getKind() == ConversionSpecifier::PArg && 10895 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 10896 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 10897 getLocationOfByte(startSpecifier), 10898 /*IsStringLocation*/ false, 10899 getSpecifierRange(startSpecifier, specifierLen)); 10900 } 10901 10902 // Check each flag does not conflict with any other component. 10903 if (!FS.hasValidThousandsGroupingPrefix()) 10904 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 10905 if (!FS.hasValidLeadingZeros()) 10906 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 10907 if (!FS.hasValidPlusPrefix()) 10908 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 10909 if (!FS.hasValidSpacePrefix()) 10910 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 10911 if (!FS.hasValidAlternativeForm()) 10912 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 10913 if (!FS.hasValidLeftJustified()) 10914 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 10915 10916 // Check that flags are not ignored by another flag 10917 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 10918 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 10919 startSpecifier, specifierLen); 10920 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 10921 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 10922 startSpecifier, specifierLen); 10923 10924 // Check the 
length modifier is valid with the given conversion specifier. 10925 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 10926 S.getLangOpts())) 10927 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 10928 diag::warn_format_nonsensical_length); 10929 else if (!FS.hasStandardLengthModifier()) 10930 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 10931 else if (!FS.hasStandardLengthConversionCombination()) 10932 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 10933 diag::warn_format_non_standard_conversion_spec); 10934 10935 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 10936 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 10937 10938 // The remaining checks depend on the data arguments. 10939 if (ArgPassingKind == Sema::FAPK_VAList) 10940 return true; 10941 10942 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 10943 return false; 10944 10945 const Expr *Arg = getDataArg(argIndex); 10946 if (!Arg) 10947 return true; 10948 10949 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 10950 } 10951 10952 static bool requiresParensToAddCast(const Expr *E) { 10953 // FIXME: We should have a general way to reason about operator 10954 // precedence and whether parens are actually needed here. 10955 // Take care of a few common cases where they aren't. 
10956 const Expr *Inside = E->IgnoreImpCasts(); 10957 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 10958 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 10959 10960 switch (Inside->getStmtClass()) { 10961 case Stmt::ArraySubscriptExprClass: 10962 case Stmt::CallExprClass: 10963 case Stmt::CharacterLiteralClass: 10964 case Stmt::CXXBoolLiteralExprClass: 10965 case Stmt::DeclRefExprClass: 10966 case Stmt::FloatingLiteralClass: 10967 case Stmt::IntegerLiteralClass: 10968 case Stmt::MemberExprClass: 10969 case Stmt::ObjCArrayLiteralClass: 10970 case Stmt::ObjCBoolLiteralExprClass: 10971 case Stmt::ObjCBoxedExprClass: 10972 case Stmt::ObjCDictionaryLiteralClass: 10973 case Stmt::ObjCEncodeExprClass: 10974 case Stmt::ObjCIvarRefExprClass: 10975 case Stmt::ObjCMessageExprClass: 10976 case Stmt::ObjCPropertyRefExprClass: 10977 case Stmt::ObjCStringLiteralClass: 10978 case Stmt::ObjCSubscriptRefExprClass: 10979 case Stmt::ParenExprClass: 10980 case Stmt::StringLiteralClass: 10981 case Stmt::UnaryOperatorClass: 10982 return false; 10983 default: 10984 return true; 10985 } 10986 } 10987 10988 static std::pair<QualType, StringRef> 10989 shouldNotPrintDirectly(const ASTContext &Context, 10990 QualType IntendedTy, 10991 const Expr *E) { 10992 // Use a 'while' to peel off layers of typedefs. 10993 QualType TyTy = IntendedTy; 10994 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 10995 StringRef Name = UserTy->getDecl()->getName(); 10996 QualType CastTy = llvm::StringSwitch<QualType>(Name) 10997 .Case("CFIndex", Context.getNSIntegerType()) 10998 .Case("NSInteger", Context.getNSIntegerType()) 10999 .Case("NSUInteger", Context.getNSUIntegerType()) 11000 .Case("SInt32", Context.IntTy) 11001 .Case("UInt32", Context.UnsignedIntTy) 11002 .Default(QualType()); 11003 11004 if (!CastTy.isNull()) 11005 return std::make_pair(CastTy, Name); 11006 11007 TyTy = UserTy->desugar(); 11008 } 11009 11010 // Strip parens if necessary. 
11011 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 11012 return shouldNotPrintDirectly(Context, 11013 PE->getSubExpr()->getType(), 11014 PE->getSubExpr()); 11015 11016 // If this is a conditional expression, then its result type is constructed 11017 // via usual arithmetic conversions and thus there might be no necessary 11018 // typedef sugar there. Recurse to operands to check for NSInteger & 11019 // Co. usage condition. 11020 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 11021 QualType TrueTy, FalseTy; 11022 StringRef TrueName, FalseName; 11023 11024 std::tie(TrueTy, TrueName) = 11025 shouldNotPrintDirectly(Context, 11026 CO->getTrueExpr()->getType(), 11027 CO->getTrueExpr()); 11028 std::tie(FalseTy, FalseName) = 11029 shouldNotPrintDirectly(Context, 11030 CO->getFalseExpr()->getType(), 11031 CO->getFalseExpr()); 11032 11033 if (TrueTy == FalseTy) 11034 return std::make_pair(TrueTy, TrueName); 11035 else if (TrueTy.isNull()) 11036 return std::make_pair(FalseTy, FalseName); 11037 else if (FalseTy.isNull()) 11038 return std::make_pair(TrueTy, TrueName); 11039 } 11040 11041 return std::make_pair(QualType(), StringRef()); 11042 } 11043 11044 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 11045 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 11046 /// type do not count. 11047 static bool 11048 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 11049 QualType From = ICE->getSubExpr()->getType(); 11050 QualType To = ICE->getType(); 11051 // It's an integer promotion if the destination type is the promoted 11052 // source type. 11053 if (ICE->getCastKind() == CK_IntegralCast && 11054 S.Context.isPromotableIntegerType(From) && 11055 S.Context.getPromotedIntegerType(From) == To) 11056 return true; 11057 // Look through vector types, since we do default argument promotion for 11058 // those in OpenCL. 
11059 if (const auto *VecTy = From->getAs<ExtVectorType>()) 11060 From = VecTy->getElementType(); 11061 if (const auto *VecTy = To->getAs<ExtVectorType>()) 11062 To = VecTy->getElementType(); 11063 // It's a floating promotion if the source type is a lower rank. 11064 return ICE->getCastKind() == CK_FloatingCast && 11065 S.Context.getFloatingTypeOrder(From, To) < 0; 11066 } 11067 11068 bool 11069 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 11070 const char *StartSpecifier, 11071 unsigned SpecifierLen, 11072 const Expr *E) { 11073 using namespace analyze_format_string; 11074 using namespace analyze_printf; 11075 11076 // Now type check the data expression that matches the 11077 // format specifier. 11078 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 11079 if (!AT.isValid()) 11080 return true; 11081 11082 QualType ExprTy = E->getType(); 11083 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 11084 ExprTy = TET->getUnderlyingExpr()->getType(); 11085 } 11086 11087 // When using the format attribute in C++, you can receive a function or an 11088 // array that will necessarily decay to a pointer when passed to the final 11089 // format consumer. Apply decay before type comparison. 11090 if (ExprTy->canDecayToPointerType()) 11091 ExprTy = S.Context.getDecayedType(ExprTy); 11092 11093 // Diagnose attempts to print a boolean value as a character. Unlike other 11094 // -Wformat diagnostics, this is fine from a type perspective, but it still 11095 // doesn't make sense. 
11096 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 11097 E->isKnownToHaveBooleanValue()) { 11098 const CharSourceRange &CSR = 11099 getSpecifierRange(StartSpecifier, SpecifierLen); 11100 SmallString<4> FSString; 11101 llvm::raw_svector_ostream os(FSString); 11102 FS.toString(os); 11103 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 11104 << FSString, 11105 E->getExprLoc(), false, CSR); 11106 return true; 11107 } 11108 11109 ArgType::MatchKind ImplicitMatch = ArgType::NoMatch; 11110 ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 11111 if (Match == ArgType::Match) 11112 return true; 11113 11114 // NoMatchPromotionTypeConfusion should be only returned in ImplictCastExpr 11115 assert(Match != ArgType::NoMatchPromotionTypeConfusion); 11116 11117 // Look through argument promotions for our error message's reported type. 11118 // This includes the integral and floating promotions, but excludes array 11119 // and function pointer decay (seeing that an argument intended to be a 11120 // string has type 'char [6]' is probably more confusing than 'char *') and 11121 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 11122 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 11123 if (isArithmeticArgumentPromotion(S, ICE)) { 11124 E = ICE->getSubExpr(); 11125 ExprTy = E->getType(); 11126 11127 // Check if we didn't match because of an implicit cast from a 'char' 11128 // or 'short' to an 'int'. This is done because printf is a varargs 11129 // function. 11130 if (ICE->getType() == S.Context.IntTy || 11131 ICE->getType() == S.Context.UnsignedIntTy) { 11132 // All further checking is done on the subexpression 11133 ImplicitMatch = AT.matchesType(S.Context, ExprTy); 11134 if (ImplicitMatch == ArgType::Match) 11135 return true; 11136 } 11137 } 11138 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 11139 // Special case for 'a', which has type 'int' in C. 
11140 // Note, however, that we do /not/ want to treat multibyte constants like 11141 // 'MooV' as characters! This form is deprecated but still exists. In 11142 // addition, don't treat expressions as of type 'char' if one byte length 11143 // modifier is provided. 11144 if (ExprTy == S.Context.IntTy && 11145 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 11146 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) { 11147 ExprTy = S.Context.CharTy; 11148 // To improve check results, we consider a character literal in C 11149 // to be a 'char' rather than an 'int'. 'printf("%hd", 'a');' is 11150 // more likely a type confusion situation, so we will suggest to 11151 // use '%hhd' instead by discarding the MatchPromotion. 11152 if (Match == ArgType::MatchPromotion) 11153 Match = ArgType::NoMatch; 11154 } 11155 } 11156 if (Match == ArgType::MatchPromotion) { 11157 // WG14 N2562 only clarified promotions in *printf 11158 // For NSLog in ObjC, just preserve -Wformat behavior 11159 if (!S.getLangOpts().ObjC && 11160 ImplicitMatch != ArgType::NoMatchPromotionTypeConfusion && 11161 ImplicitMatch != ArgType::NoMatchTypeConfusion) 11162 return true; 11163 Match = ArgType::NoMatch; 11164 } 11165 if (ImplicitMatch == ArgType::NoMatchPedantic || 11166 ImplicitMatch == ArgType::NoMatchTypeConfusion) 11167 Match = ImplicitMatch; 11168 assert(Match != ArgType::MatchPromotion); 11169 // Look through unscoped enums to their underlying type. 11170 bool IsEnum = false; 11171 bool IsScopedEnum = false; 11172 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 11173 if (EnumTy->isUnscopedEnumerationType()) { 11174 ExprTy = EnumTy->getDecl()->getIntegerType(); 11175 // This controls whether we're talking about the underlying type or not, 11176 // which we only want to do when it's an unscoped enum. 11177 IsEnum = true; 11178 } else { 11179 IsScopedEnum = true; 11180 } 11181 } 11182 11183 // %C in an Objective-C context prints a unichar, not a wchar_t. 
11184 // If the argument is an integer of some kind, believe the %C and suggest 11185 // a cast instead of changing the conversion specifier. 11186 QualType IntendedTy = ExprTy; 11187 if (isObjCContext() && 11188 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 11189 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 11190 !ExprTy->isCharType()) { 11191 // 'unichar' is defined as a typedef of unsigned short, but we should 11192 // prefer using the typedef if it is visible. 11193 IntendedTy = S.Context.UnsignedShortTy; 11194 11195 // While we are here, check if the value is an IntegerLiteral that happens 11196 // to be within the valid range. 11197 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 11198 const llvm::APInt &V = IL->getValue(); 11199 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 11200 return true; 11201 } 11202 11203 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 11204 Sema::LookupOrdinaryName); 11205 if (S.LookupName(Result, S.getCurScope())) { 11206 NamedDecl *ND = Result.getFoundDecl(); 11207 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 11208 if (TD->getUnderlyingType() == IntendedTy) 11209 IntendedTy = S.Context.getTypedefType(TD); 11210 } 11211 } 11212 } 11213 11214 // Special-case some of Darwin's platform-independence types by suggesting 11215 // casts to primitive types that are known to be large enough. 11216 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 11217 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 11218 QualType CastTy; 11219 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 11220 if (!CastTy.isNull()) { 11221 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 11222 // (long in ASTContext). Only complain to pedants. 
11223 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 11224 (AT.isSizeT() || AT.isPtrdiffT()) && 11225 AT.matchesType(S.Context, CastTy)) 11226 Match = ArgType::NoMatchPedantic; 11227 IntendedTy = CastTy; 11228 ShouldNotPrintDirectly = true; 11229 } 11230 } 11231 11232 // We may be able to offer a FixItHint if it is a supported type. 11233 PrintfSpecifier fixedFS = FS; 11234 bool Success = 11235 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 11236 11237 if (Success) { 11238 // Get the fix string from the fixed format specifier 11239 SmallString<16> buf; 11240 llvm::raw_svector_ostream os(buf); 11241 fixedFS.toString(os); 11242 11243 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 11244 11245 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly && !IsScopedEnum) { 11246 unsigned Diag; 11247 switch (Match) { 11248 case ArgType::Match: 11249 case ArgType::MatchPromotion: 11250 case ArgType::NoMatchPromotionTypeConfusion: 11251 llvm_unreachable("expected non-matching"); 11252 case ArgType::NoMatchPedantic: 11253 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 11254 break; 11255 case ArgType::NoMatchTypeConfusion: 11256 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 11257 break; 11258 case ArgType::NoMatch: 11259 Diag = diag::warn_format_conversion_argument_type_mismatch; 11260 break; 11261 } 11262 11263 // In this case, the specifier is wrong and should be changed to match 11264 // the argument. 11265 EmitFormatDiagnostic(S.PDiag(Diag) 11266 << AT.getRepresentativeTypeName(S.Context) 11267 << IntendedTy << IsEnum << E->getSourceRange(), 11268 E->getBeginLoc(), 11269 /*IsStringLocation*/ false, SpecRange, 11270 FixItHint::CreateReplacement(SpecRange, os.str())); 11271 } else { 11272 // The canonical type for formatting this value is different from the 11273 // actual type of the expression. 
(This occurs, for example, with Darwin's 11274 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 11275 // should be printed as 'long' for 64-bit compatibility.) 11276 // Rather than emitting a normal format/argument mismatch, we want to 11277 // add a cast to the recommended type (and correct the format string 11278 // if necessary). 11279 SmallString<16> CastBuf; 11280 llvm::raw_svector_ostream CastFix(CastBuf); 11281 CastFix << (S.LangOpts.CPlusPlus ? "static_cast<" : "("); 11282 if (IsScopedEnum) { 11283 CastFix << AT.getRepresentativeType(S.Context).getAsString( 11284 S.Context.getPrintingPolicy()); 11285 } else { 11286 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 11287 } 11288 CastFix << (S.LangOpts.CPlusPlus ? ">" : ")"); 11289 11290 SmallVector<FixItHint,4> Hints; 11291 if ((!AT.matchesType(S.Context, IntendedTy) && !IsScopedEnum) || 11292 ShouldNotPrintDirectly) 11293 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 11294 11295 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 11296 // If there's already a cast present, just replace it. 11297 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 11298 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 11299 11300 } else if (!requiresParensToAddCast(E) && !S.LangOpts.CPlusPlus) { 11301 // If the expression has high enough precedence, 11302 // just write the C-style cast. 11303 Hints.push_back( 11304 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 11305 } else { 11306 // Otherwise, add parens around the expression as well as the cast. 
11307 CastFix << "("; 11308 Hints.push_back( 11309 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 11310 11311 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 11312 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 11313 } 11314 11315 if (ShouldNotPrintDirectly) { 11316 // The expression has a type that should not be printed directly. 11317 // We extract the name from the typedef because we don't want to show 11318 // the underlying type in the diagnostic. 11319 StringRef Name; 11320 if (const auto *TypedefTy = ExprTy->getAs<TypedefType>()) 11321 Name = TypedefTy->getDecl()->getName(); 11322 else 11323 Name = CastTyName; 11324 unsigned Diag = Match == ArgType::NoMatchPedantic 11325 ? diag::warn_format_argument_needs_cast_pedantic 11326 : diag::warn_format_argument_needs_cast; 11327 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 11328 << E->getSourceRange(), 11329 E->getBeginLoc(), /*IsStringLocation=*/false, 11330 SpecRange, Hints); 11331 } else { 11332 // In this case, the expression could be printed using a different 11333 // specifier, but we've decided that the specifier is probably correct 11334 // and we should cast instead. Just use the normal warning message. 11335 EmitFormatDiagnostic( 11336 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 11337 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 11338 << E->getSourceRange(), 11339 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 11340 } 11341 } 11342 } else { 11343 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 11344 SpecifierLen); 11345 // Since the warning for passing non-POD types to variadic functions 11346 // was deferred until now, we emit a warning for non-POD 11347 // arguments here. 
      // The argument did not match the specifier. Decide which diagnostic to
      // emit based on whether the argument is even a valid vararg type.
      bool EmitTypeMismatch = false;
      switch (S.isValidVarArgType(ExprTy)) {
      case Sema::VAK_Valid:
      case Sema::VAK_ValidInCXX11: {
        unsigned Diag;
        switch (Match) {
        case ArgType::Match:
        case ArgType::MatchPromotion:
        case ArgType::NoMatchPromotionTypeConfusion:
          llvm_unreachable("expected non-matching");
        case ArgType::NoMatchPedantic:
          Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
          break;
        case ArgType::NoMatchTypeConfusion:
          Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
          break;
        case ArgType::NoMatch:
          Diag = diag::warn_format_conversion_argument_type_mismatch;
          break;
        }

        EmitFormatDiagnostic(
            S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
                          << IsEnum << CSR << E->getSourceRange(),
            E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
        break;
      }
      case Sema::VAK_Undefined:
      case Sema::VAK_MSVCUndefined:
        if (CallType == Sema::VariadicDoesNotApply) {
          EmitTypeMismatch = true;
        } else {
          EmitFormatDiagnostic(
              S.PDiag(diag::warn_non_pod_vararg_with_format_string)
                  << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
                  << AT.getRepresentativeTypeName(S.Context) << CSR
                  << E->getSourceRange(),
              E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
          checkForCStrMembers(AT, E);
        }
        break;

      case Sema::VAK_Invalid:
        if (CallType == Sema::VariadicDoesNotApply)
          EmitTypeMismatch = true;
        else if (ExprTy->isObjCObjectType())
          EmitFormatDiagnostic(
              S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
                  << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
                  << AT.getRepresentativeTypeName(S.Context) << CSR
                  << E->getSourceRange(),
              E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
        else
          // FIXME: If this is an initializer list, suggest removing the braces
          // or inserting a cast to the target type.
          S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
              << isa<InitListExpr>(E) << ExprTy << CallType
              << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
        break;
      }

      if (EmitTypeMismatch) {
        // The function is not variadic, so we do not generate warnings about
        // being allowed to pass that object as a variadic argument. Instead,
        // since there are inherently no printf specifiers for types which cannot
        // be passed as variadic arguments, emit a plain old specifier mismatch
        // argument.
        EmitFormatDiagnostic(
            S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
                << AT.getRepresentativeTypeName(S.Context) << ExprTy << false
                << E->getSourceRange(),
            E->getBeginLoc(), false, CSR);
      }

      assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
             "format string specifier index out of range");
      CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
  }

  return true;
}

//===--- CHECK: Scanf format string checking ------------------------------===//

namespace {

/// Format-string handler for scanf-family functions. Delegates common
/// bookkeeping to CheckFormatHandler and implements the scanf-specific
/// specifier callbacks below.
class CheckScanfHandler : public CheckFormatHandler {
public:
  CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
                    const Expr *origFormatExpr, Sema::FormatStringType type,
                    unsigned firstDataArg, unsigned numDataArgs,
                    const char *beg, Sema::FormatArgumentPassingKind APK,
                    ArrayRef<const Expr *> Args, unsigned formatIdx,
                    bool inFunctionCall, Sema::VariadicCallType CallType,
                    llvm::SmallBitVector &CheckedVarArgs,
                    UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, APK, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *startSpecifier,
                            unsigned specifierLen) override;

  bool HandleInvalidScanfConversionSpecifier(
      const analyze_scanf::ScanfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  void HandleIncompleteScanList(const char *start, const char *end) override;
};

} // namespace

// Diagnose a scanlist that is missing its terminating ']'.
void CheckScanfHandler::HandleIncompleteScanList(const char *start,
                                                 const char *end) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
                       getLocationOfByte(end), /*IsStringLocation*/true,
                       getSpecifierRange(start, end - start));
}

// Forward an unrecognized conversion specifier to the shared handler, which
// emits the diagnostic (and fix-it, when possible).
bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  const analyze_scanf::ScanfConversionSpecifier &CS =
      FS.getConversionSpecifier();

  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

// Validate a single scanf conversion specifier against the corresponding
// data argument (if any), emitting diagnostics and fix-its as needed.
bool CheckScanfHandler::HandleScanfSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  using namespace analyze_scanf;
  using namespace analyze_format_string;

  const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();

  // Handle case where '%' and '*' don't consume an argument. These shouldn't
  // be used to decide if we are using positional arguments consistently.
  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
      // The first argument-consuming specifier fixes the positional vs.
      // non-positional style for the rest of the format string.
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // Check if the field width is non-zero.
  const OptionalAmount &Amt = FS.getFieldWidth();
  if (Amt.getHowSpecified() == OptionalAmount::Constant) {
    if (Amt.getConstantAmount() == 0) {
      const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
                                                   Amt.getConstantLength());
      EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
                           getLocationOfByte(Amt.getStart()),
                           /*IsStringLocation*/true, R,
                           FixItHint::CreateRemoval(R));
    }
  }

  if (!FS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense. Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (ArgPassingKind == Sema::FAPK_VAList)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  // Check that the argument type matches the format specifier.
  const Expr *Ex = getDataArg(argIndex);
  if (!Ex)
    return true;

  const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);

  if (!AT.isValid()) {
    return true;
  }

  analyze_format_string::ArgType::MatchKind Match =
      AT.matchesType(S.Context, Ex->getType());
  bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
  if (Match == analyze_format_string::ArgType::Match)
    return true;

  // Try to rewrite the specifier so it matches the argument; if that works we
  // can attach a fix-it to the mismatch warning.
  ScanfSpecifier fixedFS = FS;
  bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
                                 S.getLangOpts(), S.Context);

  unsigned Diag =
      Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
               : diag::warn_format_conversion_argument_type_mismatch;

  if (Success) {
    // Get the fix string from the fixed format specifier.
    SmallString<128> buf;
    llvm::raw_svector_ostream os(buf);
    fixedFS.toString(os);

    EmitFormatDiagnostic(
        S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
                      << Ex->getType() << false << Ex->getSourceRange(),
        Ex->getBeginLoc(),
        /*IsStringLocation*/ false,
        getSpecifierRange(startSpecifier, specifierLen),
        FixItHint::CreateReplacement(
            getSpecifierRange(startSpecifier, specifierLen), os.str()));
  } else {
    EmitFormatDiagnostic(S.PDiag(Diag)
                             << AT.getRepresentativeTypeName(S.Context)
                             << Ex->getType() << false << Ex->getSourceRange(),
                         Ex->getBeginLoc(),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  return true;
}

/// Parse and semantically check one format string literal against the call's
/// arguments, dispatching to the printf or scanf handler based on \p Type.
static void CheckFormatString(
    Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
    ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
    unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
    bool inFunctionCall, Sema::VariadicCallType CallType,
    llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
    bool IgnoreStringsWithoutSpecifiers) {
  // CHECK: is the format string a wide literal?
  if (!FExpr->isAscii() && !FExpr->isUTF8()) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  // Str - The format string. NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T =
      S.Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  // Scan length: declared array size minus the terminating null (at least 0),
  // clamped to the literal's own length.
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  const unsigned numDataArgs = Args.size() - firstDataArg;

  if (IgnoreStringsWithoutSpecifiers &&
      !analyze_format_string::parseFormatStringHasFormattingSpecifiers(
          Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
    return;

  // Emit a warning if the string literal is truncated and does not contain an
  // embedded null character.
  if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_printf_format_string_not_null_terminated),
        FExpr->getBeginLoc(),
        /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
    return;
  }

  // CHECK: empty format string?
  if (StrLen == 0 && numDataArgs > 0) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
      Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
      Type == Sema::FST_OSTrace) {
    CheckPrintfHandler H(
        S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
        (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, APK,
        Args, format_idx, inFunctionCall, CallType, CheckedVarArgs,
        UncoveredArg);

    if (!analyze_format_string::ParsePrintfString(
            H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo(),
            Type == Sema::FST_FreeBSDKPrintf))
      H.DoneProcessing();
  } else if (Type == Sema::FST_Scanf) {
    CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
                        numDataArgs, Str, APK, Args, format_idx, inFunctionCall,
                        CallType, CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParseScanfString(
            H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
      H.DoneProcessing();
  } // TODO: handle other formats
}

/// Returns whether the given format string literal contains a '%s'-style
/// specifier (delegates to analyze_format_string::ParseFormatStringHasSArg).
bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
  // Str - The format string. NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
                                                         getLangOpts(),
                                                         Context.getTargetInfo());
}

//===--- CHECK: Warn on use of wrong absolute value function. -------------===//

// Returns the related absolute value function that is larger, or 0 if one
// does not exist.
static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
  switch (AbsFunction) {
  default:
    return 0;

  case Builtin::BI__builtin_abs:
    return Builtin::BI__builtin_labs;
  case Builtin::BI__builtin_labs:
    return Builtin::BI__builtin_llabs;
  case Builtin::BI__builtin_llabs:
    return 0;

  case Builtin::BI__builtin_fabsf:
    return Builtin::BI__builtin_fabs;
  case Builtin::BI__builtin_fabs:
    return Builtin::BI__builtin_fabsl;
  case Builtin::BI__builtin_fabsl:
    return 0;

  case Builtin::BI__builtin_cabsf:
    return Builtin::BI__builtin_cabs;
  case Builtin::BI__builtin_cabs:
    return Builtin::BI__builtin_cabsl;
  case Builtin::BI__builtin_cabsl:
    return 0;

  case Builtin::BIabs:
    return Builtin::BIlabs;
  case Builtin::BIlabs:
    return Builtin::BIllabs;
  case Builtin::BIllabs:
    return 0;

  case Builtin::BIfabsf:
    return Builtin::BIfabs;
  case Builtin::BIfabs:
    return Builtin::BIfabsl;
  case Builtin::BIfabsl:
    return 0;

  case Builtin::BIcabsf:
    return Builtin::BIcabs;
  case Builtin::BIcabs:
    return Builtin::BIcabsl;
  case Builtin::BIcabsl:
    return 0;
  }
}

// Returns the argument type of the
// absolute value function.
static QualType getAbsoluteValueArgumentType(ASTContext &Context,
                                             unsigned AbsType) {
  if (AbsType == 0)
    return QualType();

  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  QualType BuiltinType = Context.GetBuiltinType(AbsType, Error);
  if (Error != ASTContext::GE_None)
    return QualType();

  const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>();
  if (!FT)
    return QualType();

  // All absolute value functions take exactly one argument.
  if (FT->getNumParams() != 1)
    return QualType();

  return FT->getParamType(0);
}

// Returns the best absolute value function, or zero, based on type and
// current absolute value function.
static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType,
                                   unsigned AbsFunctionKind) {
  unsigned BestKind = 0;
  uint64_t ArgSize = Context.getTypeSize(ArgType);
  // Walk the chain of progressively larger functions, keeping the first one
  // whose parameter is wide enough; prefer an exact type match if found.
  for (unsigned Kind = AbsFunctionKind; Kind != 0;
       Kind = getLargerAbsoluteValueFunction(Kind)) {
    QualType ParamType = getAbsoluteValueArgumentType(Context, Kind);
    if (Context.getTypeSize(ParamType) >= ArgSize) {
      if (BestKind == 0)
        BestKind = Kind;
      else if (Context.hasSameType(ParamType, ArgType)) {
        BestKind = Kind;
        break;
      }
    }
  }
  return BestKind;
}

// The kind of value an absolute value function operates on.
enum AbsoluteValueKind {
  AVK_Integer,
  AVK_Floating,
  AVK_Complex
};

// Classify a type into the AbsoluteValueKind it belongs to.
static AbsoluteValueKind getAbsoluteValueKind(QualType T) {
  if (T->isIntegralOrEnumerationType())
    return AVK_Integer;
  if (T->isRealFloatingType())
    return AVK_Floating;
  if (T->isAnyComplexType())
    return AVK_Complex;

  llvm_unreachable("Type not integer, floating, or complex");
}

// Changes the absolute value function to a different type. Preserves whether
// the function is a builtin.
static unsigned changeAbsFunction(unsigned AbsKind,
                                  AbsoluteValueKind ValueKind) {
  switch (ValueKind) {
  case AVK_Integer:
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsl:
    case Builtin::BI__builtin_cabsf:
    case Builtin::BI__builtin_cabs:
    case Builtin::BI__builtin_cabsl:
      return Builtin::BI__builtin_abs;
    case Builtin::BIfabsf:
    case Builtin::BIfabs:
    case Builtin::BIfabsl:
    case Builtin::BIcabsf:
    case Builtin::BIcabs:
    case Builtin::BIcabsl:
      return Builtin::BIabs;
    }
  case AVK_Floating:
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_abs:
    case Builtin::BI__builtin_labs:
    case Builtin::BI__builtin_llabs:
    case Builtin::BI__builtin_cabsf:
    case Builtin::BI__builtin_cabs:
    case Builtin::BI__builtin_cabsl:
      return Builtin::BI__builtin_fabsf;
    case Builtin::BIabs:
    case Builtin::BIlabs:
    case Builtin::BIllabs:
    case Builtin::BIcabsf:
    case Builtin::BIcabs:
    case Builtin::BIcabsl:
      return Builtin::BIfabsf;
    }
  case AVK_Complex:
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_abs:
    case Builtin::BI__builtin_labs:
    case Builtin::BI__builtin_llabs:
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsl:
      return Builtin::BI__builtin_cabsf;
    case Builtin::BIabs:
    case Builtin::BIlabs:
    case Builtin::BIllabs:
    case Builtin::BIfabsf:
    case Builtin::BIfabs:
    case Builtin::BIfabsl:
      return Builtin::BIcabsf;
    }
  }
  llvm_unreachable("Unable to convert function");
}

// Returns FDecl's builtin ID if it is one of the known absolute value
// functions, or 0 otherwise.
static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) {
  const IdentifierInfo *FnInfo = FDecl->getIdentifier();
  if (!FnInfo)
    return 0;

  switch (FDecl->getBuiltinID()) {
  default:
    return 0;
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs:
  case Builtin::BI__builtin_cabs:
  case Builtin::BI__builtin_cabsf:
  case Builtin::BI__builtin_cabsl:
  case Builtin::BIabs:
  case Builtin::BIlabs:
  case Builtin::BIllabs:
  case Builtin::BIfabs:
  case Builtin::BIfabsf:
  case Builtin::BIfabsl:
  case Builtin::BIcabs:
  case Builtin::BIcabsf:
  case Builtin::BIcabsl:
    return FDecl->getBuiltinID();
  }
  llvm_unreachable("Unknown Builtin type");
}

// If the replacement is valid, emit a note with replacement function.
// Additionally, suggest including the proper header if not already included.
static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
                            unsigned AbsKind, QualType ArgType) {
  bool EmitHeaderHint = true;
  const char *HeaderName = nullptr;
  StringRef FunctionName;
  // In C++ (non-complex argument), prefer suggesting the overloaded std::abs;
  // otherwise suggest the C builtin identified by AbsKind.
  if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
    FunctionName = "std::abs";
    if (ArgType->isIntegralOrEnumerationType()) {
      HeaderName = "cstdlib";
    } else if (ArgType->isRealFloatingType()) {
      HeaderName = "cmath";
    } else {
      llvm_unreachable("Invalid Type");
    }

    // Lookup all std::abs
    if (NamespaceDecl *Std = S.getStdNamespace()) {
      LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupQualifiedName(R, Std);

      for (const auto *I : R) {
        const FunctionDecl *FDecl = nullptr;
        if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) {
          FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl());
        } else {
          FDecl = dyn_cast<FunctionDecl>(I);
        }
        if (!FDecl)
          continue;

        // Found std::abs(), check that they are the right ones.
        if (FDecl->getNumParams() != 1)
          continue;

        // Check that the parameter type can handle the argument.
        QualType ParamType = FDecl->getParamDecl(0)->getType();
        if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) &&
            S.Context.getTypeSize(ArgType) <=
                S.Context.getTypeSize(ParamType)) {
          // Found a function, don't need the header hint.
          EmitHeaderHint = false;
          break;
        }
      }
    }
  } else {
    FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
    HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);

    if (HeaderName) {
      DeclarationName DN(&S.Context.Idents.get(FunctionName));
      LookupResult R(S, DN, Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupName(R, S.getCurScope());

      if (R.isSingleResult()) {
        // The name resolves to something: only proceed if it is the builtin
        // we intend to suggest (otherwise the fix-it would be wrong).
        FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
        if (FD && FD->getBuiltinID() == AbsKind) {
          EmitHeaderHint = false;
        } else {
          return;
        }
      } else if (!R.empty()) {
        return;
      }
    }
  }

  S.Diag(Loc, diag::note_replace_abs_function)
      << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);

  if (!HeaderName)
    return;

  if (!EmitHeaderHint)
    return;

  S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
                                                    << FunctionName;
}

// Returns true if FDecl is a function named Str declared in namespace std.
template <std::size_t StrLen>
static bool IsStdFunction(const FunctionDecl *FDecl,
                          const char (&Str)[StrLen]) {
  if (!FDecl)
    return false;
  if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
    return false;
  if (!FDecl->isInStdNamespace())
    return false;

  return true;
}

// Warn when using the wrong abs() function.
void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
                                      const FunctionDecl *FDecl) {
  if (Call->getNumArgs() != 1)
    return;

  unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
  bool IsStdAbs = IsStdFunction(FDecl, "abs");
  if (AbsKind == 0 && !IsStdAbs)
    return;

  QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  QualType ParamType = Call->getArg(0)->getType();

  // Unsigned types cannot be negative.
  // Suggest removing the absolute value
  // function call.
  if (ArgType->isUnsignedIntegerType()) {
    StringRef FunctionName =
        IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
    Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
    Diag(Call->getExprLoc(), diag::note_remove_abs)
        << FunctionName
        << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
    return;
  }

  // Taking the absolute value of a pointer is very suspicious, they probably
  // wanted to index into an array, dereference a pointer, call a function, etc.
  if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
    unsigned DiagType = 0;
    if (ArgType->isFunctionType())
      DiagType = 1;
    else if (ArgType->isArrayType())
      DiagType = 2;

    Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
    return;
  }

  // std::abs has overloads which prevent most of the absolute value problems
  // from occurring.
  if (IsStdAbs)
    return;

  AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
  AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);

  // The argument and parameter are the same kind. Check if they are the right
  // size.
  if (ArgValueKind == ParamValueKind) {
    if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
      return;

    unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
    Diag(Call->getExprLoc(), diag::warn_abs_too_small)
        << FDecl << ArgType << ParamType;

    if (NewAbsKind == 0)
      return;

    emitReplacement(*this, Call->getExprLoc(),
                    Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
    return;
  }

  // ArgValueKind != ParamValueKind
  // The wrong type of absolute value function was used. Attempt to find the
  // proper one.
  unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
  NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
  if (NewAbsKind == 0)
    return;

  Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
      << FDecl << ParamValueKind << ArgValueKind;

  emitReplacement(*this, Call->getExprLoc(),
                  Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
}

//===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
                                const FunctionDecl *FDecl) {
  if (!Call || !FDecl) return;

  // Ignore template specializations and macros.
  if (inTemplateInstantiation()) return;
  if (Call->getExprLoc().isMacroID()) return;

  // Only care about the one template argument, two function parameter std::max
  if (Call->getNumArgs() != 2) return;
  if (!IsStdFunction(FDecl, "max")) return;
  const auto * ArgList = FDecl->getTemplateSpecializationArgs();
  if (!ArgList) return;
  if (ArgList->size() != 1) return;

  // Check that template type argument is unsigned integer.
  const auto& TA = ArgList->get(0);
  if (TA.getKind() != TemplateArgument::Type) return;
  QualType ArgType = TA.getAsType();
  if (!ArgType->isUnsignedIntegerType()) return;

  // See if either argument is a literal zero.
  auto IsLiteralZeroArg = [](const Expr* E) -> bool {
    const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
    if (!MTE) return false;
    const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr());
    if (!Num) return false;
    if (Num->getValue() != 0) return false;
    return true;
  };

  const Expr *FirstArg = Call->getArg(0);
  const Expr *SecondArg = Call->getArg(1);
  const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
  const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);

  // Only warn when exactly one argument is zero.
  if (IsFirstArgZero == IsSecondArgZero) return;

  SourceRange FirstRange = FirstArg->getSourceRange();
  SourceRange SecondRange = SecondArg->getSourceRange();

  SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;

  Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
      << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;

  // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
  SourceRange RemovalRange;
  if (IsFirstArgZero) {
    RemovalRange = SourceRange(FirstRange.getBegin(),
                               SecondRange.getBegin().getLocWithOffset(-1));
  } else {
    RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()),
                               SecondRange.getEnd());
  }

  Diag(Call->getExprLoc(), diag::note_remove_max_call)
      << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
      << FixItHint::CreateRemoval(RemovalRange);
}

//===--- CHECK: Standard memory functions ---------------------------------===//

/// Takes the expression passed to the size_t parameter of functions
/// such as memcmp, strncat, etc and warns if it's a comparison.
///
/// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
                                           IdentifierInfo *FnName,
                                           SourceLocation FnLoc,
                                           SourceLocation RParenLoc) {
  const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
  if (!Size)
    return false;

  // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||:
  if (!Size->isComparisonOp() && !Size->isLogicalOp())
    return false;

  // Warn, then suggest two fixes: close the call after the LHS, or cast the
  // whole comparison to size_t to silence the warning.
  SourceRange SizeRange = Size->getSourceRange();
  S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
      << SizeRange << FnName;
  S.Diag(FnLoc, diag::note_memsize_comparison_paren)
      << FnName
      << FixItHint::CreateInsertion(
             S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
      << FixItHint::CreateRemoval(RParenLoc);
  S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
      << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
      << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
                                    ")");

  return true;
}

/// Determine whether the given type is or contains a dynamic class type
/// (e.g., whether it has a vtable).
static const CXXRecordDecl *getContainedDynamicClass(QualType T,
                                                     bool &IsContained) {
  // Look through array types while ignoring qualifiers.
  const Type *Ty = T->getBaseElementTypeUnsafe();
  IsContained = false;

  const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  RD = RD ? RD->getDefinition() : nullptr;
  if (!RD || RD->isInvalidDecl())
    return nullptr;

  if (RD->isDynamicClass())
    return RD;

  // Check all the fields. If any bases were dynamic, the class is dynamic.
  // It's impossible for a class to transitively contain itself by value, so
  // infinite recursion is impossible.
  for (auto *FD : RD->fields()) {
    bool SubContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(FD->getType(), SubContained)) {
      IsContained = true;
      return ContainedRD;
    }
  }

  return nullptr;
}

/// Returns E as a sizeof UnaryExprOrTypeTraitExpr, or null if it is not one.
static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) {
  if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E))
    if (Unary->getKind() == UETT_SizeOf)
      return Unary;
  return nullptr;
}

/// If E is a sizeof expression, returns its argument expression,
/// otherwise returns NULL.
static const Expr *getSizeOfExprArg(const Expr *E) {
  if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
    if (!SizeOf->isArgumentType())
      return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
  return nullptr;
}

/// If E is a sizeof expression, returns its argument type.
static QualType getSizeOfArgType(const Expr *E) {
  if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
    return SizeOf->getTypeOfArgument();
  return QualType();
}

namespace {

/// Type visitor that, for each field whose default-initialization is
/// non-trivial (ARC strong/weak members, recursively through structs and
/// arrays), emits a note_nontrivial_field diagnostic.
struct SearchNonTrivialToInitializeField
    : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
  using Super =
      DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;

  SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}

  void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
                     SourceLocation SL) {
    // Arrays are handled by visiting their base element type.
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PDIK, AT, SL);
      return;
    }

    Super::visitWithKind(PDIK, FT, SL);
  }

  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitARCWeak(QualType
FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
                  const ArrayType *AT, SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void visitTrivial(QualType FT, SourceLocation SL) {}

  // Entry point: visit every field of the record type RT.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;
  Sema &S;
};

/// Type visitor that, for each field that is non-trivial to copy (ARC
/// strong/weak members, recursively through structs and arrays), emits a
/// note_nontrivial_field diagnostic.
struct SearchNonTrivialToCopyField
    : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
  using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;

  SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}

  void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
                     SourceLocation SL) {
    // Arrays are handled by visiting their base element type.
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PCK, AT, SL);
      return;
    }

    Super::visitWithKind(PCK, FT, SL);
  }

  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
                  SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
                SourceLocation SL) {}
  void visitTrivial(QualType FT, SourceLocation SL) {}
  void visitVolatileTrivial(QualType FT, SourceLocation SL) {}

  // Entry point: visit every field of the record type RT.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;
  Sema &S;
};

}

/// Detect if \c SizeofExpr is likely to calculate the sizeof an object.
static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
  SizeofExpr = SizeofExpr->IgnoreParenImpCasts();

  // Recurse through '*' and '+' so expressions like 'sizeof(x) * n' count.
  if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) {
    if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
      return false;

    return doesExprLikelyComputeSize(BO->getLHS()) ||
           doesExprLikelyComputeSize(BO->getRHS());
  }

  return getAsSizeOfExpr(SizeofExpr) != nullptr;
}

/// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
///
/// \code
///   #define MACRO 0
///   foo(MACRO);
///   foo(0);
/// \endcode
///
/// This should return true for the first call to foo, but not for the second
/// (regardless of whether foo is a macro or function).
static bool isArgumentExpandedFromMacro(SourceManager &SM,
                                        SourceLocation CallLoc,
                                        SourceLocation ArgLoc) {
  if (!CallLoc.isMacroID())
    return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc);

  return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) !=
         SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc));
}

/// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
/// last two arguments transposed.
static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
  // Only memset and bzero take a size whose position is commonly confused
  // with the fill value.
  if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
    return;

  // memset's size is the third argument; bzero's is the second.
  const Expr *SizeArg =
      Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();

  auto isLiteralZero = [](const Expr *E) {
    return (isa<IntegerLiteral>(E) &&
            cast<IntegerLiteral>(E)->getValue() == 0) ||
           (isa<CharacterLiteral>(E) &&
            cast<CharacterLiteral>(E)->getValue() == 0);
  };

  // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
  // Suppress the warning when the zero came from a macro expansion, since
  // the macro may legitimately expand to 0 in some configurations.
  SourceLocation CallLoc = Call->getRParenLoc();
  SourceManager &SM = S.getSourceManager();
  if (isLiteralZero(SizeArg) &&
      !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) {

    SourceLocation DiagLoc = SizeArg->getExprLoc();

    // Some platforms #define bzero to __builtin_memset. See if this is the
    // case, and if so, emit a better diagnostic.
    if (BId == Builtin::BIbzero ||
        (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
                                    CallLoc, SM, S.getLangOpts()) == "bzero")) {
      S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
      S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
    } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) {
      // Don't warn on 'memset(p, 0, 0)': both the value and length are zero,
      // so the arguments may be intentionally swapped-but-harmless.
      S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
      S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
    }
    return;
  }

  // If the second argument to a memset is a sizeof expression and the third
  // isn't, this is also likely an error. This should catch
  // 'memset(buf, sizeof(buf), 0xff)'.
  if (BId == Builtin::BImemset &&
      doesExprLikelyComputeSize(Call->getArg(1)) &&
      !doesExprLikelyComputeSize(Call->getArg(2))) {
    SourceLocation DiagLoc = Call->getArg(1)->getExprLoc();
    S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
    S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
    return;
  }
}

/// Check for dangerous or invalid arguments to memset().
///
/// This issues warnings on known problematic, dangerous or unspecified
/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
/// function calls.
///
/// \param Call The call expression to diagnose.
void Sema::CheckMemaccessArguments(const CallExpr *Call,
                                   unsigned BId,
                                   IdentifierInfo *FnName) {
  assert(BId != 0);

  // It is possible to have a non-standard definition of memset.  Validate
  // we have enough arguments, and if not, abort further checking.
  unsigned ExpectedNumArgs =
      (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
  if (Call->getNumArgs() < ExpectedNumArgs)
    return;

  // LastArg is one past the last pointer argument to inspect (only the
  // destination for memset/bzero/strndup; both pointers for the others).
  unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
                      BId == Builtin::BIstrndup ? 1 : 2);
  // LenArg indexes the length/size argument for this builtin.
  unsigned LenArg =
      (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
  const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();

  if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Catch cases like 'memset(buf, sizeof(buf), 0)'.
  CheckMemaccessSize(*this, BId, Call);

  // We have special checking when the length is a sizeof expression.
  QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
  const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
  llvm::FoldingSetNodeID SizeOfArgID;

  // Although widely used, 'bzero' is not a standard function. Be more strict
  // with the argument types before allowing diagnostics and only allow the
  // form bzero(ptr, sizeof(...)).
  QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
    return;

  for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
    const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
    SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();

    QualType DestTy = Dest->getType();
    QualType PointeeTy;
    if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
      PointeeTy = DestPtrTy->getPointeeType();

      // Never warn about void type pointers. This can be used to suppress
      // false positives.
      if (PointeeTy->isVoidType())
        continue;

      // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
      // actually comparing the expressions for equality. Because computing the
      // expression IDs can be expensive, we only do this if the diagnostic is
      // enabled.
      if (SizeOfArg &&
          !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
                           SizeOfArg->getExprLoc())) {
        // We only compute IDs for expressions if the warning is enabled, and
        // cache the sizeof arg's ID.
        if (SizeOfArgID == llvm::FoldingSetNodeID())
          SizeOfArg->Profile(SizeOfArgID, Context, true);
        llvm::FoldingSetNodeID DestID;
        Dest->Profile(DestID, Context, true);
        if (DestID == SizeOfArgID) {
          // TODO: For strncpy() and friends, this could suggest sizeof(dst)
          //       over sizeof(src) as well.
          unsigned ActionIdx = 0; // Default is to suggest dereferencing.
          StringRef ReadableName = FnName->getName();

          if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
            if (UnaryOp->getOpcode() == UO_AddrOf)
              ActionIdx = 1; // If its an address-of operator, just remove it.
          if (!PointeeTy->isIncompleteType() &&
              (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
            ActionIdx = 2; // If the pointee's size is sizeof(char),
                           // suggest an explicit length.

          // If the function is defined as a builtin macro, do not show macro
          // expansion.
          SourceLocation SL = SizeOfArg->getExprLoc();
          SourceRange DSR = Dest->getSourceRange();
          SourceRange SSR = SizeOfArg->getSourceRange();
          SourceManager &SM = getSourceManager();

          if (SM.isMacroArgExpansion(SL)) {
            ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
            SL = SM.getSpellingLoc(SL);
            DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
                              SM.getSpellingLoc(DSR.getEnd()));
            SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
                              SM.getSpellingLoc(SSR.getEnd()));
          }

          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess)
                                << ReadableName
                                << PointeeTy
                                << DestTy
                                << DSR
                                << SSR);
          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
                                << ActionIdx
                                << SSR);

          break;
        }
      }

      // Also check for cases where the sizeof argument is the exact same
      // type as the memory argument, and where it points to a user-defined
      // record type.
      if (SizeOfArgTy != QualType()) {
        if (PointeeTy->isRecordType() &&
            Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
          DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
                              PDiag(diag::warn_sizeof_pointer_type_memaccess)
                                << FnName << SizeOfArgTy << ArgIdx
                                << PointeeTy << Dest->getSourceRange()
                                << LenExpr->getSourceRange());
          break;
        }
      }
    } else if (DestTy->isArrayType()) {
      PointeeTy = DestTy;
    }

    if (PointeeTy == QualType())
      continue;

    // Always complain about dynamic classes.
    bool IsContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(PointeeTy, IsContained)) {

      unsigned OperationType = 0;
      const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
      // "overwritten" if we're warning about the destination for any call
      // but memcmp; otherwise a verb appropriate to the call.
      if (ArgIdx != 0 || IsCmp) {
        if (BId == Builtin::BImemcpy)
          OperationType = 1;
        else if(BId == Builtin::BImemmove)
          OperationType = 2;
        else if (IsCmp)
          OperationType = 3;
      }

      DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                          PDiag(diag::warn_dyn_class_memaccess)
                            << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName
                            << IsContained << ContainedRD << OperationType
                            << Call->getCallee()->getSourceRange());
    } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
               BId != Builtin::BImemset)
      DiagRuntimeBehavior(
          Dest->getExprLoc(), Dest,
          PDiag(diag::warn_arc_object_memaccess)
              << ArgIdx << FnName << PointeeTy
              << Call->getCallee()->getSourceRange());
    else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
      // Non-trivial C structs (e.g. with ARC-managed fields) must not be
      // zeroed or byte-copied; point at the offending field in a note.
      if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
          RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 0);
        SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this);
      } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
                 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 1);
        SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this);
      } else {
        continue;
      }
    } else
      continue;

    // Emitted a warning above: suggest casting to (void*) to silence it.
    DiagRuntimeBehavior(
        Dest->getExprLoc(), Dest,
        PDiag(diag::note_bad_memaccess_silence)
            << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
    break;
  }
}

// A little helper routine: ignore addition and subtraction of integer literals.
// This intentionally does not ignore all integer constant expressions because
// we don't want to remove sizeof().
12609 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 12610 Ex = Ex->IgnoreParenCasts(); 12611 12612 while (true) { 12613 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 12614 if (!BO || !BO->isAdditiveOp()) 12615 break; 12616 12617 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 12618 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 12619 12620 if (isa<IntegerLiteral>(RHS)) 12621 Ex = LHS; 12622 else if (isa<IntegerLiteral>(LHS)) 12623 Ex = RHS; 12624 else 12625 break; 12626 } 12627 12628 return Ex; 12629 } 12630 12631 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 12632 ASTContext &Context) { 12633 // Only handle constant-sized or VLAs, but not flexible members. 12634 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 12635 // Only issue the FIXIT for arrays of size > 1. 12636 if (CAT->getSize().getSExtValue() <= 1) 12637 return false; 12638 } else if (!Ty->isVariableArrayType()) { 12639 return false; 12640 } 12641 return true; 12642 } 12643 12644 // Warn if the user has made the 'size' argument to strlcpy or strlcat 12645 // be the size of the source, instead of the destination. 
void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
                                    IdentifierInfo *FnName) {

  // Don't crash if the user has the wrong number of arguments.
  // NOTE(review): the 4-argument form presumably covers checked variants
  // such as __builtin___strlcpy_chk — confirm against the callers.
  unsigned NumArgs = Call->getNumArgs();
  if ((NumArgs != 3) && (NumArgs != 4))
    return;

  const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context);
  const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context);
  const Expr *CompareWithSrc = nullptr;

  if (CheckMemorySizeofForComparison(*this, SizeArg, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Look for 'strlcpy(dst, x, sizeof(x))'
  if (const Expr *Ex = getSizeOfExprArg(SizeArg))
    CompareWithSrc = Ex;
  else {
    // Look for 'strlcpy(dst, x, strlen(x))'
    if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) {
      if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen &&
          SizeCall->getNumArgs() == 1)
        CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context);
    }
  }

  if (!CompareWithSrc)
    return;

  // Determine if the argument to sizeof/strlen is equal to the source
  // argument.  In principle there's all kinds of things you could do
  // here, for instance creating an == expression and evaluating it with
  // EvaluateAsBooleanCondition, but this uses a more direct technique:
  // both sides must be DeclRefExprs naming the same declaration.
  const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg);
  if (!SrcArgDRE)
    return;

  const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc);
  if (!CompareWithSrcDRE ||
      SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl())
    return;

  const Expr *OriginalSizeArg = Call->getArg(2);
  Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size)
      << OriginalSizeArg->getSourceRange() << FnName;

  // Output a FIXIT hint if the destination is an array (rather than a
  // pointer to an array).  This could be enhanced to handle some
  // pointers if we know the actual size, like if DstArg is 'array+2'
  // we could say 'sizeof(array)-2'.
  const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts();
  if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context))
    return;

  // Build the replacement text 'sizeof(<dst>)' for the fix-it.
  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ")";

  Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size)
      << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
                                      OS.str());
}

/// Check if two expressions refer to the same declaration.
static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
  if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
    if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
      return D1->getDecl() == D2->getDecl();
  return false;
}

/// If E is a call to strlen (by builtin id), return its argument with parens
/// and casts stripped; otherwise return null.
static const Expr *getStrlenExprArg(const Expr *E) {
  if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
    const FunctionDecl *FD = CE->getDirectCallee();
    if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
      return nullptr;
    return CE->getArg(0)->IgnoreParenCasts();
  }
  return nullptr;
}

// Warn on anti-patterns as the 'size' argument to strncat.
// The correct size argument should look like following:
//   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
void Sema::CheckStrncatArguments(const CallExpr *CE,
                                 IdentifierInfo *FnName) {
  // Don't crash if the user has the wrong number of arguments.
  if (CE->getNumArgs() < 3)
    return;
  const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
  const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
  const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();

  if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
                                     CE->getRParenLoc()))
    return;

  // Identify common expressions, which are wrongly used as the size argument
  // to strncat and may lead to buffer overflows.
  // PatternType 1: size is based on the destination (too large by up to 1);
  // PatternType 2: size is based on the source (wrong buffer entirely).
  unsigned PatternType = 0;
  if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
    // - sizeof(dst)
    if (referToTheSameDecl(SizeOfArg, DstArg))
      PatternType = 1;
    // - sizeof(src)
    else if (referToTheSameDecl(SizeOfArg, SrcArg))
      PatternType = 2;
  } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
    if (BE->getOpcode() == BO_Sub) {
      const Expr *L = BE->getLHS()->IgnoreParenCasts();
      const Expr *R = BE->getRHS()->IgnoreParenCasts();
      // - sizeof(dst) - strlen(dst)
      if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
          referToTheSameDecl(DstArg, getStrlenExprArg(R)))
        PatternType = 1;
      // - sizeof(src) - (anything)
      else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
        PatternType = 2;
    }
  }

  if (PatternType == 0)
    return;

  // Generate the diagnostic.
  SourceLocation SL = LenArg->getBeginLoc();
  SourceRange SR = LenArg->getSourceRange();
  SourceManager &SM = getSourceManager();

  // If the function is defined as a builtin macro, do not show macro expansion.
  if (SM.isMacroArgExpansion(SL)) {
    SL = SM.getSpellingLoc(SL);
    SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
                     SM.getSpellingLoc(SR.getEnd()));
  }

  // Check if the destination is an array (rather than a pointer to an array).
  QualType DstTy = DstArg->getType();
  bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy,
                                                                    Context);
  if (!isKnownSizeArray) {
    // Destination size is unknown, so we can warn but not suggest a fix-it.
    if (PatternType == 1)
      Diag(SL, diag::warn_strncat_wrong_size) << SR;
    else
      Diag(SL, diag::warn_strncat_src_size) << SR;
    return;
  }

  if (PatternType == 1)
    Diag(SL, diag::warn_strncat_large_size) << SR;
  else
    Diag(SL, diag::warn_strncat_src_size) << SR;

  // Build the canonical replacement: sizeof(dst) - strlen(dst) - 1.
  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - ";
  OS << "strlen(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - 1";

  Diag(SL, diag::note_strncat_wrong_size)
      << FixItHint::CreateReplacement(SR, OS.str());
}

namespace {
// Diagnose free() of an lvalue that names a field, function, or variable —
// such objects were not returned by an allocation function.
void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName,
                                const UnaryOperator *UnaryExpr, const Decl *D) {
  if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) {
    S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object)
        << CalleeName << 0 /*object: */ << cast<NamedDecl>(D);
    return;
  }
}

// Diagnose free(&x) and free(&s.m): taking the address of a declaration or
// member cannot yield heap storage (references are excluded, since they may
// be bound to a heap object).
void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName,
                                 const UnaryOperator *UnaryExpr) {
  if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) {
    const Decl *D = Lvalue->getDecl();
    if (isa<DeclaratorDecl>(D))
      if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType())
        return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D);
  }

  if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr()))
    return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr,
                                      Lvalue->getMemberDecl());
}

void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName,
                            const UnaryOperator
*UnaryExpr) { 12842 const auto *Lambda = dyn_cast<LambdaExpr>( 12843 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 12844 if (!Lambda) 12845 return; 12846 12847 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 12848 << CalleeName << 2 /*object: lambda expression*/; 12849 } 12850 12851 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 12852 const DeclRefExpr *Lvalue) { 12853 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 12854 if (Var == nullptr) 12855 return; 12856 12857 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 12858 << CalleeName << 0 /*object: */ << Var; 12859 } 12860 12861 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 12862 const CastExpr *Cast) { 12863 SmallString<128> SizeString; 12864 llvm::raw_svector_ostream OS(SizeString); 12865 12866 clang::CastKind Kind = Cast->getCastKind(); 12867 if (Kind == clang::CK_BitCast && 12868 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 12869 return; 12870 if (Kind == clang::CK_IntegralToPointer && 12871 !isa<IntegerLiteral>( 12872 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 12873 return; 12874 12875 switch (Cast->getCastKind()) { 12876 case clang::CK_BitCast: 12877 case clang::CK_IntegralToPointer: 12878 case clang::CK_FunctionToPointerDecay: 12879 OS << '\''; 12880 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 12881 OS << '\''; 12882 break; 12883 default: 12884 return; 12885 } 12886 12887 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 12888 << CalleeName << 0 /*object: */ << OS.str(); 12889 } 12890 } // namespace 12891 12892 /// Alerts the user that they are attempting to free a non-malloc'd object. 12893 void Sema::CheckFreeArguments(const CallExpr *E) { 12894 const std::string CalleeName = 12895 cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString(); 12896 12897 { // Prefer something that doesn't involve a cast to make things simpler. 
12898 const Expr *Arg = E->getArg(0)->IgnoreParenCasts(); 12899 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg)) 12900 switch (UnaryExpr->getOpcode()) { 12901 case UnaryOperator::Opcode::UO_AddrOf: 12902 return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr); 12903 case UnaryOperator::Opcode::UO_Plus: 12904 return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr); 12905 default: 12906 break; 12907 } 12908 12909 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg)) 12910 if (Lvalue->getType()->isArrayType()) 12911 return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue); 12912 12913 if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) { 12914 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object) 12915 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier(); 12916 return; 12917 } 12918 12919 if (isa<BlockExpr>(Arg)) { 12920 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object) 12921 << CalleeName << 1 /*object: block*/; 12922 return; 12923 } 12924 } 12925 // Maybe the cast was important, check after the other cases. 12926 if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0))) 12927 return CheckFreeArgumentsCast(*this, CalleeName, Cast); 12928 } 12929 12930 void 12931 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 12932 SourceLocation ReturnLoc, 12933 bool isObjCMethod, 12934 const AttrVec *Attrs, 12935 const FunctionDecl *FD) { 12936 // Check if the return value is null but should not be. 12937 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 12938 (!isObjCMethod && isNonNullType(lhsType))) && 12939 CheckNonNullExpr(*this, RetValExp)) 12940 Diag(ReturnLoc, diag::warn_null_ret) 12941 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); 12942 12943 // C++11 [basic.stc.dynamic.allocation]p4: 12944 // If an allocation function declared with a non-throwing 12945 // exception-specification fails to allocate storage, it shall return 12946 // a null pointer. 
Any other allocation function that fails to allocate 12947 // storage shall indicate failure only by throwing an exception [...] 12948 if (FD) { 12949 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 12950 if (Op == OO_New || Op == OO_Array_New) { 12951 const FunctionProtoType *Proto 12952 = FD->getType()->castAs<FunctionProtoType>(); 12953 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 12954 CheckNonNullExpr(*this, RetValExp)) 12955 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 12956 << FD << getLangOpts().CPlusPlus11; 12957 } 12958 } 12959 12960 if (RetValExp && RetValExp->getType()->isWebAssemblyTableType()) { 12961 Diag(ReturnLoc, diag::err_wasm_table_art) << 1; 12962 } 12963 12964 // PPC MMA non-pointer types are not allowed as return type. Checking the type 12965 // here prevent the user from using a PPC MMA type as trailing return type. 12966 if (Context.getTargetInfo().getTriple().isPPC64()) 12967 CheckPPCMMAType(RetValExp->getType(), ReturnLoc); 12968 } 12969 12970 /// Check for comparisons of floating-point values using == and !=. Issue a 12971 /// warning if the comparison is not likely to do what the programmer intended. 12972 void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS, 12973 BinaryOperatorKind Opcode) { 12974 if (!BinaryOperator::isEqualityOp(Opcode)) 12975 return; 12976 12977 // Match and capture subexpressions such as "(float) X == 0.1". 
  FloatingLiteral *FPLiteral;
  CastExpr *FPCast;
  // Returns true (and sets FPLiteral/FPCast) when one side is a floating
  // literal and the other is a cast expression, in either order.
  auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) {
    FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens());
    FPCast = dyn_cast<CastExpr>(R->IgnoreParens());
    return FPLiteral && FPCast;
  };

  if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) {
    auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>();
    auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>();
    if (SourceTy && TargetTy && SourceTy->isFloatingPoint() &&
        TargetTy->isFloatingPoint()) {
      bool Lossy;
      llvm::APFloat TargetC = FPLiteral->getValue();
      // Convert the literal into the (narrower) source type; Lossy is set if
      // the value cannot be represented exactly.
      TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)),
                      llvm::APFloat::rmNearestTiesToEven, &Lossy);
      if (Lossy) {
        // If the literal cannot be represented in the source type, then a
        // check for == is always false and check for != is always true.
        Diag(Loc, diag::warn_float_compare_literal)
            << (Opcode == BO_EQ) << QualType(SourceTy, 0)
            << LHS->getSourceRange() << RHS->getSourceRange();
        return;
      }
    }
  }

  // Match a more general floating-point equality comparison (-Wfloat-equal).
  Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
  Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();

  // Special case: check for x == x (which is OK).
  // Do not emit warnings for such cases.
  if (auto *DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
    if (auto *DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
      if (DRL->getDecl() == DRR->getDecl())
        return;

  // Special case: check for comparisons against literals that can be exactly
  // represented by APFloat. In such cases, do not emit a warning. This
  // is a heuristic: often comparison against such literals are used to
  // detect if a value in a variable has not changed. This clearly can
  // lead to false negatives.
  if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
    if (FLL->isExact())
      return;
  } else
    if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
      if (FLR->isExact())
        return;

  // Check for comparisons with builtin types.
  if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
    if (CL->getBuiltinCallee())
      return;

  if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
    if (CR->getBuiltinCallee())
      return;

  // Emit the diagnostic.
  Diag(Loc, diag::warn_floatingpoint_eq)
      << LHS->getSourceRange() << RHS->getSourceRange();
}

//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//

namespace {

/// Structure recording the 'active' range of an integer-valued
/// expression.
struct IntRange {
  /// The number of bits active in the int. Note that this includes exactly one
  /// sign bit if !NonNegative.
  unsigned Width;

  /// True if the int is known not to have negative values. If so, all leading
  /// bits before Width are known zero, otherwise they are known to be the
  /// same as the MSB within Width.
  bool NonNegative;

  IntRange(unsigned Width, bool NonNegative)
      : Width(Width), NonNegative(NonNegative) {}

  /// Number of bits excluding the sign bit.
  unsigned valueBits() const {
    return NonNegative ? Width : Width - 1;
  }

  /// Returns the range of the bool type.
  static IntRange forBoolType() {
    return IntRange(1, true);
  }

  /// Returns the range of an opaque value of the given integral type.
  static IntRange forValueOfType(ASTContext &C, QualType T) {
    return forValueOfCanonicalType(C,
                          T->getCanonicalTypeInternal().getTypePtr());
  }

  /// Returns the range of an opaque value of a canonical integral type.
  static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    // Look through wrappers down to the scalar element type.
    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();

    if (!C.getLangOpts().CPlusPlus) {
      // For enum types in C code, use the underlying datatype.
      if (const EnumType *ET = dyn_cast<EnumType>(T))
        T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr();
    } else if (const EnumType *ET = dyn_cast<EnumType>(T)) {
      // For enum types in C++, use the known bit width of the enumerators.
      EnumDecl *Enum = ET->getDecl();
      // In C++11, enums can have a fixed underlying type. Use this type to
      // compute the range.
      if (Enum->isFixed()) {
        return IntRange(C.getIntWidth(QualType(T, 0)),
                        !ET->isSignedIntegerOrEnumerationType());
      }

      unsigned NumPositive = Enum->getNumPositiveBits();
      unsigned NumNegative = Enum->getNumNegativeBits();

      if (NumNegative == 0)
        return IntRange(NumPositive, true/*NonNegative*/);
      else
        return IntRange(std::max(NumPositive + 1, NumNegative),
                        false/*NonNegative*/);
    }

    if (const auto *EIT = dyn_cast<BitIntType>(T))
      return IntRange(EIT->getNumBits(), EIT->isUnsigned());

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the "target" range of a canonical integral type, i.e.
  /// the range of values expressible in the type.
  ///
  /// This matches forValueOfCanonicalType except that enums have the
  /// full range of their type, not the range of their enumerators.
  static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();
    if (const EnumType *ET = dyn_cast<EnumType>(T))
      T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();

    if (const auto *EIT = dyn_cast<BitIntType>(T))
      return IntRange(EIT->getNumBits(), EIT->isUnsigned());

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the supremum of two ranges: i.e. their conservative merge.
  static IntRange join(IntRange L, IntRange R) {
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned,
                    L.NonNegative && R.NonNegative);
  }

  /// Return the range of a bitwise-AND of the two ranges.
  static IntRange bit_and(IntRange L, IntRange R) {
    unsigned Bits = std::max(L.Width, R.Width);
    bool NonNegative = false;
    // A non-negative operand clamps the result: AND cannot set bits above
    // that operand's width.
    if (L.NonNegative) {
      Bits = std::min(Bits, L.Width);
      NonNegative = true;
    }
    if (R.NonNegative) {
      Bits = std::min(Bits, R.Width);
      NonNegative = true;
    }
    return IntRange(Bits, NonNegative);
  }

  /// Return the range of a sum of the two ranges.
  static IntRange sum(IntRange L, IntRange R) {
    bool Unsigned = L.NonNegative && R.NonNegative;
    // One extra value bit for the possible carry.
    return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned,
                    Unsigned);
  }

  /// Return the range of a difference of the two ranges.
  static IntRange difference(IntRange L, IntRange R) {
    // We need a 1-bit-wider range if:
    //   1) LHS can be negative: least value can be reduced.
    //   2) RHS can be negative: greatest value can be increased.
    bool CanWiden = !L.NonNegative || !R.NonNegative;
    // The result stays non-negative only when the RHS is known to be zero
    // (Width == 0) and the LHS is non-negative.
    bool Unsigned = L.NonNegative && R.Width == 0;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden +
                    !Unsigned,
                    Unsigned);
  }

  /// Return the range of a product of the two ranges.
  static IntRange product(IntRange L, IntRange R) {
    // If both LHS and RHS can be negative, we can form
    //   -2^L * -2^R = 2^(L + R)
    // which requires L + R + 1 value bits to represent.
    bool CanWiden = !L.NonNegative && !R.NonNegative;
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned,
                    Unsigned);
  }

  /// Return the range of a remainder operation between the two ranges.
  static IntRange rem(IntRange L, IntRange R) {
    // The result of a remainder can't be larger than the result of
    // either side. The sign of the result is the sign of the LHS.
    bool Unsigned = L.NonNegative;
    return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned,
                    Unsigned);
  }
};

} // namespace

/// Estimate the range of a known integer constant; may truncate \p value
/// to \p MaxWidth as a side effect.
static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
                              unsigned MaxWidth) {
  if (value.isSigned() && value.isNegative())
    return IntRange(value.getSignificantBits(), false);

  if (value.getBitWidth() > MaxWidth)
    value = value.trunc(MaxWidth);

  // isNonNegative() just checks the sign bit without considering
  // signedness.
  return IntRange(value.getActiveBits(), true);
}

/// Estimate the range of an evaluated constant, handling scalar, vector,
/// and complex-integer results.
static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
                              unsigned MaxWidth) {
  if (result.isInt())
    return GetValueRange(C, result.getInt(), MaxWidth);

  if (result.isVector()) {
    // Join the ranges of all elements.
    IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
    for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
      IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
      R = IntRange::join(R, El);
    }
    return R;
  }

  if (result.isComplexInt()) {
    IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
    IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
    return IntRange::join(R, I);
  }

  // This can happen with lossless casts to intptr_t of "based" lvalues.
  // Assume it might use arbitrary bits.
  // FIXME: The only reason we need to pass the type in here is to get
  // the sign right on this one case. It would be nice if APValue
  // preserved this.
  assert(result.isLValue() || result.isAddrLabelDiff());
  return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
}

/// Return the type of \p E, looking through an _Atomic wrapper to its
/// value type.
static QualType GetExprType(const Expr *E) {
  QualType Ty = E->getType();
  if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
    Ty = AtomicRHS->getValueType();
  return Ty;
}

/// Pseudo-evaluate the given integer expression, estimating the
/// range of values it might take.
///
/// \param MaxWidth The width to which the value will be truncated.
/// \param Approximate If \c true, return a likely range for the result: in
///        particular, assume that arithmetic on narrower types doesn't leave
///        those types. If \c false, return a range including all possible
///        result values.
static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
                             bool InConstantContext, bool Approximate) {
  E = E->IgnoreParens();

  // Try a full evaluation first.
  Expr::EvalResult result;
  if (E->EvaluateAsRValue(result, C, InConstantContext))
    return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);

  // I think we only want to look through implicit casts here; if the
  // user has an explicit widening cast, we should treat the value as
  // being of the new, wider type.
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
    if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
      return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
                          Approximate);

    IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));

    bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
                         CE->getCastKind() == CK_BooleanToSignedIntegral;

    // Assume that non-integer casts can span the full range of the type.
    if (!isIntegerCast)
      return OutputTypeRange;

    IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
                                     std::min(MaxWidth, OutputTypeRange.Width),
                                     InConstantContext, Approximate);

    // Bail out if the subexpr's range is as wide as the cast type.
    if (SubRange.Width >= OutputTypeRange.Width)
      return OutputTypeRange;

    // Otherwise, we take the smaller width, and we're non-negative if
    // either the output type or the subexpr is.
    return IntRange(SubRange.Width,
                    SubRange.NonNegative || OutputTypeRange.NonNegative);
  }

  if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
    // If we can fold the condition, just take that operand.
    bool CondResult;
    if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
      return GetExprRange(C,
                          CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
                          MaxWidth, InConstantContext, Approximate);

    // Otherwise, conservatively merge.
    // GetExprRange requires an integer expression, but a throw expression
    // results in a void type.
    // NOTE: this local 'E' intentionally shadows the parameter for the rest
    // of this block.
    Expr *E = CO->getTrueExpr();
    IntRange L = E->getType()->isVoidType()
                     ? IntRange{0, true}
                     : GetExprRange(C, E, MaxWidth, InConstantContext,
                                    Approximate);
    E = CO->getFalseExpr();
    IntRange R = E->getType()->isVoidType()
                     ? IntRange{0, true}
                     : GetExprRange(C, E, MaxWidth, InConstantContext,
                                    Approximate);
    return IntRange::join(L, R);
  }

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    // Most binary operators fall through to a conservative merge of their
    // operand ranges; 'Combine' selects which merge to use.
    IntRange (*Combine)(IntRange, IntRange) = IntRange::join;

    switch (BO->getOpcode()) {
    case BO_Cmp:
      llvm_unreachable("builtin <=> should have class type");

    // Boolean-valued operations are single-bit and positive.
    case BO_LAnd:
    case BO_LOr:
    case BO_LT:
    case BO_GT:
    case BO_LE:
    case BO_GE:
    case BO_EQ:
    case BO_NE:
      return IntRange::forBoolType();

    // The type of the assignments is the type of the LHS, so the RHS
    // is not necessarily the same type.
    case BO_MulAssign:
    case BO_DivAssign:
    case BO_RemAssign:
    case BO_AddAssign:
    case BO_SubAssign:
    case BO_XorAssign:
    case BO_OrAssign:
      // TODO: bitfields?
      return IntRange::forValueOfType(C, GetExprType(E));

    // Simple assignments just pass through the RHS, which will have
    // been coerced to the LHS type.
    case BO_Assign:
      // TODO: bitfields?
      return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
                          Approximate);

    // Operations with opaque sources are black-listed.
    case BO_PtrMemD:
    case BO_PtrMemI:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Bitwise-and uses the *infimum* of the two source ranges.
    case BO_And:
    case BO_AndAssign:
      Combine = IntRange::bit_and;
      break;

    // Left shift gets black-listed based on a judgement call.
    case BO_Shl:
      // ...except that we want to treat '1 << (blah)' as logically
      // positive. It's an important idiom.
      if (IntegerLiteral *I
            = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
        if (I->getValue() == 1) {
          IntRange R = IntRange::forValueOfType(C, GetExprType(E));
          return IntRange(R.Width, /*NonNegative*/ true);
        }
      }
      [[fallthrough]];

    case BO_ShlAssign:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Right shift by a constant can narrow its left argument.
    case BO_Shr:
    case BO_ShrAssign: {
      IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext,
                                Approximate);

      // If the shift amount is a positive constant, drop the width by
      // that much.
      if (std::optional<llvm::APSInt> shift =
              BO->getRHS()->getIntegerConstantExpr(C)) {
        if (shift->isNonNegative()) {
          unsigned zext = shift->getZExtValue();
          // A shift of at least the full width leaves only a possible sign
          // bit (for signed LHS).
          if (zext >= L.Width)
            L.Width = (L.NonNegative ? 0 : 1);
          else
            L.Width -= zext;
        }
      }

      return L;
    }

    // Comma acts as its right operand.
    case BO_Comma:
      return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
                          Approximate);

    case BO_Add:
      if (!Approximate)
        Combine = IntRange::sum;
      break;

    case BO_Sub:
      if (BO->getLHS()->getType()->isPointerType())
        return IntRange::forValueOfType(C, GetExprType(E));
      if (!Approximate)
        Combine = IntRange::difference;
      break;

    case BO_Mul:
      if (!Approximate)
        Combine = IntRange::product;
      break;

    // The width of a division result is mostly determined by the size
    // of the LHS.
    case BO_Div: {
      // Don't 'pre-truncate' the operands.
      unsigned opWidth = C.getIntWidth(GetExprType(E));
      IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext,
                                Approximate);

      // If the divisor is constant, use that.
      if (std::optional<llvm::APSInt> divisor =
              BO->getRHS()->getIntegerConstantExpr(C)) {
        unsigned log2 = divisor->logBase2(); // floor(log_2(divisor))
        if (log2 >= L.Width)
          L.Width = (L.NonNegative ? 0 : 1);
        else
          L.Width = std::min(L.Width - log2, MaxWidth);
        return L;
      }

      // Otherwise, just use the LHS's width.
      // FIXME: This is wrong if the LHS could be its minimal value and the RHS
      // could be -1.
      IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext,
                                Approximate);
      return IntRange(L.Width, L.NonNegative && R.NonNegative);
    }

    case BO_Rem:
      Combine = IntRange::rem;
      break;

    // The default behavior is okay for these.
    case BO_Xor:
    case BO_Or:
      break;
    }

    // Combine the two ranges, but limit the result to the type in which we
    // performed the computation.
    QualType T = GetExprType(E);
    unsigned opWidth = C.getIntWidth(T);
    IntRange L =
        GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate);
    IntRange R =
        GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate);
    // NOTE: this local 'C' shadows the ASTContext parameter from here on.
    IntRange C = Combine(L, R);
    C.NonNegative |= T->isUnsignedIntegerOrEnumerationType();
    C.Width = std::min(C.Width, MaxWidth);
    return C;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(E)) {
    switch (UO->getOpcode()) {
    // Boolean-valued operations are white-listed.
    case UO_LNot:
      return IntRange::forBoolType();

    // Operations with opaque sources are black-listed.
    case UO_Deref:
    case UO_AddrOf: // should be impossible
      return IntRange::forValueOfType(C, GetExprType(E));

    default:
      return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext,
                          Approximate);
    }
  }

  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext,
                        Approximate);

  // A bit-field access has exactly the range of the bit-field's width.
  if (const auto *BitField = E->getSourceBitField())
    return IntRange(BitField->getBitWidthValue(C),
                    BitField->getType()->isUnsignedIntegerOrEnumerationType());

  // Fallback: the full range of the expression's type.
  return IntRange::forValueOfType(C, GetExprType(E));
}

/// Convenience overload: estimate the range of \p E at the natural width
/// of its own type.
static IntRange GetExprRange(ASTContext &C, const Expr *E,
                             bool InConstantContext, bool Approximate) {
  return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext,
                      Approximate);
}

/// Checks whether the given value, which currently has the given
/// source semantics, has the same value when coerced through the
/// target semantics.
static bool IsSameFloatAfterCast(const llvm::APFloat &value,
                                 const llvm::fltSemantics &Src,
                                 const llvm::fltSemantics &Tgt) {
  llvm::APFloat truncated = value;

  // Round-trip through the source and target semantics; a value that
  // survives bit-identically is exactly representable in both.
  bool ignored;
  truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored);
  truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored);

  return truncated.bitwiseIsEqual(value);
}

/// Checks whether the given value, which currently has the given
/// source semantics, has the same value when coerced through the
/// target semantics.
///
/// The value might be a vector of floats (or a complex number).
static bool IsSameFloatAfterCast(const APValue &value,
                                 const llvm::fltSemantics &Src,
                                 const llvm::fltSemantics &Tgt) {
  if (value.isFloat())
    return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);

  if (value.isVector()) {
    // Every element must survive the round-trip.
    for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
      if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
        return false;
    return true;
  }

  assert(value.isComplexFloat());
  return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
          IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
}

// Forward declaration; defined later in this file.
static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC,
                                       bool IsListInit = false);

/// Returns true if \p E is a reference to an enum constant, or was expanded
/// from a macro other than a boolean-literal macro. Used to suppress
/// tautological-comparison diagnostics for such operands.
static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
  // Suppress cases where we are comparing against an enum constant.
  if (const DeclRefExpr *DR =
      dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
    if (isa<EnumConstantDecl>(DR->getDecl()))
      return true;

  // Suppress cases where the value is expanded from a macro, unless that macro
  // is how a language represents a boolean literal. This is the case in both C
  // and Objective-C.
  SourceLocation BeginLoc = E->getBeginLoc();
  if (BeginLoc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroName(
        BeginLoc, S.getSourceManager(), S.getLangOpts());
    return MacroName != "YES" && MacroName != "NO" &&
           MacroName != "true" && MacroName != "false";
  }

  return false;
}

/// Returns true if \p E is an integer expression that is unsigned either at
/// its own type or after stripping parens and implicit casts.
static bool isKnownToHaveUnsignedValue(Expr *E) {
  return E->getType()->isIntegerType() &&
         (!E->getType()->isSignedIntegerType() ||
          !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
}

namespace {
/// The promoted range of values of a type. In general this has the
/// following structure:
///
///   |-----------| . . . |-----------|
///   ^           ^       ^           ^
///   Min       HoleMin  HoleMax      Max
///
/// ... where there is only a hole if a signed type is promoted to unsigned
/// (in which case Min and Max are the smallest and largest representable
/// values).
struct PromotedRange {
  // Min, or HoleMax if there is a hole.
  llvm::APSInt PromotedMin;
  // Max, or HoleMin if there is a hole.
  llvm::APSInt PromotedMax;

  /// Build the range of an \p R-shaped value once promoted to a type of
  /// \p BitWidth bits with the given signedness.
  PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) {
    if (R.Width == 0)
      // A zero-width range holds the single value 0.
      PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned);
    else if (R.Width >= BitWidth && !Unsigned) {
      // Promotion made the type *narrower*. This happens when promoting
      // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'.
      // Treat all values of 'signed int' as being in range for now.
      PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned);
      PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned);
    } else {
      PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMin.setIsUnsigned(Unsigned);

      PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMax.setIsUnsigned(Unsigned);
    }
  }

  // Determine whether this range is contiguous (has no hole).
  bool isContiguous() const { return PromotedMin <= PromotedMax; }

  // Where a constant value is within the range.
  enum ComparisonResult {
    LT = 0x1,
    LE = 0x2,
    GT = 0x4,
    GE = 0x8,
    EQ = 0x10,
    NE = 0x20,
    InRangeFlag = 0x40,

    Less = LE | LT | NE,
    Min = LE | InRangeFlag,
    InRange = InRangeFlag,
    Max = GE | InRangeFlag,
    Greater = GE | GT | NE,

    OnlyValue = LE | GE | EQ | InRangeFlag,
    InHole = NE
  };

  /// Classify where \p Value falls relative to this promoted range.
  ComparisonResult compare(const llvm::APSInt &Value) const {
    assert(Value.getBitWidth() == PromotedMin.getBitWidth() &&
           Value.isUnsigned() == PromotedMin.isUnsigned());
    if (!isContiguous()) {
      // With a hole, the range wraps around: values at or above PromotedMin
      // (== HoleMax) or at or below PromotedMax (== HoleMin) are in range.
      assert(Value.isUnsigned() && "discontiguous range for signed compare");
      if (Value.isMinValue()) return Min;
      if (Value.isMaxValue()) return Max;
      if (Value >= PromotedMin) return InRange;
      if (Value <= PromotedMax) return InRange;
      return InHole;
    }

    switch (llvm::APSInt::compareValues(Value, PromotedMin)) {
    case -1: return Less;
    case 0: return PromotedMin == PromotedMax ? OnlyValue : Min;
    case 1:
      switch (llvm::APSInt::compareValues(Value, PromotedMax)) {
      case -1: return InRange;
      case 0: return Max;
      case 1: return Greater;
      }
    }

    llvm_unreachable("impossible compare result");
  }

  /// If a comparison \p Op against a constant classified as \p R always
  /// folds to the same result, return a printable spelling of that result
  /// ("true"/"false", or a std::strong_ordering value for <=>); otherwise
  /// return std::nullopt.
  static std::optional<StringRef>
  constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
    if (Op == BO_Cmp) {
      ComparisonResult LTFlag = LT, GTFlag = GT;
      if (ConstantOnRHS) std::swap(LTFlag, GTFlag);

      if (R & EQ) return StringRef("'std::strong_ordering::equal'");
      if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
      if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");
      return std::nullopt;
    }

    ComparisonResult TrueFlag, FalseFlag;
    if (Op == BO_EQ) {
      TrueFlag = EQ;
      FalseFlag = NE;
    } else if (Op == BO_NE) {
      TrueFlag = NE;
      FalseFlag = EQ;
    } else {
      // Relational operators: normalize for which side the constant is on,
      // then flip for the non-strict forms.
      if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) {
        TrueFlag = LT;
        FalseFlag = GE;
      } else {
        TrueFlag = GT;
        FalseFlag = LE;
      }
      if (Op == BO_GE || Op == BO_LE)
        std::swap(TrueFlag, FalseFlag);
    }
    if (R & TrueFlag)
      return StringRef("true");
    if (R & FalseFlag)
      return StringRef("false");
    return std::nullopt;
  }
};
} // namespace

/// Returns true if \p E has enum type once implicit integral promotions
/// (and no-op casts) are stripped off.
static bool HasEnumType(Expr *E) {
  // Strip off implicit integral promotions.
  while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() != CK_IntegralCast &&
        ICE->getCastKind() != CK_NoOp)
      break;
    E = ICE->getSubExpr();
  }

  return E->getType()->isEnumeralType();
}

/// Classify a constant operand for diagnostic wording: a literal 'true',
/// a literal 'false', or anything else.
static int classifyConstantValue(Expr *Constant) {
  // The values of this enumeration are used in the diagnostics
  // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
  enum ConstantValueKind {
    Miscellaneous = 0,
    LiteralTrue,
    LiteralFalse
  };
  if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant))
    return BL->getValue() ? ConstantValueKind::LiteralTrue
                          : ConstantValueKind::LiteralFalse;
  return ConstantValueKind::Miscellaneous;
}

/// Diagnose comparisons against a constant that always evaluate to the same
/// result because of the limited range of the other operand (its value range
/// or its type's range). Returns true if a diagnostic was emitted.
static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
                                        Expr *Constant, Expr *Other,
                                        const llvm::APSInt &Value,
                                        bool RhsConstant) {
  if (S.inTemplateInstantiation())
    return false;

  // Keep the unstripped operand for the enum/char classification below.
  Expr *OriginalOther = Other;

  Constant = Constant->IgnoreParenImpCasts();
  Other = Other->IgnoreParenImpCasts();

  // Suppress warnings on tautological comparisons between values of the same
  // enumeration type. There are only two ways we could warn on this:
  //   - If the constant is outside the range of representable values of
  //     the enumeration. In such a case, we should warn about the cast
  //     to enumeration type, not about the comparison.
  //   - If the constant is the maximum / minimum in-range value. For an
  //     enumeration type, such comparisons can be meaningful and useful.
  if (Constant->getType()->isEnumeralType() &&
      S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
    return false;

  IntRange OtherValueRange = GetExprRange(
      S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);

  QualType OtherT = Other->getType();
  if (const auto *AT = OtherT->getAs<AtomicType>())
    OtherT = AT->getValueType();
  IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);

  // Special case for ObjC BOOL on targets where it's a typedef for a signed
  // char (Namely, macOS). FIXME: IntRange::forValueOfType should do this.
  bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
                              S.NSAPIObj->isObjCBOOLType(OtherT) &&
                              OtherT->isSpecificBuiltinType(BuiltinType::SChar);

  // Whether we're treating Other as being a bool because of the form of
  // expression despite it having another type (typically 'int' in C).
  bool OtherIsBooleanDespiteType =
      !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
  if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
    OtherTypeRange = OtherValueRange = IntRange::forBoolType();

  // Check if all values in the range of possible values of this expression
  // lead to the same comparison outcome.
  PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
                                        Value.isUnsigned());
  auto Cmp = OtherPromotedValueRange.compare(Value);
  auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
  if (!Result)
    return false;

  // Also consider the range determined by the type alone. This allows us to
  // classify the warning under the proper diagnostic group.
  bool TautologicalTypeCompare = false;
  {
    PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
                                         Value.isUnsigned());
    auto TypeCmp = OtherPromotedTypeRange.compare(Value);
    if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
                                                       RhsConstant)) {
      TautologicalTypeCompare = true;
      Cmp = TypeCmp;
      Result = TypeResult;
    }
  }

  // Don't warn if the non-constant operand actually always evaluates to the
  // same value.
  if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
    return false;

  // Suppress the diagnostic for an in-range comparison if the constant comes
  // from a macro or enumerator. We don't want to diagnose
  //
  //   some_long_value <= INT_MAX
  //
  // when sizeof(int) == sizeof(long).
  bool InRange = Cmp & PromotedRange::InRangeFlag;
  if (InRange && IsEnumConstOrFromMacro(S, Constant))
    return false;

  // A comparison of an unsigned bit-field against 0 is really a type problem,
  // even though at the type level the bit-field might promote to 'signed int'.
  if (Other->refersToBitField() && InRange && Value == 0 &&
      Other->getType()->isUnsignedIntegerOrEnumerationType())
    TautologicalTypeCompare = true;

  // If this is a comparison to an enum constant, include that
  // constant in the diagnostic.
  const EnumConstantDecl *ED = nullptr;
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant))
    ED = dyn_cast<EnumConstantDecl>(DR->getDecl());

  // Should be enough for uint128 (39 decimal digits)
  SmallString<64> PrettySourceValue;
  llvm::raw_svector_ostream OS(PrettySourceValue);
  if (ED) {
    OS << '\'' << *ED << "' (" << Value << ")";
  } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>(
               Constant->IgnoreParenImpCasts())) {
    OS << (BL->getValue() ? "YES" : "NO");
  } else {
    OS << Value;
  }

  // Value-range-only tautology (the type alone doesn't make it tautological).
  if (!TautologicalTypeCompare) {
    S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range)
        << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative
        << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
    return true;
  }

  if (IsObjCSignedCharBool) {
    S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                          S.PDiag(diag::warn_tautological_compare_objc_bool)
                              << OS.str() << *Result);
    return true;
  }

  // FIXME: We use a somewhat different formatting for the in-range cases and
  // cases involving boolean values for historical reasons. We should pick a
  // consistent way of presenting these diagnostics.
  if (!InRange || Other->isKnownToHaveBooleanValue()) {

    S.DiagRuntimeBehavior(
        E->getOperatorLoc(), E,
        S.PDiag(!InRange ? diag::warn_out_of_range_compare
                         : diag::warn_tautological_bool_compare)
            << OS.str() << classifyConstantValue(Constant) << OtherT
            << OtherIsBooleanDespiteType << *Result
            << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
  } else {
    // Pick the most specific "always true" diagnostic for unsigned-vs-zero
    // comparisons; otherwise fall back to the generic tautology warning.
    bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy;
    unsigned Diag =
        (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
            ? (HasEnumType(OriginalOther)
                   ? diag::warn_unsigned_enum_always_true_comparison
                   : IsCharTy ? diag::warn_unsigned_char_always_true_comparison
                              : diag::warn_unsigned_always_true_comparison)
            : diag::warn_tautological_constant_compare;

    S.Diag(E->getOperatorLoc(), Diag)
        << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
  }

  return true;
}

/// Analyze the operands of the given comparison. Implements the
/// fallback case from AnalyzeComparison.
static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
}

/// Implements -Wsign-compare.
///
/// \param E the binary operator to check for warnings
static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
  // The type the comparison is being performed in.
  QualType T = E->getLHS()->getType();

  // Only analyze comparison operators where both sides have been converted to
  // the same type.
  if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType()))
    return AnalyzeImpConvsInComparison(S, E);

  // Don't analyze value-dependent comparisons directly.
  if (E->isValueDependent())
    return AnalyzeImpConvsInComparison(S, E);

  Expr *LHS = E->getLHS();
  Expr *RHS = E->getRHS();

  if (T->isIntegralType(S.Context)) {
    std::optional<llvm::APSInt> RHSValue =
        RHS->getIntegerConstantExpr(S.Context);
    std::optional<llvm::APSInt> LHSValue =
        LHS->getIntegerConstantExpr(S.Context);

    // We don't care about expressions whose result is a constant.
    if (RHSValue && LHSValue)
      return AnalyzeImpConvsInComparison(S, E);

    // We only care about expressions where just one side is literal
    if ((bool)RHSValue ^ (bool)LHSValue) {
      // Is the constant on the RHS or LHS?
      const bool RhsConstant = (bool)RHSValue;
      Expr *Const = RhsConstant ? RHS : LHS;
      Expr *Other = RhsConstant ? LHS : RHS;
      const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue;

      // Check whether an integer constant comparison results in a value
      // of 'true' or 'false'.
      if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant))
        return AnalyzeImpConvsInComparison(S, E);
    }
  }

  if (!T->hasUnsignedIntegerRepresentation()) {
    // We don't do anything special if this isn't an unsigned integral
    // comparison: we're only interested in integral comparisons, and
    // signed comparisons only happen in cases we don't care to warn about.
    return AnalyzeImpConvsInComparison(S, E);
  }

  LHS = LHS->IgnoreParenImpCasts();
  RHS = RHS->IgnoreParenImpCasts();

  if (!S.getLangOpts().CPlusPlus) {
    // Avoid warning about comparison of integers with different signs when
    // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
    // the type of `E`.
    if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
      LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
    if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
      RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
  }

  // Check to see if one of the (unmodified) operands is of different
  // signedness.
  Expr *signedOperand, *unsignedOperand;
  if (LHS->getType()->hasSignedIntegerRepresentation()) {
    assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
           "unsigned comparison between two signed integer expressions?");
    signedOperand = LHS;
    unsignedOperand = RHS;
  } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
    signedOperand = RHS;
    unsignedOperand = LHS;
  } else {
    // Both operands already unsigned: nothing further to check here.
    return AnalyzeImpConvsInComparison(S, E);
  }

  // Otherwise, calculate the effective range of the signed operand.
  IntRange signedRange = GetExprRange(
      S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true);

  // Go ahead and analyze implicit conversions in the operands. Note
  // that we skip the implicit conversions on both sides.
  AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
  AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());

  // If the signed range is non-negative, -Wsign-compare won't fire.
  if (signedRange.NonNegative)
    return;

  // For (in)equality comparisons, if the unsigned operand is a
  // constant which cannot collide with an overflowed signed operand,
  // then reinterpreting the signed operand as unsigned will not
  // change the result of the comparison.
  if (E->isEqualityOp()) {
    unsigned comparisonWidth = S.Context.getIntWidth(T);
    IntRange unsignedRange =
        GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(),
                     /*Approximate*/ true);

    // We should never be unable to prove that the unsigned operand is
    // non-negative.
    assert(unsignedRange.NonNegative && "unsigned range includes negative?");

    if (unsignedRange.Width < comparisonWidth)
      return;
  }

  S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                        S.PDiag(diag::warn_mixed_sign_comparison)
                            << LHS->getType() << RHS->getType()
                            << LHS->getSourceRange() << RHS->getSourceRange());
}

/// Analyzes an attempt to assign the given value to a bitfield.
///
/// Returns true if there was something fishy about the attempt.
static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
                                      SourceLocation InitLoc) {
  assert(Bitfield->isBitField());
  if (Bitfield->isInvalidDecl())
    return false;

  // White-list bool bitfields.
  QualType BitfieldType = Bitfield->getType();
  if (BitfieldType->isBooleanType())
    return false;

  if (BitfieldType->isEnumeralType()) {
    EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
    // If the underlying enum type was not explicitly specified as an unsigned
    // type and the enum contains only positive values, MSVC++ will cause an
    // inconsistency by storing this as a signed type.
    if (S.getLangOpts().CPlusPlus11 &&
        !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
        BitfieldEnumDecl->getNumPositiveBits() > 0 &&
        BitfieldEnumDecl->getNumNegativeBits() == 0) {
      S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
          << BitfieldEnumDecl;
    }
  }

  // Ignore value- or type-dependent expressions.
  if (Bitfield->getBitWidth()->isValueDependent() ||
      Bitfield->getBitWidth()->isTypeDependent() ||
      Init->isValueDependent() ||
      Init->isTypeDependent())
    return false;

  Expr *OriginalInit = Init->IgnoreParenImpCasts();
  unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);

  Expr::EvalResult Result;
  if (!OriginalInit->EvaluateAsInt(Result, S.Context,
                                   Expr::SE_AllowSideEffects)) {
    // The RHS is not constant. If the RHS has an enum type, make sure the
    // bitfield is wide enough to hold all the values of the enum without
    // truncation.
    if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
      EnumDecl *ED = EnumTy->getDecl();
      bool SignedBitfield = BitfieldType->isSignedIntegerType();

      // Enum types are implicitly signed on Windows, so check if there are any
      // negative enumerators to see if the enum was intended to be signed or
      // not.
      bool SignedEnum = ED->getNumNegativeBits() > 0;

      // Check for surprising sign changes when assigning enum values to a
      // bitfield of different signedness. If the bitfield is signed and we
      // have exactly the right number of bits to store this unsigned enum,
      // suggest changing the enum to an unsigned type. This typically happens
      // on Windows where unfixed enums always use an underlying type of 'int'.
      unsigned DiagID = 0;
      if (SignedEnum && !SignedBitfield) {
        DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
      } else if (SignedBitfield && !SignedEnum &&
                 ED->getNumPositiveBits() == FieldWidth) {
        DiagID = diag::warn_signed_bitfield_enum_conversion;
      }

      if (DiagID) {
        S.Diag(InitLoc, DiagID) << Bitfield << ED;
        TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
        SourceRange TypeRange =
            TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
        S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
            << SignedEnum << TypeRange;
      }

      // Compute the required bitwidth. If the enum has negative values, we need
      // one more bit than the normal number of positive bits to represent the
      // sign bit.
      unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1,
                                                  ED->getNumNegativeBits())
                                       : ED->getNumPositiveBits();

      // Check the bitwidth.
      if (BitsNeeded > FieldWidth) {
        Expr *WidthExpr = Bitfield->getBitWidth();
        S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum)
            << Bitfield << ED;
        S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield)
            << BitsNeeded << ED << WidthExpr->getSourceRange();
      }
    }

    return false;
  }

  llvm::APSInt Value = Result.Val.getInt();

  unsigned OriginalWidth = Value.getBitWidth();

  // In C, the macro 'true' from stdbool.h will evaluate to '1'; To reduce
  // false positives where the user is demonstrating they intend to use the
  // bit-field as a Boolean, check to see if the value is 1 and we're assigning
  // to a one-bit bit-field to see if the value came from a macro named 'true'.
14123 bool OneAssignedToOneBitBitfield = FieldWidth == 1 && Value == 1; 14124 if (OneAssignedToOneBitBitfield && !S.LangOpts.CPlusPlus) { 14125 SourceLocation MaybeMacroLoc = OriginalInit->getBeginLoc(); 14126 if (S.SourceMgr.isInSystemMacro(MaybeMacroLoc) && 14127 S.findMacroSpelling(MaybeMacroLoc, "true")) 14128 return false; 14129 } 14130 14131 if (!Value.isSigned() || Value.isNegative()) 14132 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 14133 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 14134 OriginalWidth = Value.getSignificantBits(); 14135 14136 if (OriginalWidth <= FieldWidth) 14137 return false; 14138 14139 // Compute the value which the bitfield will contain. 14140 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 14141 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 14142 14143 // Check whether the stored value is equal to the original value. 14144 TruncatedValue = TruncatedValue.extend(OriginalWidth); 14145 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 14146 return false; 14147 14148 std::string PrettyValue = toString(Value, 10); 14149 std::string PrettyTrunc = toString(TruncatedValue, 10); 14150 14151 S.Diag(InitLoc, OneAssignedToOneBitBitfield 14152 ? diag::warn_impcast_single_bit_bitield_precision_constant 14153 : diag::warn_impcast_bitfield_precision_constant) 14154 << PrettyValue << PrettyTrunc << OriginalInit->getType() 14155 << Init->getSourceRange(); 14156 14157 return true; 14158 } 14159 14160 /// Analyze the given simple or compound assignment for warning-worthy 14161 /// operations. 14162 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 14163 // Just recurse on the LHS. 14164 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 14165 14166 // We want to recurse on the RHS as normal unless we're assigning to 14167 // a bitfield. 
  if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) {
    // A true return means the bit-field assignment was already diagnosed;
    // in that case skip the implicit conversions on the RHS so we don't
    // also emit the generic conversion warnings for the same expression.
    if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(),
                                  E->getOperatorLoc())) {
      // Recurse, ignoring any implicit conversions on the RHS.
      return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(),
                                        E->getOperatorLoc());
    }
  }

  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());

  // Diagnose implicitly sequentially-consistent atomic assignment.
  if (E->getLHS()->getType()->isAtomicType())
    S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
}

/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
///
/// \param SourceType the type the diagnostic names as the source (may differ
///        from E's own type).
/// \param diag the diagnostic ID to emit.
/// \param pruneControlFlow if true, route through DiagRuntimeBehavior so the
///        warning is suppressed in unreachable/discarded contexts.
static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
                            SourceLocation CContext, unsigned diag,
                            bool pruneControlFlow = false) {
  if (pruneControlFlow) {
    S.DiagRuntimeBehavior(E->getExprLoc(), E,
                          S.PDiag(diag)
                              << SourceType << T << E->getSourceRange()
                              << SourceRange(CContext));
    return;
  }
  S.Diag(E->getExprLoc(), diag)
      << SourceType << T << E->getSourceRange() << SourceRange(CContext);
}

/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
14200 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 14201 SourceLocation CContext, 14202 unsigned diag, bool pruneControlFlow = false) { 14203 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 14204 } 14205 14206 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 14207 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 14208 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 14209 } 14210 14211 static void adornObjCBoolConversionDiagWithTernaryFixit( 14212 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 14213 Expr *Ignored = SourceExpr->IgnoreImplicit(); 14214 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 14215 Ignored = OVE->getSourceExpr(); 14216 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 14217 isa<BinaryOperator>(Ignored) || 14218 isa<CXXOperatorCallExpr>(Ignored); 14219 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 14220 if (NeedsParens) 14221 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 14222 << FixItHint::CreateInsertion(EndLoc, ")"); 14223 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 14224 } 14225 14226 /// Diagnose an implicit cast from a floating point value to an integer value. 
/// Diagnose an implicit conversion of the floating-point expression \p E to
/// the integer type \p T, choosing the most specific warning based on whether
/// the value is a constant, a literal, out of range, or rounded to zero.
static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
                                    SourceLocation CContext) {
  const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool);
  // Inside template instantiations, route diagnostics through
  // DiagRuntimeBehavior so unreachable instantiations don't warn.
  const bool PruneWarnings = S.inTemplateInstantiation();

  Expr *InnerE = E->IgnoreParenImpCasts();
  // We also want to warn on, e.g., "int i = -1.234"
  if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
    if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
      InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();

  const bool IsLiteral =
      isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE);

  llvm::APFloat Value(0.0);
  bool IsConstant =
      E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects);
  if (!IsConstant) {
    // Non-constant value: only the generic float-to-integer (or ObjC BOOL)
    // warning applies.
    if (isObjCSignedCharBool(S, T)) {
      return adornObjCBoolConversionDiagWithTernaryFixit(
          S, E,
          S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool)
              << E->getType());
    }

    return DiagnoseImpCast(S, E, T, CContext,
                           diag::warn_impcast_float_integer, PruneWarnings);
  }

  bool isExact = false;

  llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
                            T->hasUnsignedIntegerRepresentation());
  llvm::APFloat::opStatus Result = Value.convertToInteger(
      IntegerValue, llvm::APFloat::rmTowardZero, &isExact);

  // FIXME: Force the precision of the source value down so we don't print
  // digits which are usually useless (we don't really care here if we
  // truncate a digit by accident in edge cases). Ideally, APFloat::toString
  // would automatically print the shortest representation, but it's a bit
  // tricky to implement.
  SmallString<16> PrettySourceValue;
  unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
  // Convert the binary precision to a decimal digit count; 59/196 ~= log10(2).
  precision = (precision * 59 + 195) / 196;
  Value.toString(PrettySourceValue, precision);

  if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) {
    return adornObjCBoolConversionDiagWithTernaryFixit(
        S, E,
        S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool)
            << PrettySourceValue);
  }

  if (Result == llvm::APFloat::opOK && isExact) {
    // Exactly representable: literals are fine; otherwise warn generically.
    if (IsLiteral) return;
    return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
                           PruneWarnings);
  }

  // Conversion of a floating-point value to a non-bool integer where the
  // integral part cannot be represented by the integer type is undefined.
  if (!IsBool && Result == llvm::APFloat::opInvalidOp)
    return DiagnoseImpCast(
        S, E, T, CContext,
        IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
                  : diag::warn_impcast_float_to_integer_out_of_range,
        PruneWarnings);

  unsigned DiagID = 0;
  if (IsLiteral) {
    // Warn on floating point literal to integer.
    DiagID = diag::warn_impcast_literal_float_to_integer;
  } else if (IntegerValue == 0) {
    if (Value.isZero()) { // Skip -0.0 to 0 conversion.
      return DiagnoseImpCast(S, E, T, CContext,
                             diag::warn_impcast_float_integer, PruneWarnings);
    }
    // Warn on non-zero to zero conversion.
    DiagID = diag::warn_impcast_float_to_integer_zero;
  } else {
    // Saturation at the integer extremes gets the stronger "value changed"
    // warning; anything else gets the generic precision warning.
    if (IntegerValue.isUnsigned()) {
      if (!IntegerValue.isMaxValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    } else { // IntegerValue.isSigned()
      if (!IntegerValue.isMaxSignedValue() &&
          !IntegerValue.isMinSignedValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    }
    // Warn on evaluatable floating point expression to integer conversion.
    DiagID = diag::warn_impcast_float_to_integer;
  }

  SmallString<16> PrettyTargetValue;
  if (IsBool)
    PrettyTargetValue = Value.isZero() ? "false" : "true";
  else
    IntegerValue.toString(PrettyTargetValue);

  if (PruneWarnings) {
    S.DiagRuntimeBehavior(E->getExprLoc(), E,
                          S.PDiag(DiagID)
                              << E->getType() << T.getUnqualifiedType()
                              << PrettySourceValue << PrettyTargetValue
                              << E->getSourceRange() << SourceRange(CContext));
  } else {
    S.Diag(E->getExprLoc(), DiagID)
        << E->getType() << T.getUnqualifiedType() << PrettySourceValue
        << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
  }
}

/// Analyze the given compound assignment for the possible losing of
/// floating-point precision.
static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
  assert(isa<CompoundAssignOperator>(E) &&
         "Must be compound assignment operation");
  // Recurse on the LHS and RHS in here
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());

  // Diagnose implicitly sequentially-consistent atomic compound assignment.
  if (E->getLHS()->getType()->isAtomicType())
    S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);

  // Now check the outermost expression
  const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
  const auto *RBT = cast<CompoundAssignOperator>(E)
                        ->getComputationResultType()
                        ->getAs<BuiltinType>();

  // The below checks assume source is floating point.
  if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;

  // If source is floating point but target is an integer.
  if (ResultBT->isInteger())
    return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(),
                           E->getExprLoc(), diag::warn_impcast_float_integer);

  if (!ResultBT->isFloatingPoint())
    return;

  // If both source and target are floating points, warn about losing precision.
  int Order = S.getASTContext().getFloatingTypeSemanticOrder(
      QualType(ResultBT, 0), QualType(RBT, 0));
  if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
    // warn about dropping FP rank.
    DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
                    diag::warn_impcast_float_result_precision);
}

/// Print \p Value as a decimal string after truncating it to fit \p Range
/// (width and signedness); used to render the value a narrower type will
/// actually hold.
static std::string PrettyPrintInRange(const llvm::APSInt &Value,
                                      IntRange Range) {
  if (!Range.Width) return "0";

  llvm::APSInt ValueInRange = Value;
  ValueInRange.setIsSigned(!Range.NonNegative);
  ValueInRange = ValueInRange.trunc(Range.Width);
  return toString(ValueInRange, 10);
}

/// Returns true if \p Ex is an implicit conversion between a floating-point
/// type and bool; \p ToBool selects the direction (float-to-bool when true,
/// bool-to-float when false).
static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
  if (!isa<ImplicitCastExpr>(Ex))
    return false;

  Expr *InnerE = Ex->IgnoreParenImpCasts();
  const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
  const Type *Source =
      S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
  if (Target->isDependentType())
    return false;

  const BuiltinType *FloatCandidateBT =
      dyn_cast<BuiltinType>(ToBool ? Source : Target);
  const Type *BoolCandidateType = ToBool ? Target : Source;

  return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
          FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
}

/// Warn on call arguments that undergo a float-to-bool conversion while an
/// adjacent argument undergoes the opposite bool-to-float conversion -- a
/// telltale sign of swapped arguments.
static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
                                             SourceLocation CC) {
  unsigned NumArgs = TheCall->getNumArgs();
  for (unsigned i = 0; i < NumArgs; ++i) {
    Expr *CurrA = TheCall->getArg(i);
    if (!IsImplicitBoolFloatConversion(S, CurrA, true))
      continue;

    // Look at the neighboring arguments for the inverse conversion.
    bool IsSwapped = ((i > 0) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
    IsSwapped |= ((i < (NumArgs - 1)) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
    if (IsSwapped) {
      // Warn on this floating-point to bool conversion.
      DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
                      CurrA->getType(), CC,
                      diag::warn_impcast_floating_point_to_bool);
    }
  }
}

/// Warn when NULL (GNU __null) or nullptr is implicitly converted to a
/// non-pointer scalar type, and suggest a literal zero instead.
static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
                                   SourceLocation CC) {
  if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
                        E->getExprLoc()))
    return;

  // Don't warn on functions which have return type nullptr_t.
  if (isa<CallExpr>(E))
    return;

  // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
  const Expr *NewE = E->IgnoreParenImpCasts();
  bool IsGNUNullExpr = isa<GNUNullExpr>(NewE);
  bool HasNullPtrType = NewE->getType()->isNullPtrType();
  if (!IsGNUNullExpr && !HasNullPtrType)
    return;

  // Return if target type is a safe conversion.
  if (T->isAnyPointerType() || T->isBlockPointerType() ||
      T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
    return;

  SourceLocation Loc = E->getSourceRange().getBegin();

  // Venture through the macro stacks to get to the source of macro arguments.
  // The new location is a better location than the complete location that was
  // passed in.
  Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
  CC = S.SourceMgr.getTopMacroCallerLoc(CC);

  // __null is usually wrapped in a macro. Go up a macro if that is the case.
  if (IsGNUNullExpr && Loc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
        Loc, S.SourceMgr, S.getLangOpts());
    if (MacroName == "NULL")
      Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
  }

  // Only warn if the null and context location are in the same macro expansion.
  if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
    return;

  S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
      << HasNullPtrType << T << SourceRange(CC)
      << FixItHint::CreateReplacement(Loc,
                                      S.getFixItZeroLiteralForType(T, Loc));
}

// Forward declarations for the mutually recursive collection-literal checks.
static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
                                  ObjCArrayLiteral *ArrayLiteral);

static void
checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
                           ObjCDictionaryLiteral *DictionaryLiteral);

/// Check a single element within a collection literal against the
/// target element type.
///
/// \param ElementKind selects the diagnostic wording (array element, key, or
///        value) for warn_objc_collection_literal_element.
static void checkObjCCollectionLiteralElement(Sema &S,
                                              QualType TargetElementType,
                                              Expr *Element,
                                              unsigned ElementKind) {
  // Skip a bitcast to 'id' or qualified 'id'.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
    if (ICE->getCastKind() == CK_BitCast &&
        ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
      Element = ICE->getSubExpr();
  }

  QualType ElementType = Element->getType();
  ExprResult ElementResult(Element);
  if (ElementType->getAs<ObjCObjectPointerType>() &&
      S.CheckSingleAssignmentConstraints(TargetElementType,
                                         ElementResult,
                                         false, false)
        != Sema::Compatible) {
    S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
        << ElementType << ElementKind << TargetElementType
        << Element->getSourceRange();
  }

  // Recurse into nested collection literals.
  if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
    checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
  else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
    checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
}

/// Check an Objective-C array literal being converted to the given
/// target type.
14518 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 14519 ObjCArrayLiteral *ArrayLiteral) { 14520 if (!S.NSArrayDecl) 14521 return; 14522 14523 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 14524 if (!TargetObjCPtr) 14525 return; 14526 14527 if (TargetObjCPtr->isUnspecialized() || 14528 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 14529 != S.NSArrayDecl->getCanonicalDecl()) 14530 return; 14531 14532 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 14533 if (TypeArgs.size() != 1) 14534 return; 14535 14536 QualType TargetElementType = TypeArgs[0]; 14537 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 14538 checkObjCCollectionLiteralElement(S, TargetElementType, 14539 ArrayLiteral->getElement(I), 14540 0); 14541 } 14542 } 14543 14544 /// Check an Objective-C dictionary literal being converted to the given 14545 /// target type. 14546 static void 14547 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 14548 ObjCDictionaryLiteral *DictionaryLiteral) { 14549 if (!S.NSDictionaryDecl) 14550 return; 14551 14552 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 14553 if (!TargetObjCPtr) 14554 return; 14555 14556 if (TargetObjCPtr->isUnspecialized() || 14557 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 14558 != S.NSDictionaryDecl->getCanonicalDecl()) 14559 return; 14560 14561 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 14562 if (TypeArgs.size() != 2) 14563 return; 14564 14565 QualType TargetKeyType = TypeArgs[0]; 14566 QualType TargetObjectType = TypeArgs[1]; 14567 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 14568 auto Element = DictionaryLiteral->getKeyValueElement(I); 14569 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 14570 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 14571 } 14572 } 14573 14574 // Helper function to filter out cases for constant width constant conversion. 
// Don't warn on char array initialization or for non-decimal values.
static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
                                          SourceLocation CC) {
  // If initializing from a constant, and the constant starts with '0',
  // then it is a binary, octal, or hexadecimal.  Allow these constants
  // to fill all the bits, even if there is a sign change.
  if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
    const char FirstLiteralCharacter =
        S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
    if (FirstLiteralCharacter == '0')
      return false;
  }

  // If the CC location points to a '{', and the type is char, then assume
  // it is an array initialization.
  if (CC.isValid() && T->isCharType()) {
    const char FirstContextCharacter =
        S.getSourceManager().getCharacterData(CC)[0];
    if (FirstContextCharacter == '{')
      return false;
  }

  return true;
}

/// Return \p E as an IntegerLiteral, looking through a unary minus (so both
/// '42' and '-42' yield the literal); returns null otherwise.
static const IntegerLiteral *getIntegerLiteral(Expr *E) {
  const auto *IL = dyn_cast<IntegerLiteral>(E);
  if (!IL) {
    if (auto *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Minus)
        return dyn_cast<IntegerLiteral>(UO->getSubExpr());
    }
  }

  return IL;
}

/// Diagnose integer expressions used in a boolean context whose truth value
/// is constant or surprising (e.g. left shifts and integer-only ternaries).
static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
  E = E->IgnoreParenImpCasts();
  SourceLocation ExprLoc = E->getExprLoc();

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    BinaryOperator::Opcode Opc = BO->getOpcode();
    Expr::EvalResult Result;
    // Do not diagnose unsigned shifts.
14620 if (Opc == BO_Shl) { 14621 const auto *LHS = getIntegerLiteral(BO->getLHS()); 14622 const auto *RHS = getIntegerLiteral(BO->getRHS()); 14623 if (LHS && LHS->getValue() == 0) 14624 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0; 14625 else if (!E->isValueDependent() && LHS && RHS && 14626 RHS->getValue().isNonNegative() && 14627 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) 14628 S.Diag(ExprLoc, diag::warn_left_shift_always) 14629 << (Result.Val.getInt() != 0); 14630 else if (E->getType()->isSignedIntegerType()) 14631 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E; 14632 } 14633 } 14634 14635 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 14636 const auto *LHS = getIntegerLiteral(CO->getTrueExpr()); 14637 const auto *RHS = getIntegerLiteral(CO->getFalseExpr()); 14638 if (!LHS || !RHS) 14639 return; 14640 if ((LHS->getValue() == 0 || LHS->getValue() == 1) && 14641 (RHS->getValue() == 0 || RHS->getValue() == 1)) 14642 // Do not diagnose common idioms. 14643 return; 14644 if (LHS->getValue() != 0 && RHS->getValue() != 0) 14645 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true); 14646 } 14647 } 14648 14649 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T, 14650 SourceLocation CC, 14651 bool *ICContext = nullptr, 14652 bool IsListInit = false) { 14653 if (E->isTypeDependent() || E->isValueDependent()) return; 14654 14655 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 14656 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 14657 if (Source == Target) return; 14658 if (Target->isDependentType()) return; 14659 14660 // If the conversion context location is invalid don't complain. We also 14661 // don't want to emit a warning if the issue occurs from the expansion of 14662 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 14663 // delay this check as long as possible. 
Once we detect we are in that 14664 // scenario, we just return. 14665 if (CC.isInvalid()) 14666 return; 14667 14668 if (Source->isAtomicType()) 14669 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 14670 14671 // Diagnose implicit casts to bool. 14672 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 14673 if (isa<StringLiteral>(E)) 14674 // Warn on string literal to bool. Checks for string literals in logical 14675 // and expressions, for instance, assert(0 && "error here"), are 14676 // prevented by a check in AnalyzeImplicitConversions(). 14677 return DiagnoseImpCast(S, E, T, CC, 14678 diag::warn_impcast_string_literal_to_bool); 14679 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 14680 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 14681 // This covers the literal expressions that evaluate to Objective-C 14682 // objects. 14683 return DiagnoseImpCast(S, E, T, CC, 14684 diag::warn_impcast_objective_c_literal_to_bool); 14685 } 14686 if (Source->isPointerType() || Source->canDecayToPointerType()) { 14687 // Warn on pointer to bool conversion that is always true. 14688 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 14689 SourceRange(CC)); 14690 } 14691 } 14692 14693 // If the we're converting a constant to an ObjC BOOL on a platform where BOOL 14694 // is a typedef for signed char (macOS), then that constant value has to be 1 14695 // or 0. 
14696 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 14697 Expr::EvalResult Result; 14698 if (E->EvaluateAsInt(Result, S.getASTContext(), 14699 Expr::SE_AllowSideEffects)) { 14700 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 14701 adornObjCBoolConversionDiagWithTernaryFixit( 14702 S, E, 14703 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 14704 << toString(Result.Val.getInt(), 10)); 14705 } 14706 return; 14707 } 14708 } 14709 14710 // Check implicit casts from Objective-C collection literals to specialized 14711 // collection types, e.g., NSArray<NSString *> *. 14712 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 14713 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 14714 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 14715 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 14716 14717 // Strip vector types. 14718 if (isa<VectorType>(Source)) { 14719 if (Target->isVLSTBuiltinType() && 14720 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 14721 QualType(Source, 0)) || 14722 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 14723 QualType(Source, 0)))) 14724 return; 14725 14726 if (Target->isRVVVLSBuiltinType() && 14727 (S.Context.areCompatibleRVVTypes(QualType(Target, 0), 14728 QualType(Source, 0)) || 14729 S.Context.areLaxCompatibleRVVTypes(QualType(Target, 0), 14730 QualType(Source, 0)))) 14731 return; 14732 14733 if (!isa<VectorType>(Target)) { 14734 if (S.SourceMgr.isInSystemMacro(CC)) 14735 return; 14736 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 14737 } 14738 14739 // If the vector cast is cast between two vectors of the same size, it is 14740 // a bitcast, not a conversion. 
14741 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 14742 return; 14743 14744 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 14745 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 14746 } 14747 if (auto VecTy = dyn_cast<VectorType>(Target)) 14748 Target = VecTy->getElementType().getTypePtr(); 14749 14750 // Strip complex types. 14751 if (isa<ComplexType>(Source)) { 14752 if (!isa<ComplexType>(Target)) { 14753 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 14754 return; 14755 14756 return DiagnoseImpCast(S, E, T, CC, 14757 S.getLangOpts().CPlusPlus 14758 ? diag::err_impcast_complex_scalar 14759 : diag::warn_impcast_complex_scalar); 14760 } 14761 14762 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 14763 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 14764 } 14765 14766 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 14767 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 14768 14769 // Strip SVE vector types 14770 if (SourceBT && SourceBT->isVLSTBuiltinType()) { 14771 // Need the original target type for vector type checks 14772 const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr(); 14773 // Handle conversion from scalable to fixed when msve-vector-bits is 14774 // specified 14775 if (S.Context.areCompatibleSveTypes(QualType(OriginalTarget, 0), 14776 QualType(Source, 0)) || 14777 S.Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0), 14778 QualType(Source, 0))) 14779 return; 14780 14781 // If the vector cast is cast between two vectors of the same size, it is 14782 // a bitcast, not a conversion. 
14783 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 14784 return; 14785 14786 Source = SourceBT->getSveEltType(S.Context).getTypePtr(); 14787 } 14788 14789 if (TargetBT && TargetBT->isVLSTBuiltinType()) 14790 Target = TargetBT->getSveEltType(S.Context).getTypePtr(); 14791 14792 // If the source is floating point... 14793 if (SourceBT && SourceBT->isFloatingPoint()) { 14794 // ...and the target is floating point... 14795 if (TargetBT && TargetBT->isFloatingPoint()) { 14796 // ...then warn if we're dropping FP rank. 14797 14798 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 14799 QualType(SourceBT, 0), QualType(TargetBT, 0)); 14800 if (Order > 0) { 14801 // Don't warn about float constants that are precisely 14802 // representable in the target type. 14803 Expr::EvalResult result; 14804 if (E->EvaluateAsRValue(result, S.Context)) { 14805 // Value might be a float, a float vector, or a float complex. 14806 if (IsSameFloatAfterCast(result.Val, 14807 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 14808 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 14809 return; 14810 } 14811 14812 if (S.SourceMgr.isInSystemMacro(CC)) 14813 return; 14814 14815 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 14816 } 14817 // ... or possibly if we're increasing rank, too 14818 else if (Order < 0) { 14819 if (S.SourceMgr.isInSystemMacro(CC)) 14820 return; 14821 14822 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 14823 } 14824 return; 14825 } 14826 14827 // If the target is integral, always warn. 
14828 if (TargetBT && TargetBT->isInteger()) { 14829 if (S.SourceMgr.isInSystemMacro(CC)) 14830 return; 14831 14832 DiagnoseFloatingImpCast(S, E, T, CC); 14833 } 14834 14835 // Detect the case where a call result is converted from floating-point to 14836 // to bool, and the final argument to the call is converted from bool, to 14837 // discover this typo: 14838 // 14839 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 14840 // 14841 // FIXME: This is an incredibly special case; is there some more general 14842 // way to detect this class of misplaced-parentheses bug? 14843 if (Target->isBooleanType() && isa<CallExpr>(E)) { 14844 // Check last argument of function call to see if it is an 14845 // implicit cast from a type matching the type the result 14846 // is being cast to. 14847 CallExpr *CEx = cast<CallExpr>(E); 14848 if (unsigned NumArgs = CEx->getNumArgs()) { 14849 Expr *LastA = CEx->getArg(NumArgs - 1); 14850 Expr *InnerE = LastA->IgnoreParenImpCasts(); 14851 if (isa<ImplicitCastExpr>(LastA) && 14852 InnerE->getType()->isBooleanType()) { 14853 // Warn on this floating-point to bool conversion 14854 DiagnoseImpCast(S, E, T, CC, 14855 diag::warn_impcast_floating_point_to_bool); 14856 } 14857 } 14858 } 14859 return; 14860 } 14861 14862 // Valid casts involving fixed point types should be accounted for here. 
14863 if (Source->isFixedPointType()) { 14864 if (Target->isUnsaturatedFixedPointType()) { 14865 Expr::EvalResult Result; 14866 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 14867 S.isConstantEvaluated())) { 14868 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 14869 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 14870 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 14871 if (Value > MaxVal || Value < MinVal) { 14872 S.DiagRuntimeBehavior(E->getExprLoc(), E, 14873 S.PDiag(diag::warn_impcast_fixed_point_range) 14874 << Value.toString() << T 14875 << E->getSourceRange() 14876 << clang::SourceRange(CC)); 14877 return; 14878 } 14879 } 14880 } else if (Target->isIntegerType()) { 14881 Expr::EvalResult Result; 14882 if (!S.isConstantEvaluated() && 14883 E->EvaluateAsFixedPoint(Result, S.Context, 14884 Expr::SE_AllowSideEffects)) { 14885 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 14886 14887 bool Overflowed; 14888 llvm::APSInt IntResult = FXResult.convertToInt( 14889 S.Context.getIntWidth(T), 14890 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 14891 14892 if (Overflowed) { 14893 S.DiagRuntimeBehavior(E->getExprLoc(), E, 14894 S.PDiag(diag::warn_impcast_fixed_point_range) 14895 << FXResult.toString() << T 14896 << E->getSourceRange() 14897 << clang::SourceRange(CC)); 14898 return; 14899 } 14900 } 14901 } 14902 } else if (Target->isUnsaturatedFixedPointType()) { 14903 if (Source->isIntegerType()) { 14904 Expr::EvalResult Result; 14905 if (!S.isConstantEvaluated() && 14906 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 14907 llvm::APSInt Value = Result.Val.getInt(); 14908 14909 bool Overflowed; 14910 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 14911 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 14912 14913 if (Overflowed) { 14914 S.DiagRuntimeBehavior(E->getExprLoc(), E, 14915 S.PDiag(diag::warn_impcast_fixed_point_range) 14916 << 
toString(Value, /*Radix=*/10) << T 14917 << E->getSourceRange() 14918 << clang::SourceRange(CC)); 14919 return; 14920 } 14921 } 14922 } 14923 } 14924 14925 // If we are casting an integer type to a floating point type without 14926 // initialization-list syntax, we might lose accuracy if the floating 14927 // point type has a narrower significand than the integer type. 14928 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 14929 TargetBT->isFloatingType() && !IsListInit) { 14930 // Determine the number of precision bits in the source integer type. 14931 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 14932 /*Approximate*/ true); 14933 unsigned int SourcePrecision = SourceRange.Width; 14934 14935 // Determine the number of precision bits in the 14936 // target floating point type. 14937 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 14938 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 14939 14940 if (SourcePrecision > 0 && TargetPrecision > 0 && 14941 SourcePrecision > TargetPrecision) { 14942 14943 if (std::optional<llvm::APSInt> SourceInt = 14944 E->getIntegerConstantExpr(S.Context)) { 14945 // If the source integer is a constant, convert it to the target 14946 // floating point type. Issue a warning if the value changes 14947 // during the whole conversion. 
14948 llvm::APFloat TargetFloatValue( 14949 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 14950 llvm::APFloat::opStatus ConversionStatus = 14951 TargetFloatValue.convertFromAPInt( 14952 *SourceInt, SourceBT->isSignedInteger(), 14953 llvm::APFloat::rmNearestTiesToEven); 14954 14955 if (ConversionStatus != llvm::APFloat::opOK) { 14956 SmallString<32> PrettySourceValue; 14957 SourceInt->toString(PrettySourceValue, 10); 14958 SmallString<32> PrettyTargetValue; 14959 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 14960 14961 S.DiagRuntimeBehavior( 14962 E->getExprLoc(), E, 14963 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 14964 << PrettySourceValue << PrettyTargetValue << E->getType() << T 14965 << E->getSourceRange() << clang::SourceRange(CC)); 14966 } 14967 } else { 14968 // Otherwise, the implicit conversion may lose precision. 14969 DiagnoseImpCast(S, E, T, CC, 14970 diag::warn_impcast_integer_float_precision); 14971 } 14972 } 14973 } 14974 14975 DiagnoseNullConversion(S, E, T, CC); 14976 14977 S.DiscardMisalignedMemberAddress(Target, E); 14978 14979 if (Target->isBooleanType()) 14980 DiagnoseIntInBoolContext(S, E); 14981 14982 if (!Source->isIntegerType() || !Target->isIntegerType()) 14983 return; 14984 14985 // TODO: remove this early return once the false positives for constant->bool 14986 // in templates, macros, etc, are reduced or removed. 
14987 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 14988 return; 14989 14990 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 14991 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 14992 return adornObjCBoolConversionDiagWithTernaryFixit( 14993 S, E, 14994 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 14995 << E->getType()); 14996 } 14997 14998 IntRange SourceTypeRange = 14999 IntRange::forTargetOfCanonicalType(S.Context, Source); 15000 IntRange LikelySourceRange = 15001 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 15002 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 15003 15004 if (LikelySourceRange.Width > TargetRange.Width) { 15005 // If the source is a constant, use a default-on diagnostic. 15006 // TODO: this should happen for bitfield stores, too. 15007 Expr::EvalResult Result; 15008 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 15009 S.isConstantEvaluated())) { 15010 llvm::APSInt Value(32); 15011 Value = Result.Val.getInt(); 15012 15013 if (S.SourceMgr.isInSystemMacro(CC)) 15014 return; 15015 15016 std::string PrettySourceValue = toString(Value, 10); 15017 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 15018 15019 S.DiagRuntimeBehavior( 15020 E->getExprLoc(), E, 15021 S.PDiag(diag::warn_impcast_integer_precision_constant) 15022 << PrettySourceValue << PrettyTargetValue << E->getType() << T 15023 << E->getSourceRange() << SourceRange(CC)); 15024 return; 15025 } 15026 15027 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
15028 if (S.SourceMgr.isInSystemMacro(CC)) 15029 return; 15030 15031 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 15032 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 15033 /* pruneControlFlow */ true); 15034 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 15035 } 15036 15037 if (TargetRange.Width > SourceTypeRange.Width) { 15038 if (auto *UO = dyn_cast<UnaryOperator>(E)) 15039 if (UO->getOpcode() == UO_Minus) 15040 if (Source->isUnsignedIntegerType()) { 15041 if (Target->isUnsignedIntegerType()) 15042 return DiagnoseImpCast(S, E, T, CC, 15043 diag::warn_impcast_high_order_zero_bits); 15044 if (Target->isSignedIntegerType()) 15045 return DiagnoseImpCast(S, E, T, CC, 15046 diag::warn_impcast_nonnegative_result); 15047 } 15048 } 15049 15050 if (TargetRange.Width == LikelySourceRange.Width && 15051 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 15052 Source->isSignedIntegerType()) { 15053 // Warn when doing a signed to signed conversion, warn if the positive 15054 // source value is exactly the width of the target type, which will 15055 // cause a negative value to be stored. 15056 15057 Expr::EvalResult Result; 15058 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 15059 !S.SourceMgr.isInSystemMacro(CC)) { 15060 llvm::APSInt Value = Result.Val.getInt(); 15061 if (isSameWidthConstantConversion(S, E, T, CC)) { 15062 std::string PrettySourceValue = toString(Value, 10); 15063 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 15064 15065 S.DiagRuntimeBehavior( 15066 E->getExprLoc(), E, 15067 S.PDiag(diag::warn_impcast_integer_precision_constant) 15068 << PrettySourceValue << PrettyTargetValue << E->getType() << T 15069 << E->getSourceRange() << SourceRange(CC)); 15070 return; 15071 } 15072 } 15073 15074 // Fall through for non-constants to give a sign conversion warning. 
15075 } 15076 15077 if ((!isa<EnumType>(Target) || !isa<EnumType>(Source)) && 15078 ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 15079 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 15080 LikelySourceRange.Width == TargetRange.Width))) { 15081 if (S.SourceMgr.isInSystemMacro(CC)) 15082 return; 15083 15084 if (SourceBT && SourceBT->isInteger() && TargetBT && 15085 TargetBT->isInteger() && 15086 Source->isSignedIntegerType() == Target->isSignedIntegerType()) { 15087 return; 15088 } 15089 15090 unsigned DiagID = diag::warn_impcast_integer_sign; 15091 15092 // Traditionally, gcc has warned about this under -Wsign-compare. 15093 // We also want to warn about it in -Wconversion. 15094 // So if -Wconversion is off, use a completely identical diagnostic 15095 // in the sign-compare group. 15096 // The conditional-checking code will 15097 if (ICContext) { 15098 DiagID = diag::warn_impcast_integer_sign_conditional; 15099 *ICContext = true; 15100 } 15101 15102 return DiagnoseImpCast(S, E, T, CC, DiagID); 15103 } 15104 15105 // Diagnose conversions between different enumeration types. 15106 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 15107 // type, to give us better diagnostics. 
15108 QualType SourceType = E->getType(); 15109 if (!S.getLangOpts().CPlusPlus) { 15110 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 15111 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 15112 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 15113 SourceType = S.Context.getTypeDeclType(Enum); 15114 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 15115 } 15116 } 15117 15118 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 15119 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 15120 if (SourceEnum->getDecl()->hasNameForLinkage() && 15121 TargetEnum->getDecl()->hasNameForLinkage() && 15122 SourceEnum != TargetEnum) { 15123 if (S.SourceMgr.isInSystemMacro(CC)) 15124 return; 15125 15126 return DiagnoseImpCast(S, E, SourceType, T, CC, 15127 diag::warn_impcast_different_enum_types); 15128 } 15129 } 15130 15131 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 15132 SourceLocation CC, QualType T); 15133 15134 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 15135 SourceLocation CC, bool &ICContext) { 15136 E = E->IgnoreParenImpCasts(); 15137 // Diagnose incomplete type for second or third operand in C. 
  if (!S.getLangOpts().CPlusPlus && E->getType()->isRecordType())
    S.RequireCompleteExprType(E, diag::err_incomplete_type);

  // A nested conditional is analyzed recursively, arm by arm.
  if (auto *CO = dyn_cast<AbstractConditionalOperator>(E))
    return CheckConditionalOperator(S, CO, CC, T);

  AnalyzeImplicitConversions(S, E, CC);
  // Check the operand's conversion to the context type; a suspicious
  // signedness conversion is recorded in ICContext rather than diagnosed
  // immediately (see CheckConditionalOperator below).
  if (E->getType() != T)
    return CheckImplicitConversion(S, E, T, CC, &ICContext);
}

static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
                                     SourceLocation CC, QualType T) {
  // The condition is analyzed on its own; its "context" location is the '?'.
  AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());

  Expr *TrueExpr = E->getTrueExpr();
  // For a GNU binary conditional (x ?: y), the "true" arm is the shared
  // common expression.
  if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E))
    TrueExpr = BCO->getCommon();

  bool Suspicious = false;
  CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious);
  CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);

  if (T->isBooleanType())
    DiagnoseIntInBoolContext(S, E);

  // If -Wconversion would have warned about either of the candidates
  // for a signedness conversion to the context type...
  if (!Suspicious) return;

  // ...but it's currently ignored...
  if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
    return;

  // ...then check whether it would have warned about either of the
  // candidates for a signedness conversion to the condition type.
  if (E->getType() == T) return;

  Suspicious = false;
  CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(),
                          E->getType(), CC, &Suspicious);
  if (!Suspicious)
    CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
                            E->getType(), CC, &Suspicious);
}

/// Check conversion of given expression to boolean.
/// Input argument E is a logical expression.
static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
  // Nothing to do when the language has a real 'bool' type; that conversion
  // is checked through the normal paths.
  if (S.getLangOpts().Bool)
    return;
  // Don't warn on atomic operands here.
  if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
    return;
  CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
}

namespace {
// One pending expression to analyze: the expression itself, the "context"
// location of the conversion, and whether we are inside a C++ list init.
struct AnalyzeImplicitConversionsWorkItem {
  Expr *E;
  SourceLocation CC;
  bool IsListInit;
};
}

/// Data recursive variant of AnalyzeImplicitConversions. Subexpressions
/// that should be visited are added to WorkList.
static void AnalyzeImplicitConversions(
    Sema &S, AnalyzeImplicitConversionsWorkItem Item,
    llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) {
  Expr *OrigE = Item.E;
  SourceLocation CC = Item.CC;

  QualType T = OrigE->getType();
  Expr *E = OrigE->IgnoreParenImpCasts();

  // Propagate whether we are in a C++ list initialization expression.
  // If so, we do not issue warnings for implicit int-float conversion
  // precision loss, because C++11 narrowing already handles it.
  bool IsListInit = Item.IsListInit ||
                    (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus);

  if (E->isTypeDependent() || E->isValueDependent())
    return;

  Expr *SourceExpr = E;
  // Examine, but don't traverse into the source expression of an
  // OpaqueValueExpr, since it may have multiple parents and we don't want to
  // emit duplicate diagnostics. It's fine to examine the form or attempt to
  // evaluate it in the context of checking the specific conversion to T though.
  if (auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    if (auto *Src = OVE->getSourceExpr())
      SourceExpr = Src;

  // Diagnose bitwise '~' applied to a value known to be boolean; suggest
  // logical '!' instead.
  if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr))
    if (UO->getOpcode() == UO_Not &&
        UO->getSubExpr()->isKnownToHaveBooleanValue())
      S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool)
          << OrigE->getSourceRange() << T->isBooleanType()
          << FixItHint::CreateReplacement(UO->getBeginLoc(), "!");

  // Diagnose bitwise '&'/'|' where both operands are boolean and both have
  // side effects (so '&&'/'||' short-circuiting would change behavior);
  // suggest the logical operator.
  if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr))
    if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) &&
        BO->getLHS()->isKnownToHaveBooleanValue() &&
        BO->getRHS()->isKnownToHaveBooleanValue() &&
        BO->getLHS()->HasSideEffects(S.Context) &&
        BO->getRHS()->HasSideEffects(S.Context)) {
      S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical)
          << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange()
          << FixItHint::CreateReplacement(
                 BO->getOperatorLoc(),
                 (BO->getOpcode() == BO_And ? "&&" : "||"));
      S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int);
    }

  // For conditional operators, we analyze the arguments as if they
  // were being fed directly into the output.
  if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) {
    CheckConditionalOperator(S, CO, CC, T);
    return;
  }

  // Check implicit argument conversions for function calls.
  if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr))
    CheckImplicitArgumentConversions(S, Call, CC);

  // Go ahead and check any implicit conversions we might have skipped.
  // The non-canonical typecheck is just an optimization;
  // CheckImplicitConversion will filter out dead implicit conversions.
  if (SourceExpr->getType() != T)
    CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit);

  // Now continue drilling into this expression.

  if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
    // The bound subexpressions in a PseudoObjectExpr are not reachable
    // as transitive children.
    // FIXME: Use a more uniform representation for this.
    for (auto *SE : POE->semantics())
      if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
        WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit});
  }

  // Skip past explicit casts.
  if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) {
    E = CE->getSubExpr()->IgnoreParenImpCasts();
    if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
      S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
    WorkList.push_back({E, CC, IsListInit});
    return;
  }

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
    // Do a somewhat different check with comparison operators.
    if (BO->isComparisonOp())
      return AnalyzeComparison(S, BO);

    // And with simple assignments.
    if (BO->getOpcode() == BO_Assign)
      return AnalyzeAssignment(S, BO);
    // And with compound assignments.
    if (BO->isAssignmentOp())
      return AnalyzeCompoundAssignment(S, BO);
  }

  // These break the otherwise-useful invariant below. Fortunately,
  // we don't really need to recurse into them, because any internal
  // expressions should have been analyzed already when they were
  // built into statements.
  if (isa<StmtExpr>(E)) return;

  // Don't descend into unevaluated contexts.
  if (isa<UnaryExprOrTypeTraitExpr>(E)) return;

  // Now just recurse over the expression's children. Children are analyzed
  // with this expression's location as their conversion context.
  CC = E->getExprLoc();
  BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
  bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
  for (Stmt *SubStmt : E->children()) {
    Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
    if (!ChildExpr)
      continue;

    if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E))
      if (ChildExpr == CSE->getOperand())
        // Do not recurse over a CoroutineSuspendExpr's operand.
        // The operand is also a subexpression of getCommonExpr(), and
        // recursing into it directly would produce duplicate diagnostics.
        continue;

    if (IsLogicalAndOperator &&
        isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
      // Ignore checking string literals that are in logical and operators.
      // This is a common pattern for asserts.
      continue;
    WorkList.push_back({ChildExpr, CC, IsListInit});
  }

  // The operands of '&&'/'||' are bool-like contexts of their own.
  if (BO && BO->isLogicalOp()) {
    Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());

    SubExpr = BO->getRHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
  }

  if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
    if (U->getOpcode() == UO_LNot) {
      // The operand of '!' is a bool-like context too.
      ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
    } else if (U->getOpcode() != UO_AddrOf) {
      if (U->getSubExpr()->getType()->isAtomicType())
        S.Diag(U->getSubExpr()->getBeginLoc(),
               diag::warn_atomic_implicit_seq_cst);
    }
  }
}

/// AnalyzeImplicitConversions - Find and report any interesting
/// implicit conversions in the given expression. There are a couple
/// of competing diagnostics here, -Wconversion and -Wsign-compare.
static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
                                       bool IsListInit/*= false*/) {
  // Seed the work list with the full expression, then drain it; the work-list
  // overload above pushes the subexpressions it wants visited.
  llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
  WorkList.push_back({OrigE, CC, IsListInit});
  while (!WorkList.empty())
    AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
}

/// Diagnose integer type and any valid implicit conversion to it.
/// Returns true if an error was emitted.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
  // Taking into account implicit conversions,
  // allow any integer.
  if (!E->getType()->isIntegerType()) {
    S.Diag(E->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_invalid_local_size_type);
    return true;
  }
  // Potentially emit standard warnings for implicit conversions if enabled
  // using -Wconversion.
  CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
  return false;
}

// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
// Returns true when emitting a warning about taking the address of a reference.
static bool CheckForReference(Sema &SemaRef, const Expr *E,
                              const PartialDiagnostic &PD) {
  E = E->IgnoreParenImpCasts();

  const FunctionDecl *FD = nullptr;

  // Only warn when the expression's declared type (or the call's return
  // type) is a reference; anything more complicated is skipped.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    if (!DRE->getDecl()->getType()->isReferenceType())
      return false;
  } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    if (!M->getMemberDecl()->getType()->isReferenceType())
      return false;
  } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
    if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
      return false;
    FD = Call->getDirectCallee();
  } else {
    return false;
  }

  SemaRef.Diag(E->getExprLoc(), PD);

  // If possible, point to location of function.
  if (FD) {
    SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
  }

  return true;
}

// Returns true if the SourceLocation is expanded from any macro body.
// Returns false if the SourceLocation is invalid, is not inside a macro
// expansion, or is expanded from a top-level macro argument.
static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
  if (Loc.isInvalid())
    return false;

  // Walk up the chain of macro expansions looking for a body expansion.
  while (Loc.isMacroID()) {
    if (SM.isMacroBodyExpansion(Loc))
      return true;
    Loc = SM.getImmediateMacroCallerLoc(Loc);
  }

  return false;
}

/// Diagnose pointers that are always non-null.
/// \param E the expression containing the pointer
/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
/// compared to a null pointer
/// \param IsEqual True when the comparison is equal to a null pointer
/// \param Range Extra SourceRange to highlight in the diagnostic
void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
                                        Expr::NullPointerConstantKind NullKind,
                                        bool IsEqual, SourceRange Range) {
  if (!E)
    return;

  // Don't warn inside macros.
  if (E->getExprLoc().isMacroID()) {
    const SourceManager &SM = getSourceManager();
    if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
        IsInAnyMacroBody(SM, Range.getBegin()))
      return;
  }
  E = E->IgnoreImpCasts();

  const bool IsCompare = NullKind != Expr::NPCK_NotNull;

  // 'this' can never be null.
  if (isa<CXXThisExpr>(E)) {
    unsigned DiagID = IsCompare ? diag::warn_this_null_compare
                                : diag::warn_this_bool_conversion;
    Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
    return;
  }

  bool IsAddressOf = false;

  if (auto *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() != UO_AddrOf)
      return;
    IsAddressOf = true;
    E = UO->getSubExpr();
  }

  // '&ref' where 'ref' is a reference can never be null.
  if (IsAddressOf) {
    unsigned DiagID = IsCompare
                          ? diag::warn_address_of_reference_null_compare
                          : diag::warn_address_of_reference_bool_conversion;
    PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
                                         << IsEqual;
    if (CheckForReference(*this, E, PD)) {
      return;
    }
  }

  // Emit the main warning plus a note pointing at the nonnull /
  // returns_nonnull attribute that makes the pointer non-null.
  auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
    bool IsParam = isa<NonNullAttr>(NonnullAttr);
    std::string Str;
    llvm::raw_string_ostream S(Str);
    E->printPretty(S, nullptr, getPrintingPolicy());
    unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
                                : diag::warn_cast_nonnull_to_bool;
    Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
                                  << E->getSourceRange() << Range << IsEqual;
    Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
  };

  // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
  if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
    if (auto *Callee = Call->getDirectCallee()) {
      if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }
    }
  }

  // Expect to find a single Decl. Skip anything more complicated.
  ValueDecl *D = nullptr;
  if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
    D = R->getDecl();
  } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    D = M->getMemberDecl();
  }

  // Weak Decls can be null.
  if (!D || D->isWeak())
    return;

  // Check for parameter decl with nonnull attribute, but only if the
  // parameter hasn't been reassigned in this function body.
  if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
    if (getCurFunction() &&
        !getCurFunction()->ModifiedNonNullParams.count(PV)) {
      if (const Attr *A = PV->getAttr<NonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }

      // The attribute may also be spelled on the function, referring to the
      // parameter by index.
      if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
        // Skip function template not specialized yet.
        if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
          return;
        auto ParamIter = llvm::find(FD->parameters(), PV);
        assert(ParamIter != FD->param_end());
        unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);

        for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
          // An argument-less nonnull attribute applies to every parameter.
          if (!NonNull->args_size()) {
            ComplainAboutNonnullParamOrCall(NonNull);
            return;
          }

          for (const ParamIdx &ArgNo : NonNull->args()) {
            if (ArgNo.getASTIndex() == ParamNo) {
              ComplainAboutNonnullParamOrCall(NonNull);
              return;
            }
          }
        }
      }
    }
  }

  QualType T = D->getType();
  const bool IsArray = T->isArrayType();
  const bool IsFunction = T->isFunctionType();

  // Address of function is used to silence the function warning.
  if (IsAddressOf && IsFunction) {
    return;
  }

  // Found nothing.
  if (!IsAddressOf && !IsFunction && !IsArray)
    return;

  // Pretty print the expression for the diagnostic.
  std::string Str;
  llvm::raw_string_ostream S(Str);
  E->printPretty(S, nullptr, getPrintingPolicy());

  unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
                              : diag::warn_impcast_pointer_to_bool;
  enum {
    AddressOf,
    FunctionPointer,
    ArrayPointer
  } DiagType;
  if (IsAddressOf)
    DiagType = AddressOf;
  else if (IsFunction)
    DiagType = FunctionPointer;
  else if (IsArray)
    DiagType = ArrayPointer;
  else
    llvm_unreachable("Could not determine diagnostic.");
  Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
                                << Range << IsEqual;

  if (!IsFunction)
    return;

  // Suggest '&' to silence the function warning.
  Diag(E->getExprLoc(), diag::note_function_warning_silence)
      << FixItHint::CreateInsertion(E->getBeginLoc(), "&");

  // Check to see if '()' fixit should be emitted.
  QualType ReturnType;
  UnresolvedSet<4> NonTemplateOverloads;
  tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
  if (ReturnType.isNull())
    return;

  if (IsCompare) {
    // There are two cases here. If there is a null constant, only suggest
    // for a pointer return type. If the null is 0, then suggest if the return
    // type is a pointer or an integer type.
    if (!ReturnType->isPointerType()) {
      if (NullKind == Expr::NPCK_ZeroExpression ||
          NullKind == Expr::NPCK_ZeroLiteral) {
        if (!ReturnType->isIntegerType())
          return;
      } else {
        return;
      }
    }
  } else { // !IsCompare
    // For function to bool, only suggest if the function pointer has bool
    // return type.
    if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
      return;
  }
  Diag(E->getExprLoc(), diag::note_function_to_function_call)
      << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
}

/// Diagnoses "dangerous" implicit conversions within the given
/// expression (which is a full expression). Implements -Wconversion
/// and -Wsign-compare.
15624 /// 15625 /// \param CC the "context" location of the implicit conversion, i.e. 15626 /// the most location of the syntactic entity requiring the implicit 15627 /// conversion 15628 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) { 15629 // Don't diagnose in unevaluated contexts. 15630 if (isUnevaluatedContext()) 15631 return; 15632 15633 // Don't diagnose for value- or type-dependent expressions. 15634 if (E->isTypeDependent() || E->isValueDependent()) 15635 return; 15636 15637 // Check for array bounds violations in cases where the check isn't triggered 15638 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 15639 // ArraySubscriptExpr is on the RHS of a variable initialization. 15640 CheckArrayAccess(E); 15641 15642 // This is not the right CC for (e.g.) a variable initialization. 15643 AnalyzeImplicitConversions(*this, E, CC); 15644 } 15645 15646 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 15647 /// Input argument E is a logical expression. 15648 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 15649 ::CheckBoolLikeConversion(*this, E, CC); 15650 } 15651 15652 /// Diagnose when expression is an integer constant expression and its evaluation 15653 /// results in integer overflow 15654 void Sema::CheckForIntOverflow (const Expr *E) { 15655 // Use a work list to deal with nested struct initializers. 
  SmallVector<const Expr *, 2> Exprs(1, E);

  do {
    // Note: the inner E deliberately shadows the parameter; OriginalE keeps
    // the un-stripped expression for the InitListExpr / ObjCBoxedExpr cases.
    const Expr *OriginalE = Exprs.pop_back_val();
    const Expr *E = OriginalE->IgnoreParenCasts();

    if (isa<BinaryOperator, UnaryOperator>(E)) {
      E->EvaluateForOverflow(Context);
      continue;
    }

    // Otherwise push any interesting nested expressions onto the work list so
    // overflow is diagnosed inside initializers, call arguments, etc.
    if (const auto *InitList = dyn_cast<InitListExpr>(OriginalE))
      Exprs.append(InitList->inits().begin(), InitList->inits().end());
    else if (isa<ObjCBoxedExpr>(OriginalE))
      E->EvaluateForOverflow(Context);
    else if (const auto *Call = dyn_cast<CallExpr>(E))
      Exprs.append(Call->arg_begin(), Call->arg_end());
    else if (const auto *Message = dyn_cast<ObjCMessageExpr>(E))
      Exprs.append(Message->arg_begin(), Message->arg_end());
    else if (const auto *Construct = dyn_cast<CXXConstructExpr>(E))
      Exprs.append(Construct->arg_begin(), Construct->arg_end());
    else if (const auto *Temporary = dyn_cast<CXXBindTemporaryExpr>(E))
      Exprs.push_back(Temporary->getSubExpr());
    else if (const auto *Array = dyn_cast<ArraySubscriptExpr>(E))
      Exprs.push_back(Array->getIdx());
    else if (const auto *Compound = dyn_cast<CompoundLiteralExpr>(E))
      Exprs.push_back(Compound->getInitializer());
    else if (const auto *New = dyn_cast<CXXNewExpr>(E);
             New && New->isArray()) {
      if (auto ArraySize = New->getArraySize())
        Exprs.push_back(*ArraySize);
    }
  } while (!Exprs.empty());
}

namespace {

/// Visitor for expressions which looks for unsequenced operations on the
/// same object.
class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
  using Base = ConstEvaluatedExprVisitor<SequenceChecker>;

  /// A tree of sequenced regions within an expression. Two regions are
  /// unsequenced if one is an ancestor or a descendent of the other. When we
  /// finish processing an expression with sequencing, such as a comma
  /// expression, we fold its tree nodes into its parent, since they are
  /// unsequenced with respect to nodes we will visit later.
  class SequenceTree {
    struct Value {
      explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
      unsigned Parent : 31;
      unsigned Merged : 1;
    };
    SmallVector<Value, 8> Values;

  public:
    /// A region within an expression which may be sequenced with respect
    /// to some other region.
    class Seq {
      friend class SequenceTree;

      unsigned Index;

      explicit Seq(unsigned N) : Index(N) {}

    public:
      // A default-constructed Seq denotes the root region (index 0).
      Seq() : Index(0) {}
    };

    SequenceTree() { Values.push_back(Value(0)); }
    Seq root() const { return Seq(0); }

    /// Create a new sequence of operations, which is an unsequenced
    /// subset of \p Parent. This sequence of operations is sequenced with
    /// respect to other children of \p Parent.
    Seq allocate(Seq Parent) {
      Values.push_back(Value(Parent.Index));
      return Seq(Values.size() - 1);
    }

    /// Merge a sequence of operations into its parent.
    void merge(Seq S) {
      Values[S.Index].Merged = true;
    }

    /// Determine whether two operations are unsequenced. This operation
    /// is asymmetric: \p Cur should be the more recent sequence, and \p Old
    /// should have been merged into its parent as appropriate.
    bool isUnsequenced(Seq Cur, Seq Old) {
      unsigned C = representative(Cur.Index);
      unsigned Target = representative(Old.Index);
      // Walk Cur's ancestor chain looking for Old. A child is always
      // allocated after its parent, so indices strictly decrease along the
      // Parent links; once C drops below Target it can no longer be reached.
      while (C >= Target) {
        if (C == Target)
          return true;
        C = Values[C].Parent;
      }
      return false;
    }

  private:
    /// Pick a representative for a sequence.
    /// Walks up through merged regions to the nearest unmerged ancestor.
    unsigned representative(unsigned K) {
      if (Values[K].Merged)
        // Perform path compression as we go.
        return Values[K].Parent = representative(Values[K].Parent);
      return K;
    }
  };

  /// An object for which we can track unsequenced uses.
  using Object = const NamedDecl *;

  /// Different flavors of object usage which we track. We only track the
  /// least-sequenced usage of each kind.
  enum UsageKind {
    /// A read of an object. Multiple unsequenced reads are OK.
    UK_Use,

    /// A modification of an object which is sequenced before the value
    /// computation of the expression, such as ++n in C++.
    UK_ModAsValue,

    /// A modification of an object which is not sequenced before the value
    /// computation of the expression, such as n++.
    UK_ModAsSideEffect,

    UK_Count = UK_ModAsSideEffect + 1
  };

  /// Bundle together a sequencing region and the expression corresponding
  /// to a specific usage. One Usage is stored for each usage kind in UsageInfo.
  struct Usage {
    // Null when no usage of this kind has been recorded yet.
    const Expr *UsageExpr;
    SequenceTree::Seq Seq;

    Usage() : UsageExpr(nullptr) {}
  };

  struct UsageInfo {
    Usage Uses[UK_Count];

    /// Have we issued a diagnostic for this object already?
    bool Diagnosed;

    UsageInfo() : Diagnosed(false) {}
  };
  using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;

  Sema &SemaRef;

  /// Sequenced regions within the expression.
  SequenceTree Tree;

  /// Declaration modifications and references which we have seen.
  UsageInfoMap UsageMap;

  /// The region we are currently within.
  SequenceTree::Seq Region;

  /// Filled in with declarations which were modified as a side-effect
  /// (that is, post-increment operations).
  SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr;

  /// Expressions to check later. We defer checking these to reduce
  /// stack usage.
  SmallVectorImpl<const Expr *> &WorkList;

  /// RAII object wrapping the visitation of a sequenced subexpression of an
  /// expression. At the end of this process, the side-effects of the evaluation
  /// become sequenced with respect to the value computation of the result, so
  /// we downgrade any UK_ModAsSideEffect within the evaluation to
  /// UK_ModAsValue.
  struct SequencedSubexpression {
    SequencedSubexpression(SequenceChecker &Self)
        : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
      // Redirect side-effect recording into our local buffer for the
      // duration of this scope.
      Self.ModAsSideEffect = &ModAsSideEffect;
    }

    ~SequencedSubexpression() {
      // Process the saved usages in reverse so nested saves are restored in
      // LIFO order.
      for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) {
        // Add a new usage with usage kind UK_ModAsValue, and then restore
        // the previous usage with UK_ModAsSideEffect (thus clearing it if
        // the previous one was empty).
        UsageInfo &UI = Self.UsageMap[M.first];
        auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect];
        Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue);
        SideEffectUsage = M.second;
      }
      Self.ModAsSideEffect = OldModAsSideEffect;
    }

    SequenceChecker &Self;
    SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
    SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect;
  };

  /// RAII object wrapping the visitation of a subexpression which we might
  /// choose to evaluate as a constant. If any subexpression is evaluated and
  /// found to be non-constant, this allows us to suppress the evaluation of
  /// the outer expression.
  class EvaluationTracker {
  public:
    EvaluationTracker(SequenceChecker &Self)
        : Self(Self), Prev(Self.EvalTracker) {
      Self.EvalTracker = this;
    }

    ~EvaluationTracker() {
      Self.EvalTracker = Prev;
      // Propagate a failed evaluation outward: once any nested subexpression
      // is found non-constant, enclosing trackers stop evaluating too.
      if (Prev)
        Prev->EvalOK &= EvalOK;
    }

    /// Try to fold \p E to a boolean constant. Returns true and sets
    /// \p Result on success; latches EvalOK to false on failure.
    bool evaluate(const Expr *E, bool &Result) {
      if (!EvalOK || E->isValueDependent())
        return false;
      EvalOK = E->EvaluateAsBooleanCondition(
          Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated());
      return EvalOK;
    }

  private:
    SequenceChecker &Self;
    EvaluationTracker *Prev;
    bool EvalOK = true;
  } *EvalTracker = nullptr;

  /// Find the object which is produced by the specified expression,
  /// if any. Returns null when the expression does not name an object we
  /// can track (\p Mod indicates whether the caller intends to modify it).
  Object getObject(const Expr *E, bool Mod) const {
    E = E->IgnoreParenCasts();
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      // The result of a pre-increment/decrement is the modified object.
      if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
        return getObject(UO->getSubExpr(), Mod);
    } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
      if (BO->getOpcode() == BO_Comma)
        return getObject(BO->getRHS(), Mod);
      if (Mod && BO->isAssignmentOp())
        return getObject(BO->getLHS(), Mod);
    } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
      // FIXME: Check for more interesting cases, like "x.n = ++x.n".
      if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
        return ME->getMemberDecl();
    } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
      // FIXME: If this is a reference, map through to its value.
      return DRE->getDecl();
    return nullptr;
  }

  /// Note that an object \p O was modified or used by an expression
  /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
  /// the object \p O as obtained via the \p UsageMap.
  void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
    // Get the old usage for the given object and usage kind.
    Usage &U = UI.Uses[UK];
    // Only overwrite the recorded usage if the old one is absent or is
    // sequenced with respect to the current region (we keep the
    // least-sequenced usage of each kind).
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
      // If we have a modification as side effect and are in a sequenced
      // subexpression, save the old Usage so that we can restore it later
      // in SequencedSubexpression::~SequencedSubexpression.
      if (UK == UK_ModAsSideEffect && ModAsSideEffect)
        ModAsSideEffect->push_back(std::make_pair(O, U));
      // Then record the new usage with the current sequencing region.
      U.UsageExpr = UsageExpr;
      U.Seq = Region;
    }
  }

  /// Check whether a modification or use of an object \p O in an expression
  /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
  /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
  /// \p IsModMod is true when we are checking for a mod-mod unsequenced
  /// usage and false we are checking for a mod-use unsequenced usage.
  void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
                  UsageKind OtherKind, bool IsModMod) {
    if (UI.Diagnosed)
      return;

    const Usage &U = UI.Uses[OtherKind];
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
      return;

    // Point the diagnostic at the modification; for a mod-use conflict where
    // the prior usage was the use, swap so Mod is still the modification.
    const Expr *Mod = U.UsageExpr;
    const Expr *ModOrUse = UsageExpr;
    if (OtherKind == UK_Use)
      std::swap(Mod, ModOrUse);

    SemaRef.DiagRuntimeBehavior(
        Mod->getExprLoc(), {Mod, ModOrUse},
        SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
                               : diag::warn_unsequenced_mod_use)
            << O << SourceRange(ModOrUse->getExprLoc()));
    // Diagnose each object at most once per expression.
    UI.Diagnosed = true;
  }

  // A note on note{Pre, Post}{Use, Mod}:
  //
  // (It helps to follow the algorithm with an expression such as
  //  "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
  //  operations before C++17 and both are well-defined in C++17).
  //
  // When visiting a node which uses/modify an object we first call notePreUse
  // or notePreMod before visiting its sub-expression(s). At this point the
  // children of the current node have not yet been visited and so the eventual
  // uses/modifications resulting from the children of the current node have not
  // been recorded yet.
  //
  // We then visit the children of the current node. After that notePostUse or
  // notePostMod is called. These will 1) detect an unsequenced modification
  // as side effect (as in "k++ + k") and 2) add a new usage with the
  // appropriate usage kind.
  //
  // We also have to be careful that some operation sequences modification as
  // side effect as well (for example: || or ,). To account for this we wrap
  // the visitation of such a sub-expression (for example: the LHS of || or ,)
  // with SequencedSubexpression. SequencedSubexpression is an RAII object
  // which record usages which are modifications as side effect, and then
  // downgrade them (or more accurately restore the previous usage which was a
  // modification as side effect) when exiting the scope of the sequenced
  // subexpression.

  void notePreUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    // Uses conflict with other modifications.
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
  }

  void notePostUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/false);
    addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use);
  }

  void notePreMod(Object O, const Expr *ModExpr) {
    UsageInfo &UI = UsageMap[O];
    // Modifications conflict with other modifications and with uses.
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
  }

  void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/true);
    addUsage(O, UI, ModExpr, /*UsageKind=*/UK);
  }

public:
  SequenceChecker(Sema &S, const Expr *E,
                  SmallVectorImpl<const Expr *> &WorkList)
      : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
    // The whole analysis runs from the constructor.
    Visit(E);
    // Silence a -Wunused-private-field since WorkList is now unused.
    // TODO: Evaluate if it can be used, and if not remove it.
    (void)this->WorkList;
  }

  void VisitStmt(const Stmt *S) {
    // Skip all statements which aren't expressions for now.
  }

  void VisitExpr(const Expr *E) {
    // By default, just recurse to evaluated subexpressions.
    Base::VisitStmt(E);
  }

  void VisitCoroutineSuspendExpr(const CoroutineSuspendExpr *CSE) {
    for (auto *Sub : CSE->children()) {
      // Non-expression children are skipped.
      const Expr *ChildExpr = dyn_cast_or_null<Expr>(Sub);
      if (!ChildExpr)
        continue;

      if (ChildExpr == CSE->getOperand())
        // Do not recurse over a CoroutineSuspendExpr's operand.
        // The operand is also a subexpression of getCommonExpr(), and
        // recursing into it directly could confuse object management
        // for the sake of sequence tracking.
        continue;

      Visit(Sub);
    }
  }

  void VisitCastExpr(const CastExpr *E) {
    Object O = Object();
    // Only an lvalue-to-rvalue conversion actually reads the object.
    if (E->getCastKind() == CK_LValueToRValue)
      O = getObject(E->getSubExpr(), false);

    if (O)
      notePreUse(O, E);
    VisitExpr(E);
    if (O)
      notePostUse(O, E);
  }

  /// Visit two expressions where the first is sequenced before the second:
  /// each gets its own child region, and both regions are merged back into
  /// the parent afterwards.
  void VisitSequencedExpressions(const Expr *SequencedBefore,
                                 const Expr *SequencedAfter) {
    SequenceTree::Seq BeforeRegion = Tree.allocate(Region);
    SequenceTree::Seq AfterRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    {
      SequencedSubexpression SeqBefore(*this);
      Region = BeforeRegion;
      Visit(SequencedBefore);
    }

    Region = AfterRegion;
    Visit(SequencedAfter);

    Region = OldRegion;

    Tree.merge(BeforeRegion);
    Tree.merge(AfterRegion);
  }

  void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) {
    // C++17 [expr.sub]p1:
    //   The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
    //   expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS());
    else {
      Visit(ASE->getLHS());
      Visit(ASE->getRHS());
    }
  }

  void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMem(const BinaryOperator *BO) {
    // C++17 [expr.mptr.oper]p4:
    //  Abbreviating pm-expression.*cast-expression as E1.*E2, [...]
    //  the expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShlShr(const BinaryOperator *BO) {
    // C++17 [expr.shift]p4:
    //  The expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinComma(const BinaryOperator *BO) {
    // C++11 [expr.comma]p1:
    //   Every value computation and side effect associated with the left
    //   expression is sequenced before every value computation and side
    //   effect associated with the right expression.
    VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
  }

  void VisitBinAssign(const BinaryOperator *BO) {
    // Pre-C++17 the operands are unsequenced, so both share the current
    // region; C++17 gives each its own sequenced child region.
    SequenceTree::Seq RHSRegion;
    SequenceTree::Seq LHSRegion;
    if (SemaRef.getLangOpts().CPlusPlus17) {
      RHSRegion = Tree.allocate(Region);
      LHSRegion = Tree.allocate(Region);
    } else {
      RHSRegion = Region;
      LHSRegion = Region;
    }
    SequenceTree::Seq OldRegion = Region;

    // C++11 [expr.ass]p1:
    //  [...] the assignment is sequenced after the value computation
    //  of the right and left operands, [...]
    //
    // so check it before inspecting the operands and update the
    // map afterwards.
    Object O = getObject(BO->getLHS(), /*Mod=*/true);
    if (O)
      notePreMod(O, BO);

    if (SemaRef.getLangOpts().CPlusPlus17) {
      // C++17 [expr.ass]p1:
      //  [...] The right operand is sequenced before the left operand. [...]
      {
        SequencedSubexpression SeqBefore(*this);
        Region = RHSRegion;
        Visit(BO->getRHS());
      }

      Region = LHSRegion;
      Visit(BO->getLHS());

      // A compound assignment also reads the LHS object.
      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

    } else {
      // C++11 does not specify any sequencing between the LHS and RHS.
      Region = LHSRegion;
      Visit(BO->getLHS());

      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    // C++11 [expr.ass]p1:
    //   the assignment is sequenced [...] before the value computation of the
    //   assignment expression.
    // C11 6.5.16/3 has no such rule.
    Region = OldRegion;
    if (O)
      notePostMod(O, BO,
                  SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                  : UK_ModAsSideEffect);
    if (SemaRef.getLangOpts().CPlusPlus17) {
      Tree.merge(RHSRegion);
      Tree.merge(LHSRegion);
    }
  }

  void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) {
    VisitBinAssign(CAO);
  }

  void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    // C++11 [expr.pre.incr]p1:
    //   the expression ++x is equivalent to x+=1
    notePostMod(O, UO,
                SemaRef.getLangOpts().CPlusPlus ?
                    UK_ModAsValue
                                                : UK_ModAsSideEffect);
  }

  void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    // A post-increment/decrement's side effect is not sequenced before the
    // value computation of the enclosing expression.
    notePostMod(O, UO, UK_ModAsSideEffect);
  }

  void VisitBinLOr(const BinaryOperator *BO) {
    // C++11 [expr.log.or]p2:
    //  If the second expression is evaluated, every value computation and
    //  side effect associated with the first expression is sequenced before
    //  every value computation and side effect associated with the
    //  second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.or]p1:
    //  [...] the second operand is not evaluated if the first operand
    //  evaluates to true.
    // If we cannot fold the LHS we conservatively visit the RHS anyway.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitBinLAnd(const BinaryOperator *BO) {
    // C++11 [expr.log.and]p2:
    //  If the second expression is evaluated, every value computation and
    //  side effect associated with the first expression is sequenced before
    //  every value computation and side effect associated with the
    //  second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.and]p1:
    //  [...] the second operand is not evaluated if the first operand is false.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
    // C++11 [expr.cond]p1:
    //  [...] Every value computation and side effect associated with the first
    //  expression is sequenced before every value computation and side effect
    //  associated with the second or third expression.
    SequenceTree::Seq ConditionRegion = Tree.allocate(Region);

    // No sequencing is specified between the true and false expression.
    // However since exactly one of both is going to be evaluated we can
    // consider them to be sequenced. This is needed to avoid warning on
    // something like "x ? y+= 1 : y += 2;" in the case where we will visit
    // both the true and false expressions because we can't evaluate x.
    // This will still allow us to detect an expression like (pre C++17)
    // "(x ? y += 1 : y += 2) = y".
    //
    // We don't wrap the visitation of the true and false expression with
    // SequencedSubexpression because we don't want to downgrade modifications
    // as side effect in the true and false expressions after the visitation
    // is done. (for example in the expression "(x ? y++ : y++) + y" we should
    // not warn between the two "y++", but we should warn between the "y++"
    // and the "y".)
    SequenceTree::Seq TrueRegion = Tree.allocate(Region);
    SequenceTree::Seq FalseRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = ConditionRegion;
      Visit(CO->getCond());
    }

    // C++11 [expr.cond]p1:
    //  [...] The first expression is contextually converted to bool (Clause 4).
    //  It is evaluated and if it is true, the result of the conditional
    //  expression is the value of the second expression, otherwise that of the
    //  third expression. Only one of the second and third expressions is
    //  evaluated. [...]
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
    bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
    bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitTrueExpr) {
      Region = TrueRegion;
      Visit(CO->getTrueExpr());
    }
    if (ShouldVisitFalseExpr) {
      Region = FalseRegion;
      Visit(CO->getFalseExpr());
    }

    Region = OldRegion;
    Tree.merge(ConditionRegion);
    Tree.merge(TrueRegion);
    Tree.merge(FalseRegion);
  }

  void VisitCallExpr(const CallExpr *CE) {
    // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.

    // Unevaluated builtin calls (e.g. __builtin_constant_p) do not evaluate
    // their arguments, so there is nothing to sequence.
    if (CE->isUnevaluatedBuiltinCall(Context))
      return;

    // C++11 [intro.execution]p15:
    //  When calling a function [...], every value computation and side effect
    //  associated with any argument expression, or with the postfix expression
    //  designating the called function, is sequenced before execution of every
    //  expression or statement in the body of the function [and thus before
    //  the value computation of its result].
    SequencedSubexpression Sequenced(*this);
    SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] {
      // C++17 [expr.call]p5
      //  The postfix-expression is sequenced before each expression in the
      //  expression-list and any default argument. [...]
      SequenceTree::Seq CalleeRegion;
      SequenceTree::Seq OtherRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        CalleeRegion = Tree.allocate(Region);
        OtherRegion = Tree.allocate(Region);
      } else {
        CalleeRegion = Region;
        OtherRegion = Region;
      }
      SequenceTree::Seq OldRegion = Region;

      // Visit the callee expression first.
      Region = CalleeRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        SequencedSubexpression Sequenced(*this);
        Visit(CE->getCallee());
      } else {
        Visit(CE->getCallee());
      }

      // Then visit the argument expressions.
      Region = OtherRegion;
      for (const Expr *Argument : CE->arguments())
        Visit(Argument);

      Region = OldRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        Tree.merge(CalleeRegion);
        Tree.merge(OtherRegion);
      }
    });
  }

  void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) {
    // C++17 [over.match.oper]p2:
    //   [...] the operator notation is first transformed to the equivalent
    //   function-call notation as summarized in Table 12 (where @ denotes one
    //   of the operators covered in the specified subclause). However, the
    //   operands are sequenced in the order prescribed for the built-in
    //   operator (Clause 8).
    //
    // From the above only overloaded binary operators and overloaded call
    // operators have sequencing rules in C++17 that we need to handle
    // separately.
    if (!SemaRef.getLangOpts().CPlusPlus17 ||
        (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call))
      return VisitCallExpr(CXXOCE);

    enum {
      NoSequencing,
      LHSBeforeRHS,
      RHSBeforeLHS,
      LHSBeforeRest
    } SequencingKind;
    switch (CXXOCE->getOperator()) {
    // Assignment operators: RHS sequenced before LHS (C++17 [expr.ass]p1).
    case OO_Equal:
    case OO_PlusEqual:
    case OO_MinusEqual:
    case OO_StarEqual:
    case OO_SlashEqual:
    case OO_PercentEqual:
    case OO_CaretEqual:
    case OO_AmpEqual:
    case OO_PipeEqual:
    case OO_LessLessEqual:
    case OO_GreaterGreaterEqual:
      SequencingKind = RHSBeforeLHS;
      break;

    // Shifts, logical operators, comma, ->* and subscript: LHS first.
    case OO_LessLess:
    case OO_GreaterGreater:
    case OO_AmpAmp:
    case OO_PipePipe:
    case OO_Comma:
    case OO_ArrowStar:
    case OO_Subscript:
      SequencingKind = LHSBeforeRHS;
      break;

    case OO_Call:
      SequencingKind = LHSBeforeRest;
      break;

    default:
      SequencingKind = NoSequencing;
      break;
    }

    if (SequencingKind == NoSequencing)
      return VisitCallExpr(CXXOCE);

    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] {
      assert(SemaRef.getLangOpts().CPlusPlus17 &&
             "Should only get there with C++17 and above!");
      assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) &&
             "Should only get there with an overloaded binary operator"
             " or an overloaded call operator!");

      if (SequencingKind == LHSBeforeRest) {
        assert(CXXOCE->getOperator() == OO_Call &&
               "We should only have an overloaded call operator here!");

        // This is very similar to VisitCallExpr, except that we only have the
        // C++17 case. The postfix-expression is the first argument of the
        // CXXOperatorCallExpr. The expressions in the expression-list, if any,
        // are in the following arguments.
        //
        // Note that we intentionally do not visit the callee expression since
        // it is just a decayed reference to a function.
        SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region);
        SequenceTree::Seq ArgsRegion = Tree.allocate(Region);
        SequenceTree::Seq OldRegion = Region;

        assert(CXXOCE->getNumArgs() >= 1 &&
               "An overloaded call operator must have at least one argument"
               " for the postfix-expression!");
        const Expr *PostfixExpr = CXXOCE->getArgs()[0];
        llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1,
                                          CXXOCE->getNumArgs() - 1);

        // Visit the postfix-expression first.
        {
          Region = PostfixExprRegion;
          SequencedSubexpression Sequenced(*this);
          Visit(PostfixExpr);
        }

        // Then visit the argument expressions.
        Region = ArgsRegion;
        for (const Expr *Arg : Args)
          Visit(Arg);

        Region = OldRegion;
        Tree.merge(PostfixExprRegion);
        Tree.merge(ArgsRegion);
      } else {
        assert(CXXOCE->getNumArgs() == 2 &&
               "Should only have two arguments here!");
        assert((SequencingKind == LHSBeforeRHS ||
                SequencingKind == RHSBeforeLHS) &&
               "Unexpected sequencing kind!");

        // We do not visit the callee expression since it is just a decayed
        // reference to a function.
        const Expr *E1 = CXXOCE->getArg(0);
        const Expr *E2 = CXXOCE->getArg(1);
        if (SequencingKind == RHSBeforeLHS)
          std::swap(E1, E2);

        return VisitSequencedExpressions(E1, E2);
      }
    });
  }

  void VisitCXXConstructExpr(const CXXConstructExpr *CCE) {
    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    if (!CCE->isListInitialization())
      return VisitExpr(CCE);

    // In C++11, list initializations are sequenced: give each initializer
    // its own region, sequenced relative to its siblings.
    SmallVector<SequenceTree::Seq, 32> Elts;
    SequenceTree::Seq Parent = Region;
    for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
                                              E = CCE->arg_end();
         I != E; ++I) {
      Region = Tree.allocate(Parent);
      Elts.push_back(Region);
      Visit(*I);
    }

    // Forget that the initializers are sequenced.
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }

  void VisitInitListExpr(const InitListExpr *ILE) {
    if (!SemaRef.getLangOpts().CPlusPlus11)
      return VisitExpr(ILE);

    // In C++11, list initializations are sequenced.
16540 SmallVector<SequenceTree::Seq, 32> Elts; 16541 SequenceTree::Seq Parent = Region; 16542 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 16543 const Expr *E = ILE->getInit(I); 16544 if (!E) 16545 continue; 16546 Region = Tree.allocate(Parent); 16547 Elts.push_back(Region); 16548 Visit(E); 16549 } 16550 16551 // Forget that the initializers are sequenced. 16552 Region = Parent; 16553 for (unsigned I = 0; I < Elts.size(); ++I) 16554 Tree.merge(Elts[I]); 16555 } 16556 }; 16557 16558 } // namespace 16559 16560 void Sema::CheckUnsequencedOperations(const Expr *E) { 16561 SmallVector<const Expr *, 8> WorkList; 16562 WorkList.push_back(E); 16563 while (!WorkList.empty()) { 16564 const Expr *Item = WorkList.pop_back_val(); 16565 SequenceChecker(*this, Item, WorkList); 16566 } 16567 } 16568 16569 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 16570 bool IsConstexpr) { 16571 llvm::SaveAndRestore ConstantContext(isConstantEvaluatedOverride, 16572 IsConstexpr || isa<ConstantExpr>(E)); 16573 CheckImplicitConversions(E, CheckLoc); 16574 if (!E->isInstantiationDependent()) 16575 CheckUnsequencedOperations(E); 16576 if (!IsConstexpr && !E->isValueDependent()) 16577 CheckForIntOverflow(E); 16578 DiagnoseMisalignedMembers(); 16579 } 16580 16581 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 16582 FieldDecl *BitField, 16583 Expr *Init) { 16584 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 16585 } 16586 16587 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 16588 SourceLocation Loc) { 16589 if (!PType->isVariablyModifiedType()) 16590 return; 16591 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 16592 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 16593 return; 16594 } 16595 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 16596 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 16597 return; 16598 } 16599 if (const auto *ParenTy = 
dyn_cast<ParenType>(PType)) { 16600 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 16601 return; 16602 } 16603 16604 const ArrayType *AT = S.Context.getAsArrayType(PType); 16605 if (!AT) 16606 return; 16607 16608 if (AT->getSizeModifier() != ArrayType::Star) { 16609 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 16610 return; 16611 } 16612 16613 S.Diag(Loc, diag::err_array_star_in_function_definition); 16614 } 16615 16616 /// CheckParmsForFunctionDef - Check that the parameters of the given 16617 /// function are appropriate for the definition of a function. This 16618 /// takes care of any checks that cannot be performed on the 16619 /// declaration itself, e.g., that the types of each of the function 16620 /// parameters are complete. 16621 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 16622 bool CheckParameterNames) { 16623 bool HasInvalidParm = false; 16624 for (ParmVarDecl *Param : Parameters) { 16625 assert(Param && "null in a parameter list"); 16626 // C99 6.7.5.3p4: the parameters in a parameter type list in a 16627 // function declarator that is part of a function definition of 16628 // that function shall not have incomplete type. 16629 // 16630 // C++23 [dcl.fct.def.general]/p2 16631 // The type of a parameter [...] for a function definition 16632 // shall not be a (possibly cv-qualified) class type that is incomplete 16633 // or abstract within the function body unless the function is deleted. 
16634 if (!Param->isInvalidDecl() && 16635 (RequireCompleteType(Param->getLocation(), Param->getType(), 16636 diag::err_typecheck_decl_incomplete_type) || 16637 RequireNonAbstractType(Param->getBeginLoc(), Param->getOriginalType(), 16638 diag::err_abstract_type_in_decl, 16639 AbstractParamType))) { 16640 Param->setInvalidDecl(); 16641 HasInvalidParm = true; 16642 } 16643 16644 // C99 6.9.1p5: If the declarator includes a parameter type list, the 16645 // declaration of each parameter shall include an identifier. 16646 if (CheckParameterNames && Param->getIdentifier() == nullptr && 16647 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 16648 // Diagnose this as an extension in C17 and earlier. 16649 if (!getLangOpts().C2x) 16650 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 16651 } 16652 16653 // C99 6.7.5.3p12: 16654 // If the function declarator is not part of a definition of that 16655 // function, parameters may have incomplete type and may use the [*] 16656 // notation in their sequences of declarator specifiers to specify 16657 // variable length array types. 16658 QualType PType = Param->getOriginalType(); 16659 // FIXME: This diagnostic should point the '[*]' if source-location 16660 // information is added for it. 16661 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 16662 16663 // If the parameter is a c++ class type and it has to be destructed in the 16664 // callee function, declare the destructor so that it can be called by the 16665 // callee function. Do not perform any direct access check on the dtor here. 
16666 if (!Param->isInvalidDecl()) { 16667 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 16668 if (!ClassDecl->isInvalidDecl() && 16669 !ClassDecl->hasIrrelevantDestructor() && 16670 !ClassDecl->isDependentContext() && 16671 ClassDecl->isParamDestroyedInCallee()) { 16672 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 16673 MarkFunctionReferenced(Param->getLocation(), Destructor); 16674 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 16675 } 16676 } 16677 } 16678 16679 // Parameters with the pass_object_size attribute only need to be marked 16680 // constant at function definitions. Because we lack information about 16681 // whether we're on a declaration or definition when we're instantiating the 16682 // attribute, we need to check for constness here. 16683 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 16684 if (!Param->getType().isConstQualified()) 16685 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 16686 << Attr->getSpelling() << 1; 16687 16688 // Check for parameter names shadowing fields from the class. 16689 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 16690 // The owning context for the parameter should be the function, but we 16691 // want to see if this function's declaration context is a record. 
16692 DeclContext *DC = Param->getDeclContext(); 16693 if (DC && DC->isFunctionOrMethod()) { 16694 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 16695 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 16696 RD, /*DeclIsField*/ false); 16697 } 16698 } 16699 16700 if (!Param->isInvalidDecl() && 16701 Param->getOriginalType()->isWebAssemblyTableType()) { 16702 Param->setInvalidDecl(); 16703 HasInvalidParm = true; 16704 Diag(Param->getLocation(), diag::err_wasm_table_as_function_parameter); 16705 } 16706 } 16707 16708 return HasInvalidParm; 16709 } 16710 16711 std::optional<std::pair< 16712 CharUnits, CharUnits>> static getBaseAlignmentAndOffsetFromPtr(const Expr 16713 *E, 16714 ASTContext 16715 &Ctx); 16716 16717 /// Compute the alignment and offset of the base class object given the 16718 /// derived-to-base cast expression and the alignment and offset of the derived 16719 /// class object. 16720 static std::pair<CharUnits, CharUnits> 16721 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 16722 CharUnits BaseAlignment, CharUnits Offset, 16723 ASTContext &Ctx) { 16724 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 16725 ++PathI) { 16726 const CXXBaseSpecifier *Base = *PathI; 16727 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 16728 if (Base->isVirtual()) { 16729 // The complete object may have a lower alignment than the non-virtual 16730 // alignment of the base, in which case the base may be misaligned. Choose 16731 // the smaller of the non-virtual alignment and BaseAlignment, which is a 16732 // conservative lower bound of the complete object alignment. 
16733 CharUnits NonVirtualAlignment = 16734 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 16735 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 16736 Offset = CharUnits::Zero(); 16737 } else { 16738 const ASTRecordLayout &RL = 16739 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 16740 Offset += RL.getBaseClassOffset(BaseDecl); 16741 } 16742 DerivedType = Base->getType(); 16743 } 16744 16745 return std::make_pair(BaseAlignment, Offset); 16746 } 16747 16748 /// Compute the alignment and offset of a binary additive operator. 16749 static std::optional<std::pair<CharUnits, CharUnits>> 16750 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 16751 bool IsSub, ASTContext &Ctx) { 16752 QualType PointeeType = PtrE->getType()->getPointeeType(); 16753 16754 if (!PointeeType->isConstantSizeType()) 16755 return std::nullopt; 16756 16757 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 16758 16759 if (!P) 16760 return std::nullopt; 16761 16762 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 16763 if (std::optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 16764 CharUnits Offset = EltSize * IdxRes->getExtValue(); 16765 if (IsSub) 16766 Offset = -Offset; 16767 return std::make_pair(P->first, P->second + Offset); 16768 } 16769 16770 // If the integer expression isn't a constant expression, compute the lower 16771 // bound of the alignment using the alignment and offset of the pointer 16772 // expression and the element size. 16773 return std::make_pair( 16774 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 16775 CharUnits::Zero()); 16776 } 16777 16778 /// This helper function takes an lvalue expression and returns the alignment of 16779 /// a VarDecl and a constant offset from the VarDecl. 
16780 std::optional<std::pair< 16781 CharUnits, 16782 CharUnits>> static getBaseAlignmentAndOffsetFromLValue(const Expr *E, 16783 ASTContext &Ctx) { 16784 E = E->IgnoreParens(); 16785 switch (E->getStmtClass()) { 16786 default: 16787 break; 16788 case Stmt::CStyleCastExprClass: 16789 case Stmt::CXXStaticCastExprClass: 16790 case Stmt::ImplicitCastExprClass: { 16791 auto *CE = cast<CastExpr>(E); 16792 const Expr *From = CE->getSubExpr(); 16793 switch (CE->getCastKind()) { 16794 default: 16795 break; 16796 case CK_NoOp: 16797 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 16798 case CK_UncheckedDerivedToBase: 16799 case CK_DerivedToBase: { 16800 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 16801 if (!P) 16802 break; 16803 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 16804 P->second, Ctx); 16805 } 16806 } 16807 break; 16808 } 16809 case Stmt::ArraySubscriptExprClass: { 16810 auto *ASE = cast<ArraySubscriptExpr>(E); 16811 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 16812 false, Ctx); 16813 } 16814 case Stmt::DeclRefExprClass: { 16815 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 16816 // FIXME: If VD is captured by copy or is an escaping __block variable, 16817 // use the alignment of VD's type. 16818 if (!VD->getType()->isReferenceType()) { 16819 // Dependent alignment cannot be resolved -> bail out. 
16820 if (VD->hasDependentAlignment()) 16821 break; 16822 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 16823 } 16824 if (VD->hasInit()) 16825 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 16826 } 16827 break; 16828 } 16829 case Stmt::MemberExprClass: { 16830 auto *ME = cast<MemberExpr>(E); 16831 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 16832 if (!FD || FD->getType()->isReferenceType() || 16833 FD->getParent()->isInvalidDecl()) 16834 break; 16835 std::optional<std::pair<CharUnits, CharUnits>> P; 16836 if (ME->isArrow()) 16837 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 16838 else 16839 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 16840 if (!P) 16841 break; 16842 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 16843 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 16844 return std::make_pair(P->first, 16845 P->second + CharUnits::fromQuantity(Offset)); 16846 } 16847 case Stmt::UnaryOperatorClass: { 16848 auto *UO = cast<UnaryOperator>(E); 16849 switch (UO->getOpcode()) { 16850 default: 16851 break; 16852 case UO_Deref: 16853 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 16854 } 16855 break; 16856 } 16857 case Stmt::BinaryOperatorClass: { 16858 auto *BO = cast<BinaryOperator>(E); 16859 auto Opcode = BO->getOpcode(); 16860 switch (Opcode) { 16861 default: 16862 break; 16863 case BO_Comma: 16864 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 16865 } 16866 break; 16867 } 16868 } 16869 return std::nullopt; 16870 } 16871 16872 /// This helper function takes a pointer expression and returns the alignment of 16873 /// a VarDecl and a constant offset from the VarDecl. 
std::optional<std::pair<
    CharUnits, CharUnits>> static getBaseAlignmentAndOffsetFromPtr(const Expr
                                                                       *E,
                                                                   ASTContext
                                                                       &Ctx) {
  E = E->IgnoreParens();
  switch (E->getStmtClass()) {
  default:
    break;
  case Stmt::CStyleCastExprClass:
  case Stmt::CXXStaticCastExprClass:
  case Stmt::ImplicitCastExprClass: {
    auto *CE = cast<CastExpr>(E);
    const Expr *From = CE->getSubExpr();
    switch (CE->getCastKind()) {
    default:
      break;
    case CK_NoOp:
      return getBaseAlignmentAndOffsetFromPtr(From, Ctx);
    case CK_ArrayToPointerDecay:
      // The decayed pointer points at the array lvalue itself.
      return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx);
      if (!P)
        break;
      return getDerivedToBaseAlignmentAndOffset(
          CE, From->getType()->getPointeeType(), P->first, P->second, Ctx);
    }
    }
    break;
  }
  case Stmt::CXXThisExprClass: {
    // 'this' is guaranteed at least the non-virtual alignment of its class.
    auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl();
    CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment();
    return std::make_pair(Alignment, CharUnits::Zero());
  }
  case Stmt::UnaryOperatorClass: {
    auto *UO = cast<UnaryOperator>(E);
    if (UO->getOpcode() == UO_AddrOf)
      return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx);
    break;
  }
  case Stmt::BinaryOperatorClass: {
    auto *BO = cast<BinaryOperator>(E);
    auto Opcode = BO->getOpcode();
    switch (Opcode) {
    default:
      break;
    case BO_Add:
    case BO_Sub: {
      // For 'int + ptr', canonicalize so LHS is the pointer operand.
      const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS();
      if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType())
        std::swap(LHS, RHS);
      return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub,
                                                  Ctx);
    }
    case BO_Comma:
      return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx);
    }
    break;
  }
  }
  return std::nullopt;
}

static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) {
  // See if we can compute the alignment of a VarDecl and an offset from it.
  std::optional<std::pair<CharUnits, CharUnits>> P =
      getBaseAlignmentAndOffsetFromPtr(E, S.Context);

  if (P)
    return P->first.alignmentAtOffset(P->second);

  // If that failed, return the type's alignment.
  return S.Context.getTypeAlignInChars(E->getType()->getPointeeType());
}

/// CheckCastAlign - Implements -Wcast-align, which warns when a
/// pointer cast increases the alignment requirements.
void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
  // This is actually a lot of work to potentially be doing on every
  // cast; don't do it if we're ignoring -Wcast_align (as is the default).
  if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
    return;

  // Ignore dependent types.
  if (T->isDependentType() || Op->getType()->isDependentType())
    return;

  // Require that the destination be a pointer type.
  const PointerType *DestPtr = T->getAs<PointerType>();
  if (!DestPtr) return;

  // If the destination has alignment 1, we're done.
  QualType DestPointee = DestPtr->getPointeeType();
  if (DestPointee->isIncompleteType()) return;
  CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee);
  if (DestAlign.isOne()) return;

  // Require that the source be a pointer type.
  const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
  if (!SrcPtr) return;
  QualType SrcPointee = SrcPtr->getPointeeType();

  // Explicitly allow casts from cv void*.  We already implicitly
  // allowed casts to cv void*, since they have alignment 1.
  // Also allow casts involving incomplete types, which implicitly
  // includes 'void'.
  if (SrcPointee->isIncompleteType()) return;

  CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this);

  if (SrcAlign >= DestAlign) return;

  Diag(TRange.getBegin(), diag::warn_cast_align)
      << Op->getType() << T
      << static_cast<unsigned>(SrcAlign.getQuantity())
      << static_cast<unsigned>(DestAlign.getQuantity())
      << TRange << Op->getSourceRange();
}

/// Diagnose statically-detectable out-of-bounds accesses (-Warray-bounds).
/// \p BaseExpr is the array or pointer being indexed and \p IndexExpr the
/// index; \p ASE is the subscript expression itself, or null for raw pointer
/// arithmetic.  \p AllowOnePastEnd permits the one-past-the-end address;
/// \p IndexNegated means the caller has already syntactically negated the
/// index (e.g. for 'p - n').
void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                            const ArraySubscriptExpr *ASE,
                            bool AllowOnePastEnd, bool IndexNegated) {
  // Already diagnosed by the constant evaluator.
  if (isConstantEvaluated())
    return;

  IndexExpr = IndexExpr->IgnoreParenImpCasts();
  if (IndexExpr->isValueDependent())
    return;

  const Type *EffectiveType =
      BaseExpr->getType()->getPointeeOrArrayElementType();
  BaseExpr = BaseExpr->IgnoreParenCasts();
  const ConstantArrayType *ArrayTy =
      Context.getAsConstantArrayType(BaseExpr->getType());

  LangOptions::StrictFlexArraysLevelKind
    StrictFlexArraysLevel = getLangOpts().getStrictFlexArraysLevel();

  const Type *BaseType =
      ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr();
  // Flexible-array-like members are treated as arrays of unknown bound.
  bool IsUnboundedArray =
      BaseType == nullptr || BaseExpr->isFlexibleArrayMemberLike(
                                 Context, StrictFlexArraysLevel,
                                 /*IgnoreTemplateOrMacroSubstitution=*/true);
  if (EffectiveType->isDependentType() ||
      (!IsUnboundedArray && BaseType->isDependentType()))
    return;

  Expr::EvalResult Result;
  if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects))
    return;

  llvm::APSInt index = Result.Val.getInt();
  if (IndexNegated) {
    index.setIsUnsigned(false);
    index = -index;
  }

  if (IsUnboundedArray) {
    if (EffectiveType->isFunctionType())
      return;
    if (index.isUnsigned() || !index.isNegative()) {
      // For unbounded arrays the only static check possible is whether the
      // access could exceed the address space.
      const auto &ASTC = getASTContext();
      unsigned AddrBits = ASTC.getTargetInfo().getPointerWidth(
          EffectiveType->getCanonicalTypeInternal().getAddressSpace());
      if (index.getBitWidth() < AddrBits)
        index = index.zext(AddrBits);
      std::optional<CharUnits> ElemCharUnits =
          ASTC.getTypeSizeInCharsIfKnown(EffectiveType);
      // PR50741 - If EffectiveType has unknown size (e.g., if it's a void
      // pointer) bounds-checking isn't meaningful.
      if (!ElemCharUnits)
        return;
      llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity());
      // If index has more active bits than address space, we already know
      // we have a bounds violation to warn about.  Otherwise, compute
      // address of (index + 1)th element, and warn about bounds violation
      // only if that address exceeds address space.
      if (index.getActiveBits() <= AddrBits) {
        bool Overflow;
        llvm::APInt Product(index);
        Product += 1;
        Product = Product.umul_ov(ElemBytes, Overflow);
        if (!Overflow && Product.getActiveBits() <= AddrBits)
          return;
      }

      // Need to compute max possible elements in address space, since that
      // is included in diag message.
      llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits);
      MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth()));
      MaxElems += 1;
      ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth());
      MaxElems = MaxElems.udiv(ElemBytes);

      unsigned DiagID =
          ASE ? diag::warn_array_index_exceeds_max_addressable_bounds
              : diag::warn_ptr_arith_exceeds_max_addressable_bounds;

      // Diag message shows element size in bits and in "bytes" (platform-
      // dependent CharUnits)
      DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                          PDiag(DiagID)
                              << toString(index, 10, true) << AddrBits
                              << (unsigned)ASTC.toBits(*ElemCharUnits)
                              << toString(ElemBytes, 10, false)
                              << toString(MaxElems, 10, false)
                              << (unsigned)MaxElems.getLimitedValue(~0U)
                              << IndexExpr->getSourceRange());

      const NamedDecl *ND = nullptr;
      // Try harder to find a NamedDecl to point at in the note.
      while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
        BaseExpr = ASE->getBase()->IgnoreParenCasts();
      if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
        ND = DRE->getDecl();
      if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
        ND = ME->getMemberDecl();

      if (ND)
        DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
                            PDiag(diag::note_array_declared_here) << ND);
    }
    return;
  }

  if (index.isUnsigned() || !index.isNegative()) {
    // It is possible that the type of the base expression after
    // IgnoreParenCasts is incomplete, even though the type of the base
    // expression before IgnoreParenCasts is complete (see PR39746 for an
    // example). In this case we have no information about whether the array
    // access exceeds the array bounds. However we can still diagnose an array
    // access which precedes the array bounds.
    if (BaseType->isIncompleteType())
      return;

    llvm::APInt size = ArrayTy->getSize();

    if (BaseType != EffectiveType) {
      // Make sure we're comparing apples to apples when comparing index to
      // size.
      uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
      uint64_t array_typesize = Context.getTypeSize(BaseType);

      // Handle ptrarith_typesize being zero, such as when casting to void*.
      // Use the size in bits (what "getTypeSize()" returns) rather than bytes.
      if (!ptrarith_typesize)
        ptrarith_typesize = Context.getCharWidth();

      if (ptrarith_typesize != array_typesize) {
        // There's a cast to a different size type involved.
        uint64_t ratio = array_typesize / ptrarith_typesize;

        // TODO: Be smarter about handling cases where array_typesize is not a
        // multiple of ptrarith_typesize.
        if (ptrarith_typesize * ratio == array_typesize)
          size *= llvm::APInt(size.getBitWidth(), ratio);
      }
    }

    // Widen whichever of index/size is narrower so they can be compared.
    if (size.getBitWidth() > index.getBitWidth())
      index = index.zext(size.getBitWidth());
    else if (size.getBitWidth() < index.getBitWidth())
      size = size.zext(index.getBitWidth());

    // For array subscripting the index must be less than size, but for pointer
    // arithmetic also allow the index (offset) to be equal to size since
    // computing the next address after the end of the array is legal and
    // commonly done e.g. in C++ iterators and range-based for loops.
    if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
      return;

    // Suppress the warning if the subscript expression (as identified by the
    // ']' location) and the index expression are both from macro expansions
    // within a system header.
    if (ASE) {
      SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
          ASE->getRBracketLoc());
      if (SourceMgr.isInSystemHeader(RBracketLoc)) {
        SourceLocation IndexLoc =
            SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc());
        if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc))
          return;
      }
    }

    unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds
                          : diag::warn_ptr_arith_exceeds_bounds;
    unsigned CastMsg = (!ASE || BaseType == EffectiveType) ? 0 : 1;
    QualType CastMsgTy = ASE ? ASE->getLHS()->getType() : QualType();

    DiagRuntimeBehavior(
        BaseExpr->getBeginLoc(), BaseExpr,
        PDiag(DiagID) << toString(index, 10, true) << ArrayTy->desugar()
                      << CastMsg << CastMsgTy << IndexExpr->getSourceRange());
  } else {
    unsigned DiagID = diag::warn_array_index_precedes_bounds;
    if (!ASE) {
      DiagID = diag::warn_ptr_arith_precedes_bounds;
      if (index.isNegative()) index = -index;
    }

    DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                        PDiag(DiagID) << toString(index, 10, true)
                                      << IndexExpr->getSourceRange());
  }

  const NamedDecl *ND = nullptr;
  // Try harder to find a NamedDecl to point at in the note.
  while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
    BaseExpr = ASE->getBase()->IgnoreParenCasts();
  if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
    ND = DRE->getDecl();
  if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
    ND = ME->getMemberDecl();

  if (ND)
    DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
                        PDiag(diag::note_array_declared_here) << ND);
}

/// Walk an expression tree, checking every array subscript / pointer-offset
/// found along the way.  AllowOnePastEnd tracks the nesting of '&' vs '*' so
/// that '&a[size]' is accepted while '*&a[size]' is not.
void Sema::CheckArrayAccess(const Expr *expr) {
  int AllowOnePastEnd = 0;
  while (expr) {
    expr = expr->IgnoreParenImpCasts();
    switch (expr->getStmtClass()) {
    case Stmt::ArraySubscriptExprClass: {
      const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
      CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
                       AllowOnePastEnd > 0);
      expr = ASE->getBase();
      break;
    }
    case Stmt::MemberExprClass: {
      expr = cast<MemberExpr>(expr)->getBase();
      break;
    }
    case Stmt::OMPArraySectionExprClass: {
      const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
      if (ASE->getLowerBound())
        CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
                         /*ASE=*/nullptr, AllowOnePastEnd > 0);
      return;
    }
    case
Stmt::UnaryOperatorClass: {
      // Only unwrap the * and & unary operators
      const UnaryOperator *UO = cast<UnaryOperator>(expr);
      expr = UO->getSubExpr();
      switch (UO->getOpcode()) {
      case UO_AddrOf:
        // Taking the address allows the one-past-the-end element.
        AllowOnePastEnd++;
        break;
      case UO_Deref:
        AllowOnePastEnd--;
        break;
      default:
        return;
      }
      break;
    }
    case Stmt::ConditionalOperatorClass: {
      const ConditionalOperator *cond = cast<ConditionalOperator>(expr);
      if (const Expr *lhs = cond->getLHS())
        CheckArrayAccess(lhs);
      if (const Expr *rhs = cond->getRHS())
        CheckArrayAccess(rhs);
      return;
    }
    case Stmt::CXXOperatorCallExprClass: {
      const auto *OCE = cast<CXXOperatorCallExpr>(expr);
      for (const auto *Arg : OCE->arguments())
        CheckArrayAccess(Arg);
      return;
    }
    default:
      return;
    }
  }
}

//===--- CHECK: Objective-C retain cycles ----------------------------------//

namespace {

/// Describes the strongly-retained object that a block capture might form a
/// retain cycle with, plus source locations for diagnostics.
struct RetainCycleOwner {
  VarDecl *Variable = nullptr;
  SourceRange Range;
  SourceLocation Loc;
  // True when ownership is reached through an ivar/property rather than the
  // variable itself.
  bool Indirect = false;

  RetainCycleOwner() = default;

  void setLocsFrom(Expr *e) {
    Loc = e->getExprLoc();
    Range = e->getSourceRange();
  }
};

} // namespace

/// Consider whether capturing the given variable can possibly lead to
/// a retain cycle.
static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
  // In ARC, it's captured strongly iff the variable has __strong
  // lifetime.  In MRR, it's captured strongly if the variable is
  // __block and has an appropriate type.
  if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
    return false;

  owner.Variable = var;
  if (ref)
    owner.setLocsFrom(ref);
  return true;
}

/// Walk \p e looking for a strongly-retained owner (a __strong variable,
/// possibly reached through __strong ivars or retaining properties) and fill
/// in \p owner.  Returns false if no such owner can be identified.
static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
  while (true) {
    e = e->IgnoreParens();
    if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
      // Only look through value-preserving casts.
      switch (cast->getCastKind()) {
      case CK_BitCast:
      case CK_LValueBitCast:
      case CK_LValueToRValue:
      case CK_ARCReclaimReturnedObject:
        e = cast->getSubExpr();
        continue;

      default:
        return false;
      }
    }

    if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
      ObjCIvarDecl *ivar = ref->getDecl();
      if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
        return false;

      // Try to find a retain cycle in the base.
      if (!findRetainCycleOwner(S, ref->getBase(), owner))
        return false;

      if (ref->isFreeIvar()) owner.setLocsFrom(ref);
      owner.Indirect = true;
      return true;
    }

    if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
      VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
      if (!var) return false;
      return considerVariable(var, ref, owner);
    }

    if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
      if (member->isArrow()) return false;

      // Don't count this as an indirect ownership.
      e = member->getBase();
      continue;
    }

    if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
      // Only pay attention to pseudo-objects on property references.
      ObjCPropertyRefExpr *pre
        = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
                                          ->IgnoreParens());
      if (!pre) return false;
      if (pre->isImplicitProperty()) return false;
      ObjCPropertyDecl *property = pre->getExplicitProperty();
      // The property must retain, either directly or via a __strong backing
      // ivar, for a cycle to be possible.
      if (!property->isRetaining() &&
          !(property->getPropertyIvarDecl() &&
            property->getPropertyIvarDecl()->getType()
              .getObjCLifetime() == Qualifiers::OCL_Strong))
          return false;

      owner.Indirect = true;
      if (pre->isSuperReceiver()) {
        owner.Variable = S.getCurMethodDecl()->getSelfDecl();
        if (!owner.Variable)
          return false;
        owner.Loc = pre->getLocation();
        owner.Range = pre->getSourceRange();
        return true;
      }
      e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
                              ->getSourceExpr());
      continue;
    }

    // Array ivars?

    return false;
  }
}

namespace {

/// Searches a block body for an expression that captures \p Variable, and
/// records whether the block also reassigns the variable to nil/0 (which
/// breaks the would-be cycle).
struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
  VarDecl *Variable;
  // First expression found that references Variable (directly or via a free
  // ivar on it).
  Expr *Capturer = nullptr;
  // True if the block assigns a constant zero to Variable.
  bool VarWillBeReased = false;

  FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
      : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
        Variable(variable) {}

  void VisitDeclRefExpr(DeclRefExpr *ref) {
    if (ref->getDecl() == Variable && !Capturer)
      Capturer = ref;
  }

  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
    if (Capturer) return;
    Visit(ref->getBase());
    if (Capturer && ref->isFreeIvar())
      Capturer = ref;
  }

  void VisitBlockExpr(BlockExpr *block) {
    // Look inside nested blocks
    if (block->getBlockDecl()->capturesVariable(Variable))
      Visit(block->getBlockDecl()->getBody());
  }

  void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
    if (Capturer) return;
    if (OVE->getSourceExpr())
      Visit(OVE->getSourceExpr());
  }

  void VisitBinaryOperator(BinaryOperator *BinOp) {
    if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
      return;
    Expr *LHS = BinOp->getLHS();
    if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) {
      if (DRE->getDecl() != Variable)
        return;
      if (Expr *RHS = BinOp->getRHS()) {
        RHS = RHS->IgnoreParenCasts();
        std::optional<llvm::APSInt> Value;
        VarWillBeReased =
            (RHS && (Value = RHS->getIntegerConstantExpr(Context)) &&
             *Value == 0);
      }
    }
  }
};

} // namespace

/// Check whether the given argument is a block which captures a
/// variable.
static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
  assert(owner.Variable && owner.Loc.isValid());

  e = e->IgnoreParenCasts();

  // Look through [^{...} copy] and Block_copy(^{...}).
  if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
    Selector Cmd = ME->getSelector();
    if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
      e = ME->getInstanceReceiver();
      if (!e)
        return nullptr;
      e = e->IgnoreParenCasts();
    }
  } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
    if (CE->getNumArgs() == 1) {
      FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
      if (Fn) {
        const IdentifierInfo *FnI = Fn->getIdentifier();
        if (FnI && FnI->isStr("_Block_copy")) {
          e = CE->getArg(0)->IgnoreParenCasts();
        }
      }
    }
  }

  BlockExpr *block = dyn_cast<BlockExpr>(e);
  if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
    return nullptr;

  FindCaptureVisitor visitor(S.Context, owner.Variable);
  visitor.Visit(block->getBlockDecl()->getBody());
  // A capture that the block later zeroes out cannot form a cycle.
  return visitor.VarWillBeReased ? nullptr : visitor.Capturer;
}

/// Emit the -Warc-retain-cycle warning plus a note pointing at the owner.
static void diagnoseRetainCycle(Sema &S, Expr *capturer,
                                RetainCycleOwner &owner) {
  assert(capturer);
  assert(owner.Variable && owner.Loc.isValid());

  S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
    << owner.Variable << capturer->getSourceRange();
  S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
    << owner.Indirect << owner.Range;
}

/// Check for a keyword selector that starts with the word 'add' or
/// 'set'.
static bool isSetterLikeSelector(Selector sel) {
  if (sel.isUnarySelector()) return false;

  StringRef str = sel.getNameForSlot(0);
  // Ignore any leading underscores (private-method convention).
  while (!str.empty() && str.front() == '_') str = str.substr(1);
  if (str.startswith("set"))
    str = str.substr(3);
  else if (str.startswith("add")) {
    // Specially allow 'addOperationWithBlock:'.
    if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
      return false;
    str = str.substr(3);
  }
  else
    return false;

  // 'set'/'add' must be a whole word: the next character, if any, must not be
  // lowercase (e.g. 'setting:' is not setter-like).
  if (str.empty()) return true;
  return !isLowercase(str.front());
}

/// If \p Message is one of the known NSMutableArray mutation methods, return
/// the index of the argument holding the object being stored (std::nullopt
/// otherwise).  NOTE(review): the switch continues beyond this excerpt.
static std::optional<int>
GetNSMutableArrayArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
  bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
                                                Message->getReceiverInterface(),
                                                NSAPI::ClassId_NSMutableArray);
  if (!IsMutableArray) {
    return std::nullopt;
  }

  Selector Sel = Message->getSelector();

  std::optional<NSAPI::NSArrayMethodKind> MKOpt =
      S.NSAPIObj->getNSArrayMethodKind(Sel);
  if (!MKOpt) {
    return std::nullopt;
  }

  NSAPI::NSArrayMethodKind MK = *MKOpt;

  switch (MK) {
  case NSAPI::NSMutableArr_addObject:
  case NSAPI::NSMutableArr_insertObjectAtIndex:
  case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
    return 0;
  case NSAPI::NSMutableArr_replaceObjectAtIndex:
17522 return 1; 17523 17524 default: 17525 return std::nullopt; 17526 } 17527 17528 return std::nullopt; 17529 } 17530 17531 static std::optional<int> 17532 GetNSMutableDictionaryArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 17533 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 17534 Message->getReceiverInterface(), 17535 NSAPI::ClassId_NSMutableDictionary); 17536 if (!IsMutableDictionary) { 17537 return std::nullopt; 17538 } 17539 17540 Selector Sel = Message->getSelector(); 17541 17542 std::optional<NSAPI::NSDictionaryMethodKind> MKOpt = 17543 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 17544 if (!MKOpt) { 17545 return std::nullopt; 17546 } 17547 17548 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 17549 17550 switch (MK) { 17551 case NSAPI::NSMutableDict_setObjectForKey: 17552 case NSAPI::NSMutableDict_setValueForKey: 17553 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 17554 return 0; 17555 17556 default: 17557 return std::nullopt; 17558 } 17559 17560 return std::nullopt; 17561 } 17562 17563 static std::optional<int> GetNSSetArgumentIndex(Sema &S, 17564 ObjCMessageExpr *Message) { 17565 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 17566 Message->getReceiverInterface(), 17567 NSAPI::ClassId_NSMutableSet); 17568 17569 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 17570 Message->getReceiverInterface(), 17571 NSAPI::ClassId_NSMutableOrderedSet); 17572 if (!IsMutableSet && !IsMutableOrderedSet) { 17573 return std::nullopt; 17574 } 17575 17576 Selector Sel = Message->getSelector(); 17577 17578 std::optional<NSAPI::NSSetMethodKind> MKOpt = 17579 S.NSAPIObj->getNSSetMethodKind(Sel); 17580 if (!MKOpt) { 17581 return std::nullopt; 17582 } 17583 17584 NSAPI::NSSetMethodKind MK = *MKOpt; 17585 17586 switch (MK) { 17587 case NSAPI::NSMutableSet_addObject: 17588 case NSAPI::NSOrderedSet_setObjectAtIndex: 17589 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 17590 case NSAPI::NSOrderedSet_insertObjectAtIndex: 17591 return 0; 
17592 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 17593 return 1; 17594 } 17595 17596 return std::nullopt; 17597 } 17598 17599 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 17600 if (!Message->isInstanceMessage()) { 17601 return; 17602 } 17603 17604 std::optional<int> ArgOpt; 17605 17606 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 17607 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 17608 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 17609 return; 17610 } 17611 17612 int ArgIndex = *ArgOpt; 17613 17614 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 17615 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 17616 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 17617 } 17618 17619 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 17620 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 17621 if (ArgRE->isObjCSelfExpr()) { 17622 Diag(Message->getSourceRange().getBegin(), 17623 diag::warn_objc_circular_container) 17624 << ArgRE->getDecl() << StringRef("'super'"); 17625 } 17626 } 17627 } else { 17628 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 17629 17630 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 17631 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 17632 } 17633 17634 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 17635 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 17636 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 17637 ValueDecl *Decl = ReceiverRE->getDecl(); 17638 Diag(Message->getSourceRange().getBegin(), 17639 diag::warn_objc_circular_container) 17640 << Decl << Decl; 17641 if (!ArgRE->isObjCSelfExpr()) { 17642 Diag(Decl->getLocation(), 17643 diag::note_objc_circular_container_declared_here) 17644 << Decl; 17645 } 17646 } 17647 } 17648 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 17649 if (ObjCIvarRefExpr *IvarArgRE = 
dyn_cast<ObjCIvarRefExpr>(Arg)) { 17650 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 17651 ObjCIvarDecl *Decl = IvarRE->getDecl(); 17652 Diag(Message->getSourceRange().getBegin(), 17653 diag::warn_objc_circular_container) 17654 << Decl << Decl; 17655 Diag(Decl->getLocation(), 17656 diag::note_objc_circular_container_declared_here) 17657 << Decl; 17658 } 17659 } 17660 } 17661 } 17662 } 17663 17664 /// Check a message send to see if it's likely to cause a retain cycle. 17665 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 17666 // Only check instance methods whose selector looks like a setter. 17667 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 17668 return; 17669 17670 // Try to find a variable that the receiver is strongly owned by. 17671 RetainCycleOwner owner; 17672 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 17673 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 17674 return; 17675 } else { 17676 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 17677 owner.Variable = getCurMethodDecl()->getSelfDecl(); 17678 owner.Loc = msg->getSuperLoc(); 17679 owner.Range = msg->getSuperLoc(); 17680 } 17681 17682 // Check whether the receiver is captured by any of the arguments. 17683 const ObjCMethodDecl *MD = msg->getMethodDecl(); 17684 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 17685 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 17686 // noescape blocks should not be retained by the method. 17687 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 17688 continue; 17689 return diagnoseRetainCycle(*this, capturer, owner); 17690 } 17691 } 17692 } 17693 17694 /// Check a property assign to see if it's likely to cause a retain cycle. 
17695 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 17696 RetainCycleOwner owner; 17697 if (!findRetainCycleOwner(*this, receiver, owner)) 17698 return; 17699 17700 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 17701 diagnoseRetainCycle(*this, capturer, owner); 17702 } 17703 17704 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 17705 RetainCycleOwner Owner; 17706 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 17707 return; 17708 17709 // Because we don't have an expression for the variable, we have to set the 17710 // location explicitly here. 17711 Owner.Loc = Var->getLocation(); 17712 Owner.Range = Var->getSourceRange(); 17713 17714 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 17715 diagnoseRetainCycle(*this, Capturer, Owner); 17716 } 17717 17718 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 17719 Expr *RHS, bool isProperty) { 17720 // Check if RHS is an Objective-C object literal, which also can get 17721 // immediately zapped in a weak reference. Note that we explicitly 17722 // allow ObjCStringLiterals, since those are designed to never really die. 17723 RHS = RHS->IgnoreParenImpCasts(); 17724 17725 // This enum needs to match with the 'select' in 17726 // warn_objc_arc_literal_assign (off-by-1). 17727 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 17728 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 17729 return false; 17730 17731 S.Diag(Loc, diag::warn_arc_literal_assign) 17732 << (unsigned) Kind 17733 << (isProperty ? 0 : 1) 17734 << RHS->getSourceRange(); 17735 17736 return true; 17737 } 17738 17739 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 17740 Qualifiers::ObjCLifetime LT, 17741 Expr *RHS, bool isProperty) { 17742 // Strip off any implicit cast added to get to the one ARC-specific. 
17743 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 17744 if (cast->getCastKind() == CK_ARCConsumeObject) { 17745 S.Diag(Loc, diag::warn_arc_retained_assign) 17746 << (LT == Qualifiers::OCL_ExplicitNone) 17747 << (isProperty ? 0 : 1) 17748 << RHS->getSourceRange(); 17749 return true; 17750 } 17751 RHS = cast->getSubExpr(); 17752 } 17753 17754 if (LT == Qualifiers::OCL_Weak && 17755 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 17756 return true; 17757 17758 return false; 17759 } 17760 17761 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 17762 QualType LHS, Expr *RHS) { 17763 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 17764 17765 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 17766 return false; 17767 17768 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 17769 return true; 17770 17771 return false; 17772 } 17773 17774 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 17775 Expr *LHS, Expr *RHS) { 17776 QualType LHSType; 17777 // PropertyRef on LHS type need be directly obtained from 17778 // its declaration as it has a PseudoType. 17779 ObjCPropertyRefExpr *PRE 17780 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 17781 if (PRE && !PRE->isImplicitProperty()) { 17782 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 17783 if (PD) 17784 LHSType = PD->getType(); 17785 } 17786 17787 if (LHSType.isNull()) 17788 LHSType = LHS->getType(); 17789 17790 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 17791 17792 if (LT == Qualifiers::OCL_Weak) { 17793 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 17794 getCurFunction()->markSafeWeakUse(LHS); 17795 } 17796 17797 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 17798 return; 17799 17800 // FIXME. Check for other life times. 
17801 if (LT != Qualifiers::OCL_None) 17802 return; 17803 17804 if (PRE) { 17805 if (PRE->isImplicitProperty()) 17806 return; 17807 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 17808 if (!PD) 17809 return; 17810 17811 unsigned Attributes = PD->getPropertyAttributes(); 17812 if (Attributes & ObjCPropertyAttribute::kind_assign) { 17813 // when 'assign' attribute was not explicitly specified 17814 // by user, ignore it and rely on property type itself 17815 // for lifetime info. 17816 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 17817 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 17818 LHSType->isObjCRetainableType()) 17819 return; 17820 17821 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 17822 if (cast->getCastKind() == CK_ARCConsumeObject) { 17823 Diag(Loc, diag::warn_arc_retained_property_assign) 17824 << RHS->getSourceRange(); 17825 return; 17826 } 17827 RHS = cast->getSubExpr(); 17828 } 17829 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 17830 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 17831 return; 17832 } 17833 } 17834 } 17835 17836 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 17837 17838 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 17839 SourceLocation StmtLoc, 17840 const NullStmt *Body) { 17841 // Do not warn if the body is a macro that expands to nothing, e.g: 17842 // 17843 // #define CALL(x) 17844 // if (condition) 17845 // CALL(0); 17846 if (Body->hasLeadingEmptyMacro()) 17847 return false; 17848 17849 // Get line numbers of statement and body. 
17850 bool StmtLineInvalid; 17851 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 17852 &StmtLineInvalid); 17853 if (StmtLineInvalid) 17854 return false; 17855 17856 bool BodyLineInvalid; 17857 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 17858 &BodyLineInvalid); 17859 if (BodyLineInvalid) 17860 return false; 17861 17862 // Warn if null statement and body are on the same line. 17863 if (StmtLine != BodyLine) 17864 return false; 17865 17866 return true; 17867 } 17868 17869 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 17870 const Stmt *Body, 17871 unsigned DiagID) { 17872 // Since this is a syntactic check, don't emit diagnostic for template 17873 // instantiations, this just adds noise. 17874 if (CurrentInstantiationScope) 17875 return; 17876 17877 // The body should be a null statement. 17878 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 17879 if (!NBody) 17880 return; 17881 17882 // Do the usual checks. 17883 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 17884 return; 17885 17886 Diag(NBody->getSemiLoc(), DiagID); 17887 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 17888 } 17889 17890 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 17891 const Stmt *PossibleBody) { 17892 assert(!CurrentInstantiationScope); // Ensured by caller 17893 17894 SourceLocation StmtLoc; 17895 const Stmt *Body; 17896 unsigned DiagID; 17897 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 17898 StmtLoc = FS->getRParenLoc(); 17899 Body = FS->getBody(); 17900 DiagID = diag::warn_empty_for_body; 17901 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 17902 StmtLoc = WS->getRParenLoc(); 17903 Body = WS->getBody(); 17904 DiagID = diag::warn_empty_while_body; 17905 } else 17906 return; // Neither `for' nor `while'. 17907 17908 // The body should be a null statement. 
17909 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 17910 if (!NBody) 17911 return; 17912 17913 // Skip expensive checks if diagnostic is disabled. 17914 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 17915 return; 17916 17917 // Do the usual checks. 17918 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 17919 return; 17920 17921 // `for(...);' and `while(...);' are popular idioms, so in order to keep 17922 // noise level low, emit diagnostics only if for/while is followed by a 17923 // CompoundStmt, e.g.: 17924 // for (int i = 0; i < n; i++); 17925 // { 17926 // a(i); 17927 // } 17928 // or if for/while is followed by a statement with more indentation 17929 // than for/while itself: 17930 // for (int i = 0; i < n; i++); 17931 // a(i); 17932 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 17933 if (!ProbableTypo) { 17934 bool BodyColInvalid; 17935 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 17936 PossibleBody->getBeginLoc(), &BodyColInvalid); 17937 if (BodyColInvalid) 17938 return; 17939 17940 bool StmtColInvalid; 17941 unsigned StmtCol = 17942 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 17943 if (StmtColInvalid) 17944 return; 17945 17946 if (BodyCol > StmtCol) 17947 ProbableTypo = true; 17948 } 17949 17950 if (ProbableTypo) { 17951 Diag(NBody->getSemiLoc(), DiagID); 17952 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 17953 } 17954 } 17955 17956 //===--- CHECK: Warn on self move with std::move. -------------------------===// 17957 17958 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 17959 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 17960 SourceLocation OpLoc) { 17961 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) 17962 return; 17963 17964 if (inTemplateInstantiation()) 17965 return; 17966 17967 // Strip parens and casts away. 
17968 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 17969 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 17970 17971 // Check for a call expression 17972 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 17973 if (!CE || CE->getNumArgs() != 1) 17974 return; 17975 17976 // Check for a call to std::move 17977 if (!CE->isCallToStdMove()) 17978 return; 17979 17980 // Get argument from std::move 17981 RHSExpr = CE->getArg(0); 17982 17983 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 17984 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 17985 17986 // Two DeclRefExpr's, check that the decls are the same. 17987 if (LHSDeclRef && RHSDeclRef) { 17988 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 17989 return; 17990 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 17991 RHSDeclRef->getDecl()->getCanonicalDecl()) 17992 return; 17993 17994 auto D = Diag(OpLoc, diag::warn_self_move) 17995 << LHSExpr->getType() << LHSExpr->getSourceRange() 17996 << RHSExpr->getSourceRange(); 17997 if (const FieldDecl *F = 17998 getSelfAssignmentClassMemberCandidate(RHSDeclRef->getDecl())) 17999 D << 1 << F 18000 << FixItHint::CreateInsertion(LHSDeclRef->getBeginLoc(), "this->"); 18001 else 18002 D << 0; 18003 return; 18004 } 18005 18006 // Member variables require a different approach to check for self moves. 18007 // MemberExpr's are the same if every nested MemberExpr refers to the same 18008 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 18009 // the base Expr's are CXXThisExpr's. 
18010 const Expr *LHSBase = LHSExpr; 18011 const Expr *RHSBase = RHSExpr; 18012 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 18013 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 18014 if (!LHSME || !RHSME) 18015 return; 18016 18017 while (LHSME && RHSME) { 18018 if (LHSME->getMemberDecl()->getCanonicalDecl() != 18019 RHSME->getMemberDecl()->getCanonicalDecl()) 18020 return; 18021 18022 LHSBase = LHSME->getBase(); 18023 RHSBase = RHSME->getBase(); 18024 LHSME = dyn_cast<MemberExpr>(LHSBase); 18025 RHSME = dyn_cast<MemberExpr>(RHSBase); 18026 } 18027 18028 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 18029 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 18030 if (LHSDeclRef && RHSDeclRef) { 18031 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 18032 return; 18033 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 18034 RHSDeclRef->getDecl()->getCanonicalDecl()) 18035 return; 18036 18037 Diag(OpLoc, diag::warn_self_move) 18038 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange() 18039 << RHSExpr->getSourceRange(); 18040 return; 18041 } 18042 18043 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 18044 Diag(OpLoc, diag::warn_self_move) 18045 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange() 18046 << RHSExpr->getSourceRange(); 18047 } 18048 18049 //===--- Layout compatibility ----------------------------------------------// 18050 18051 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 18052 18053 /// Check if two enumeration types are layout-compatible. 18054 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 18055 // C++11 [dcl.enum] p8: 18056 // Two enumeration types are layout-compatible if they have the same 18057 // underlying type. 18058 return ED1->isComplete() && ED2->isComplete() && 18059 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 18060 } 18061 18062 /// Check if two fields are layout-compatible. 
static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
                               FieldDecl *Field2) {
  // Types must match first; bit-field-ness and width must also agree.
  if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
    return false;

  if (Field1->isBitField() != Field2->isBitField())
    return false;

  if (Field1->isBitField()) {
    // Make sure that the bit-fields are the same length.
    unsigned Bits1 = Field1->getBitWidthValue(C);
    unsigned Bits2 = Field2->getBitWidthValue(C);

    if (Bits1 != Bits2)
      return false;
  }

  return true;
}

/// Check if two standard-layout structs are layout-compatible.
/// (C++11 [class.mem] p17)
static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
                                     RecordDecl *RD2) {
  // If both records are C++ classes, check that base classes match.
  if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) {
    // If one of records is a CXXRecordDecl we are in C++ mode,
    // thus the other one is a CXXRecordDecl, too.
    const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2);
    // Check number of base classes.
    if (D1CXX->getNumBases() != D2CXX->getNumBases())
      return false;

    // Check the base classes.
    // Pairwise walk: equal counts were verified above, so Base2 stays in
    // range as long as Base1 does.
    for (CXXRecordDecl::base_class_const_iterator
             Base1 = D1CXX->bases_begin(),
             BaseEnd1 = D1CXX->bases_end(),
             Base2 = D2CXX->bases_begin();
         Base1 != BaseEnd1;
         ++Base1, ++Base2) {
      if (!isLayoutCompatible(C, Base1->getType(), Base2->getType()))
        return false;
    }
  } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) {
    // If only RD2 is a C++ class, it should have zero base classes.
    if (D2CXX->getNumBases() > 0)
      return false;
  }

  // Check the fields.
  // Fields must correspond one-to-one, in declaration order.
  RecordDecl::field_iterator Field2 = RD2->field_begin(),
                             Field2End = RD2->field_end(),
                             Field1 = RD1->field_begin(),
                             Field1End = RD1->field_end();
  for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
    if (!isLayoutCompatible(C, *Field1, *Field2))
      return false;
  }
  // Leftover fields on either side mean differing field counts.
  if (Field1 != Field1End || Field2 != Field2End)
    return false;

  return true;
}

/// Check if two standard-layout unions are layout-compatible.
/// (C++11 [class.mem] p18)
static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
                                    RecordDecl *RD2) {
  // Greedy bipartite matching: every field of RD1 must pair with some
  // not-yet-matched compatible field of RD2, and vice versa (checked by
  // requiring the set to end up empty).
  llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
  for (auto *Field2 : RD2->fields())
    UnmatchedFields.insert(Field2);

  for (auto *Field1 : RD1->fields()) {
    llvm::SmallPtrSet<FieldDecl *, 8>::iterator
        I = UnmatchedFields.begin(),
        E = UnmatchedFields.end();

    for ( ; I != E; ++I) {
      if (isLayoutCompatible(C, Field1, *I)) {
        bool Result = UnmatchedFields.erase(*I);
        (void) Result;
        assert(Result);
        // Break immediately: erase() invalidated the iterators.
        break;
      }
    }
    if (I == E)
      return false;
  }

  return UnmatchedFields.empty();
}

static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
                               RecordDecl *RD2) {
  if (RD1->isUnion() != RD2->isUnion())
    return false;

  if (RD1->isUnion())
    return isLayoutCompatibleUnion(C, RD1, RD2);
  else
    return isLayoutCompatibleStruct(C, RD1, RD2);
}

/// Check if two types are layout-compatible in C++11 sense.
static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
  if (T1.isNull() || T2.isNull())
    return false;

  // C++11 [basic.types] p11:
  //   If two types T1 and T2 are the same type, then T1 and T2 are
  //   layout-compatible types.
  if (C.hasSameType(T1, T2))
    return true;

  // Compare canonical, unqualified forms from here on.
  T1 = T1.getCanonicalType().getUnqualifiedType();
  T2 = T2.getCanonicalType().getUnqualifiedType();

  const Type::TypeClass TC1 = T1->getTypeClass();
  const Type::TypeClass TC2 = T2->getTypeClass();

  if (TC1 != TC2)
    return false;

  if (TC1 == Type::Enum) {
    return isLayoutCompatible(C,
                              cast<EnumType>(T1)->getDecl(),
                              cast<EnumType>(T2)->getDecl());
  } else if (TC1 == Type::Record) {
    if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
      return false;

    return isLayoutCompatible(C,
                              cast<RecordType>(T1)->getDecl(),
                              cast<RecordType>(T2)->getDecl());
  }

  return false;
}

//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//

/// Given a type tag expression find the type tag itself.
///
/// \param TypeExpr Type tag expression, as it appears in user's code.
///
/// \param VD Declaration of an identifier that appears in a type tag.
///
/// \param MagicValue Type tag magic value.
///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// constant context.
18215 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 18216 const ValueDecl **VD, uint64_t *MagicValue, 18217 bool isConstantEvaluated) { 18218 while(true) { 18219 if (!TypeExpr) 18220 return false; 18221 18222 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 18223 18224 switch (TypeExpr->getStmtClass()) { 18225 case Stmt::UnaryOperatorClass: { 18226 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 18227 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 18228 TypeExpr = UO->getSubExpr(); 18229 continue; 18230 } 18231 return false; 18232 } 18233 18234 case Stmt::DeclRefExprClass: { 18235 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 18236 *VD = DRE->getDecl(); 18237 return true; 18238 } 18239 18240 case Stmt::IntegerLiteralClass: { 18241 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 18242 llvm::APInt MagicValueAPInt = IL->getValue(); 18243 if (MagicValueAPInt.getActiveBits() <= 64) { 18244 *MagicValue = MagicValueAPInt.getZExtValue(); 18245 return true; 18246 } else 18247 return false; 18248 } 18249 18250 case Stmt::BinaryConditionalOperatorClass: 18251 case Stmt::ConditionalOperatorClass: { 18252 const AbstractConditionalOperator *ACO = 18253 cast<AbstractConditionalOperator>(TypeExpr); 18254 bool Result; 18255 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 18256 isConstantEvaluated)) { 18257 if (Result) 18258 TypeExpr = ACO->getTrueExpr(); 18259 else 18260 TypeExpr = ACO->getFalseExpr(); 18261 continue; 18262 } 18263 return false; 18264 } 18265 18266 case Stmt::BinaryOperatorClass: { 18267 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 18268 if (BO->getOpcode() == BO_Comma) { 18269 TypeExpr = BO->getRHS(); 18270 continue; 18271 } 18272 return false; 18273 } 18274 18275 default: 18276 return false; 18277 } 18278 } 18279 } 18280 18281 /// Retrieve the C type corresponding to type tag TypeExpr. 
18282 /// 18283 /// \param TypeExpr Expression that specifies a type tag. 18284 /// 18285 /// \param MagicValues Registered magic values. 18286 /// 18287 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 18288 /// kind. 18289 /// 18290 /// \param TypeInfo Information about the corresponding C type. 18291 /// 18292 /// \param isConstantEvaluated whether the evalaution should be performed in 18293 /// constant context. 18294 /// 18295 /// \returns true if the corresponding C type was found. 18296 static bool GetMatchingCType( 18297 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, 18298 const ASTContext &Ctx, 18299 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> 18300 *MagicValues, 18301 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo, 18302 bool isConstantEvaluated) { 18303 FoundWrongKind = false; 18304 18305 // Variable declaration that has type_tag_for_datatype attribute. 18306 const ValueDecl *VD = nullptr; 18307 18308 uint64_t MagicValue; 18309 18310 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated)) 18311 return false; 18312 18313 if (VD) { 18314 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 18315 if (I->getArgumentKind() != ArgumentKind) { 18316 FoundWrongKind = true; 18317 return false; 18318 } 18319 TypeInfo.Type = I->getMatchingCType(); 18320 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 18321 TypeInfo.MustBeNull = I->getMustBeNull(); 18322 return true; 18323 } 18324 return false; 18325 } 18326 18327 if (!MagicValues) 18328 return false; 18329 18330 llvm::DenseMap<Sema::TypeTagMagicValue, 18331 Sema::TypeTagData>::const_iterator I = 18332 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 18333 if (I == MagicValues->end()) 18334 return false; 18335 18336 TypeInfo = I->second; 18337 return true; 18338 } 18339 18340 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 18341 uint64_t MagicValue, QualType Type, 18342 
bool LayoutCompatible, 18343 bool MustBeNull) { 18344 if (!TypeTagForDatatypeMagicValues) 18345 TypeTagForDatatypeMagicValues.reset( 18346 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 18347 18348 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 18349 (*TypeTagForDatatypeMagicValues)[Magic] = 18350 TypeTagData(Type, LayoutCompatible, MustBeNull); 18351 } 18352 18353 static bool IsSameCharType(QualType T1, QualType T2) { 18354 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 18355 if (!BT1) 18356 return false; 18357 18358 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 18359 if (!BT2) 18360 return false; 18361 18362 BuiltinType::Kind T1Kind = BT1->getKind(); 18363 BuiltinType::Kind T2Kind = BT2->getKind(); 18364 18365 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 18366 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 18367 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 18368 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 18369 } 18370 18371 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 18372 const ArrayRef<const Expr *> ExprArgs, 18373 SourceLocation CallSiteLoc) { 18374 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 18375 bool IsPointerAttr = Attr->getIsPointer(); 18376 18377 // Retrieve the argument representing the 'type_tag'. 
18378 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 18379 if (TypeTagIdxAST >= ExprArgs.size()) { 18380 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 18381 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 18382 return; 18383 } 18384 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 18385 bool FoundWrongKind; 18386 TypeTagData TypeInfo; 18387 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 18388 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 18389 TypeInfo, isConstantEvaluated())) { 18390 if (FoundWrongKind) 18391 Diag(TypeTagExpr->getExprLoc(), 18392 diag::warn_type_tag_for_datatype_wrong_kind) 18393 << TypeTagExpr->getSourceRange(); 18394 return; 18395 } 18396 18397 // Retrieve the argument representing the 'arg_idx'. 18398 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 18399 if (ArgumentIdxAST >= ExprArgs.size()) { 18400 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 18401 << 1 << Attr->getArgumentIdx().getSourceIndex(); 18402 return; 18403 } 18404 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 18405 if (IsPointerAttr) { 18406 // Skip implicit cast of pointer to `void *' (as a function argument). 18407 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 18408 if (ICE->getType()->isVoidPointerType() && 18409 ICE->getCastKind() == CK_BitCast) 18410 ArgumentExpr = ICE->getSubExpr(); 18411 } 18412 QualType ArgumentType = ArgumentExpr->getType(); 18413 18414 // Passing a `void*' pointer shouldn't trigger a warning. 18415 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 18416 return; 18417 18418 if (TypeInfo.MustBeNull) { 18419 // Type tag with matching void type requires a null pointer. 
18420 if (!ArgumentExpr->isNullPointerConstant(Context, 18421 Expr::NPC_ValueDependentIsNotNull)) { 18422 Diag(ArgumentExpr->getExprLoc(), 18423 diag::warn_type_safety_null_pointer_required) 18424 << ArgumentKind->getName() 18425 << ArgumentExpr->getSourceRange() 18426 << TypeTagExpr->getSourceRange(); 18427 } 18428 return; 18429 } 18430 18431 QualType RequiredType = TypeInfo.Type; 18432 if (IsPointerAttr) 18433 RequiredType = Context.getPointerType(RequiredType); 18434 18435 bool mismatch = false; 18436 if (!TypeInfo.LayoutCompatible) { 18437 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 18438 18439 // C++11 [basic.fundamental] p1: 18440 // Plain char, signed char, and unsigned char are three distinct types. 18441 // 18442 // But we treat plain `char' as equivalent to `signed char' or `unsigned 18443 // char' depending on the current char signedness mode. 18444 if (mismatch) 18445 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 18446 RequiredType->getPointeeType())) || 18447 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 18448 mismatch = false; 18449 } else 18450 if (IsPointerAttr) 18451 mismatch = !isLayoutCompatible(Context, 18452 ArgumentType->getPointeeType(), 18453 RequiredType->getPointeeType()); 18454 else 18455 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 18456 18457 if (mismatch) 18458 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 18459 << ArgumentType << ArgumentKind 18460 << TypeInfo.LayoutCompatible << RequiredType 18461 << ArgumentExpr->getSourceRange() 18462 << TypeTagExpr->getSourceRange(); 18463 } 18464 18465 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 18466 CharUnits Alignment) { 18467 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 18468 } 18469 18470 void Sema::DiagnoseMisalignedMembers() { 18471 for (MisalignedMember &m : MisalignedMembers) { 18472 const NamedDecl *ND = m.RD; 18473 if 
(ND->getName().empty()) { 18474 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 18475 ND = TD; 18476 } 18477 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 18478 << m.MD << ND << m.E->getSourceRange(); 18479 } 18480 MisalignedMembers.clear(); 18481 } 18482 18483 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 18484 E = E->IgnoreParens(); 18485 if (!T->isPointerType() && !T->isIntegerType() && !T->isDependentType()) 18486 return; 18487 if (isa<UnaryOperator>(E) && 18488 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 18489 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 18490 if (isa<MemberExpr>(Op)) { 18491 auto *MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 18492 if (MA != MisalignedMembers.end() && 18493 (T->isDependentType() || T->isIntegerType() || 18494 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 18495 Context.getTypeAlignInChars( 18496 T->getPointeeType()) <= MA->Alignment)))) 18497 MisalignedMembers.erase(MA); 18498 } 18499 } 18500 } 18501 18502 void Sema::RefersToMemberWithReducedAlignment( 18503 Expr *E, 18504 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 18505 Action) { 18506 const auto *ME = dyn_cast<MemberExpr>(E); 18507 if (!ME) 18508 return; 18509 18510 // No need to check expressions with an __unaligned-qualified type. 18511 if (E->getType().getQualifiers().hasUnaligned()) 18512 return; 18513 18514 // For a chain of MemberExpr like "a.b.c.d" this list 18515 // will keep FieldDecl's like [d, c, b]. 
18516 SmallVector<FieldDecl *, 4> ReverseMemberChain; 18517 const MemberExpr *TopME = nullptr; 18518 bool AnyIsPacked = false; 18519 do { 18520 QualType BaseType = ME->getBase()->getType(); 18521 if (BaseType->isDependentType()) 18522 return; 18523 if (ME->isArrow()) 18524 BaseType = BaseType->getPointeeType(); 18525 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 18526 if (RD->isInvalidDecl()) 18527 return; 18528 18529 ValueDecl *MD = ME->getMemberDecl(); 18530 auto *FD = dyn_cast<FieldDecl>(MD); 18531 // We do not care about non-data members. 18532 if (!FD || FD->isInvalidDecl()) 18533 return; 18534 18535 AnyIsPacked = 18536 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>()); 18537 ReverseMemberChain.push_back(FD); 18538 18539 TopME = ME; 18540 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens()); 18541 } while (ME); 18542 assert(TopME && "We did not compute a topmost MemberExpr!"); 18543 18544 // Not the scope of this diagnostic. 18545 if (!AnyIsPacked) 18546 return; 18547 18548 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts(); 18549 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase); 18550 // TODO: The innermost base of the member expression may be too complicated. 18551 // For now, just disregard these cases. This is left for future 18552 // improvement. 18553 if (!DRE && !isa<CXXThisExpr>(TopBase)) 18554 return; 18555 18556 // Alignment expected by the whole expression. 18557 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType()); 18558 18559 // No need to do anything else with this case. 18560 if (ExpectedAlignment.isOne()) 18561 return; 18562 18563 // Synthesize offset of the whole access. 18564 CharUnits Offset; 18565 for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain)) 18566 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD)); 18567 18568 // Compute the CompleteObjectAlignment as the alignment of the whole chain. 
18569 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars( 18570 ReverseMemberChain.back()->getParent()->getTypeForDecl()); 18571 18572 // The base expression of the innermost MemberExpr may give 18573 // stronger guarantees than the class containing the member. 18574 if (DRE && !TopME->isArrow()) { 18575 const ValueDecl *VD = DRE->getDecl(); 18576 if (!VD->getType()->isReferenceType()) 18577 CompleteObjectAlignment = 18578 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD)); 18579 } 18580 18581 // Check if the synthesized offset fulfills the alignment. 18582 if (Offset % ExpectedAlignment != 0 || 18583 // It may fulfill the offset it but the effective alignment may still be 18584 // lower than the expected expression alignment. 18585 CompleteObjectAlignment < ExpectedAlignment) { 18586 // If this happens, we want to determine a sensible culprit of this. 18587 // Intuitively, watching the chain of member expressions from right to 18588 // left, we start with the required alignment (as required by the field 18589 // type) but some packed attribute in that chain has reduced the alignment. 18590 // It may happen that another packed structure increases it again. But if 18591 // we are here such increase has not been enough. So pointing the first 18592 // FieldDecl that either is packed or else its RecordDecl is, 18593 // seems reasonable. 
18594 FieldDecl *FD = nullptr; 18595 CharUnits Alignment; 18596 for (FieldDecl *FDI : ReverseMemberChain) { 18597 if (FDI->hasAttr<PackedAttr>() || 18598 FDI->getParent()->hasAttr<PackedAttr>()) { 18599 FD = FDI; 18600 Alignment = std::min( 18601 Context.getTypeAlignInChars(FD->getType()), 18602 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 18603 break; 18604 } 18605 } 18606 assert(FD && "We did not find a packed FieldDecl!"); 18607 Action(E, FD->getParent(), FD, Alignment); 18608 } 18609 } 18610 18611 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 18612 using namespace std::placeholders; 18613 18614 RefersToMemberWithReducedAlignment( 18615 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 18616 _2, _3, _4)); 18617 } 18618 18619 bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) { 18620 if (checkArgCount(*this, TheCall, 1)) 18621 return true; 18622 18623 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 18624 if (A.isInvalid()) 18625 return true; 18626 18627 TheCall->setArg(0, A.get()); 18628 QualType TyA = A.get()->getType(); 18629 18630 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 18631 return true; 18632 18633 TheCall->setType(TyA); 18634 return false; 18635 } 18636 18637 bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) { 18638 if (checkArgCount(*this, TheCall, 2)) 18639 return true; 18640 18641 ExprResult A = TheCall->getArg(0); 18642 ExprResult B = TheCall->getArg(1); 18643 // Do standard promotions between the two arguments, returning their common 18644 // type. 
18645 QualType Res = 18646 UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison); 18647 if (A.isInvalid() || B.isInvalid()) 18648 return true; 18649 18650 QualType TyA = A.get()->getType(); 18651 QualType TyB = B.get()->getType(); 18652 18653 if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType()) 18654 return Diag(A.get()->getBeginLoc(), 18655 diag::err_typecheck_call_different_arg_types) 18656 << TyA << TyB; 18657 18658 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 18659 return true; 18660 18661 TheCall->setArg(0, A.get()); 18662 TheCall->setArg(1, B.get()); 18663 TheCall->setType(Res); 18664 return false; 18665 } 18666 18667 bool Sema::SemaBuiltinElementwiseTernaryMath(CallExpr *TheCall) { 18668 if (checkArgCount(*this, TheCall, 3)) 18669 return true; 18670 18671 Expr *Args[3]; 18672 for (int I = 0; I < 3; ++I) { 18673 ExprResult Converted = UsualUnaryConversions(TheCall->getArg(I)); 18674 if (Converted.isInvalid()) 18675 return true; 18676 Args[I] = Converted.get(); 18677 } 18678 18679 int ArgOrdinal = 1; 18680 for (Expr *Arg : Args) { 18681 if (checkFPMathBuiltinElementType(*this, Arg->getBeginLoc(), Arg->getType(), 18682 ArgOrdinal++)) 18683 return true; 18684 } 18685 18686 for (int I = 1; I < 3; ++I) { 18687 if (Args[0]->getType().getCanonicalType() != 18688 Args[I]->getType().getCanonicalType()) { 18689 return Diag(Args[0]->getBeginLoc(), 18690 diag::err_typecheck_call_different_arg_types) 18691 << Args[0]->getType() << Args[I]->getType(); 18692 } 18693 18694 TheCall->setArg(I, Args[I]); 18695 } 18696 18697 TheCall->setType(Args[0]->getType()); 18698 return false; 18699 } 18700 18701 bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) { 18702 if (checkArgCount(*this, TheCall, 1)) 18703 return true; 18704 18705 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 18706 if (A.isInvalid()) 18707 return true; 18708 18709 TheCall->setArg(0, A.get()); 18710 return false; 18711 } 18712 18713 
bool Sema::SemaBuiltinNonDeterministicValue(CallExpr *TheCall) { 18714 if (checkArgCount(*this, TheCall, 1)) 18715 return true; 18716 18717 ExprResult Arg = TheCall->getArg(0); 18718 QualType TyArg = Arg.get()->getType(); 18719 18720 if (!TyArg->isBuiltinType() && !TyArg->isVectorType()) 18721 return Diag(TheCall->getArg(0)->getBeginLoc(), diag::err_builtin_invalid_arg_type) 18722 << 1 << /*vector, integer or floating point ty*/ 0 << TyArg; 18723 18724 TheCall->setType(TyArg); 18725 return false; 18726 } 18727 18728 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 18729 ExprResult CallResult) { 18730 if (checkArgCount(*this, TheCall, 1)) 18731 return ExprError(); 18732 18733 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 18734 if (MatrixArg.isInvalid()) 18735 return MatrixArg; 18736 Expr *Matrix = MatrixArg.get(); 18737 18738 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 18739 if (!MType) { 18740 Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type) 18741 << 1 << /* matrix ty*/ 1 << Matrix->getType(); 18742 return ExprError(); 18743 } 18744 18745 // Create returned matrix type by swapping rows and columns of the argument 18746 // matrix type. 18747 QualType ResultType = Context.getConstantMatrixType( 18748 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 18749 18750 // Change the return type to the type of the returned matrix. 18751 TheCall->setType(ResultType); 18752 18753 // Update call argument to use the possibly converted matrix argument. 18754 TheCall->setArg(0, Matrix); 18755 return CallResult; 18756 } 18757 18758 // Get and verify the matrix dimensions. 
static std::optional<unsigned>
getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
  SourceLocation ErrorPos;
  // The dimension must be an integer constant expression.
  std::optional<llvm::APSInt> Value =
      Expr->getIntegerConstantExpr(S.Context, &ErrorPos);
  if (!Value) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
        << Name;
    return {};
  }
  uint64_t Dim = Value->getZExtValue();
  // Reject dimensions outside the range ConstantMatrixType supports.
  if (!ConstantMatrixType::isDimensionValid(Dim)) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
        << Name << ConstantMatrixType::getMaxElementsPerDimension();
    return {};
  }
  return Dim;
}

/// Check __builtin_matrix_column_major_load(ptr, rows, cols, stride).
/// Verifies the pointer's element type, the constant row/column dimensions,
/// and that the stride is at least the number of rows; sets the call's type
/// to the resulting constant matrix type.
ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
                                                  ExprResult CallResult) {
  if (!getLangOpts().MatrixTypes) {
    Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
    return ExprError();
  }

  if (checkArgCount(*this, TheCall, 4))
    return ExprError();

  unsigned PtrArgIdx = 0;
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *RowsExpr = TheCall->getArg(1);
  Expr *ColumnsExpr = TheCall->getArg(2);
  Expr *StrideExpr = TheCall->getArg(3);

  // Diagnostics are accumulated so that all argument errors are reported in
  // one pass; the call fails at the end if any were seen.
  bool ArgError = false;

  // Check pointer argument.
  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(0, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  QualType ElementTy;
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    ElementTy = PtrTy->getPointeeType().getUnqualifiedType();

    if (!ConstantMatrixType::isValidElementType(ElementTy)) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << PtrArgIdx + 1 << /* pointer to element ty*/ 2
          << PtrExpr->getType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the expression to size_t.
  auto ApplyArgumentConversions = [this](Expr *E) {
    ExprResult Conv = DefaultLvalueConversion(E);
    if (Conv.isInvalid())
      return Conv;

    return tryConvertExprToType(Conv.get(), Context.getSizeType());
  };

  // Apply conversion to row and column expressions.
  ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
  if (!RowsConv.isInvalid()) {
    RowsExpr = RowsConv.get();
    TheCall->setArg(1, RowsExpr);
  } else
    RowsExpr = nullptr;

  ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
  if (!ColumnsConv.isInvalid()) {
    ColumnsExpr = ColumnsConv.get();
    TheCall->setArg(2, ColumnsExpr);
  } else
    ColumnsExpr = nullptr;

  // If any part of the result matrix type is still pending, just use
  // Context.DependentTy, until all parts are resolved.
  if ((RowsExpr && RowsExpr->isTypeDependent()) ||
      (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
    TheCall->setType(Context.DependentTy);
    return CallResult;
  }

  // Check row and column dimensions.
  std::optional<unsigned> MaybeRows;
  if (RowsExpr)
    MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);

  std::optional<unsigned> MaybeColumns;
  if (ColumnsExpr)
    MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);

  // Check stride argument.
  ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
  if (StrideConv.isInvalid())
    return ExprError();
  StrideExpr = StrideConv.get();
  TheCall->setArg(3, StrideExpr);

  // A constant stride smaller than the row count cannot describe a
  // column-major layout; non-constant strides are checked at runtime only.
  if (MaybeRows) {
    if (std::optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < *MaybeRows) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError || !MaybeRows || !MaybeColumns)
    return ExprError();

  TheCall->setType(
      Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
  return CallResult;
}

/// Check __builtin_matrix_column_major_store(matrix, ptr, stride).
/// Verifies the matrix operand, that the pointer's element type matches the
/// matrix element type and is not const, and that a constant stride is at
/// least the number of rows.
ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
                                                   ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 3))
    return ExprError();

  unsigned PtrArgIdx = 1;
  Expr *MatrixExpr = TheCall->getArg(0);
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *StrideExpr = TheCall->getArg(2);

  bool ArgError = false;

  {
    ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
    if (MatrixConv.isInvalid())
      return MatrixConv;
    MatrixExpr = MatrixConv.get();
    TheCall->setArg(0, MatrixExpr);
  }
  if (MatrixExpr->isTypeDependent()) {
    TheCall->setType(Context.DependentTy);
    return TheCall;
  }

  auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
  if (!MatrixTy) {
    Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /*matrix ty */ 1 << MatrixExpr->getType();
    ArgError = true;
  }

  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(1, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  // Check pointer argument.
  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    QualType ElementTy = PtrTy->getPointeeType();
    // Storing through a const pointer is always an error.
    if (ElementTy.isConstQualified()) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
      ArgError = true;
    }
    ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
    if (MatrixTy &&
        !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
      Diag(PtrExpr->getBeginLoc(),
           diag::err_builtin_matrix_pointer_arg_mismatch)
          << ElementTy << MatrixTy->getElementType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the stride expression to
  // size_t.
  {
    ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
    if (StrideConv.isInvalid())
      return StrideConv;

    StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
    if (StrideConv.isInvalid())
      return StrideConv;
    StrideExpr = StrideConv.get();
    TheCall->setArg(2, StrideExpr);
  }

  // Check stride argument.
  if (MatrixTy) {
    if (std::optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < MatrixTy->getNumRows()) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError)
    return ExprError();

  return CallResult;
}

/// Checks the argument at the given index is a WebAssembly table and if it
/// is, sets ElTy to the element type.
static bool CheckWasmBuiltinArgIsTable(Sema &S, CallExpr *E, unsigned ArgIndex,
                                       QualType &ElTy) {
  Expr *ArgExpr = E->getArg(ArgIndex);
  // WebAssembly tables are modeled as arrays of reference types.
  const auto *ATy = dyn_cast<ArrayType>(ArgExpr->getType());
  if (!ATy || !ATy->getElementType().isWebAssemblyReferenceType()) {
    return S.Diag(ArgExpr->getBeginLoc(),
                  diag::err_wasm_builtin_arg_must_be_table_type)
           << ArgIndex + 1 << ArgExpr->getSourceRange();
  }
  ElTy = ATy->getElementType();
  return false;
}

/// Checks the argument at the given index is an integer.
19008 static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E, 19009 unsigned ArgIndex) { 19010 Expr *ArgExpr = E->getArg(ArgIndex); 19011 if (!ArgExpr->getType()->isIntegerType()) { 19012 return S.Diag(ArgExpr->getBeginLoc(), 19013 diag::err_wasm_builtin_arg_must_be_integer_type) 19014 << ArgIndex + 1 << ArgExpr->getSourceRange(); 19015 } 19016 return false; 19017 } 19018 19019 /// Check that the first argument is a WebAssembly table, and the second 19020 /// is an index to use as index into the table. 19021 bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) { 19022 if (checkArgCount(*this, TheCall, 2)) 19023 return true; 19024 19025 QualType ElTy; 19026 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy)) 19027 return true; 19028 19029 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1)) 19030 return true; 19031 19032 // If all is well, we set the type of TheCall to be the type of the 19033 // element of the table. 19034 // i.e. a table.get on an externref table has type externref, 19035 // or whatever the type of the table element is. 19036 TheCall->setType(ElTy); 19037 19038 return false; 19039 } 19040 19041 /// Check that the first argumnet is a WebAssembly table, the second is 19042 /// an index to use as index into the table and the third is the reference 19043 /// type to set into the table. 19044 bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) { 19045 if (checkArgCount(*this, TheCall, 3)) 19046 return true; 19047 19048 QualType ElTy; 19049 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy)) 19050 return true; 19051 19052 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1)) 19053 return true; 19054 19055 if (!Context.hasSameType(ElTy, TheCall->getArg(2)->getType())) 19056 return true; 19057 19058 return false; 19059 } 19060 19061 /// Check that the argument is a WebAssembly table. 
19062 bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) { 19063 if (checkArgCount(*this, TheCall, 1)) 19064 return true; 19065 19066 QualType ElTy; 19067 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy)) 19068 return true; 19069 19070 return false; 19071 } 19072 19073 /// Check that the first argument is a WebAssembly table, the second is the 19074 /// value to use for new elements (of a type matching the table type), the 19075 /// third value is an integer. 19076 bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) { 19077 if (checkArgCount(*this, TheCall, 3)) 19078 return true; 19079 19080 QualType ElTy; 19081 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy)) 19082 return true; 19083 19084 Expr *NewElemArg = TheCall->getArg(1); 19085 if (!Context.hasSameType(ElTy, NewElemArg->getType())) { 19086 return Diag(NewElemArg->getBeginLoc(), 19087 diag::err_wasm_builtin_arg_must_match_table_element_type) 19088 << 2 << 1 << NewElemArg->getSourceRange(); 19089 } 19090 19091 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 2)) 19092 return true; 19093 19094 return false; 19095 } 19096 19097 /// Check that the first argument is a WebAssembly table, the second is an 19098 /// integer, the third is the value to use to fill the table (of a type 19099 /// matching the table type), and the fourth is an integer. 
19100 bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) { 19101 if (checkArgCount(*this, TheCall, 4)) 19102 return true; 19103 19104 QualType ElTy; 19105 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy)) 19106 return true; 19107 19108 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1)) 19109 return true; 19110 19111 Expr *NewElemArg = TheCall->getArg(2); 19112 if (!Context.hasSameType(ElTy, NewElemArg->getType())) { 19113 return Diag(NewElemArg->getBeginLoc(), 19114 diag::err_wasm_builtin_arg_must_match_table_element_type) 19115 << 3 << 1 << NewElemArg->getSourceRange(); 19116 } 19117 19118 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 3)) 19119 return true; 19120 19121 return false; 19122 } 19123 19124 /// Check that the first argument is a WebAssembly table, the second is also a 19125 /// WebAssembly table (of the same element type), and the third to fifth 19126 /// arguments are integers. 19127 bool Sema::BuiltinWasmTableCopy(CallExpr *TheCall) { 19128 if (checkArgCount(*this, TheCall, 5)) 19129 return true; 19130 19131 QualType XElTy; 19132 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, XElTy)) 19133 return true; 19134 19135 QualType YElTy; 19136 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 1, YElTy)) 19137 return true; 19138 19139 Expr *TableYArg = TheCall->getArg(1); 19140 if (!Context.hasSameType(XElTy, YElTy)) { 19141 return Diag(TableYArg->getBeginLoc(), 19142 diag::err_wasm_builtin_arg_must_match_table_element_type) 19143 << 2 << 1 << TableYArg->getSourceRange(); 19144 } 19145 19146 for (int I = 2; I <= 4; I++) { 19147 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, I)) 19148 return true; 19149 } 19150 19151 return false; 19152 } 19153 19154 /// \brief Enforce the bounds of a TCB 19155 /// CheckTCBEnforcement - Enforces that every function in a named TCB only 19156 /// directly calls other functions in the same TCB as marked by the enforce_tcb 19157 /// and enforce_tcb_leaf attributes. 
19158 void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc, 19159 const NamedDecl *Callee) { 19160 // This warning does not make sense in code that has no runtime behavior. 19161 if (isUnevaluatedContext()) 19162 return; 19163 19164 const NamedDecl *Caller = getCurFunctionOrMethodDecl(); 19165 19166 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>()) 19167 return; 19168 19169 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find 19170 // all TCBs the callee is a part of. 19171 llvm::StringSet<> CalleeTCBs; 19172 for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>()) 19173 CalleeTCBs.insert(A->getTCBName()); 19174 for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>()) 19175 CalleeTCBs.insert(A->getTCBName()); 19176 19177 // Go through the TCBs the caller is a part of and emit warnings if Caller 19178 // is in a TCB that the Callee is not. 19179 for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) { 19180 StringRef CallerTCB = A->getTCBName(); 19181 if (CalleeTCBs.count(CallerTCB) == 0) { 19182 this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation) 19183 << Callee << CallerTCB; 19184 } 19185 } 19186 } 19187