//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "Interp/Context.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;

/// Ranks assigned to the builtin floating-point types; the enumerator order
/// defines the relative rank used when comparing floating-point types.
enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};

/// \returns location that is relevant when searching for Doc comments related
/// to \p D.
115 static SourceLocation getDeclLocForCommentSearch(const Decl *D, 116 SourceManager &SourceMgr) { 117 assert(D); 118 119 // User can not attach documentation to implicit declarations. 120 if (D->isImplicit()) 121 return {}; 122 123 // User can not attach documentation to implicit instantiations. 124 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 125 if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) 126 return {}; 127 } 128 129 if (const auto *VD = dyn_cast<VarDecl>(D)) { 130 if (VD->isStaticDataMember() && 131 VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) 132 return {}; 133 } 134 135 if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) { 136 if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) 137 return {}; 138 } 139 140 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) { 141 TemplateSpecializationKind TSK = CTSD->getSpecializationKind(); 142 if (TSK == TSK_ImplicitInstantiation || 143 TSK == TSK_Undeclared) 144 return {}; 145 } 146 147 if (const auto *ED = dyn_cast<EnumDecl>(D)) { 148 if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) 149 return {}; 150 } 151 if (const auto *TD = dyn_cast<TagDecl>(D)) { 152 // When tag declaration (but not definition!) is part of the 153 // decl-specifier-seq of some other declaration, it doesn't get comment 154 if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition()) 155 return {}; 156 } 157 // TODO: handle comments for function parameters properly. 158 if (isa<ParmVarDecl>(D)) 159 return {}; 160 161 // TODO: we could look up template parameter documentation in the template 162 // documentation. 163 if (isa<TemplateTypeParmDecl>(D) || 164 isa<NonTypeTemplateParmDecl>(D) || 165 isa<TemplateTemplateParmDecl>(D)) 166 return {}; 167 168 // Find declaration location. 
169 // For Objective-C declarations we generally don't expect to have multiple 170 // declarators, thus use declaration starting location as the "declaration 171 // location". 172 // For all other declarations multiple declarators are used quite frequently, 173 // so we use the location of the identifier as the "declaration location". 174 if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) || 175 isa<ObjCPropertyDecl>(D) || 176 isa<RedeclarableTemplateDecl>(D) || 177 isa<ClassTemplateSpecializationDecl>(D) || 178 // Allow association with Y across {} in `typedef struct X {} Y`. 179 isa<TypedefDecl>(D)) 180 return D->getBeginLoc(); 181 182 const SourceLocation DeclLoc = D->getLocation(); 183 if (DeclLoc.isMacroID()) { 184 if (isa<TypedefDecl>(D)) { 185 // If location of the typedef name is in a macro, it is because being 186 // declared via a macro. Try using declaration's starting location as 187 // the "declaration location". 188 return D->getBeginLoc(); 189 } 190 191 if (const auto *TD = dyn_cast<TagDecl>(D)) { 192 // If location of the tag decl is inside a macro, but the spelling of 193 // the tag name comes from a macro argument, it looks like a special 194 // macro like NS_ENUM is being used to define the tag decl. In that 195 // case, adjust the source location to the expansion loc so that we can 196 // attach the comment to the tag decl. 197 if (SourceMgr.isMacroArgExpansion(DeclLoc) && TD->isCompleteDefinition()) 198 return SourceMgr.getExpansionLoc(DeclLoc); 199 } 200 } 201 202 return DeclLoc; 203 } 204 205 RawComment *ASTContext::getRawCommentForDeclNoCacheImpl( 206 const Decl *D, const SourceLocation RepresentativeLocForDecl, 207 const std::map<unsigned, RawComment *> &CommentsInTheFile) const { 208 // If the declaration doesn't map directly to a location in a file, we 209 // can't find the comment. 
210 if (RepresentativeLocForDecl.isInvalid() || 211 !RepresentativeLocForDecl.isFileID()) 212 return nullptr; 213 214 // If there are no comments anywhere, we won't find anything. 215 if (CommentsInTheFile.empty()) 216 return nullptr; 217 218 // Decompose the location for the declaration and find the beginning of the 219 // file buffer. 220 const std::pair<FileID, unsigned> DeclLocDecomp = 221 SourceMgr.getDecomposedLoc(RepresentativeLocForDecl); 222 223 // Slow path. 224 auto OffsetCommentBehindDecl = 225 CommentsInTheFile.lower_bound(DeclLocDecomp.second); 226 227 // First check whether we have a trailing comment. 228 if (OffsetCommentBehindDecl != CommentsInTheFile.end()) { 229 RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second; 230 if ((CommentBehindDecl->isDocumentation() || 231 LangOpts.CommentOpts.ParseAllComments) && 232 CommentBehindDecl->isTrailingComment() && 233 (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) || 234 isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) { 235 236 // Check that Doxygen trailing comment comes after the declaration, starts 237 // on the same line and in the same file as the declaration. 238 if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) == 239 Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first, 240 OffsetCommentBehindDecl->first)) { 241 return CommentBehindDecl; 242 } 243 } 244 } 245 246 // The comment just after the declaration was not a trailing comment. 247 // Let's look at the previous comment. 248 if (OffsetCommentBehindDecl == CommentsInTheFile.begin()) 249 return nullptr; 250 251 auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl; 252 RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second; 253 254 // Check that we actually have a non-member Doxygen comment. 
255 if (!(CommentBeforeDecl->isDocumentation() || 256 LangOpts.CommentOpts.ParseAllComments) || 257 CommentBeforeDecl->isTrailingComment()) 258 return nullptr; 259 260 // Decompose the end of the comment. 261 const unsigned CommentEndOffset = 262 Comments.getCommentEndOffset(CommentBeforeDecl); 263 264 // Get the corresponding buffer. 265 bool Invalid = false; 266 const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first, 267 &Invalid).data(); 268 if (Invalid) 269 return nullptr; 270 271 // Extract text between the comment and declaration. 272 StringRef Text(Buffer + CommentEndOffset, 273 DeclLocDecomp.second - CommentEndOffset); 274 275 // There should be no other declarations or preprocessor directives between 276 // comment and declaration. 277 if (Text.find_first_of(";{}#@") != StringRef::npos) 278 return nullptr; 279 280 return CommentBeforeDecl; 281 } 282 283 RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const { 284 const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr); 285 286 // If the declaration doesn't map directly to a location in a file, we 287 // can't find the comment. 
288 if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) 289 return nullptr; 290 291 if (ExternalSource && !CommentsLoaded) { 292 ExternalSource->ReadComments(); 293 CommentsLoaded = true; 294 } 295 296 if (Comments.empty()) 297 return nullptr; 298 299 const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first; 300 if (!File.isValid()) { 301 return nullptr; 302 } 303 const auto CommentsInThisFile = Comments.getCommentsInFile(File); 304 if (!CommentsInThisFile || CommentsInThisFile->empty()) 305 return nullptr; 306 307 return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile); 308 } 309 310 void ASTContext::addComment(const RawComment &RC) { 311 assert(LangOpts.RetainCommentsFromSystemHeaders || 312 !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin())); 313 Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc); 314 } 315 316 /// If we have a 'templated' declaration for a template, adjust 'D' to 317 /// refer to the actual template. 318 /// If we have an implicit instantiation, adjust 'D' to refer to template. 319 static const Decl &adjustDeclToTemplate(const Decl &D) { 320 if (const auto *FD = dyn_cast<FunctionDecl>(&D)) { 321 // Is this function declaration part of a function template? 322 if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate()) 323 return *FTD; 324 325 // Nothing to do if function is not an implicit instantiation. 326 if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) 327 return D; 328 329 // Function is an implicit instantiation of a function template? 330 if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate()) 331 return *FTD; 332 333 // Function is instantiated from a member definition of a class template? 334 if (const FunctionDecl *MemberDecl = 335 FD->getInstantiatedFromMemberFunction()) 336 return *MemberDecl; 337 338 return D; 339 } 340 if (const auto *VD = dyn_cast<VarDecl>(&D)) { 341 // Static data member is instantiated from a member definition of a class 342 // template? 
343 if (VD->isStaticDataMember()) 344 if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember()) 345 return *MemberDecl; 346 347 return D; 348 } 349 if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) { 350 // Is this class declaration part of a class template? 351 if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate()) 352 return *CTD; 353 354 // Class is an implicit instantiation of a class template or partial 355 // specialization? 356 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) { 357 if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation) 358 return D; 359 llvm::PointerUnion<ClassTemplateDecl *, 360 ClassTemplatePartialSpecializationDecl *> 361 PU = CTSD->getSpecializedTemplateOrPartial(); 362 return PU.is<ClassTemplateDecl *>() 363 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>()) 364 : *static_cast<const Decl *>( 365 PU.get<ClassTemplatePartialSpecializationDecl *>()); 366 } 367 368 // Class is instantiated from a member definition of a class template? 369 if (const MemberSpecializationInfo *Info = 370 CRD->getMemberSpecializationInfo()) 371 return *Info->getInstantiatedFrom(); 372 373 return D; 374 } 375 if (const auto *ED = dyn_cast<EnumDecl>(&D)) { 376 // Enum is instantiated from a member definition of a class template? 377 if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum()) 378 return *MemberDecl; 379 380 return D; 381 } 382 // FIXME: Adjust alias templates? 383 return D; 384 } 385 386 const RawComment *ASTContext::getRawCommentForAnyRedecl( 387 const Decl *D, 388 const Decl **OriginalDecl) const { 389 if (!D) { 390 if (OriginalDecl) 391 OriginalDecl = nullptr; 392 return nullptr; 393 } 394 395 D = &adjustDeclToTemplate(*D); 396 397 // Any comment directly attached to D? 
398 { 399 auto DeclComment = DeclRawComments.find(D); 400 if (DeclComment != DeclRawComments.end()) { 401 if (OriginalDecl) 402 *OriginalDecl = D; 403 return DeclComment->second; 404 } 405 } 406 407 // Any comment attached to any redeclaration of D? 408 const Decl *CanonicalD = D->getCanonicalDecl(); 409 if (!CanonicalD) 410 return nullptr; 411 412 { 413 auto RedeclComment = RedeclChainComments.find(CanonicalD); 414 if (RedeclComment != RedeclChainComments.end()) { 415 if (OriginalDecl) 416 *OriginalDecl = RedeclComment->second; 417 auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second); 418 assert(CommentAtRedecl != DeclRawComments.end() && 419 "This decl is supposed to have comment attached."); 420 return CommentAtRedecl->second; 421 } 422 } 423 424 // Any redeclarations of D that we haven't checked for comments yet? 425 // We can't use DenseMap::iterator directly since it'd get invalid. 426 auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * { 427 auto LookupRes = CommentlessRedeclChains.find(CanonicalD); 428 if (LookupRes != CommentlessRedeclChains.end()) 429 return LookupRes->second; 430 return nullptr; 431 }(); 432 433 for (const auto Redecl : D->redecls()) { 434 assert(Redecl); 435 // Skip all redeclarations that have been checked previously. 
436 if (LastCheckedRedecl) { 437 if (LastCheckedRedecl == Redecl) { 438 LastCheckedRedecl = nullptr; 439 } 440 continue; 441 } 442 const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl); 443 if (RedeclComment) { 444 cacheRawCommentForDecl(*Redecl, *RedeclComment); 445 if (OriginalDecl) 446 *OriginalDecl = Redecl; 447 return RedeclComment; 448 } 449 CommentlessRedeclChains[CanonicalD] = Redecl; 450 } 451 452 if (OriginalDecl) 453 *OriginalDecl = nullptr; 454 return nullptr; 455 } 456 457 void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD, 458 const RawComment &Comment) const { 459 assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments); 460 DeclRawComments.try_emplace(&OriginalD, &Comment); 461 const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl(); 462 RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD); 463 CommentlessRedeclChains.erase(CanonicalDecl); 464 } 465 466 static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod, 467 SmallVectorImpl<const NamedDecl *> &Redeclared) { 468 const DeclContext *DC = ObjCMethod->getDeclContext(); 469 if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) { 470 const ObjCInterfaceDecl *ID = IMD->getClassInterface(); 471 if (!ID) 472 return; 473 // Add redeclared method here. 474 for (const auto *Ext : ID->known_extensions()) { 475 if (ObjCMethodDecl *RedeclaredMethod = 476 Ext->getMethod(ObjCMethod->getSelector(), 477 ObjCMethod->isInstanceMethod())) 478 Redeclared.push_back(RedeclaredMethod); 479 } 480 } 481 } 482 483 void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls, 484 const Preprocessor *PP) { 485 if (Comments.empty() || Decls.empty()) 486 return; 487 488 FileID File; 489 for (Decl *D : Decls) { 490 SourceLocation Loc = D->getLocation(); 491 if (Loc.isValid()) { 492 // See if there are any new comments that are not attached to a decl. 493 // The location doesn't have to be precise - we care only about the file. 
494 File = SourceMgr.getDecomposedLoc(Loc).first; 495 break; 496 } 497 } 498 499 if (File.isInvalid()) 500 return; 501 502 auto CommentsInThisFile = Comments.getCommentsInFile(File); 503 if (!CommentsInThisFile || CommentsInThisFile->empty() || 504 CommentsInThisFile->rbegin()->second->isAttached()) 505 return; 506 507 // There is at least one comment not attached to a decl. 508 // Maybe it should be attached to one of Decls? 509 // 510 // Note that this way we pick up not only comments that precede the 511 // declaration, but also comments that *follow* the declaration -- thanks to 512 // the lookahead in the lexer: we've consumed the semicolon and looked 513 // ahead through comments. 514 515 for (const Decl *D : Decls) { 516 assert(D); 517 if (D->isInvalidDecl()) 518 continue; 519 520 D = &adjustDeclToTemplate(*D); 521 522 const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr); 523 524 if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) 525 continue; 526 527 if (DeclRawComments.count(D) > 0) 528 continue; 529 530 if (RawComment *const DocComment = 531 getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) { 532 cacheRawCommentForDecl(*D, *DocComment); 533 comments::FullComment *FC = DocComment->parse(*this, PP, D); 534 ParsedComments[D->getCanonicalDecl()] = FC; 535 } 536 } 537 } 538 539 comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC, 540 const Decl *D) const { 541 auto *ThisDeclInfo = new (*this) comments::DeclInfo; 542 ThisDeclInfo->CommentDecl = D; 543 ThisDeclInfo->IsFilled = false; 544 ThisDeclInfo->fill(); 545 ThisDeclInfo->CommentDecl = FC->getDecl(); 546 if (!ThisDeclInfo->TemplateParameters) 547 ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters; 548 comments::FullComment *CFC = 549 new (*this) comments::FullComment(FC->getBlocks(), 550 ThisDeclInfo); 551 return CFC; 552 } 553 554 comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const { 555 const 
RawComment *RC = getRawCommentForDeclNoCache(D); 556 return RC ? RC->parse(*this, nullptr, D) : nullptr; 557 } 558 559 comments::FullComment *ASTContext::getCommentForDecl( 560 const Decl *D, 561 const Preprocessor *PP) const { 562 if (!D || D->isInvalidDecl()) 563 return nullptr; 564 D = &adjustDeclToTemplate(*D); 565 566 const Decl *Canonical = D->getCanonicalDecl(); 567 llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos = 568 ParsedComments.find(Canonical); 569 570 if (Pos != ParsedComments.end()) { 571 if (Canonical != D) { 572 comments::FullComment *FC = Pos->second; 573 comments::FullComment *CFC = cloneFullComment(FC, D); 574 return CFC; 575 } 576 return Pos->second; 577 } 578 579 const Decl *OriginalDecl = nullptr; 580 581 const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl); 582 if (!RC) { 583 if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) { 584 SmallVector<const NamedDecl*, 8> Overridden; 585 const auto *OMD = dyn_cast<ObjCMethodDecl>(D); 586 if (OMD && OMD->isPropertyAccessor()) 587 if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl()) 588 if (comments::FullComment *FC = getCommentForDecl(PDecl, PP)) 589 return cloneFullComment(FC, D); 590 if (OMD) 591 addRedeclaredMethods(OMD, Overridden); 592 getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden); 593 for (unsigned i = 0, e = Overridden.size(); i < e; i++) 594 if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP)) 595 return cloneFullComment(FC, D); 596 } 597 else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) { 598 // Attach any tag type's documentation to its typedef if latter 599 // does not have one of its own. 
600 QualType QT = TD->getUnderlyingType(); 601 if (const auto *TT = QT->getAs<TagType>()) 602 if (const Decl *TD = TT->getDecl()) 603 if (comments::FullComment *FC = getCommentForDecl(TD, PP)) 604 return cloneFullComment(FC, D); 605 } 606 else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) { 607 while (IC->getSuperClass()) { 608 IC = IC->getSuperClass(); 609 if (comments::FullComment *FC = getCommentForDecl(IC, PP)) 610 return cloneFullComment(FC, D); 611 } 612 } 613 else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) { 614 if (const ObjCInterfaceDecl *IC = CD->getClassInterface()) 615 if (comments::FullComment *FC = getCommentForDecl(IC, PP)) 616 return cloneFullComment(FC, D); 617 } 618 else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) { 619 if (!(RD = RD->getDefinition())) 620 return nullptr; 621 // Check non-virtual bases. 622 for (const auto &I : RD->bases()) { 623 if (I.isVirtual() || (I.getAccessSpecifier() != AS_public)) 624 continue; 625 QualType Ty = I.getType(); 626 if (Ty.isNull()) 627 continue; 628 if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) { 629 if (!(NonVirtualBase= NonVirtualBase->getDefinition())) 630 continue; 631 632 if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP)) 633 return cloneFullComment(FC, D); 634 } 635 } 636 // Check virtual bases. 637 for (const auto &I : RD->vbases()) { 638 if (I.getAccessSpecifier() != AS_public) 639 continue; 640 QualType Ty = I.getType(); 641 if (Ty.isNull()) 642 continue; 643 if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) { 644 if (!(VirtualBase= VirtualBase->getDefinition())) 645 continue; 646 if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP)) 647 return cloneFullComment(FC, D); 648 } 649 } 650 } 651 return nullptr; 652 } 653 654 // If the RawComment was attached to other redeclaration of this Decl, we 655 // should parse the comment in context of that other Decl. 
This is important 656 // because comments can contain references to parameter names which can be 657 // different across redeclarations. 658 if (D != OriginalDecl && OriginalDecl) 659 return getCommentForDecl(OriginalDecl, PP); 660 661 comments::FullComment *FC = RC->parse(*this, PP, D); 662 ParsedComments[Canonical] = FC; 663 return FC; 664 } 665 666 void 667 ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID, 668 const ASTContext &C, 669 TemplateTemplateParmDecl *Parm) { 670 ID.AddInteger(Parm->getDepth()); 671 ID.AddInteger(Parm->getPosition()); 672 ID.AddBoolean(Parm->isParameterPack()); 673 674 TemplateParameterList *Params = Parm->getTemplateParameters(); 675 ID.AddInteger(Params->size()); 676 for (TemplateParameterList::const_iterator P = Params->begin(), 677 PEnd = Params->end(); 678 P != PEnd; ++P) { 679 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) { 680 ID.AddInteger(0); 681 ID.AddBoolean(TTP->isParameterPack()); 682 const TypeConstraint *TC = TTP->getTypeConstraint(); 683 ID.AddBoolean(TC != nullptr); 684 if (TC) 685 TC->getImmediatelyDeclaredConstraint()->Profile(ID, C, 686 /*Canonical=*/true); 687 if (TTP->isExpandedParameterPack()) { 688 ID.AddBoolean(true); 689 ID.AddInteger(TTP->getNumExpansionParameters()); 690 } else 691 ID.AddBoolean(false); 692 continue; 693 } 694 695 if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) { 696 ID.AddInteger(1); 697 ID.AddBoolean(NTTP->isParameterPack()); 698 const Expr *TC = NTTP->getPlaceholderTypeConstraint(); 699 ID.AddBoolean(TC != nullptr); 700 ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr()); 701 if (TC) 702 TC->Profile(ID, C, /*Canonical=*/true); 703 if (NTTP->isExpandedParameterPack()) { 704 ID.AddBoolean(true); 705 ID.AddInteger(NTTP->getNumExpansionTypes()); 706 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) { 707 QualType T = NTTP->getExpansionType(I); 708 ID.AddPointer(T.getCanonicalType().getAsOpaquePtr()); 709 } 
710 } else 711 ID.AddBoolean(false); 712 continue; 713 } 714 715 auto *TTP = cast<TemplateTemplateParmDecl>(*P); 716 ID.AddInteger(2); 717 Profile(ID, C, TTP); 718 } 719 Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause(); 720 ID.AddBoolean(RequiresClause != nullptr); 721 if (RequiresClause) 722 RequiresClause->Profile(ID, C, /*Canonical=*/true); 723 } 724 725 static Expr * 726 canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC, 727 QualType ConstrainedType) { 728 // This is a bit ugly - we need to form a new immediately-declared 729 // constraint that references the new parameter; this would ideally 730 // require semantic analysis (e.g. template<C T> struct S {}; - the 731 // converted arguments of C<T> could be an argument pack if C is 732 // declared as template<typename... T> concept C = ...). 733 // We don't have semantic analysis here so we dig deep into the 734 // ready-made constraint expr and change the thing manually. 735 ConceptSpecializationExpr *CSE; 736 if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC)) 737 CSE = cast<ConceptSpecializationExpr>(Fold->getLHS()); 738 else 739 CSE = cast<ConceptSpecializationExpr>(IDC); 740 ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments(); 741 SmallVector<TemplateArgument, 3> NewConverted; 742 NewConverted.reserve(OldConverted.size()); 743 if (OldConverted.front().getKind() == TemplateArgument::Pack) { 744 // The case: 745 // template<typename... 
T> concept C = true; 746 // template<C<int> T> struct S; -> constraint is C<{T, int}> 747 NewConverted.push_back(ConstrainedType); 748 llvm::append_range(NewConverted, 749 OldConverted.front().pack_elements().drop_front(1)); 750 TemplateArgument NewPack(NewConverted); 751 752 NewConverted.clear(); 753 NewConverted.push_back(NewPack); 754 assert(OldConverted.size() == 1 && 755 "Template parameter pack should be the last parameter"); 756 } else { 757 assert(OldConverted.front().getKind() == TemplateArgument::Type && 758 "Unexpected first argument kind for immediately-declared " 759 "constraint"); 760 NewConverted.push_back(ConstrainedType); 761 llvm::append_range(NewConverted, OldConverted.drop_front(1)); 762 } 763 auto *CSD = ImplicitConceptSpecializationDecl::Create( 764 C, CSE->getNamedConcept()->getDeclContext(), 765 CSE->getNamedConcept()->getLocation(), NewConverted); 766 767 Expr *NewIDC = ConceptSpecializationExpr::Create( 768 C, CSE->getNamedConcept(), CSD, nullptr, CSE->isInstantiationDependent(), 769 CSE->containsUnexpandedParameterPack()); 770 771 if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC)) 772 NewIDC = new (C) CXXFoldExpr( 773 OrigFold->getType(), /*Callee*/ nullptr, SourceLocation(), NewIDC, 774 BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr, 775 SourceLocation(), /*NumExpansions=*/std::nullopt); 776 return NewIDC; 777 } 778 779 TemplateTemplateParmDecl * 780 ASTContext::getCanonicalTemplateTemplateParmDecl( 781 TemplateTemplateParmDecl *TTP) const { 782 // Check if we already have a canonical template template parameter. 783 llvm::FoldingSetNodeID ID; 784 CanonicalTemplateTemplateParm::Profile(ID, *this, TTP); 785 void *InsertPos = nullptr; 786 CanonicalTemplateTemplateParm *Canonical 787 = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); 788 if (Canonical) 789 return Canonical->getParam(); 790 791 // Build a canonical template parameter list. 
792 TemplateParameterList *Params = TTP->getTemplateParameters(); 793 SmallVector<NamedDecl *, 4> CanonParams; 794 CanonParams.reserve(Params->size()); 795 for (TemplateParameterList::const_iterator P = Params->begin(), 796 PEnd = Params->end(); 797 P != PEnd; ++P) { 798 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) { 799 TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create( 800 *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(), 801 TTP->getDepth(), TTP->getIndex(), nullptr, false, 802 TTP->isParameterPack(), TTP->hasTypeConstraint(), 803 TTP->isExpandedParameterPack() 804 ? std::optional<unsigned>(TTP->getNumExpansionParameters()) 805 : std::nullopt); 806 if (const auto *TC = TTP->getTypeConstraint()) { 807 QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0); 808 Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint( 809 *this, TC->getImmediatelyDeclaredConstraint(), 810 ParamAsArgument); 811 NewTTP->setTypeConstraint( 812 NestedNameSpecifierLoc(), 813 DeclarationNameInfo(TC->getNamedConcept()->getDeclName(), 814 SourceLocation()), /*FoundDecl=*/nullptr, 815 // Actually canonicalizing a TemplateArgumentLoc is difficult so we 816 // simply omit the ArgsAsWritten 817 TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC); 818 } 819 CanonParams.push_back(NewTTP); 820 } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) { 821 QualType T = getCanonicalType(NTTP->getType()); 822 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); 823 NonTypeTemplateParmDecl *Param; 824 if (NTTP->isExpandedParameterPack()) { 825 SmallVector<QualType, 2> ExpandedTypes; 826 SmallVector<TypeSourceInfo *, 2> ExpandedTInfos; 827 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) { 828 ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I))); 829 ExpandedTInfos.push_back( 830 getTrivialTypeSourceInfo(ExpandedTypes.back())); 831 } 832 833 Param = NonTypeTemplateParmDecl::Create(*this, 
getTranslationUnitDecl(), 834 SourceLocation(), 835 SourceLocation(), 836 NTTP->getDepth(), 837 NTTP->getPosition(), nullptr, 838 T, 839 TInfo, 840 ExpandedTypes, 841 ExpandedTInfos); 842 } else { 843 Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(), 844 SourceLocation(), 845 SourceLocation(), 846 NTTP->getDepth(), 847 NTTP->getPosition(), nullptr, 848 T, 849 NTTP->isParameterPack(), 850 TInfo); 851 } 852 if (AutoType *AT = T->getContainedAutoType()) { 853 if (AT->isConstrained()) { 854 Param->setPlaceholderTypeConstraint( 855 canonicalizeImmediatelyDeclaredConstraint( 856 *this, NTTP->getPlaceholderTypeConstraint(), T)); 857 } 858 } 859 CanonParams.push_back(Param); 860 861 } else 862 CanonParams.push_back(getCanonicalTemplateTemplateParmDecl( 863 cast<TemplateTemplateParmDecl>(*P))); 864 } 865 866 Expr *CanonRequiresClause = nullptr; 867 if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause()) 868 CanonRequiresClause = RequiresClause; 869 870 TemplateTemplateParmDecl *CanonTTP 871 = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(), 872 SourceLocation(), TTP->getDepth(), 873 TTP->getPosition(), 874 TTP->isParameterPack(), 875 nullptr, 876 TemplateParameterList::Create(*this, SourceLocation(), 877 SourceLocation(), 878 CanonParams, 879 SourceLocation(), 880 CanonRequiresClause)); 881 882 // Get the new insert position for the node we care about. 883 Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); 884 assert(!Canonical && "Shouldn't be in the map!"); 885 (void)Canonical; 886 887 // Create the canonical template template parameter entry. 
888 Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP); 889 CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos); 890 return CanonTTP; 891 } 892 893 TargetCXXABI::Kind ASTContext::getCXXABIKind() const { 894 auto Kind = getTargetInfo().getCXXABI().getKind(); 895 return getLangOpts().CXXABI.value_or(Kind); 896 } 897 898 CXXABI *ASTContext::createCXXABI(const TargetInfo &T) { 899 if (!LangOpts.CPlusPlus) return nullptr; 900 901 switch (getCXXABIKind()) { 902 case TargetCXXABI::AppleARM64: 903 case TargetCXXABI::Fuchsia: 904 case TargetCXXABI::GenericARM: // Same as Itanium at this level 905 case TargetCXXABI::iOS: 906 case TargetCXXABI::WatchOS: 907 case TargetCXXABI::GenericAArch64: 908 case TargetCXXABI::GenericMIPS: 909 case TargetCXXABI::GenericItanium: 910 case TargetCXXABI::WebAssembly: 911 case TargetCXXABI::XL: 912 return CreateItaniumCXXABI(*this); 913 case TargetCXXABI::Microsoft: 914 return CreateMicrosoftCXXABI(*this); 915 } 916 llvm_unreachable("Invalid CXXABI type!"); 917 } 918 919 interp::Context &ASTContext::getInterpContext() { 920 if (!InterpContext) { 921 InterpContext.reset(new interp::Context(*this)); 922 } 923 return *InterpContext.get(); 924 } 925 926 ParentMapContext &ASTContext::getParentMapContext() { 927 if (!ParentMapCtx) 928 ParentMapCtx.reset(new ParentMapContext(*this)); 929 return *ParentMapCtx.get(); 930 } 931 932 static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI, 933 const LangOptions &LangOpts) { 934 switch (LangOpts.getAddressSpaceMapMangling()) { 935 case LangOptions::ASMM_Target: 936 return TI.useAddressSpaceMapMangling(); 937 case LangOptions::ASMM_On: 938 return true; 939 case LangOptions::ASMM_Off: 940 return false; 941 } 942 llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything."); 943 } 944 945 ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM, 946 IdentifierTable &idents, SelectorTable &sels, 947 Builtin::Context &builtins, TranslationUnitKind TUKind) 948 : 
      ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      SubstTemplateTemplateParmPacks(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  // Every ASTContext owns exactly one TranslationUnitDecl, created up front.
  addTranslationUnitDecl();
}

/// Tear down all allocations and side tables owned by this context.
/// Called from the destructor.
void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
       const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }
  ASTRecordLayouts.clear();

  // AttrVecs are placement-allocated in the BumpAlloc; run their destructors
  // explicitly since the allocator will not.
  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
       AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  // Likewise for the placement-allocated PerModuleInitializers entries.
  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}

ASTContext::~ASTContext() { cleanup(); }

/// Restrict AST traversal (e.g. by ASTMatchers) to the given top-level
/// declarations; resets any cached parent maps, which are scope-dependent.
void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}

/// Register a callback to run over \p Data when this context is destroyed.
void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}

/// Dump statistics about the AST (type counts and sizes, implicit special
/// members, allocator usage) to stderr.
void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << " " << Types.size() << " types total.\n";

  // One counter per concrete Type class, generated from TypeNodes.inc.
  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
  // Walk the same TYPE list again to print each non-zero counter.
#define TYPE(Name, Parent)                                       \
  if (counts[Idx])                                               \
    llvm::errs() << " " << counts[Idx] << " " << #Name           \
                 << " types, " << sizeof(Name##Type) << " each " \
                 << "(" << counts[Idx] * sizeof(Name##Type)      \
                 << " bytes)\n";                                 \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

/// Record that the (hidden) definition \p ND was merged into module \p M,
/// optionally notifying the AST mutation listener of the redefinition.
void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  // Key the map by the canonical declaration so all redeclarations share it.
  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}

/// Remove duplicate modules from \p ND's merged-definition list while
/// preserving the order of first occurrence.
void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
  if (It == MergedDefModules.end())
    return;

  // Null out repeat entries, then erase all nulls in one pass.
  auto &Merged = It->second;
  llvm::DenseSet<Module*> Found;
  for (Module *&M : Merged)
    if (!Found.insert(M).second)
      M = nullptr;
  llvm::erase_value(Merged, nullptr);
}

ArrayRef<Module *>
ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
  auto MergedIt =
      MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
  if (MergedIt == MergedDefModules.end())
    return std::nullopt;
  return MergedIt->second;
}

/// Materialize any lazily-deserialized initializers for this module by
/// asking the external AST source for the corresponding declarations.
void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  // Move the IDs aside first: GetExternalDecl may re-enter and we must not
  // iterate a container that could be mutated underneath us.
  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}

/// Add \p D to the list of initializers for module \p M.
void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        D = OnlyDecl;
    }
  }

  // Placement-allocate the per-module list on first use.
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
}

/// Record initializers for \p M by external-decl ID, to be resolved lazily.
void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
                                 IDs.begin(), IDs.end());
}

/// Return all initializers for module \p M, resolving lazy entries first.
ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
  auto It = ModuleInitializers.find(M);
  if (It == ModuleInitializers.end())
    return std::nullopt;

  auto *Inits = It->second;
  Inits->resolve(*this);
  return Inits->Initializers;
}

/// Lazily create the implicit extern "C" context declaration.
ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}

/// Create an implicit builtin template (e.g. __make_integer_seq) and attach
/// it to the translation unit.
BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(BuiltinTemplate);

  return BuiltinTemplate;
}

BuiltinTemplateDecl *
ASTContext::getMakeIntegerSeqDecl() const {
  if (!MakeIntegerSeqDecl)
    MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
                                                  getMakeIntegerSeqName());
  return MakeIntegerSeqDecl;
}

BuiltinTemplateDecl *
ASTContext::getTypePackElementDecl() const {
  if (!TypePackElementDecl)
    TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
                                                   getTypePackElementName());
  return TypePackElementDecl;
}

/// Build an implicit (compiler-synthesized) record with the given name at
/// translation-unit scope; a CXXRecordDecl in C++ mode, RecordDecl otherwise.
RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}

/// Build an implicit typedef of \p T named \p Name at translation-unit scope.
TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  TypedefDecl *NewDecl = TypedefDecl::Create(
      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}

/// Allocate a BuiltinType node of kind \p K and store its canonical type
/// into \p R.
void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

/// Initialize all built-in types for the given (and optional auxiliary)
/// target. Must be called exactly once per context.
void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");
  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension (fixed-point types)
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Target.hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
  InitBuiltinType(Id##Ty, BuiltinType::Id);
#include
"clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
  InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    // In OpenCL with the generic address space, void* points into the
    // generic address space rather than the default one.
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(getCanonicalType(
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
    VoidPtrTy = getPointerType(VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord("_GUID");
    getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
  }
}

DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

/// Return (creating on first use) the attribute vector for \p D.
AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    // Placement-allocate in the context's allocator; destroyed in cleanup().
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    // The vector was placement-allocated; destroy it explicitly.
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

// FIXME: Remove ?
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}

ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  if (Pos == TemplateOrInstantiation.end())
    return {};

  return Pos->second;
}

/// Note that static data member \p Inst was instantiated from \p Tmpl.
void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                           TemplateSpecializationKind TSK,
                                       SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}

void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}

/// Return the pattern using-declaration that \p UUD was instantiated from,
/// or null if it is not an instantiation.
NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  auto Pos = InstantiatedFromUsingDecl.find(UUD);
  if (Pos == InstantiatedFromUsingDecl.end())
    return nullptr;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

/// Return the pattern using-enum declaration that \p UUD was instantiated
/// from, or null if it is not an instantiation.
UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  auto Pos = InstantiatedFromUsingEnumDecl.find(UUD);
  if (Pos == InstantiatedFromUsingEnumDecl.end())
    return nullptr;

  return Pos->second;
}

void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}

/// Return the pattern shadow declaration that \p Inst was instantiated from,
/// or null if it is not an instantiation.
UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
    = InstantiatedFromUsingShadowDecl.find(Inst);
  if (Pos == InstantiatedFromUsingShadowDecl.end())
    return nullptr;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

/// Return the unnamed field that \p Field was instantiated from, or null.
FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
    = InstantiatedFromUnnamedFieldDecl.find(Field);
  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
    return nullptr;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
                                                FieldDecl *Tmpl) {
  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
         "Already noted what unnamed field was instantiated from");

  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).begin();
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).end();
}

unsigned
ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
  auto Range = overridden_methods(Method);
  return Range.end() - Range.begin();
}

/// Return the range of methods that \p Method overrides; empty when none
/// have been recorded. Lookup is keyed on the canonical declaration.
ASTContext::overridden_method_range
ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
      OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return overridden_method_range(nullptr, nullptr);
  return overridden_method_range(Pos->second.begin(), Pos->second.end());
}

void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(Overridden);
}

/// Append all methods overridden by \p D (C++ or Objective-C) to
/// \p Overridden; a no-op for other declaration kinds.
void ASTContext::getOverriddenMethods(
    const NamedDecl *D,
    SmallVectorImpl<const NamedDecl *> &Overridden) const {
  assert(D);

  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
    Overridden.append(overridden_methods_begin(CXXMethod),
                      overridden_methods_end(CXXMethod));
    return;
  }

  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
  if (!Method)
    return;

  SmallVector<const ObjCMethodDecl *, 8> OverDecls;
  Method->getOverriddenMethods(OverDecls);
  Overridden.append(OverDecls.begin(), OverDecls.end());
}

/// Append \p Import to the singly-linked chain of local (non-AST-file)
/// import declarations.
void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
  assert(!Import->getNextLocalImport() &&
         "Import declaration already in the chain");
  assert(!Import->isFromASTFile() && "Non-local import declaration");
  if (!FirstLocalImport) {
    FirstLocalImport = Import;
    LastLocalImport = Import;
    return;
  }

  LastLocalImport->setNextLocalImport(Import);
  LastLocalImport = Import;
}

//===----------------------------------------------------------------------===//
// Type Sizing and Analysis
//===----------------------------------------------------------------------===//

/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
/// scalar floating point type.
const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
  switch (T->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a floating point type!");
  case BuiltinType::BFloat16:
    return Target->getBFloat16Format();
  case BuiltinType::Float16:
    return Target->getHalfFormat();
  case BuiltinType::Half:
    // For HLSL, when the native half type is disabled, half will be treat as
    // float.
    if (getLangOpts().HLSL)
      // NOTE: the first 'else' below binds to this inner 'if' (dangling-else
      // rule): HLSL + native half => half format, HLSL without it => float.
      if (getLangOpts().NativeHalfType)
        return Target->getHalfFormat();
      else
        return Target->getFloatFormat();
    else
      return Target->getHalfFormat();
  case BuiltinType::Float: return Target->getFloatFormat();
  case BuiltinType::Double: return Target->getDoubleFormat();
  case BuiltinType::Ibm128:
    return Target->getIbm128Format();
  case BuiltinType::LongDouble:
    // On OpenMP device compilations the host (aux) target's format is used so
    // host and device agree on the representation.
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
      return AuxTarget->getLongDoubleFormat();
    return Target->getLongDoubleFormat();
  case BuiltinType::Float128:
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
      return AuxTarget->getFloat128Format();
    return Target->getFloat128Format();
  }
}

/// Compute the alignment (in bits, returned as CharUnits) of declaration
/// \p D, taking alignment attributes, packing, field position, and target
/// rules into account. \p ForAlignof selects alignof semantics (e.g. for
/// references, the referenced type's alignment).
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();

  bool UseAlignAttrOnly = false;
  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
    Align = AlignFromAttr;

    // __attribute__((aligned)) can increase or decrease alignment
    // *except* on a struct or struct member, where it only increases
    // alignment unless 'packed' is also specified.
    //
    // It is an error for alignas to decrease alignment, so we can
    // ignore that possibility; Sema should diagnose it.
    if (isa<FieldDecl>(D)) {
      UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
        cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
    } else {
      UseAlignAttrOnly = true;
    }
  }
  else if (isa<FieldDecl>(D))
    UseAlignAttrOnly =
      D->hasAttr<PackedAttr>() ||
      cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();

  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const auto *RT = T->getAs<ReferenceType>()) {
      // alignof(reference) is the alignment of the referenced type; for
      // layout purposes a reference occupies a pointer.
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      // __unaligned-qualified types drop back to byte alignment.
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
      if (const auto *VD = dyn_cast<VarDecl>(D)) {
        if (VD->hasGlobalStorage() && !ForAlignof) {
          uint64_t TypeSize = getTypeSize(T.getTypePtr());
          Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
        }
      }
    }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // a max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
1780 if (!Parent->isInvalidDecl()) { 1781 const ASTRecordLayout &Layout = getASTRecordLayout(Parent); 1782 1783 // Start with the record's overall alignment. 1784 unsigned FieldAlign = toBits(Layout.getAlignment()); 1785 1786 // Use the GCD of that and the offset within the record. 1787 uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex()); 1788 if (Offset > 0) { 1789 // Alignment is always a power of 2, so the GCD will be a power of 2, 1790 // which means we get to do this crazy thing instead of Euclid's. 1791 uint64_t LowBitOfOffset = Offset & (~Offset + 1); 1792 if (LowBitOfOffset < FieldAlign) 1793 FieldAlign = static_cast<unsigned>(LowBitOfOffset); 1794 } 1795 1796 Align = std::min(Align, FieldAlign); 1797 } 1798 } 1799 } 1800 1801 // Some targets have hard limitation on the maximum requestable alignment in 1802 // aligned attribute for static variables. 1803 const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute(); 1804 const auto *VD = dyn_cast<VarDecl>(D); 1805 if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static) 1806 Align = std::min(Align, MaxAlignedAttr); 1807 1808 return toCharUnitsFromBits(Align); 1809 } 1810 1811 CharUnits ASTContext::getExnObjectAlignment() const { 1812 return toCharUnitsFromBits(Target->getExnObjectAlignment()); 1813 } 1814 1815 // getTypeInfoDataSizeInChars - Return the size of a type, in 1816 // chars. If the type is a record, its data size is returned. This is 1817 // the size of the memcpy that's performed when assigning this type 1818 // using a trivial copy/move assignment operator. 1819 TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { 1820 TypeInfoChars Info = getTypeInfoInChars(T); 1821 1822 // In C++, objects can sometimes be allocated into the tail padding 1823 // of a base-class subobject. We decide whether that's possible 1824 // during class layout, so here we can just trust the layout results. 
1825 if (getLangOpts().CPlusPlus) { 1826 if (const auto *RT = T->getAs<RecordType>()) { 1827 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1828 Info.Width = layout.getDataSize(); 1829 } 1830 } 1831 1832 return Info; 1833 } 1834 1835 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1836 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1837 TypeInfoChars 1838 static getConstantArrayInfoInChars(const ASTContext &Context, 1839 const ConstantArrayType *CAT) { 1840 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1841 uint64_t Size = CAT->getSize().getZExtValue(); 1842 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1843 (uint64_t)(-1)/Size) && 1844 "Overflow in array type char size evaluation"); 1845 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1846 unsigned Align = EltInfo.Align.getQuantity(); 1847 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1848 Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1849 Width = llvm::alignTo(Width, Align); 1850 return TypeInfoChars(CharUnits::fromQuantity(Width), 1851 CharUnits::fromQuantity(Align), 1852 EltInfo.AlignRequirement); 1853 } 1854 1855 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1856 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1857 return getConstantArrayInfoInChars(*this, CAT); 1858 TypeInfo Info = getTypeInfo(T); 1859 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1860 toCharUnitsFromBits(Info.Align), Info.AlignRequirement); 1861 } 1862 1863 TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { 1864 return getTypeInfoInChars(T.getTypePtr()); 1865 } 1866 1867 bool ASTContext::isPromotableIntegerType(QualType T) const { 1868 // HLSL doesn't promote all small integer types to int, it 1869 // just uses the rank-based promotion rules for all types. 
  // (continues isPromotableIntegerType, begun above)
  if (getLangOpts().HLSL)
    return false;

  if (const auto *BT = T->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
      return true;
    default:
      return false;
    }

  // Enumerated types are promotable to their compatible integer types
  // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
  if (const auto *ET = T->getAs<EnumType>()) {
    // Scoped enums never promote; an enum without a computed promotion type
    // (e.g. still being defined) cannot be treated as promotable either.
    if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() ||
        ET->getDecl()->isScoped())
      return false;

    return true;
  }

  return false;
}

/// Return true if an object of type \p T carries an alignment requirement
/// (from an attribute on a typedef/enum/record) rather than only the natural
/// ABI alignment.
bool ASTContext::isAlignmentRequired(const Type *T) const {
  return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None;
}

bool ASTContext::isAlignmentRequired(QualType T) const {
  return isAlignmentRequired(T.getTypePtr());
}

/// Return the alignment of \p T, in bits, if it can be determined without
/// completing the type; otherwise return 0. \p NeedsPreferredAlignment selects
/// the preferred (possibly over-aligned) value instead of the ABI alignment.
unsigned ASTContext::getTypeAlignIfKnown(QualType T,
                                         bool NeedsPreferredAlignment) const {
  // An alignment on a typedef overrides anything else.
  if (const auto *TT = T->getAs<TypedefType>())
    if (unsigned Align = TT->getDecl()->getMaxAlignment())
      return Align;

  // If we have an (array of) complete type, we're done.
  T = getBaseElementType(T);
  if (!T->isIncompleteType())
    return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);

  // If we had an array type, its element type might be a typedef
  // type with an alignment attribute.
  if (const auto *TT = T->getAs<TypedefType>())
    if (unsigned Align = TT->getDecl()->getMaxAlignment())
      return Align;

  // Otherwise, see if the declaration of the type had an attribute.
  if (const auto *TT = T->getAs<TagType>())
    return TT->getDecl()->getMaxAlignment();

  return 0;
}

/// Memoizing wrapper around getTypeInfoImpl: returns the cached size/alignment
/// for \p T, computing and caching it on first use.
TypeInfo ASTContext::getTypeInfo(const Type *T) const {
  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
  if (I != MemoizedTypeInfo.end())
    return I->second;

  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
  TypeInfo TI = getTypeInfoImpl(T);
  MemoizedTypeInfo[T] = TI;
  return TI;
}

/// getTypeInfoImpl - Return the size of the specified type, in bits. This
/// method does not work on incomplete types.
///
/// FIXME: Pointers into different addr spaces could have different sizes and
/// alignment requirements: getPointerInfo should take an AddrSpace, this
/// should take a QualType, &c.
TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
  uint64_t Width = 0;
  unsigned Align = 8;
  AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
  LangAS AS = LangAS::Default;
  switch (T->getTypeClass()) {
// Dependent types must have been resolved before asking for layout; sugared
// types that are non-canonical-unless-dependent are desugared and retried.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)                       \
  case Type::Class:                                                            \
  assert(!T->isDependentType() && "should not see dependent types here");      \
  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Should not see dependent types");

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // GCC extension: alignof(function) = 32 bits
    Width = 0;
    Align = 32;
    break;

  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::ConstantArray: {
    // Model non-constant sized arrays as size zero, but track the alignment.
    uint64_t Size = 0;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
      Size = CAT->getSize().getZExtValue();

    TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
    assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
           "Overflow in array type bit size evaluation");
    Width = EltInfo.Width * Size;
    Align = EltInfo.Align;
    AlignRequirement = EltInfo.AlignRequirement;
    // The MS 32-bit ABI does not round array sizes up to the element
    // alignment; other ABIs (and 64-bit MS) do.
    if (!getTargetInfo().getCXXABI().isMicrosoft() ||
        getTargetInfo().getPointerWidth(LangAS::Default) == 64)
      Width = llvm::alignTo(Width, Align);
    break;
  }

  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(T);
    TypeInfo EltInfo = getTypeInfo(VT->getElementType());
    // ext_vector_type(N) of bool is N bits wide (one bit per element).
    Width = VT->isExtVectorBoolType() ? VT->getNumElements()
                                      : EltInfo.Width * VT->getNumElements();
    // Enforce at least byte alignment.
    Align = std::max<unsigned>(8, Width);

    // If the alignment is not a power of 2, round up to the next power of 2.
    // This happens for non-power-of-2 length vectors.
    if (Align & (Align-1)) {
      Align = llvm::NextPowerOf2(Align);
      Width = llvm::alignTo(Width, Align);
    }
    // Adjust the alignment based on the target max.
    uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
    if (TargetVectorAlign && TargetVectorAlign < Align)
      Align = TargetVectorAlign;
    if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector)
      // Adjust the alignment for fixed-length SVE vectors. This is important
      // for non-power-of-2 vector lengths.
      Align = 128;
    else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
      // Adjust the alignment for fixed-length SVE predicates.
      Align = 16;
    break;
  }

  case Type::ConstantMatrix: {
    const auto *MT = cast<ConstantMatrixType>(T);
    TypeInfo ElementInfo = getTypeInfo(MT->getElementType());
    // The internal layout of a matrix value is implementation defined.
    // Initially be ABI compatible with arrays with respect to alignment and
    // size.
    Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
    Align = ElementInfo.Align;
    break;
  }

  case Type::Builtin:
    switch (cast<BuiltinType>(T)->getKind()) {
    default: llvm_unreachable("Unknown builtin type!");
    case BuiltinType::Void:
      // GCC extension: alignof(void) = 8 bits.
      Width = 0;
      Align = 8;
      break;
    case BuiltinType::Bool:
      Width = Target->getBoolWidth();
      Align = Target->getBoolAlign();
      break;
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
    case BuiltinType::Char8:
      Width = Target->getCharWidth();
      Align = Target->getCharAlign();
      break;
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
      Width = Target->getWCharWidth();
      Align = Target->getWCharAlign();
      break;
    case BuiltinType::Char16:
      Width = Target->getChar16Width();
      Align = Target->getChar16Align();
      break;
    case BuiltinType::Char32:
      Width = Target->getChar32Width();
      Align = Target->getChar32Align();
      break;
    case BuiltinType::UShort:
    case BuiltinType::Short:
      Width = Target->getShortWidth();
      Align = Target->getShortAlign();
      break;
    case BuiltinType::UInt:
    case BuiltinType::Int:
      Width = Target->getIntWidth();
      Align = Target->getIntAlign();
      break;
    case BuiltinType::ULong:
    case BuiltinType::Long:
      Width = Target->getLongWidth();
      Align = Target->getLongAlign();
      break;
    case BuiltinType::ULongLong:
    case BuiltinType::LongLong:
      Width = Target->getLongLongWidth();
      Align = Target->getLongLongAlign();
      break;
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      Width = 128;
      Align = Target->getInt128Align();
      break;
    // Embedded-C (ISO/IEC TR 18037) fixed-point types: widths and alignments
    // all come from the target.
    case BuiltinType::ShortAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatUShortAccum:
      Width = Target->getShortAccumWidth();
      Align = Target->getShortAccumAlign();
      break;
    case BuiltinType::Accum:
    case BuiltinType::UAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatUAccum:
      Width = Target->getAccumWidth();
      Align = Target->getAccumAlign();
      break;
    case BuiltinType::LongAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatULongAccum:
      Width = Target->getLongAccumWidth();
      Align = Target->getLongAccumAlign();
      break;
    case BuiltinType::ShortFract:
    case BuiltinType::UShortFract:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatUShortFract:
      Width = Target->getShortFractWidth();
      Align = Target->getShortFractAlign();
      break;
    case BuiltinType::Fract:
    case BuiltinType::UFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatUFract:
      Width = Target->getFractWidth();
      Align = Target->getFractAlign();
      break;
    case BuiltinType::LongFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatULongFract:
      Width = Target->getLongFractWidth();
      Align = Target->getLongFractAlign();
      break;
    case BuiltinType::BFloat16:
      // Width/Align stay 0/8 when the target lacks bfloat16 support.
      if (Target->hasBFloat16Type()) {
        Width = Target->getBFloat16Width();
        Align = Target->getBFloat16Align();
      }
      break;
    case BuiltinType::Float16:
    case BuiltinType::Half:
      // For OpenMP device compilation of a half type the device target does
      // not support, fall back to the host (aux) target's layout.
      if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
          !getLangOpts().OpenMPIsDevice) {
        Width = Target->getHalfWidth();
        Align = Target->getHalfAlign();
      } else {
        assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
               "Expected OpenMP device compilation.");
        Width = AuxTarget->getHalfWidth();
        Align = AuxTarget->getHalfAlign();
      }
      break;
    case BuiltinType::Float:
      Width = Target->getFloatWidth();
      Align = Target->getFloatAlign();
      break;
    case BuiltinType::Double:
      Width = Target->getDoubleWidth();
      Align = Target->getDoubleAlign();
      break;
    case BuiltinType::Ibm128:
      Width = Target->getIbm128Width();
      Align = Target->getIbm128Align();
      break;
    case BuiltinType::LongDouble:
      // On OpenMP devices, use the host's long double layout when it differs,
      // so host and device agree on the type's size/alignment.
      if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
          (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
           Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
        Width = AuxTarget->getLongDoubleWidth();
        Align = AuxTarget->getLongDoubleAlign();
      } else {
        Width = Target->getLongDoubleWidth();
        Align = Target->getLongDoubleAlign();
      }
      break;
    case BuiltinType::Float128:
      if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
          !getLangOpts().OpenMPIsDevice) {
        Width = Target->getFloat128Width();
        Align = Target->getFloat128Align();
      } else {
        assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
               "Expected OpenMP device compilation.");
        Width = AuxTarget->getFloat128Width();
        Align = AuxTarget->getFloat128Align();
      }
      break;
    case BuiltinType::NullPtr:
      // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
      Width = Target->getPointerWidth(LangAS::Default);
      Align = Target->getPointerAlign(LangAS::Default);
      break;
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      Width = Target->getPointerWidth(LangAS::Default);
      Align = Target->getPointerAlign(LangAS::Default);
      break;
    // OpenCL opaque types are laid out as pointers in the target-chosen
    // address space for that type kind.
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
      AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T));
      Width = Target->getPointerWidth(AS);
      Align = Target->getPointerAlign(AS);
      break;
    // The SVE types are effectively target-specific. The length of an
    // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
    // of 128 bits. There is one predicate bit for each vector byte, so the
    // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
    //
    // Because the length is only known at runtime, we use a dummy value
    // of 0 for the static length. The alignment values are those defined
    // by the Procedure Call Standard for the Arm Architecture.
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits,    \
                        IsSigned, IsFP, IsBF)                                  \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 128;                                                               \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls)         \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 16;                                                                \
    break;
#include "clang/Basic/AArch64SVEACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size)                                       \
  case BuiltinType::Id:                                                        \
    Width = Size;                                                              \
    Align = Size;                                                              \
    break;
#include "clang/Basic/PPCTypes.def"
// RISC-V scalable vectors: like SVE, the runtime length is unknown, so the
// static width is 0.
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned,   \
                        IsFP)                                                  \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = ElBits;                                                            \
    break;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind)                      \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 8;                                                                 \
    break;
#include "clang/Basic/RISCVVTypes.def"
    }
    break;
  case Type::ObjCObjectPointer:
    Width = Target->getPointerWidth(LangAS::Default);
    Align = Target->getPointerAlign(LangAS::Default);
    break;
  case Type::BlockPointer:
    AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  case Type::LValueReference:
  case Type::RValueReference:
    // alignof and sizeof should never enter this code path here, so we go
    // the pointer route.
    AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  case Type::Pointer:
    AS = cast<PointerType>(T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  case Type::MemberPointer: {
    // Member-pointer representation is C++-ABI-specific; delegate to the ABI.
    const auto *MPT = cast<MemberPointerType>(T);
    CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
    Width = MPI.Width;
    Align = MPI.Align;
    break;
  }
  case Type::Complex: {
    // Complex types have the same alignment as their elements, but twice the
    // size.
    TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
    Width = EltInfo.Width * 2;
    Align = EltInfo.Align;
    break;
  }
  case Type::ObjCObject:
    return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
  case Type::Adjusted:
  case Type::Decayed:
    return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
  case Type::ObjCInterface: {
    const auto *ObjCI = cast<ObjCInterfaceType>(T);
    // Give an invalid interface a harmless 1-byte layout instead of asserting.
    if (ObjCI->getDecl()->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
    Width = toBits(Layout.getSize());
    Align = toBits(Layout.getAlignment());
    break;
  }
  case Type::BitInt: {
    // _BitInt(N): align to the next power of two, clamped between char and
    // long long alignment; the width is N rounded up to that alignment.
    const auto *EIT = cast<BitIntType>(T);
    Align =
        std::min(static_cast<unsigned>(std::max(
                     getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))),
                 Target->getLongLongAlign());
    Width = llvm::alignTo(EIT->getNumBits(), Align);
    break;
  }
  case Type::Record:
  case Type::Enum: {
    const auto *TT = cast<TagType>(T);

    if (TT->getDecl()->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }

    if (const auto *ET = dyn_cast<EnumType>(TT)) {
      // An enum has the layout of its underlying integer type, except that an
      // aligned attribute on the enum itself overrides the alignment.
      const EnumDecl *ED = ET->getDecl();
      TypeInfo Info =
          getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType());
      if (unsigned AttrAlign = ED->getMaxAlignment()) {
        Info.Align = AttrAlign;
        Info.AlignRequirement = AlignRequirementKind::RequiredByEnum;
      }
      return Info;
    }

    const auto *RT = cast<RecordType>(TT);
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getASTRecordLayout(RD);
    Width = toBits(Layout.getSize());
    Align = toBits(Layout.getAlignment());
    AlignRequirement = RD->hasAttr<AlignedAttr>()
                           ? AlignRequirementKind::RequiredByRecord
                           : AlignRequirementKind::None;
    break;
  }

  case Type::SubstTemplateTypeParm:
    return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
                       getReplacementType().getTypePtr());

  case Type::Auto:
  case Type::DeducedTemplateSpecialization: {
    const auto *A = cast<DeducedType>(T);
    assert(!A->getDeducedType().isNull() &&
           "cannot request the size of an undeduced or dependent auto type");
    return getTypeInfo(A->getDeducedType().getTypePtr());
  }

  case Type::Paren:
    return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());

  case Type::MacroQualified:
    return getTypeInfo(
        cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr());

  case Type::ObjCTypeParam:
    return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());

  case Type::Using:
    return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr());

  case Type::Typedef: {
    const auto *TT = cast<TypedefType>(T);
    TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr());
    // If the typedef has an aligned attribute on it, it overrides any computed
    // alignment we have. This violates the GCC documentation (which says that
    // attribute(aligned) can only round up) but matches its implementation.
    if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
      Align = AttrAlign;
      AlignRequirement = AlignRequirementKind::RequiredByTypedef;
    } else {
      Align = Info.Align;
      AlignRequirement = Info.AlignRequirement;
    }
    Width = Info.Width;
    break;
  }

  case Type::Elaborated:
    return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());

  case Type::Attributed:
    return getTypeInfo(
        cast<AttributedType>(T)->getEquivalentType().getTypePtr());

  case Type::BTFTagAttributed:
    return getTypeInfo(
        cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());

  case Type::Atomic: {
    // Start with the base type information.
    TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
    Width = Info.Width;
    Align = Info.Align;

    if (!Width) {
      // An otherwise zero-sized type should still generate an
      // atomic operation.
      Width = Target->getCharWidth();
      assert(Align);
    } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
      // If the size of the type doesn't exceed the platform's max
      // atomic promotion width, make the size and alignment more
      // favorable to atomic operations:

      // Round the size up to a power of 2.
      if (!llvm::isPowerOf2_64(Width))
        Width = llvm::NextPowerOf2(Width);

      // Set the alignment equal to the size.
      Align = static_cast<unsigned>(Width);
    }
  }
  break;

  case Type::Pipe:
    // OpenCL pipes are represented as global-address-space pointers.
    Width = Target->getPointerWidth(LangAS::opencl_global);
    Align = Target->getPointerAlign(LangAS::opencl_global);
    break;
  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return TypeInfo(Width, Align, AlignRequirement);
}

/// Return the alignment of \p T, in bits, before any ABI adjustment, using the
/// record (or ObjC interface) layout's unadjusted alignment when available.
/// Results are memoized in MemoizedUnadjustedAlign.
unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
  if (I != MemoizedUnadjustedAlign.end())
    return I->second;

  unsigned UnadjustedAlign;
  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getASTRecordLayout(RD);
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else {
    // For non-record types, the unadjusted alignment is just the ordinary
    // alignment of the unqualified, desugared type.
    UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
  }

  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
  return UnadjustedAlign;
}

/// Return the default alignment used for OpenMP 'simd' on this target.
/// The type parameter is currently unused: the target-wide default applies
/// to every type.
unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
  unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
  return SimdAlign;
}

/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  return CharUnits::fromQuantity(BitSize / getCharWidth());
}

/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}

/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
  return getTypeInfoInChars(T).Width;
}
CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
  return getTypeInfoInChars(T).Width;
}

/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
/// characters. This method does not work on incomplete types.
CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}
CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}

/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
/// type, in characters, before alignment adjustments. This method does
/// not work on incomplete types.
CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
}
CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
}

/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where it is beneficial for performance or backwards
/// compatibility preserving to overalign a data type. (Note: despite the name,
/// the preferred alignment is ABI-impacting, and not an optimization.)
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  // For arrays, the preferred alignment is that of the element type.
  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(getPointerDiffType().getTypePtr());

  if (!Target->allowsLargerPreferedTypeAlignment())
    return ABIAlign;

  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // When used as part of a typedef, or together with a 'packed' attribute,
    // the 'aligned' attribute can be used to decrease alignment. Note that the
    // 'packed' case is already taken into consideration when computing the
    // alignment, we only need to handle the typedef case here.
    if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
        RD->isInvalidDecl())
      return ABIAlign;

    unsigned PreferredAlign = static_cast<unsigned>(
        toBits(getASTRecordLayout(RD).PreferredAlignment));
    assert(PreferredAlign >= ABIAlign &&
           "PreferredAlign should be at least as large as ABIAlign.");
    return PreferredAlign;
  }

  // Double (and, for targets supporting AIX `power` alignment, long double) and
  // long long should be naturally aligned (despite requiring less alignment) if
  // possible.
  if (const auto *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const auto *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
      T->isSpecificBuiltinType(BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
      (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
       Target->defaultsToAIXPowerAlignment()))
    // Don't increase the alignment if an alignment attribute was specified on a
    // typedef declaration.
    if (!TI.isAlignRequired())
      return std::max(ABIAlign, (unsigned)getTypeSize(T));

  return ABIAlign;
}

/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
/// for __attribute__((aligned)) on this target, to be used if no alignment
/// value is specified.
unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
  return getTargetInfo().getDefaultAlignForAttributeAligned();
}

/// getAlignOfGlobalVar - Return the alignment in bits that should be given
/// to a global variable of the specified type.
unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
  uint64_t TypeSize = getTypeSize(T.getTypePtr());
  // Take the larger of the type's preferred alignment and the target's
  // minimum alignment for globals of this size.
  return std::max(getPreferredTypeAlign(T),
                  getTargetInfo().getMinGlobalAlign(TypeSize));
}

/// getAlignOfGlobalVarInChars - Return the alignment in characters that
/// should be given to a global variable of the specified type.
CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
  return toCharUnitsFromBits(getAlignOfGlobalVar(T));
}

/// Return the accumulated offset, in characters, of the chain of base classes
/// that share a vbptr with \p RD (Microsoft C++ ABI layout query).
CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
  CharUnits Offset = CharUnits::Zero();
  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
    Offset += Layout->getBaseClassOffset(Base);
    Layout = &getASTRecordLayout(Base);
  }
  return Offset;
}

/// Compute the 'this'-pointer adjustment implied by the base-class path stored
/// in the member-pointer constant \p MP; negative for pointers to members of a
/// derived class.
CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  CharUnits ThisAdjustment = CharUnits::Zero();
  ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
  bool DerivedMember = MP.isMemberPointerToDerivedMember();
  const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
  // Walk the path, accumulating each base-class offset; swap the direction of
  // each step for a pointer to a member of a derived class.
  for (unsigned I = 0, N = Path.size(); I != N; ++I) {
    const
          CXXRecordDecl *Base = RD;
    const CXXRecordDecl *Derived = Path[I];
    if (DerivedMember)
      std::swap(Base, Derived);
    ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base);
    RD = Path[I];
  }
  // Pointers to members of a derived class adjust in the opposite direction.
  if (DerivedMember)
    ThisAdjustment = -ThisAdjustment;
  return ThisAdjustment;
}

/// DeepCollectObjCIvars -
/// This routine first collects all declared, but not synthesized, ivars in
/// super class and then collects all ivars, including those synthesized for
/// current class. This routine is used for implementation of current class
/// when all ivars, declared and synthesized are known.
void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
                                      bool leafClass,
                            SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
  // Recurse through superclasses first so ivars appear base-first.
  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
    DeepCollectObjCIvars(SuperClass, false, Ivars);
  if (!leafClass) {
    llvm::append_range(Ivars, OI->ivars());
  } else {
    // For the leaf class, include every declared ivar (including synthesized
    // ones), which requires a non-const interface decl.
    auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
    for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
         Iv= Iv->getNextIvar())
      Ivars.push_back(Iv);
  }
}

/// CollectInheritedProtocols - Collect all protocols in current class and
/// those inherited by it.
void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
                          llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
  if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
    // We can use protocol_iterator here instead of
    // all_referenced_protocol_iterator since we are walking all categories.
    for (auto *Proto : OI->all_referenced_protocols()) {
      CollectInheritedProtocols(Proto, Protocols);
    }

    // Categories of this Interface.
    for (const auto *Cat : OI->visible_categories())
      CollectInheritedProtocols(Cat, Protocols);

    // Walk the entire superclass chain.
    if (ObjCInterfaceDecl *SD = OI->getSuperClass())
      while (SD) {
        CollectInheritedProtocols(SD, Protocols);
        SD = SD->getSuperClass();
      }
  } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
    for (auto *Proto : OC->protocols()) {
      CollectInheritedProtocols(Proto, Protocols);
    }
  } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
    // Insert the protocol.  If it was already present, its inherited
    // protocols have been visited too, so stop here.
    if (!Protocols.insert(
          const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
      return;

    for (auto *Proto : OP->protocols())
      CollectInheritedProtocols(Proto, Protocols);
  }
}

/// Return true if every member of the union \p RD has unique object
/// representations and occupies the union's full size (so no padding byte can
/// ever be observed). An empty union does not qualify.
static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
                                                const RecordDecl *RD) {
  assert(RD->isUnion() && "Must be union type");
  CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());

  for (const auto *Field : RD->fields()) {
    if (!Context.hasUniqueObjectRepresentations(Field->getType()))
      return false;
    CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
    if (FieldSize != UnionSize)
      return false;
  }
  return !RD->field_empty();
}

// Offset, in bits, of a field within its record.
static int64_t getSubobjectOffset(const FieldDecl *Field,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout & /*Layout*/) {
  return Context.getFieldOffset(Field);
}

// Offset, in bits, of a base-class subobject within the record's layout.
static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout &Layout) {
  return Context.toBits(Layout.getBaseClassOffset(RD));
}

static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
                                     const RecordDecl *RD);

/// Return the number of value bits the field subobject \p Field contributes,
/// or std::nullopt if its representation is not unique (e.g. padding bits,
/// over-wide bit-fields, non-unique element types).
static std::optional<int64_t>
getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context) {
  if (Field->getType()->isRecordType()) {
    const RecordDecl *RD = Field->getType()->getAsRecordDecl();
    if (!RD->isUnion())
      return structHasUniqueObjectRepresentations(Context, RD);
  }

  // A _BitInt type may not be unique if it has padding bits
  // but if it is a bitfield the padding bits are not used.
  bool IsBitIntType = Field->getType()->isBitIntType();
  if (!Field->getType()->isReferenceType() && !IsBitIntType &&
      !Context.hasUniqueObjectRepresentations(Field->getType()))
    return std::nullopt;

  int64_t FieldSizeInBits =
      Context.toBits(Context.getTypeSizeInChars(Field->getType()));
  if (Field->isBitField()) {
    int64_t BitfieldSize = Field->getBitWidthValue(Context);
    if (IsBitIntType) {
      // A bit-field wider than the underlying _BitInt has padding bits.
      if ((unsigned)BitfieldSize >
          cast<BitIntType>(Field->getType())->getNumBits())
        return std::nullopt;
    } else if (BitfieldSize > FieldSizeInBits) {
      return std::nullopt;
    }
    FieldSizeInBits = BitfieldSize;
  } else if (IsBitIntType &&
             !Context.hasUniqueObjectRepresentations(Field->getType())) {
    return std::nullopt;
  }
  return FieldSizeInBits;
}

static std::optional<int64_t>
getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context) {
  return structHasUniqueObjectRepresentations(Context, RD);
}

/// Walk \p Subobjects in layout order, verifying each has a unique object
/// representation and sits exactly at the running offset (i.e. no padding
/// between subobjects). Returns the updated offset, or std::nullopt on
/// failure. NOTE(review): the remainder of this template continues past this
/// excerpt.
template <typename RangeT>
static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
    const RangeT &Subobjects, int64_t CurOffsetInBits,
    const ASTContext &Context, const clang::ASTRecordLayout &Layout) {
  for (const auto *Subobject : Subobjects) {
    std::optional<int64_t> SizeInBits =
        getSubobjectSizeInBits(Subobject, Context);
    if (!SizeInBits)
      return std::nullopt;
    if (*SizeInBits != 0) {
      int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
      if (Offset != CurOffsetInBits)
        return std::nullopt;
      CurOffsetInBits += *SizeInBits;
    }
2753 } 2754 return CurOffsetInBits; 2755 } 2756 2757 static std::optional<int64_t> 2758 structHasUniqueObjectRepresentations(const ASTContext &Context, 2759 const RecordDecl *RD) { 2760 assert(!RD->isUnion() && "Must be struct/class type"); 2761 const auto &Layout = Context.getASTRecordLayout(RD); 2762 2763 int64_t CurOffsetInBits = 0; 2764 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { 2765 if (ClassDecl->isDynamicClass()) 2766 return std::nullopt; 2767 2768 SmallVector<CXXRecordDecl *, 4> Bases; 2769 for (const auto &Base : ClassDecl->bases()) { 2770 // Empty types can be inherited from, and non-empty types can potentially 2771 // have tail padding, so just make sure there isn't an error. 2772 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); 2773 } 2774 2775 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { 2776 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); 2777 }); 2778 2779 std::optional<int64_t> OffsetAfterBases = 2780 structSubobjectsHaveUniqueObjectRepresentations(Bases, CurOffsetInBits, 2781 Context, Layout); 2782 if (!OffsetAfterBases) 2783 return std::nullopt; 2784 CurOffsetInBits = *OffsetAfterBases; 2785 } 2786 2787 std::optional<int64_t> OffsetAfterFields = 2788 structSubobjectsHaveUniqueObjectRepresentations( 2789 RD->fields(), CurOffsetInBits, Context, Layout); 2790 if (!OffsetAfterFields) 2791 return std::nullopt; 2792 CurOffsetInBits = *OffsetAfterFields; 2793 2794 return CurOffsetInBits; 2795 } 2796 2797 bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const { 2798 // C++17 [meta.unary.prop]: 2799 // The predicate condition for a template specialization 2800 // has_unique_object_representations<T> shall be 2801 // satisfied if and only if: 2802 // (9.1) - T is trivially copyable, and 2803 // (9.2) - any two objects of type T with the same value have the same 2804 // object representation, where two objects 2805 // of array or non-union class type are considered to have 
the same value 2806 // if their respective sequences of 2807 // direct subobjects have the same values, and two objects of union type 2808 // are considered to have the same 2809 // value if they have the same active member and the corresponding members 2810 // have the same value. 2811 // The set of scalar types for which this condition holds is 2812 // implementation-defined. [ Note: If a type has padding 2813 // bits, the condition does not hold; otherwise, the condition holds true 2814 // for unsigned integral types. -- end note ] 2815 assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); 2816 2817 // Arrays are unique only if their element type is unique. 2818 if (Ty->isArrayType()) 2819 return hasUniqueObjectRepresentations(getBaseElementType(Ty)); 2820 2821 // (9.1) - T is trivially copyable... 2822 if (!Ty.isTriviallyCopyableType(*this)) 2823 return false; 2824 2825 // All integrals and enums are unique. 2826 if (Ty->isIntegralOrEnumerationType()) { 2827 // Except _BitInt types that have padding bits. 2828 if (const auto *BIT = dyn_cast<BitIntType>(Ty)) 2829 return getTypeSize(BIT) == BIT->getNumBits(); 2830 2831 return true; 2832 } 2833 2834 // All other pointers are unique. 
2835 if (Ty->isPointerType()) 2836 return true; 2837 2838 if (Ty->isMemberPointerType()) { 2839 const auto *MPT = Ty->getAs<MemberPointerType>(); 2840 return !ABI->getMemberPointerInfo(MPT).HasPadding; 2841 } 2842 2843 if (Ty->isRecordType()) { 2844 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); 2845 2846 if (Record->isInvalidDecl()) 2847 return false; 2848 2849 if (Record->isUnion()) 2850 return unionHasUniqueObjectRepresentations(*this, Record); 2851 2852 std::optional<int64_t> StructSize = 2853 structHasUniqueObjectRepresentations(*this, Record); 2854 2855 return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty)); 2856 } 2857 2858 // FIXME: More cases to handle here (list by rsmith): 2859 // vectors (careful about, eg, vector of 3 foo) 2860 // _Complex int and friends 2861 // _Atomic T 2862 // Obj-C block pointers 2863 // Obj-C object pointers 2864 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, 2865 // clk_event_t, queue_t, reserve_id_t) 2866 // There're also Obj-C class types and the Obj-C selector type, but I think it 2867 // makes sense for those to return false here. 2868 2869 return false; 2870 } 2871 2872 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 2873 unsigned count = 0; 2874 // Count ivars declared in class extension. 2875 for (const auto *Ext : OI->known_extensions()) 2876 count += Ext->ivar_size(); 2877 2878 // Count ivar defined in this class's implementation. This 2879 // includes synthesized ivars. 2880 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 2881 count += ImplDecl->ivar_size(); 2882 2883 return count; 2884 } 2885 2886 bool ASTContext::isSentinelNullExpr(const Expr *E) { 2887 if (!E) 2888 return false; 2889 2890 // nullptr_t is always treated as null. 
2891 if (E->getType()->isNullPtrType()) return true; 2892 2893 if (E->getType()->isAnyPointerType() && 2894 E->IgnoreParenCasts()->isNullPointerConstant(*this, 2895 Expr::NPC_ValueDependentIsNull)) 2896 return true; 2897 2898 // Unfortunately, __null has type 'int'. 2899 if (isa<GNUNullExpr>(E)) return true; 2900 2901 return false; 2902 } 2903 2904 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none 2905 /// exists. 2906 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 2907 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2908 I = ObjCImpls.find(D); 2909 if (I != ObjCImpls.end()) 2910 return cast<ObjCImplementationDecl>(I->second); 2911 return nullptr; 2912 } 2913 2914 /// Get the implementation of ObjCCategoryDecl, or nullptr if none 2915 /// exists. 2916 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 2917 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2918 I = ObjCImpls.find(D); 2919 if (I != ObjCImpls.end()) 2920 return cast<ObjCCategoryImplDecl>(I->second); 2921 return nullptr; 2922 } 2923 2924 /// Set the implementation of ObjCInterfaceDecl. 2925 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 2926 ObjCImplementationDecl *ImplD) { 2927 assert(IFaceD && ImplD && "Passed null params"); 2928 ObjCImpls[IFaceD] = ImplD; 2929 } 2930 2931 /// Set the implementation of ObjCCategoryDecl. 
2932 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 2933 ObjCCategoryImplDecl *ImplD) { 2934 assert(CatD && ImplD && "Passed null params"); 2935 ObjCImpls[CatD] = ImplD; 2936 } 2937 2938 const ObjCMethodDecl * 2939 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { 2940 return ObjCMethodRedecls.lookup(MD); 2941 } 2942 2943 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, 2944 const ObjCMethodDecl *Redecl) { 2945 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); 2946 ObjCMethodRedecls[MD] = Redecl; 2947 } 2948 2949 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 2950 const NamedDecl *ND) const { 2951 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 2952 return ID; 2953 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 2954 return CD->getClassInterface(); 2955 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 2956 return IMD->getClassInterface(); 2957 2958 return nullptr; 2959 } 2960 2961 /// Get the copy initialization expression of VarDecl, or nullptr if 2962 /// none exists. 2963 BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { 2964 assert(VD && "Passed null params"); 2965 assert(VD->hasAttr<BlocksAttr>() && 2966 "getBlockVarCopyInits - not __block var"); 2967 auto I = BlockVarCopyInits.find(VD); 2968 if (I != BlockVarCopyInits.end()) 2969 return I->second; 2970 return {nullptr, false}; 2971 } 2972 2973 /// Set the copy initialization expression of a block var decl. 
2974 void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, 2975 bool CanThrow) { 2976 assert(VD && CopyExpr && "Passed null params"); 2977 assert(VD->hasAttr<BlocksAttr>() && 2978 "setBlockVarCopyInits - not __block var"); 2979 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); 2980 } 2981 2982 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 2983 unsigned DataSize) const { 2984 if (!DataSize) 2985 DataSize = TypeLoc::getFullDataSizeForType(T); 2986 else 2987 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 2988 "incorrect data size provided to CreateTypeSourceInfo!"); 2989 2990 auto *TInfo = 2991 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 2992 new (TInfo) TypeSourceInfo(T); 2993 return TInfo; 2994 } 2995 2996 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 2997 SourceLocation L) const { 2998 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 2999 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 3000 return DI; 3001 } 3002 3003 const ASTRecordLayout & 3004 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 3005 return getObjCLayout(D, nullptr); 3006 } 3007 3008 const ASTRecordLayout & 3009 ASTContext::getASTObjCImplementationLayout( 3010 const ObjCImplementationDecl *D) const { 3011 return getObjCLayout(D->getClassInterface(), D); 3012 } 3013 3014 static auto getCanonicalTemplateArguments(const ASTContext &C, 3015 ArrayRef<TemplateArgument> Args, 3016 bool &AnyNonCanonArgs) { 3017 SmallVector<TemplateArgument, 16> CanonArgs(Args); 3018 for (auto &Arg : CanonArgs) { 3019 TemplateArgument OrigArg = Arg; 3020 Arg = C.getCanonicalTemplateArgument(Arg); 3021 AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg); 3022 } 3023 return CanonArgs; 3024 } 3025 3026 //===----------------------------------------------------------------------===// 3027 // Type creation/memoization methods 3028 
//===----------------------------------------------------------------------===// 3029 3030 QualType 3031 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 3032 unsigned fastQuals = quals.getFastQualifiers(); 3033 quals.removeFastQualifiers(); 3034 3035 // Check if we've already instantiated this type. 3036 llvm::FoldingSetNodeID ID; 3037 ExtQuals::Profile(ID, baseType, quals); 3038 void *insertPos = nullptr; 3039 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 3040 assert(eq->getQualifiers() == quals); 3041 return QualType(eq, fastQuals); 3042 } 3043 3044 // If the base type is not canonical, make the appropriate canonical type. 3045 QualType canon; 3046 if (!baseType->isCanonicalUnqualified()) { 3047 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 3048 canonSplit.Quals.addConsistentQualifiers(quals); 3049 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 3050 3051 // Re-find the insert position. 3052 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 3053 } 3054 3055 auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals); 3056 ExtQualNodes.InsertNode(eq, insertPos); 3057 return QualType(eq, fastQuals); 3058 } 3059 3060 QualType ASTContext::getAddrSpaceQualType(QualType T, 3061 LangAS AddressSpace) const { 3062 QualType CanT = getCanonicalType(T); 3063 if (CanT.getAddressSpace() == AddressSpace) 3064 return T; 3065 3066 // If we are composing extended qualifiers together, merge together 3067 // into one ExtQuals node. 3068 QualifierCollector Quals; 3069 const Type *TypeNode = Quals.strip(T); 3070 3071 // If this type already has an address space specified, it cannot get 3072 // another one. 
3073 assert(!Quals.hasAddressSpace() && 3074 "Type cannot be in multiple addr spaces!"); 3075 Quals.addAddressSpace(AddressSpace); 3076 3077 return getExtQualType(TypeNode, Quals); 3078 } 3079 3080 QualType ASTContext::removeAddrSpaceQualType(QualType T) const { 3081 // If the type is not qualified with an address space, just return it 3082 // immediately. 3083 if (!T.hasAddressSpace()) 3084 return T; 3085 3086 // If we are composing extended qualifiers together, merge together 3087 // into one ExtQuals node. 3088 QualifierCollector Quals; 3089 const Type *TypeNode; 3090 3091 while (T.hasAddressSpace()) { 3092 TypeNode = Quals.strip(T); 3093 3094 // If the type no longer has an address space after stripping qualifiers, 3095 // jump out. 3096 if (!QualType(TypeNode, 0).hasAddressSpace()) 3097 break; 3098 3099 // There might be sugar in the way. Strip it and try again. 3100 T = T.getSingleStepDesugaredType(*this); 3101 } 3102 3103 Quals.removeAddressSpace(); 3104 3105 // Removal of the address space can mean there are no longer any 3106 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) 3107 // or required. 3108 if (Quals.hasNonFastQualifiers()) 3109 return getExtQualType(TypeNode, Quals); 3110 else 3111 return QualType(TypeNode, Quals.getFastQualifiers()); 3112 } 3113 3114 QualType ASTContext::getObjCGCQualType(QualType T, 3115 Qualifiers::GC GCAttr) const { 3116 QualType CanT = getCanonicalType(T); 3117 if (CanT.getObjCGCAttr() == GCAttr) 3118 return T; 3119 3120 if (const auto *ptr = T->getAs<PointerType>()) { 3121 QualType Pointee = ptr->getPointeeType(); 3122 if (Pointee->isAnyPointerType()) { 3123 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 3124 return getPointerType(ResultType); 3125 } 3126 } 3127 3128 // If we are composing extended qualifiers together, merge together 3129 // into one ExtQuals node. 
3130 QualifierCollector Quals; 3131 const Type *TypeNode = Quals.strip(T); 3132 3133 // If this type already has an ObjCGC specified, it cannot get 3134 // another one. 3135 assert(!Quals.hasObjCGCAttr() && 3136 "Type cannot have multiple ObjCGCs!"); 3137 Quals.addObjCGCAttr(GCAttr); 3138 3139 return getExtQualType(TypeNode, Quals); 3140 } 3141 3142 QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { 3143 if (const PointerType *Ptr = T->getAs<PointerType>()) { 3144 QualType Pointee = Ptr->getPointeeType(); 3145 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { 3146 return getPointerType(removeAddrSpaceQualType(Pointee)); 3147 } 3148 } 3149 return T; 3150 } 3151 3152 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 3153 FunctionType::ExtInfo Info) { 3154 if (T->getExtInfo() == Info) 3155 return T; 3156 3157 QualType Result; 3158 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 3159 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); 3160 } else { 3161 const auto *FPT = cast<FunctionProtoType>(T); 3162 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3163 EPI.ExtInfo = Info; 3164 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); 3165 } 3166 3167 return cast<FunctionType>(Result.getTypePtr()); 3168 } 3169 3170 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 3171 QualType ResultType) { 3172 FD = FD->getMostRecentDecl(); 3173 while (true) { 3174 const auto *FPT = FD->getType()->castAs<FunctionProtoType>(); 3175 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3176 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); 3177 if (FunctionDecl *Next = FD->getPreviousDecl()) 3178 FD = Next; 3179 else 3180 break; 3181 } 3182 if (ASTMutationListener *L = getASTMutationListener()) 3183 L->DeducedReturnType(FD, ResultType); 3184 } 3185 3186 /// Get a function type and produce the equivalent function type with the 3187 /// specified 
exception specification. Type sugar that can be present on a 3188 /// declaration of a function with an exception specification is permitted 3189 /// and preserved. Other type sugar (for instance, typedefs) is not. 3190 QualType ASTContext::getFunctionTypeWithExceptionSpec( 3191 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const { 3192 // Might have some parens. 3193 if (const auto *PT = dyn_cast<ParenType>(Orig)) 3194 return getParenType( 3195 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); 3196 3197 // Might be wrapped in a macro qualified type. 3198 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig)) 3199 return getMacroQualifiedType( 3200 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), 3201 MQT->getMacroIdentifier()); 3202 3203 // Might have a calling-convention attribute. 3204 if (const auto *AT = dyn_cast<AttributedType>(Orig)) 3205 return getAttributedType( 3206 AT->getAttrKind(), 3207 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), 3208 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); 3209 3210 // Anything else must be a function type. Rebuild it with the new exception 3211 // specification. 
3212 const auto *Proto = Orig->castAs<FunctionProtoType>(); 3213 return getFunctionType( 3214 Proto->getReturnType(), Proto->getParamTypes(), 3215 Proto->getExtProtoInfo().withExceptionSpec(ESI)); 3216 } 3217 3218 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, 3219 QualType U) const { 3220 return hasSameType(T, U) || 3221 (getLangOpts().CPlusPlus17 && 3222 hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), 3223 getFunctionTypeWithExceptionSpec(U, EST_None))); 3224 } 3225 3226 QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { 3227 if (const auto *Proto = T->getAs<FunctionProtoType>()) { 3228 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3229 SmallVector<QualType, 16> Args(Proto->param_types().size()); 3230 for (unsigned i = 0, n = Args.size(); i != n; ++i) 3231 Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]); 3232 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); 3233 } 3234 3235 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) { 3236 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3237 return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); 3238 } 3239 3240 return T; 3241 } 3242 3243 bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { 3244 return hasSameType(T, U) || 3245 hasSameType(getFunctionTypeWithoutPtrSizes(T), 3246 getFunctionTypeWithoutPtrSizes(U)); 3247 } 3248 3249 void ASTContext::adjustExceptionSpec( 3250 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, 3251 bool AsWritten) { 3252 // Update the type. 3253 QualType Updated = 3254 getFunctionTypeWithExceptionSpec(FD->getType(), ESI); 3255 FD->setType(Updated); 3256 3257 if (!AsWritten) 3258 return; 3259 3260 // Update the type in the type source information too. 3261 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) { 3262 // If the type and the type-as-written differ, we may need to update 3263 // the type-as-written too. 
3264 if (TSInfo->getType() != FD->getType()) 3265 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI); 3266 3267 // FIXME: When we get proper type location information for exceptions, 3268 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch 3269 // up the TypeSourceInfo; 3270 assert(TypeLoc::getFullDataSizeForType(Updated) == 3271 TypeLoc::getFullDataSizeForType(TSInfo->getType()) && 3272 "TypeLoc size mismatch from updating exception specification"); 3273 TSInfo->overrideType(Updated); 3274 } 3275 } 3276 3277 /// getComplexType - Return the uniqued reference to the type for a complex 3278 /// number with the specified element type. 3279 QualType ASTContext::getComplexType(QualType T) const { 3280 // Unique pointers, to guarantee there is only one pointer of a particular 3281 // structure. 3282 llvm::FoldingSetNodeID ID; 3283 ComplexType::Profile(ID, T); 3284 3285 void *InsertPos = nullptr; 3286 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) 3287 return QualType(CT, 0); 3288 3289 // If the pointee type isn't canonical, this won't be a canonical type either, 3290 // so fill in the canonical type field. 3291 QualType Canonical; 3292 if (!T.isCanonical()) { 3293 Canonical = getComplexType(getCanonicalType(T)); 3294 3295 // Get the new insert position for the node we care about. 3296 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); 3297 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3298 } 3299 auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical); 3300 Types.push_back(New); 3301 ComplexTypes.InsertNode(New, InsertPos); 3302 return QualType(New, 0); 3303 } 3304 3305 /// getPointerType - Return the uniqued reference to the type for a pointer to 3306 /// the specified type. 3307 QualType ASTContext::getPointerType(QualType T) const { 3308 // Unique pointers, to guarantee there is only one pointer of a particular 3309 // structure. 
3310 llvm::FoldingSetNodeID ID; 3311 PointerType::Profile(ID, T); 3312 3313 void *InsertPos = nullptr; 3314 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3315 return QualType(PT, 0); 3316 3317 // If the pointee type isn't canonical, this won't be a canonical type either, 3318 // so fill in the canonical type field. 3319 QualType Canonical; 3320 if (!T.isCanonical()) { 3321 Canonical = getPointerType(getCanonicalType(T)); 3322 3323 // Get the new insert position for the node we care about. 3324 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3325 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3326 } 3327 auto *New = new (*this, TypeAlignment) PointerType(T, Canonical); 3328 Types.push_back(New); 3329 PointerTypes.InsertNode(New, InsertPos); 3330 return QualType(New, 0); 3331 } 3332 3333 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { 3334 llvm::FoldingSetNodeID ID; 3335 AdjustedType::Profile(ID, Orig, New); 3336 void *InsertPos = nullptr; 3337 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3338 if (AT) 3339 return QualType(AT, 0); 3340 3341 QualType Canonical = getCanonicalType(New); 3342 3343 // Get the new insert position for the node we care about. 
3344 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3345 assert(!AT && "Shouldn't be in the map!"); 3346 3347 AT = new (*this, TypeAlignment) 3348 AdjustedType(Type::Adjusted, Orig, New, Canonical); 3349 Types.push_back(AT); 3350 AdjustedTypes.InsertNode(AT, InsertPos); 3351 return QualType(AT, 0); 3352 } 3353 3354 QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const { 3355 llvm::FoldingSetNodeID ID; 3356 AdjustedType::Profile(ID, Orig, Decayed); 3357 void *InsertPos = nullptr; 3358 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3359 if (AT) 3360 return QualType(AT, 0); 3361 3362 QualType Canonical = getCanonicalType(Decayed); 3363 3364 // Get the new insert position for the node we care about. 3365 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3366 assert(!AT && "Shouldn't be in the map!"); 3367 3368 AT = new (*this, TypeAlignment) DecayedType(Orig, Decayed, Canonical); 3369 Types.push_back(AT); 3370 AdjustedTypes.InsertNode(AT, InsertPos); 3371 return QualType(AT, 0); 3372 } 3373 3374 QualType ASTContext::getDecayedType(QualType T) const { 3375 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); 3376 3377 QualType Decayed; 3378 3379 // C99 6.7.5.3p7: 3380 // A declaration of a parameter as "array of type" shall be 3381 // adjusted to "qualified pointer to type", where the type 3382 // qualifiers (if any) are those specified within the [ and ] of 3383 // the array type derivation. 3384 if (T->isArrayType()) 3385 Decayed = getArrayDecayedType(T); 3386 3387 // C99 6.7.5.3p8: 3388 // A declaration of a parameter as "function returning type" 3389 // shall be adjusted to "pointer to function returning type", as 3390 // in 6.3.2.1. 3391 if (T->isFunctionType()) 3392 Decayed = getPointerType(T); 3393 3394 return getDecayedType(T, Decayed); 3395 } 3396 3397 /// getBlockPointerType - Return the uniqued reference to the type for 3398 /// a pointer to the specified block. 
3399 QualType ASTContext::getBlockPointerType(QualType T) const { 3400 assert(T->isFunctionType() && "block of function types only"); 3401 // Unique pointers, to guarantee there is only one block of a particular 3402 // structure. 3403 llvm::FoldingSetNodeID ID; 3404 BlockPointerType::Profile(ID, T); 3405 3406 void *InsertPos = nullptr; 3407 if (BlockPointerType *PT = 3408 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3409 return QualType(PT, 0); 3410 3411 // If the block pointee type isn't canonical, this won't be a canonical 3412 // type either so fill in the canonical type field. 3413 QualType Canonical; 3414 if (!T.isCanonical()) { 3415 Canonical = getBlockPointerType(getCanonicalType(T)); 3416 3417 // Get the new insert position for the node we care about. 3418 BlockPointerType *NewIP = 3419 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3420 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3421 } 3422 auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical); 3423 Types.push_back(New); 3424 BlockPointerTypes.InsertNode(New, InsertPos); 3425 return QualType(New, 0); 3426 } 3427 3428 /// getLValueReferenceType - Return the uniqued reference to the type for an 3429 /// lvalue reference to the specified type. 3430 QualType 3431 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { 3432 assert((!T->isPlaceholderType() || 3433 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3434 "Unresolved placeholder type"); 3435 3436 // Unique pointers, to guarantee there is only one pointer of a particular 3437 // structure. 
  // (Continuation of ASTContext::getLValueReferenceType.)
  // Unique lvalue references via the folding set; the profile covers the
  // pointee type and whether the reference was spelled as an lvalue reference.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, SpelledAsLValue);

  void *InsertPos = nullptr;
  if (LValueReferenceType *RT =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
    // When T is itself (sugar for) a reference, the canonical node is an
    // lvalue reference to the canonical pointee of that inner reference.
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getLValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about. The recursive
    // call above may have inserted into the folding set, which invalidates
    // the InsertPos captured earlier; the node itself must still be absent.
    LValueReferenceType *NewIP =
      LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
                                                             SpelledAsLValue);
  Types.push_back(New);
  LValueReferenceTypes.InsertNode(New, InsertPos);

  return QualType(New, 0);
}

/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, false);

  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    // As above: an rvalue reference to (sugar for) a reference canonicalizes
    // to an rvalue reference to the inner reference's canonical pointee.
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about; the recursive
    // call may have invalidated the previously captured InsertPos.
    RValueReferenceType *NewIP =
      RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
  Types.push_back(New);
  RValueReferenceTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getMemberPointerType - Return the uniqued reference to the type for a
/// member pointer to the specified type, in the specified class.
QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, T, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
    Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls));

    // Get the new insert position for the node we care about; the recursive
    // call may have invalidated the previously captured InsertPos.
    MemberPointerType *NewIP =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
  Types.push_back(New);
  MemberPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          const Expr *SizeExpr,
                                          ArrayType::ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // We only need the size as part of the type if it's instantiation-dependent.
  if (SizeExpr && !SizeExpr->isInstantiationDependent())
    SizeExpr = nullptr;

  // Convert the array size into a canonical width matching the pointer size for
  // the target.
  llvm::APInt ArySize(ArySizeIn);
  ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM,
                             IndexTypeQuals);

  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, or the array bound
  // is instantiation-dependent, this won't be a canonical type either, so fill
  // in the canonical type field.
  QualType Canon;
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
    // Canonicalize with qualifiers hoisted off the element type onto the
    // array, and without the (instantiation-dependent) size expression.
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(Canon, canonSplit.Quals);

    // Get the new insert position for the node we care about; the recursive
    // call may have invalidated the previously captured InsertPos.
    ConstantArrayType *NewIP =
        ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  // The node tail-allocates storage for the size expression when present.
  void *Mem = Allocate(
      ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0),
      TypeAlignment);
  auto *New = new (Mem)
    ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals);
  ConstantArrayTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
  // Vastly most common case.
  if (!type->isVariablyModifiedType()) return type;

  QualType result;

  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("didn't desugar past all non-canonical types?");

  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::DependentVector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::ConstantMatrix:
  case Type::DependentSizedMatrix:
  case Type::DependentAddressSpace:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::DependentTemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::PackExpansion:
  case Type::BitInt:
  case Type::DependentBitInt:
    llvm_unreachable("type should never be variably-modified");

  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
  case Type::Pipe:
    return type;

  // These types can be variably-modified.  All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
    result = getPointerType(getVariableArrayDecayedType(
                              cast<PointerType>(ty)->getPointeeType()));
    break;

  case Type::LValueReference: {
    const auto *lv = cast<LValueReferenceType>(ty);
    result = getLValueReferenceType(
                 getVariableArrayDecayedType(lv->getPointeeType()),
                                    lv->isSpelledAsLValue());
    break;
  }

  case Type::RValueReference: {
    const auto *lv = cast<RValueReferenceType>(ty);
    result = getRValueReferenceType(
                 getVariableArrayDecayedType(lv->getPointeeType()));
    break;
  }

  case Type::Atomic: {
    const auto *at = cast<AtomicType>(ty);
    result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
    break;
  }

  case Type::ConstantArray: {
    const auto *cat = cast<ConstantArrayType>(ty);
    result = getConstantArrayType(
                 getVariableArrayDecayedType(cat->getElementType()),
                                  cat->getSize(),
                                  cat->getSizeExpr(),
                                  cat->getSizeModifier(),
                                  cat->getIndexTypeCVRQualifiers());
    break;
  }

  case Type::DependentSizedArray: {
    const auto *dat = cast<DependentSizedArrayType>(ty);
    result = getDependentSizedArrayType(
                 getVariableArrayDecayedType(dat->getElementType()),
                                        dat->getSizeExpr(),
                                        dat->getSizeModifier(),
                                        dat->getIndexTypeCVRQualifiers(),
                                        dat->getBracketsRange());
    break;
  }

  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const auto *iat = cast<IncompleteArrayType>(ty);
    result = getVariableArrayType(
                 getVariableArrayDecayedType(iat->getElementType()),
                                  /*size*/ nullptr,
                                  ArrayType::Normal,
                                  iat->getIndexTypeCVRQualifiers(),
                                  SourceRange());
    break;
  }

  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    const auto *vat = cast<VariableArrayType>(ty);
    result = getVariableArrayType(
                 getVariableArrayDecayedType(vat->getElementType()),
                                  /*size*/ nullptr,
                                  ArrayType::Star,
                                  vat->getIndexTypeCVRQualifiers(),
                                  vat->getBracketsRange());
    break;
  }
  }

  // Apply the top-level qualifiers from the original.
  return getQualifiedType(result, split.Quals);
}

/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
QualType ASTContext::getVariableArrayType(QualType EltTy,
                                          Expr *NumElts,
                                          ArrayType::ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals,
                                          SourceRange Brackets) const {
  // Since we don't unique expressions, it isn't possible to unique VLA's
  // that have an expression provided for their size.
  QualType Canon;

  // Be sure to pull qualifiers off the element type.
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
                                 IndexTypeQuals, Brackets);
    Canon = getQualifiedType(Canon, canonSplit.Quals);
  }

  auto *New = new (*this, TypeAlignment)
    VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);

  // VLAs are only tracked, never uniqued (no folding set).
  VariableArrayTypes.push_back(New);
  Types.push_back(New);
  return QualType(New, 0);
}

/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
QualType ASTContext::getDependentSizedArrayType(QualType elementType,
                                                Expr *numElements,
                                                ArrayType::ArraySizeModifier ASM,
                                                unsigned elementTypeQuals,
                                                SourceRange brackets) const {
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");

  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer.  We do no canonicalization here at all, which is okay
  // because they can't be used in most locations.
  if (!numElements) {
    auto *newType
      = new (*this, TypeAlignment)
          DependentSizedArrayType(*this, elementType, QualType(),
                                  numElements, ASM, elementTypeQuals,
                                  brackets);
    Types.push_back(newType);
    return QualType(newType, 0);
  }

  // Otherwise, we actually build a new type every time, but we
  // also build a canonical type.

  SplitQualType canonElementType = getCanonicalType(elementType).split();

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  // Only the canonical node is uniqued; it is profiled on the canonical
  // (unqualified) element type plus the size expression.
  DependentSizedArrayType::Profile(ID, *this,
                                   QualType(canonElementType.Ty, 0),
                                   ASM, elementTypeQuals, numElements);

  // Look for an existing type with these properties.
  DependentSizedArrayType *canonTy =
    DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);

  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, TypeAlignment)
      DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
                              QualType(), numElements, ASM, elementTypeQuals,
                              brackets);
    DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(QualType(canonTy,0),
                                    canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type or the size
  // expression, then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType &&
      canonTy->getSizeExpr() == numElements)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  auto *sugaredType
    = new (*this, TypeAlignment)
        DependentSizedArrayType(*this, elementType, canon, numElements,
                                ASM, elementTypeQuals, brackets);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}

QualType ASTContext::getIncompleteArrayType(QualType elementType,
                                            ArrayType::ArraySizeModifier ASM,
                                            unsigned elementTypeQuals) const {
  // Unique incomplete array types via the folding set.
  llvm::FoldingSetNodeID ID;
  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);

  void *insertPos = nullptr;
  if (IncompleteArrayType *iat =
       IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
    return QualType(iat, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.  We also have to pull
  // qualifiers off the element type.
  QualType canon;

  // FIXME: Check below should look for qualifiers behind sugar.
  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(elementType).split();
    canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
                                   ASM, elementTypeQuals);
    canon = getQualifiedType(canon, canonSplit.Quals);

    // Get the new insert position for the node we care about; the recursive
    // call may have invalidated the previously captured insertPos.
    IncompleteArrayType *existing =
      IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
    assert(!existing && "Shouldn't be in the map!"); (void) existing;
  }

  auto *newType = new (*this, TypeAlignment)
    IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);

  IncompleteArrayTypes.InsertNode(newType, insertPos);
  Types.push_back(newType);
  return QualType(newType, 0);
}

ASTContext::BuiltinVectorTypeInfo
ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
  // Helpers that expand to a BuiltinVectorTypeInfo aggregate for scalable
  // (SVE) vectors: element type, scalable element count, number of vectors.
#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS)                          \
  {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
   NUMVECTORS};

#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS)                                     \
  {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};

  switch (Ty->getKind()) {
  default:
    llvm_unreachable("Unsupported builtin vector type");
  case BuiltinType::SveInt8:
    return SVE_INT_ELTTY(8, 16, true, 1);
  case BuiltinType::SveUint8:
    return SVE_INT_ELTTY(8, 16, false, 1);
  case BuiltinType::SveInt8x2:
    return SVE_INT_ELTTY(8, 16, true, 2);
  case BuiltinType::SveUint8x2:
    return SVE_INT_ELTTY(8, 16, false, 2);
  case BuiltinType::SveInt8x3:
    return SVE_INT_ELTTY(8, 16, true, 3);
  case BuiltinType::SveUint8x3:
    return SVE_INT_ELTTY(8, 16, false, 3);
  case BuiltinType::SveInt8x4:
    return SVE_INT_ELTTY(8, 16, true, 4);
  case BuiltinType::SveUint8x4:
    return SVE_INT_ELTTY(8, 16, false, 4);
  case BuiltinType::SveInt16:
    return SVE_INT_ELTTY(16, 8, true, 1);
  case BuiltinType::SveUint16:
    return SVE_INT_ELTTY(16, 8, false, 1);
  case BuiltinType::SveInt16x2:
    return SVE_INT_ELTTY(16, 8, true, 2);
  case BuiltinType::SveUint16x2:
    return SVE_INT_ELTTY(16, 8, false, 2);
  case BuiltinType::SveInt16x3:
    return SVE_INT_ELTTY(16, 8, true, 3);
  case BuiltinType::SveUint16x3:
    return SVE_INT_ELTTY(16, 8, false, 3);
  case BuiltinType::SveInt16x4:
    return SVE_INT_ELTTY(16, 8, true, 4);
  case BuiltinType::SveUint16x4:
    return SVE_INT_ELTTY(16, 8, false, 4);
  case BuiltinType::SveInt32:
    return SVE_INT_ELTTY(32, 4, true, 1);
  case BuiltinType::SveUint32:
    return SVE_INT_ELTTY(32, 4, false, 1);
  case BuiltinType::SveInt32x2:
    return SVE_INT_ELTTY(32, 4, true, 2);
  case BuiltinType::SveUint32x2:
    return SVE_INT_ELTTY(32, 4, false, 2);
  case BuiltinType::SveInt32x3:
    return SVE_INT_ELTTY(32, 4, true, 3);
  case BuiltinType::SveUint32x3:
    return SVE_INT_ELTTY(32, 4, false, 3);
  case BuiltinType::SveInt32x4:
    return SVE_INT_ELTTY(32, 4, true, 4);
  case BuiltinType::SveUint32x4:
    return SVE_INT_ELTTY(32, 4, false, 4);
  case BuiltinType::SveInt64:
    return SVE_INT_ELTTY(64, 2, true, 1);
  case BuiltinType::SveUint64:
    return SVE_INT_ELTTY(64, 2, false, 1);
  case BuiltinType::SveInt64x2:
    return SVE_INT_ELTTY(64, 2, true, 2);
  case BuiltinType::SveUint64x2:
    return SVE_INT_ELTTY(64, 2, false, 2);
  case BuiltinType::SveInt64x3:
    return SVE_INT_ELTTY(64, 2, true, 3);
  case BuiltinType::SveUint64x3:
    return SVE_INT_ELTTY(64, 2, false, 3);
  case BuiltinType::SveInt64x4:
    return SVE_INT_ELTTY(64, 2, true, 4);
  case BuiltinType::SveUint64x4:
    return SVE_INT_ELTTY(64, 2, false, 4);
  case BuiltinType::SveBool:
    return SVE_ELTTY(BoolTy, 16, 1);
  case BuiltinType::SveFloat16:
    return SVE_ELTTY(HalfTy, 8, 1);
  case BuiltinType::SveFloat16x2:
    return SVE_ELTTY(HalfTy, 8, 2);
  case BuiltinType::SveFloat16x3:
    return SVE_ELTTY(HalfTy, 8, 3);
  case BuiltinType::SveFloat16x4:
    return SVE_ELTTY(HalfTy, 8, 4);
  case BuiltinType::SveFloat32:
    return SVE_ELTTY(FloatTy, 4, 1);
  case BuiltinType::SveFloat32x2:
    return SVE_ELTTY(FloatTy, 4, 2);
  case BuiltinType::SveFloat32x3:
    return SVE_ELTTY(FloatTy, 4, 3);
  case BuiltinType::SveFloat32x4:
    return SVE_ELTTY(FloatTy, 4, 4);
  case BuiltinType::SveFloat64:
    return SVE_ELTTY(DoubleTy, 2, 1);
  case BuiltinType::SveFloat64x2:
    return SVE_ELTTY(DoubleTy, 2, 2);
  case BuiltinType::SveFloat64x3:
    return SVE_ELTTY(DoubleTy, 2, 3);
  case BuiltinType::SveFloat64x4:
    return SVE_ELTTY(DoubleTy, 2, 4);
  case BuiltinType::SveBFloat16:
    return SVE_ELTTY(BFloat16Ty, 8, 1);
  case BuiltinType::SveBFloat16x2:
    return SVE_ELTTY(BFloat16Ty, 8, 2);
  case BuiltinType::SveBFloat16x3:
    return SVE_ELTTY(BFloat16Ty, 8, 3);
  case BuiltinType::SveBFloat16x4:
    return SVE_ELTTY(BFloat16Ty, 8, 4);
  // RISC-V vector builtins: case bodies are generated from the .def file.
#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF,         \
                            IsSigned)                                          \
  case BuiltinType::Id:                                                        \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                           \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)       \
  case BuiltinType::Id:                                                        \
    return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy),    \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  case BuiltinType::Id:                                                        \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
#include "clang/Basic/RISCVVTypes.def"
  }
}

/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
QualType ASTContext::getScalableVectorType(QualType EltTy,
                                           unsigned NumElts) const {
  // Matches the requested element type / element count against the target's
  // scalable-vector singleton types (AArch64 SVE or RISC-V V); returns a null
  // QualType when nothing matches.
  if (Target->hasAArch64SVETypes()) {
    uint64_t EltTySize = getTypeSize(EltTy);
    // Each .def entry expands to a guarded return of the singleton: signedness
    // must match for integer elements, and bfloat16 is distinguished from
    // other floating-point element types.
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits,    \
                        IsSigned, IsFP, IsBF)                                  \
  if (!EltTy->isBooleanType() &&                                               \
      ((EltTy->hasIntegerRepresentation() &&                                   \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||                \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&      \
        IsFP && !IsBF) ||                                                      \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&       \
        IsBF && !IsFP)) &&                                                     \
      EltTySize == ElBits && NumElts == NumEls) {                              \
    return SingletonId;                                                        \
  }
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls)         \
  if (EltTy->isBooleanType() && NumElts == NumEls)                             \
    return SingletonId;
#include "clang/Basic/AArch64SVEACLETypes.def"
  } else if (Target->hasRISCVVTypes()) {
    uint64_t EltTySize = getTypeSize(EltTy);
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned,   \
                        IsFP)                                                  \
  if (!EltTy->isBooleanType() &&                                               \
      ((EltTy->hasIntegerRepresentation() &&                                   \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||                \
       (EltTy->hasFloatingRepresentation() && IsFP)) &&                        \
      EltTySize == ElBits && NumElts == NumEls)                                \
    return SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  if (EltTy->isBooleanType() && NumElts == NumEls)                             \
    return SingletonId;
#include "clang/Basic/RISCVVTypes.def"
  }
  return QualType();
}

/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
4039 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, 4040 VectorType::VectorKind VecKind) const { 4041 assert(vecType->isBuiltinType() || 4042 (vecType->isBitIntType() && 4043 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4044 llvm::isPowerOf2_32(vecType->getAs<BitIntType>()->getNumBits()) && 4045 vecType->getAs<BitIntType>()->getNumBits() >= 8)); 4046 4047 // Check if we've already instantiated a vector of this type. 4048 llvm::FoldingSetNodeID ID; 4049 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); 4050 4051 void *InsertPos = nullptr; 4052 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4053 return QualType(VTP, 0); 4054 4055 // If the element type isn't canonical, this won't be a canonical type either, 4056 // so fill in the canonical type field. 4057 QualType Canonical; 4058 if (!vecType.isCanonical()) { 4059 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); 4060 4061 // Get the new insert position for the node we care about. 
4062 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4063 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4064 } 4065 auto *New = new (*this, TypeAlignment) 4066 VectorType(vecType, NumElts, Canonical, VecKind); 4067 VectorTypes.InsertNode(New, InsertPos); 4068 Types.push_back(New); 4069 return QualType(New, 0); 4070 } 4071 4072 QualType 4073 ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, 4074 SourceLocation AttrLoc, 4075 VectorType::VectorKind VecKind) const { 4076 llvm::FoldingSetNodeID ID; 4077 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, 4078 VecKind); 4079 void *InsertPos = nullptr; 4080 DependentVectorType *Canon = 4081 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4082 DependentVectorType *New; 4083 4084 if (Canon) { 4085 New = new (*this, TypeAlignment) DependentVectorType( 4086 *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); 4087 } else { 4088 QualType CanonVecTy = getCanonicalType(VecType); 4089 if (CanonVecTy == VecType) { 4090 New = new (*this, TypeAlignment) DependentVectorType( 4091 *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind); 4092 4093 DependentVectorType *CanonCheck = 4094 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4095 assert(!CanonCheck && 4096 "Dependent-sized vector_size canonical type broken"); 4097 (void)CanonCheck; 4098 DependentVectorTypes.InsertNode(New, InsertPos); 4099 } else { 4100 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, 4101 SourceLocation(), VecKind); 4102 New = new (*this, TypeAlignment) DependentVectorType( 4103 *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind); 4104 } 4105 } 4106 4107 Types.push_back(New); 4108 return QualType(New, 0); 4109 } 4110 4111 /// getExtVectorType - Return the unique reference to an extended vector type of 4112 /// the specified element type and size. VectorType must be a built-in type. 
4113 QualType ASTContext::getExtVectorType(QualType vecType, 4114 unsigned NumElts) const { 4115 assert(vecType->isBuiltinType() || vecType->isDependentType() || 4116 (vecType->isBitIntType() && 4117 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4118 llvm::isPowerOf2_32(vecType->getAs<BitIntType>()->getNumBits()) && 4119 vecType->getAs<BitIntType>()->getNumBits() >= 8)); 4120 4121 // Check if we've already instantiated a vector of this type. 4122 llvm::FoldingSetNodeID ID; 4123 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 4124 VectorType::GenericVector); 4125 void *InsertPos = nullptr; 4126 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4127 return QualType(VTP, 0); 4128 4129 // If the element type isn't canonical, this won't be a canonical type either, 4130 // so fill in the canonical type field. 4131 QualType Canonical; 4132 if (!vecType.isCanonical()) { 4133 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 4134 4135 // Get the new insert position for the node we care about. 
4136 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4137 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4138 } 4139 auto *New = new (*this, TypeAlignment) 4140 ExtVectorType(vecType, NumElts, Canonical); 4141 VectorTypes.InsertNode(New, InsertPos); 4142 Types.push_back(New); 4143 return QualType(New, 0); 4144 } 4145 4146 QualType 4147 ASTContext::getDependentSizedExtVectorType(QualType vecType, 4148 Expr *SizeExpr, 4149 SourceLocation AttrLoc) const { 4150 llvm::FoldingSetNodeID ID; 4151 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 4152 SizeExpr); 4153 4154 void *InsertPos = nullptr; 4155 DependentSizedExtVectorType *Canon 4156 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4157 DependentSizedExtVectorType *New; 4158 if (Canon) { 4159 // We already have a canonical version of this array type; use it as 4160 // the canonical type for a newly-built type. 4161 New = new (*this, TypeAlignment) 4162 DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0), 4163 SizeExpr, AttrLoc); 4164 } else { 4165 QualType CanonVecTy = getCanonicalType(vecType); 4166 if (CanonVecTy == vecType) { 4167 New = new (*this, TypeAlignment) 4168 DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr, 4169 AttrLoc); 4170 4171 DependentSizedExtVectorType *CanonCheck 4172 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4173 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 4174 (void)CanonCheck; 4175 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 4176 } else { 4177 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 4178 SourceLocation()); 4179 New = new (*this, TypeAlignment) DependentSizedExtVectorType( 4180 *this, vecType, CanonExtTy, SizeExpr, AttrLoc); 4181 } 4182 } 4183 4184 Types.push_back(New); 4185 return QualType(New, 0); 4186 } 4187 4188 QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned 
NumRows, 4189 unsigned NumColumns) const { 4190 llvm::FoldingSetNodeID ID; 4191 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns, 4192 Type::ConstantMatrix); 4193 4194 assert(MatrixType::isValidElementType(ElementTy) && 4195 "need a valid element type"); 4196 assert(ConstantMatrixType::isDimensionValid(NumRows) && 4197 ConstantMatrixType::isDimensionValid(NumColumns) && 4198 "need valid matrix dimensions"); 4199 void *InsertPos = nullptr; 4200 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos)) 4201 return QualType(MTP, 0); 4202 4203 QualType Canonical; 4204 if (!ElementTy.isCanonical()) { 4205 Canonical = 4206 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns); 4207 4208 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4209 assert(!NewIP && "Matrix type shouldn't already exist in the map"); 4210 (void)NewIP; 4211 } 4212 4213 auto *New = new (*this, TypeAlignment) 4214 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); 4215 MatrixTypes.InsertNode(New, InsertPos); 4216 Types.push_back(New); 4217 return QualType(New, 0); 4218 } 4219 4220 QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, 4221 Expr *RowExpr, 4222 Expr *ColumnExpr, 4223 SourceLocation AttrLoc) const { 4224 QualType CanonElementTy = getCanonicalType(ElementTy); 4225 llvm::FoldingSetNodeID ID; 4226 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr, 4227 ColumnExpr); 4228 4229 void *InsertPos = nullptr; 4230 DependentSizedMatrixType *Canon = 4231 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4232 4233 if (!Canon) { 4234 Canon = new (*this, TypeAlignment) DependentSizedMatrixType( 4235 *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc); 4236 #ifndef NDEBUG 4237 DependentSizedMatrixType *CanonCheck = 4238 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4239 assert(!CanonCheck && "Dependent-sized matrix canonical type broken"); 
4240 #endif 4241 DependentSizedMatrixTypes.InsertNode(Canon, InsertPos); 4242 Types.push_back(Canon); 4243 } 4244 4245 // Already have a canonical version of the matrix type 4246 // 4247 // If it exactly matches the requested type, use it directly. 4248 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr && 4249 Canon->getRowExpr() == ColumnExpr) 4250 return QualType(Canon, 0); 4251 4252 // Use Canon as the canonical type for newly-built type. 4253 DependentSizedMatrixType *New = new (*this, TypeAlignment) 4254 DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr, 4255 ColumnExpr, AttrLoc); 4256 Types.push_back(New); 4257 return QualType(New, 0); 4258 } 4259 4260 QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType, 4261 Expr *AddrSpaceExpr, 4262 SourceLocation AttrLoc) const { 4263 assert(AddrSpaceExpr->isInstantiationDependent()); 4264 4265 QualType canonPointeeType = getCanonicalType(PointeeType); 4266 4267 void *insertPos = nullptr; 4268 llvm::FoldingSetNodeID ID; 4269 DependentAddressSpaceType::Profile(ID, *this, canonPointeeType, 4270 AddrSpaceExpr); 4271 4272 DependentAddressSpaceType *canonTy = 4273 DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos); 4274 4275 if (!canonTy) { 4276 canonTy = new (*this, TypeAlignment) 4277 DependentAddressSpaceType(*this, canonPointeeType, 4278 QualType(), AddrSpaceExpr, AttrLoc); 4279 DependentAddressSpaceTypes.InsertNode(canonTy, insertPos); 4280 Types.push_back(canonTy); 4281 } 4282 4283 if (canonPointeeType == PointeeType && 4284 canonTy->getAddrSpaceExpr() == AddrSpaceExpr) 4285 return QualType(canonTy, 0); 4286 4287 auto *sugaredType 4288 = new (*this, TypeAlignment) 4289 DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0), 4290 AddrSpaceExpr, AttrLoc); 4291 Types.push_back(sugaredType); 4292 return QualType(sugaredType, 0); 4293 } 4294 4295 /// Determine whether \p T is canonical as the result type of a function. 
static bool isCanonicalResultType(QualType T) {
  // Canonical function result types additionally exclude ARC ownership
  // qualifiers other than __unsafe_unretained.
  return T.isCanonical() &&
         (T.getObjCLifetime() == Qualifiers::OCL_None ||
          T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
}

/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
  // functionality creates a function without a prototype regardless of
  // language mode (so it makes them even in C++). Once the rewriter has been
  // fixed, this assertion can be enabled again.
  //assert(!LangOpts.requiresStrictPrototypes() &&
  //       "strict prototypes are disabled");

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultTy, Info);

  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  QualType Canonical;
  if (!isCanonicalResultType(ResultTy)) {
    Canonical =
      getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info);

    // Get the new insert position for the node we care about; the recursive
    // call may have invalidated the previously captured InsertPos.
    FunctionNoProtoType *NewIP =
      FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, TypeAlignment)
    FunctionNoProtoType(ResultTy, Canonical, Info);
  Types.push_back(New);
  FunctionNoProtoTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

CanQualType
ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
  CanQualType CanResultType = getCanonicalType(ResultType);

  // Canonical result types do not have ARC lifetime qualifiers.
  if (CanResultType.getQualifiers().hasObjCLifetime()) {
    Qualifiers Qs = CanResultType.getQualifiers();
    Qs.removeObjCLifetime();
    return CanQualType::CreateUnsafe(
             getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
  }

  return CanResultType;
}

// Returns true when \p ESI is already in the form that the canonical
// function type would carry (which depends on whether noexcept is part of
// the type, i.e. on C++17 mode).
static bool isCanonicalExceptionSpecification(
    const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
  if (ESI.Type == EST_None)
    return true;
  if (!NoexceptInType)
    return false;

  // C++17 onwards: exception specification is part of the type, as a simple
  // boolean "can this function type throw".
  if (ESI.Type == EST_BasicNoexcept)
    return true;

  // A noexcept(expr) specification is (possibly) canonical if expr is
  // value-dependent.
  if (ESI.Type == EST_DependentNoexcept)
    return true;

  // A dynamic exception specification is canonical if it only contains pack
  // expansions (so we can't tell whether it's non-throwing) and all its
  // contained types are canonical.
  if (ESI.Type == EST_Dynamic) {
    bool AnyPackExpansions = false;
    for (QualType ET : ESI.Exceptions) {
      if (!ET.isCanonical())
        return false;
      if (ET->getAs<PackExpansionType>())
        AnyPackExpansions = true;
    }
    return AnyPackExpansions;
  }

  return false;
}

QualType ASTContext::getFunctionTypeInternal(
    QualType ResultTy, ArrayRef<QualType> ArgArray,
    const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
  size_t NumArgs = ArgArray.size();

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
                             *this, true);

  QualType Canonical;
  bool Unique = false;

  void *InsertPos = nullptr;
  if (FunctionProtoType *FPT =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    QualType Existing = QualType(FPT, 0);

    // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
    // it so long as our exception specification doesn't contain a dependent
    // noexcept expression, or we're just looking for a canonical type.
    // Otherwise, we're going to need to create a type
    // sugar node to hold the concrete expression.
    if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
        EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
      return Existing;

    // We need a new type sugar node for this one, to hold the new noexcept
    // expression. We do no canonicalization here, but that's OK since we don't
    // expect to see the same noexcept expression much more than once.
    Canonical = getCanonicalType(Existing);
    Unique = true;
  }

  bool NoexceptInType = getLangOpts().CPlusPlus17;
  bool IsCanonicalExceptionSpec =
      isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);

  // Determine whether the type being created is already canonical or not.
  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
                     isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
    if (!ArgArray[i].isCanonicalAsParam())
      isCanonical = false;

  if (OnlyWantCanonical)
    assert(isCanonical &&
           "given non-canonical parameters constructing canonical type");

  // If this type isn't canonical, get the canonical version of it if we don't
  // already have it. The exception spec is only partially part of the
  // canonical type, and only in C++17 onwards.
  if (!isCanonical && Canonical.isNull()) {
    SmallVector<QualType, 16> CanonicalArgs;
    CanonicalArgs.reserve(NumArgs);
    for (unsigned i = 0; i != NumArgs; ++i)
      CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));

    llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
    FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
    CanonicalEPI.HasTrailingReturn = false;

    if (IsCanonicalExceptionSpec) {
      // Exception spec is already OK.
    } else if (NoexceptInType) {
      switch (EPI.ExceptionSpec.Type) {
      case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
        // We don't know yet. It shouldn't matter what we pick here; no-one
        // should ever look at this.
        [[fallthrough]];
      case EST_None: case EST_MSAny: case EST_NoexceptFalse:
        CanonicalEPI.ExceptionSpec.Type = EST_None;
        break;

        // A dynamic exception specification is almost always "not noexcept",
        // with the exception that a pack expansion might expand to no types.
4467 case EST_Dynamic: { 4468 bool AnyPacks = false; 4469 for (QualType ET : EPI.ExceptionSpec.Exceptions) { 4470 if (ET->getAs<PackExpansionType>()) 4471 AnyPacks = true; 4472 ExceptionTypeStorage.push_back(getCanonicalType(ET)); 4473 } 4474 if (!AnyPacks) 4475 CanonicalEPI.ExceptionSpec.Type = EST_None; 4476 else { 4477 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; 4478 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; 4479 } 4480 break; 4481 } 4482 4483 case EST_DynamicNone: 4484 case EST_BasicNoexcept: 4485 case EST_NoexceptTrue: 4486 case EST_NoThrow: 4487 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; 4488 break; 4489 4490 case EST_DependentNoexcept: 4491 llvm_unreachable("dependent noexcept is already canonical"); 4492 } 4493 } else { 4494 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); 4495 } 4496 4497 // Adjust the canonical function result type. 4498 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); 4499 Canonical = 4500 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); 4501 4502 // Get the new insert position for the node we care about. 4503 FunctionProtoType *NewIP = 4504 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4505 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4506 } 4507 4508 // Compute the needed size to hold this FunctionProtoType and the 4509 // various trailing objects. 4510 auto ESH = FunctionProtoType::getExceptionSpecSize( 4511 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); 4512 size_t Size = FunctionProtoType::totalSizeToAlloc< 4513 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, 4514 FunctionType::ExceptionType, Expr *, FunctionDecl *, 4515 FunctionProtoType::ExtParameterInfo, Qualifiers>( 4516 NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(), 4517 ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, 4518 EPI.ExtParameterInfos ? 
NumArgs : 0, 4519 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); 4520 4521 auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment); 4522 FunctionProtoType::ExtProtoInfo newEPI = EPI; 4523 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); 4524 Types.push_back(FTP); 4525 if (!Unique) 4526 FunctionProtoTypes.InsertNode(FTP, InsertPos); 4527 return QualType(FTP, 0); 4528 } 4529 4530 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { 4531 llvm::FoldingSetNodeID ID; 4532 PipeType::Profile(ID, T, ReadOnly); 4533 4534 void *InsertPos = nullptr; 4535 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos)) 4536 return QualType(PT, 0); 4537 4538 // If the pipe element type isn't canonical, this won't be a canonical type 4539 // either, so fill in the canonical type field. 4540 QualType Canonical; 4541 if (!T.isCanonical()) { 4542 Canonical = getPipeType(getCanonicalType(T), ReadOnly); 4543 4544 // Get the new insert position for the node we care about. 4545 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos); 4546 assert(!NewIP && "Shouldn't be in the map!"); 4547 (void)NewIP; 4548 } 4549 auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly); 4550 Types.push_back(New); 4551 PipeTypes.InsertNode(New, InsertPos); 4552 return QualType(New, 0); 4553 } 4554 4555 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { 4556 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. 4557 return LangOpts.OpenCL ? 
getAddrSpaceQualType(Ty, LangAS::opencl_constant) 4558 : Ty; 4559 } 4560 4561 QualType ASTContext::getReadPipeType(QualType T) const { 4562 return getPipeType(T, true); 4563 } 4564 4565 QualType ASTContext::getWritePipeType(QualType T) const { 4566 return getPipeType(T, false); 4567 } 4568 4569 QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const { 4570 llvm::FoldingSetNodeID ID; 4571 BitIntType::Profile(ID, IsUnsigned, NumBits); 4572 4573 void *InsertPos = nullptr; 4574 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4575 return QualType(EIT, 0); 4576 4577 auto *New = new (*this, TypeAlignment) BitIntType(IsUnsigned, NumBits); 4578 BitIntTypes.InsertNode(New, InsertPos); 4579 Types.push_back(New); 4580 return QualType(New, 0); 4581 } 4582 4583 QualType ASTContext::getDependentBitIntType(bool IsUnsigned, 4584 Expr *NumBitsExpr) const { 4585 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); 4586 llvm::FoldingSetNodeID ID; 4587 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); 4588 4589 void *InsertPos = nullptr; 4590 if (DependentBitIntType *Existing = 4591 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4592 return QualType(Existing, 0); 4593 4594 auto *New = new (*this, TypeAlignment) 4595 DependentBitIntType(*this, IsUnsigned, NumBitsExpr); 4596 DependentBitIntTypes.InsertNode(New, InsertPos); 4597 4598 Types.push_back(New); 4599 return QualType(New, 0); 4600 } 4601 4602 #ifndef NDEBUG 4603 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 4604 if (!isa<CXXRecordDecl>(D)) return false; 4605 const auto *RD = cast<CXXRecordDecl>(D); 4606 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 4607 return true; 4608 if (RD->getDescribedClassTemplate() && 4609 !isa<ClassTemplateSpecializationDecl>(RD)) 4610 return true; 4611 return false; 4612 } 4613 #endif 4614 4615 /// getInjectedClassNameType - Return the unique reference to the 4616 /// injected class name 
type for the specified templated declaration. 4617 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 4618 QualType TST) const { 4619 assert(NeedsInjectedClassNameType(Decl)); 4620 if (Decl->TypeForDecl) { 4621 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4622 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { 4623 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 4624 Decl->TypeForDecl = PrevDecl->TypeForDecl; 4625 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4626 } else { 4627 Type *newType = 4628 new (*this, TypeAlignment) InjectedClassNameType(Decl, TST); 4629 Decl->TypeForDecl = newType; 4630 Types.push_back(newType); 4631 } 4632 return QualType(Decl->TypeForDecl, 0); 4633 } 4634 4635 /// getTypeDeclType - Return the unique reference to the type for the 4636 /// specified type declaration. 4637 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 4638 assert(Decl && "Passed null for Decl param"); 4639 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 4640 4641 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 4642 return getTypedefType(Typedef); 4643 4644 assert(!isa<TemplateTypeParmDecl>(Decl) && 4645 "Template type parameter types are always available."); 4646 4647 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) { 4648 assert(Record->isFirstDecl() && "struct/union has previous declaration"); 4649 assert(!NeedsInjectedClassNameType(Record)); 4650 return getRecordType(Record); 4651 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) { 4652 assert(Enum->isFirstDecl() && "enum has previous declaration"); 4653 return getEnumType(Enum); 4654 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 4655 return getUnresolvedUsingType(Using); 4656 } else 4657 llvm_unreachable("TypeDecl without a type?"); 4658 4659 return QualType(Decl->TypeForDecl, 0); 4660 } 4661 4662 /// getTypedefType - Return the unique reference to the type 
/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl.
/// \param Underlying if non-null, an alternative underlying type for a
/// "divergent" typedef whose type differs from (but is the same canonical
/// type as) the declaration's written underlying type.
QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
                                    QualType Underlying) const {
  if (!Decl->TypeForDecl) {
    // Common case: the first (non-divergent) type for this typedef. Cache it
    // on the declaration itself; no folding-set lookup is needed.
    if (Underlying.isNull())
      Underlying = Decl->getUnderlyingType();
    auto *NewType = new (*this, TypeAlignment) TypedefType(
        Type::Typedef, Decl, QualType(), getCanonicalType(Underlying));
    Decl->TypeForDecl = NewType;
    Types.push_back(NewType);
    return QualType(NewType, 0);
  }
  if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying)
    return QualType(Decl->TypeForDecl, 0);
  assert(hasSameType(Decl->getUnderlyingType(), Underlying));

  // Divergent case: same decl, different (but canonically equal) underlying
  // type. These are uniqued through the folding set.
  llvm::FoldingSetNodeID ID;
  TypedefType::Profile(ID, Decl, Underlying);

  void *InsertPos = nullptr;
  if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    assert(!T->typeMatchesDecl() &&
           "non-divergent case should be handled with TypeDecl");
    return QualType(T, 0);
  }

  // Divergent nodes store the underlying type as a trailing object.
  void *Mem =
      Allocate(TypedefType::totalSizeToAlloc<QualType>(true), TypeAlignment);
  auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying,
                                        getCanonicalType(Underlying));
  TypedefTypes.InsertNode(NewType, InsertPos);
  Types.push_back(NewType);
  return QualType(NewType, 0);
}

/// Return the unique UsingType sugar node for a type found through a
/// using-shadow declaration.
QualType ASTContext::getUsingType(const UsingShadowDecl *Found,
                                  QualType Underlying) const {
  llvm::FoldingSetNodeID ID;
  UsingType::Profile(ID, Found, Underlying);

  void *InsertPos = nullptr;
  if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  const Type *TypeForDecl =
      cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl();

  assert(!Underlying.hasLocalQualifiers());
  QualType Canon = Underlying->getCanonicalTypeInternal();
  assert(TypeForDecl->getCanonicalTypeInternal() == Canon);

  // When the underlying type is exactly the target decl's own type, it need
  // not be stored (the trailing QualType slot is omitted).
  if (Underlying.getTypePtr() == TypeForDecl)
    Underlying = QualType();
  void *Mem =
      Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()),
               TypeAlignment);
  UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon);
  Types.push_back(NewType);
  UsingTypes.InsertNode(NewType, InsertPos);
  return QualType(NewType, 0);
}

/// Return the unique reference to the type for the specified record decl,
/// sharing the cached type with previous redeclarations.
QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  auto *newType = new (*this, TypeAlignment) RecordType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

/// Return the unique reference to the type for the specified enum decl,
/// sharing the cached type with previous redeclarations.
QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  auto *newType = new (*this, TypeAlignment) EnumType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

/// Return the unique reference to the type for the given unresolved
/// using-typename declaration, sharing the cached type with its canonical
/// declaration.
QualType ASTContext::getUnresolvedUsingType(
    const UnresolvedUsingTypenameDecl *Decl) const {
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  if (const UnresolvedUsingTypenameDecl *CanonicalDecl =
          Decl->getCanonicalDecl())
    if (CanonicalDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0);

  Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

/// Return the uniqued AttributedType sugar node wrapping \p modifiedType with
/// attribute \p attrKind; \p equivalentType supplies the semantics (and the
/// canonical type).
QualType ASTContext::getAttributedType(attr::Kind attrKind,
                                       QualType modifiedType,
                                       QualType equivalentType) const {
  llvm::FoldingSetNodeID id;
  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);

  void *insertPos = nullptr;
  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
  if (type) return QualType(type, 0);

  QualType canon = getCanonicalType(equivalentType);
  type = new (*this, TypeAlignment)
      AttributedType(canon, attrKind, modifiedType, equivalentType);

  Types.push_back(type);
  AttributedTypes.InsertNode(type, insertPos);

  return QualType(type, 0);
}

/// Return the uniqued BTFTagAttributedType sugar node wrapping \p Wrapped
/// with the given btf_type_tag attribute.
QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
                                             QualType Wrapped) {
  llvm::FoldingSetNodeID ID;
  BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);

  void *InsertPos = nullptr;
  BTFTagAttributedType *Ty =
      BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (Ty)
    return QualType(Ty, 0);

  QualType Canon = getCanonicalType(Wrapped);
  Ty = new (*this, TypeAlignment) BTFTagAttributedType(Canon, Wrapped, BTFAttr);

  Types.push_back(Ty);
  BTFTagAttributedTypes.InsertNode(Ty, InsertPos);

  return QualType(Ty, 0);
}

/// Retrieve a substitution-result type.
4808 QualType ASTContext::getSubstTemplateTypeParmType( 4809 QualType Replacement, Decl *AssociatedDecl, unsigned Index, 4810 std::optional<unsigned> PackIndex) const { 4811 llvm::FoldingSetNodeID ID; 4812 SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index, 4813 PackIndex); 4814 void *InsertPos = nullptr; 4815 SubstTemplateTypeParmType *SubstParm = 4816 SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 4817 4818 if (!SubstParm) { 4819 void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>( 4820 !Replacement.isCanonical()), 4821 TypeAlignment); 4822 SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl, 4823 Index, PackIndex); 4824 Types.push_back(SubstParm); 4825 SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos); 4826 } 4827 4828 return QualType(SubstParm, 0); 4829 } 4830 4831 /// Retrieve a 4832 QualType 4833 ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl, 4834 unsigned Index, bool Final, 4835 const TemplateArgument &ArgPack) { 4836 #ifndef NDEBUG 4837 for (const auto &P : ArgPack.pack_elements()) 4838 assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type"); 4839 #endif 4840 4841 llvm::FoldingSetNodeID ID; 4842 SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final, 4843 ArgPack); 4844 void *InsertPos = nullptr; 4845 if (SubstTemplateTypeParmPackType *SubstParm = 4846 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos)) 4847 return QualType(SubstParm, 0); 4848 4849 QualType Canon; 4850 { 4851 TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack); 4852 if (!AssociatedDecl->isCanonicalDecl() || 4853 !CanonArgPack.structurallyEquals(ArgPack)) { 4854 Canon = getSubstTemplateTypeParmPackType( 4855 AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack); 4856 [[maybe_unused]] const auto *Nothing = 4857 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos); 4858 assert(!Nothing); 
4859 } 4860 } 4861 4862 auto *SubstParm = new (*this, TypeAlignment) SubstTemplateTypeParmPackType( 4863 Canon, AssociatedDecl, Index, Final, ArgPack); 4864 Types.push_back(SubstParm); 4865 SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos); 4866 return QualType(SubstParm, 0); 4867 } 4868 4869 /// Retrieve the template type parameter type for a template 4870 /// parameter or parameter pack with the given depth, index, and (optionally) 4871 /// name. 4872 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index, 4873 bool ParameterPack, 4874 TemplateTypeParmDecl *TTPDecl) const { 4875 llvm::FoldingSetNodeID ID; 4876 TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl); 4877 void *InsertPos = nullptr; 4878 TemplateTypeParmType *TypeParm 4879 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 4880 4881 if (TypeParm) 4882 return QualType(TypeParm, 0); 4883 4884 if (TTPDecl) { 4885 QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack); 4886 TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon); 4887 4888 TemplateTypeParmType *TypeCheck 4889 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 4890 assert(!TypeCheck && "Template type parameter canonical type broken"); 4891 (void)TypeCheck; 4892 } else 4893 TypeParm = new (*this, TypeAlignment) 4894 TemplateTypeParmType(Depth, Index, ParameterPack); 4895 4896 Types.push_back(TypeParm); 4897 TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos); 4898 4899 return QualType(TypeParm, 0); 4900 } 4901 4902 TypeSourceInfo * 4903 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name, 4904 SourceLocation NameLoc, 4905 const TemplateArgumentListInfo &Args, 4906 QualType Underlying) const { 4907 assert(!Name.getAsDependentTemplateName() && 4908 "No dependent template names here!"); 4909 QualType TST = 4910 getTemplateSpecializationType(Name, Args.arguments(), Underlying); 4911 4912 TypeSourceInfo *DI = 
CreateTypeSourceInfo(TST); 4913 TemplateSpecializationTypeLoc TL = 4914 DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>(); 4915 TL.setTemplateKeywordLoc(SourceLocation()); 4916 TL.setTemplateNameLoc(NameLoc); 4917 TL.setLAngleLoc(Args.getLAngleLoc()); 4918 TL.setRAngleLoc(Args.getRAngleLoc()); 4919 for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) 4920 TL.setArgLocInfo(i, Args[i].getLocInfo()); 4921 return DI; 4922 } 4923 4924 QualType 4925 ASTContext::getTemplateSpecializationType(TemplateName Template, 4926 ArrayRef<TemplateArgumentLoc> Args, 4927 QualType Underlying) const { 4928 assert(!Template.getAsDependentTemplateName() && 4929 "No dependent template names here!"); 4930 4931 SmallVector<TemplateArgument, 4> ArgVec; 4932 ArgVec.reserve(Args.size()); 4933 for (const TemplateArgumentLoc &Arg : Args) 4934 ArgVec.push_back(Arg.getArgument()); 4935 4936 return getTemplateSpecializationType(Template, ArgVec, Underlying); 4937 } 4938 4939 #ifndef NDEBUG 4940 static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) { 4941 for (const TemplateArgument &Arg : Args) 4942 if (Arg.isPackExpansion()) 4943 return true; 4944 4945 return true; 4946 } 4947 #endif 4948 4949 QualType 4950 ASTContext::getTemplateSpecializationType(TemplateName Template, 4951 ArrayRef<TemplateArgument> Args, 4952 QualType Underlying) const { 4953 assert(!Template.getAsDependentTemplateName() && 4954 "No dependent template names here!"); 4955 // Look through qualified template names. 4956 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4957 Template = QTN->getUnderlyingTemplate(); 4958 4959 const auto *TD = Template.getAsTemplateDecl(); 4960 bool IsTypeAlias = TD && TD->isTypeAlias(); 4961 QualType CanonType; 4962 if (!Underlying.isNull()) 4963 CanonType = getCanonicalType(Underlying); 4964 else { 4965 // We can get here with an alias template when the specialization contains 4966 // a pack expansion that does not match up with a parameter pack. 
4967 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && 4968 "Caller must compute aliased type"); 4969 IsTypeAlias = false; 4970 CanonType = getCanonicalTemplateSpecializationType(Template, Args); 4971 } 4972 4973 // Allocate the (non-canonical) template specialization type, but don't 4974 // try to unique it: these types typically have location information that 4975 // we don't unique and don't want to lose. 4976 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 4977 sizeof(TemplateArgument) * Args.size() + 4978 (IsTypeAlias? sizeof(QualType) : 0), 4979 TypeAlignment); 4980 auto *Spec 4981 = new (Mem) TemplateSpecializationType(Template, Args, CanonType, 4982 IsTypeAlias ? Underlying : QualType()); 4983 4984 Types.push_back(Spec); 4985 return QualType(Spec, 0); 4986 } 4987 4988 QualType ASTContext::getCanonicalTemplateSpecializationType( 4989 TemplateName Template, ArrayRef<TemplateArgument> Args) const { 4990 assert(!Template.getAsDependentTemplateName() && 4991 "No dependent template names here!"); 4992 4993 // Look through qualified template names. 4994 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4995 Template = TemplateName(QTN->getUnderlyingTemplate()); 4996 4997 // Build the canonical template specialization type. 4998 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 4999 bool AnyNonCanonArgs = false; 5000 auto CanonArgs = 5001 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 5002 5003 // Determine whether this canonical template specialization type already 5004 // exists. 5005 llvm::FoldingSetNodeID ID; 5006 TemplateSpecializationType::Profile(ID, CanonTemplate, 5007 CanonArgs, *this); 5008 5009 void *InsertPos = nullptr; 5010 TemplateSpecializationType *Spec 5011 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5012 5013 if (!Spec) { 5014 // Allocate a new canonical template specialization type. 
5015 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 5016 sizeof(TemplateArgument) * CanonArgs.size()), 5017 TypeAlignment); 5018 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 5019 CanonArgs, 5020 QualType(), QualType()); 5021 Types.push_back(Spec); 5022 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 5023 } 5024 5025 assert(Spec->isDependentType() && 5026 "Non-dependent template-id type must have a canonical type"); 5027 return QualType(Spec, 0); 5028 } 5029 5030 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 5031 NestedNameSpecifier *NNS, 5032 QualType NamedType, 5033 TagDecl *OwnedTagDecl) const { 5034 llvm::FoldingSetNodeID ID; 5035 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); 5036 5037 void *InsertPos = nullptr; 5038 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5039 if (T) 5040 return QualType(T, 0); 5041 5042 QualType Canon = NamedType; 5043 if (!Canon.isCanonical()) { 5044 Canon = getCanonicalType(NamedType); 5045 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5046 assert(!CheckT && "Elaborated canonical type broken"); 5047 (void)CheckT; 5048 } 5049 5050 void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), 5051 TypeAlignment); 5052 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); 5053 5054 Types.push_back(T); 5055 ElaboratedTypes.InsertNode(T, InsertPos); 5056 return QualType(T, 0); 5057 } 5058 5059 QualType 5060 ASTContext::getParenType(QualType InnerType) const { 5061 llvm::FoldingSetNodeID ID; 5062 ParenType::Profile(ID, InnerType); 5063 5064 void *InsertPos = nullptr; 5065 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5066 if (T) 5067 return QualType(T, 0); 5068 5069 QualType Canon = InnerType; 5070 if (!Canon.isCanonical()) { 5071 Canon = getCanonicalType(InnerType); 5072 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5073 
assert(!CheckT && "Paren canonical type broken"); 5074 (void)CheckT; 5075 } 5076 5077 T = new (*this, TypeAlignment) ParenType(InnerType, Canon); 5078 Types.push_back(T); 5079 ParenTypes.InsertNode(T, InsertPos); 5080 return QualType(T, 0); 5081 } 5082 5083 QualType 5084 ASTContext::getMacroQualifiedType(QualType UnderlyingTy, 5085 const IdentifierInfo *MacroII) const { 5086 QualType Canon = UnderlyingTy; 5087 if (!Canon.isCanonical()) 5088 Canon = getCanonicalType(UnderlyingTy); 5089 5090 auto *newType = new (*this, TypeAlignment) 5091 MacroQualifiedType(UnderlyingTy, Canon, MacroII); 5092 Types.push_back(newType); 5093 return QualType(newType, 0); 5094 } 5095 5096 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 5097 NestedNameSpecifier *NNS, 5098 const IdentifierInfo *Name, 5099 QualType Canon) const { 5100 if (Canon.isNull()) { 5101 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5102 if (CanonNNS != NNS) 5103 Canon = getDependentNameType(Keyword, CanonNNS, Name); 5104 } 5105 5106 llvm::FoldingSetNodeID ID; 5107 DependentNameType::Profile(ID, Keyword, NNS, Name); 5108 5109 void *InsertPos = nullptr; 5110 DependentNameType *T 5111 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 5112 if (T) 5113 return QualType(T, 0); 5114 5115 T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon); 5116 Types.push_back(T); 5117 DependentNameTypes.InsertNode(T, InsertPos); 5118 return QualType(T, 0); 5119 } 5120 5121 QualType ASTContext::getDependentTemplateSpecializationType( 5122 ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, 5123 const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const { 5124 // TODO: avoid this copy 5125 SmallVector<TemplateArgument, 16> ArgCopy; 5126 for (unsigned I = 0, E = Args.size(); I != E; ++I) 5127 ArgCopy.push_back(Args[I].getArgument()); 5128 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); 5129 } 5130 5131 QualType 5132 
ASTContext::getDependentTemplateSpecializationType( 5133 ElaboratedTypeKeyword Keyword, 5134 NestedNameSpecifier *NNS, 5135 const IdentifierInfo *Name, 5136 ArrayRef<TemplateArgument> Args) const { 5137 assert((!NNS || NNS->isDependent()) && 5138 "nested-name-specifier must be dependent"); 5139 5140 llvm::FoldingSetNodeID ID; 5141 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 5142 Name, Args); 5143 5144 void *InsertPos = nullptr; 5145 DependentTemplateSpecializationType *T 5146 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5147 if (T) 5148 return QualType(T, 0); 5149 5150 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5151 5152 ElaboratedTypeKeyword CanonKeyword = Keyword; 5153 if (Keyword == ETK_None) CanonKeyword = ETK_Typename; 5154 5155 bool AnyNonCanonArgs = false; 5156 auto CanonArgs = 5157 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 5158 5159 QualType Canon; 5160 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 5161 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 5162 Name, 5163 CanonArgs); 5164 5165 // Find the insert position again. 
5166 [[maybe_unused]] auto *Nothing = 5167 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5168 assert(!Nothing && "canonical type broken"); 5169 } 5170 5171 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 5172 sizeof(TemplateArgument) * Args.size()), 5173 TypeAlignment); 5174 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 5175 Name, Args, Canon); 5176 Types.push_back(T); 5177 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 5178 return QualType(T, 0); 5179 } 5180 5181 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { 5182 TemplateArgument Arg; 5183 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { 5184 QualType ArgType = getTypeDeclType(TTP); 5185 if (TTP->isParameterPack()) 5186 ArgType = getPackExpansionType(ArgType, std::nullopt); 5187 5188 Arg = TemplateArgument(ArgType); 5189 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { 5190 QualType T = 5191 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this); 5192 // For class NTTPs, ensure we include the 'const' so the type matches that 5193 // of a real template argument. 5194 // FIXME: It would be more faithful to model this as something like an 5195 // lvalue-to-rvalue conversion applied to a const-qualified lvalue. 
5196 if (T->isRecordType()) 5197 T.addConst(); 5198 Expr *E = new (*this) DeclRefExpr( 5199 *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T, 5200 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); 5201 5202 if (NTTP->isParameterPack()) 5203 E = new (*this) 5204 PackExpansionExpr(DependentTy, E, NTTP->getLocation(), std::nullopt); 5205 Arg = TemplateArgument(E); 5206 } else { 5207 auto *TTP = cast<TemplateTemplateParmDecl>(Param); 5208 if (TTP->isParameterPack()) 5209 Arg = TemplateArgument(TemplateName(TTP), std::optional<unsigned>()); 5210 else 5211 Arg = TemplateArgument(TemplateName(TTP)); 5212 } 5213 5214 if (Param->isTemplateParameterPack()) 5215 Arg = TemplateArgument::CreatePackCopy(*this, Arg); 5216 5217 return Arg; 5218 } 5219 5220 void 5221 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, 5222 SmallVectorImpl<TemplateArgument> &Args) { 5223 Args.reserve(Args.size() + Params->size()); 5224 5225 for (NamedDecl *Param : *Params) 5226 Args.push_back(getInjectedTemplateArg(Param)); 5227 } 5228 5229 QualType ASTContext::getPackExpansionType(QualType Pattern, 5230 std::optional<unsigned> NumExpansions, 5231 bool ExpectPackInType) { 5232 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && 5233 "Pack expansions must expand one or more parameter packs"); 5234 5235 llvm::FoldingSetNodeID ID; 5236 PackExpansionType::Profile(ID, Pattern, NumExpansions); 5237 5238 void *InsertPos = nullptr; 5239 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5240 if (T) 5241 return QualType(T, 0); 5242 5243 QualType Canon; 5244 if (!Pattern.isCanonical()) { 5245 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, 5246 /*ExpectPackInType=*/false); 5247 5248 // Find the insert position again, in case we inserted an element into 5249 // PackExpansionTypes and invalidated our insert position. 
    PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  T = new (*this, TypeAlignment)
      PackExpansionType(Pattern, Canon, NumExpansions);
  Types.push_back(T);
  PackExpansionTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}

/// CmpProtocolNames - Comparison predicate for sorting protocols
/// alphabetically.
static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
                            ObjCProtocolDecl *const *RHS) {
  return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
}

/// Returns true if \p Protocols is already in the canonical form produced by
/// SortAndUniqueProtocols: strictly name-sorted, duplicate-free, and with
/// every entry being a canonical declaration.
static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
  if (Protocols.empty()) return true;

  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
    return false;

  // The strict '>= 0' also rejects adjacent duplicates.
  for (unsigned i = 1; i != Protocols.size(); ++i)
    if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
        Protocols[i]->getCanonicalDecl() != Protocols[i])
      return false;
  return true;
}

/// Put \p Protocols into canonical form: name-sorted, canonicalized,
/// duplicate-free (the form areSortedAndUniqued checks for).
static void
SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
  // Sort protocols, keyed by name.
  llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);

  // Canonicalize.
  for (ObjCProtocolDecl *&P : Protocols)
    P = P->getCanonicalDecl();

  // Remove duplicates.
  auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
  Protocols.erase(ProtocolsEnd, Protocols.end());
}

/// Convenience overload taking a raw pointer/count protocol list; forwards to
/// the main overload with no type arguments and isKindOf=false.
QualType ASTContext::getObjCObjectType(QualType BaseType,
                                       ObjCProtocolDecl * const *Protocols,
                                       unsigned NumProtocols) const {
  return getObjCObjectType(BaseType, {},
                           llvm::ArrayRef(Protocols, NumProtocols),
                           /*isKindOf=*/false);
}

/// Return the uniqued ObjCObjectType for the given base type, type arguments,
/// and protocol qualifiers.
QualType ASTContext::getObjCObjectType(
    QualType baseType,
    ArrayRef<QualType> typeArgs,
    ArrayRef<ObjCProtocolDecl *> protocols,
    bool isKindOf) const {
  // If the base type is an interface and there aren't any protocols or
  // type arguments to add, then the interface type will do just fine.
  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
      isa<ObjCInterfaceType>(baseType))
    return baseType;

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
  void *InsertPos = nullptr;
  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Determine the type arguments to be used for canonicalization,
  // which may be explicitly specified here or written on the base
  // type.
  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
  if (effectiveTypeArgs.empty()) {
    if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
      effectiveTypeArgs = baseObject->getTypeArgs();
  }

  // Build the canonical type, which has the canonical base type and a
  // sorted-and-uniqued list of protocols and the type arguments
  // canonicalized.
  QualType canonical;
  bool typeArgsAreCanonical = llvm::all_of(
      effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); });
  bool protocolsSorted = areSortedAndUniqued(protocols);
  // Only build a separate canonical node if anything is non-canonical.
  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
    // Determine the canonical type arguments.
    ArrayRef<QualType> canonTypeArgs;
    SmallVector<QualType, 4> canonTypeArgsVec;
    if (!typeArgsAreCanonical) {
      canonTypeArgsVec.reserve(effectiveTypeArgs.size());
      for (auto typeArg : effectiveTypeArgs)
        canonTypeArgsVec.push_back(getCanonicalType(typeArg));
      canonTypeArgs = canonTypeArgsVec;
    } else {
      canonTypeArgs = effectiveTypeArgs;
    }

    // Determine the canonical (sorted and uniqued) protocol list.
    ArrayRef<ObjCProtocolDecl *> canonProtocols;
    SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
    if (!protocolsSorted) {
      canonProtocolsVec.append(protocols.begin(), protocols.end());
      SortAndUniqueProtocols(canonProtocolsVec);
      canonProtocols = canonProtocolsVec;
    } else {
      canonProtocols = protocols;
    }

    canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
                                  canonProtocols, isKindOf);

    // Regenerate InsertPos: the recursive call above may have inserted into
    // the folding set and invalidated our previous insert position.
    ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // Single allocation sized for the node plus its type-argument and
  // protocol lists.
  unsigned size = sizeof(ObjCObjectTypeImpl);
  size += typeArgs.size() * sizeof(QualType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(size, TypeAlignment);
  auto *T =
      new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
                                   isKindOf);

  Types.push_back(T);
  ObjCObjectTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}

/// Apply Objective-C protocol qualifiers to the given type.
/// If this is for the canonical type of a type parameter, we can apply
/// protocol qualifiers on the ObjCObjectPointerType.
QualType
ASTContext::applyObjCProtocolQualifiers(QualType type,
    ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
    bool allowOnPointerType) const {
  hasError = false;

  // Type parameters: re-derive the type-parameter type with the protocols.
  if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
    return getObjCTypeParamType(objT->getDecl(), protocols);
  }

  // Apply protocol qualifiers to ObjCObjectPointerType.
  if (allowOnPointerType) {
    if (const auto *objPtr =
            dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
      const ObjCObjectType *objT = objPtr->getObjectType();
      // Merge protocol lists and construct ObjCObjectType.
      SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
      protocolsVec.append(objT->qual_begin(),
                          objT->qual_end());
      protocolsVec.append(protocols.begin(), protocols.end());
      // NOTE: deliberately shadows the parameter with the merged list.
      ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
      type = getObjCObjectType(
               objT->getBaseType(),
               objT->getTypeArgsAsWritten(),
               protocols,
               objT->isKindOfTypeAsWritten());
      return getObjCObjectPointerType(type);
    }
  }

  // Apply protocol qualifiers to ObjCObjectType.
  if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
    // FIXME: Check for protocols to which the class type is already
    // known to conform.

    return getObjCObjectType(objT->getBaseType(),
                             objT->getTypeArgsAsWritten(),
                             protocols,
                             objT->isKindOfTypeAsWritten());
  }

  // If the canonical type is ObjCObjectType, ...
  if (type->isObjCObjectType()) {
    // Silently overwrite any existing protocol qualifiers.
    // TODO: determine whether that's the right thing to do.

    // FIXME: Check for protocols to which the class type is already
    // known to conform.
    return getObjCObjectType(type, {}, protocols, false);
  }

  // id<protocol-list>
  if (type->isObjCIdType()) {
    const auto *objPtr = type->castAs<ObjCObjectPointerType>();
    type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols,
                             objPtr->isKindOfType());
    return getObjCObjectPointerType(type);
  }

  // Class<protocol-list>
  if (type->isObjCClassType()) {
    const auto *objPtr = type->castAs<ObjCObjectPointerType>();
    type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols,
                             objPtr->isKindOfType());
    return getObjCObjectPointerType(type);
  }

  // No other kind of type can carry protocol qualifiers; report the failure
  // to the caller and return the input unchanged.
  hasError = true;
  return type;
}

/// Return the uniqued ObjCTypeParamType for \p Decl with the given protocol
/// qualifiers applied.
QualType
ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                 ArrayRef<ObjCProtocolDecl *> protocols) const {
  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols);
  void *InsertPos = nullptr;
  if (ObjCTypeParamType *TypeParam =
          ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(TypeParam, 0);

  // We canonicalize to the underlying type.
  QualType Canonical = getCanonicalType(Decl->getUnderlyingType());
  if (!protocols.empty()) {
    // Apply the protocol qualifiers.
    bool hasError;
    Canonical = getCanonicalType(applyObjCProtocolQualifiers(
        Canonical, protocols, hasError, true /*allowOnPointerType*/));
    assert(!hasError && "Error when apply protocol qualifier to bound type");
  }

  // Allocate the node with trailing storage for the protocol list.
  unsigned size = sizeof(ObjCTypeParamType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(size, TypeAlignment);
  auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);

  Types.push_back(newType);
  ObjCTypeParamTypes.InsertNode(newType, InsertPos);
  return QualType(newType, 0);
}

/// Copy \p Orig's underlying (bound) type onto the redeclared type parameter
/// \p New, rebuilding New's TypeForDecl while preserving its own protocol
/// qualifiers.
void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
                                              ObjCTypeParamDecl *New) const {
  New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType()));
  // Update TypeForDecl after updating TypeSourceInfo.
  auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl());
  SmallVector<ObjCProtocolDecl *, 8> protocols;
  protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end());
  QualType UpdatedTy = getObjCTypeParamType(New, protocols);
  New->setTypeForDecl(UpdatedTy.getTypePtr());
}

/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
/// protocol list adopt all protocols in QT's qualified-id protocol
/// list.
bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
                                                ObjCInterfaceDecl *IC) {
  if (!QT->isObjCQualifiedIdType())
    return false;

  if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
    // If both the right and left sides have qualifiers.
    for (auto *Proto : OPT->quals()) {
      if (!IC->ClassImplementsProtocol(Proto, false))
        return false;
    }
    return true;
  }
  return false;
}

/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
/// QT's qualified-id protocol list adopt all protocols in IDecl's list
/// of protocols.
bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
                                                      ObjCInterfaceDecl *IDecl) {
  if (!QT->isObjCQualifiedIdType())
    return false;
  const auto *OPT = QT->getAs<ObjCObjectPointerType>();
  if (!OPT)
    return false;
  if (!IDecl->hasDefinition())
    return false;
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
  CollectInheritedProtocols(IDecl, InheritedProtocols);
  if (InheritedProtocols.empty())
    return false;
  // Check that if every protocol in list of id<plist> conforms to a protocol
  // of IDecl's, then bridge casting is ok.
  bool Conforms = false;
  for (auto *Proto : OPT->quals()) {
    Conforms = false;
    for (auto *PI : InheritedProtocols) {
      if (ProtocolCompatibleWithProtocol(Proto, PI)) {
        Conforms = true;
        break;
      }
    }
    if (!Conforms)
      break;
  }
  if (Conforms)
    return true;

  // Otherwise, check the reverse direction: every inherited protocol of
  // IDecl must be compatible with some protocol in QT's list.
  for (auto *PI : InheritedProtocols) {
    // If both the right and left sides have qualifiers.
    bool Adopts = false;
    for (auto *Proto : OPT->quals()) {
      // return 'true' if 'PI' is in the inheritance hierarchy of Proto
      if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
        break;
    }
    if (!Adopts)
      return false;
  }
  return true;
}

/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
/// the given object type.
QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
  llvm::FoldingSetNodeID ID;
  ObjCObjectPointerType::Profile(ID, ObjectT);

  void *InsertPos = nullptr;
  if (ObjCObjectPointerType *QT =
        ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Find the canonical object type.
  QualType Canonical;
  if (!ObjectT.isCanonical()) {
    Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));

    // Regenerate InsertPos.
    ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // No match.
  void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
  auto *QType =
    new (Mem) ObjCObjectPointerType(Canonical, ObjectT);

  Types.push_back(QType);
  ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
  return QualType(QType, 0);
}

/// getObjCInterfaceType - Return the unique reference to the type for the
/// specified ObjC interface decl. The list of protocols is optional.
QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
                                          ObjCInterfaceDecl *PrevDecl) const {
  // The type is cached on the declaration itself rather than in a folding set.
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  // Redeclarations share the type created for the previous declaration.
  if (PrevDecl) {
    assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    return QualType(PrevDecl->TypeForDecl, 0);
  }

  // Prefer the definition, if there is one.
  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
    Decl = Def;

  void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
  auto *T = new (Mem) ObjCInterfaceType(Decl);
  Decl->TypeForDecl = T;
  Types.push_back(T);
  return QualType(T, 0);
}

/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
/// TypeOfExprType AST's (since expression's are never shared). For example,
/// multiple declarations that refer to "typeof(x)" all contain different
/// DeclRefExpr's. This doesn't affect the type checker, since it operates
/// on canonical type's (which are always unique).
5620 QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const { 5621 TypeOfExprType *toe; 5622 if (tofExpr->isTypeDependent()) { 5623 llvm::FoldingSetNodeID ID; 5624 DependentTypeOfExprType::Profile(ID, *this, tofExpr, 5625 Kind == TypeOfKind::Unqualified); 5626 5627 void *InsertPos = nullptr; 5628 DependentTypeOfExprType *Canon = 5629 DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); 5630 if (Canon) { 5631 // We already have a "canonical" version of an identical, dependent 5632 // typeof(expr) type. Use that as our canonical type. 5633 toe = new (*this, TypeAlignment) 5634 TypeOfExprType(tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0)); 5635 } else { 5636 // Build a new, canonical typeof(expr) type. 5637 Canon = new (*this, TypeAlignment) 5638 DependentTypeOfExprType(*this, tofExpr, Kind); 5639 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); 5640 toe = Canon; 5641 } 5642 } else { 5643 QualType Canonical = getCanonicalType(tofExpr->getType()); 5644 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Kind, Canonical); 5645 } 5646 Types.push_back(toe); 5647 return QualType(toe, 0); 5648 } 5649 5650 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique 5651 /// TypeOfType nodes. The only motivation to unique these nodes would be 5652 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be 5653 /// an issue. This doesn't affect the type checker, since it operates 5654 /// on canonical types (which are always unique). 
QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const {
  QualType Canonical = getCanonicalType(tofType);
  auto *tot =
      new (*this, TypeAlignment) TypeOfType(tofType, Canonical, Kind);
  Types.push_back(tot);
  return QualType(tot, 0);
}

/// getReferenceQualifiedType - Given an expr, will return the type for
/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
/// and class member access into account.
QualType ASTContext::getReferenceQualifiedType(const Expr *E) const {
  // C++11 [dcl.type.simple]p4:
  //   [...]
  QualType T = E->getType();
  switch (E->getValueKind()) {
  //     - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
  //       type of e;
  case VK_XValue:
    return getRValueReferenceType(T);
  //     - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
  //       type of e;
  case VK_LValue:
    return getLValueReferenceType(T);
  //     - otherwise, decltype(e) is the type of e.
  case VK_PRValue:
    return T;
  }
  llvm_unreachable("Unknown value kind");
}

/// Unlike many "get<Type>" functions, we don't unique DecltypeType
/// nodes. This would never be helpful, since each such type has its own
/// expression, and would not give a significant memory saving, since there
/// is an Expr tree under each such type.
QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
  DecltypeType *dt;

  // C++11 [temp.type]p2:
  //   If an expression e involves a template parameter, decltype(e) denotes a
  //   unique dependent type. Two such decltype-specifiers refer to the same
  //   type only if their expressions are equivalent (14.5.6.1).
  if (e->isInstantiationDependent()) {
    // Dependent decltype(e): canonical nodes are uniqued by expression.
    llvm::FoldingSetNodeID ID;
    DependentDecltypeType::Profile(ID, *this, e);

    void *InsertPos = nullptr;
    DependentDecltypeType *Canon
      = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (!Canon) {
      // Build a new, canonical decltype(expr) type.
      Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
      DependentDecltypeTypes.InsertNode(Canon, InsertPos);
    }
    dt = new (*this, TypeAlignment)
        DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
  } else {
    // Non-dependent: canonicalize the already-computed underlying type.
    dt = new (*this, TypeAlignment)
        DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType));
  }
  Types.push_back(dt);
  return QualType(dt, 0);
}

/// getUnaryTransformationType - We don't unique these, since the memory
/// savings are minimal and these are rare.
QualType ASTContext::getUnaryTransformType(QualType BaseType,
                                           QualType UnderlyingType,
                                           UnaryTransformType::UTTKind Kind)
    const {
  UnaryTransformType *ut = nullptr;

  if (BaseType->isDependentType()) {
    // Look in the folding set for an existing type.
    llvm::FoldingSetNodeID ID;
    DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind);

    void *InsertPos = nullptr;
    DependentUnaryTransformType *Canon
      = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);

    if (!Canon) {
      // Build a new, canonical __underlying_type(type) type.
      Canon = new (*this, TypeAlignment)
          DependentUnaryTransformType(*this, getCanonicalType(BaseType),
                                      Kind);
      DependentUnaryTransformTypes.InsertNode(Canon, InsertPos);
    }
    // Dependent transforms have no computed underlying type yet.
    ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
                                                        QualType(), Kind,
                                                        QualType(Canon, 0));
  } else {
    QualType CanonType = getCanonicalType(UnderlyingType);
    ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
                                                        UnderlyingType, Kind,
                                                        CanonType);
  }
  Types.push_back(ut);
  return QualType(ut, 0);
}

/// Build or retrieve the uniqued AutoType with the given deduced type,
/// keyword, dependence, and optional type constraint. \p IsCanon indicates
/// the node being built is itself the canonical node.
QualType ASTContext::getAutoTypeInternal(
    QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent,
    bool IsPack, ConceptDecl *TypeConstraintConcept,
    ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const {
  // The plain undeduced, unconstrained 'auto' has a dedicated singleton.
  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto &&
      !TypeConstraintConcept && !IsDependent)
    return getAutoDeductType();

  // Look in the folding set for an existing type.
  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent,
                    TypeConstraintConcept, TypeConstraintArgs);
  if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(AT, 0);

  QualType Canon;
  if (!IsCanon) {
    if (!DeducedType.isNull()) {
      Canon = DeducedType.getCanonicalType();
    } else if (TypeConstraintConcept) {
      // The canonical form of a constrained auto drops the constraint.
      Canon = getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack,
                                  nullptr, {}, true);
      // Find the insert position again.
      [[maybe_unused]] auto *Nothing =
          AutoTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!Nothing && "canonical type broken");
    }
  }

  // Allocate with trailing storage for the constraint arguments.
  void *Mem = Allocate(sizeof(AutoType) +
                           sizeof(TemplateArgument) * TypeConstraintArgs.size(),
                       TypeAlignment);
  auto *AT = new (Mem) AutoType(
      DeducedType, Keyword,
      (IsDependent ?
TypeDependence::DependentInstantiation
                     : TypeDependence::None) |
          (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
      Canon, TypeConstraintConcept, TypeConstraintArgs);
  Types.push_back(AT);
  AutoTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}

/// getAutoType - Return the uniqued reference to the 'auto' type which has been
/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
/// canonical deduced-but-dependent 'auto' type.
QualType
ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
                        bool IsDependent, bool IsPack,
                        ConceptDecl *TypeConstraintConcept,
                        ArrayRef<TemplateArgument> TypeConstraintArgs) const {
  assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
  assert((!IsDependent || DeducedType.isNull()) &&
         "A dependent auto should be undeduced");
  return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack,
                             TypeConstraintConcept, TypeConstraintArgs);
}

/// Return the uniqued reference to the deduced template specialization type
/// which has been deduced to the given type, or to the canonical undeduced
/// such type, or the canonical deduced-but-dependent such type.
QualType ASTContext::getDeducedTemplateSpecializationType(
    TemplateName Template, QualType DeducedType, bool IsDependent) const {
  // Look in the folding set for an existing type.
  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType,
                                             IsDependent);
  if (DeducedTemplateSpecializationType *DTST =
          DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(DTST, 0);

  auto *DTST = new (*this, TypeAlignment)
      DeducedTemplateSpecializationType(Template, DeducedType, IsDependent);
  // Verify the freshly built node profiles identically to the lookup key.
  llvm::FoldingSetNodeID TempID;
  DTST->Profile(TempID);
  assert(ID == TempID && "ID does not match");
  Types.push_back(DTST);
  DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
  return QualType(DTST, 0);
}

/// getAtomicType - Return the uniqued reference to the atomic type for
/// the given value type.
QualType ASTContext::getAtomicType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  AtomicType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(AT, 0);

  // If the atomic value type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getAtomicType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
  Types.push_back(New);
  AtomicTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getAutoDeductType - Get type pattern for deducing against 'auto'.
5868 QualType ASTContext::getAutoDeductType() const { 5869 if (AutoDeductTy.isNull()) 5870 AutoDeductTy = QualType(new (*this, TypeAlignment) 5871 AutoType(QualType(), AutoTypeKeyword::Auto, 5872 TypeDependence::None, QualType(), 5873 /*concept*/ nullptr, /*args*/ {}), 5874 0); 5875 return AutoDeductTy; 5876 } 5877 5878 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 5879 QualType ASTContext::getAutoRRefDeductType() const { 5880 if (AutoRRefDeductTy.isNull()) 5881 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 5882 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 5883 return AutoRRefDeductTy; 5884 } 5885 5886 /// getTagDeclType - Return the unique reference to the type for the 5887 /// specified TagDecl (struct/union/class/enum) decl. 5888 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 5889 assert(Decl); 5890 // FIXME: What is the design on getTagDeclType when it requires casting 5891 // away const? mutable? 5892 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 5893 } 5894 5895 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 5896 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 5897 /// needs to agree with the definition in <stddef.h>. 5898 CanQualType ASTContext::getSizeType() const { 5899 return getFromTargetType(Target->getSizeType()); 5900 } 5901 5902 /// Return the unique signed counterpart of the integer type 5903 /// corresponding to size_t. 5904 CanQualType ASTContext::getSignedSizeType() const { 5905 return getFromTargetType(Target->getSignedSizeType()); 5906 } 5907 5908 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 5909 CanQualType ASTContext::getIntMaxType() const { 5910 return getFromTargetType(Target->getIntMaxType()); 5911 } 5912 5913 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 
5914 CanQualType ASTContext::getUIntMaxType() const { 5915 return getFromTargetType(Target->getUIntMaxType()); 5916 } 5917 5918 /// getSignedWCharType - Return the type of "signed wchar_t". 5919 /// Used when in C++, as a GCC extension. 5920 QualType ASTContext::getSignedWCharType() const { 5921 // FIXME: derive from "Target" ? 5922 return WCharTy; 5923 } 5924 5925 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 5926 /// Used when in C++, as a GCC extension. 5927 QualType ASTContext::getUnsignedWCharType() const { 5928 // FIXME: derive from "Target" ? 5929 return UnsignedIntTy; 5930 } 5931 5932 QualType ASTContext::getIntPtrType() const { 5933 return getFromTargetType(Target->getIntPtrType()); 5934 } 5935 5936 QualType ASTContext::getUIntPtrType() const { 5937 return getCorrespondingUnsignedType(getIntPtrType()); 5938 } 5939 5940 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 5941 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 5942 QualType ASTContext::getPointerDiffType() const { 5943 return getFromTargetType(Target->getPtrDiffType(LangAS::Default)); 5944 } 5945 5946 /// Return the unique unsigned counterpart of "ptrdiff_t" 5947 /// integer type. The standard (C11 7.21.6.1p7) refers to this type 5948 /// in the definition of %tu format specifier. 5949 QualType ASTContext::getUnsignedPointerDiffType() const { 5950 return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default)); 5951 } 5952 5953 /// Return the unique type for "pid_t" defined in 5954 /// <sys/types.h>. We need this to compute the correct type for vfork(). 
QualType ASTContext::getProcessIDType() const {
  return getFromTargetType(Target->getProcessIDType());
}

//===----------------------------------------------------------------------===//
//                        Type Operators
//===----------------------------------------------------------------------===//

/// Return the canonical parameter-adjusted form of \p T: arrays decay to
/// pointers, function types become function pointers, and top-level
/// qualifiers are dropped.
CanQualType ASTContext::getCanonicalParamType(QualType T) const {
  // Push qualifiers into arrays, and then discard any remaining
  // qualifiers.
  T = getCanonicalType(T);
  T = getVariableArrayDecayedType(T);
  const Type *Ty = T.getTypePtr();
  QualType Result;
  if (isa<ArrayType>(Ty)) {
    Result = getArrayDecayedType(QualType(Ty,0));
  } else if (isa<FunctionType>(Ty)) {
    Result = getPointerType(QualType(Ty, 0));
  } else {
    Result = QualType(Ty, 0);
  }

  return CanQualType::CreateUnsafe(Result);
}

/// Strip qualifiers from \p type, looking through arrays so element
/// qualifiers are removed as well; the stripped qualifiers are returned
/// in \p quals.
QualType ASTContext::getUnqualifiedArrayType(QualType type,
                                             Qualifiers &quals) {
  SplitQualType splitType = type.getSplitUnqualifiedType();

  // FIXME: getSplitUnqualifiedType() actually walks all the way to
  // the unqualified desugared type and then drops it on the floor.
  // We then have to strip that sugar back off with
  // getUnqualifiedDesugaredType(), which is silly.
  const auto *AT =
      dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());

  // If we don't have an array, just use the results in splitType.
  if (!AT) {
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, recurse on the array's element type.
  QualType elementType = AT->getElementType();
  QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);

  // If that didn't change the element type, AT has no qualifiers, so we
  // can just use the results in splitType.
  if (elementType == unqualElementType) {
    assert(quals.empty()); // from the recursive call
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, add in the qualifiers from the outermost type, then
  // build the type back up.
  quals.addConsistentQualifiers(splitType.Quals);

  // Rebuild the same flavor of array type over the unqualified element type,
  // with no index-type qualifiers.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
    return getConstantArrayType(unqualElementType, CAT->getSize(),
                                CAT->getSizeExpr(), CAT->getSizeModifier(), 0);
  }

  if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) {
    return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
  }

  if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) {
    return getVariableArrayType(unqualElementType,
                                VAT->getSizeExpr(),
                                VAT->getSizeModifier(),
                                VAT->getIndexTypeCVRQualifiers(),
                                VAT->getBracketsRange());
  }

  const auto *DSAT = cast<DependentSizedArrayType>(AT);
  return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
                                    DSAT->getSizeModifier(), 0,
                                    SourceRange());
}

/// Attempt to unwrap two types that may both be array types with the same bound
/// (or both be array types of unknown bound) for the purpose of comparing the
/// cv-decomposition of two types per C++ [conv.qual].
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
///        C++20 [conv.qual], if permitted by the current language mode.
void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2,
                                         bool AllowPiMismatch) {
  while (true) {
    auto *AT1 = getAsArrayType(T1);
    if (!AT1)
      return;

    auto *AT2 = getAsArrayType(T2);
    if (!AT2)
      return;

    // If we don't have two array types with the same constant bound nor two
    // incomplete array types, we've unwrapped everything we can.
    // C++20 also permits one type to be a constant array type and the other
    // to be an incomplete array type.
    // FIXME: Consider also unwrapping array of unknown bound and VLA.
    if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) {
      auto *CAT2 = dyn_cast<ConstantArrayType>(AT2);
      if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) ||
            (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
             isa<IncompleteArrayType>(AT2))))
        return;
    } else if (isa<IncompleteArrayType>(AT1)) {
      if (!(isa<IncompleteArrayType>(AT2) ||
            (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
             isa<ConstantArrayType>(AT2))))
        return;
    } else {
      return;
    }

    // This array level matched; continue unwrapping with the element types.
    T1 = AT1->getElementType();
    T2 = AT2->getElementType();
  }
}

/// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
///
/// If T1 and T2 are both pointer types of the same kind, or both array types
/// with the same bound, unwraps layers from T1 and T2 until a pointer type is
/// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
///
/// This function will typically be called in a loop that successively
/// "unwraps" pointer and pointer-to-member types to compare them at each
/// level.
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
///        C++20 [conv.qual], if permitted by the current language mode.
///
/// \return \c true if a pointer type was unwrapped, \c false if we reached a
///         pair of types that can't be unwrapped further.
bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2,
                                    bool AllowPiMismatch) {
  // First strip any matching array layers (same bound, or the mismatches
  // C++20 permits when AllowPiMismatch is set).
  UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch);

  // Both ordinary pointers: unwrap one level.
  const auto *T1PtrType = T1->getAs<PointerType>();
  const auto *T2PtrType = T2->getAs<PointerType>();
  if (T1PtrType && T2PtrType) {
    T1 = T1PtrType->getPointeeType();
    T2 = T2PtrType->getPointeeType();
    return true;
  }

  // Both pointers to members of the same class: unwrap one level. The class
  // types must match (ignoring qualifiers) for the levels to correspond.
  const auto *T1MPType = T1->getAs<MemberPointerType>();
  const auto *T2MPType = T2->getAs<MemberPointerType>();
  if (T1MPType && T2MPType &&
      hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
                             QualType(T2MPType->getClass(), 0))) {
    T1 = T1MPType->getPointeeType();
    T2 = T2MPType->getPointeeType();
    return true;
  }

  // In Objective-C modes, Objective-C object pointers unwrap as well.
  if (getLangOpts().ObjC) {
    const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
    const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
    if (T1OPType && T2OPType) {
      T1 = T1OPType->getPointeeType();
      T2 = T2OPType->getPointeeType();
      return true;
    }
  }

  // FIXME: Block pointers, too?

  return false;
}

/// Determine whether T1 and T2 are "similar" (C++ [conv.qual]): identical
/// after discarding qualifiers at every level (including array element
/// qualifiers) and unwrapping matching layers of pointers and
/// pointer-to-member types.
bool ASTContext::hasSimilarType(QualType T1, QualType T2) {
  while (true) {
    // The stripped qualifiers are deliberately discarded: similarity allows
    // qualifiers to differ at every level.
    Qualifiers Quals;
    T1 = getUnqualifiedArrayType(T1, Quals);
    T2 = getUnqualifiedArrayType(T2, Quals);
    if (hasSameType(T1, T2))
      return true;
    if (!UnwrapSimilarTypes(T1, T2))
      return false;
  }
}

/// Like hasSimilarType, but only const/volatile/restrict qualifiers may
/// differ: all other qualifiers must match exactly at every level, and the
/// C++20 array-bound mismatch is not permitted (AllowPiMismatch is false).
bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
  while (true) {
    Qualifiers Quals1, Quals2;
    T1 = getUnqualifiedArrayType(T1, Quals1);
    T2 = getUnqualifiedArrayType(T2, Quals2);

    // Only CVR qualifiers may differ between corresponding levels.
    Quals1.removeCVRQualifiers();
    Quals2.removeCVRQualifiers();
    if (Quals1 != Quals2)
      return false;

    if (hasSameType(T1, T2))
      return true;

    if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false))
      return false;
  }
}

/// Build a DeclarationNameInfo describing the declaration name that the given
/// template name refers to, located at \p NameLoc.
DeclarationNameInfo
ASTContext::getNameForTemplate(TemplateName Name,
                               SourceLocation NameLoc) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template:
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
                               NameLoc);

  case TemplateName::OverloadedTemplate: {
    // Use the name of the first declaration in the overload set.
    OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
  }

  case TemplateName::AssumedTemplate: {
    AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName();
    return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
  }

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    DeclarationName DName;
    if (DTN->isIdentifier()) {
      DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
      return DeclarationNameInfo(DName, NameLoc);
    } else {
      // A dependent operator name, e.g. 'T::operator+'.
      DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
      // DNInfo work in progress: FIXME: source locations?
      DeclarationNameLoc DNLoc =
          DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange());
      return DeclarationNameInfo(DName, NameLoc, DNLoc);
    }
  }

  case TemplateName::SubstTemplateTemplateParm: {
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return DeclarationNameInfo(subst->getParameter()->getDeclName(),
                               NameLoc);
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst
      = Name.getAsSubstTemplateTemplateParmPack();
    return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
                               NameLoc);
  }
  case TemplateName::UsingTemplate:
    return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(),
                               NameLoc);
  }

  llvm_unreachable("bad template name kind!");
}

/// Retrieve the "canonical" template name that refers to the given template.
/// Unresolved (overloaded/assumed) template names cannot be canonicalized.
TemplateName
ASTContext::getCanonicalTemplateName(const TemplateName &Name) const {
  switch (Name.getKind()) {
  case TemplateName::UsingTemplate:
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template: {
    TemplateDecl *Template = Name.getAsTemplateDecl();
    if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template))
      Template = getCanonicalTemplateTemplateParmDecl(TTP);

    // The canonical template name is the canonical template declaration.
    return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
  }

  case TemplateName::OverloadedTemplate:
  case TemplateName::AssumedTemplate:
    llvm_unreachable("cannot canonicalize unresolved template");

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    assert(DTN && "Non-dependent template names must refer to template decls.");
    return DTN->CanonicalTemplateName;
  }

  case TemplateName::SubstTemplateTemplateParm: {
    // Canonically, a substituted parameter is just its replacement.
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return getCanonicalTemplateName(subst->getReplacement());
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst =
        Name.getAsSubstTemplateTemplateParmPack();
    TemplateArgument canonArgPack =
        getCanonicalTemplateArgument(subst->getArgumentPack());
    return getSubstTemplateTemplateParmPack(
        canonArgPack, subst->getAssociatedDecl()->getCanonicalDecl(),
        subst->getFinal(), subst->getIndex());
  }
  }

  llvm_unreachable("bad template name!");
}

/// Determine whether X and Y refer to the same template, by comparing their
/// canonical template names for pointer identity.
bool ASTContext::hasSameTemplateName(const TemplateName &X,
                                     const TemplateName &Y) const {
  return getCanonicalTemplateName(X).getAsVoidPointer() ==
         getCanonicalTemplateName(Y).getAsVoidPointer();
}

/// Determine whether two constraint expressions are equivalent, by profiling
/// both in canonical form. A null expression is equivalent only to another
/// null expression.
bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const {
  if (!XCE != !YCE)
    return false;

  if (!XCE)
    return true;

  llvm::FoldingSetNodeID XCEID, YCEID;
  XCE->Profile(XCEID, *this, /*Canonical=*/true);
  YCE->Profile(YCEID, *this, /*Canonical=*/true);
  return XCEID == YCEID;
}

/// Determine whether two type-constraints are equivalent: same named concept,
/// same explicit template argument count, and equivalent immediately-declared
/// constraints. A null constraint is equivalent only to another null one.
bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC,
                                      const TypeConstraint *YTC) const {
  if (!XTC != !YTC)
    return false;

  if (!XTC)
    return true;

  auto *NCX = XTC->getNamedConcept();
  auto *NCY = YTC->getNamedConcept();
  if (!NCX || !NCY || !isSameEntity(NCX, NCY))
    return false;
  if (XTC->hasExplicitTemplateArgs() != YTC->hasExplicitTemplateArgs())
    return false;
  if (XTC->hasExplicitTemplateArgs())
    if (XTC->getTemplateArgsAsWritten()->NumTemplateArgs !=
        YTC->getTemplateArgsAsWritten()->NumTemplateArgs)
      return false;

  // Compare slowly by profiling.
  //
  // We couldn't compare the profiling result for the template
  // args here. Consider the following example in different modules:
  //
  // template <__integer_like _Tp, C<_Tp> Sentinel>
  // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const {
  //   return __t;
  // }
  //
  // When we compare the profiling result for `C<_Tp>` in different
  // modules, it will compare the type of `_Tp` in different modules.
  // However, the type of `_Tp` in different modules refer to different
  // types here naturally. So we couldn't compare the profiling result
  // for the template args directly.
  return isSameConstraintExpr(XTC->getImmediatelyDeclaredConstraint(),
                              YTC->getImmediatelyDeclaredConstraint());
}

/// Determine whether two template parameters declare equivalent parameters:
/// same parameter kind, same packness, and equivalent types/constraints.
bool ASTContext::isSameTemplateParameter(const NamedDecl *X,
                                         const NamedDecl *Y) const {
  if (X->getKind() != Y->getKind())
    return false;

  if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) {
    auto *TY = cast<TemplateTypeParmDecl>(Y);
    if (TX->isParameterPack() != TY->isParameterPack())
      return false;
    if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
      return false;
    return isSameTypeConstraint(TX->getTypeConstraint(),
                                TY->getTypeConstraint());
  }

  if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
    auto *TY = cast<NonTypeTemplateParmDecl>(Y);
    return TX->isParameterPack() == TY->isParameterPack() &&
           TX->getASTContext().hasSameType(TX->getType(), TY->getType()) &&
           isSameConstraintExpr(TX->getPlaceholderTypeConstraint(),
                                TY->getPlaceholderTypeConstraint());
  }

  auto *TX = cast<TemplateTemplateParmDecl>(X);
  auto *TY = cast<TemplateTemplateParmDecl>(Y);
  return TX->isParameterPack() == TY->isParameterPack() &&
         isSameTemplateParameterList(TX->getTemplateParameters(),
                                     TY->getTemplateParameters());
}

/// Determine whether two template parameter lists are equivalent: same
/// length, pairwise-equivalent parameters, and equivalent requires-clauses.
bool ASTContext::isSameTemplateParameterList(
    const TemplateParameterList *X, const TemplateParameterList *Y) const {
  if (X->size() != Y->size())
    return false;

  for (unsigned I = 0, N = X->size(); I != N; ++I)
    if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I)))
      return false;

  return isSameConstraintExpr(X->getRequiresClause(), Y->getRequiresClause());
}

/// Determine whether two equivalent template parameters also have equivalent
/// default arguments; returns false unless both parameters have one.
bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
                                               const NamedDecl *Y) const {
  // If the type parameter isn't the same already, we don't need to check the
  // default argument further.
  if (!isSameTemplateParameter(X, Y))
    return false;

  // Type parameters: compare the default types.
  if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(X)) {
    auto *TTPY = cast<TemplateTypeParmDecl>(Y);
    if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
      return false;

    return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument());
  }

  // Non-type parameters: profile the default expressions (in canonical form,
  // ignoring implicit casts) and compare the results.
  if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
    auto *NTTPY = cast<NonTypeTemplateParmDecl>(Y);
    if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument())
      return false;

    Expr *DefaultArgumentX = NTTPX->getDefaultArgument()->IgnoreImpCasts();
    Expr *DefaultArgumentY = NTTPY->getDefaultArgument()->IgnoreImpCasts();
    llvm::FoldingSetNodeID XID, YID;
    DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true);
    DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true);
    return XID == YID;
  }

  // Template template parameters: compare the default template names.
  auto *TTPX = cast<TemplateTemplateParmDecl>(X);
  auto *TTPY = cast<TemplateTemplateParmDecl>(Y);

  if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
    return false;

  const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument();
  const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument();
  return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate());
}

/// If the given nested-name-specifier component names a namespace, either
/// directly or through a namespace alias, return that namespace; otherwise
/// return null.
static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) {
  if (auto *NS = X->getAsNamespace())
    return NS;
  if (auto *NAS = X->getAsNamespaceAlias())
    return NAS->getNamespace();
  return nullptr;
}

/// Determine whether two nested-name-specifiers name the same sequence of
/// entities, comparing component-wise and recursing into any prefixes.
static bool isSameQualifier(const NestedNameSpecifier *X,
                            const NestedNameSpecifier *Y) {
  // A namespace and an alias to that namespace are treated as the same
  // qualifier component.
  if (auto *NSX = getNamespace(X)) {
    auto *NSY = getNamespace(Y);
    if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl())
      return false;
  } else if (X->getKind() != Y->getKind())
    return false;

  // FIXME: For namespaces and types, we're permitted to check that the entity
  // is named via the same tokens. We should probably do so.
  switch (X->getKind()) {
  case NestedNameSpecifier::Identifier:
    if (X->getAsIdentifier() != Y->getAsIdentifier())
      return false;
    break;
  case NestedNameSpecifier::Namespace:
  case NestedNameSpecifier::NamespaceAlias:
    // We've already checked that we named the same namespace.
    break;
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate:
    if (X->getAsType()->getCanonicalTypeInternal() !=
        Y->getAsType()->getCanonicalTypeInternal())
      return false;
    break;
  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    return true;
  }

  // Recurse into earlier portion of NNS, if any.
  auto *PX = X->getPrefix();
  auto *PY = Y->getPrefix();
  if (PX && PY)
    return isSameQualifier(PX, PY);
  return !PX && !PY;
}

/// Determine whether the attributes we can overload on are identical for A and
/// B. Will ignore any overloadable attrs represented in the type of A and B.
static bool hasSameOverloadableAttrs(const FunctionDecl *A,
                                     const FunctionDecl *B) {
  // Note that pass_object_size attributes are represented in the function's
  // ExtParameterInfo, so we don't need to check them here.

  llvm::FoldingSetNodeID Cand1ID, Cand2ID;
  auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
  auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();

  // zip_longest pads the shorter sequence with empty optionals, so a length
  // mismatch is detected inside the loop.
  for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) {
    std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
    std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);

    // Return false if the number of enable_if attributes is different.
    if (!Cand1A || !Cand2A)
      return false;

    // Reuse the two node IDs across iterations rather than reallocating.
    Cand1ID.clear();
    Cand2ID.clear();

    (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true);
    (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true);

    // Return false if any of the enable_if expressions of A and B are
    // different.
    if (Cand1ID != Cand2ID)
      return false;
  }
  return true;
}

/// Determine whether the friend functions X and Y must be treated as distinct
/// declarations because of their constraints, per C++ [temp.friend]p9.
bool ASTContext::FriendsDifferByConstraints(const FunctionDecl *X,
                                            const FunctionDecl *Y) const {
  // If these aren't friends, then they aren't friends that differ by
  // constraints.
  if (!X->getFriendObjectKind() || !Y->getFriendObjectKind())
    return false;

  // If the two functions share lexical declaration context, they are not in
  // separate instantiations, and thus in the same scope.
  if (X->getLexicalDeclContext() == Y->getLexicalDeclContext())
    return false;

  if (!X->getDescribedFunctionTemplate()) {
    assert(!Y->getDescribedFunctionTemplate() &&
           "How would these be the same if they aren't both templates?");

    // If these friends don't have constraints, they aren't constrained, and
    // thus don't fall under temp.friend p9. Else the simple presence of a
    // constraint makes them unique.
    return X->getTrailingRequiresClause();
  }

  return X->FriendConstraintRefersToEnclosingTemplate();
}

/// Determine whether the declarations X and Y declare the same entity, e.g.
/// so that redundant declarations can be merged (the comments below reference
/// merging declarations loaded from different modules).
bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
  if (X == Y)
    return true;

  if (X->getDeclName() != Y->getDeclName())
    return false;

  // Must be in the same context.
  //
  // Note that we can't use DeclContext::Equals here, because the DeclContexts
  // could be two different declarations of the same function. (We will fix the
  // semantic DC to refer to the primary definition after merging.)
  if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()),
                          cast<Decl>(Y->getDeclContext()->getRedeclContext())))
    return false;

  // Two typedefs refer to the same entity if they have the same underlying
  // type.
  if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X))
    if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y))
      return hasSameType(TypedefX->getUnderlyingType(),
                         TypedefY->getUnderlyingType());

  // Must have the same kind.
  if (X->getKind() != Y->getKind())
    return false;

  // Objective-C classes and protocols with the same name always match.
  if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X))
    return true;

  if (isa<ClassTemplateSpecializationDecl>(X)) {
    // No need to handle these here: we merge them when adding them to the
    // template.
    return false;
  }

  // Compatible tags match. Struct/class/interface are mutually compatible
  // tag kinds; any other kind must match exactly.
  if (const auto *TagX = dyn_cast<TagDecl>(X)) {
    const auto *TagY = cast<TagDecl>(Y);
    return (TagX->getTagKind() == TagY->getTagKind()) ||
           ((TagX->getTagKind() == TTK_Struct ||
             TagX->getTagKind() == TTK_Class ||
             TagX->getTagKind() == TTK_Interface) &&
            (TagY->getTagKind() == TTK_Struct ||
             TagY->getTagKind() == TTK_Class ||
             TagY->getTagKind() == TTK_Interface));
  }

  // Functions with the same type and linkage match.
  // FIXME: This needs to cope with merging of prototyped/non-prototyped
  // functions, etc.
  if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
    const auto *FuncY = cast<FunctionDecl>(Y);
    // Inheriting constructors must be inherited from the same constructor.
    if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
      const auto *CtorY = cast<CXXConstructorDecl>(Y);
      if (CtorX->getInheritedConstructor() &&
          !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
                        CtorY->getInheritedConstructor().getConstructor()))
        return false;
    }

    if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
      return false;

    // Multiversioned functions with different feature strings are represented
    // as separate declarations.
    if (FuncX->isMultiVersion()) {
      const auto *TAX = FuncX->getAttr<TargetAttr>();
      const auto *TAY = FuncY->getAttr<TargetAttr>();
      assert(TAX && TAY && "Multiversion Function without target attribute");

      if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
        return false;
    }

    if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(),
                              FuncY->getTrailingRequiresClause()))
      return false;

    // Constrained friends are different in certain cases, see: [temp.friend]p9.
    if (FriendsDifferByConstraints(FuncX, FuncY))
      return false;

    auto GetTypeAsWritten = [](const FunctionDecl *FD) {
      // Map to the first declaration that we've already merged into this one.
      // The TSI of redeclarations might not match (due to calling conventions
      // being inherited onto the type but not the TSI), but the TSI type of
      // the first declaration of the function should match across modules.
      FD = FD->getCanonicalDecl();
      return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
                                     : FD->getType();
    };
    QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
    if (!hasSameType(XT, YT)) {
      // We can get functions with different types on the redecl chain in C++17
      // if they have differing exception specifications and at least one of
      // the exception specs is unresolved.
      auto *XFPT = XT->getAs<FunctionProtoType>();
      auto *YFPT = YT->getAs<FunctionProtoType>();
      if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
          (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
           isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
          hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
        return true;
      return false;
    }

    return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
           hasSameOverloadableAttrs(FuncX, FuncY);
  }

  // Variables with the same type and linkage match.
  if (const auto *VarX = dyn_cast<VarDecl>(X)) {
    const auto *VarY = cast<VarDecl>(Y);
    if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
      if (hasSameType(VarX->getType(), VarY->getType()))
        return true;

      // We can get decls with different types on the redecl chain. Eg.
      // template <typename T> struct S { static T Var[]; }; // #1
      // template <typename T> T S<T>::Var[sizeof(T)]; // #2
      // Only? happens when completing an incomplete array type. In this case
      // when comparing #1 and #2 we should go through their element type.
      const ArrayType *VarXTy = getAsArrayType(VarX->getType());
      const ArrayType *VarYTy = getAsArrayType(VarY->getType());
      if (!VarXTy || !VarYTy)
        return false;
      if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
        return hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
    }
    return false;
  }

  // Namespaces with the same name and inlinedness match.
  if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
    const auto *NamespaceY = cast<NamespaceDecl>(Y);
    return NamespaceX->isInline() == NamespaceY->isInline();
  }

  // Identical template names and kinds match if their template parameter lists
  // and patterns match.
  if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
    const auto *TemplateY = cast<TemplateDecl>(Y);

    // ConceptDecl wouldn't be the same if their constraint expression differs.
    if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) {
      const auto *ConceptY = cast<ConceptDecl>(Y);
      const Expr *XCE = ConceptX->getConstraintExpr();
      const Expr *YCE = ConceptY->getConstraintExpr();
      assert(XCE && YCE && "ConceptDecl without constraint expression?");
      llvm::FoldingSetNodeID XID, YID;
      XCE->Profile(XID, *this, /*Canonical=*/true);
      YCE->Profile(YID, *this, /*Canonical=*/true);
      if (XID != YID)
        return false;
    }

    return isSameEntity(TemplateX->getTemplatedDecl(),
                        TemplateY->getTemplatedDecl()) &&
           isSameTemplateParameterList(TemplateX->getTemplateParameters(),
                                       TemplateY->getTemplateParameters());
  }

  // Fields with the same name and the same type match.
  if (const auto *FDX = dyn_cast<FieldDecl>(X)) {
    const auto *FDY = cast<FieldDecl>(Y);
    // FIXME: Also check the bitwidth is odr-equivalent, if any.
    return hasSameType(FDX->getType(), FDY->getType());
  }

  // Indirect fields with the same target field match.
  if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
    const auto *IFDY = cast<IndirectFieldDecl>(Y);
    return IFDX->getAnonField()->getCanonicalDecl() ==
           IFDY->getAnonField()->getCanonicalDecl();
  }

  // Enumerators with the same name match.
  if (isa<EnumConstantDecl>(X))
    // FIXME: Also check the value is odr-equivalent.
  if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
    const auto *NAY = cast<NamespaceAliasDecl>(Y);
    return NAX->getNamespace()->Equals(NAY->getNamespace());
  }

  return false;
}

/// Retrieve the canonical form of the given template argument: canonicalize
/// its type, declaration, template name, or pack elements as appropriate.
TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    return Arg;

  case TemplateArgument::Expression:
    return Arg;

  case TemplateArgument::Declaration: {
    auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
    return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()));
  }

  case TemplateArgument::NullPtr:
    return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
                            /*isNullPtr*/true);

  case TemplateArgument::Template:
    return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));

  case TemplateArgument::TemplateExpansion:
    return TemplateArgument(getCanonicalTemplateName(
                                       Arg.getAsTemplateOrTemplatePattern()),
                            Arg.getNumTemplateExpansions());

  case TemplateArgument::Integral:
    return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));

  case TemplateArgument::Type:
    return TemplateArgument(getCanonicalType(Arg.getAsType()));

  case TemplateArgument::Pack: {
    // Only rebuild the pack if some element actually changed; otherwise the
    // original argument is already canonical.
    bool AnyNonCanonArgs = false;
    auto CanonArgs = ::getCanonicalTemplateArguments(
        *this, Arg.pack_elements(), AnyNonCanonArgs);
    if (!AnyNonCanonArgs)
      return Arg;
    return TemplateArgument::CreatePackCopy(const_cast<ASTContext &>(*this),
                                            CanonArgs);
  }
  }

  // Silence GCC warning
  llvm_unreachable("Unhandled template argument kind");
}

/// Retrieve the canonical form of the given nested-name-specifier, or null
/// for a null specifier.
NestedNameSpecifier *
ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
  if (!NNS)
    return nullptr;

  switch (NNS->getKind()) {
  case NestedNameSpecifier::Identifier:
    // Canonicalize the prefix but keep the identifier the same.
    return NestedNameSpecifier::Create(*this,
                         getCanonicalNestedNameSpecifier(NNS->getPrefix()),
                                       NNS->getAsIdentifier());

  case NestedNameSpecifier::Namespace:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(*this, nullptr,
                                 NNS->getAsNamespace()->getOriginalNamespace());

  case NestedNameSpecifier::NamespaceAlias:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(*this, nullptr,
                                    NNS->getAsNamespaceAlias()->getNamespace()
                                                      ->getOriginalNamespace());

  // The difference between TypeSpec and TypeSpecWithTemplate is that the
  // latter will have the 'template' keyword when printed.
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate: {
    const Type *T = getCanonicalType(NNS->getAsType());

    // If we have some kind of dependent-named type (e.g., "typename T::type"),
    // break it apart into its prefix and identifier, then reconstitute those
    // as the canonical nested-name-specifier. This is required to canonicalize
    // a dependent nested-name-specifier involving typedefs of dependent-name
    // types, e.g.,
    //   typedef typename T::type T1;
    //   typedef typename T1::type T2;
    if (const auto *DNT = T->getAs<DependentNameType>())
      return NestedNameSpecifier::Create(
          *this, DNT->getQualifier(),
          const_cast<IdentifierInfo *>(DNT->getIdentifier()));
    if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
      return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true,
                                         const_cast<Type *>(T));

    // TODO: Set 'Template' parameter to true for other template types.
    return NestedNameSpecifier::Create(*this, nullptr, false,
                                       const_cast<Type *>(T));
  }

  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    // The global specifier and __super specifier are canonical and unique.
    return NNS;
  }

  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}

const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const auto *AT = dyn_cast<ArrayType>(T))
      return AT;
  }

  // Handle the common negative case fast.
  if (!isa<ArrayType>(T.getCanonicalType()))
    return nullptr;

  // Apply any qualifiers from the array type to the element type. This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array type."

  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way. If we have type qualifiers on the type
  // we must propagate them down into the element type.

  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;

  // If we have a simple case, just return now.
  const auto *ATy = dyn_cast<ArrayType>(split.Ty);
  if (!ATy || qs.empty())
    return ATy;

  // Otherwise, we have an array and we have qualifiers on it. Push the
  // qualifiers into the array element type and return a new array type.
  QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);

  if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy))
    return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
                                                CAT->getSizeExpr(),
                                                CAT->getSizeModifier(),
                                           CAT->getIndexTypeCVRQualifiers()));
  if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy))
    return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
                                                  IAT->getSizeModifier(),
                                           IAT->getIndexTypeCVRQualifiers()));

  if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy))
    return cast<ArrayType>(
                  getDependentSizedArrayType(NewEltTy,
                                             DSAT->getSizeExpr(),
                                             DSAT->getSizeModifier(),
                                             DSAT->getIndexTypeCVRQualifiers(),
                                             DSAT->getBracketsRange()));

  const auto *VAT = cast<VariableArrayType>(ATy);
  return cast<ArrayType>(getVariableArrayType(NewEltTy,
                                              VAT->getSizeExpr(),
                                              VAT->getSizeModifier(),
                                              VAT->getIndexTypeCVRQualifiers(),
                                              VAT->getBracketsRange()));
}

/// Perform the array/function-to-pointer parameter adjustment on \p T;
/// non-array, non-function types are returned unchanged.
QualType ASTContext::getAdjustedParameterType(QualType T) const {
  if (T->isArrayType() || T->isFunctionType())
    return getDecayedType(T);
  return T;
}

/// Compute the parameter type as it affects a function's signature: decay
/// VLAs and arrays/functions to pointers, then drop top-level qualifiers.
QualType ASTContext::getSignatureParameterType(QualType T) const {
  T = getVariableArrayDecayedType(T);
  T = getAdjustedParameterType(T);
  return T.getUnqualifiedType();
}

QualType ASTContext::getExceptionObjectType(QualType T) const {
  // C++ [except.throw]p3:
  //   A throw-expression initializes a temporary object, called the exception
  //   object, the type of which is determined by removing any top-level
  //   cv-qualifiers from the static type of the operand of throw and adjusting
  //   the type from "array of T" or "function returning T" to "pointer to T"
  //   or "pointer to function returning T", [...]
  T = getVariableArrayDecayedType(T);
  if (T->isArrayType() || T->isFunctionType())
    T = getDecayedType(T);
  return T.getUnqualifiedType();
}

/// getArrayDecayedType - Return the properly qualified result of decaying the
/// specified array type to a pointer. This operation is non-trivial when
/// handling typedefs etc. The canonical type of "T" must be an array type,
/// this returns a pointer to a properly qualified element of the array.
///
/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
QualType ASTContext::getArrayDecayedType(QualType Ty) const {
  // Get the element type with 'getAsArrayType' so that we don't lose any
  // typedefs in the element type of the array. This also handles propagation
  // of type qualifiers from the array type into the element type if present
  // (C99 6.7.3p8).
  const ArrayType *PrettyArrayType = getAsArrayType(Ty);
  assert(PrettyArrayType && "Not an array type!");

  QualType PtrTy = getPointerType(PrettyArrayType->getElementType());

  // int x[restrict 4] ->  int *restrict
  QualType Result = getQualifiedType(PtrTy,
                                     PrettyArrayType->getIndexTypeQualifiers());

  // int x[_Nullable] -> int * _Nullable
  if (auto Nullability = Ty->getNullability()) {
    Result = const_cast<ASTContext *>(this)->getAttributedType(
        AttributedType::getNullabilityAttrKind(*Nullability), Result, Result);
  }
  return Result;
}

QualType ASTContext::getBaseElementType(const ArrayType *array) const {
  return getBaseElementType(array->getElementType());
}

/// Strip off all array layers from \p type, accumulating the qualifiers found
/// at every level onto the innermost (non-array) element type.
QualType ASTContext::getBaseElementType(QualType type) const {
  Qualifiers qs;
  while (true) {
    SplitQualType split = type.getSplitDesugaredType();
    const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
    if (!array) break;

    type = array->getElementType();
    qs.addConsistentQualifiers(split.Quals);
  }

  return getQualifiedType(type, qs);
}

/// getConstantArrayElementCount - Returns number of constant array elements.
/// For nested constant arrays, this is the product of all dimension sizes.
uint64_t
ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA)  const {
  uint64_t ElementCount = 1;
  do {
    ElementCount *= CA->getSize().getZExtValue();
    CA = dyn_cast_or_null<ConstantArrayType>(
        CA->getElementType()->getAsArrayTypeUnsafe());
  } while (CA);
  return ElementCount;
}

/// Returns the total number of elements initialized by the (possibly nested)
/// ArrayInitLoopExpr, or 0 if \p AILE is null.
uint64_t ASTContext::getArrayInitLoopExprElementCount(
    const ArrayInitLoopExpr *AILE) const {
  if (!AILE)
    return 0;

  uint64_t ElementCount = 1;

  do {
    ElementCount *= AILE->getArraySize().getZExtValue();
    AILE = dyn_cast<ArrayInitLoopExpr>(AILE->getSubExpr());
  } while (AILE);

  return ElementCount;
}

/// getFloatingRank - Return a relative rank for floating point types.
/// This routine will assert if passed a built-in type that isn't a float.
static FloatingRank getFloatingRank(QualType T) {
  // Rank is determined by the element type for complex types.
  if (const auto *CT = T->getAs<ComplexType>())
    return getFloatingRank(CT->getElementType());

  switch (T->castAs<BuiltinType>()->getKind()) {
  default: llvm_unreachable("getFloatingRank(): not a floating type");
  case BuiltinType::Float16: return Float16Rank;
  case BuiltinType::Half:    return HalfRank;
  case BuiltinType::Float:   return FloatRank;
  case BuiltinType::Double:  return DoubleRank;
  case BuiltinType::LongDouble: return LongDoubleRank;
  case BuiltinType::Float128: return Float128Rank;
  case BuiltinType::BFloat16: return BFloat16Rank;
  case BuiltinType::Ibm128: return Ibm128Rank;
  }
}

/// getFloatingTypeOrder - Compare the rank of the two specified floating
/// point types, ignoring the domain of the type (i.e. 'double' ==
/// '_Complex double').  If LHS > RHS, return 1.  If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
  FloatingRank LHSR = getFloatingRank(LHS);
  FloatingRank RHSR = getFloatingRank(RHS);

  if (LHSR == RHSR)
    return 0;
  if (LHSR > RHSR)
    return 1;
  return -1;
}

/// Like getFloatingTypeOrder, but first treats types with identical float
/// semantics (per getFloatTypeSemantics) as equal regardless of rank.
int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const {
  if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS))
    return 0;
  return getFloatingTypeOrder(LHS, RHS);
}

/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
/// routine will assert if passed a built-in type that isn't an integer or enum,
/// or if it is not canonicalized.
///
/// The returned value encodes (bit-width << 3) + sub-rank, so wider types
/// always outrank narrower ones, and among same-width types the sub-rank
/// (Bool < char < short < int < long < long long < __int128) breaks the tie.
unsigned ASTContext::getIntegerRank(const Type *T) const {
  assert(T->isCanonicalUnqualified() && "T should be canonicalized");

  // Results in this 'losing' to any type of the same size, but winning if
  // larger.
  if (const auto *EIT = dyn_cast<BitIntType>(T))
    return 0 + (EIT->getNumBits() << 3);

  switch (cast<BuiltinType>(T)->getKind()) {
  default: llvm_unreachable("getIntegerRank(): not a built-in integer");
  case BuiltinType::Bool:
    return 1 + (getIntWidth(BoolTy) << 3);
  case BuiltinType::Char_S:
  case BuiltinType::Char_U:
  case BuiltinType::SChar:
  case BuiltinType::UChar:
    return 2 + (getIntWidth(CharTy) << 3);
  case BuiltinType::Short:
  case BuiltinType::UShort:
    return 3 + (getIntWidth(ShortTy) << 3);
  case BuiltinType::Int:
  case BuiltinType::UInt:
    return 4 + (getIntWidth(IntTy) << 3);
  case BuiltinType::Long:
  case BuiltinType::ULong:
    return 5 + (getIntWidth(LongTy) << 3);
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
    return 6 + (getIntWidth(LongLongTy) << 3);
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return 7 + (getIntWidth(Int128Ty) << 3);

  // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of
  // their underlying types" [c++20 conv.rank]
  case BuiltinType::Char8:
    return getIntegerRank(UnsignedCharTy.getTypePtr());
  case BuiltinType::Char16:
    return getIntegerRank(
        getFromTargetType(Target->getChar16Type()).getTypePtr());
  case BuiltinType::Char32:
    return getIntegerRank(
        getFromTargetType(Target->getChar32Type()).getTypePtr());
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
    return getIntegerRank(
        getFromTargetType(Target->getWCharType()).getTypePtr());
  }
}

/// Whether this is a promotable bitfield reference according
/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
///
/// \returns the type this bit-field will promote to, or NULL if no
/// promotion occurs.
QualType ASTContext::isPromotableBitField(Expr *E) const {
  // Dependent expressions cannot be classified yet.
  if (E->isTypeDependent() || E->isValueDependent())
    return {};

  // C++ [conv.prom]p5:
  //   If the bit-field has an enumerated type, it is treated as any other
  //   value of that type for promotion purposes.
  if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType())
    return {};

  // FIXME: We should not do this unless E->refersToBitField() is true. This
  // matters in C where getSourceBitField() will find bit-fields for various
  // cases where the source expression is not a bit-field designator.

  FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
  if (!Field)
    return {};

  QualType FT = Field->getType();

  uint64_t BitWidth = Field->getBitWidthValue(*this);
  uint64_t IntSize = getTypeSize(IntTy);
  // C++ [conv.prom]p5:
  //   A prvalue for an integral bit-field can be converted to a prvalue of type
  //   int if int can represent all the values of the bit-field; otherwise, it
  //   can be converted to unsigned int if unsigned int can represent all the
  //   values of the bit-field. If the bit-field is larger yet, no integral
  //   promotion applies to it.
  // C11 6.3.1.1/2:
  //   [For a bit-field of type _Bool, int, signed int, or unsigned int:]
  //   If an int can represent all values of the original type (as restricted by
  //   the width, for a bit-field), the value is converted to an int; otherwise,
  //   it is converted to an unsigned int.
  //
  // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
  //        We perform that promotion here to match GCC and C++.
  // FIXME: C does not permit promotion of an enum bit-field whose rank is
  //        greater than that of 'int'. We perform that promotion to match GCC.
  if (BitWidth < IntSize)
    return IntTy;

  if (BitWidth == IntSize)
    return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;

  // Bit-fields wider than int are not subject to promotions, and therefore act
  // like the base type. GCC has some weird bugs in this area that we
  // deliberately do not follow (GCC follows a pre-standard resolution to
  // C's DR315 which treats bit-width as being part of the type, and this leaks
  // into their semantics in some cases).
  return {};
}

/// getPromotedIntegerType - Returns the type that Promotable will
/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
/// integer type.
7147 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 7148 assert(!Promotable.isNull()); 7149 assert(isPromotableIntegerType(Promotable)); 7150 if (const auto *ET = Promotable->getAs<EnumType>()) 7151 return ET->getDecl()->getPromotionType(); 7152 7153 if (const auto *BT = Promotable->getAs<BuiltinType>()) { 7154 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 7155 // (3.9.1) can be converted to a prvalue of the first of the following 7156 // types that can represent all the values of its underlying type: 7157 // int, unsigned int, long int, unsigned long int, long long int, or 7158 // unsigned long long int [...] 7159 // FIXME: Is there some better way to compute this? 7160 if (BT->getKind() == BuiltinType::WChar_S || 7161 BT->getKind() == BuiltinType::WChar_U || 7162 BT->getKind() == BuiltinType::Char8 || 7163 BT->getKind() == BuiltinType::Char16 || 7164 BT->getKind() == BuiltinType::Char32) { 7165 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; 7166 uint64_t FromSize = getTypeSize(BT); 7167 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, 7168 LongLongTy, UnsignedLongLongTy }; 7169 for (const auto &PT : PromoteTypes) { 7170 uint64_t ToSize = getTypeSize(PT); 7171 if (FromSize < ToSize || 7172 (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType())) 7173 return PT; 7174 } 7175 llvm_unreachable("char type should fit into long long"); 7176 } 7177 } 7178 7179 // At this point, we should have a signed or unsigned integer type. 7180 if (Promotable->isSignedIntegerType()) 7181 return IntTy; 7182 uint64_t PromotableSize = getIntWidth(Promotable); 7183 uint64_t IntSize = getIntWidth(IntTy); 7184 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); 7185 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; 7186 } 7187 7188 /// Recurses in pointer/array types until it finds an objc retainable 7189 /// type and returns its ownership. 
7190 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { 7191 while (!T.isNull()) { 7192 if (T.getObjCLifetime() != Qualifiers::OCL_None) 7193 return T.getObjCLifetime(); 7194 if (T->isArrayType()) 7195 T = getBaseElementType(T); 7196 else if (const auto *PT = T->getAs<PointerType>()) 7197 T = PT->getPointeeType(); 7198 else if (const auto *RT = T->getAs<ReferenceType>()) 7199 T = RT->getPointeeType(); 7200 else 7201 break; 7202 } 7203 7204 return Qualifiers::OCL_None; 7205 } 7206 7207 static const Type *getIntegerTypeForEnum(const EnumType *ET) { 7208 // Incomplete enum types are not treated as integer types. 7209 // FIXME: In C++, enum types are never integer types. 7210 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped()) 7211 return ET->getDecl()->getIntegerType().getTypePtr(); 7212 return nullptr; 7213 } 7214 7215 /// getIntegerTypeOrder - Returns the highest ranked integer type: 7216 /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If 7217 /// LHS < RHS, return -1. 7218 int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const { 7219 const Type *LHSC = getCanonicalType(LHS).getTypePtr(); 7220 const Type *RHSC = getCanonicalType(RHS).getTypePtr(); 7221 7222 // Unwrap enums to their underlying type. 7223 if (const auto *ET = dyn_cast<EnumType>(LHSC)) 7224 LHSC = getIntegerTypeForEnum(ET); 7225 if (const auto *ET = dyn_cast<EnumType>(RHSC)) 7226 RHSC = getIntegerTypeForEnum(ET); 7227 7228 if (LHSC == RHSC) return 0; 7229 7230 bool LHSUnsigned = LHSC->isUnsignedIntegerType(); 7231 bool RHSUnsigned = RHSC->isUnsignedIntegerType(); 7232 7233 unsigned LHSRank = getIntegerRank(LHSC); 7234 unsigned RHSRank = getIntegerRank(RHSC); 7235 7236 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned. 7237 if (LHSRank == RHSRank) return 0; 7238 return LHSRank > RHSRank ? 1 : -1; 7239 } 7240 7241 // Otherwise, the LHS is signed and the RHS is unsigned or visa versa. 
7242 if (LHSUnsigned) { 7243 // If the unsigned [LHS] type is larger, return it. 7244 if (LHSRank >= RHSRank) 7245 return 1; 7246 7247 // If the signed type can represent all values of the unsigned type, it 7248 // wins. Because we are dealing with 2's complement and types that are 7249 // powers of two larger than each other, this is always safe. 7250 return -1; 7251 } 7252 7253 // If the unsigned [RHS] type is larger, return it. 7254 if (RHSRank >= LHSRank) 7255 return -1; 7256 7257 // If the signed type can represent all values of the unsigned type, it 7258 // wins. Because we are dealing with 2's complement and types that are 7259 // powers of two larger than each other, this is always safe. 7260 return 1; 7261 } 7262 7263 TypedefDecl *ASTContext::getCFConstantStringDecl() const { 7264 if (CFConstantStringTypeDecl) 7265 return CFConstantStringTypeDecl; 7266 7267 assert(!CFConstantStringTagDecl && 7268 "tag and typedef should be initialized together"); 7269 CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag"); 7270 CFConstantStringTagDecl->startDefinition(); 7271 7272 struct { 7273 QualType Type; 7274 const char *Name; 7275 } Fields[5]; 7276 unsigned Count = 0; 7277 7278 /// Objective-C ABI 7279 /// 7280 /// typedef struct __NSConstantString_tag { 7281 /// const int *isa; 7282 /// int flags; 7283 /// const char *str; 7284 /// long length; 7285 /// } __NSConstantString; 7286 /// 7287 /// Swift ABI (4.1, 4.2) 7288 /// 7289 /// typedef struct __NSConstantString_tag { 7290 /// uintptr_t _cfisa; 7291 /// uintptr_t _swift_rc; 7292 /// _Atomic(uint64_t) _cfinfoa; 7293 /// const char *_ptr; 7294 /// uint32_t _length; 7295 /// } __NSConstantString; 7296 /// 7297 /// Swift ABI (5.0) 7298 /// 7299 /// typedef struct __NSConstantString_tag { 7300 /// uintptr_t _cfisa; 7301 /// uintptr_t _swift_rc; 7302 /// _Atomic(uint64_t) _cfinfoa; 7303 /// const char *_ptr; 7304 /// uintptr_t _length; 7305 /// } __NSConstantString; 7306 7307 const auto CFRuntime = 
getLangOpts().CFRuntime; 7308 if (static_cast<unsigned>(CFRuntime) < 7309 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) { 7310 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" }; 7311 Fields[Count++] = { IntTy, "flags" }; 7312 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" }; 7313 Fields[Count++] = { LongTy, "length" }; 7314 } else { 7315 Fields[Count++] = { getUIntPtrType(), "_cfisa" }; 7316 Fields[Count++] = { getUIntPtrType(), "_swift_rc" }; 7317 Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_swift_rc" }; 7318 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" }; 7319 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 || 7320 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2) 7321 Fields[Count++] = { IntTy, "_ptr" }; 7322 else 7323 Fields[Count++] = { getUIntPtrType(), "_ptr" }; 7324 } 7325 7326 // Create fields 7327 for (unsigned i = 0; i < Count; ++i) { 7328 FieldDecl *Field = 7329 FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(), 7330 SourceLocation(), &Idents.get(Fields[i].Name), 7331 Fields[i].Type, /*TInfo=*/nullptr, 7332 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7333 Field->setAccess(AS_public); 7334 CFConstantStringTagDecl->addDecl(Field); 7335 } 7336 7337 CFConstantStringTagDecl->completeDefinition(); 7338 // This type is designed to be compatible with NSConstantString, but cannot 7339 // use the same name, since NSConstantString is an interface. 7340 auto tagType = getTagDeclType(CFConstantStringTagDecl); 7341 CFConstantStringTypeDecl = 7342 buildImplicitTypedef(tagType, "__NSConstantString"); 7343 7344 return CFConstantStringTypeDecl; 7345 } 7346 7347 RecordDecl *ASTContext::getCFConstantStringTagDecl() const { 7348 if (!CFConstantStringTagDecl) 7349 getCFConstantStringDecl(); // Build the tag and the typedef. 7350 return CFConstantStringTagDecl; 7351 } 7352 7353 // getCFConstantStringType - Return the type used for constant CFStrings. 
QualType ASTContext::getCFConstantStringType() const {
  return getTypedefType(getCFConstantStringDecl());
}

/// Lazily build and cache the implicit 'struct objc_super' type.
QualType ASTContext::getObjCSuperType() const {
  if (ObjCSuperType.isNull()) {
    RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super");
    getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl);
    ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl);
  }
  return ObjCSuperType;
}

/// Record an externally supplied CFConstantString typedef (e.g. from a PCH),
/// caching both the typedef and its underlying tag decl.
void ASTContext::setCFConstantStringType(QualType T) {
  const auto *TD = T->castAs<TypedefType>();
  CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl());
  const auto *TagType =
      CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>();
  CFConstantStringTagDecl = TagType->getDecl();
}

/// Lazily build the implicit '__block_descriptor' record used by the blocks
/// runtime: { unsigned long reserved; unsigned long Size; }.
QualType ASTContext::getBlockDescriptorType() const {
  if (BlockDescriptorType)
    return getTagDeclType(BlockDescriptorType);

  RecordDecl *RD;
  // FIXME: Needs the FlagAppleBlock bit.
  RD = buildImplicitRecord("__block_descriptor");
  RD->startDefinition();

  QualType FieldTypes[] = {
    UnsignedLongTy,
    UnsignedLongTy,
  };

  static const char *const FieldNames[] = {
    "reserved",
    "Size"
  };

  for (size_t i = 0; i < 2; ++i) {
    FieldDecl *Field = FieldDecl::Create(
        *this, RD, SourceLocation(), SourceLocation(),
        &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr,
        /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    RD->addDecl(Field);
  }

  RD->completeDefinition();

  BlockDescriptorType = RD;

  return getTagDeclType(BlockDescriptorType);
}

/// Lazily build the implicit '__block_descriptor_withcopydispose' record,
/// which extends the plain descriptor with copy/dispose helper pointers.
QualType ASTContext::getBlockDescriptorExtendedType() const {
  if (BlockDescriptorExtendedType)
    return getTagDeclType(BlockDescriptorExtendedType);

  RecordDecl *RD;
  // FIXME: Needs the FlagAppleBlock bit.
  RD = buildImplicitRecord("__block_descriptor_withcopydispose");
  RD->startDefinition();

  QualType FieldTypes[] = {
    UnsignedLongTy,
    UnsignedLongTy,
    getPointerType(VoidPtrTy),
    getPointerType(VoidPtrTy)
  };

  static const char *const FieldNames[] = {
    "reserved",
    "Size",
    "CopyFuncPtr",
    "DestroyFuncPtr"
  };

  for (size_t i = 0; i < 4; ++i) {
    FieldDecl *Field = FieldDecl::Create(
        *this, RD, SourceLocation(), SourceLocation(),
        &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr,
        /*BitWidth=*/nullptr,
        /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    RD->addDecl(Field);
  }

  RD->completeDefinition();

  BlockDescriptorExtendedType = RD;
  return getTagDeclType(BlockDescriptorExtendedType);
}

/// Classify a type for OpenCL purposes (image, pipe, event, ...); anything
/// unrecognized falls back to OCLTK_Default.
OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
  const auto *BT = dyn_cast<BuiltinType>(T);

  if (!BT) {
    if (isa<PipeType>(T))
      return OCLTK_Pipe;

    return OCLTK_Default;
  }

  switch (BT->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id: \
    return OCLTK_Image;
#include "clang/Basic/OpenCLImageTypes.def"

  case BuiltinType::OCLClkEvent:
    return OCLTK_ClkEvent;

  case BuiltinType::OCLEvent:
    return OCLTK_Event;

  case BuiltinType::OCLQueue:
    return OCLTK_Queue;

  case BuiltinType::OCLReserveID:
    return OCLTK_ReserveID;

  case BuiltinType::OCLSampler:
    return OCLTK_Sampler;

  default:
    return OCLTK_Default;
  }
}

LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const {
  return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T));
}

/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
/// requires copy/dispose.
/// Note that this must match the logic
/// in buildByrefHelpers.
bool ASTContext::BlockRequiresCopying(QualType Ty,
                                      const VarDecl *D) {
  // C++ records need helpers unless they are copy-initializer-free and
  // trivially destructible.
  if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
    const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr();
    if (!copyExpr && record->hasTrivialDestructor()) return false;

    return true;
  }

  // The block needs copy/destroy helpers if Ty is non-trivial to destructively
  // move or destroy.
  if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType())
    return true;

  if (!Ty->isObjCRetainableType()) return false;

  Qualifiers qs = Ty.getQualifiers();

  // If we have lifetime, that dominates.
  if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
    switch (lifetime) {
      case Qualifiers::OCL_None: llvm_unreachable("impossible");

      // These are just bits as far as the runtime is concerned.
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        return false;

      // These cases should have been taken care of when checking the type's
      // non-triviality.
      case Qualifiers::OCL_Weak:
      case Qualifiers::OCL_Strong:
        llvm_unreachable("impossible");
    }
    llvm_unreachable("fell out of lifetime switch!");
  }
  return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
          Ty->isObjCObjectPointerType());
}

/// Compute the ObjC lifetime and extended-layout requirement of a __block
/// ("byref") variable's type. Returns false when ObjC is off or GC is active.
bool ASTContext::getByrefLifetime(QualType Ty,
                                  Qualifiers::ObjCLifetime &LifeTime,
                                  bool &HasByrefExtendedLayout) const {
  if (!getLangOpts().ObjC ||
      getLangOpts().getGC() != LangOptions::NonGC)
    return false;

  HasByrefExtendedLayout = false;
  if (Ty->isRecordType()) {
    HasByrefExtendedLayout = true;
    LifeTime = Qualifiers::OCL_None;
  } else if ((LifeTime = Ty.getObjCLifetime())) {
    // Honor the ARC qualifiers.
  } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) {
    // The MRR rule.
    LifeTime = Qualifiers::OCL_ExplicitNone;
  } else {
    LifeTime = Qualifiers::OCL_None;
  }
  return true;
}

CanQualType ASTContext::getNSUIntegerType() const {
  assert(Target && "Expected target to be initialized");
  const llvm::Triple &T = Target->getTriple();
  // Windows is LLP64 rather than LP64
  if (T.isOSWindows() && T.isArch64Bit())
    return UnsignedLongLongTy;
  return UnsignedLongTy;
}

CanQualType ASTContext::getNSIntegerType() const {
  assert(Target && "Expected target to be initialized");
  const llvm::Triple &T = Target->getTriple();
  // Windows is LLP64 rather than LP64
  if (T.isOSWindows() && T.isArch64Bit())
    return LongLongTy;
  return LongTy;
}

/// Lazily build the implicit 'instancetype' typedef (an alias of 'id').
TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
  if (!ObjCInstanceTypeDecl)
    ObjCInstanceTypeDecl =
        buildImplicitTypedef(getObjCIdType(), "instancetype");
  return ObjCInstanceTypeDecl;
}

// This returns true if a type has been typedefed to BOOL:
// typedef <type> BOOL;
static bool isTypeTypedefedAsBOOL(QualType T) {
  if (const auto *TT = dyn_cast<TypedefType>(T))
    if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
      return II->isStr("BOOL");

  return false;
}

/// getObjCEncodingTypeSize returns size of type for objective-c encoding
/// purpose.
7591 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 7592 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 7593 return CharUnits::Zero(); 7594 7595 CharUnits sz = getTypeSizeInChars(type); 7596 7597 // Make all integer and enum types at least as large as an int 7598 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 7599 sz = std::max(sz, getTypeSizeInChars(IntTy)); 7600 // Treat arrays as pointers, since that's how they're passed in. 7601 else if (type->isArrayType()) 7602 sz = getTypeSizeInChars(VoidPtrTy); 7603 return sz; 7604 } 7605 7606 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 7607 return getTargetInfo().getCXXABI().isMicrosoft() && 7608 VD->isStaticDataMember() && 7609 VD->getType()->isIntegralOrEnumerationType() && 7610 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 7611 } 7612 7613 ASTContext::InlineVariableDefinitionKind 7614 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 7615 if (!VD->isInline()) 7616 return InlineVariableDefinitionKind::None; 7617 7618 // In almost all cases, it's a weak definition. 7619 auto *First = VD->getFirstDecl(); 7620 if (First->isInlineSpecified() || !First->isStaticDataMember()) 7621 return InlineVariableDefinitionKind::Weak; 7622 7623 // If there's a file-context declaration in this translation unit, it's a 7624 // non-discardable definition. 7625 for (auto *D : VD->redecls()) 7626 if (D->getLexicalDeclContext()->isFileContext() && 7627 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 7628 return InlineVariableDefinitionKind::Strong; 7629 7630 // If we've not seen one yet, we don't know. 7631 return InlineVariableDefinitionKind::WeakUnknown; 7632 } 7633 7634 static std::string charUnitsToString(const CharUnits &CU) { 7635 return llvm::itostr(CU.getQuantity()); 7636 } 7637 7638 /// getObjCEncodingForBlock - Return the encoded type for this block 7639 /// declaration. 
std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
  std::string S;

  const BlockDecl *Decl = Expr->getBlockDecl();
  QualType BlockTy =
      Expr->getType()->castAs<BlockPointerType>()->getPointeeType();
  QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType();
  // Encode result type.
  if (getLangOpts().EncodeExtendedBlockSig)
    getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S,
                                      true /*Extended*/);
  else
    getObjCEncodingForType(BlockReturnTy, S);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
  CharUnits ParmOffset = PtrSize;
  // First pass: total the argument frame size (zero-sized params are skipped
  // here and below so the offsets stay in sync).
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    if (sz.isZero())
      continue;
    assert(sz.isPositive() && "BlockExpr - Incomplete param type");
    ParmOffset += sz;
  }
  // Size of the argument frame
  S += charUnitsToString(ParmOffset);
  // Block pointer and offset.
  S += "@?0";

  // Argument types.
  // Second pass: emit each parameter's encoding followed by its byte offset.
  ParmOffset = PtrSize;
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    if (getLangOpts().EncodeExtendedBlockSig)
      getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType,
                                        S, true /*Extended*/);
    else
      getObjCEncodingForType(PType, S);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }

  return S;
}

/// Encode a free function's signature: return type, total frame size, then
/// each parameter's encoding and byte offset.
std::string
ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
  std::string S;
  // Encode result type.
  getObjCEncodingForType(Decl->getReturnType(), S);
  CharUnits ParmOffset;
  // Compute size of all parameters.
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForFunctionDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(ParmOffset);
  ParmOffset = CharUnits::Zero();

  // Argument types.
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForType(PType, S);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }

  return S;
}

/// getObjCEncodingForMethodParameter - Return the encoded type for a single
/// method parameter or return type. If Extended, include class names and
/// block object types.
void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
                                                   QualType T, std::string& S,
                                                   bool Extended) const {
  // Encode type qualifier, 'in', 'inout', etc. for the parameter.
  getObjCEncodingForTypeQualifier(QT, S);
  // Encode parameter type.
  ObjCEncOptions Options = ObjCEncOptions()
                               .setExpandPointedToStructures()
                               .setExpandStructures()
                               .setIsOutermostType();
  if (Extended)
    Options.setEncodeBlockParameters().setEncodeClassNames();
  getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr);
}

/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration.
std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
                                                     bool Extended) const {
  // FIXME: This is not very efficient.
  // Encode return type.
  std::string S;
  getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
                                    Decl->getReturnType(), S, Extended);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
  // The first two arguments (self and _cmd) are pointers; account for
  // their size.
  CharUnits ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    QualType PType = (*PI)->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForMethodDecl - Incomplete param type");
    ParmOffset += sz;
  }
  // Total frame size, then the implicit self ('@' at offset 0) and _cmd
  // (':' at offset PtrSize) arguments.
  S += charUnitsToString(ParmOffset);
  S += "@0:";
  S += charUnitsToString(PtrSize);

  // Argument types.
  ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    const ParmVarDecl *PVDecl = *PI;
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(),
                                      PType, S, Extended);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }

  return S;
}

/// Find the property-implementation decl (in a category or class
/// implementation) matching \p PD, or null if \p Container is null or has no
/// matching implementation.
ObjCPropertyImplDecl *
ASTContext::getObjCPropertyImplDeclForPropertyDecl(
                                                  const ObjCPropertyDecl *PD,
                                                  const Decl *Container) const {
  if (!Container)
    return nullptr;
  if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) {
    for (auto *PID : CID->property_impls())
      if (PID->getPropertyDecl() == PD)
        return PID;
  } else {
    const auto *OID = cast<ObjCImplementationDecl>(Container);
    for (auto *PID : OID->property_impls())
      if (PID->getPropertyDecl() == PD)
        return PID;
  }
  return nullptr;
}

/// getObjCEncodingForPropertyDecl - Return the encoded type for this
/// property declaration. If non-NULL, Container must be either an
/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
/// NULL when getting encodings for protocol properties.
/// Property attributes are stored as a comma-delimited C string. The simple
/// attributes readonly and bycopy are encoded as single characters. The
/// parametrized attributes, getter=name, setter=name, and ivar=name, are
/// encoded as single characters, followed by an identifier.
Property types 7833 /// are also encoded as a parametrized attribute. The characters used to encode 7834 /// these attributes are defined by the following enumeration: 7835 /// @code 7836 /// enum PropertyAttributes { 7837 /// kPropertyReadOnly = 'R', // property is read-only. 7838 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 7839 /// kPropertyByref = '&', // property is a reference to the value last assigned 7840 /// kPropertyDynamic = 'D', // property is dynamic 7841 /// kPropertyGetter = 'G', // followed by getter selector name 7842 /// kPropertySetter = 'S', // followed by setter selector name 7843 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 7844 /// kPropertyType = 'T' // followed by old-style type encoding. 7845 /// kPropertyWeak = 'W' // 'weak' property 7846 /// kPropertyStrong = 'P' // property GC'able 7847 /// kPropertyNonAtomic = 'N' // property non-atomic 7848 /// }; 7849 /// @endcode 7850 std::string 7851 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 7852 const Decl *Container) const { 7853 // Collect information from the property implementation decl(s). 7854 bool Dynamic = false; 7855 ObjCPropertyImplDecl *SynthesizePID = nullptr; 7856 7857 if (ObjCPropertyImplDecl *PropertyImpDecl = 7858 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { 7859 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) 7860 Dynamic = true; 7861 else 7862 SynthesizePID = PropertyImpDecl; 7863 } 7864 7865 // FIXME: This is not very efficient. 7866 std::string S = "T"; 7867 7868 // Encode result type. 7869 // GCC has some special rules regarding encoding of properties which 7870 // closely resembles encoding of ivars. 
7871 getObjCEncodingForPropertyType(PD->getType(), S); 7872 7873 if (PD->isReadOnly()) { 7874 S += ",R"; 7875 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) 7876 S += ",C"; 7877 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) 7878 S += ",&"; 7879 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) 7880 S += ",W"; 7881 } else { 7882 switch (PD->getSetterKind()) { 7883 case ObjCPropertyDecl::Assign: break; 7884 case ObjCPropertyDecl::Copy: S += ",C"; break; 7885 case ObjCPropertyDecl::Retain: S += ",&"; break; 7886 case ObjCPropertyDecl::Weak: S += ",W"; break; 7887 } 7888 } 7889 7890 // It really isn't clear at all what this means, since properties 7891 // are "dynamic by default". 7892 if (Dynamic) 7893 S += ",D"; 7894 7895 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) 7896 S += ",N"; 7897 7898 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { 7899 S += ",G"; 7900 S += PD->getGetterName().getAsString(); 7901 } 7902 7903 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { 7904 S += ",S"; 7905 S += PD->getSetterName().getAsString(); 7906 } 7907 7908 if (SynthesizePID) { 7909 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 7910 S += ",V"; 7911 S += OID->getNameAsString(); 7912 } 7913 7914 // FIXME: OBJCGC: weak & strong 7915 return S; 7916 } 7917 7918 /// getLegacyIntegralTypeEncoding - 7919 /// Another legacy compatibility encoding: 32-bit longs are encoded as 7920 /// 'l' or 'L' , but not always. For typedefs, we need to use 7921 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 
7922 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 7923 if (PointeeTy->getAs<TypedefType>()) { 7924 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { 7925 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 7926 PointeeTy = UnsignedIntTy; 7927 else 7928 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 7929 PointeeTy = IntTy; 7930 } 7931 } 7932 } 7933 7934 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 7935 const FieldDecl *Field, 7936 QualType *NotEncodedT) const { 7937 // We follow the behavior of gcc, expanding structures which are 7938 // directly pointed to, and expanding embedded structures. Note that 7939 // these rules are sufficient to prevent recursive encoding of the 7940 // same type. 7941 getObjCEncodingForTypeImpl(T, S, 7942 ObjCEncOptions() 7943 .setExpandPointedToStructures() 7944 .setExpandStructures() 7945 .setIsOutermostType(), 7946 Field, NotEncodedT); 7947 } 7948 7949 void ASTContext::getObjCEncodingForPropertyType(QualType T, 7950 std::string& S) const { 7951 // Encode result type. 7952 // GCC has some special rules regarding encoding of properties which 7953 // closely resembles encoding of ivars. 
7954 getObjCEncodingForTypeImpl(T, S, 7955 ObjCEncOptions() 7956 .setExpandPointedToStructures() 7957 .setExpandStructures() 7958 .setIsOutermostType() 7959 .setEncodingProperty(), 7960 /*Field=*/nullptr); 7961 } 7962 7963 static char getObjCEncodingForPrimitiveType(const ASTContext *C, 7964 const BuiltinType *BT) { 7965 BuiltinType::Kind kind = BT->getKind(); 7966 switch (kind) { 7967 case BuiltinType::Void: return 'v'; 7968 case BuiltinType::Bool: return 'B'; 7969 case BuiltinType::Char8: 7970 case BuiltinType::Char_U: 7971 case BuiltinType::UChar: return 'C'; 7972 case BuiltinType::Char16: 7973 case BuiltinType::UShort: return 'S'; 7974 case BuiltinType::Char32: 7975 case BuiltinType::UInt: return 'I'; 7976 case BuiltinType::ULong: 7977 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 7978 case BuiltinType::UInt128: return 'T'; 7979 case BuiltinType::ULongLong: return 'Q'; 7980 case BuiltinType::Char_S: 7981 case BuiltinType::SChar: return 'c'; 7982 case BuiltinType::Short: return 's'; 7983 case BuiltinType::WChar_S: 7984 case BuiltinType::WChar_U: 7985 case BuiltinType::Int: return 'i'; 7986 case BuiltinType::Long: 7987 return C->getTargetInfo().getLongWidth() == 32 ? 
'l' : 'q'; 7988 case BuiltinType::LongLong: return 'q'; 7989 case BuiltinType::Int128: return 't'; 7990 case BuiltinType::Float: return 'f'; 7991 case BuiltinType::Double: return 'd'; 7992 case BuiltinType::LongDouble: return 'D'; 7993 case BuiltinType::NullPtr: return '*'; // like char* 7994 7995 case BuiltinType::BFloat16: 7996 case BuiltinType::Float16: 7997 case BuiltinType::Float128: 7998 case BuiltinType::Ibm128: 7999 case BuiltinType::Half: 8000 case BuiltinType::ShortAccum: 8001 case BuiltinType::Accum: 8002 case BuiltinType::LongAccum: 8003 case BuiltinType::UShortAccum: 8004 case BuiltinType::UAccum: 8005 case BuiltinType::ULongAccum: 8006 case BuiltinType::ShortFract: 8007 case BuiltinType::Fract: 8008 case BuiltinType::LongFract: 8009 case BuiltinType::UShortFract: 8010 case BuiltinType::UFract: 8011 case BuiltinType::ULongFract: 8012 case BuiltinType::SatShortAccum: 8013 case BuiltinType::SatAccum: 8014 case BuiltinType::SatLongAccum: 8015 case BuiltinType::SatUShortAccum: 8016 case BuiltinType::SatUAccum: 8017 case BuiltinType::SatULongAccum: 8018 case BuiltinType::SatShortFract: 8019 case BuiltinType::SatFract: 8020 case BuiltinType::SatLongFract: 8021 case BuiltinType::SatUShortFract: 8022 case BuiltinType::SatUFract: 8023 case BuiltinType::SatULongFract: 8024 // FIXME: potentially need @encodes for these! 
8025 return ' '; 8026 8027 #define SVE_TYPE(Name, Id, SingletonId) \ 8028 case BuiltinType::Id: 8029 #include "clang/Basic/AArch64SVEACLETypes.def" 8030 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 8031 #include "clang/Basic/RISCVVTypes.def" 8032 { 8033 DiagnosticsEngine &Diags = C->getDiagnostics(); 8034 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, 8035 "cannot yet @encode type %0"); 8036 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); 8037 return ' '; 8038 } 8039 8040 case BuiltinType::ObjCId: 8041 case BuiltinType::ObjCClass: 8042 case BuiltinType::ObjCSel: 8043 llvm_unreachable("@encoding ObjC primitive type"); 8044 8045 // OpenCL and placeholder types don't need @encodings. 8046 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 8047 case BuiltinType::Id: 8048 #include "clang/Basic/OpenCLImageTypes.def" 8049 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 8050 case BuiltinType::Id: 8051 #include "clang/Basic/OpenCLExtensionTypes.def" 8052 case BuiltinType::OCLEvent: 8053 case BuiltinType::OCLClkEvent: 8054 case BuiltinType::OCLQueue: 8055 case BuiltinType::OCLReserveID: 8056 case BuiltinType::OCLSampler: 8057 case BuiltinType::Dependent: 8058 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 8059 case BuiltinType::Id: 8060 #include "clang/Basic/PPCTypes.def" 8061 #define BUILTIN_TYPE(KIND, ID) 8062 #define PLACEHOLDER_TYPE(KIND, ID) \ 8063 case BuiltinType::KIND: 8064 #include "clang/AST/BuiltinTypes.def" 8065 llvm_unreachable("invalid builtin type for @encode"); 8066 } 8067 llvm_unreachable("invalid BuiltinType::Kind value"); 8068 } 8069 8070 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { 8071 EnumDecl *Enum = ET->getDecl(); 8072 8073 // The encoding of an non-fixed enum type is always 'i', regardless of size. 8074 if (!Enum->isFixed()) 8075 return 'i'; 8076 8077 // The encoding of a fixed enum type matches its fixed underlying type. 
8078 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>(); 8079 return getObjCEncodingForPrimitiveType(C, BT); 8080 } 8081 8082 static void EncodeBitField(const ASTContext *Ctx, std::string& S, 8083 QualType T, const FieldDecl *FD) { 8084 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); 8085 S += 'b'; 8086 // The NeXT runtime encodes bit fields as b followed by the number of bits. 8087 // The GNU runtime requires more information; bitfields are encoded as b, 8088 // then the offset (in bits) of the first element, then the type of the 8089 // bitfield, then the size in bits. For example, in this structure: 8090 // 8091 // struct 8092 // { 8093 // int integer; 8094 // int flags:2; 8095 // }; 8096 // On a 32-bit system, the encoding for flags would be b2 for the NeXT 8097 // runtime, but b32i2 for the GNU runtime. The reason for this extra 8098 // information is not especially sensible, but we're stuck with it for 8099 // compatibility with GCC, although providing it breaks anything that 8100 // actually uses runtime introspection and wants to work on both runtimes... 8101 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { 8102 uint64_t Offset; 8103 8104 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) { 8105 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr, 8106 IVD); 8107 } else { 8108 const RecordDecl *RD = FD->getParent(); 8109 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); 8110 Offset = RL.getFieldOffset(FD->getFieldIndex()); 8111 } 8112 8113 S += llvm::utostr(Offset); 8114 8115 if (const auto *ET = T->getAs<EnumType>()) 8116 S += ObjCEncodingForEnumType(Ctx, ET); 8117 else { 8118 const auto *BT = T->castAs<BuiltinType>(); 8119 S += getObjCEncodingForPrimitiveType(Ctx, BT); 8120 } 8121 } 8122 S += llvm::utostr(FD->getBitWidthValue(*Ctx)); 8123 } 8124 8125 // Helper function for determining whether the encoded type string would include 8126 // a template specialization type. 
8127 static bool hasTemplateSpecializationInEncodedString(const Type *T, 8128 bool VisitBasesAndFields) { 8129 T = T->getBaseElementTypeUnsafe(); 8130 8131 if (auto *PT = T->getAs<PointerType>()) 8132 return hasTemplateSpecializationInEncodedString( 8133 PT->getPointeeType().getTypePtr(), false); 8134 8135 auto *CXXRD = T->getAsCXXRecordDecl(); 8136 8137 if (!CXXRD) 8138 return false; 8139 8140 if (isa<ClassTemplateSpecializationDecl>(CXXRD)) 8141 return true; 8142 8143 if (!CXXRD->hasDefinition() || !VisitBasesAndFields) 8144 return false; 8145 8146 for (auto B : CXXRD->bases()) 8147 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), 8148 true)) 8149 return true; 8150 8151 for (auto *FD : CXXRD->fields()) 8152 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), 8153 true)) 8154 return true; 8155 8156 return false; 8157 } 8158 8159 // FIXME: Use SmallString for accumulating string. 8160 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, 8161 const ObjCEncOptions Options, 8162 const FieldDecl *FD, 8163 QualType *NotEncodedT) const { 8164 CanQualType CT = getCanonicalType(T); 8165 switch (CT->getTypeClass()) { 8166 case Type::Builtin: 8167 case Type::Enum: 8168 if (FD && FD->isBitField()) 8169 return EncodeBitField(this, S, T, FD); 8170 if (const auto *BT = dyn_cast<BuiltinType>(CT)) 8171 S += getObjCEncodingForPrimitiveType(this, BT); 8172 else 8173 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 8174 return; 8175 8176 case Type::Complex: 8177 S += 'j'; 8178 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S, 8179 ObjCEncOptions(), 8180 /*Field=*/nullptr); 8181 return; 8182 8183 case Type::Atomic: 8184 S += 'A'; 8185 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S, 8186 ObjCEncOptions(), 8187 /*Field=*/nullptr); 8188 return; 8189 8190 // encoding for pointer or reference types. 
8191 case Type::Pointer: 8192 case Type::LValueReference: 8193 case Type::RValueReference: { 8194 QualType PointeeTy; 8195 if (isa<PointerType>(CT)) { 8196 const auto *PT = T->castAs<PointerType>(); 8197 if (PT->isObjCSelType()) { 8198 S += ':'; 8199 return; 8200 } 8201 PointeeTy = PT->getPointeeType(); 8202 } else { 8203 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 8204 } 8205 8206 bool isReadOnly = false; 8207 // For historical/compatibility reasons, the read-only qualifier of the 8208 // pointee gets emitted _before_ the '^'. The read-only qualifier of 8209 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 8210 // Also, do not emit the 'r' for anything but the outermost type! 8211 if (T->getAs<TypedefType>()) { 8212 if (Options.IsOutermostType() && T.isConstQualified()) { 8213 isReadOnly = true; 8214 S += 'r'; 8215 } 8216 } else if (Options.IsOutermostType()) { 8217 QualType P = PointeeTy; 8218 while (auto PT = P->getAs<PointerType>()) 8219 P = PT->getPointeeType(); 8220 if (P.isConstQualified()) { 8221 isReadOnly = true; 8222 S += 'r'; 8223 } 8224 } 8225 if (isReadOnly) { 8226 // Another legacy compatibility encoding. Some ObjC qualifier and type 8227 // combinations need to be rearranged. 8228 // Rewrite "in const" from "nr" to "rn" 8229 if (StringRef(S).endswith("nr")) 8230 S.replace(S.end()-2, S.end(), "rn"); 8231 } 8232 8233 if (PointeeTy->isCharType()) { 8234 // char pointer types should be encoded as '*' unless it is a 8235 // type that has been typedef'd to 'BOOL'. 8236 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 8237 S += '*'; 8238 return; 8239 } 8240 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) { 8241 // GCC binary compat: Need to convert "struct objc_class *" to "#". 8242 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 8243 S += '#'; 8244 return; 8245 } 8246 // GCC binary compat: Need to convert "struct objc_object *" to "@". 
8247 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 8248 S += '@'; 8249 return; 8250 } 8251 // If the encoded string for the class includes template names, just emit 8252 // "^v" for pointers to the class. 8253 if (getLangOpts().CPlusPlus && 8254 (!getLangOpts().EncodeCXXClassTemplateSpec && 8255 hasTemplateSpecializationInEncodedString( 8256 RTy, Options.ExpandPointedToStructures()))) { 8257 S += "^v"; 8258 return; 8259 } 8260 // fall through... 8261 } 8262 S += '^'; 8263 getLegacyIntegralTypeEncoding(PointeeTy); 8264 8265 ObjCEncOptions NewOptions; 8266 if (Options.ExpandPointedToStructures()) 8267 NewOptions.setExpandStructures(); 8268 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, 8269 /*Field=*/nullptr, NotEncodedT); 8270 return; 8271 } 8272 8273 case Type::ConstantArray: 8274 case Type::IncompleteArray: 8275 case Type::VariableArray: { 8276 const auto *AT = cast<ArrayType>(CT); 8277 8278 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) { 8279 // Incomplete arrays are encoded as a pointer to the array element. 8280 S += '^'; 8281 8282 getObjCEncodingForTypeImpl( 8283 AT->getElementType(), S, 8284 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); 8285 } else { 8286 S += '['; 8287 8288 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 8289 S += llvm::utostr(CAT->getSize().getZExtValue()); 8290 else { 8291 //Variable length arrays are encoded as a regular array with 0 elements. 
8292 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 8293 "Unknown array type!"); 8294 S += '0'; 8295 } 8296 8297 getObjCEncodingForTypeImpl( 8298 AT->getElementType(), S, 8299 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, 8300 NotEncodedT); 8301 S += ']'; 8302 } 8303 return; 8304 } 8305 8306 case Type::FunctionNoProto: 8307 case Type::FunctionProto: 8308 S += '?'; 8309 return; 8310 8311 case Type::Record: { 8312 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 8313 S += RDecl->isUnion() ? '(' : '{'; 8314 // Anonymous structures print as '?' 8315 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 8316 S += II->getName(); 8317 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 8318 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 8319 llvm::raw_string_ostream OS(S); 8320 printTemplateArgumentList(OS, TemplateArgs.asArray(), 8321 getPrintingPolicy()); 8322 } 8323 } else { 8324 S += '?'; 8325 } 8326 if (Options.ExpandStructures()) { 8327 S += '='; 8328 if (!RDecl->isUnion()) { 8329 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); 8330 } else { 8331 for (const auto *Field : RDecl->fields()) { 8332 if (FD) { 8333 S += '"'; 8334 S += Field->getNameAsString(); 8335 S += '"'; 8336 } 8337 8338 // Special case bit-fields. 8339 if (Field->isBitField()) { 8340 getObjCEncodingForTypeImpl(Field->getType(), S, 8341 ObjCEncOptions().setExpandStructures(), 8342 Field); 8343 } else { 8344 QualType qt = Field->getType(); 8345 getLegacyIntegralTypeEncoding(qt); 8346 getObjCEncodingForTypeImpl( 8347 qt, S, 8348 ObjCEncOptions().setExpandStructures().setIsStructField(), FD, 8349 NotEncodedT); 8350 } 8351 } 8352 } 8353 } 8354 S += RDecl->isUnion() ? ')' : '}'; 8355 return; 8356 } 8357 8358 case Type::BlockPointer: { 8359 const auto *BT = T->castAs<BlockPointerType>(); 8360 S += "@?"; // Unlike a pointer-to-function, which is "^?". 
8361 if (Options.EncodeBlockParameters()) { 8362 const auto *FT = BT->getPointeeType()->castAs<FunctionType>(); 8363 8364 S += '<'; 8365 // Block return type 8366 getObjCEncodingForTypeImpl(FT->getReturnType(), S, 8367 Options.forComponentType(), FD, NotEncodedT); 8368 // Block self 8369 S += "@?"; 8370 // Block parameters 8371 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) { 8372 for (const auto &I : FPT->param_types()) 8373 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, 8374 NotEncodedT); 8375 } 8376 S += '>'; 8377 } 8378 return; 8379 } 8380 8381 case Type::ObjCObject: { 8382 // hack to match legacy encoding of *id and *Class 8383 QualType Ty = getObjCObjectPointerType(CT); 8384 if (Ty->isObjCIdType()) { 8385 S += "{objc_object=}"; 8386 return; 8387 } 8388 else if (Ty->isObjCClassType()) { 8389 S += "{objc_class=}"; 8390 return; 8391 } 8392 // TODO: Double check to make sure this intentionally falls through. 8393 [[fallthrough]]; 8394 } 8395 8396 case Type::ObjCInterface: { 8397 // Ignore protocol qualifiers when mangling at this level. 
8398 // @encode(class_name) 8399 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface(); 8400 S += '{'; 8401 S += OI->getObjCRuntimeNameAsString(); 8402 if (Options.ExpandStructures()) { 8403 S += '='; 8404 SmallVector<const ObjCIvarDecl*, 32> Ivars; 8405 DeepCollectObjCIvars(OI, true, Ivars); 8406 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 8407 const FieldDecl *Field = Ivars[i]; 8408 if (Field->isBitField()) 8409 getObjCEncodingForTypeImpl(Field->getType(), S, 8410 ObjCEncOptions().setExpandStructures(), 8411 Field); 8412 else 8413 getObjCEncodingForTypeImpl(Field->getType(), S, 8414 ObjCEncOptions().setExpandStructures(), FD, 8415 NotEncodedT); 8416 } 8417 } 8418 S += '}'; 8419 return; 8420 } 8421 8422 case Type::ObjCObjectPointer: { 8423 const auto *OPT = T->castAs<ObjCObjectPointerType>(); 8424 if (OPT->isObjCIdType()) { 8425 S += '@'; 8426 return; 8427 } 8428 8429 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 8430 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 8431 // Since this is a binary compatibility issue, need to consult with 8432 // runtime folks. Fortunately, this is a *very* obscure construct. 8433 S += '#'; 8434 return; 8435 } 8436 8437 if (OPT->isObjCQualifiedIdType()) { 8438 getObjCEncodingForTypeImpl( 8439 getObjCIdType(), S, 8440 Options.keepingOnly(ObjCEncOptions() 8441 .setExpandPointedToStructures() 8442 .setExpandStructures()), 8443 FD); 8444 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { 8445 // Note that we do extended encoding of protocol qualifier list 8446 // Only when doing ivar or property encoding. 
8447 S += '"'; 8448 for (const auto *I : OPT->quals()) { 8449 S += '<'; 8450 S += I->getObjCRuntimeNameAsString(); 8451 S += '>'; 8452 } 8453 S += '"'; 8454 } 8455 return; 8456 } 8457 8458 S += '@'; 8459 if (OPT->getInterfaceDecl() && 8460 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { 8461 S += '"'; 8462 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); 8463 for (const auto *I : OPT->quals()) { 8464 S += '<'; 8465 S += I->getObjCRuntimeNameAsString(); 8466 S += '>'; 8467 } 8468 S += '"'; 8469 } 8470 return; 8471 } 8472 8473 // gcc just blithely ignores member pointers. 8474 // FIXME: we should do better than that. 'M' is available. 8475 case Type::MemberPointer: 8476 // This matches gcc's encoding, even though technically it is insufficient. 8477 //FIXME. We should do a better job than gcc. 8478 case Type::Vector: 8479 case Type::ExtVector: 8480 // Until we have a coherent encoding of these three types, issue warning. 8481 if (NotEncodedT) 8482 *NotEncodedT = T; 8483 return; 8484 8485 case Type::ConstantMatrix: 8486 if (NotEncodedT) 8487 *NotEncodedT = T; 8488 return; 8489 8490 case Type::BitInt: 8491 if (NotEncodedT) 8492 *NotEncodedT = T; 8493 return; 8494 8495 // We could see an undeduced auto type here during error recovery. 8496 // Just ignore it. 
8497 case Type::Auto: 8498 case Type::DeducedTemplateSpecialization: 8499 return; 8500 8501 case Type::Pipe: 8502 #define ABSTRACT_TYPE(KIND, BASE) 8503 #define TYPE(KIND, BASE) 8504 #define DEPENDENT_TYPE(KIND, BASE) \ 8505 case Type::KIND: 8506 #define NON_CANONICAL_TYPE(KIND, BASE) \ 8507 case Type::KIND: 8508 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 8509 case Type::KIND: 8510 #include "clang/AST/TypeNodes.inc" 8511 llvm_unreachable("@encode for dependent type!"); 8512 } 8513 llvm_unreachable("bad type kind!"); 8514 } 8515 8516 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 8517 std::string &S, 8518 const FieldDecl *FD, 8519 bool includeVBases, 8520 QualType *NotEncodedT) const { 8521 assert(RDecl && "Expected non-null RecordDecl"); 8522 assert(!RDecl->isUnion() && "Should not be called for unions"); 8523 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) 8524 return; 8525 8526 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 8527 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 8528 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 8529 8530 if (CXXRec) { 8531 for (const auto &BI : CXXRec->bases()) { 8532 if (!BI.isVirtual()) { 8533 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8534 if (base->isEmpty()) 8535 continue; 8536 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 8537 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8538 std::make_pair(offs, base)); 8539 } 8540 } 8541 } 8542 8543 unsigned i = 0; 8544 for (FieldDecl *Field : RDecl->fields()) { 8545 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) 8546 continue; 8547 uint64_t offs = layout.getFieldOffset(i); 8548 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8549 std::make_pair(offs, Field)); 8550 ++i; 8551 } 8552 8553 if (CXXRec && includeVBases) { 8554 for (const auto &BI : CXXRec->vbases()) { 8555 CXXRecordDecl *base = 
BI.getType()->getAsCXXRecordDecl(); 8556 if (base->isEmpty()) 8557 continue; 8558 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 8559 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && 8560 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 8561 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), 8562 std::make_pair(offs, base)); 8563 } 8564 } 8565 8566 CharUnits size; 8567 if (CXXRec) { 8568 size = includeVBases ? layout.getSize() : layout.getNonVirtualSize(); 8569 } else { 8570 size = layout.getSize(); 8571 } 8572 8573 #ifndef NDEBUG 8574 uint64_t CurOffs = 0; 8575 #endif 8576 std::multimap<uint64_t, NamedDecl *>::iterator 8577 CurLayObj = FieldOrBaseOffsets.begin(); 8578 8579 if (CXXRec && CXXRec->isDynamicClass() && 8580 (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) { 8581 if (FD) { 8582 S += "\"_vptr$"; 8583 std::string recname = CXXRec->getNameAsString(); 8584 if (recname.empty()) recname = "?"; 8585 S += recname; 8586 S += '"'; 8587 } 8588 S += "^^?"; 8589 #ifndef NDEBUG 8590 CurOffs += getTypeSize(VoidPtrTy); 8591 #endif 8592 } 8593 8594 if (!RDecl->hasFlexibleArrayMember()) { 8595 // Mark the end of the structure. 8596 uint64_t offs = toBits(size); 8597 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8598 std::make_pair(offs, nullptr)); 8599 } 8600 8601 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) { 8602 #ifndef NDEBUG 8603 assert(CurOffs <= CurLayObj->first); 8604 if (CurOffs < CurLayObj->first) { 8605 uint64_t padding = CurLayObj->first - CurOffs; 8606 // FIXME: There doesn't seem to be a way to indicate in the encoding that 8607 // packing/alignment of members is different that normal, in which case 8608 // the encoding will be out-of-sync with the real layout. 8609 // If the runtime switches to just consider the size of types without 8610 // taking into account alignment, we could make padding explicit in the 8611 // encoding (e.g. using arrays of chars). 
The encoding strings would be 8612 // longer then though. 8613 CurOffs += padding; 8614 } 8615 #endif 8616 8617 NamedDecl *dcl = CurLayObj->second; 8618 if (!dcl) 8619 break; // reached end of structure. 8620 8621 if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) { 8622 // We expand the bases without their virtual bases since those are going 8623 // in the initial structure. Note that this differs from gcc which 8624 // expands virtual bases each time one is encountered in the hierarchy, 8625 // making the encoding type bigger than it really is. 8626 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, 8627 NotEncodedT); 8628 assert(!base->isEmpty()); 8629 #ifndef NDEBUG 8630 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 8631 #endif 8632 } else { 8633 const auto *field = cast<FieldDecl>(dcl); 8634 if (FD) { 8635 S += '"'; 8636 S += field->getNameAsString(); 8637 S += '"'; 8638 } 8639 8640 if (field->isBitField()) { 8641 EncodeBitField(this, S, field->getType(), field); 8642 #ifndef NDEBUG 8643 CurOffs += field->getBitWidthValue(*this); 8644 #endif 8645 } else { 8646 QualType qt = field->getType(); 8647 getLegacyIntegralTypeEncoding(qt); 8648 getObjCEncodingForTypeImpl( 8649 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), 8650 FD, NotEncodedT); 8651 #ifndef NDEBUG 8652 CurOffs += getTypeSize(field->getType()); 8653 #endif 8654 } 8655 } 8656 } 8657 } 8658 8659 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 8660 std::string& S) const { 8661 if (QT & Decl::OBJC_TQ_In) 8662 S += 'n'; 8663 if (QT & Decl::OBJC_TQ_Inout) 8664 S += 'N'; 8665 if (QT & Decl::OBJC_TQ_Out) 8666 S += 'o'; 8667 if (QT & Decl::OBJC_TQ_Bycopy) 8668 S += 'O'; 8669 if (QT & Decl::OBJC_TQ_Byref) 8670 S += 'R'; 8671 if (QT & Decl::OBJC_TQ_Oneway) 8672 S += 'V'; 8673 } 8674 8675 TypedefDecl *ASTContext::getObjCIdDecl() const { 8676 if (!ObjCIdDecl) { 8677 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); 8678 T = 
getObjCObjectPointerType(T);
    ObjCIdDecl = buildImplicitTypedef(T, "id");
  }
  return ObjCIdDecl;
}

// Lazily create the implicit 'SEL' typedef (a pointer to the builtin
// Objective-C selector type). Cached in ObjCSelDecl after first use.
TypedefDecl *ASTContext::getObjCSelDecl() const {
  if (!ObjCSelDecl) {
    QualType T = getPointerType(ObjCBuiltinSelTy);
    ObjCSelDecl = buildImplicitTypedef(T, "SEL");
  }
  return ObjCSelDecl;
}

// Lazily create the implicit 'Class' typedef (a pointer to the builtin
// Class object type). Cached in ObjCClassDecl after first use.
TypedefDecl *ASTContext::getObjCClassDecl() const {
  if (!ObjCClassDecl) {
    QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {});
    T = getObjCObjectPointerType(T);
    ObjCClassDecl = buildImplicitTypedef(T, "Class");
  }
  return ObjCClassDecl;
}

// Lazily create the implicit forward declaration of the 'Protocol'
// interface in the translation unit. Cached in ObjCProtocolClassDecl.
ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
  if (!ObjCProtocolClassDecl) {
    ObjCProtocolClassDecl
      = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(),
                                  SourceLocation(),
                                  &Idents.get("Protocol"),
                                  /*typeParamList=*/nullptr,
                                  /*PrevDecl=*/nullptr,
                                  SourceLocation(), true);
  }

  return ObjCProtocolClassDecl;
}

//===----------------------------------------------------------------------===//
// __builtin_va_list Construction Functions
//===----------------------------------------------------------------------===//

// Shared helper for targets whose va_list is just a 'char *', under the
// given typedef name.
static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context,
                                                 StringRef Name) {
  // typedef char* __builtin[_ms]_va_list;
  QualType T = Context->getPointerType(Context->CharTy);
  return Context->buildImplicitTypedef(T, Name);
}

static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) {
  return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list");
}

static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
  return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list");
}

static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
  // typedef void* __builtin_va_list;
  QualType T = Context->getPointerType(Context->VoidTy);
  return Context->buildImplicitTypedef(T, "__builtin_va_list");
}

// AArch64 AAPCS64 va_list: a 5-field struct __va_list. In C++ it lives in
// namespace std (the ABI mandates the mangled name refers to std::__va_list).
static TypedefDecl *
CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list
  RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list");
  if (Context->getLangOpts().CPlusPlus) {
    // namespace std { struct __va_list {
    auto *NS = NamespaceDecl::Create(
        const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(),
        /*Inline=*/false, SourceLocation(), SourceLocation(),
        &Context->Idents.get("std"),
        /*PrevDecl=*/nullptr, /*Nested=*/false);
    NS->setImplicit();
    VaListTagDecl->setDeclContext(NS);
  }

  VaListTagDecl->startDefinition();

  const size_t NumFields = 5;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // void *__stack;
  FieldTypes[0] = Context->getPointerType(Context->VoidTy);
  FieldNames[0] = "__stack";

  // void *__gr_top;
  FieldTypes[1] = Context->getPointerType(Context->VoidTy);
  FieldNames[1] = "__gr_top";

  // void *__vr_top;
  FieldTypes[2] = Context->getPointerType(Context->VoidTy);
  FieldNames[2] = "__vr_top";

  // int __gr_offs;
  FieldTypes[3] = Context->IntTy;
  FieldNames[3] = "__gr_offs";

  // int __vr_offs;
  FieldTypes[4] = Context->IntTy;
  FieldNames[4] = "__vr_offs";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
                                         VaListTagDecl,
                                         SourceLocation(),
                                         SourceLocation(),
                                         &Context->Idents.get(FieldNames[i]),
                                         FieldTypes[i], /*TInfo=*/nullptr,
                                         /*BitWidth=*/nullptr,
                                         /*Mutable=*/false,
                                         ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(Field);
  }
  VaListTagDecl->completeDefinition();
  Context->VaListTagDecl = VaListTagDecl;
  QualType VaListTagType = Context->getRecordType(VaListTagDecl);

  // } __builtin_va_list;
  return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list");
}

// 32-bit PowerPC SVR4 va_list: array of one 5-field __va_list_tag struct.
static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
  // typedef struct __va_list_tag {
  RecordDecl *VaListTagDecl;

  VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
  VaListTagDecl->startDefinition();

  const size_t NumFields = 5;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // unsigned char gpr;
  FieldTypes[0] = Context->UnsignedCharTy;
  FieldNames[0] = "gpr";

  // unsigned char fpr;
  FieldTypes[1] = Context->UnsignedCharTy;
  FieldNames[1] = "fpr";

  // unsigned short reserved;
  FieldTypes[2] = Context->UnsignedShortTy;
  FieldNames[2] = "reserved";

  // void* overflow_arg_area;
  FieldTypes[3] = Context->getPointerType(Context->VoidTy);
  FieldNames[3] = "overflow_arg_area";

  // void* reg_save_area;
  FieldTypes[4] = Context->getPointerType(Context->VoidTy);
  FieldNames[4] = "reg_save_area";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl,
                                         SourceLocation(),
                                         SourceLocation(),
                                         &Context->Idents.get(FieldNames[i]),
                                         FieldTypes[i], /*TInfo=*/nullptr,
                                         /*BitWidth=*/nullptr,
                                         /*Mutable=*/false,
                                         ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(Field);
  }
  VaListTagDecl->completeDefinition();
  Context->VaListTagDecl = VaListTagDecl;
  QualType VaListTagType = Context->getRecordType(VaListTagDecl);

  // } __va_list_tag;
  TypedefDecl *VaListTagTypedefDecl =
      Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");

  QualType VaListTagTypedefType =
      Context->getTypedefType(VaListTagTypedefDecl);

  // typedef __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
  QualType VaListTagArrayType
    = Context->getConstantArrayType(VaListTagTypedefType,
                                    Size, nullptr, ArrayType::Normal, 0);
  return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}

// x86-64 System V va_list: array of one 4-field __va_list_tag struct.
static TypedefDecl *
CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list_tag {
  RecordDecl *VaListTagDecl;
  VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
  VaListTagDecl->startDefinition();

  const size_t NumFields = 4;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // unsigned gp_offset;
  FieldTypes[0] = Context->UnsignedIntTy;
  FieldNames[0] = "gp_offset";

  // unsigned fp_offset;
  FieldTypes[1] = Context->UnsignedIntTy;
  FieldNames[1] = "fp_offset";

  // void* overflow_arg_area;
  FieldTypes[2] = Context->getPointerType(Context->VoidTy);
  FieldNames[2] = "overflow_arg_area";

  // void* reg_save_area;
  FieldTypes[3] = Context->getPointerType(Context->VoidTy);
  FieldNames[3] = "reg_save_area";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
                                         VaListTagDecl,
                                         SourceLocation(),
                                         SourceLocation(),
                                         &Context->Idents.get(FieldNames[i]),
                                         FieldTypes[i], /*TInfo=*/nullptr,
                                         /*BitWidth=*/nullptr,
                                         /*Mutable=*/false,
                                         ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(Field);
  }
  VaListTagDecl->completeDefinition();
  Context->VaListTagDecl = VaListTagDecl;
  QualType VaListTagType = Context->getRecordType(VaListTagDecl);

  // };

  // typedef struct __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      VaListTagType, Size, nullptr, ArrayType::Normal, 0);
  return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}

static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) {
  // typedef int __builtin_va_list[4];
  llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4);
  QualType IntArrayType = Context->getConstantArrayType(
      Context->IntTy, Size, nullptr, ArrayType::Normal, 0);
  return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list");
}

// 32-bit ARM AAPCS va_list: a single-member struct __va_list { void *__ap; }.
// As on AArch64, the struct is placed in namespace std for C++ mangling.
static TypedefDecl *
CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list
  RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list");
  if (Context->getLangOpts().CPlusPlus) {
    // namespace std { struct __va_list {
    NamespaceDecl *NS;
    NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context),
                               Context->getTranslationUnitDecl(),
                               /*Inline=*/false, SourceLocation(),
                               SourceLocation(), &Context->Idents.get("std"),
                               /*PrevDecl=*/nullptr, /*Nested=*/false);
    NS->setImplicit();
    VaListDecl->setDeclContext(NS);
  }

  VaListDecl->startDefinition();

  // void * __ap;
  FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
                                       VaListDecl,
                                       SourceLocation(),
                                       SourceLocation(),
                                       &Context->Idents.get("__ap"),
                                       Context->getPointerType(Context->VoidTy),
                                       /*TInfo=*/nullptr,
                                       /*BitWidth=*/nullptr,
                                       /*Mutable=*/false,
                                       ICIS_NoInit);
  Field->setAccess(AS_public);
  VaListDecl->addDecl(Field);

  // };
  VaListDecl->completeDefinition();
  Context->VaListTagDecl = VaListDecl;

  // typedef struct __va_list __builtin_va_list;
  QualType T = Context->getRecordType(VaListDecl);
  return Context->buildImplicitTypedef(T, "__builtin_va_list");
}

// SystemZ va_list: array of one 4-field __va_list_tag struct.
static TypedefDecl *
CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list_tag {
  RecordDecl *VaListTagDecl;
  VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
  VaListTagDecl->startDefinition();

  const size_t NumFields = 4;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // long __gpr;
  FieldTypes[0] = Context->LongTy;
  FieldNames[0] = "__gpr";

  // long __fpr;
  FieldTypes[1] = Context->LongTy;
  FieldNames[1] = "__fpr";

  // void *__overflow_arg_area;
  FieldTypes[2] = Context->getPointerType(Context->VoidTy);
  FieldNames[2] = "__overflow_arg_area";

  // void *__reg_save_area;
  FieldTypes[3] = Context->getPointerType(Context->VoidTy);
  FieldNames[3] = "__reg_save_area";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
                                         VaListTagDecl,
                                         SourceLocation(),
                                         SourceLocation(),
                                         &Context->Idents.get(FieldNames[i]),
                                         FieldTypes[i], /*TInfo=*/nullptr,
                                         /*BitWidth=*/nullptr,
                                         /*Mutable=*/false,
                                         ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(Field);
  }
  VaListTagDecl->completeDefinition();
  Context->VaListTagDecl = VaListTagDecl;
  QualType VaListTagType = Context->getRecordType(VaListTagDecl);

  // };

  // typedef __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      VaListTagType, Size, nullptr, ArrayType::Normal, 0);

  return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}

// Hexagon va_list: array of one 3-field __va_list_tag struct.
static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
  // typedef struct __va_list_tag {
  RecordDecl *VaListTagDecl;
  VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
  VaListTagDecl->startDefinition();

  const size_t NumFields = 3;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // void *CurrentSavedRegisterArea;
  FieldTypes[0] = Context->getPointerType(Context->VoidTy);
  FieldNames[0] = "__current_saved_reg_area_pointer";

  // void *SavedRegAreaEnd;
  FieldTypes[1] = Context->getPointerType(Context->VoidTy);
  FieldNames[1] = "__saved_reg_area_end_pointer";

  // void *OverflowArea;
  FieldTypes[2] = Context->getPointerType(Context->VoidTy);
  FieldNames[2] = "__overflow_area_pointer";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(
        const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(),
        SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i],
        /*TInfo=*/nullptr,
        /*BitWidth=*/nullptr,
        /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(Field);
  }
  VaListTagDecl->completeDefinition();
  Context->VaListTagDecl = VaListTagDecl;
  QualType VaListTagType = Context->getRecordType(VaListTagDecl);

  // } __va_list_tag;
  TypedefDecl *VaListTagTypedefDecl =
      Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");

  QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl);

  // typedef __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0);

  return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}

// Dispatch to the target-specific va_list builder for the given kind.
static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
                                     TargetInfo::BuiltinVaListKind Kind) {
  switch (Kind) {
  case TargetInfo::CharPtrBuiltinVaList:
    return CreateCharPtrBuiltinVaListDecl(Context);
  case TargetInfo::VoidPtrBuiltinVaList:
    return CreateVoidPtrBuiltinVaListDecl(Context);
  case TargetInfo::AArch64ABIBuiltinVaList:
    return CreateAArch64ABIBuiltinVaListDecl(Context);
  case TargetInfo::PowerABIBuiltinVaList:
    return CreatePowerABIBuiltinVaListDecl(Context);
  case TargetInfo::X86_64ABIBuiltinVaList:
    return CreateX86_64ABIBuiltinVaListDecl(Context);
  case TargetInfo::PNaClABIBuiltinVaList:
    return CreatePNaClABIBuiltinVaListDecl(Context);
  case TargetInfo::AAPCSABIBuiltinVaList:
    return CreateAAPCSABIBuiltinVaListDecl(Context);
  case TargetInfo::SystemZBuiltinVaList:
    return CreateSystemZBuiltinVaListDecl(Context);
  case TargetInfo::HexagonBuiltinVaList:
    return CreateHexagonBuiltinVaListDecl(Context);
  }

  llvm_unreachable("Unhandled __builtin_va_list type kind");
}

// Lazily create the target's __builtin_va_list typedef.
TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
  if (!BuiltinVaListDecl) {
    BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind());
    assert(BuiltinVaListDecl->isImplicit());
  }

  return BuiltinVaListDecl;
}

Decl *ASTContext::getVaListTagDecl() const {
  // Force the creation of VaListTagDecl by building the __builtin_va_list
  // declaration.
  if (!VaListTagDecl)
    (void)getBuiltinVaListDecl();

  return VaListTagDecl;
}

// Lazily create the __builtin_ms_va_list typedef (always char*).
TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
  if (!BuiltinMSVaListDecl)
    BuiltinMSVaListDecl = CreateMSVaListDecl(this);

  return BuiltinMSVaListDecl;
}

bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const {
  // Allow redecl custom type checking builtin for HLSL.
  if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin &&
      BuiltinInfo.hasCustomTypechecking(FD->getBuiltinID()))
    return true;
  return BuiltinInfo.canBeRedeclared(FD->getBuiltinID());
}

// Record the interface type backing ObjC constant string literals
// ('NSConstantString'); may be set only once per context.
void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
  assert(ObjCConstantStringType.isNull() &&
         "'NSConstantString' type already set!");

  ObjCConstantStringType = getObjCInterfaceType(Decl);
}

/// Retrieve the template name that corresponds to a non-empty
/// lookup.
TemplateName
ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
                                      UnresolvedSetIterator End) const {
  unsigned size = End - Begin;
  assert(size > 1 && "set is not overloaded!");

  // The storage object is allocated with its trailing decl array in one
  // allocation; OverloadedTemplateStorage exposes it via getStorage().
  void *memory = Allocate(sizeof(OverloadedTemplateStorage) +
                          size * sizeof(FunctionTemplateDecl*));
  auto *OT = new (memory) OverloadedTemplateStorage(size);

  NamedDecl **Storage = OT->getStorage();
  for (UnresolvedSetIterator I = Begin; I != End; ++I) {
    NamedDecl *D = *I;
    // Only function templates (possibly via using-shadow) or unresolved
    // using declarations may participate in an overloaded template name.
    assert(isa<FunctionTemplateDecl>(D) ||
           isa<UnresolvedUsingValueDecl>(D) ||
           (isa<UsingShadowDecl>(D) &&
            isa<FunctionTemplateDecl>(D->getUnderlyingDecl())));
    *Storage++ = D;
  }

  return TemplateName(OT);
}

/// Retrieve a template name representing an unqualified-id that has been
/// assumed to name a template for ADL purposes.
TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
  auto *OT = new (*this) AssumedTemplateStorage(Name);
  return TemplateName(OT);
}

/// Retrieve the template name that represents a qualified
/// template name such as \c std::vector.
TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
                                                  bool TemplateKeyword,
                                                  TemplateName Template) const {
  assert(NNS && "Missing nested-name-specifier in qualified template name");

  // FIXME: Canonicalization?
  llvm::FoldingSetNodeID ID;
  QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template);

  // Unique qualified template names through the folding set.
  void *InsertPos = nullptr;
  QualifiedTemplateName *QTN =
      QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
  if (!QTN) {
    QTN = new (*this, alignof(QualifiedTemplateName))
        QualifiedTemplateName(NNS, TemplateKeyword, Template);
    QualifiedTemplateNames.InsertNode(QTN, InsertPos);
  }

  return TemplateName(QTN);
}

/// Retrieve the template name that represents a dependent
/// template name such as \c MetaFun::template apply.
TemplateName
ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
                                     const IdentifierInfo *Name) const {
  assert((!NNS || NNS->isDependent()) &&
         "Nested name specifier must be dependent");

  llvm::FoldingSetNodeID ID;
  DependentTemplateName::Profile(ID, NNS, Name);

  void *InsertPos = nullptr;
  DependentTemplateName *QTN =
      DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);

  if (QTN)
    return TemplateName(QTN);

  // If NNS is already canonical, this node is its own canonical form;
  // otherwise link it to the canonical node built from CanonNNS.
  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
  if (CanonNNS == NNS) {
    QTN = new (*this, alignof(DependentTemplateName))
        DependentTemplateName(NNS, Name);
  } else {
    TemplateName Canon = getDependentTemplateName(CanonNNS, Name);
    QTN = new (*this, alignof(DependentTemplateName))
        DependentTemplateName(NNS, Name, Canon);
    // The recursive call above must not have inserted a node for our
    // (non-canonical) profile; verify InsertPos is still valid.
    DependentTemplateName *CheckQTN =
        DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckQTN && "Dependent type name canonicalization broken");
    (void)CheckQTN;
  }

  DependentTemplateNames.InsertNode(QTN, InsertPos);
  return TemplateName(QTN);
}

/// Retrieve the template name that represents a dependent
/// template name such as \c MetaFun::template operator+.
TemplateName
ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
                                     OverloadedOperatorKind Operator) const {
  assert((!NNS || NNS->isDependent()) &&
         "Nested name specifier must be dependent");

  llvm::FoldingSetNodeID ID;
  DependentTemplateName::Profile(ID, NNS, Operator);

  void *InsertPos = nullptr;
  DependentTemplateName *QTN
    = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);

  if (QTN)
    return TemplateName(QTN);

  // Same canonicalization scheme as the identifier overload above.
  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
  if (CanonNNS == NNS) {
    QTN = new (*this, alignof(DependentTemplateName))
        DependentTemplateName(NNS, Operator);
  } else {
    TemplateName Canon = getDependentTemplateName(CanonNNS, Operator);
    QTN = new (*this, alignof(DependentTemplateName))
        DependentTemplateName(NNS, Operator, Canon);

    DependentTemplateName *CheckQTN
      = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckQTN && "Dependent template name canonicalization broken");
    (void)CheckQTN;
  }

  DependentTemplateNames.InsertNode(QTN, InsertPos);
  return TemplateName(QTN);
}

// Unique a substituted template-template-parameter name (the result of
// replacing a template template parameter with \p Replacement).
TemplateName ASTContext::getSubstTemplateTemplateParm(
    TemplateName Replacement, Decl *AssociatedDecl, unsigned Index,
    std::optional<unsigned> PackIndex) const {
  llvm::FoldingSetNodeID ID;
  SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl,
                                            Index, PackIndex);

  void *insertPos = nullptr;
  SubstTemplateTemplateParmStorage *subst
    = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos);

  if (!subst) {
    subst = new (*this) SubstTemplateTemplateParmStorage(
        Replacement, AssociatedDecl, Index, PackIndex);
    SubstTemplateTemplateParms.InsertNode(subst, insertPos);
  }

  return TemplateName(subst);
}

// Unique the name for a template-template-parameter pack substituted with
// the argument pack \p ArgPack.
TemplateName
ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack,
                                             Decl *AssociatedDecl,
                                             unsigned Index, bool Final) const {
  auto &Self = const_cast<ASTContext &>(*this);
  llvm::FoldingSetNodeID ID;
  SubstTemplateTemplateParmPackStorage::Profile(ID, Self, ArgPack,
                                                AssociatedDecl, Index, Final);

  void *InsertPos = nullptr;
  SubstTemplateTemplateParmPackStorage *Subst
    = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);

  if (!Subst) {
    Subst = new (*this) SubstTemplateTemplateParmPackStorage(
        ArgPack.pack_elements(), AssociatedDecl, Index, Final);
    SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos);
  }

  return TemplateName(Subst);
}

/// getFromTargetType - Given one of the integer types provided by
/// TargetInfo, produce the corresponding type. The unsigned @p Type
/// is actually a value of type @c TargetInfo::IntType.
CanQualType ASTContext::getFromTargetType(unsigned Type) const {
  switch (Type) {
  case TargetInfo::NoInt: return {};
  case TargetInfo::SignedChar: return SignedCharTy;
  case TargetInfo::UnsignedChar: return UnsignedCharTy;
  case TargetInfo::SignedShort: return ShortTy;
  case TargetInfo::UnsignedShort: return UnsignedShortTy;
  case TargetInfo::SignedInt: return IntTy;
  case TargetInfo::UnsignedInt: return UnsignedIntTy;
  case TargetInfo::SignedLong: return LongTy;
  case TargetInfo::UnsignedLong: return UnsignedLongTy;
  case TargetInfo::SignedLongLong: return LongLongTy;
  case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
  }

  llvm_unreachable("Unhandled TargetInfo::IntType value");
}

//===----------------------------------------------------------------------===//
//                        Type Predicates.
//===----------------------------------------------------------------------===//

/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
/// garbage collection attribute.
///
Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
  if (getLangOpts().getGC() == LangOptions::NonGC)
    return Qualifiers::GCNone;

  assert(getLangOpts().ObjC);
  Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();

  // Default behaviour under objective-C's gc is for ObjC pointers
  // (or pointers to them) be treated as though they were declared
  // as __strong.
  if (GCAttrs == Qualifiers::GCNone) {
    if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
      return Qualifiers::Strong;
    else if (Ty->isPointerType())
      // Recurse through pointer levels to find an ObjC pointee.
      return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType());
  } else {
    // It's not valid to set GC attributes on anything that isn't a
    // pointer.
#ifndef NDEBUG
    QualType CT = Ty->getCanonicalTypeInternal();
    while (const auto *AT = dyn_cast<ArrayType>(CT))
      CT = AT->getElementType();
    assert(CT->isAnyPointerType() || CT->isBlockPointerType());
#endif
  }
  return GCAttrs;
}

//===----------------------------------------------------------------------===//
//                        Type Compatibility Testing
//===----------------------------------------------------------------------===//

/// areCompatVectorTypes - Return true if the two specified vector types are
/// compatible.
static bool areCompatVectorTypes(const VectorType *LHS,
                                 const VectorType *RHS) {
  assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
  return LHS->getElementType() == RHS->getElementType() &&
         LHS->getNumElements() == RHS->getNumElements();
}

/// areCompatMatrixTypes - Return true if the two specified matrix types are
/// compatible.
static bool areCompatMatrixTypes(const ConstantMatrixType *LHS,
                                 const ConstantMatrixType *RHS) {
  assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
  return LHS->getElementType() == RHS->getElementType() &&
         LHS->getNumRows() == RHS->getNumRows() &&
         LHS->getNumColumns() == RHS->getNumColumns();
}

bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
                                          QualType SecondVec) {
  assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
  assert(SecondVec->isVectorType() && "SecondVec should be a vector type");

  if (hasSameUnqualifiedType(FirstVec, SecondVec))
    return true;

  // Treat Neon vector types and most AltiVec vector types as if they are the
  // equivalent GCC vector types. AltiVec 'pixel'/'bool' vectors and SVE
  // fixed-length vectors are deliberately excluded from this equivalence.
  const auto *First = FirstVec->castAs<VectorType>();
  const auto *Second = SecondVec->castAs<VectorType>();
  if (First->getNumElements() == Second->getNumElements() &&
      hasSameType(First->getElementType(), Second->getElementType()) &&
      First->getVectorKind() != VectorType::AltiVecPixel &&
      First->getVectorKind() != VectorType::AltiVecBool &&
      Second->getVectorKind() != VectorType::AltiVecPixel &&
      Second->getVectorKind() != VectorType::AltiVecBool &&
      First->getVectorKind() != VectorType::SveFixedLengthDataVector &&
      First->getVectorKind() != VectorType::SveFixedLengthPredicateVector &&
      Second->getVectorKind() != VectorType::SveFixedLengthDataVector &&
      Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector)
    return true;

  return false;
}

/// getSVETypeSize - Return SVE vector or predicate register size.
static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) {
  assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type");
  // Data vectors are VScaleMin * 128 bits; svbool_t (a predicate) is that
  // size divided by the char width.
  return Ty->getKind() == BuiltinType::SveBool
             ? (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth()
             : Context.getLangOpts().VScaleMin * 128;
}

bool ASTContext::areCompatibleSveTypes(QualType FirstType,
                                       QualType SecondType) {
  assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||
          (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) &&
         "Expected SVE builtin type and vector type!");

  // Checked in both argument orders below, since either side may be the
  // sizeless builtin.
  auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
    if (const auto *BT = FirstType->getAs<BuiltinType>()) {
      if (const auto *VT = SecondType->getAs<VectorType>()) {
        // Predicates have the same representation as uint8 so we also have to
        // check the kind to make these types incompatible.
        if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
          return BT->getKind() == BuiltinType::SveBool;
        else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector)
          return VT->getElementType().getCanonicalType() ==
                 FirstType->getSveEltType(*this);
        else if (VT->getVectorKind() == VectorType::GenericVector)
          return getTypeSize(SecondType) == getSVETypeSize(*this, BT) &&
                 hasSameType(VT->getElementType(),
                             getBuiltinVectorTypeInfo(BT).ElementType);
      }
    }
    return false;
  };

  return IsValidCast(FirstType, SecondType) ||
         IsValidCast(SecondType, FirstType);
}

bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
                                          QualType SecondType) {
  assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||
          (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) &&
         "Expected SVE builtin type and vector type!");

  auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
    const auto *BT = FirstType->getAs<BuiltinType>();
    if (!BT)
      return false;

    const auto *VecTy = SecondType->getAs<VectorType>();
    if (VecTy &&
        (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector ||
         VecTy->getVectorKind() == VectorType::GenericVector)) {
      const LangOptions::LaxVectorConversionKind LVCKind =
          getLangOpts().getLaxVectorConversions();

      // Can not convert between sve predicates and sve vectors because of
      // different size.
      if (BT->getKind() == BuiltinType::SveBool &&
          VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector)
        return false;

      // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion.
      // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly
      // converts to VLAT and VLAT implicitly converts to GNUT."
      // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
      // predicates.
      if (VecTy->getVectorKind() == VectorType::GenericVector &&
          getTypeSize(SecondType) != getSVETypeSize(*this, BT))
        return false;

      // If -flax-vector-conversions=all is specified, the types are
      // certainly compatible.
      if (LVCKind == LangOptions::LaxVectorConversionKind::All)
        return true;

      // If -flax-vector-conversions=integer is specified, the types are
      // compatible if the elements are integer types.
      if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
        return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
               FirstType->getSveEltType(*this)->isIntegerType();
    }

    return false;
  };

  return IsLaxCompatible(FirstType, SecondType) ||
         IsLaxCompatible(SecondType, FirstType);
}

// Return true if Ty carries an explicit ObjC ownership qualifier directly
// (through AttributedType sugar and parentheses only — typedefs and other
// abstractions are deliberately not looked through).
bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const {
  while (true) {
    // __strong id
    if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) {
      if (Attr->getAttrKind() == attr::ObjCOwnership)
        return true;

      Ty = Attr->getModifiedType();

      // X *__strong (...)
    } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) {
      Ty = Paren->getInnerType();

      // We do not want to look through typedefs, typeof(expr),
      // typeof(type), or any other way that the type is somehow
      // abstracted.
    } else {
      return false;
    }
  }
}

//===----------------------------------------------------------------------===//
// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
//===----------------------------------------------------------------------===//

/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
/// inheritance hierarchy of 'rProto'.
bool
ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
                                           ObjCProtocolDecl *rProto) const {
  if (declaresSameEntity(lProto, rProto))
    return true;
  // Otherwise search rProto's inherited protocols recursively.
  for (auto *PI : rProto->protocols())
    if (ProtocolCompatibleWithProtocol(lProto, PI))
      return true;
  return false;
}

/// ObjCQualifiedClassTypesAreCompatible - compare  Class<pr,...> and
/// Class<pr1, ...>.
bool ASTContext::ObjCQualifiedClassTypesAreCompatible(
    const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) {
  // Every protocol on the LHS must be satisfied by some protocol on the RHS.
  for (auto *lhsProto : lhs->quals()) {
    bool match = false;
    for (auto *rhsProto : rhs->quals()) {
      if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) {
        match = true;
        break;
      }
    }
    if (!match)
      return false;
  }
  return true;
}

/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
/// ObjCQualifiedIDType.
bool ASTContext::ObjCQualifiedIdTypesAreCompatible(
    const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs,
    bool compare) {
  // Allow id<P..> and an 'id' in all cases.
  if (lhs->isObjCIdType() || rhs->isObjCIdType())
    return true;

  // Don't allow id<P..> to convert to Class or Class<P..> in either direction.
  if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() ||
      rhs->isObjCClassType() || rhs->isObjCQualifiedClassType())
    return false;

  if (lhs->isObjCQualifiedIdType()) {
    if (rhs->qual_empty()) {
      // If the RHS is a unqualified interface pointer "NSString*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (!rhsID->ClassImplementsProtocol(I, true))
            return false;
        }
      }
      // If there are no qualifiers and no interface, we have an 'id'.
      return true;
    }
    // Both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on lhs with a static type on rhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      // Note: 'compare' additionally allows the match in the reverse
      // protocol direction.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
          match = true;
          break;
        }
      }
      // If the RHS is a qualified interface pointer "NSString<P>*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (rhsID->ClassImplementsProtocol(I, true)) {
            match = true;
            break;
          }
        }
      }
      if (!match)
        return false;
    }

    return true;
  }

  assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>");

  if (lhs->getInterfaceType()) {
    // If both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on rhs with a static type on lhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      // First, lhs protocols in the qualifier list must be found, direct
      // or indirect in rhs's qualifier list or it is a mismatch.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
          match = true;
          break;
        }
      }
      if (!match)
        return false;
    }

    // Static class's protocols, or its super class or category protocols
    // must be found, direct or indirect in rhs's qualifier list or it is a mismatch.
    if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) {
      llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
      CollectInheritedProtocols(lhsID, LHSInheritedProtocols);
      // This is rather dubious but matches gcc's behavior. If lhs has
      // no type qualifier and its class has no static protocol(s)
      // assume that it is mismatch.
      if (LHSInheritedProtocols.empty() && lhs->qual_empty())
        return false;
      for (auto *lhsProto : LHSInheritedProtocols) {
        bool match = false;
        for (auto *rhsProto : rhs->quals()) {
          if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
              (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
            match = true;
            break;
          }
        }
        if (!match)
          return false;
      }
    }
    return true;
  }
  return false;
}

/// canAssignObjCInterfaces - Return true if the two interface types are
/// compatible for assignment from RHS to LHS.  This handles validation of any
/// protocol qualifiers on the LHS or RHS.
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
                                         const ObjCObjectPointerType *RHSOPT) {
  const ObjCObjectType* LHS = LHSOPT->getObjectType();
  const ObjCObjectType* RHS = RHSOPT->getObjectType();

  // If either type represents the built-in 'id' type, return true.
  if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId())
    return true;

  // Function object that propagates a successful result or handles
  // __kindof types.
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    if (!RHS->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this),
                                   LHSOPT->stripObjCKindOfTypeAndQuals(*this));
  };

  // Casts from or to id<P> are allowed when the other side has compatible
  // protocols.
  if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
    return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false));
  }

  // Verify protocol compatibility for casts from Class<P1> to Class<P2>.
  if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
    return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT));
  }

  // Casts from Class to Class<Foo>, or vice-versa, are allowed.
  if (LHS->isObjCClass() && RHS->isObjCClass()) {
    return true;
  }

  // If we have 2 user-defined types, fall into that path.
  if (LHS->getInterface() && RHS->getInterface()) {
    return finish(canAssignObjCInterfaces(LHS, RHS));
  }

  return false;
}

/// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
/// for providing type-safety for objective-c pointers used to pass/return
/// arguments in block literals. When passed as arguments, passing 'A*' where
/// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is
/// not OK. For the return type, the opposite is not OK.
bool ASTContext::canAssignObjCInterfacesInBlockPointer(
                                         const ObjCObjectPointerType *LHSOPT,
                                         const ObjCObjectPointerType *RHSOPT,
                                         bool BlockReturnType) {

  // Function object that propagates a successful result or handles
  // __kindof types.
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    // For parameters the "expected" type is the LHS; for return types it
    // is the RHS (contravariance vs. covariance).
    const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
    if (!Expected->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfacesInBlockPointer(
             RHSOPT->stripObjCKindOfTypeAndQuals(*this),
             LHSOPT->stripObjCKindOfTypeAndQuals(*this),
             BlockReturnType);
  };

  if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
    return true;

  if (LHSOPT->isObjCBuiltinType()) {
    return finish(RHSOPT->isObjCBuiltinType() ||
                  RHSOPT->isObjCQualifiedIdType());
  }

  if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
    if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
      // Use for block parameters previous type checking for compatibility.
      return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) ||
                    // Or corrected type checking as in non-compat mode.
                    (!BlockReturnType &&
                     ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false)));
    else
      return finish(ObjCQualifiedIdTypesAreCompatible(
          (BlockReturnType ? LHSOPT : RHSOPT),
          (BlockReturnType ? RHSOPT : LHSOPT), false));
  }

  const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
  const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
  if (LHS && RHS)  { // We have 2 user-defined types.
    if (LHS != RHS) {
      // Sub-to-super is OK only for return types; super-to-sub only for
      // parameters.
      if (LHS->getDecl()->isSuperClassOf(RHS->getDecl()))
        return finish(BlockReturnType);
      if (RHS->getDecl()->isSuperClassOf(LHS->getDecl()))
        return finish(!BlockReturnType);
    }
    else
      return true;
  }
  return false;
}

/// Comparison routine for Objective-C protocols to be used with
/// llvm::array_pod_sort.
// strcmp-style ordering by protocol name, suitable for array_pod_sort.
static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs,
                                      ObjCProtocolDecl * const *rhs) {
  return (*lhs)->getName().compare((*rhs)->getName());
}

/// getIntersectionOfProtocols - This routine finds the intersection of set
/// of protocols inherited from two distinct objective-c pointer objects with
/// the given common base.
/// It is used to build composite qualifier list of the composite type of
/// the conditional expression involving two objective-c pointer objects.
/// Protocols already implied by \p CommonBase are dropped from the result,
/// and the remaining protocols are returned sorted by name.
static
void getIntersectionOfProtocols(ASTContext &Context,
                                const ObjCInterfaceDecl *CommonBase,
                                const ObjCObjectPointerType *LHSOPT,
                                const ObjCObjectPointerType *RHSOPT,
      SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {

  const ObjCObjectType* LHS = LHSOPT->getObjectType();
  const ObjCObjectType* RHS = RHSOPT->getObjectType();
  assert(LHS->getInterface() && "LHS must have an interface base");
  assert(RHS->getInterface() && "RHS must have an interface base");

  // Add all of the protocols for the LHS.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;

  // Start with the protocol qualifiers.
  for (auto *proto : LHS->quals()) {
    Context.CollectInheritedProtocols(proto, LHSProtocolSet);
  }

  // Also add the protocols associated with the LHS interface.
  Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet);

  // Add all of the protocols for the RHS.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;

  // Start with the protocol qualifiers.
  for (auto *proto : RHS->quals()) {
    Context.CollectInheritedProtocols(proto, RHSProtocolSet);
  }

  // Also add the protocols associated with the RHS interface.
  Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet);

  // Compute the intersection of the collected protocol sets.
  for (auto *proto : LHSProtocolSet) {
    if (RHSProtocolSet.count(proto))
      IntersectionSet.push_back(proto);
  }

  // Compute the set of protocols that is implied by either the common type or
  // the protocols within the intersection.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
  Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols);

  // Remove any implied protocols from the list of inherited protocols.
  if (!ImpliedProtocols.empty()) {
    llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool {
      return ImpliedProtocols.contains(proto);
    });
  }

  // Sort the remaining protocols by name.
  llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(),
                       compareObjCProtocolsByName);
}

/// Determine whether the first type is a subtype of the second.
static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
                                     QualType rhs) {
  // Common case: two object pointers.
  const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
  const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
  if (lhsOPT && rhsOPT)
    return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT);

  // Two block pointers.
  const auto *lhsBlock = lhs->getAs<BlockPointerType>();
  const auto *rhsBlock = rhs->getAs<BlockPointerType>();
  if (lhsBlock && rhsBlock)
    return ctx.typesAreBlockPointerCompatible(lhs, rhs);

  // If either is an unqualified 'id' and the other is a block, it's
  // acceptable.
  if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) ||
      (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock))
    return true;

  return false;
}

// Check that the given Objective-C type argument lists are equivalent.
static bool sameObjCTypeArgs(ASTContext &ctx,
                             const ObjCInterfaceDecl *iface,
                             ArrayRef<QualType> lhsArgs,
                             ArrayRef<QualType> rhsArgs,
                             bool stripKindOf) {
  if (lhsArgs.size() != rhsArgs.size())
    return false;

  ObjCTypeParamList *typeParams = iface->getTypeParamList();
  for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) {
    if (ctx.hasSameType(lhsArgs[i], rhsArgs[i]))
      continue;

    // Arguments differ as written; apply the declared variance of the
    // corresponding type parameter.
    switch (typeParams->begin()[i]->getVariance()) {
    case ObjCTypeParamVariance::Invariant:
      if (!stripKindOf ||
          !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx),
                           rhsArgs[i].stripObjCKindOfType(ctx))) {
        return false;
      }
      break;

    case ObjCTypeParamVariance::Covariant:
      if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i]))
        return false;
      break;

    case ObjCTypeParamVariance::Contravariant:
      if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i]))
        return false;
      break;
    }
  }

  return true;
}

/// areCommonBaseCompatible - Find a common-base object pointer type for the
/// two given object pointer types by walking each side's class hierarchy,
/// intersecting protocol qualifiers and comparing type arguments at the
/// match point. Returns a null QualType if no common base is found.
QualType ASTContext::areCommonBaseCompatible(
    const ObjCObjectPointerType *Lptr,
    const ObjCObjectPointerType *Rptr) {
  const ObjCObjectType *LHS = Lptr->getObjectType();
  const ObjCObjectType *RHS = Rptr->getObjectType();
  const ObjCInterfaceDecl* LDecl = LHS->getInterface();
  const ObjCInterfaceDecl* RDecl = RHS->getInterface();

  if (!LDecl || !RDecl)
    return {};

  // When either LHS or RHS is a kindof type, we should return a kindof type.
  // For example, for common base of kindof(ASub1) and kindof(ASub2), we return
  // kindof(A).
  bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType();

  // Follow the left-hand side up the class hierarchy until we either hit a
  // root or find the RHS. Record the ancestors in case we don't find it.
  llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4>
    LHSAncestors;
  while (true) {
    // Record this ancestor. We'll need this if the common type isn't in the
    // path from the LHS to the root.
    LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS;

    if (declaresSameEntity(LHS->getInterface(), RDecl)) {
      // Get the type arguments.
      ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(*this, LHS->getInterface(),
                              LHS->getTypeArgs(), RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        LHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr,
                                 Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If anything in the LHS will have changed, build a new result type.
      // If we need to return a kindof type but LHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || LHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(LHS->getInterface());
        Result = getObjCObjectType(Result, LHSTypeArgs, Protocols,
                                   anyKindOf || LHS->isKindOfType());
        return getObjCObjectPointerType(Result);
      }

      return getObjCObjectPointerType(QualType(LHS, 0));
    }

    // Find the superclass.
    QualType LHSSuperType = LHS->getSuperClassType();
    if (LHSSuperType.isNull())
      break;

    LHS = LHSSuperType->castAs<ObjCObjectType>();
  }

  // We didn't find anything by following the LHS to its root; now check
  // the RHS against the cached set of ancestors.
  while (true) {
    auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl());
    if (KnownLHS != LHSAncestors.end()) {
      LHS = KnownLHS->second;

      // Get the type arguments.
      ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(*this, LHS->getInterface(),
                              LHS->getTypeArgs(), RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        RHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr,
                                 Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If we need to return a kindof type but RHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || RHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(RHS->getInterface());
        Result = getObjCObjectType(Result, RHSTypeArgs, Protocols,
                                   anyKindOf || RHS->isKindOfType());
        return getObjCObjectPointerType(Result);
      }

      return getObjCObjectPointerType(QualType(RHS, 0));
    }

    // Find the superclass of the RHS.
    QualType RHSSuperType = RHS->getSuperClassType();
    if (RHSSuperType.isNull())
      break;

    RHS = RHSSuperType->castAs<ObjCObjectType>();
  }

  return {};
}

bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
                                         const ObjCObjectType *RHS) {
  assert(LHS->getInterface() && "LHS is not an interface type");
  assert(RHS->getInterface() && "RHS is not an interface type");

  // Verify that the base decls are compatible: the RHS must be a subclass of
  // the LHS.
  ObjCInterfaceDecl *LHSInterface = LHS->getInterface();
  bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface());
  if (!IsSuperClass)
    return false;

  // If the LHS has protocol qualifiers, determine whether all of them are
  // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the
  // LHS).
  if (LHS->getNumProtocols() > 0) {
    // OK if conversion of LHS to SuperClass results in narrowing of types
    // ; i.e., SuperClass may implement at least one of the protocols
    // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok.
    // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
    CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
    // Also, if RHS has explicit qualifiers, include them for comparing with
    // LHS's qualifiers.
    for (auto *RHSPI : RHS->quals())
      CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
    // If there are no protocols associated with RHS, it is not a match.
    if (SuperClassInheritedProtocols.empty())
      return false;

    for (const auto *LHSProto : LHS->quals()) {
      bool SuperImplementsProtocol = false;
      // Match is by protocol name, not declaration identity.
      for (auto *SuperClassProto : SuperClassInheritedProtocols)
        if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
          SuperImplementsProtocol = true;
          break;
        }
      if (!SuperImplementsProtocol)
        return false;
    }
  }

  // If the LHS is specialized, we may need to check type arguments.
  if (LHS->isSpecialized()) {
    // Follow the superclass chain until we've matched the LHS class in the
    // hierarchy. This substitutes type arguments through.
    const ObjCObjectType *RHSSuper = RHS;
    while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
      RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();

    // If the RHS is specialized, compare type arguments.
    if (RHSSuper->isSpecialized() &&
        !sameObjCTypeArgs(*this, LHS->getInterface(),
                          LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
                          /*stripKindOf=*/true)) {
      return false;
    }
  }

  return true;
}

bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
  // get the "pointed to" types
  const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
  const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();

  if (!LHSOPT || !RHSOPT)
    return false;

  // Comparable if assignable in either direction.
  return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
         canAssignObjCInterfaces(RHSOPT, LHSOPT);
}

// Wrap both object types in pointer types and defer to the pointer-level
// assignability check.
bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
  return canAssignObjCInterfaces(
      getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
      getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
}

/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
/// both shall have the identically qualified version of a compatible type.
/// C99 6.2.7p1: Two types have compatible types if their types are the
/// same. See 6.7.[2,3,5] for additional rules.
bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
                                    bool CompareUnqualified) {
  // C++ has no notion of "compatible but distinct" types; use type identity.
  if (getLangOpts().CPlusPlus)
    return hasSameType(LHS, RHS);

  return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull();
}

// Property type compatibility currently uses the ordinary compatibility rule.
bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
  return typesAreCompatible(LHS, RHS);
}

bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
  return !mergeTypes(LHS, RHS, true).isNull();
}

/// mergeTransparentUnionType - if T is a transparent union type and a member
/// of T is compatible with SubType, return the merged type, else return
/// QualType()
QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
                                               bool OfBlockPointer,
                                               bool Unqualified) {
  if (const RecordType *UT = T->getAsUnionType()) {
    RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      // The first field whose type merges successfully wins.
      for (const auto *I : UD->fields()) {
        QualType ET = I->getType().getUnqualifiedType();
        QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
        if (!MT.isNull())
          return MT;
      }
    }
  }

  return {};
}

/// mergeFunctionParameterTypes - merge two types which appear as function
/// parameter types
QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
                                                 bool OfBlockPointer,
                                                 bool Unqualified) {
  // GNU extension: two types are compatible if they appear as a function
  // argument, one of the types is a transparent union type and the other
  // type is compatible with a union member
  QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer,
                                              Unqualified);
  if (!lmerge.isNull())
    return lmerge;

  QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer,
                                              Unqualified);
  if (!rmerge.isNull())
    return rmerge;

  return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
}

QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
                                        bool OfBlockPointer, bool Unqualified,
                                        bool AllowCXX,
                                        bool IsConditionalOperator) {
  const auto *lbase = lhs->castAs<FunctionType>();
  const auto *rbase = rhs->castAs<FunctionType>();
  const auto *lproto = dyn_cast<FunctionProtoType>(lbase);
  const auto *rproto = dyn_cast<FunctionProtoType>(rbase);
  // Track whether the merged result can simply reuse lhs (or rhs) verbatim,
  // preserving typedef/sugar information; cleared as soon as any component
  // of the merge differs from that side.
  bool allLTypes = true;
  bool allRTypes = true;

  // Check return type
  QualType retType;
  if (OfBlockPointer) {
    QualType RHS = rbase->getReturnType();
    QualType LHS = lbase->getReturnType();
    bool UnqualifiedResult = Unqualified;
    if (!UnqualifiedResult)
      UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
    retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true);
  }
  else
    retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false,
                         Unqualified);
  if (retType.isNull())
    return {};

  if (Unqualified)
    retType = retType.getUnqualifiedType();

  CanQualType LRetType = getCanonicalType(lbase->getReturnType());
  CanQualType RRetType = getCanonicalType(rbase->getReturnType());
  if (Unqualified) {
    LRetType = LRetType.getUnqualifiedType();
    RRetType = RRetType.getUnqualifiedType();
  }

  if (getCanonicalType(retType) != LRetType)
    allLTypes = false;
  if (getCanonicalType(retType) != RRetType)
    allRTypes = false;

  // FIXME: double check this
  // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
  //                           rbase->getRegParmAttr() != 0 &&
  //                           lbase->getRegParmAttr() != rbase->getRegParmAttr()?
  FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
  FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();

  // Compatible functions must have compatible calling conventions
  if (lbaseInfo.getCC() != rbaseInfo.getCC())
    return {};

  // Regparm is part of the calling convention.
  if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
    return {};
  if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
    return {};

  if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
    return {};
  if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs())
    return {};
  if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck())
    return {};

  // When merging declarations, it's common for supplemental information like
  // attributes to only be present in one of the declarations, and we generally
  // want type merging to preserve the union of information. So a merged
  // function type should be noreturn if it was noreturn in *either* operand
  // type.
  //
  // But for the conditional operator, this is backwards. The result of the
  // operator could be either operand, and its type should conservatively
  // reflect that. So a function type in a composite type is noreturn only
  // if it's noreturn in *both* operand types.
  //
  // Arguably, noreturn is a kind of subtype, and the conditional operator
  // ought to produce the most specific common supertype of its operand types.
  // That would differ from this rule in contravariant positions. However,
  // neither C nor C++ generally uses this kind of subtype reasoning. Also,
  // as a practical matter, it would only affect C code that does abstraction of
  // higher-order functions (taking noreturn callbacks!), which is uncommon to
  // say the least. So we use the simpler rule.
  bool NoReturn = IsConditionalOperator
                      ? lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn()
                      : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
  if (lbaseInfo.getNoReturn() != NoReturn)
    allLTypes = false;
  if (rbaseInfo.getNoReturn() != NoReturn)
    allRTypes = false;

  FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn);

  if (lproto && rproto) { // two C99 style function prototypes
    assert((AllowCXX ||
            (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
           "C++ shouldn't be here");
    // Compatible functions must have the same number of parameters
    if (lproto->getNumParams() != rproto->getNumParams())
      return {};

    // Variadic and non-variadic functions aren't compatible
    if (lproto->isVariadic() != rproto->isVariadic())
      return {};

    if (lproto->getMethodQuals() != rproto->getMethodQuals())
      return {};

    SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
    bool canUseLeft, canUseRight;
    if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight,
                               newParamInfos))
      return {};

    if (!canUseLeft)
      allLTypes = false;
    if (!canUseRight)
      allRTypes = false;

    // Check parameter type compatibility
    SmallVector<QualType, 10> types;
    for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
      QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
      QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
      QualType paramType = mergeFunctionParameterTypes(
          lParamType, rParamType, OfBlockPointer, Unqualified);
      if (paramType.isNull())
        return {};

      if (Unqualified)
        paramType = paramType.getUnqualifiedType();

      types.push_back(paramType);
      if (Unqualified) {
        lParamType = lParamType.getUnqualifiedType();
        rParamType = rParamType.getUnqualifiedType();
      }

      if (getCanonicalType(paramType) != getCanonicalType(lParamType))
        allLTypes = false;
      if (getCanonicalType(paramType) != getCanonicalType(rParamType))
        allRTypes = false;
    }

    // Reuse an operand type verbatim when the merge changed nothing on that
    // side; this preserves sugar (typedefs, attributes) from that operand.
    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    EPI.ExtParameterInfos =
        newParamInfos.empty() ? nullptr : newParamInfos.data();
    return getFunctionType(retType, types, EPI);
  }

  if (lproto) allRTypes = false;
  if (rproto) allLTypes = false;

  const FunctionProtoType *proto = lproto ? lproto : rproto;
  if (proto) {
    assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here");
    if (proto->isVariadic())
      return {};
    // Check that the types are compatible with the types that
    // would result from default argument promotions (C99 6.7.5.3p15).
    // The only types actually affected are promotable integer
    // types and floats, which would be passed as a different
    // type depending on whether the prototype is visible.
    for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
      QualType paramTy = proto->getParamType(i);

      // Look at the converted type of enum types, since that is the type used
      // to pass enum values.
      if (const auto *Enum = paramTy->getAs<EnumType>()) {
        paramTy = Enum->getDecl()->getIntegerType();
        if (paramTy.isNull())
          return {};
      }

      if (isPromotableIntegerType(paramTy) ||
          getCanonicalType(paramTy).getUnqualifiedType() == FloatTy)
        return {};
    }

    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    return getFunctionType(retType, proto->getParamTypes(), EPI);
  }

  if (allLTypes) return lhs;
  if (allRTypes) return rhs;
  return getFunctionNoProtoType(retType, einfo);
}

/// Given that we have an enum type and a non-enum type, try to merge them.
static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
                                     QualType other, bool isBlockReturnType) {
  // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
  // a signed integer type, or an unsigned integer type.
  // Compatibility is based on the underlying type, not the promotion
  // type.
  QualType underlyingType = ET->getDecl()->getIntegerType();
  if (underlyingType.isNull())
    return {};
  if (Context.hasSameType(underlyingType, other))
    return other;

  // In block return types, we're more permissive and accept any
  // integral type of the same size.
  if (isBlockReturnType && other->isIntegerType() &&
      Context.getTypeSize(underlyingType) == Context.getTypeSize(other))
    return other;

  return {};
}

QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
                                bool Unqualified, bool BlockReturnType,
                                bool IsConditionalOperator) {
  // For C++ we will not reach this code with reference types (see below),
  // for OpenMP variant call overloading we might.
10413 // 10414 // C++ [expr]: If an expression initially has the type "reference to T", the 10415 // type is adjusted to "T" prior to any further analysis, the expression 10416 // designates the object or function denoted by the reference, and the 10417 // expression is an lvalue unless the reference is an rvalue reference and 10418 // the expression is a function call (possibly inside parentheses). 10419 auto *LHSRefTy = LHS->getAs<ReferenceType>(); 10420 auto *RHSRefTy = RHS->getAs<ReferenceType>(); 10421 if (LangOpts.OpenMP && LHSRefTy && RHSRefTy && 10422 LHS->getTypeClass() == RHS->getTypeClass()) 10423 return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(), 10424 OfBlockPointer, Unqualified, BlockReturnType); 10425 if (LHSRefTy || RHSRefTy) 10426 return {}; 10427 10428 if (Unqualified) { 10429 LHS = LHS.getUnqualifiedType(); 10430 RHS = RHS.getUnqualifiedType(); 10431 } 10432 10433 QualType LHSCan = getCanonicalType(LHS), 10434 RHSCan = getCanonicalType(RHS); 10435 10436 // If two types are identical, they are compatible. 10437 if (LHSCan == RHSCan) 10438 return LHS; 10439 10440 // If the qualifiers are different, the types aren't compatible... mostly. 10441 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10442 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10443 if (LQuals != RQuals) { 10444 // If any of these qualifiers are different, we have a type 10445 // mismatch. 10446 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10447 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 10448 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 10449 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 10450 return {}; 10451 10452 // Exactly one GC qualifier difference is allowed: __strong is 10453 // okay if the other type has no GC qualifier but is an Objective 10454 // C object pointer (i.e. implicitly strong by default). We fix 10455 // this by pretending that the unqualified type was actually 10456 // qualified __strong. 
10457 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10458 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10459 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10460 10461 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10462 return {}; 10463 10464 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 10465 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 10466 } 10467 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 10468 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 10469 } 10470 return {}; 10471 } 10472 10473 // Okay, qualifiers are equal. 10474 10475 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 10476 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 10477 10478 // We want to consider the two function types to be the same for these 10479 // comparisons, just force one to the other. 10480 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 10481 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 10482 10483 // Same as above for arrays 10484 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 10485 LHSClass = Type::ConstantArray; 10486 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 10487 RHSClass = Type::ConstantArray; 10488 10489 // ObjCInterfaces are just specialized ObjCObjects. 10490 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 10491 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 10492 10493 // Canonicalize ExtVector -> Vector. 10494 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 10495 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 10496 10497 // If the canonical type classes don't match. 10498 if (LHSClass != RHSClass) { 10499 // Note that we only have special rules for turning block enum 10500 // returns into block int returns, not vice-versa. 
10501 if (const auto *ETy = LHS->getAs<EnumType>()) { 10502 return mergeEnumWithInteger(*this, ETy, RHS, false); 10503 } 10504 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 10505 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 10506 } 10507 // allow block pointer type to match an 'id' type. 10508 if (OfBlockPointer && !BlockReturnType) { 10509 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 10510 return LHS; 10511 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 10512 return RHS; 10513 } 10514 // Allow __auto_type to match anything; it merges to the type with more 10515 // information. 10516 if (const auto *AT = LHS->getAs<AutoType>()) { 10517 if (!AT->isDeduced() && AT->isGNUAutoType()) 10518 return RHS; 10519 } 10520 if (const auto *AT = RHS->getAs<AutoType>()) { 10521 if (!AT->isDeduced() && AT->isGNUAutoType()) 10522 return LHS; 10523 } 10524 return {}; 10525 } 10526 10527 // The canonical type classes match. 10528 switch (LHSClass) { 10529 #define TYPE(Class, Base) 10530 #define ABSTRACT_TYPE(Class, Base) 10531 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 10532 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 10533 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 10534 #include "clang/AST/TypeNodes.inc" 10535 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 10536 10537 case Type::Auto: 10538 case Type::DeducedTemplateSpecialization: 10539 case Type::LValueReference: 10540 case Type::RValueReference: 10541 case Type::MemberPointer: 10542 llvm_unreachable("C++ should never be in mergeTypes"); 10543 10544 case Type::ObjCInterface: 10545 case Type::IncompleteArray: 10546 case Type::VariableArray: 10547 case Type::FunctionProto: 10548 case Type::ExtVector: 10549 llvm_unreachable("Types are eliminated above"); 10550 10551 case Type::Pointer: 10552 { 10553 // Merge two pointer types, while trying to preserve typedef info 10554 QualType LHSPointee = 
LHS->castAs<PointerType>()->getPointeeType(); 10555 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 10556 if (Unqualified) { 10557 LHSPointee = LHSPointee.getUnqualifiedType(); 10558 RHSPointee = RHSPointee.getUnqualifiedType(); 10559 } 10560 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 10561 Unqualified); 10562 if (ResultType.isNull()) 10563 return {}; 10564 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10565 return LHS; 10566 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10567 return RHS; 10568 return getPointerType(ResultType); 10569 } 10570 case Type::BlockPointer: 10571 { 10572 // Merge two block pointer types, while trying to preserve typedef info 10573 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 10574 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 10575 if (Unqualified) { 10576 LHSPointee = LHSPointee.getUnqualifiedType(); 10577 RHSPointee = RHSPointee.getUnqualifiedType(); 10578 } 10579 if (getLangOpts().OpenCL) { 10580 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 10581 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 10582 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 10583 // 6.12.5) thus the following check is asymmetric. 
10584 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) 10585 return {}; 10586 LHSPteeQual.removeAddressSpace(); 10587 RHSPteeQual.removeAddressSpace(); 10588 LHSPointee = 10589 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); 10590 RHSPointee = 10591 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); 10592 } 10593 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 10594 Unqualified); 10595 if (ResultType.isNull()) 10596 return {}; 10597 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10598 return LHS; 10599 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10600 return RHS; 10601 return getBlockPointerType(ResultType); 10602 } 10603 case Type::Atomic: 10604 { 10605 // Merge two pointer types, while trying to preserve typedef info 10606 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); 10607 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); 10608 if (Unqualified) { 10609 LHSValue = LHSValue.getUnqualifiedType(); 10610 RHSValue = RHSValue.getUnqualifiedType(); 10611 } 10612 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 10613 Unqualified); 10614 if (ResultType.isNull()) 10615 return {}; 10616 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 10617 return LHS; 10618 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 10619 return RHS; 10620 return getAtomicType(ResultType); 10621 } 10622 case Type::ConstantArray: 10623 { 10624 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 10625 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 10626 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 10627 return {}; 10628 10629 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 10630 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 10631 if (Unqualified) { 10632 LHSElem = LHSElem.getUnqualifiedType(); 10633 RHSElem = RHSElem.getUnqualifiedType(); 10634 } 10635 10636 QualType 
ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 10637 if (ResultType.isNull()) 10638 return {}; 10639 10640 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 10641 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 10642 10643 // If either side is a variable array, and both are complete, check whether 10644 // the current dimension is definite. 10645 if (LVAT || RVAT) { 10646 auto SizeFetch = [this](const VariableArrayType* VAT, 10647 const ConstantArrayType* CAT) 10648 -> std::pair<bool,llvm::APInt> { 10649 if (VAT) { 10650 std::optional<llvm::APSInt> TheInt; 10651 Expr *E = VAT->getSizeExpr(); 10652 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 10653 return std::make_pair(true, *TheInt); 10654 return std::make_pair(false, llvm::APSInt()); 10655 } 10656 if (CAT) 10657 return std::make_pair(true, CAT->getSize()); 10658 return std::make_pair(false, llvm::APInt()); 10659 }; 10660 10661 bool HaveLSize, HaveRSize; 10662 llvm::APInt LSize, RSize; 10663 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 10664 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 10665 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 10666 return {}; // Definite, but unequal, array dimension 10667 } 10668 10669 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10670 return LHS; 10671 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10672 return RHS; 10673 if (LCAT) 10674 return getConstantArrayType(ResultType, LCAT->getSize(), 10675 LCAT->getSizeExpr(), 10676 ArrayType::ArraySizeModifier(), 0); 10677 if (RCAT) 10678 return getConstantArrayType(ResultType, RCAT->getSize(), 10679 RCAT->getSizeExpr(), 10680 ArrayType::ArraySizeModifier(), 0); 10681 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10682 return LHS; 10683 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10684 return RHS; 10685 if (LVAT) { 10686 // FIXME: This isn't correct! 
But tricky to implement because 10687 // the array's size has to be the size of LHS, but the type 10688 // has to be different. 10689 return LHS; 10690 } 10691 if (RVAT) { 10692 // FIXME: This isn't correct! But tricky to implement because 10693 // the array's size has to be the size of RHS, but the type 10694 // has to be different. 10695 return RHS; 10696 } 10697 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 10698 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 10699 return getIncompleteArrayType(ResultType, 10700 ArrayType::ArraySizeModifier(), 0); 10701 } 10702 case Type::FunctionNoProto: 10703 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified, 10704 /*AllowCXX=*/false, IsConditionalOperator); 10705 case Type::Record: 10706 case Type::Enum: 10707 return {}; 10708 case Type::Builtin: 10709 // Only exactly equal builtin types are compatible, which is tested above. 10710 return {}; 10711 case Type::Complex: 10712 // Distinct complex types are incompatible. 10713 return {}; 10714 case Type::Vector: 10715 // FIXME: The merged type should be an ExtVector! 10716 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 10717 RHSCan->castAs<VectorType>())) 10718 return LHS; 10719 return {}; 10720 case Type::ConstantMatrix: 10721 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 10722 RHSCan->castAs<ConstantMatrixType>())) 10723 return LHS; 10724 return {}; 10725 case Type::ObjCObject: { 10726 // Check if the types are assignment compatible. 10727 // FIXME: This should be type compatibility, e.g. whether 10728 // "LHS x; RHS x;" at global scope is legal. 
    // Two ObjC object types merge when one interface is assignable to the
    // other; keep the LHS spelling as the merged result.
    if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(),
                                RHS->castAs<ObjCObjectType>()))
      return LHS;
    return {};
  }
  case Type::ObjCObjectPointer:
    // In block-pointer context use the block-specific assignability rules,
    // which also account for the block-return position.
    if (OfBlockPointer) {
      if (canAssignObjCInterfacesInBlockPointer(
              LHS->castAs<ObjCObjectPointerType>(),
              RHS->castAs<ObjCObjectPointerType>(), BlockReturnType))
        return LHS;
      return {};
    }
    if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(),
                                RHS->castAs<ObjCObjectPointerType>()))
      return LHS;
    return {};
  case Type::Pipe:
    // Identical pipe types were caught by the canonical-type equality check
    // earlier; any pair reaching here is incompatible.
    assert(LHS != RHS &&
           "Equivalent pipe types should have already been handled!");
    return {};
  case Type::BitInt: {
    // Merge two bit-precise int types, while trying to preserve typedef info.
    bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned();
    bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned();
    unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits();
    unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits();

    // Like unsigned/int, shouldn't have a type if they don't match.
    if (LHSUnsigned != RHSUnsigned)
      return {};

    // Bit widths must also match exactly.
    if (LHSBits != RHSBits)
      return {};
    return LHS;
  }
  }

  llvm_unreachable("Invalid Type::Class!");
}

/// Merge the ExtParameterInfo lists of two function prototypes.
///
/// \param FirstFnType,SecondFnType the two prototypes being merged.
/// \param CanUseFirst,CanUseSecond set to false when the corresponding
///        prototype's existing noescape flags differ from the merged result,
///        i.e. its info list cannot be reused as-is.
/// \param NewParamInfos receives the merged per-parameter infos; left empty
///        when the merged type needs no explicit ext parameter info at all.
/// \returns true if the two info lists are mergeable, false on conflict.
bool ASTContext::mergeExtParameterInfo(
    const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType,
    bool &CanUseFirst, bool &CanUseSecond,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) {
  assert(NewParamInfos.empty() && "param info list not empty");
  CanUseFirst = CanUseSecond = true;
  bool FirstHasInfo = FirstFnType->hasExtParameterInfos();
  bool SecondHasInfo = SecondFnType->hasExtParameterInfos();

  // Fast path: if neither type has ext parameter infos, there is nothing to
  // merge and both types can be used unchanged.
  if (LHSCan == RHSCan)
    return LHS;
  if (RHSCan->isFunctionType()) {
    // Function types only merge with other function types; recursively merge
    // the GC qualifiers of their return types.
    if (!LHSCan->isFunctionType())
      return {};
    QualType OldReturnType =
        cast<FunctionType>(RHSCan.getTypePtr())->getReturnType();
    QualType NewReturnType =
        cast<FunctionType>(LHSCan.getTypePtr())->getReturnType();
    QualType ResReturnType =
        mergeObjCGCQualifiers(NewReturnType, OldReturnType);
    if (ResReturnType.isNull())
      return {};
    if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
      // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
      // In either case, use OldReturnType to build the new function type.
      const auto *F = LHS->castAs<FunctionType>();
      // NOTE(review): cast<> never returns null, so this condition is always
      // true (and asserts if F is not a FunctionProtoType); dyn_cast<> looks
      // like what was intended here -- confirm before changing.
      if (const auto *FPT = cast<FunctionProtoType>(F)) {
        FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
        EPI.ExtInfo = getFunctionExtInfo(LHS);
        QualType ResultType =
            getFunctionType(OldReturnType, FPT->getParamTypes(), EPI);
        return ResultType;
      }
    }
    return {};
  }

  // If the qualifiers are different, the types can still be merged.
  Qualifiers LQuals = LHSCan.getLocalQualifiers();
  Qualifiers RQuals = RHSCan.getLocalQualifiers();
  if (LQuals != RQuals) {
    // If any of these qualifiers are different, we have a type mismatch.
    if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
        LQuals.getAddressSpace() != RQuals.getAddressSpace())
      return {};

    // Exactly one GC qualifier difference is allowed: __strong is
    // okay if the other type has no GC qualifier but is an Objective
    // C object pointer (i.e. implicitly strong by default). We fix
    // this by pretending that the unqualified type was actually
    // qualified __strong.
    Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
    Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
    assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");

    // __weak never merges with anything else.
    if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
      return {};

    // Prefer the side that spells __strong explicitly.
    if (GC_L == Qualifiers::Strong)
      return LHS;
    if (GC_R == Qualifiers::Strong)
      return RHS;
    return {};
  }

  // Qualifiers match; for ObjC object pointers, recurse into the pointee.
  if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
    QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType();
    QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType();
    QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT);
    if (ResQT == LHSBaseQT)
      return LHS;
    if (ResQT == RHSBaseQT)
      return RHS;
  }
  return {};
}

//===----------------------------------------------------------------------===//
//                         Integer Predicates
//===----------------------------------------------------------------------===//

/// Return the width in bits of the integer type \p T: enums use their
/// underlying integer type, bool is 1 bit, _BitInt uses its declared bit
/// count, and builtin types fall back to the target's size for the type.
unsigned ASTContext::getIntWidth(QualType T) const {
  if (const auto *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType();
  if (T->isBooleanType())
    return 1;
  if (const auto *EIT = T->getAs<BitIntType>())
    return EIT->getNumBits();
  // For builtin types, just use the standard type sizing method
  return (unsigned)getTypeSize(T);
}

QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x signed int> -> <4 x unsigned int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
                         VTy->getNumElements(), VTy->getVectorKind());

  // For _BitInt, return an unsigned _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/true, EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_U:
    // Plain `char` is mapped to `unsigned char` even if it's already unsigned
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
  case BuiltinType::Char8:
    return UnsignedCharTy;
  case BuiltinType::Short:
    return UnsignedShortTy;
  case BuiltinType::Int:
    return UnsignedIntTy;
  case BuiltinType::Long:
    return UnsignedLongTy;
  case BuiltinType::LongLong:
    return UnsignedLongLongTy;
  case BuiltinType::Int128:
    return UnsignedInt128Ty;
  // wchar_t is special. It is either signed or not, but when it's signed,
  // there's no matching "unsigned wchar_t". Therefore we return the unsigned
  // version of its underlying type instead.
  case BuiltinType::WChar_S:
    return getUnsignedWCharType();

  case BuiltinType::ShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::Accum:
    return UnsignedAccumTy;
  case BuiltinType::LongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::SatAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::SatLongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::Fract:
    return UnsignedFractTy;
  case BuiltinType::LongFract:
    return UnsignedLongFractTy;
  case BuiltinType::SatShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::SatFract:
    return SatUnsignedFractTy;
  case BuiltinType::SatLongFract:
    return SatUnsignedLongFractTy;
  default:
    // Anything else must already be unsigned (or an unsigned fixed-point
    // type) and is returned unchanged.
    assert((T->hasUnsignedIntegerRepresentation() ||
            T->isUnsignedFixedPointType()) &&
           "Unexpected signed integer or fixed point type");
    return T;
  }
}

QualType ASTContext::getCorrespondingSignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x unsigned int> -> <4 x signed int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(getCorrespondingSignedType(VTy->getElementType()),
                         VTy->getNumElements(), VTy->getVectorKind());

  // For _BitInt, return a signed _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/false, EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_S:
    // Plain `char` is mapped to `signed char` even if it's already signed
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
  case BuiltinType::Char8:
    return SignedCharTy;
  case BuiltinType::UShort:
    return ShortTy;
  case BuiltinType::UInt:
    return IntTy;
  case BuiltinType::ULong:
    return LongTy;
  case BuiltinType::ULongLong:
    return LongLongTy;
  case BuiltinType::UInt128:
    return Int128Ty;
  // wchar_t is special. It is either unsigned or not, but when it's unsigned,
  // there's no matching "signed wchar_t". Therefore we return the signed
  // version of its underlying type instead.
  case BuiltinType::WChar_U:
    return getSignedWCharType();

  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    // Anything else must already be signed (or a signed fixed-point type)
    // and is returned unchanged.
    assert(
        (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
        "Unexpected signed integer or fixed point type");
    return T;
  }
}

ASTMutationListener::~ASTMutationListener() = default;

// Default no-op notification; subclasses override to observe deduction.
void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
                                            QualType ReturnType) {}

//===----------------------------------------------------------------------===//
//                          Builtin Type Computation
//===----------------------------------------------------------------------===//

/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
/// pointer over the consumed characters. This returns the resultant type. If
/// AllowTypeModifiers is false then modifier like * are not parsed, just basic
/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
/// a vector of "i*".
///
/// RequiresICE is filled in on return to indicate whether the value is required
/// to be an Integer Constant Expression.
static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
                                  ASTContext::GetBuiltinTypeError &Error,
                                  bool &RequiresICE,
                                  bool AllowTypeModifiers) {
  // Accumulated prefix modifiers: HowLong counts 'L's (0..3); Signed/Unsigned
  // record an explicit 'S'/'U'.
  int HowLong = 0;
  bool Signed = false, Unsigned = false;
  RequiresICE = false;

  // Read the prefixed modifiers first.
  bool Done = false;
  // IsSpecial tracks (asserts-only) that at most one of the mutually
  // exclusive width modifiers 'N'/'W'/'Z'/'O' has been seen.
#ifndef NDEBUG
  bool IsSpecial = false;
#endif
  while (!Done) {
    switch (*Str++) {
    default: Done = true; --Str; break; // Not a modifier: back up, fall out.
    case 'I':
      RequiresICE = true;
      break;
    case 'S':
      assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
      assert(!Signed && "Can't use 'S' modifier multiple times!");
      Signed = true;
      break;
    case 'U':
      assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
      assert(!Unsigned && "Can't use 'U' modifier multiple times!");
      Unsigned = true;
      break;
    case 'L':
      assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers");
      assert(HowLong <= 2 && "Can't have LLLL modifier");
      ++HowLong;
      break;
    case 'N':
      // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      if (Context.getTargetInfo().getLongWidth() == 32)
        ++HowLong;
      break;
    case 'W':
      // This modifier represents int64 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // Map to however the target spells its 64-bit signed integer.
      switch (Context.getTargetInfo().getInt64Type()) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'Z':
      // This modifier represents int32 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // Map to however the target spells its 32-bit signed integer.
      switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedInt:
        HowLong = 0;
        break;
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'O':
      // 'O' is 'long' for OpenCL and 'long long' otherwise.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      if (Context.getLangOpts().OpenCL)
        HowLong = 1;
      else
        HowLong = 2;
      break;
    }
  }

  QualType Type;

  // Read the base type.
  switch (*Str++) {
  default: llvm_unreachable("Unknown builtin type letter!");
  case 'x':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'x'!");
    Type = Context.Float16Ty;
    break;
  case 'y':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'y'!");
    Type = Context.BFloat16Ty;
    break;
  case 'v':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'v'!");
    Type = Context.VoidTy;
    break;
  case 'h':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'h'!");
    Type = Context.HalfTy;
    break;
  case 'f':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'f'!");
    Type = Context.FloatTy;
    break;
  case 'd':
    // 'd' = double, 'Ld' = long double, 'LLd' = __float128.
    assert(HowLong < 3 && !Signed && !Unsigned &&
           "Bad modifiers used with 'd'!");
    if (HowLong == 1)
      Type = Context.LongDoubleTy;
    else if (HowLong == 2)
      Type = Context.Float128Ty;
    else
      Type = Context.DoubleTy;
    break;
  case 's':
    assert(HowLong == 0 && "Bad modifiers used with 's'!");
    if (Unsigned)
      Type = Context.UnsignedShortTy;
    else
      Type = Context.ShortTy;
    break;
  case 'i':
    // 'i' widens with 'L' count: int, long, long long, __int128.
    if (HowLong == 3)
      Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
    else if (HowLong == 2)
      Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
    else if (HowLong == 1)
      Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
    else
      Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
    break;
  case 'c':
    assert(HowLong == 0 && "Bad modifiers used with 'c'!");
    if (Signed)
      Type = Context.SignedCharTy;
    else if (Unsigned)
      Type = Context.UnsignedCharTy;
    else
      Type = Context.CharTy;
    break;
  case 'b': // boolean
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
    Type = Context.BoolTy;
    break;
  case 'z': // size_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
    Type = Context.getSizeType();
    break;
  case 'w': // wchar_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!");
    Type = Context.getWideCharType();
    break;
  case 'F':
    Type = Context.getCFConstantStringType();
    break;
  case 'G':
    Type = Context.getObjCIdType();
    break;
  case 'H':
    Type = Context.getObjCSelType();
    break;
  case 'M':
    Type = Context.getObjCSuperType();
    break;
  case 'a':
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    break;
  case 'A':
    // This is a "reference" to a va_list; however, what exactly
    // this means depends on how va_list is defined. There are two
    // different kinds of va_list: ones passed by value, and ones
    // passed by reference. An example of a by-value va_list is
    // x86, where va_list is a char*. An example of by-ref va_list
    // is x86-64, where va_list is a __va_list_tag[1]. For x86,
    // we want this argument to be a char*&; for x86-64, we want
    // it to be a __va_list_tag*.
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    if (Type->isArrayType())
      Type = Context.getArrayDecayedType(Type);
    else
      Type = Context.getLValueReferenceType(Type);
    break;
  case 'q': {
    // 'q<N><elt>': scalable vector of N elements.
    char *End;
    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, false);
    assert(!RequiresICE && "Can't require vector ICE");

    Type = Context.getScalableVectorType(ElementType, NumElements);
    break;
  }
  case 'V': {
    // 'V<N><elt>': fixed-width generic vector of N elements.
    char *End;
    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, false);
    assert(!RequiresICE && "Can't require vector ICE");

    // TODO: No way to make AltiVec vectors in builtins yet.
    Type = Context.getVectorType(ElementType, NumElements,
                                 VectorType::GenericVector);
    break;
  }
  case 'E': {
    // 'E<N><elt>': ext_vector of N elements.
    char *End;

    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");

    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             false);
    Type = Context.getExtVectorType(ElementType, NumElements);
    break;
  }
  case 'X': {
    // 'X<elt>': _Complex of the element type.
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             false);
    assert(!RequiresICE && "Can't require complex ICE");
    Type = Context.getComplexType(ElementType);
    break;
  }
  case 'Y':
    Type = Context.getPointerDiffType();
    break;
  case 'P':
    // FILE* requires <stdio.h> to have been seen; report if it wasn't.
    Type = Context.getFILEType();
    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_stdio;
      return {};
    }
    break;
  case 'J':
    // jmp_buf / sigjmp_buf ('SJ') require <setjmp.h>.
    if (Signed)
      Type = Context.getsigjmp_bufType();
    else
      Type = Context.getjmp_bufType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_setjmp;
      return {};
    }
    break;
  case 'K':
    // ucontext_t requires <ucontext.h>.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
    Type = Context.getucontext_tType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_ucontext;
      return {};
    }
    break;
  case 'p':
    Type = Context.getProcessIDType();
    break;
  }

  // If there are modifiers and if we're allowed to parse them, go for it.
  Done = !AllowTypeModifiers;
  while (!Done) {
    switch (char c = *Str++) {
    default: Done = true; --Str; break; // Not a suffix modifier: back up.
    case '*':
    case '&': {
      // Both pointers and references can have their pointee types
      // qualified with an address space.
      char *End;
      unsigned AddrSpace = strtoul(Str, &End, 10);
      if (End != Str) {
        // Note AddrSpace == 0 is not the same as an unspecified address space.
        Type = Context.getAddrSpaceQualType(
            Type,
            Context.getLangASForBuiltinAddressSpace(AddrSpace));
        Str = End;
      }
      if (c == '*')
        Type = Context.getPointerType(Type);
      else
        Type = Context.getLValueReferenceType(Type);
      break;
    }
    // FIXME: There's no way to have a built-in with an rvalue ref arg.
    case 'C':
      Type = Type.withConst();
      break;
    case 'D':
      Type = Context.getVolatileType(Type);
      break;
    case 'R':
      Type = Type.withRestrict();
      break;
    }
  }

  assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
         "Integer constant 'I' type must be an integer");

  return Type;
}

// On some targets such as PowerPC, some of the builtins are defined with custom
// type descriptors for target-dependent types. These descriptors are decoded in
// other functions, but it may be useful to be able to fall back to default
// descriptor decoding to define builtins mixing target-dependent and target-
// independent types. This function allows decoding one type descriptor with
// default decoding.
QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context,
                                   GetBuiltinTypeError &Error, bool &RequireICE,
                                   bool AllowTypeModifiers) const {
  return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers);
}

/// GetBuiltinType - Return the type for the specified builtin.
QualType ASTContext::GetBuiltinType(unsigned Id,
                                    GetBuiltinTypeError &Error,
                                    unsigned *IntegerConstantArgs) const {
  const char *TypeStr = BuiltinInfo.getTypeString(Id);
  if (TypeStr[0] == '\0') {
    Error = GE_Missing_type;
    return {};
  }

  SmallVector<QualType, 8> ArgTypes;

  bool RequiresICE = false;
  Error = GE_None;
  // The first encoded type in the string is the builtin's result type.
  QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error,
                                       RequiresICE, true);
  if (Error != GE_None)
    return {};

  assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");

  // Decode parameter types until the end of the string, or a trailing '.'
  // (which marks the builtin as variadic), is reached.
  while (TypeStr[0] && TypeStr[0] != '.') {
    QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true);
    if (Error != GE_None)
      return {};

    // If this argument is required to be an IntegerConstantExpression and the
    // caller cares, fill in the bitmask we return.
    if (RequiresICE && IntegerConstantArgs)
      *IntegerConstantArgs |= 1 << ArgTypes.size();

    // Do array -> pointer decay.  The builtin should use the decayed type.
    if (Ty->isArrayType())
      Ty = getArrayDecayedType(Ty);

    ArgTypes.push_back(Ty);
  }

  if (Id == Builtin::BI__GetExceptionInfo)
    return {};

  assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
         "'.' should only occur at end of builtin type list!");

  bool Variadic = (TypeStr[0] == '.');

  // Builtins use the default (builtin) calling convention, never a
  // user-configured one.
  FunctionType::ExtInfo EI(getDefaultCallingConvention(
      Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));
  if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true);


  // We really shouldn't be making a no-proto type here.
  if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes())
    return getFunctionNoProtoType(ResType, EI);

  FunctionProtoType::ExtProtoInfo EPI;
  EPI.ExtInfo = EI;
  EPI.Variadic = Variadic;
  if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id))
    EPI.ExceptionSpec.Type =
        getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;

  return getFunctionType(ResType, ArgTypes, EPI);
}

/// Compute the base GVA linkage for a function from its visibility, template
/// specialization kind and inline semantics. Attribute- and external-AST-based
/// adjustments are layered on top by GetGVALinkageForFunction().
static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
                                             const FunctionDecl *FD) {
  if (!FD->isExternallyVisible())
    return GVA_Internal;

  // Non-user-provided functions get emitted as weak definitions with every
  // use, no matter whether they've been explicitly instantiated etc.
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isUserProvided())
      return GVA_DiscardableODR;

  GVALinkage External;
  switch (FD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
    External = GVA_StrongExternal;
    break;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  // C++11 [temp.explicit]p10:
  //   [ Note: The intent is that an inline function that is the subject of
  //   an explicit instantiation declaration will still be implicitly
  //   instantiated when used so that the body can be considered for
  //   inlining, but that no out-of-line copy of the inline function would be
  //   generated in the translation unit. -- end note ]
  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    External = GVA_DiscardableODR;
    break;
  }

  if (!FD->isInlined())
    return External;

  if ((!Context.getLangOpts().CPlusPlus &&
       !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
       !FD->hasAttr<DLLExportAttr>()) ||
      FD->hasAttr<GNUInlineAttr>()) {
    // FIXME: This doesn't match gcc's behavior for dllexport inline functions.

    // GNU or C99 inline semantics. Determine whether this symbol should be
    // externally visible.
    if (FD->isInlineDefinitionExternallyVisible())
      return External;

    // C99 inline semantics, where the symbol is not externally visible.
    return GVA_AvailableExternally;
  }

  // Functions specified with extern and inline in -fms-compatibility mode
  // forcibly get emitted.  While the body of the function cannot be later
  // replaced, the function definition cannot be discarded.
  if (FD->isMSExternInline())
    return GVA_StrongODR;

  return GVA_DiscardableODR;
}

/// Adjust a base GVA linkage for dllimport/dllexport and CUDA/HIP
/// device-compilation attributes.
static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
                                                const Decl *D, GVALinkage L) {
  // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
  // dllexport/dllimport on inline functions.
  if (D->hasAttr<DLLImportAttr>()) {
    if (L == GVA_DiscardableODR || L == GVA_StrongODR)
      return GVA_AvailableExternally;
  } else if (D->hasAttr<DLLExportAttr>()) {
    if (L == GVA_DiscardableODR)
      return GVA_StrongODR;
  } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) {
    // Device-side functions with __global__ attribute must always be
    // visible externally so they can be launched from host.
    // Promote discardable/internal __global__ kernels to a strong definition
    // so the host side can always resolve and launch them.
    if (D->hasAttr<CUDAGlobalAttr>() &&
        (L == GVA_DiscardableODR || L == GVA_Internal))
      return GVA_StrongODR;
    // Single source offloading languages like CUDA/HIP need to be able to
    // access static device variables from host code of the same compilation
    // unit. This is done by externalizing the static variable with a shared
    // name between the host and device compilation which is the same for the
    // same compilation unit whereas different among different compilation
    // units.
    if (Context.shouldExternalize(D))
      return GVA_StrongExternal;
  }
  return L;
}

/// Adjust the GVALinkage for a declaration based on what an external AST source
/// knows about whether there can be other definitions of this declaration.
static GVALinkage
adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D,
                                          GVALinkage L) {
  ExternalASTSource *Source = Ctx.getExternalSource();
  if (!Source)
    return L;

  switch (Source->hasExternalDefinitions(D)) {
  case ExternalASTSource::EK_Never:
    // Other translation units rely on us to provide the definition.
    if (L == GVA_DiscardableODR)
      return GVA_StrongODR;
    break;

  case ExternalASTSource::EK_Always:
    return GVA_AvailableExternally;

  case ExternalASTSource::EK_ReplyHazy:
    break;
  }
  return L;
}

/// Compute the GVA linkage for a function: the basic linkage, adjusted first
/// for attributes (dllimport/export, CUDA) and then for external AST sources.
GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
  return adjustGVALinkageForExternalDefinitionKind(*this, FD,
           adjustGVALinkageForAttributes(*this, FD,
             basicGVALinkageForFunction(*this, FD)));
}

/// Compute the base GVA linkage for a variable, before attribute- and
/// external-AST-based adjustments (see GetGVALinkageForVariable).
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
                                             const VarDecl *VD) {
  if (!VD->isExternallyVisible())
    return GVA_Internal;

  if (VD->isStaticLocal()) {
    const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
    while (LexicalContext && !isa<FunctionDecl>(LexicalContext))
      LexicalContext = LexicalContext->getLexicalParent();

    // ObjC Blocks can create local variables that don't have a FunctionDecl
    // LexicalContext.
    if (!LexicalContext)
      return GVA_DiscardableODR;

    // Otherwise, let the static local variable inherit its linkage from the
    // nearest enclosing function.
    auto StaticLocalLinkage =
        Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext));

    // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
    // be emitted in any object with references to the symbol for the object it
    // contains, whether inline or out-of-line."
    // Similar behavior is observed with MSVC. An alternative ABI could use
    // StrongODR/AvailableExternally to match the function, but none are
    // known/supported currently.
    if (StaticLocalLinkage == GVA_StrongODR ||
        StaticLocalLinkage == GVA_AvailableExternally)
      return GVA_DiscardableODR;
    return StaticLocalLinkage;
  }

  // MSVC treats in-class initialized static data members as definitions.
  // By giving them non-strong linkage, out-of-line definitions won't
  // cause link errors.
  if (Context.isMSStaticDataMemberInlineDefinition(VD))
    return GVA_DiscardableODR;

  // Most non-template variables have strong linkage; inline variables are
  // linkonce_odr or (occasionally, for compatibility) weak_odr.
  GVALinkage StrongLinkage;
  switch (Context.getInlineVariableDefinitionKind(VD)) {
  case ASTContext::InlineVariableDefinitionKind::None:
    StrongLinkage = GVA_StrongExternal;
    break;
  case ASTContext::InlineVariableDefinitionKind::Weak:
  case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
    StrongLinkage = GVA_DiscardableODR;
    break;
  case ASTContext::InlineVariableDefinitionKind::Strong:
    StrongLinkage = GVA_StrongODR;
    break;
  }

  switch (VD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
    return StrongLinkage;

  case TSK_ExplicitSpecialization:
    // In the Microsoft ABI, explicitly specialized static data members get
    // ODR (comdat) linkage rather than a strong external definition.
    return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
                   VD->isStaticDataMember()
               ?
// (continuation of the conditional expression begun in the previous chunk)
               GVA_StrongODR
               : StrongLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    return GVA_DiscardableODR;
  }

  llvm_unreachable("Invalid Linkage!");
}

/// Compute the GVA linkage for a variable: the basic linkage, adjusted first
/// for attributes and then for external AST sources.
GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
  return adjustGVALinkageForExternalDefinitionKind(*this, VD,
           adjustGVALinkageForAttributes(*this, VD,
             basicGVALinkageForVariable(*this, VD)));
}

/// Determine whether a declaration must be emitted by CodeGen (as opposed to
/// being safely deferrable or discardable).
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (!VD->isFileVarDecl())
      return false;
    // Global named register variables (GNU extension) are never emitted.
    if (VD->getStorageClass() == SC_Register)
      return false;
    if (VD->getDescribedVarTemplate() ||
        isa<VarTemplatePartialSpecializationDecl>(VD))
      return false;
  } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // We never need to emit an uninstantiated function template.
    if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
      return false;
  } else if (isa<PragmaCommentDecl>(D))
    return true;
  else if (isa<PragmaDetectMismatchDecl>(D))
    return true;
  else if (isa<OMPRequiresDecl>(D))
    return true;
  else if (isa<OMPThreadPrivateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPAllocateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<ImportDecl>(D))
    return true;
  else
    return false;

  // If this is a member of a class template, we do not need to emit it.
  if (D->getDeclContext()->isDependentContext())
    return false;

  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;

  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;

  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();

    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;

    // The key function for a class is required.  This rule only comes
    // into play when inline functions can be key functions, though.
    if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
        const CXXRecordDecl *RD = MD->getParent();
        if (MD->isOutOfLine() && RD->isDynamicClass()) {
          const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
          if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
            return true;
        }
      }
    }

    GVALinkage Linkage = GetGVALinkageForFunction(FD);

    // static, static inline, always_inline, and extern inline functions can
    // always be deferred.  Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    return !isDiscardableGVALinkage(Linkage);
  }

  const auto *VD = cast<VarDecl>(D);
  assert(VD->isFileVarDecl() && "Expected file scoped var");

  // If the decl is marked as `declare target to`, it should be emitted for the
  // host and for the device.
  if (LangOpts.OpenMP &&
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
    return true;

  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
      !isMSStaticDataMemberInlineDefinition(VD))
    return false;

  // Variables that can be needed in other TUs are required.
  auto Linkage = GetGVALinkageForVariable(VD);
  if (!isDiscardableGVALinkage(Linkage))
    return true;

  // We never need to emit a variable that is available in another TU.
  if (Linkage == GVA_AvailableExternally)
    return false;

  // Variables that have destruction with side-effects are required.
  if (VD->needsDestruction(*this))
    return true;

  // Variables that have initialization with side-effects are required.
  if (VD->getInit() && VD->getInit()->HasSideEffects(*this) &&
      // We can get a value-dependent initializer during error recovery.
      (VD->getInit()->isValueDependent() || !VD->evaluateValue()))
    return true;

  // Likewise, variables with tuple-like bindings are required if their
  // bindings have side-effects.
  if (const auto *DD = dyn_cast<DecompositionDecl>(VD))
    for (const auto *BD : DD->bindings())
      if (const auto *BindingVD = BD->getHoldingVar())
        if (DeclMustBeEmitted(BindingVD))
          return true;

  return false;
}

/// Invoke \p Pred once for each declared version of the multiversioned
/// function \p FD (found by name lookup in FD's redeclaration context).
void ASTContext::forEachMultiversionedFunctionVersion(
    const FunctionDecl *FD,
    llvm::function_ref<void(FunctionDecl *)> Pred) const {
  assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
  llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
  FD = FD->getMostRecentDecl();
  // FIXME: The order of traversal here matters and depends on the order of
  // lookup results, which happens to be (mostly) oldest-to-newest, but we
  // shouldn't rely on that.
  // (continuation of forEachMultiversionedFunctionVersion: visit each lookup
  // result of FD's name whose type matches, deduplicating via SeenDecls)
  for (auto *CurDecl :
       FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
    FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
    if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
        !llvm::is_contained(SeenDecls, CurFD)) {
      SeenDecls.insert(CurFD);
      Pred(CurFD);
    }
  }
}

/// Pick the calling convention for a function declared without an explicit
/// one, honoring -fdefault-calling-conv for non-builtins.
CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
                                                    bool IsCXXMethod,
                                                    bool IsBuiltin) const {
  // Pass through to the C++ ABI object
  if (IsCXXMethod)
    return ABI->getDefaultMethodCallConv(IsVariadic);

  // Builtins ignore user-specified default calling convention and remain the
  // Target's default calling convention.
  if (!IsBuiltin) {
    switch (LangOpts.getDefaultCallingConv()) {
    case LangOptions::DCC_None:
      break;
    case LangOptions::DCC_CDecl:
      return CC_C;
    case LangOptions::DCC_FastCall:
      if (getTargetInfo().hasFeature("sse2") && !IsVariadic)
        return CC_X86FastCall;
      break;
    case LangOptions::DCC_StdCall:
      if (!IsVariadic)
        return CC_X86StdCall;
      break;
    case LangOptions::DCC_VectorCall:
      // __vectorcall cannot be applied to variadic functions.
      if (!IsVariadic)
        return CC_X86VectorCall;
      break;
    case LangOptions::DCC_RegCall:
      // __regcall cannot be applied to variadic functions.
      if (!IsVariadic)
        return CC_X86RegCall;
      break;
    }
  }
  return Target->getDefaultCallingConv();
}

bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object
  return ABI->isNearlyEmpty(RD);
}

/// Lazily create (on first use) and return the vtable layout context matching
/// the target C++ ABI.
VTableContextBase *ASTContext::getVTableContext() {
  if (!VTContext.get()) {
    auto ABI = Target->getCXXABI();
    if (ABI.isMicrosoft())
      VTContext.reset(new MicrosoftVTableContext(*this));
    else {
      auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
                                 ? ItaniumVTableContext::Relative
                                 : ItaniumVTableContext::Pointer;
      VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout));
    }
  }
  return VTContext.get();
}

/// Create a mangling context for the given target (defaulting to the main
/// target). Caller owns the result.
MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
  if (!T)
    T = Target;
  switch (T->getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(*this, getDiagnostics());
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}

/// Create a mangling context for the device side of a CUDA/HIP-style
/// offloading compilation; lambdas use device-side mangling numbers.
MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
  assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&
         "Device mangle context does not support Microsoft mangling.");
  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(
        *this, getDiagnostics(),
        [](ASTContext &, const NamedDecl *ND) -> std::optional<unsigned> {
          if (const auto *RD = dyn_cast<CXXRecordDecl>(ND))
            return RD->getDeviceLambdaManglingNumber();
          return std::nullopt;
        },
        /*IsAux=*/true);
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics(),
                                          /*IsAux=*/true);
  }
  llvm_unreachable("Unsupported ABI");
}

CXXABI::~CXXABI() = default;

/// Approximate heap memory used by the context's side tables (for -print-stats
/// style reporting).
size_t ASTContext::getSideTableAllocatedMemory() const {
  return ASTRecordLayouts.getMemorySize() +
         llvm::capacity_in_bytes(ObjCLayouts) +
         llvm::capacity_in_bytes(KeyFunctions) +
         llvm::capacity_in_bytes(ObjCImpls) +
         llvm::capacity_in_bytes(BlockVarCopyInits) +
         llvm::capacity_in_bytes(DeclAttrs) +
         llvm::capacity_in_bytes(TemplateOrInstantiation) +
         llvm::capacity_in_bytes(InstantiatedFromUsingDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) +
         llvm::capacity_in_bytes(OverriddenMethods) +
         llvm::capacity_in_bytes(Types) +
         llvm::capacity_in_bytes(VariableArrayTypes);
}

/// getIntTypeForBitwidth -
/// sets integer QualTy according to specified details:
/// bitwidth, signed/unsigned.
/// Returns empty type if there is no appropriate target types.
QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
                                           unsigned Signed) const {
  TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed);
  CanQualType QualTy = getFromTargetType(Ty);
  // 128-bit integers are not among the standard target int types, so handle
  // them explicitly.
  if (!QualTy && DestWidth == 128)
    return Signed ?
// (continuation of the conditional expression begun in the previous chunk)
                    Int128Ty : UnsignedInt128Ty;
  return QualTy;
}

/// getRealTypeForBitwidth -
/// sets floating point QualTy according to specified bitwidth.
/// Returns empty type if there is no appropriate target types.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
                                            FloatModeKind ExplicitType) const {
  FloatModeKind Ty =
      getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType);
  switch (Ty) {
  case FloatModeKind::Half:
    return HalfTy;
  case FloatModeKind::Float:
    return FloatTy;
  case FloatModeKind::Double:
    return DoubleTy;
  case FloatModeKind::LongDouble:
    return LongDoubleTy;
  case FloatModeKind::Float128:
    return Float128Ty;
  case FloatModeKind::Ibm128:
    return Ibm128Ty;
  case FloatModeKind::NoFloat:
    return {};
  }

  llvm_unreachable("Unhandled TargetInfo::RealType value");
}

/// Record the mangling number for a declaration; 1 is the implicit default and
/// is not stored.
void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
  if (Number > 1)
    MangleNumbers[ND] = Number;
}

unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
                                       bool ForAuxTarget) const {
  auto I = MangleNumbers.find(ND);
  unsigned Res = I != MangleNumbers.end() ? I->second : 1;
  // CUDA/HIP host compilation encodes host and device mangling numbers
  // as lower and upper half of 32 bit integer.
  if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) {
    Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF;
  } else {
    assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling "
                            "number for aux target");
  }
  // Clamp to the default of 1 (an extracted half may be 0).
  return Res > 1 ? Res : 1;
}

/// Record the static-local discriminator for a variable; 1 is the implicit
/// default and is not stored.
void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
  if (Number > 1)
    StaticLocalNumbers[VD] = Number;
}

unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
  auto I = StaticLocalNumbers.find(VD);
  return I != StaticLocalNumbers.end() ? I->second : 1;
}

/// Lazily create and return the mangling-number context for the given
/// DeclContext.
MangleNumberingContext &
ASTContext::getManglingNumberContext(const DeclContext *DC) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

/// Lazily create and return the extra mangling-number context keyed on a
/// specific declaration (used when a decl needs its own numbering scope).
MangleNumberingContext &
ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx =
      ExtraMangleNumberingContexts[D];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

std::unique_ptr<MangleNumberingContext>
ASTContext::createMangleNumberingContext() const {
  return ABI->createMangleNumberingContext();
}

// The next few methods are thin pass-throughs to the C++ ABI object, keyed on
// canonical (first) declarations where required.

const CXXConstructorDecl *
ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
  return ABI->getCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()));
}

void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
                                                      CXXConstructorDecl *CD) {
  return ABI->addCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()),
      cast<CXXConstructorDecl>(CD->getFirstDecl()));
}

void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
                                                 TypedefNameDecl *DD) {
  return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
}

TypedefNameDecl *
ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getTypedefNameForUnnamedTagDecl(TD);
}

void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
                                                DeclaratorDecl *DD) {
  return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
}

DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getDeclaratorForUnnamedTagDecl(TD);
}

void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
  ParamIndices[D] = index;
}

unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
  ParameterIndexTable::const_iterator I = ParamIndices.find(D);
  assert(I != ParamIndices.end() &&
         "ParmIndices lacks entry set by ParmVarDecl");
  return I->second;
}

/// Build the array type for a string literal of \p Length characters with
/// element type \p EltTy (const-qualified in C++ / -fconst-strings modes).
QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
                                               unsigned Length) const {
  // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
  if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
    EltTy = EltTy.withConst();

  EltTy = adjustStringLiteralBaseType(EltTy);

  // Get an array type for the string, according to C99 6.4.5. This includes
  // the null terminator character.
  // (continuation of getStringLiteralArrayType: size is Length + 1 to account
  // for the null terminator)
  return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr,
                              ArrayType::Normal, /*IndexTypeQuals*/ 0);
}

/// Return a uniqued predefined string literal (e.g. __func__ contents) for
/// \p Key, creating and caching it on first use.
StringLiteral *
ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
  StringLiteral *&Result = StringLiteralCache[Key];
  if (!Result)
    Result = StringLiteral::Create(
        *this, Key, StringLiteral::Ordinary,
        /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
        SourceLocation());
  return Result;
}

/// Return the uniqued MSGuidDecl for the given GUID value (MS extensions).
MSGuidDecl *
ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
  assert(MSGuidTagDecl && "building MS GUID without MS extensions?");

  llvm::FoldingSetNodeID ID;
  MSGuidDecl::Profile(ID, Parts);

  void *InsertPos;
  if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  QualType GUIDType = getMSGuidType().withConst();
  MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
  MSGuidDecls.InsertNode(New, InsertPos);
  return New;
}

/// Return the uniqued unnamed global constant of type \p Ty with value
/// \p APVal, creating it on first use.
UnnamedGlobalConstantDecl *
ASTContext::getUnnamedGlobalConstantDecl(QualType Ty,
                                         const APValue &APVal) const {
  llvm::FoldingSetNodeID ID;
  UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal);

  void *InsertPos;
  if (UnnamedGlobalConstantDecl *Existing =
          UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  UnnamedGlobalConstantDecl *New =
      UnnamedGlobalConstantDecl::Create(*this, Ty, APVal);
  UnnamedGlobalConstantDecls.InsertNode(New, InsertPos);
  return New;
}

/// Return the uniqued template parameter object for a class-type non-type
/// template argument with value \p V.
TemplateParamObjectDecl *
ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
  assert(T->isRecordType() && "template param object of unexpected type");

  // C++ [temp.param]p8:
  //   [...] a static storage duration object of type 'const T' [...]
  T.addConst();

  llvm::FoldingSetNodeID ID;
  TemplateParamObjectDecl::Profile(ID, T, V);

  void *InsertPos;
  if (TemplateParamObjectDecl *Existing =
          TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V);
  TemplateParamObjectDecls.InsertNode(New, InsertPos);
  return New;
}

/// On old Darwin targets, atomics of unusual size/alignment lower to libcalls
/// the OS does not provide; detect that case for this atomic expression.
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  const llvm::Triple &T = getTargetInfo().getTriple();
  if (!T.isOSDarwin())
    return false;

  // Only iOS < 7 and macOS < 10.9 lack the libcalls.
  if (!(T.isiOS() && T.isOSVersionLT(7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(10, 9)))
    return false;

  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
}

/// Check whether an ObjC method implementation matches its declaration in
/// qualifiers, return type, parameter types and variadic-ness.
bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
                                const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
  if (MethodDecl->hasAttr<UnavailableAttr>()
      || MethodDecl->hasAttr<DeprecatedAttr>())
    return false;
  if (MethodDecl->getObjCDeclQualifier() !=
      MethodImpl->getObjCDeclQualifier())
    return false;
  if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType()))
    return false;

  if (MethodDecl->param_size() != MethodImpl->param_size())
    return false;

  // Compare parameters pairwise: qualifiers and types must match.
  for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
       IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
       EF = MethodDecl->param_end();
       IM != EM && IF != EF; ++IM, ++IF) {
    const ParmVarDecl *DeclVar = (*IF);
    const ParmVarDecl *ImplVar = (*IM);
    if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
      return false;
    if (!hasSameType(DeclVar->getType(), ImplVar->getType()))
      return false;
  }

  return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
}

/// Return the target's representation of a null pointer for the address space
/// of \p QT (Default for nullptr_t, otherwise the pointee's address space).
uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
  LangAS AS;
  if (QT->getUnqualifiedDesugaredType()->isNullPtrType())
    AS = LangAS::Default;
  else
    AS = QT->getPointeeType().getAddressSpace();

  return getTargetInfo().getNullPointerValue(AS);
}

unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
  return getTargetInfo().getTargetAddressSpace(AS);
}

/// Structural comparison of two expressions via canonical profiling.
bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const {
  if (X == Y)
    return true;
  if (!X || !Y)
    return false;
  llvm::FoldingSetNodeID IDX, IDY;
  X->Profile(IDX, *this, /*Canonical=*/true);
  Y->Profile(IDY, *this, /*Canonical=*/true);
  return IDX == IDY;
}

// The getCommon* helpers return, for given 'same' X and Y entities given as
// inputs, another entity which is also the 'same' as the inputs, but which
// is closer to the canonical form of the inputs, each according to a given
// criteria.
// The getCommon*Checked variants are 'null inputs not-allowed' equivalents of
// the regular ones.

/// Return whichever of X/Y is the older declaration of the same entity, or
/// null if they declare different entities.
static Decl *getCommonDecl(Decl *X, Decl *Y) {
  if (!declaresSameEntity(X, Y))
    return nullptr;
  for (const Decl *DX : X->redecls()) {
    // If we reach Y before reaching the first decl, that means X is older.
    if (DX == Y)
      return X;
    // If we reach the first decl, then Y is older.
    if (DX->isFirstDecl())
      return Y;
  }
  llvm_unreachable("Corrupt redecls chain");
}

template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
static T *getCommonDecl(T *X, T *Y) {
  return cast_or_null<T>(
      getCommonDecl(const_cast<Decl *>(cast_or_null<Decl>(X)),
                    const_cast<Decl *>(cast_or_null<Decl>(Y))));
}

template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
static T *getCommonDeclChecked(T *X, T *Y) {
  return cast<T>(getCommonDecl(const_cast<Decl *>(cast<Decl>(X)),
                               const_cast<Decl *>(cast<Decl>(Y))));
}

/// Return a common form of two template names, falling back to the shared
/// canonical name; returns a null TemplateName if they are not the same
/// template.
static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X,
                                          TemplateName Y) {
  if (X.getAsVoidPointer() == Y.getAsVoidPointer())
    return X;
  // FIXME: There are cases here where we could find a common template name
  // with more sugar. For example one could be a SubstTemplateTemplate*
  // replacing the other.
  TemplateName CX = Ctx.getCanonicalTemplateName(X);
  if (CX.getAsVoidPointer() !=
      Ctx.getCanonicalTemplateName(Y).getAsVoidPointer())
    return TemplateName();
  return CX;
}

static TemplateName
getCommonTemplateNameChecked(ASTContext &Ctx, TemplateName X, TemplateName Y) {
  TemplateName R = getCommonTemplateName(Ctx, X, Y);
  assert(R.getAsVoidPointer() != nullptr);
  return R;
}

/// Elementwise common sugared type of two equally-sized type lists.
static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs,
                           ArrayRef<QualType> Ys, bool Unqualified = false) {
  assert(Xs.size() == Ys.size());
  SmallVector<QualType, 8> Rs(Xs.size());
  for (size_t I = 0; I < Rs.size(); ++I)
    Rs[I] = Ctx.getCommonSugaredType(Xs[I], Ys[I], Unqualified);
  return Rs;
}

/// Keep the attribute location only if both sides agree on it.
template <class T>
static SourceLocation getCommonAttrLoc(const T *X, const T *Y) {
  return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc()
                                                      : SourceLocation();
}

/// Compute a common form of two template arguments of the same kind; returns
/// a null TemplateArgument when no common form exists.
static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx,
                                                  const TemplateArgument &X,
                                                  const TemplateArgument &Y) {
  if (X.getKind() != Y.getKind())
    return TemplateArgument();

  switch (X.getKind()) {
  case TemplateArgument::ArgKind::Type:
    if (!Ctx.hasSameType(X.getAsType(), Y.getAsType()))
      return TemplateArgument();
    return TemplateArgument(
        Ctx.getCommonSugaredType(X.getAsType(), Y.getAsType()));
  case TemplateArgument::ArgKind::NullPtr:
    if (!Ctx.hasSameType(X.getNullPtrType(), Y.getNullPtrType()))
      return TemplateArgument();
    return TemplateArgument(
        Ctx.getCommonSugaredType(X.getNullPtrType(), Y.getNullPtrType()),
        /*Unqualified=*/true);
  case TemplateArgument::ArgKind::Expression:
    if (!Ctx.hasSameType(X.getAsExpr()->getType(), Y.getAsExpr()->getType()))
      return TemplateArgument();
    // FIXME: Try to keep the common sugar.
    return X;
  case TemplateArgument::ArgKind::Template: {
    TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate();
    TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY);
    if (!CTN.getAsVoidPointer())
      return TemplateArgument();
    return TemplateArgument(CTN);
  }
  case TemplateArgument::ArgKind::TemplateExpansion: {
    TemplateName TX = X.getAsTemplateOrTemplatePattern(),
                 TY = Y.getAsTemplateOrTemplatePattern();
    TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY);
    if (!CTN.getAsVoidPointer())
      return TemplateName();
    auto NExpX = X.getNumTemplateExpansions();
    assert(NExpX == Y.getNumTemplateExpansions());
    return TemplateArgument(CTN, NExpX);
  }
  default:
    // FIXME: Handle the other argument kinds.
    return X;
  }
}

/// Fill \p R with the elementwise common template arguments; returns true on
/// failure (size mismatch or any pair without a common form).
static bool getCommonTemplateArguments(ASTContext &Ctx,
                                       SmallVectorImpl<TemplateArgument> &R,
                                       ArrayRef<TemplateArgument> Xs,
                                       ArrayRef<TemplateArgument> Ys) {
  if (Xs.size() != Ys.size())
    return true;
  R.resize(Xs.size());
  for (size_t I = 0; I < R.size(); ++I) {
    R[I] = getCommonTemplateArgument(Ctx, Xs[I], Ys[I]);
    if (R[I].isNull())
      return true;
  }
  return false;
}

/// Checked variant: the caller guarantees a common form exists.
static auto getCommonTemplateArguments(ASTContext &Ctx,
                                       ArrayRef<TemplateArgument> Xs,
                                       ArrayRef<TemplateArgument> Ys) {
  SmallVector<TemplateArgument, 8> R;
  bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys);
  assert(!Different);
  (void)Different;
  return R;
}

/// Keep the elaborated-type keyword only if both sides agree on it.
template <class T>
static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) {
  return X->getKeyword() == Y->getKeyword() ?
X->getKeyword()
                                            : ElaboratedTypeKeyword::ETK_None;
}

/// Keep the nested-name-specifier only if both nodes agree on it; otherwise
/// fall back to the canonical NNS.
template <class T>
static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx, const T *X,
                                         const T *Y) {
  // FIXME: Try to keep the common NNS sugar.
  return X->getQualifier() == Y->getQualifier()
             ? X->getQualifier()
             : Ctx.getCanonicalNestedNameSpecifier(X->getQualifier());
}

/// Unify the element types of two 'same' nodes (vectors, matrices, pipes).
template <class T>
static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) {
  return Ctx.getCommonSugaredType(X->getElementType(), Y->getElementType());
}

/// Unify the element types of two 'same' array nodes without qualifiers,
/// moving the qualifiers that could not be unified out into \p QX / \p QY so
/// the caller can reapply them at the array level.
template <class T>
static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X,
                                          Qualifiers &QX, const T *Y,
                                          Qualifiers &QY) {
  QualType EX = X->getElementType(), EY = Y->getElementType();
  QualType R = Ctx.getCommonSugaredType(EX, EY,
                                        /*Unqualified=*/true);
  Qualifiers RQ = R.getQualifiers();
  // Push the leftover (non-common) qualifiers up to the caller.
  QX += EX.getQualifiers() - RQ;
  QY += EY.getQualifiers() - RQ;
  return R;
}

/// Unify the pointee types of two 'same' pointer-like nodes.
template <class T>
static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) {
  return Ctx.getCommonSugaredType(X->getPointeeType(), Y->getPointeeType());
}

/// The size expressions are required to be equivalent; keep X's.
template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) {
  assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr()));
  return X->getSizeExpr();
}

/// The size modifiers are required to be equal; keep X's.
static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) {
  assert(X->getSizeModifier() == Y->getSizeModifier());
  return X->getSizeModifier();
}

/// The index-type CVR qualifiers are required to be equal; keep X's.
static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X,
                                            const ArrayType *Y) {
  assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers());
  return X->getIndexTypeCVRQualifiers();
}

// Merges two type lists such that the resulting vector will contain
// each type (in a canonical
// sense) only once, in the order they appear
// from X to Y. If they occur in both X and Y, the result will contain
// the common sugared type between them.
static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out,
                           ArrayRef<QualType> X, ArrayRef<QualType> Y) {
  // Maps each canonical type to its position in Out, for deduplication.
  llvm::DenseMap<QualType, unsigned> Found;
  for (auto Ts : {X, Y}) {
    for (QualType T : Ts) {
      auto Res = Found.try_emplace(Ctx.getCanonicalType(T), Out.size());
      if (!Res.second) {
        // Already present: unify the sugar of the two occurrences in place.
        QualType &U = Out[Res.first->second];
        U = Ctx.getCommonSugaredType(U, T);
      } else {
        Out.emplace_back(T);
      }
    }
  }
}

/// Compute an exception specification compatible with both inputs, e.g. when
/// forming a composite function type. \p ExceptionTypeStorage provides
/// caller-owned storage for a merged dynamic exception list.
FunctionProtoType::ExceptionSpecInfo
ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
                                FunctionProtoType::ExceptionSpecInfo ESI2,
                                SmallVectorImpl<QualType> &ExceptionTypeStorage,
                                bool AcceptDependent) {
  ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type;

  // If either of them can throw anything, that is the result.
  for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) {
    if (EST1 == I)
      return ESI1;
    if (EST2 == I)
      return ESI2;
  }

  // If either of them is non-throwing, the result is the other.
  for (auto I :
       {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) {
    if (EST1 == I)
      return ESI2;
    if (EST2 == I)
      return ESI1;
  }

  // If we're left with value-dependent computed noexcept expressions, we're
  // stuck. Before C++17, we can just drop the exception specification entirely,
  // since it's not actually part of the canonical type. And this should never
  // happen in C++17, because it would mean we were computing the composite
  // pointer type of dependent types, which should never happen.
  if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) {
    assert(AcceptDependent &&
           "computing composite pointer type of dependent types");
    return FunctionProtoType::ExceptionSpecInfo();
  }

  // Switch over the possibilities so that people adding new values know to
  // update this function.
  switch (EST1) {
  case EST_None:
  case EST_DynamicNone:
  case EST_MSAny:
  case EST_BasicNoexcept:
  case EST_DependentNoexcept:
  case EST_NoexceptFalse:
  case EST_NoexceptTrue:
  case EST_NoThrow:
    llvm_unreachable("These ESTs should be handled above");

  case EST_Dynamic: {
    // This is the fun case: both exception specifications are dynamic. Form
    // the union of the two lists.
    assert(EST2 == EST_Dynamic && "other cases should already be handled");
    mergeTypeLists(*this, ExceptionTypeStorage, ESI1.Exceptions,
                   ESI2.Exceptions);
    FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic);
    Result.Exceptions = ExceptionTypeStorage;
    return Result;
  }

  case EST_Unevaluated:
  case EST_Uninstantiated:
  case EST_Unparsed:
    llvm_unreachable("shouldn't see unresolved exception specifications here");
  }

  llvm_unreachable("invalid ExceptionSpecificationType");
}

/// Unify two 'same' type nodes whose canonical nodes differ, producing a node
/// as close as possible to the sugared inputs. Qualifiers stripped from array
/// element types are accumulated into \p QX / \p QY for the caller to reapply.
static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
                                          Qualifiers &QX, const Type *Y,
                                          Qualifiers &QY) {
  Type::TypeClass TC = X->getTypeClass();
  assert(TC == Y->getTypeClass());
  switch (TC) {
#define UNEXPECTED_TYPE(Class, Kind)                                           \
  case Type::Class:                                                            \
    llvm_unreachable("Unexpected " Kind ": " #Class);

#define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical")
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"

#define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free")
    // These type classes carry no sugar, so they can never be the underlying
    // node of a unification where the canonical nodes differ.
    SUGAR_FREE_TYPE(Builtin)
    SUGAR_FREE_TYPE(Decltype)
    SUGAR_FREE_TYPE(DeducedTemplateSpecialization)
    SUGAR_FREE_TYPE(DependentBitInt)
    SUGAR_FREE_TYPE(Enum)
    SUGAR_FREE_TYPE(BitInt)
    SUGAR_FREE_TYPE(ObjCInterface)
    SUGAR_FREE_TYPE(Record)
    SUGAR_FREE_TYPE(SubstTemplateTypeParmPack)
    SUGAR_FREE_TYPE(UnresolvedUsing)
#undef SUGAR_FREE_TYPE
#define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique")
    NON_UNIQUE_TYPE(TypeOfExpr)
    NON_UNIQUE_TYPE(VariableArray)
#undef NON_UNIQUE_TYPE

    UNEXPECTED_TYPE(TypeOf, "sugar")

#undef UNEXPECTED_TYPE

  case Type::Auto: {
    const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y);
    assert(AX->getDeducedType().isNull());
    assert(AY->getDeducedType().isNull());
    assert(AX->getKeyword() == AY->getKeyword());
    assert(AX->isInstantiationDependentType() ==
           AY->isInstantiationDependentType());
    auto As = getCommonTemplateArguments(Ctx, AX->getTypeConstraintArguments(),
                                         AY->getTypeConstraintArguments());
    return Ctx.getAutoType(QualType(), AX->getKeyword(),
                           AX->isInstantiationDependentType(),
                           AX->containsUnexpandedParameterPack(),
                           getCommonDeclChecked(AX->getTypeConstraintConcept(),
                                                AY->getTypeConstraintConcept()),
                           As);
  }
  case Type::IncompleteArray: {
    const auto *AX = cast<IncompleteArrayType>(X),
               *AY = cast<IncompleteArrayType>(Y);
    return Ctx.getIncompleteArrayType(
        getCommonArrayElementType(Ctx, AX, QX, AY, QY),
        getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY));
  }
  case Type::DependentSizedArray: {
    const auto *AX = cast<DependentSizedArrayType>(X),
               *AY = cast<DependentSizedArrayType>(Y);
    return Ctx.getDependentSizedArrayType(
        getCommonArrayElementType(Ctx, AX, QX, AY, QY),
        getCommonSizeExpr(Ctx, AX, AY), getCommonSizeModifier(AX, AY),
        getCommonIndexTypeCVRQualifiers(AX, AY),
        // Keep the brackets range only if both nodes agree on it.
        AX->getBracketsRange() == AY->getBracketsRange()
            ? AX->getBracketsRange()
            : SourceRange());
  }
  case Type::ConstantArray: {
    const auto *AX = cast<ConstantArrayType>(X),
               *AY = cast<ConstantArrayType>(Y);
    assert(AX->getSize() == AY->getSize());
    // Keep the written size expression only if both sides wrote the same one.
    const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    return Ctx.getConstantArrayType(
        getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr,
        getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY));
  }
  case Type::Atomic: {
    const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y);
    return Ctx.getAtomicType(
        Ctx.getCommonSugaredType(AX->getValueType(), AY->getValueType()));
  }
  case Type::Complex: {
    const auto *CX = cast<ComplexType>(X), *CY = cast<ComplexType>(Y);
    return Ctx.getComplexType(getCommonArrayElementType(Ctx, CX, QX, CY, QY));
  }
  case Type::Pointer: {
    const auto *PX = cast<PointerType>(X), *PY = cast<PointerType>(Y);
    return Ctx.getPointerType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::BlockPointer: {
    const auto *PX = cast<BlockPointerType>(X), *PY = cast<BlockPointerType>(Y);
    return Ctx.getBlockPointerType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::ObjCObjectPointer: {
    const auto *PX = cast<ObjCObjectPointerType>(X),
               *PY = cast<ObjCObjectPointerType>(Y);
    return Ctx.getObjCObjectPointerType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::MemberPointer: {
    const auto *PX = cast<MemberPointerType>(X),
               *PY = cast<MemberPointerType>(Y);
    return Ctx.getMemberPointerType(
        getCommonPointeeType(Ctx, PX, PY),
        // Unify the class type as well as the pointee.
        Ctx.getCommonSugaredType(QualType(PX->getClass(), 0),
                                 QualType(PY->getClass(), 0))
            .getTypePtr());
  }
  case Type::LValueReference: {
    const auto *PX = cast<LValueReferenceType>(X),
               *PY = cast<LValueReferenceType>(Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getLValueReferenceType(getCommonPointeeType(Ctx, PX, PY),
                                      PX->isSpelledAsLValue() ||
                                          PY->isSpelledAsLValue());
  }
  case Type::RValueReference: {
    const auto *PX = cast<RValueReferenceType>(X),
               *PY = cast<RValueReferenceType>(Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getRValueReferenceType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::DependentAddressSpace: {
    const auto *PX = cast<DependentAddressSpaceType>(X),
               *PY = cast<DependentAddressSpaceType>(Y);
    assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr()));
    return Ctx.getDependentAddressSpaceType(getCommonPointeeType(Ctx, PX, PY),
                                            PX->getAddrSpaceExpr(),
                                            getCommonAttrLoc(PX, PY));
  }
  case Type::FunctionNoProto: {
    const auto *FX = cast<FunctionNoProtoType>(X),
               *FY = cast<FunctionNoProtoType>(Y);
    assert(FX->getExtInfo() == FY->getExtInfo());
    return Ctx.getFunctionNoProtoType(
        Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()),
        FX->getExtInfo());
  }
  case Type::FunctionProto: {
    const auto *FX = cast<FunctionProtoType>(X),
               *FY = cast<FunctionProtoType>(Y);
    FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(),
                                    EPIY = FY->getExtProtoInfo();
    assert(EPIX.ExtInfo == EPIY.ExtInfo);
    assert(EPIX.ExtParameterInfos == EPIY.ExtParameterInfos);
    assert(EPIX.RefQualifier == EPIY.RefQualifier);
    assert(EPIX.TypeQuals == EPIY.TypeQuals);
    assert(EPIX.Variadic == EPIY.Variadic);

    // FIXME: Can we handle an empty EllipsisLoc?
    // Use empty EllipsisLoc if X and Y differ.

    EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn;

    QualType R =
        Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType());
    auto P = getCommonTypes(Ctx, FX->param_types(), FY->param_types(),
                            /*Unqualified=*/true);

    SmallVector<QualType, 8> Exceptions;
    EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs(
        EPIX.ExceptionSpec, EPIY.ExceptionSpec, Exceptions,
        /*AcceptDependent=*/true);
    return Ctx.getFunctionType(R, P, EPIX);
  }
  case Type::ObjCObject: {
    const auto *OX = cast<ObjCObjectType>(X), *OY = cast<ObjCObjectType>(Y);
    assert(
        std::equal(OX->getProtocols().begin(), OX->getProtocols().end(),
                   OY->getProtocols().begin(), OY->getProtocols().end(),
                   [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) {
                     return P0->getCanonicalDecl() == P1->getCanonicalDecl();
                   }) &&
        "protocol lists must be the same");
    auto TAs = getCommonTypes(Ctx, OX->getTypeArgsAsWritten(),
                              OY->getTypeArgsAsWritten());
    return Ctx.getObjCObjectType(
        Ctx.getCommonSugaredType(OX->getBaseType(), OY->getBaseType()), TAs,
        OX->getProtocols(),
        OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten());
  }
  case Type::ConstantMatrix: {
    const auto *MX = cast<ConstantMatrixType>(X),
               *MY = cast<ConstantMatrixType>(Y);
    assert(MX->getNumRows() == MY->getNumRows());
    assert(MX->getNumColumns() == MY->getNumColumns());
    return Ctx.getConstantMatrixType(getCommonElementType(Ctx, MX, MY),
                                     MX->getNumRows(), MX->getNumColumns());
  }
  case Type::DependentSizedMatrix: {
    const auto *MX = cast<DependentSizedMatrixType>(X),
               *MY = cast<DependentSizedMatrixType>(Y);
    assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr()));
    assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr()));
    return Ctx.getDependentSizedMatrixType(
        getCommonElementType(Ctx, MX, MY), MX->getRowExpr(),
        MX->getColumnExpr(), getCommonAttrLoc(MX, MY));
  }
  case Type::Vector: {
    const auto *VX = cast<VectorType>(X), *VY = cast<VectorType>(Y);
    assert(VX->getNumElements() == VY->getNumElements());
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getVectorType(getCommonElementType(Ctx, VX, VY),
                             VX->getNumElements(), VX->getVectorKind());
  }
  case Type::ExtVector: {
    const auto *VX = cast<ExtVectorType>(X), *VY = cast<ExtVectorType>(Y);
    assert(VX->getNumElements() == VY->getNumElements());
    return Ctx.getExtVectorType(getCommonElementType(Ctx, VX, VY),
                                VX->getNumElements());
  }
  case Type::DependentSizedExtVector: {
    const auto *VX = cast<DependentSizedExtVectorType>(X),
               *VY = cast<DependentSizedExtVectorType>(Y);
    return Ctx.getDependentSizedExtVectorType(getCommonElementType(Ctx, VX, VY),
                                              getCommonSizeExpr(Ctx, VX, VY),
                                              getCommonAttrLoc(VX, VY));
  }
  case Type::DependentVector: {
    const auto *VX = cast<DependentVectorType>(X),
               *VY = cast<DependentVectorType>(Y);
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getDependentVectorType(
        getCommonElementType(Ctx, VX, VY), getCommonSizeExpr(Ctx, VX, VY),
        getCommonAttrLoc(VX, VY), VX->getVectorKind());
  }
  case Type::InjectedClassName: {
    const auto *IX = cast<InjectedClassNameType>(X),
               *IY = cast<InjectedClassNameType>(Y);
    return Ctx.getInjectedClassNameType(
        getCommonDeclChecked(IX->getDecl(), IY->getDecl()),
        Ctx.getCommonSugaredType(IX->getInjectedSpecializationType(),
                                 IY->getInjectedSpecializationType()));
  }
  case Type::TemplateSpecialization: {
    const auto *TX = cast<TemplateSpecializationType>(X),
               *TY = cast<TemplateSpecializationType>(Y);
    auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(),
                                         TY->template_arguments());
    return Ctx.getTemplateSpecializationType(
        ::getCommonTemplateNameChecked(Ctx, TX->getTemplateName(),
                                       TY->getTemplateName()),
        As, X->getCanonicalTypeInternal());
  }
  case Type::DependentName: {
    const auto *NX = cast<DependentNameType>(X),
               *NY = cast<DependentNameType>(Y);
    assert(NX->getIdentifier() == NY->getIdentifier());
    return Ctx.getDependentNameType(
        getCommonTypeKeyword(NX, NY), getCommonNNS(Ctx, NX, NY),
        NX->getIdentifier(), NX->getCanonicalTypeInternal());
  }
  case Type::DependentTemplateSpecialization: {
    const auto *TX = cast<DependentTemplateSpecializationType>(X),
               *TY = cast<DependentTemplateSpecializationType>(Y);
    assert(TX->getIdentifier() == TY->getIdentifier());
    auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(),
                                         TY->template_arguments());
    return Ctx.getDependentTemplateSpecializationType(
        getCommonTypeKeyword(TX, TY), getCommonNNS(Ctx, TX, TY),
        TX->getIdentifier(), As);
  }
  case Type::UnaryTransform: {
    const auto *TX = cast<UnaryTransformType>(X),
               *TY = cast<UnaryTransformType>(Y);
    assert(TX->getUTTKind() == TY->getUTTKind());
    return Ctx.getUnaryTransformType(
        Ctx.getCommonSugaredType(TX->getBaseType(), TY->getBaseType()),
        Ctx.getCommonSugaredType(TX->getUnderlyingType(),
                                 TY->getUnderlyingType()),
        TX->getUTTKind());
  }
  case Type::PackExpansion: {
    const auto *PX = cast<PackExpansionType>(X),
               *PY = cast<PackExpansionType>(Y);
    assert(PX->getNumExpansions() == PY->getNumExpansions());
    return Ctx.getPackExpansionType(
        Ctx.getCommonSugaredType(PX->getPattern(), PY->getPattern()),
        PX->getNumExpansions(), false);
  }
  case Type::Pipe: {
    const auto *PX = cast<PipeType>(X), *PY = cast<PipeType>(Y);
    assert(PX->isReadOnly() == PY->isReadOnly());
    // Pick the getter matching the (shared) read/write direction.
    auto MP = PX->isReadOnly() ?
&ASTContext::getReadPipeType
                                 : &ASTContext::getWritePipeType;
    return (Ctx.*MP)(getCommonElementType(Ctx, PX, PY));
  }
  case Type::TemplateTypeParm: {
    const auto *TX = cast<TemplateTypeParmType>(X),
               *TY = cast<TemplateTypeParmType>(Y);
    assert(TX->getDepth() == TY->getDepth());
    assert(TX->getIndex() == TY->getIndex());
    assert(TX->isParameterPack() == TY->isParameterPack());
    return Ctx.getTemplateTypeParmType(
        TX->getDepth(), TX->getIndex(), TX->isParameterPack(),
        getCommonDecl(TX->getDecl(), TY->getDecl()));
  }
  }
  llvm_unreachable("Unknown Type Class");
}

/// Unify two 'same' sugar-only type nodes which sit on top of the
/// already-unified \p Underlying type. Returns a null QualType if the sugar
/// nodes cannot be unified (the caller then stops re-adding sugar).
static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
                                       const Type *Y,
                                       SplitQualType Underlying) {
  Type::TypeClass TC = X->getTypeClass();
  if (TC != Y->getTypeClass())
    return QualType();
  switch (TC) {
#define UNEXPECTED_TYPE(Class, Kind)                                           \
  case Type::Class:                                                            \
    llvm_unreachable("Unexpected " Kind ": " #Class);
#define TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent")
#include "clang/AST/TypeNodes.inc"

    // Canonical-only type classes can never be sugar nodes.
#define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical")
    CANONICAL_TYPE(Atomic)
    CANONICAL_TYPE(BitInt)
    CANONICAL_TYPE(BlockPointer)
    CANONICAL_TYPE(Builtin)
    CANONICAL_TYPE(Complex)
    CANONICAL_TYPE(ConstantArray)
    CANONICAL_TYPE(ConstantMatrix)
    CANONICAL_TYPE(Enum)
    CANONICAL_TYPE(ExtVector)
    CANONICAL_TYPE(FunctionNoProto)
    CANONICAL_TYPE(FunctionProto)
    CANONICAL_TYPE(IncompleteArray)
    CANONICAL_TYPE(LValueReference)
    CANONICAL_TYPE(MemberPointer)
    CANONICAL_TYPE(ObjCInterface)
    CANONICAL_TYPE(ObjCObject)
    CANONICAL_TYPE(ObjCObjectPointer)
    CANONICAL_TYPE(Pipe)
    CANONICAL_TYPE(Pointer)
    CANONICAL_TYPE(Record)
    CANONICAL_TYPE(RValueReference)
    CANONICAL_TYPE(VariableArray)
    CANONICAL_TYPE(Vector)
#undef CANONICAL_TYPE

#undef UNEXPECTED_TYPE

  case Type::Adjusted: {
    const auto *AX = cast<AdjustedType>(X), *AY = cast<AdjustedType>(Y);
    QualType OX = AX->getOriginalType(), OY = AY->getOriginalType();
    if (!Ctx.hasSameType(OX, OY))
      return QualType();
    // FIXME: It's inefficient to have to unify the original types.
    return Ctx.getAdjustedType(Ctx.getCommonSugaredType(OX, OY),
                               Ctx.getQualifiedType(Underlying));
  }
  case Type::Decayed: {
    const auto *DX = cast<DecayedType>(X), *DY = cast<DecayedType>(Y);
    QualType OX = DX->getOriginalType(), OY = DY->getOriginalType();
    if (!Ctx.hasSameType(OX, OY))
      return QualType();
    // FIXME: It's inefficient to have to unify the original types.
    return Ctx.getDecayedType(Ctx.getCommonSugaredType(OX, OY),
                              Ctx.getQualifiedType(Underlying));
  }
  case Type::Attributed: {
    const auto *AX = cast<AttributedType>(X), *AY = cast<AttributedType>(Y);
    AttributedType::Kind Kind = AX->getAttrKind();
    if (Kind != AY->getAttrKind())
      return QualType();
    QualType MX = AX->getModifiedType(), MY = AY->getModifiedType();
    if (!Ctx.hasSameType(MX, MY))
      return QualType();
    // FIXME: It's inefficient to have to unify the modified types.
    return Ctx.getAttributedType(Kind, Ctx.getCommonSugaredType(MX, MY),
                                 Ctx.getQualifiedType(Underlying));
  }
  case Type::BTFTagAttributed: {
    const auto *BX = cast<BTFTagAttributedType>(X);
    const BTFTypeTagAttr *AX = BX->getAttr();
    // The attribute is not uniqued, so just compare the tag.
    if (AX->getBTFTypeTag() !=
        cast<BTFTagAttributedType>(Y)->getAttr()->getBTFTypeTag())
      return QualType();
    return Ctx.getBTFTagAttributedType(AX, Ctx.getQualifiedType(Underlying));
  }
  case Type::Auto: {
    const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y);

    AutoTypeKeyword KW = AX->getKeyword();
    if (KW != AY->getKeyword())
      return QualType();

    ConceptDecl *CD = ::getCommonDecl(AX->getTypeConstraintConcept(),
                                      AY->getTypeConstraintConcept());
    SmallVector<TemplateArgument, 8> As;
    if (CD &&
        getCommonTemplateArguments(Ctx, As, AX->getTypeConstraintArguments(),
                                   AY->getTypeConstraintArguments()))
      CD = nullptr; // The arguments differ, so make it unconstrained.

    // Both auto types can't be dependent, otherwise they wouldn't have been
    // sugar. This implies they can't contain unexpanded packs either.
    return Ctx.getAutoType(Ctx.getQualifiedType(Underlying), AX->getKeyword(),
                           /*IsDependent=*/false, /*IsPack=*/false, CD, As);
  }
  case Type::Decltype:
    return QualType();
  case Type::DeducedTemplateSpecialization:
    // FIXME: Try to merge these.
    return QualType();

  case Type::Elaborated: {
    const auto *EX = cast<ElaboratedType>(X), *EY = cast<ElaboratedType>(Y);
    return Ctx.getElaboratedType(
        ::getCommonTypeKeyword(EX, EY), ::getCommonNNS(Ctx, EX, EY),
        Ctx.getQualifiedType(Underlying),
        ::getCommonDecl(EX->getOwnedTagDecl(), EY->getOwnedTagDecl()));
  }
  case Type::MacroQualified: {
    const auto *MX = cast<MacroQualifiedType>(X),
               *MY = cast<MacroQualifiedType>(Y);
    const IdentifierInfo *IX = MX->getMacroIdentifier();
    if (IX != MY->getMacroIdentifier())
      return QualType();
    return Ctx.getMacroQualifiedType(Ctx.getQualifiedType(Underlying), IX);
  }
  case Type::SubstTemplateTypeParm: {
    const auto *SX = cast<SubstTemplateTypeParmType>(X),
               *SY = cast<SubstTemplateTypeParmType>(Y);
    Decl *CD =
        ::getCommonDecl(SX->getAssociatedDecl(), SY->getAssociatedDecl());
    if (!CD)
      return QualType();
    unsigned Index = SX->getIndex();
    if (Index != SY->getIndex())
      return QualType();
    auto PackIndex = SX->getPackIndex();
    if (PackIndex != SY->getPackIndex())
      return QualType();
    return Ctx.getSubstTemplateTypeParmType(Ctx.getQualifiedType(Underlying),
                                            CD, Index, PackIndex);
  }
  case Type::ObjCTypeParam:
    // FIXME: Try to merge these.
    return QualType();
  case Type::Paren:
    return Ctx.getParenType(Ctx.getQualifiedType(Underlying));

  case Type::TemplateSpecialization: {
    const auto *TX = cast<TemplateSpecializationType>(X),
               *TY = cast<TemplateSpecializationType>(Y);
    TemplateName CTN = ::getCommonTemplateName(Ctx, TX->getTemplateName(),
                                               TY->getTemplateName());
    if (!CTN.getAsVoidPointer())
      return QualType();
    SmallVector<TemplateArgument, 8> Args;
    if (getCommonTemplateArguments(Ctx, Args, TX->template_arguments(),
                                   TY->template_arguments()))
      return QualType();
    return Ctx.getTemplateSpecializationType(CTN, Args,
                                             Ctx.getQualifiedType(Underlying));
  }
  case Type::Typedef: {
    const auto *TX = cast<TypedefType>(X), *TY = cast<TypedefType>(Y);
    const TypedefNameDecl *CD = ::getCommonDecl(TX->getDecl(), TY->getDecl());
    if (!CD)
      return QualType();
    return Ctx.getTypedefType(CD, Ctx.getQualifiedType(Underlying));
  }
  case Type::TypeOf: {
    // The common sugar between two typeof expressions, where one is
    // potentially a typeof_unqual and the other is not, we unify to the
    // qualified type as that retains the most information along with the type.
    // We only return a typeof_unqual type when both types are unqual types.
    TypeOfKind Kind = TypeOfKind::Qualified;
    if (cast<TypeOfType>(X)->getKind() == cast<TypeOfType>(Y)->getKind() &&
        cast<TypeOfType>(X)->getKind() == TypeOfKind::Unqualified)
      Kind = TypeOfKind::Unqualified;
    return Ctx.getTypeOfType(Ctx.getQualifiedType(Underlying), Kind);
  }
  case Type::TypeOfExpr:
    return QualType();

  case Type::UnaryTransform: {
    const auto *UX = cast<UnaryTransformType>(X),
               *UY = cast<UnaryTransformType>(Y);
    UnaryTransformType::UTTKind KX = UX->getUTTKind();
    if (KX != UY->getUTTKind())
      return QualType();
    QualType BX = UX->getBaseType(), BY = UY->getBaseType();
    if (!Ctx.hasSameType(BX, BY))
      return QualType();
    // FIXME: It's inefficient to have to unify the base types.
    return Ctx.getUnaryTransformType(Ctx.getCommonSugaredType(BX, BY),
                                     Ctx.getQualifiedType(Underlying), KX);
  }
  case Type::Using: {
    const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y);
    const UsingShadowDecl *CD =
        ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl());
    if (!CD)
      return QualType();
    return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying));
  }
  }
  llvm_unreachable("Unhandled Type Class");
}

/// Strip sugar off \p T one step at a time, accumulating the qualifiers of
/// every step into \p QTotal. Returns the stack of stripped (still sugared)
/// nodes; on return \p T is the innermost "canonical node".
static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
  SmallVector<SplitQualType, 8> R;
  while (true) {
    QTotal += T.Quals;
    QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
    // A type that desugars to itself is the canonical node: stop.
    if (NT == QualType(T.Ty, 0))
      break;
    R.push_back(T);
    T = NT.split();
  }
  return R;
}

/// Return a type which is as sugared as possible while being the 'same' type
/// as both \p X and \p Y. With \p Unqualified, top-level qualifiers that the
/// inputs do not share are dropped.
QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
                                          bool Unqualified) {
  assert(Unqualified ? hasSameUnqualifiedType(X, Y) : hasSameType(X, Y));
  if (X == Y)
    return X;
  if (!Unqualified) {
    // A canonical type has no sugar to preserve; it is already the answer.
    if (X.isCanonical())
      return X;
    if (Y.isCanonical())
      return Y;
  }

  SplitQualType SX = X.split(), SY = Y.split();
  Qualifiers QX, QY;
  // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys,
  // until we reach their underlying "canonical nodes". Note these are not
  // necessarily canonical types, as they may still have sugared properties.
  // QX and QY will store the sum of all qualifiers in Xs and Ys respectively.
  auto Xs = ::unwrapSugar(SX, QX), Ys = ::unwrapSugar(SY, QY);
  if (SX.Ty != SY.Ty) {
    // The canonical nodes differ. Build a common canonical node out of the two,
    // unifying their sugar. This may recurse back here.
    SX.Ty =
        ::getCommonNonSugarTypeNode(*this, SX.Ty, QX, SY.Ty, QY).getTypePtr();
  } else {
    // The canonical nodes were identical: We may have desugared too much.
    // Add any common sugar back in.
    while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) {
      QX -= SX.Quals;
      QY -= SY.Quals;
      SX = Xs.pop_back_val();
      SY = Ys.pop_back_val();
    }
  }
  if (Unqualified)
    QX = Qualifiers::removeCommonQualifiers(QX, QY);
  else
    assert(QX == QY);

  // Even though the remaining sugar nodes in Xs and Ys differ, some may be
  // related. Walk up these nodes, unifying them and adding the result.
  while (!Xs.empty() && !Ys.empty()) {
    auto Underlying = SplitQualType(
        SX.Ty, Qualifiers::removeCommonQualifiers(SX.Quals, SY.Quals));
    SX = Xs.pop_back_val();
    SY = Ys.pop_back_val();
    SX.Ty = ::getCommonSugarTypeNode(*this, SX.Ty, SY.Ty, Underlying)
                .getTypePtrOrNull();
    // Stop at the first pair which is unrelated.
    if (!SX.Ty) {
      SX.Ty = Underlying.Ty;
      break;
    }
    QX -= Underlying.Quals;
  };

  // Add back the missing accumulated qualifiers, which were stripped off
  // with the sugar nodes we could not unify.
  QualType R = getQualifiedType(SX.Ty, QX);
  assert(Unqualified ? hasSameUnqualifiedType(R, X) : hasSameType(R, X));
  return R;
}

/// Map a fixed-point type to its saturated counterpart; identity for types
/// that are already saturated.
QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
  assert(Ty->isFixedPointType());

  if (Ty->isSaturatedFixedPointType()) return Ty;

  switch (Ty->castAs<BuiltinType>()->getKind()) {
    default:
      llvm_unreachable("Not a fixed point type!");
    case BuiltinType::ShortAccum:
      return SatShortAccumTy;
    case BuiltinType::Accum:
      return SatAccumTy;
    case BuiltinType::LongAccum:
      return SatLongAccumTy;
    case BuiltinType::UShortAccum:
      return SatUnsignedShortAccumTy;
    case BuiltinType::UAccum:
      return SatUnsignedAccumTy;
    case BuiltinType::ULongAccum:
      return SatUnsignedLongAccumTy;
    case BuiltinType::ShortFract:
      return SatShortFractTy;
    case BuiltinType::Fract:
      return SatFractTy;
    case BuiltinType::LongFract:
      return SatLongFractTy;
    case BuiltinType::UShortFract:
      return SatUnsignedShortFractTy;
    case BuiltinType::UFract:
      return SatUnsignedFractTy;
    case BuiltinType::ULongFract:
      return SatUnsignedLongFractTy;
  }
}

/// Map a builtin (numeric) address space to a language address space,
/// respecting the language mode (OpenCL and CUDA have their own mappings).
LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
  if (LangOpts.OpenCL)
    return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);

  if (LangOpts.CUDA)
    return getTargetInfo().getCUDABuiltinAddressSpace(AS);

  return getLangASFromTargetAS(AS);
}

// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h
template
clang::LazyGenerationalUpdatePtr< 13159 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType 13160 clang::LazyGenerationalUpdatePtr< 13161 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue( 13162 const clang::ASTContext &Ctx, Decl *Value); 13163 13164 unsigned char ASTContext::getFixedPointScale(QualType Ty) const { 13165 assert(Ty->isFixedPointType()); 13166 13167 const TargetInfo &Target = getTargetInfo(); 13168 switch (Ty->castAs<BuiltinType>()->getKind()) { 13169 default: 13170 llvm_unreachable("Not a fixed point type!"); 13171 case BuiltinType::ShortAccum: 13172 case BuiltinType::SatShortAccum: 13173 return Target.getShortAccumScale(); 13174 case BuiltinType::Accum: 13175 case BuiltinType::SatAccum: 13176 return Target.getAccumScale(); 13177 case BuiltinType::LongAccum: 13178 case BuiltinType::SatLongAccum: 13179 return Target.getLongAccumScale(); 13180 case BuiltinType::UShortAccum: 13181 case BuiltinType::SatUShortAccum: 13182 return Target.getUnsignedShortAccumScale(); 13183 case BuiltinType::UAccum: 13184 case BuiltinType::SatUAccum: 13185 return Target.getUnsignedAccumScale(); 13186 case BuiltinType::ULongAccum: 13187 case BuiltinType::SatULongAccum: 13188 return Target.getUnsignedLongAccumScale(); 13189 case BuiltinType::ShortFract: 13190 case BuiltinType::SatShortFract: 13191 return Target.getShortFractScale(); 13192 case BuiltinType::Fract: 13193 case BuiltinType::SatFract: 13194 return Target.getFractScale(); 13195 case BuiltinType::LongFract: 13196 case BuiltinType::SatLongFract: 13197 return Target.getLongFractScale(); 13198 case BuiltinType::UShortFract: 13199 case BuiltinType::SatUShortFract: 13200 return Target.getUnsignedShortFractScale(); 13201 case BuiltinType::UFract: 13202 case BuiltinType::SatUFract: 13203 return Target.getUnsignedFractScale(); 13204 case BuiltinType::ULongFract: 13205 case BuiltinType::SatULongFract: 13206 return Target.getUnsignedLongFractScale(); 13207 } 13208 } 13209 13210 
/// Number of integral bits of the given fixed-point type, as reported by the
/// target. Fract types are purely fractional, hence 0 integral bits.
unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumIBits();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumIBits();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumIBits();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumIBits();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumIBits();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumIBits();
  // All *Fract types: no integral bits at all.
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return 0;
  }
}

/// Build the llvm::FixedPointSemantics describing \p Ty. Integer types are
/// supported as well (modeled as scale-0 semantics via GetIntegerSemantics);
/// fixed-point types take their width, scale, signedness, saturation, and
/// unsigned-padding properties from this context and its target.
llvm::FixedPointSemantics
ASTContext::getFixedPointSemantics(QualType Ty) const {
  assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
         "Can only get the fixed point semantics for a "
         "fixed point or integer type.");
  if (Ty->isIntegerType())
    return llvm::FixedPointSemantics::GetIntegerSemantics(
        getIntWidth(Ty), Ty->isSignedIntegerType());

  bool isSigned = Ty->isSignedFixedPointType();
  return llvm::FixedPointSemantics(
      static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned,
      Ty->isSaturatedFixedPointType(),
      // Padding bit only applies to unsigned types on targets that have it.
      !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
}

/// Maximum representable value of the fixed-point type \p Ty.
llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty));
}

/// Minimum representable value of the fixed-point type \p Ty.
llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty));
}

/// Map an unsigned fixed-point type onto its signed counterpart
/// (e.g. unsigned _Accum -> _Accum), preserving saturation.
QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
  assert(Ty->isUnsignedFixedPointType() &&
         "Expected unsigned fixed point type");

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    llvm_unreachable("Unexpected unsigned fixed point type");
  }
}

/// Collect the features named by a target_version attribute, keeping only
/// those the target recognizes (validateCpuSupports), each prefixed with
/// '?' — presumably a marker consumed by initFeatureMap; confirm against
/// TargetInfo before relying on it.
std::vector<std::string> ASTContext::filterFunctionTargetVersionAttrs(
    const TargetVersionAttr *TV) const {
  assert(TV != nullptr);
  llvm::SmallVector<StringRef, 8> Feats;
  std::vector<std::string> ResFeats;
  TV->getFeatures(Feats);
  for (auto &Feature : Feats)
    if (Target->validateCpuSupports(Feature.str()))
      ResFeats.push_back("?" + Feature.str());
  return ResFeats;
}

/// Parse a target attribute's feature string and drop any feature the target
/// does not know about. Note substr(1) skips the leading delimiter character
/// (assumes each entry is spelled with a '+'/'-' prefix — per
/// TargetInfo::parseTargetAttr's output format).
ParsedTargetAttr
ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
  assert(TD != nullptr);
  ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(TD->getFeaturesStr());

  llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) {
    return !Target->isValidFeatureName(StringRef{Feat}.substr(1));
  });
  return ParsedAttr;
}

/// Convenience overload: delegate to the GlobalDecl-based overload when a
/// declaration is given; otherwise populate \p FeatureMap with the target's
/// default CPU/features from the command line.
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       const FunctionDecl *FD) const {
  if (FD)
    getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD));
  else
    Target->initFeatureMap(FeatureMap, getDiagnostics(),
                           Target->getTargetOpts().CPU,
                           Target->getTargetOpts().Features);
}

// Fills in the supplied string map with the set of target features for the
// passed in function. The relevant attribute — target, cpu_specific,
// target_clones, or target_version — determines how the command-line
// features are combined with the per-function ones; with no attribute the
// precomputed default FeatureMap is copied verbatim.
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       GlobalDecl GD) const {
  StringRef TargetCPU = Target->getTargetOpts().CPU;
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  if (const auto *TD = FD->getAttr<TargetAttr>()) {
    ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);

    // Make a copy of the features as passed on the command line into the
    // beginning of the additional features from the function to override.
    ParsedAttr.Features.insert(
        ParsedAttr.Features.begin(),
        Target->getTargetOpts().FeaturesAsWritten.begin(),
        Target->getTargetOpts().FeaturesAsWritten.end());

    if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU))
      TargetCPU = ParsedAttr.CPU;

    // Now populate the feature map, first with the TargetCPU which is either
    // the default or a new one from the target attribute string. Then we'll use
    // the passed in features (FeaturesAsWritten) along with the new ones from
    // the attribute.
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
                           ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    // cpu_specific: look up the dispatch features for the CPU named by this
    // multiversion variant (selected via GD's multiversion index).
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    Features.insert(Features.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    std::vector<std::string> Features;
    StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
    if (Target->getTriple().isAArch64()) {
      // TargetClones for AArch64
      if (VersionStr != "default") {
        // Each '+'-separated token becomes a '?'-prefixed feature entry.
        SmallVector<StringRef, 1> VersionFeatures;
        VersionStr.split(VersionFeatures, "+");
        for (auto &VFeature : VersionFeatures) {
          VFeature = VFeature.trim();
          Features.push_back((StringRef{"?"} + VFeature).str());
        }
      }
      Features.insert(Features.begin(),
                      Target->getTargetOpts().FeaturesAsWritten.begin(),
                      Target->getTargetOpts().FeaturesAsWritten.end());
    } else {
      // Non-AArch64: "arch=<cpu>" switches the CPU; any other non-default
      // string is a single feature to enable.
      if (VersionStr.startswith("arch="))
        TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
      else if (VersionStr != "default")
        Features.push_back((StringRef{"+"} + VersionStr).str());
    }
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
    std::vector<std::string> Feats = filterFunctionTargetVersionAttrs(TV);
    Feats.insert(Feats.begin(),
                 Target->getTargetOpts().FeaturesAsWritten.begin(),
                 Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Feats);
  } else {
    // No multiversioning attribute: reuse the precomputed default map.
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}

/// Allocate a fresh OMPTraitInfo owned by this ASTContext (kept alive in
/// OMPTraitInfoVector for the lifetime of the context).
OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
  OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
  return *OMPTraitInfoVector.back();
}

/// Diagnostic streaming for a #pragma section record: name the declaration
/// that introduced the section if known, otherwise a generic description.
const StreamingDiagnostic &clang::
operator<<(const StreamingDiagnostic &DB,
           const ASTContext::SectionInfo &Section) {
  if (Section.Decl)
    return DB << Section.Decl;
  return DB << "a prior #pragma section";
}

/// Whether \p D is eligible to be externalized: a static variable that is an
/// explicit __device__/__constant__ or __managed__ variable, or an
/// internal-linkage kernel (__global__ function).
bool ASTContext::mayExternalize(const Decl *D) const {
  bool IsStaticVar =
      isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static;
  // Only count device/constant attributes the user wrote, not implicit ones.
  bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
                              !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
                             (D->hasAttr<CUDAConstantAttr>() &&
                              !D->getAttr<CUDAConstantAttr>()->isImplicit());
  // CUDA/HIP: static managed variables need to be externalized since it is
  // a declaration in IR, therefore cannot have internal linkage. Kernels in
  // anonymous name space needs to be externalized to avoid duplicate symbols.
  return (IsStaticVar &&
          (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) ||
         (D->hasAttr<CUDAGlobalAttr>() &&
          basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) ==
              GVA_Internal);
}

/// Whether \p D must actually be externalized: it may be externalized and is
/// either managed, a kernel, or a device variable ODR-used by host code.
/// (The VarDecl cast is safe: a mayExternalize() decl without CUDAGlobalAttr
/// is a static variable per the checks above.)
bool ASTContext::shouldExternalize(const Decl *D) const {
  return mayExternalize(D) &&
         (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() ||
          CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D)));
}

/// Lazily compute and cache the hex MD5 hash of the compilation unit ID
/// (-fuse-cuid). Returns an empty StringRef when no CUID was specified.
/// Assigned from a const member, so CUIDHash must be a mutable cache member.
StringRef ASTContext::getCUIDHash() const {
  if (!CUIDHash.empty())
    return CUIDHash;
  if (LangOpts.CUID.empty())
    return StringRef();
  CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true);
  return CUIDHash;
}