//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the actions class which performs semantic analysis and
// builds an AST out of a parse stream.
//
//===----------------------------------------------------------------------===//

#include "UsedDeclVisitor.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/MultiplexExternalSemaSource.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/RISCVIntrinsicManager.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaConsumer.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/TimeProfiler.h"
#include <optional>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
  return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts);
}

ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }

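/// Returns the Darwin SDK information used for availability checking, or
/// null (after emitting a one-time warning at \p Loc) if the SDKSettings
/// data for the current sysroot could not be loaded.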
DarwinSDKInfo *
Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
                                              StringRef Platform) {
  auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking();
  if (!SDKInfo && !WarnedDarwinSDKInfoMissing) {
    Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
        << Platform;
    WarnedDarwinSDKInfoMissing = true;
  }
  return SDKInfo;
}

DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
  if (CachedDarwinSDKInfo)
    return CachedDarwinSDKInfo->get();
  auto SDKInfo = parseDarwinSDKInfo(
      PP.getFileManager().getVirtualFileSystem(),
      PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot);
  if (SDKInfo && *SDKInfo) {
    CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(std::move(**SDKInfo));
    return CachedDarwinSDKInfo->get();
  }
  if (!SDKInfo)
    llvm::consumeError(SDKInfo.takeError());
  CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
  return nullptr;
}

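/// Invent a printable name for the template type parameter that corresponds
/// to an abbreviated ('auto') function parameter, e.g. "auto:1" for an
/// unnamed parameter or "ParamName:auto" for a named one.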
IdentifierInfo *
Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                                 unsigned int Index) {
  std::string InventedName;
  llvm::raw_string_ostream OS(InventedName);

  if (!ParamName)
    OS << "auto:" << Index + 1;
  else
    OS << ParamName->getName() << ":auto";

  OS.flush();
  return &Context.Idents.get(OS.str());
}

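/// Build the printing policy used for diagnostics, starting from the
/// ASTContext policy and adjusting it for preprocessor state (e.g. printing
/// _Bool as 'bool' when 'bool' is a macro for _Bool).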
PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
                                       const Preprocessor &PP) {
  PrintingPolicy Policy = Context.getPrintingPolicy();
  // In diagnostics, we print _Bool as bool if the latter is defined as the
  // former.
  Policy.Bool = Context.getLangOpts().Bool;
  if (!Policy.Bool) {
    if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) {
      Policy.Bool = BoolMacro->isObjectLike() &&
                    BoolMacro->getNumTokens() == 1 &&
                    BoolMacro->getReplacementToken(0).is(tok::kw__Bool);
    }
  }

  // Shorten the data output if needed
  Policy.EntireContentsOfLargeArray = false;

  return Policy;
}

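/// Record the translation-unit scope and enter the translation unit as the
/// current declaration context.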
void Sema::ActOnTranslationUnitScope(Scope *S) {
  TUScope = S;
  PushDeclContext(S, Context.getTranslationUnitDecl());
}

namespace clang {
namespace sema {

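/// Preprocessor callbacks that let Sema track file enter/exit events, so it
/// can diagnose non-default #pragma pack/align state around #include and
/// emit per-source time-trace entries.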
class SemaPPCallbacks : public PPCallbacks {
  Sema *S = nullptr;
  llvm::SmallVector<SourceLocation, 8> IncludeStack;

public:
  void set(Sema &S) { this->S = &S; }

  void reset() { S = nullptr; }

  void FileChanged(SourceLocation Loc, FileChangeReason Reason,
                   SrcMgr::CharacteristicKind FileType,
                   FileID PrevFID) override {
    if (!S)
      return;
    switch (Reason) {
    case EnterFile: {
      SourceManager &SM = S->getSourceManager();
      SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc));
      if (IncludeLoc.isValid()) {
        if (llvm::timeTraceProfilerEnabled()) {
          const FileEntry *FE = SM.getFileEntryForID(SM.getFileID(Loc));
          llvm::timeTraceProfilerBegin(
              "Source", FE != nullptr ? FE->getName() : StringRef("<unknown>"));
        }

        IncludeStack.push_back(IncludeLoc);
        S->DiagnoseNonDefaultPragmaAlignPack(
            Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
            IncludeLoc);
      }
      break;
    }
    case ExitFile:
      if (!IncludeStack.empty()) {
        if (llvm::timeTraceProfilerEnabled())
          llvm::timeTraceProfilerEnd();

        S->DiagnoseNonDefaultPragmaAlignPack(
            Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
            IncludeStack.pop_back_val());
      }
      break;
    default:
      break;
    }
  }
};

} // end namespace sema
} // end namespace clang

const unsigned Sema::MaxAlignmentExponent;
const uint64_t Sema::MaximumAlignment;

Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
           TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
    : ExternalSource(nullptr), CurFPFeatures(pp.getLangOpts()),
      LangOpts(pp.getLangOpts()), PP(pp), Context(ctxt), Consumer(consumer),
      Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
      CollectStats(false), CodeCompleter(CodeCompleter), CurContext(nullptr),
      OriginalLexicalContext(nullptr), MSStructPragmaOn(false),
      MSPointerToMemberRepresentationMethod(
          LangOpts.getMSPointerToMemberRepresentationMethod()),
      VtorDispStack(LangOpts.getVtorDispMode()),
      AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
      DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
      CodeSegStack(nullptr), StrictGuardStackCheckStack(false),
      FpPragmaStack(FPOptionsOverride()), CurInitSeg(nullptr),
      VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
      IsBuildingRecoveryCallExpr(false), LateTemplateParser(nullptr),
      LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
      StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
      StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr),
      MSVCGuidDecl(nullptr), StdSourceLocationImplDecl(nullptr),
      NSNumberDecl(nullptr), NSValueDecl(nullptr), NSStringDecl(nullptr),
      StringWithUTF8StringMethod(nullptr),
      ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
      ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
      DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false),
      TUKind(TUKind), NumSFINAEErrors(0),
      FullyCheckedComparisonCategories(
          static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
      SatisfactionCache(Context), AccessCheckingSFINAE(false),
      InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
      ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr),
      DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
      ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
      CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) {
  assert(pp.TUKind == TUKind);
  TUScope = nullptr;
  isConstantEvaluatedOverride = false;

  LoadedExternalKnownNamespaces = false;
  for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
    NSNumberLiteralMethods[I] = nullptr;

  if (getLangOpts().ObjC)
    NSAPIObj.reset(new NSAPI(Context));

  if (getLangOpts().CPlusPlus)
    FieldCollector.reset(new CXXFieldCollector());

  // Tell diagnostics how to render things from the AST library.
  Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);

  // This evaluation context exists to ensure that there's always at least one
  // valid evaluation context available. It is never removed from the
  // evaluation stack.
  ExprEvalContexts.emplace_back(
      ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
      nullptr, ExpressionEvaluationContextRecord::EK_Other);

  // Initialization of data sharing attributes stack for OpenMP
  InitDataSharingAttributesStack();

  std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
      std::make_unique<sema::SemaPPCallbacks>();
  SemaPPCallbackHandler = Callbacks.get();
  PP.addPPCallbacks(std::move(Callbacks));
  SemaPPCallbackHandler->set(*this);

  CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
}

// Anchor Sema's type info to this TU.
void Sema::anchor() {}

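/// Introduce an implicit typedef named \p Name for the type \p T at
/// translation-unit scope, unless the name is already bound to a declaration.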
void Sema::addImplicitTypedef(StringRef Name, QualType T) {
  DeclarationName DN = &Context.Idents.get(Name);
  if (IdResolver.begin(DN) == IdResolver.end())
    PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
}

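/// Perform per-translation-unit initialization: hook up the AST consumer and
/// any external Sema source, and pre-declare the builtin types required by
/// the enabled languages and the target.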
void Sema::Initialize() {
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->InitializeSema(*this);

  // Tell the external Sema source about this Sema object.
  if (ExternalSemaSource *ExternalSema
        = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->InitializeSema(*this);

  // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
  // will not be able to merge any duplicate __va_list_tag decls correctly.
  VAListTagName = PP.getIdentifierInfo("__va_list_tag");

  if (!TUScope)
    return;

  // Initialize predefined 128-bit integer types, if needed.
  if (Context.getTargetInfo().hasInt128Type() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasInt128Type())) {
    // If either of the 128-bit integer types is unavailable to name lookup,
    // define it now.
    DeclarationName Int128 = &Context.Idents.get("__int128_t");
    if (IdResolver.begin(Int128) == IdResolver.end())
      PushOnScopeChains(Context.getInt128Decl(), TUScope);

    DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
    if (IdResolver.begin(UInt128) == IdResolver.end())
      PushOnScopeChains(Context.getUInt128Decl(), TUScope);
  }

  // Initialize predefined Objective-C types:
  if (getLangOpts().ObjC) {
    // If 'SEL' does not yet refer to any declarations, make it refer to the
    // predefined 'SEL'.
    DeclarationName SEL = &Context.Idents.get("SEL");
    if (IdResolver.begin(SEL) == IdResolver.end())
      PushOnScopeChains(Context.getObjCSelDecl(), TUScope);

    // If 'id' does not yet refer to any declarations, make it refer to the
    // predefined 'id'.
    DeclarationName Id = &Context.Idents.get("id");
    if (IdResolver.begin(Id) == IdResolver.end())
      PushOnScopeChains(Context.getObjCIdDecl(), TUScope);

    // Create the built-in typedef for 'Class'.
    DeclarationName Class = &Context.Idents.get("Class");
    if (IdResolver.begin(Class) == IdResolver.end())
      PushOnScopeChains(Context.getObjCClassDecl(), TUScope);

    // Create the built-in forward declaration for 'Protocol'.
    DeclarationName Protocol = &Context.Idents.get("Protocol");
    if (IdResolver.begin(Protocol) == IdResolver.end())
      PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
  }

  // Create the internal type for the *StringMakeConstantString builtins.
  DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
  if (IdResolver.begin(ConstantString) == IdResolver.end())
    PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);

  // Initialize Microsoft "predefined C++ types".
  if (getLangOpts().MSVCCompat) {
    if (getLangOpts().CPlusPlus &&
        IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
      PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class),
                        TUScope);

    addImplicitTypedef("size_t", Context.getSizeType());
  }

  // Initialize predefined OpenCL types and supported extensions and (optional)
  // core features.
  if (getLangOpts().OpenCL) {
    getOpenCLOptions().addSupport(
        Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
    addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
    addImplicitTypedef("event_t", Context.OCLEventTy);
    auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
    if (OCLCompatibleVersion >= 200) {
      if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
        addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
        addImplicitTypedef("queue_t", Context.OCLQueueTy);
      }
      if (getLangOpts().OpenCLPipes)
        addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
      addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
      addImplicitTypedef("atomic_uint",
                         Context.getAtomicType(Context.UnsignedIntTy));
      addImplicitTypedef("atomic_float",
                         Context.getAtomicType(Context.FloatTy));
      // OpenCL C v2.0 s6.13.11.6 requires that atomic_flag is implemented as a
      // 32-bit integer, and per OpenCL C v2.0 s6.1.1 int is always 32 bits
      // wide.
      addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));

      // OpenCL v2.0 s6.13.11.6:
      // - The atomic_long and atomic_ulong types are supported if the
      //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
      //   extensions are supported.
      // - The atomic_double type is only supported if double precision
      //   is supported and the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
      // - If the device address space is 64-bits, the data types
      //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
      //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.

      auto AddPointerSizeDependentTypes = [&]() {
        auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
        auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
        auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
        auto AtomicPtrDiffT =
            Context.getAtomicType(Context.getPointerDiffType());
        addImplicitTypedef("atomic_size_t", AtomicSizeT);
        addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
        addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
        addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);
      };

      if (Context.getTypeSize(Context.getSizeType()) == 32) {
        AddPointerSizeDependentTypes();
      }

      if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) {
        auto AtomicHalfT = Context.getAtomicType(Context.HalfTy);
        addImplicitTypedef("atomic_half", AtomicHalfT);
      }

      std::vector<QualType> Atomic64BitTypes;
      if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics",
                                         getLangOpts()) &&
          getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics",
                                         getLangOpts())) {
        if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) {
          auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
          addImplicitTypedef("atomic_double", AtomicDoubleT);
          Atomic64BitTypes.push_back(AtomicDoubleT);
        }
        auto AtomicLongT = Context.getAtomicType(Context.LongTy);
        auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
        addImplicitTypedef("atomic_long", AtomicLongT);
        addImplicitTypedef("atomic_ulong", AtomicULongT);

        if (Context.getTypeSize(Context.getSizeType()) == 64) {
          AddPointerSizeDependentTypes();
        }
      }
    }

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) {                   \
    addImplicitTypedef(#ExtType, Context.Id##Ty);                              \
  }
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Context.getTargetInfo().hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId)                                        \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Context.getTargetInfo().getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size)                                    \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size)                                    \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
  }

  if (Context.getTargetInfo().hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId)                                        \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Context.getTargetInfo().hasBuiltinMSVaList()) {
    DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
    if (IdResolver.begin(MSVaList) == IdResolver.end())
      PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
  }

  DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
  if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
    PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
}

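/// Tear down Sema: release function scopes, cached constraint satisfactions,
/// and OpenMP state, and detach from the consumer, the external source, and
/// the preprocessor callbacks (which outlive Sema).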
Sema::~Sema() {
  assert(InstantiatingSpecializations.empty() &&
         "failed to clean up an InstantiatingTemplate?");

  if (VisContext) FreeVisContext();

  // Kill all the active scopes.
  for (sema::FunctionScopeInfo *FSI : FunctionScopes)
    delete FSI;

  // Tell the SemaConsumer to forget about us; we're going out of scope.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->ForgetSema();

  // Detach from the external Sema source.
  if (ExternalSemaSource *ExternalSema
        = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->ForgetSema();

  // Delete cached satisfactions.
  std::vector<ConstraintSatisfaction *> Satisfactions;
  Satisfactions.reserve(SatisfactionCache.size());
  for (auto &Node : SatisfactionCache)
    Satisfactions.push_back(&Node);
  for (auto *Node : Satisfactions)
    delete Node;

  threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);

  // Destroys data sharing attributes stack for OpenMP
  DestroyDataSharingAttributesStack();

  // Detach from the PP callback handler which outlives Sema since it's owned
  // by the preprocessor.
  SemaPPCallbackHandler->reset();
}

void Sema::warnStackExhausted(SourceLocation Loc) {
  // Only warn about this once.
  if (!WarnedStackExhausted) {
    Diag(Loc, diag::warn_stack_exhausted);
    WarnedStackExhausted = true;
  }
}

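/// Run \p Fn, warning once at \p Loc and (where supported) switching to a
/// larger stack if the remaining stack space is nearly exhausted.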
void Sema::runWithSufficientStackSpace(SourceLocation Loc,
                                       llvm::function_ref<void()> Fn) {
  clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
}

/// makeUnavailableInSystemHeader - There is an error in the current
/// context. If we're still in a system header, and we can plausibly
/// make the relevant declaration unavailable instead of erroring, do
/// so and return true.
bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
                                         UnavailableAttr::ImplicitReason reason) {
  // If we're not in a function, it's an error.
  FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
  if (!fn) return false;

  // If we're in template instantiation, it's an error.
  if (inTemplateInstantiation())
    return false;

  // If that function's not in a system header, it's an error.
  if (!Context.getSourceManager().isInSystemHeader(loc))
    return false;

  // If the function is already unavailable, it's not an error.
  if (fn->hasAttr<UnavailableAttr>()) return true;

  fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
  return true;
}

ASTMutationListener *Sema::getASTMutationListener() const {
  return getASTConsumer().GetASTMutationListener();
}

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void Sema::addExternalSource(ExternalSemaSource *E) {
  assert(E && "Cannot use with NULL ptr");

  if (!ExternalSource) {
    ExternalSource = E;
    return;
  }

  if (auto *Ex = dyn_cast<MultiplexExternalSemaSource>(ExternalSource))
    Ex->AddSource(E);
  else
    ExternalSource = new MultiplexExternalSemaSource(ExternalSource.get(), E);
}

/// Print out statistics about the semantic analysis.
void Sema::PrintStats() const {
  llvm::errs() << "\n*** Semantic Analysis Stats:\n";
  llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";

  BumpAlloc.PrintStats();
  AnalysisWarnings.PrintStats();
}

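/// Warn when an expression of nullable type is implicitly converted to a
/// _Nonnull destination type, since the nullability information is lost.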
void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
                                               QualType SrcType,
                                               SourceLocation Loc) {
  std::optional<NullabilityKind> ExprNullability = SrcType->getNullability();
  if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
                           *ExprNullability != NullabilityKind::NullableResult))
    return;

  std::optional<NullabilityKind> TypeNullability = DstType->getNullability();
  if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
    return;

  Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
}

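/// In C++11 and later, suggest 'nullptr' when a zero constant is implicitly
/// converted to a pointer, unless the conversion occurs in a context where
/// the warning would only be noise (system macros, defaulted or synthesized
/// comparison operators, and so on).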
void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) {
  // nullptr only exists from C++11 on, so don't warn on its absence earlier.
  if (!getLangOpts().CPlusPlus11)
    return;

  if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
    return;
  if (E->IgnoreParenImpCasts()->getType()->isNullPtrType())
    return;

  if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
                      E->getBeginLoc()))
    return;

  // Don't diagnose the conversion from a 0 literal to a null pointer argument
  // in a synthesized call to operator<=>.
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.back().Kind ==
          CodeSynthesisContext::RewritingOperatorAsSpaceship)
    return;

  // Ignore null pointers in defaulted comparison operators.
  FunctionDecl *FD = getCurFunctionDecl();
  if (FD && FD->isDefaulted()) {
    return;
  }

  // If it is a macro from a system header, and if the macro name is not
  // "NULL", do not warn.
  SourceLocation MaybeMacroLoc = E->getBeginLoc();
  if (Diags.getSuppressSystemWarnings() &&
      SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
      !findMacroSpelling(MaybeMacroLoc, "NULL"))
    return;

  Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant)
      << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
/// If there is already an implicit cast, merge into the existing one.
/// The result is of the given category.
ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
                                   CastKind Kind, ExprValueKind VK,
                                   const CXXCastPath *BasePath,
                                   CheckedConversionKind CCK) {
#ifndef NDEBUG
  if (VK == VK_PRValue && !E->isPRValue()) {
    switch (Kind) {
    default:
      llvm_unreachable(
          ("can't implicitly cast glvalue to prvalue with this cast "
           "kind: " +
           std::string(CastExpr::getCastKindName(Kind)))
              .c_str());
    case CK_Dependent:
    case CK_LValueToRValue:
    case CK_ArrayToPointerDecay:
    case CK_FunctionToPointerDecay:
    case CK_ToVoid:
    case CK_NonAtomicToAtomic:
      break;
    }
  }
  assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) &&
         "can't cast prvalue to glvalue");
#endif

  diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
  diagnoseZeroToNullptrConversion(Kind, E);

  QualType ExprTy = Context.getCanonicalType(E->getType());
  QualType TypeTy = Context.getCanonicalType(Ty);

  if (ExprTy == TypeTy)
    return E;

  if (Kind == CK_ArrayToPointerDecay) {
    // C++1z [conv.array]: The temporary materialization conversion is applied.
    // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
    if (getLangOpts().CPlusPlus && E->isPRValue()) {
      // The temporary is an lvalue in C++98 and an xvalue otherwise.
      ExprResult Materialized = CreateMaterializeTemporaryExpr(
          E->getType(), E, !getLangOpts().CPlusPlus11);
      if (Materialized.isInvalid())
        return ExprError();
      E = Materialized.get();
    }
    // C17 6.7.1p6 footnote 124: The implementation can treat any register
    // declaration simply as an auto declaration. However, whether or not
    // addressable storage is actually used, the address of any part of an
    // object declared with storage-class specifier register cannot be
    // computed, either explicitly (by use of the unary & operator as discussed
    // in 6.5.3.2) or implicitly (by converting an array name to a pointer as
    // discussed in 6.3.2.1). Thus, the only operator that can be applied to an
    // array declared with storage-class specifier register is sizeof.
    if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) {
      if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
        if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
          if (VD->getStorageClass() == SC_Register) {
            Diag(E->getExprLoc(), diag::err_typecheck_address_of)
                << /*register variable*/ 3 << E->getSourceRange();
            return ExprError();
          }
        }
      }
    }
  }

  if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
    if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
      ImpCast->setType(Ty);
      ImpCast->setValueKind(VK);
      return E;
    }
  }

  return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK,
                                  CurFPFeatureOverrides());
}

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
  switch (ScalarTy->getScalarTypeKind()) {
  case Type::STK_Bool: return CK_NoOp;
  case Type::STK_CPointer: return CK_PointerToBoolean;
  case Type::STK_BlockPointer: return CK_PointerToBoolean;
  case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
  case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
  case Type::STK_Integral: return CK_IntegralToBoolean;
  case Type::STK_Floating: return CK_FloatingToBoolean;
  case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
  case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
  case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
  }
  llvm_unreachable("unknown scalar type kind");
}

/// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
  if (D->getMostRecentDecl()->isUsed())
    return true;

  if (D->isExternallyVisible())
    return true;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // If this is a function template and none of its specializations is used,
    // we should warn.
    if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
      for (const auto *Spec : Template->specializations())
        if (ShouldRemoveFromUnused(SemaRef, Spec))
          return true;

    // UnusedFileScopedDecls stores the first declaration.
    // The declaration may have become a definition, so check again.
    const FunctionDecl *DeclToCheck;
    if (FD->hasBody(DeclToCheck))
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);

    // Later redecls may add new information resulting in not having to warn,
    // so check again.
    DeclToCheck = FD->getMostRecentDecl();
    if (DeclToCheck != FD)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    // If a variable usable in constant expressions is referenced,
    // don't warn if it isn't used: if the value of a variable is required
    // for the computation of a constant expression, it doesn't make sense to
    // warn even if the variable isn't odr-used. (isReferenced doesn't
    // precisely reflect that, but it's a decent approximation.)
    if (VD->isReferenced() &&
        VD->mightBeUsableInConstantExpressions(SemaRef->Context))
      return true;

    if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
      // If this is a variable template and none of its specializations is
      // used, we should warn.
      for (const auto *Spec : Template->specializations())
        if (ShouldRemoveFromUnused(SemaRef, Spec))
          return true;

    // UnusedFileScopedDecls stores the first declaration.
    // The declaration may have become a definition, so check again.
    const VarDecl *DeclToCheck = VD->getDefinition();
    if (DeclToCheck)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);

    // Later redecls may add new information resulting in not having to warn,
    // so check again.
    DeclToCheck = VD->getMostRecentDecl();
    if (DeclToCheck != VD)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
  }

  return false;
}

static bool isFunctionOrVarDeclExternC(NamedDecl *ND) {
  if (auto *FD = dyn_cast<FunctionDecl>(ND))
    return FD->isExternC();
  return cast<VarDecl>(ND)->isExternC();
}

/// Determine whether ND is an external-linkage function or variable whose
/// type has no linkage.
bool Sema::isExternalWithNoLinkageType(ValueDecl *VD) {
  // Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
  // because we also want to catch the case where its type has VisibleNoLinkage,
  // which does not affect the linkage of VD.
  return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
         !isExternalFormalLinkage(VD->getType()->getLinkage()) &&
         !isFunctionOrVarDeclExternC(VD);
}

/// Obtains a sorted list of functions and variables that are undefined but
/// ODR-used.
void Sema::getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
  for (const auto &UndefinedUse : UndefinedButUsed) {
    NamedDecl *ND = UndefinedUse.first;

    // Ignore declarations that have become invalid.
    if (ND->isInvalidDecl()) continue;

    // __attribute__((weakref)) is basically a definition.
    if (ND->hasAttr<WeakRefAttr>()) continue;

    if (isa<CXXDeductionGuideDecl>(ND))
      continue;

    if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
      // An exported function will always be emitted when defined, so even if
      // the function is inline, it doesn't have to be emitted in this TU. An
      // imported function implies that it has been exported somewhere else.
      continue;
    }

    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
      if (FD->isDefined())
        continue;
      if (FD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(FD) &&
          !FD->getMostRecentDecl()->isInlined() &&
          !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;
      if (FD->getBuiltinID())
        continue;
    } else {
      auto *VD = cast<VarDecl>(ND);
      if (VD->hasDefinition() != VarDecl::DeclarationOnly)
        continue;
      if (VD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(VD) &&
          !VD->getMostRecentDecl()->isInline() &&
          !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;

      // Skip VarDecls that lack formal definitions but which we know are in
      // fact defined somewhere.
      if (VD->isKnownToBeDefined())
        continue;
    }

    Undefined.push_back(std::make_pair(ND, UndefinedUse.second));
  }
}

/// checkUndefinedButUsed - Check for undefined objects with internal linkage
/// or that are inline.
static void checkUndefinedButUsed(Sema &S) {
  if (S.UndefinedButUsed.empty()) return;

  // Collect all the still-undefined entities with internal linkage.
  SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
  S.getUndefinedButUsed(Undefined);
  if (Undefined.empty()) return;

  for (auto Undef : Undefined) {
    ValueDecl *VD = cast<ValueDecl>(Undef.first);
    SourceLocation UseLoc = Undef.second;

    if (S.isExternalWithNoLinkageType(VD)) {
      // C++ [basic.link]p8:
      //   A type without linkage shall not be used as the type of a variable
      //   or function with external linkage unless
      //    -- the entity has C language linkage
      //    -- the entity is not odr-used or is defined in the same TU
      //
      // As an extension, accept this in cases where the type is externally
      // visible, since the function or variable actually can be defined in
      // another translation unit in that case.
      S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage())
                                    ? diag::ext_undefined_internal_type
                                    : diag::err_undefined_internal_type)
          << isa<VarDecl>(VD) << VD;
    } else if (!VD->isExternallyVisible()) {
      // FIXME: We can promote this to an error. The function or variable can't
      // be defined anywhere else, so the program must necessarily violate the
      // one definition rule.
      bool IsImplicitBase = false;
      if (const auto *BaseD = dyn_cast<FunctionDecl>(VD)) {
        auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>();
        if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive(
                          llvm::omp::TraitProperty::
                              implementation_extension_disable_implicit_base)) {
          const auto *Func = cast<FunctionDecl>(
              cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl());
          IsImplicitBase = BaseD->isImplicit() &&
                           Func->getIdentifier()->isMangledOpenMPVariantName();
        }
      }
      if (!S.getLangOpts().OpenMP || !IsImplicitBase)
        S.Diag(VD->getLocation(), diag::warn_undefined_internal)
            << isa<VarDecl>(VD) << VD;
    } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
      (void)FD;
      assert(FD->getMostRecentDecl()->isInlined() &&
             "used object requires definition but isn't inline or internal?");
      // FIXME: This is ill-formed; we should reject.
      S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD;
    } else {
      assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
             "used var requires definition but isn't inline or internal?");
      S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD;
    }
    if (UseLoc.isValid())
      S.Diag(UseLoc, diag::note_used_here);
  }

  S.UndefinedButUsed.clear();
}

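/// Merge any #pragma weak identifiers recorded by the external Sema source
/// (e.g. a PCH or module file) into WeakUndeclaredIdentifiers.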
void Sema::LoadExternalWeakUndeclaredIdentifiers() {
  if (!ExternalSource)
    return;

  SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
  ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
  for (auto &WeakID : WeakIDs)
    (void)WeakUndeclaredIdentifiers[WeakID.first].insert(WeakID.second);
}

typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;

/// Returns true if all methods and nested classes of the given
/// CXXRecordDecl are defined in this translation unit.
///
/// Should only be called from ActOnEndOfTranslationUnit so that all
/// definitions are actually read.
static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
                                            RecordCompleteMap &MNCComplete) {
  RecordCompleteMap::iterator Cache = MNCComplete.find(RD);
  if (Cache != MNCComplete.end())
    return Cache->second;
  if (!RD->isCompleteDefinition())
    return false;
  bool Complete = true;
  for (DeclContext::decl_iterator I = RD->decls_begin(),
                                  E = RD->decls_end();
       I != E && Complete; ++I) {
    if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
      Complete = M->isDefined() || M->isDefaulted() ||
                 (M->isPure() && !isa<CXXDestructorDecl>(M));
    else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
      // If the template function is marked as late template parsed at this
      // point, it has not been instantiated and therefore we have not
      // performed semantic analysis on it yet, so we cannot know if the type
      // can be considered complete.
      Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
                  F->getTemplatedDecl()->isDefined();
    else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) {
      if (R->isInjectedClassName())
        continue;
      if (R->hasDefinition())
        Complete = MethodsAndNestedClassesComplete(R->getDefinition(),
                                                   MNCComplete);
      else
        Complete = false;
    }
  }
  MNCComplete[RD] = Complete;
  return Complete;
}

/// Returns true if the given CXXRecordDecl is fully defined in this
/// translation unit, i.e. all methods are defined or pure virtual and all
/// friends, friend functions and nested classes are fully defined in this
/// translation unit.
///
/// Should only be called from ActOnEndOfTranslationUnit so that all
/// definitions are actually read.
static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
                                 RecordCompleteMap &RecordsComplete,
                                 RecordCompleteMap &MNCComplete) {
  RecordCompleteMap::iterator Cache = RecordsComplete.find(RD);
  if (Cache != RecordsComplete.end())
    return Cache->second;
  bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
  for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
                                      E = RD->friend_end();
       I != E && Complete; ++I) {
    // Check if friend classes and methods are complete.
    if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
      // Friend classes are available as the TypeSourceInfo of the FriendDecl.
      if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
        Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete);
      else
        Complete = false;
    } else {
      // Friend functions are available through the NamedDecl of FriendDecl.
      if (const FunctionDecl *FD =
              dyn_cast<FunctionDecl>((*I)->getFriendDecl()))
        Complete = FD->isDefined();
      else
        // This is a template friend, give up.
        Complete = false;
    }
  }
  RecordsComplete[RD] = Complete;
  return Complete;
}

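/// Emit -Wunused-local-typedef warnings for the candidates collected during
/// parsing (plus any provided by the external source), then clear the list.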
void Sema::emitAndClearUnusedLocalTypedefWarnings() {
  if (ExternalSource)
    ExternalSource->ReadUnusedLocalTypedefNameCandidates(
        UnusedLocalTypedefNameCandidates);
  for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
    if (TD->isReferenced())
      continue;
    Diag(TD->getLocation(), diag::warn_unused_local_typedef)
        << isa<TypeAliasDecl>(TD) << TD->getDeclName();
  }
  UnusedLocalTypedefNameCandidates.clear();
}

/// This is called before the very first declaration in the translation unit
/// is parsed. Note that the ASTContext may have already injected some
/// declarations.
void Sema::ActOnStartOfTranslationUnit() {
  if (getLangOpts().CPlusPlusModules &&
      getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
    HandleStartOfHeaderUnit();
  else if (getLangOpts().ModulesTS &&
           (getLangOpts().getCompilingModule() ==
                LangOptions::CMK_ModuleInterface ||
            getLangOpts().getCompilingModule() == LangOptions::CMK_None)) {
    // We start in an implied global module fragment.
    SourceLocation StartOfTU =
        SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
    ActOnGlobalModuleFragmentDecl(StartOfTU);
    ModuleScopes.back().ImplicitGlobalModuleFragment = true;
  }
}

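/// Finish a translation-unit fragment (global module fragment, module
/// purview, or private module fragment): flush pending and late-parsed
/// template instantiations, deferred diagnostics, and uncorrected delayed
/// typos.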
void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
  // No explicit actions are required at the end of the global module fragment.
  if (Kind == TUFragmentKind::Global)
    return;

  // Transfer late parsed template instantiations over to the pending template
  // instantiation list. During normal compilation, the late template parser
  // will be installed and instantiating these templates will succeed.
  //
  // If we are building a TU prefix for serialization, it is also safe to
  // transfer these over, even though they are not parsed. The end of the TU
  // should be outside of any eager template instantiation scope, so when this
  // AST is deserialized, these templates will not be parsed until the end of
  // the combined TU.
  PendingInstantiations.insert(PendingInstantiations.end(),
                               LateParsedInstantiations.begin(),
                               LateParsedInstantiations.end());
  LateParsedInstantiations.clear();

  // If DefineUsedVTables ends up marking any virtual member functions it
  // might lead to more pending template instantiations, which we then need
  // to instantiate.
  DefineUsedVTables();

  // C++: Perform implicit template instantiations.
  //
  // FIXME: When we perform these implicit instantiations, we do not
  // carefully keep track of the point of instantiation (C++ [temp.point]).
  // This means that name lookup that occurs within the template
  // instantiation will always happen at the end of the translation unit,
  // so it will find some names that are not required to be found. This is
  // valid, but we could do better by diagnosing if an instantiation uses a
  // name that was not visible at its first point of instantiation.
  if (ExternalSource) {
    // Load pending instantiations from the external source.
    SmallVector<PendingImplicitInstantiation, 4> Pending;
    ExternalSource->ReadPendingInstantiations(Pending);
    for (auto PII : Pending)
      if (auto Func = dyn_cast<FunctionDecl>(PII.first))
        Func->setInstantiationIsPending(true);
    PendingInstantiations.insert(PendingInstantiations.begin(),
                                 Pending.begin(), Pending.end());
  }

  {
    llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
    PerformPendingInstantiations();
  }

  emitDeferredDiags();

  assert(LateParsedInstantiations.empty() &&
         "end of TU template instantiation should not create more "
         "late-parsed templates");

  // Report diagnostics for uncorrected delayed typos. Ideally all of them
  // should have been corrected by that time, but it is very hard to cover all
  // cases in practice.
  for (const auto &Typo : DelayedTypos) {
    // We pass an empty TypoCorrection to indicate no correction was performed.
    Typo.second.DiagHandler(TypoCorrection());
  }
  DelayedTypos.clear();
}

/// ActOnEndOfTranslationUnit - This is called at the very end of the
/// translation unit when EOF is reached and all but the top-level scope is
/// popped.
void Sema::ActOnEndOfTranslationUnit() {
  assert(DelayedDiagnostics.getCurrentPool() == nullptr
         && "reached end of translation unit with a pool attached?");

  // If code completion is enabled, don't perform any end-of-translation-unit
  // work.
  if (PP.isCodeCompletionEnabled())
    return;

  // Complete translation units and modules define vtables and perform implicit
  // instantiations. PCH files do not.
  if (TUKind != TU_Prefix) {
    DiagnoseUseOfUnimplementedSelectors();

    ActOnEndOfTranslationUnitFragment(
        !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
                                     Module::PrivateModuleFragment
            ? TUFragmentKind::Private
            : TUFragmentKind::Normal);

    if (LateTemplateParserCleanup)
      LateTemplateParserCleanup(OpaqueParser);

    CheckDelayedMemberExceptionSpecs();
  } else {
    // If we are building a TU prefix for serialization, it is safe to transfer
    // these over, even though they are not parsed. The end of the TU should be
    // outside of any eager template instantiation scope, so when this AST is
    // deserialized, these templates will not be parsed until the end of the
    // combined TU.
    PendingInstantiations.insert(PendingInstantiations.end(),
                                 LateParsedInstantiations.begin(),
                                 LateParsedInstantiations.end());
    LateParsedInstantiations.clear();

    if (LangOpts.PCHInstantiateTemplates) {
      llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
      PerformPendingInstantiations();
    }
  }

  DiagnoseUnterminatedPragmaAlignPack();
  DiagnoseUnterminatedPragmaAttribute();
  DiagnoseUnterminatedOpenMPDeclareTarget();

  // All delayed member exception specs should be checked or we end up accepting
  // incompatible declarations.
  assert(DelayedOverridingExceptionSpecChecks.empty());
  assert(DelayedEquivalentExceptionSpecChecks.empty());

  // All dllexport classes should have been processed already.
  assert(DelayedDllExportClasses.empty());
  assert(DelayedDllExportMemberFunctions.empty());

  // Remove file scoped decls that turned out to be used.
  UnusedFileScopedDecls.erase(
      std::remove_if(UnusedFileScopedDecls.begin(nullptr, true),
                     UnusedFileScopedDecls.end(),
                     [this](const DeclaratorDecl *DD) {
                       return ShouldRemoveFromUnused(this, DD);
                     }),
      UnusedFileScopedDecls.end());

  if (TUKind == TU_Prefix) {
    // Translation unit prefixes don't need any of the checking below.
    if (!PP.isIncrementalProcessingEnabled())
      TUScope = nullptr;
    return;
  }

  // Check for #pragma weak identifiers that were never declared
  LoadExternalWeakUndeclaredIdentifiers();
  for (const auto &WeakIDs : WeakUndeclaredIdentifiers) {
    if (WeakIDs.second.empty())
      continue;

    Decl *PrevDecl = LookupSingleName(TUScope, WeakIDs.first, SourceLocation(),
                                      LookupOrdinaryName);
    if (PrevDecl != nullptr &&
        !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
      for (const auto &WI : WeakIDs.second)
        Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type)
            << "'weak'" << ExpectedVariableOrFunction;
    else
      for (const auto &WI : WeakIDs.second)
        Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared)
            << WeakIDs.first;
  }

  if (LangOpts.CPlusPlus11 &&
      !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation()))
    CheckDelegatingCtorCycles();

  if (!Diags.hasErrorOccurred()) {
    if (ExternalSource)
      ExternalSource->ReadUndefinedButUsed(UndefinedButUsed);
    checkUndefinedButUsed(*this);
  }

  // A global-module-fragment is only permitted within a module unit.
  bool DiagnosedMissingModuleDeclaration = false;
  if (!ModuleScopes.empty() &&
      ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment &&
      !ModuleScopes.back().ImplicitGlobalModuleFragment) {
    Diag(ModuleScopes.back().BeginLoc,
         diag::err_module_declaration_missing_after_global_module_introducer);
    DiagnosedMissingModuleDeclaration = true;
  }

  if (TUKind == TU_Module) {
    // If we are building a module interface unit, we need to have seen the
    // module declaration by now.
    if (getLangOpts().getCompilingModule() ==
            LangOptions::CMK_ModuleInterface &&
        !isCurrentModulePurview() && !DiagnosedMissingModuleDeclaration) {
      // FIXME: Make a better guess as to where to put the module declaration.
      Diag(getSourceManager().getLocForStartOfFile(
               getSourceManager().getMainFileID()),
           diag::err_module_declaration_missing);
    }

    // If we are building a module, resolve all of the exported declarations
    // now.
    if (Module *CurrentModule = PP.getCurrentModule()) {
      ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();

      SmallVector<Module *, 2> Stack;
      Stack.push_back(CurrentModule);
      while (!Stack.empty()) {
        Module *Mod = Stack.pop_back_val();

        // Resolve the exported declarations and conflicts.
        // FIXME: Actually complain, once we figure out how to teach the
        // diagnostic client to deal with complaints in the module map at this
        // point.
        ModMap.resolveExports(Mod, /*Complain=*/false);
        ModMap.resolveUses(Mod, /*Complain=*/false);
        ModMap.resolveConflicts(Mod, /*Complain=*/false);

        // Queue the submodules, so their exports will also be resolved.
        Stack.append(Mod->submodule_begin(), Mod->submodule_end());
      }
    }

    // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
    // modules when they are built, not every time they are used.
    emitAndClearUnusedLocalTypedefWarnings();
  }

  // C++ standard modules. Diagnose cases where a function is declared inline
  // in the module purview but has no definition before the end of the TU or
  // the start of a Private Module Fragment (if one is present).
  if (!PendingInlineFuncDecls.empty()) {
    for (auto *D : PendingInlineFuncDecls) {
      if (auto *FD = dyn_cast<FunctionDecl>(D)) {
        bool DefInPMF = false;
        if (auto *FDD = FD->getDefinition()) {
          assert(FDD->getOwningModule() &&
                 FDD->getOwningModule()->isModulePurview());
          DefInPMF = FDD->getOwningModule()->isPrivateModule();
          if (!DefInPMF)
            continue;
        }
        Diag(FD->getLocation(), diag::err_export_inline_not_defined)
            << DefInPMF;
        // If we have a PMF it should be at the end of the ModuleScopes.
        if (DefInPMF &&
            ModuleScopes.back().Module->Kind == Module::PrivateModuleFragment) {
          Diag(ModuleScopes.back().BeginLoc,
               diag::note_private_module_fragment);
        }
      }
    }
    PendingInlineFuncDecls.clear();
  }

  // C99 6.9.2p2:
  //   A declaration of an identifier for an object that has file
  //   scope without an initializer, and without a storage-class
  //   specifier or with the storage-class specifier static,
  //   constitutes a tentative definition. If a translation unit
  //   contains one or more tentative definitions for an identifier,
  //   and the translation unit contains no external definition for
  //   that identifier, then the behavior is exactly as if the
  //   translation unit contains a file scope declaration of that
  //   identifier, with the composite type as of the end of the
  //   translation unit, with an initializer equal to 0.
  llvm::SmallSet<VarDecl *, 32> Seen;
  for (TentativeDefinitionsType::iterator
           T = TentativeDefinitions.begin(ExternalSource.get()),
           TEnd = TentativeDefinitions.end();
       T != TEnd; ++T) {
    VarDecl *VD = (*T)->getActingDefinition();

    // If the tentative definition was completed, getActingDefinition() returns
    // null. If we've already seen this variable before, insert()'s second
    // return value is false.
    if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second)
      continue;

    if (const IncompleteArrayType *ArrayT
            = Context.getAsIncompleteArrayType(VD->getType())) {
      // Set the length of the array to 1 (C99 6.9.2p5).
      Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
      llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true);
      QualType T = Context.getConstantArrayType(ArrayT->getElementType(), One,
                                                nullptr, ArrayType::Normal, 0);
      VD->setType(T);
    } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
                                   diag::err_tentative_def_incomplete_type))
      VD->setInvalidDecl();

    // No initialization is performed for a tentative definition.
    CheckCompleteVariableDeclaration(VD);

    // Notify the consumer that we've completed a tentative definition.
    if (!VD->isInvalidDecl())
      Consumer.CompleteTentativeDefinition(VD);
  }

  for (auto *D : ExternalDeclarations) {
    if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
      continue;

    Consumer.CompleteExternalDeclaration(D);
  }

  // If there were errors, disable 'unused' warnings since they will mostly be
  // noise. Don't warn for a use from a module: either we should warn on all
  // file-scope declarations in modules or not at all, but whether the
  // declaration is used is immaterial.
  if (!Diags.hasErrorOccurred() && TUKind != TU_Module) {
    // Output warning for unused file scoped decls.
    for (UnusedFileScopedDeclsType::iterator
             I = UnusedFileScopedDecls.begin(ExternalSource.get()),
             E = UnusedFileScopedDecls.end();
         I != E; ++I) {
      if (ShouldRemoveFromUnused(this, *I))
        continue;

      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
        const FunctionDecl *DiagD;
        if (!FD->hasBody(DiagD))
          DiagD = FD;
        if (DiagD->isDeleted())
          continue; // Deleted functions are supposed to be unused.
        if (DiagD->isReferenced()) {
          if (isa<CXXMethodDecl>(DiagD))
            Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
                << DiagD;
          else {
            if (FD->getStorageClass() == SC_Static &&
                !FD->isInlineSpecified() &&
                !SourceMgr.isInMainFile(
                    SourceMgr.getExpansionLoc(FD->getLocation())))
              Diag(DiagD->getLocation(),
                   diag::warn_unneeded_static_internal_decl)
                  << DiagD;
            else
              Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
                  << /*function*/ 0 << DiagD;
          }
        } else {
          if (FD->getDescribedFunctionTemplate())
            Diag(DiagD->getLocation(), diag::warn_unused_template)
                << /*function*/ 0 << DiagD;
          else
            Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
                                           ? diag::warn_unused_member_function
                                           : diag::warn_unused_function)
                << DiagD;
        }
      } else {
        const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
        if (!DiagD)
          DiagD = cast<VarDecl>(*I);
        if (DiagD->isReferenced()) {
          Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
              << /*variable*/ 1 << DiagD;
        } else if (DiagD->getType().isConstQualified()) {
          const SourceManager &SM = SourceMgr;
          if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
              !PP.getLangOpts().IsHeaderFile)
            Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
                << DiagD;
        } else {
          if (DiagD->getDescribedVarTemplate())
            Diag(DiagD->getLocation(), diag::warn_unused_template)
                << /*variable*/ 1 << DiagD;
          else
            Diag(DiagD->getLocation(), diag::warn_unused_variable) << DiagD;
        }
      }
    }

    emitAndClearUnusedLocalTypedefWarnings();
  }

  if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
    // FIXME: Load additional unused private field candidates from the external
    // source.
    RecordCompleteMap RecordsComplete;
    RecordCompleteMap MNCComplete;
    for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(),
                                    E = UnusedPrivateFields.end(); I != E; ++I) {
      const NamedDecl *D = *I;
      const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
      if (RD && !RD->isUnion() &&
          IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
        Diag(D->getLocation(), diag::warn_unused_private_field)
            << D->getDeclName();
      }
    }
  }

  if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) {
    if (ExternalSource)
      ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
    for (const auto &DeletedFieldInfo : DeleteExprs) {
      for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
        AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first,
                                  DeleteExprLoc.second);
      }
    }
  }

  // Check we've noticed that we're no longer parsing the initializer for every
  // variable. If we miss cases, then at best we have a performance issue and
  // at worst a rejects-valid bug.
  assert(ParsingInitForAutoVars.empty() &&
         "Didn't unmark var as having its initializer parsed");

  if (!PP.isIncrementalProcessingEnabled())
    TUScope = nullptr;
}

//===----------------------------------------------------------------------===//
// Helper functions.
//===----------------------------------------------------------------------===//

1448 DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) {
1449 DeclContext *DC = CurContext;
1450
1451 while (true) {
1452 if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) ||
1453 isa<RequiresExprBodyDecl>(DC)) {
1454 DC = DC->getParent();
1455 } else if (!AllowLambda && isa<CXXMethodDecl>(DC) &&
1456 cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
1457 cast<CXXRecordDecl>(DC->getParent())->isLambda()) {
1458 DC = DC->getParent()->getParent();
1459 } else break;
1460 }
1461
1462 return DC;
1463 }
1464
1465 /// getCurFunctionDecl - If inside of a function body, this returns a pointer
1466 /// to the function decl for the function being parsed. If we're currently
1467 /// in a 'block', this returns the containing context.
1468 FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) {
1469 DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
1470 return dyn_cast<FunctionDecl>(DC);
1471 }
1472
1473 ObjCMethodDecl *Sema::getCurMethodDecl() {
1474 DeclContext *DC = getFunctionLevelDeclContext();
1475 while (isa<RecordDecl>(DC))
1476 DC = DC->getParent();
1477 return dyn_cast<ObjCMethodDecl>(DC);
1478 }
1479
1480 NamedDecl *Sema::getCurFunctionOrMethodDecl() {
1481 DeclContext *DC = getFunctionLevelDeclContext();
1482 if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC))
1483 return cast<NamedDecl>(DC);
1484 return nullptr;
1485 }
1486
1487 LangAS Sema::getDefaultCXXMethodAddrSpace() const {
1488 if (getLangOpts().OpenCL)
1489 return getASTContext().getDefaultOpenCLPointeeAddrSpace();
1490 return LangAS::Default;
1491 }
1492
1493 void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
1494 // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
1495 // and yet we also use the current diag ID on the DiagnosticsEngine. This has
1496 // been made more painfully obvious by the refactor that introduced this
1497 // function, but it is possible that the incoming argument can be
1498 // eliminated. If it truly cannot be (for example, there is some reentrancy
1499 // issue I am not seeing yet), then there should at least be a clarifying
1500 // comment somewhere.
1501 if (std::optional<TemplateDeductionInfo *> Info = isSFINAEContext()) {
1502 switch (DiagnosticIDs::getDiagnosticSFINAEResponse(
1503 Diags.getCurrentDiagID())) {
1504 case DiagnosticIDs::SFINAE_Report:
1505 // We'll report the diagnostic below.
1506 break;
1507
1508 case DiagnosticIDs::SFINAE_SubstitutionFailure:
1509 // Count this failure so that we know that template argument deduction
1510 // has failed.
1511 ++NumSFINAEErrors;
1512
1513 // Make a copy of this suppressed diagnostic and store it with the
1514 // template-deduction information.
1515 if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1516 Diagnostic DiagInfo(&Diags);
1517 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
1518 PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1519 }
1520
1521 Diags.setLastDiagnosticIgnored(true);
1522 Diags.Clear();
1523 return;
1524
1525 case DiagnosticIDs::SFINAE_AccessControl: {
1526 // Per C++ Core Issue 1170, access control is part of SFINAE.
1527 // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
1528 // make access control a part of SFINAE for the purposes of checking
1529 // type traits.
1530 if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11)
1531 break;
1532
1533 SourceLocation Loc = Diags.getCurrentDiagLoc();
1534
1535 // Suppress this diagnostic.
1536 ++NumSFINAEErrors;
1537
1538 // Make a copy of this suppressed diagnostic and store it with the
1539 // template-deduction information.
1540 if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1541 Diagnostic DiagInfo(&Diags);
1542 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
1543 PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1544 }
1545
1546 Diags.setLastDiagnosticIgnored(true);
1547 Diags.Clear();
1548
1549       // Now that the diagnostic state is clear, produce a C++98
1550       // compatibility warning.
1551 Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);
1552
1553 // The last diagnostic which Sema produced was ignored. Suppress any
1554 // notes attached to it.
1555 Diags.setLastDiagnosticIgnored(true);
1556 return;
1557 }
1558
1559 case DiagnosticIDs::SFINAE_Suppress:
1560 // Make a copy of this suppressed diagnostic and store it with the
1561       // template-deduction information.
1562 if (*Info) {
1563 Diagnostic DiagInfo(&Diags);
1564 (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
1565 PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1566 }
1567
1568 // Suppress this diagnostic.
1569 Diags.setLastDiagnosticIgnored(true);
1570 Diags.Clear();
1571 return;
1572 }
1573 }
1574
1575 // Copy the diagnostic printing policy over the ASTContext printing policy.
1576 // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292
1577 Context.setPrintingPolicy(getPrintingPolicy());
1578
1579 // Emit the diagnostic.
1580 if (!Diags.EmitCurrentDiagnostic())
1581 return;
1582
1583 // If this is not a note, and we're in a template instantiation
1584 // that is different from the last template instantiation where
1585 // we emitted an error, print a template instantiation
1586 // backtrace.
1587 if (!DiagnosticIDs::isBuiltinNote(DiagID))
1588 PrintContextStack();
1589 }
1590
1591 Sema::SemaDiagnosticBuilder
1592 Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) {
1593 return Diag(Loc, PD.getDiagID(), DeferHint) << PD;
1594 }
1595
1596 bool Sema::hasUncompilableErrorOccurred() const {
1597 if (getDiagnostics().hasUncompilableErrorOccurred())
1598 return true;
1599 auto *FD = dyn_cast<FunctionDecl>(CurContext);
1600 if (!FD)
1601 return false;
1602 auto Loc = DeviceDeferredDiags.find(FD);
1603 if (Loc == DeviceDeferredDiags.end())
1604 return false;
1605 for (auto PDAt : Loc->second) {
1606 if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID()))
1607 return true;
1608 }
1609 return false;
1610 }
1611
1612 // Print notes showing how we can reach FD starting from an a priori
1613 // known-callable function.
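// For example (an illustrative chain, not actual diagnostic text): if a
// known-emitted kernel calls g(), and g() calls FD, the notes walk the
// recorded DeviceKnownEmittedFns entries and report, roughly, that FD is
// called by g(), and that g() is called by the kernel.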
1614 static void emitCallStackNotes(Sema &S, FunctionDecl *FD) {
1615 auto FnIt = S.DeviceKnownEmittedFns.find(FD);
1616 while (FnIt != S.DeviceKnownEmittedFns.end()) {
1617 // Respect error limit.
1618 if (S.Diags.hasFatalErrorOccurred())
1619 return;
1620 DiagnosticBuilder Builder(
1621 S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
1622 Builder << FnIt->second.FD;
1623 FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD);
1624 }
1625 }
1626
1627 namespace {
1628
1629 /// Helper class that emits deferred diagnostic messages when an entity that
1630 /// directly or indirectly uses the function containing the deferred
1631 /// diagnostics is known to be emitted.
1632 ///
1633 /// During parsing of AST, certain diagnostic messages are recorded as deferred
1634 /// diagnostics since it is unknown whether the functions containing such
1635 /// diagnostics will be emitted. A list of potentially emitted functions and
1636 /// diagnostics will be emitted. A list of potentially emitted functions, and
1637 /// of variables that may trigger emission of functions, is also recorded.
1638 /// DeferredDiagnosticsEmitter recursively visits the functions used by each
1639 /// function in order to emit the deferred diagnostics.
1640 /// During the visit, certain OpenMP directives or initializer of variables
1641 /// During the visit, certain OpenMP directives or the initializers of
1642 /// variables with certain OpenMP attributes cause subsequent visits of
1643 /// functions to enter a state called the OpenMP device context in this
1644 /// exited. This state can change the emission states of subsequent uses
1645 /// of functions.
1646 ///
1647 /// Conceptually the functions or variables to be visited form a use graph
1648 /// where the parent node uses the child node. At any point of the visit,
1649 /// the tree nodes traversed from the tree root to the current node form a use
1650 /// stack. The emission state of the current node depends on two factors:
1651 /// 1. the emission state of the root node
1652 /// 2. whether the current node is in OpenMP device context
1653 /// If the function is decided to be emitted, its contained deferred diagnostics
1654 /// are emitted, together with the information about the use stack.
1655 ///
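/// As a minimal illustration (hypothetical code, not part of this file), a
/// deferred diagnostic recorded while parsing 'f' is emitted only once the
/// visit reaches 'f' from a root that is known to be emitted:
///
/// \code
///   void f() { /* contains a deferred diagnostic */ }
///   void g() { f(); }
///   int main() { g(); }  // main is emitted, so the use stack main -> g -> f
///                        // causes f's deferred diagnostics to be emitted.
/// \endcode
///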
1656 class DeferredDiagnosticsEmitter
1657 : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
1658 public:
1659 typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;
1660
1661 // Whether the function is already in the current use-path.
1662 llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
1663
1664 // The current use-path.
1665 llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;
1666
1667 // Whether the visiting of the function has been done. Done[0] is for the
1668 // case not in OpenMP device context. Done[1] is for the case in OpenMP
1669 // device context. We need two sets because diagnostics emission may be
1670 // different depending on whether it is in OpenMP device context.
1671 llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];
1672
1673 // Emission state of the root node of the current use graph.
1674 bool ShouldEmitRootNode;
1675
1676 // Current OpenMP device context level. It is initialized to 0 and each
1677 // entering of device context increases it by 1 and each exit decreases
1678 // it by 1. Non-zero value indicates it is currently in device context.
1679 unsigned InOMPDeviceContext;
1680
1681   DeferredDiagnosticsEmitter(Sema &S)
1682 : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}
1683
1684   bool shouldVisitDiscardedStmt() const { return false; }
1685
1686   void VisitOMPTargetDirective(OMPTargetDirective *Node) {
1687 ++InOMPDeviceContext;
1688 Inherited::VisitOMPTargetDirective(Node);
1689 --InOMPDeviceContext;
1690 }
1691
1692   void visitUsedDecl(SourceLocation Loc, Decl *D) {
1693 if (isa<VarDecl>(D))
1694 return;
1695 if (auto *FD = dyn_cast<FunctionDecl>(D))
1696 checkFunc(Loc, FD);
1697 else
1698 Inherited::visitUsedDecl(Loc, D);
1699 }
1700
1701   void checkVar(VarDecl *VD) {
1702 assert(VD->isFileVarDecl() &&
1703 "Should only check file-scope variables");
1704 if (auto *Init = VD->getInit()) {
1705 auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
1706 bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
1707 *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
1708 if (IsDev)
1709 ++InOMPDeviceContext;
1710 this->Visit(Init);
1711 if (IsDev)
1712 --InOMPDeviceContext;
1713 }
1714 }
1715
1716   void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
1717 auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
1718 FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
1719 if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
1720 S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD))
1721 return;
1722 // Finalize analysis of OpenMP-specific constructs.
1723 if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
1724 (ShouldEmitRootNode || InOMPDeviceContext))
1725 S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
1726 if (Caller)
1727 S.DeviceKnownEmittedFns[FD] = {Caller, Loc};
1728 // Always emit deferred diagnostics for the direct users. This does not
1729     // lead to an explosion of diagnostics since each user is visited at most
1730 // twice.
1731 if (ShouldEmitRootNode || InOMPDeviceContext)
1732 emitDeferredDiags(FD, Caller);
1733 // Do not revisit a function if the function body has been completely
1734 // visited before.
1735 if (!Done.insert(FD).second)
1736 return;
1737 InUsePath.insert(FD);
1738 UsePath.push_back(FD);
1739 if (auto *S = FD->getBody()) {
1740 this->Visit(S);
1741 }
1742 UsePath.pop_back();
1743 InUsePath.erase(FD);
1744 }
1745
1746   void checkRecordedDecl(Decl *D) {
1747 if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1748 ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) ==
1749 Sema::FunctionEmissionStatus::Emitted;
1750 checkFunc(SourceLocation(), FD);
1751 } else
1752 checkVar(cast<VarDecl>(D));
1753 }
1754
1755 // Emit any deferred diagnostics for FD
1756   void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
1757 auto It = S.DeviceDeferredDiags.find(FD);
1758 if (It == S.DeviceDeferredDiags.end())
1759 return;
1760 bool HasWarningOrError = false;
1761 bool FirstDiag = true;
1762 for (PartialDiagnosticAt &PDAt : It->second) {
1763 // Respect error limit.
1764 if (S.Diags.hasFatalErrorOccurred())
1765 return;
1766 const SourceLocation &Loc = PDAt.first;
1767 const PartialDiagnostic &PD = PDAt.second;
1768 HasWarningOrError |=
1769 S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >=
1770 DiagnosticsEngine::Warning;
1771 {
1772 DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
1773 PD.Emit(Builder);
1774 }
1775       // Emit the note on the first diagnostic, in case too many diagnostics
1776       // cause the note not to be emitted.
1777 if (FirstDiag && HasWarningOrError && ShowCallStack) {
1778 emitCallStackNotes(S, FD);
1779 FirstDiag = false;
1780 }
1781 }
1782 }
1783 };
1784 } // namespace
1785
1786 void Sema::emitDeferredDiags() {
1787 if (ExternalSource)
1788 ExternalSource->ReadDeclsToCheckForDeferredDiags(
1789 DeclsToCheckForDeferredDiags);
1790
1791 if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
1792 DeclsToCheckForDeferredDiags.empty())
1793 return;
1794
1795 DeferredDiagnosticsEmitter DDE(*this);
1796 for (auto *D : DeclsToCheckForDeferredDiags)
1797 DDE.checkRecordedDecl(D);
1798 }
1799
1800 // In CUDA, there are some constructs which may appear in semantically-valid
1801 // code, but trigger errors if we ever generate code for the function in which
1802 // they appear. Essentially every construct you're not allowed to use on the
1803 // device falls into this category, because you are allowed to use these
1804 // constructs in a __host__ __device__ function, but only if that function is
1805 // never codegen'ed on the device.
1806 //
1807 // To handle semantic checking for these constructs, we keep track of the set of
1808 // functions we know will be emitted, either because we could tell a priori that
1809 // they would be emitted, or because they were transitively called by a
1810 // known-emitted function.
1811 //
1812 // We also keep a partial call graph of which not-known-emitted functions call
1813 // which other not-known-emitted functions.
1814 //
1815 // When we see something which is illegal if the current function is emitted
1816 // (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or
1817 // CheckCUDACall), we first check if the current function is known-emitted. If
1818 // so, we immediately output the diagnostic.
1819 //
1820 // Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags
1821 // until we discover that the function is known-emitted, at which point we take
1822 // it out of this map and emit the diagnostic.
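//
// As an illustrative (hypothetical) example: exceptions are not available in
// device code, so a __host__ __device__ function that throws is only
// diagnosed once it becomes known-emitted for the device:
//
//   __host__ __device__ void f(bool b) {
//     if (b) throw 0;  // deferred: reported only if f is emitted for device
//   }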
1823
1824 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
1825 unsigned DiagID,
1826 FunctionDecl *Fn, Sema &S)
1827 : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
1828 ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
1829 switch (K) {
1830 case K_Nop:
1831 break;
1832 case K_Immediate:
1833 case K_ImmediateWithCallStack:
1834 ImmediateDiag.emplace(
1835 ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
1836 break;
1837 case K_Deferred:
1838 assert(Fn && "Must have a function to attach the deferred diag to.");
1839 auto &Diags = S.DeviceDeferredDiags[Fn];
1840 PartialDiagId.emplace(Diags.size());
1841 Diags.emplace_back(Loc, S.PDiag(DiagID));
1842 break;
1843 }
1844 }
1845
1846 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
1847 : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
1848 ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
1849 PartialDiagId(D.PartialDiagId) {
1850 // Clean the previous diagnostics.
1851 D.ShowCallStack = false;
1852 D.ImmediateDiag.reset();
1853 D.PartialDiagId.reset();
1854 }
1855
1856 Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
1857 if (ImmediateDiag) {
1858 // Emit our diagnostic and, if it was a warning or error, output a callstack
1859 // if Fn isn't a priori known-emitted.
1860 bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
1861 DiagID, Loc) >= DiagnosticsEngine::Warning;
1862 ImmediateDiag.reset(); // Emit the immediate diag.
1863 if (IsWarningOrError && ShowCallStack)
1864 emitCallStackNotes(S, Fn);
1865 } else {
1866 assert((!PartialDiagId || ShowCallStack) &&
1867 "Must always show call stack for deferred diags.");
1868 }
1869 }
1870
1871 Sema::SemaDiagnosticBuilder
1872 Sema::targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD) {
1873 FD = FD ? FD : getCurFunctionDecl();
1874 if (LangOpts.OpenMP)
1875 return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
1876 : diagIfOpenMPHostCode(Loc, DiagID, FD);
1877 if (getLangOpts().CUDA)
1878 return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
1879 : CUDADiagIfHostCode(Loc, DiagID);
1880
1881 if (getLangOpts().SYCLIsDevice)
1882 return SYCLDiagIfDeviceCode(Loc, DiagID);
1883
1884 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
1885 FD, *this);
1886 }
1887
1888 Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
1889 bool DeferHint) {
1890 bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
1891 bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag &&
1892 DiagnosticIDs::isDeferrable(DiagID) &&
1893 (DeferHint || DeferDiags || !IsError);
1894 auto SetIsLastErrorImmediate = [&](bool Flag) {
1895 if (IsError)
1896 IsLastErrorImmediate = Flag;
1897 };
1898 if (!ShouldDefer) {
1899 SetIsLastErrorImmediate(true);
1900 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc,
1901 DiagID, getCurFunctionDecl(), *this);
1902 }
1903
1904 SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice
1905 ? CUDADiagIfDeviceCode(Loc, DiagID)
1906 : CUDADiagIfHostCode(Loc, DiagID);
1907 SetIsLastErrorImmediate(DB.isImmediate());
1908 return DB;
1909 }
1910
1911 void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
1912 if (isUnevaluatedContext() || Ty.isNull())
1913 return;
1914
1915   // The original idea behind the checkTypeSupport function is that unused
1916   // declarations can be replaced with an array of bytes of the same size during
1917   // codegen. Such a replacement doesn't seem to be possible for types without a
1918   // constant byte size, like zero-length arrays, so do a deep check for SYCL.
1919 if (D && LangOpts.SYCLIsDevice) {
1920 llvm::DenseSet<QualType> Visited;
1921 deepTypeCheckForSYCLDevice(Loc, Visited, D);
1922 }
1923
1924 Decl *C = cast<Decl>(getCurLexicalContext());
1925
1926 // Memcpy operations for structs containing a member with unsupported type
1927 // are ok, though.
1928 if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
1929 if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
1930 MD->isTrivial())
1931 return;
1932
1933 if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
1934 if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
1935 return;
1936 }
1937
1938 // Try to associate errors with the lexical context, if that is a function, or
1939 // the value declaration otherwise.
1940 FunctionDecl *FD = isa<FunctionDecl>(C) ? cast<FunctionDecl>(C)
1941 : dyn_cast_or_null<FunctionDecl>(D);
1942
1943 auto CheckDeviceType = [&](QualType Ty) {
1944 if (Ty->isDependentType())
1945 return;
1946
1947 if (Ty->isBitIntType()) {
1948 if (!Context.getTargetInfo().hasBitIntType()) {
1949 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
1950 if (D)
1951 PD << D;
1952 else
1953 PD << "expression";
1954 targetDiag(Loc, PD, FD)
1955 << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/
1956 << Ty << Context.getTargetInfo().getTriple().str();
1957 }
1958 return;
1959 }
1960
1961 // Check if we are dealing with two 'long double' but with different
1962 // semantics.
1963 bool LongDoubleMismatched = false;
1964 if (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128) {
1965 const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(Ty);
1966 if ((&Sem != &llvm::APFloat::PPCDoubleDouble() &&
1967 !Context.getTargetInfo().hasFloat128Type()) ||
1968 (&Sem == &llvm::APFloat::PPCDoubleDouble() &&
1969 !Context.getTargetInfo().hasIbm128Type()))
1970 LongDoubleMismatched = true;
1971 }
1972
1973 if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
1974 (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
1975 (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
1976 (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
1977 !Context.getTargetInfo().hasInt128Type()) ||
1978 LongDoubleMismatched) {
1979 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
1980 if (D)
1981 PD << D;
1982 else
1983 PD << "expression";
1984
1985 if (targetDiag(Loc, PD, FD)
1986 << true /*show bit size*/
1987 << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
1988 << false /*return*/ << Context.getTargetInfo().getTriple().str()) {
1989 if (D)
1990 D->setInvalidDecl();
1991 }
1992 if (D)
1993 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
1994 }
1995 };
1996
1997 auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
1998 if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice) ||
1999 LangOpts.CUDAIsDevice)
2000 CheckDeviceType(Ty);
2001
2002 QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
2003 const TargetInfo &TI = Context.getTargetInfo();
2004 if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) {
2005 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2006 if (D)
2007 PD << D;
2008 else
2009 PD << "expression";
2010
2011 if (Diag(Loc, PD, FD)
2012 << false /*show bit size*/ << 0 << Ty << false /*return*/
2013 << Context.getTargetInfo().getTriple().str()) {
2014 if (D)
2015 D->setInvalidDecl();
2016 }
2017 if (D)
2018 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2019 }
2020
2021 bool IsDouble = UnqualTy == Context.DoubleTy;
2022 bool IsFloat = UnqualTy == Context.FloatTy;
2023 if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) {
2024 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2025 if (D)
2026 PD << D;
2027 else
2028 PD << "expression";
2029
2030 if (Diag(Loc, PD, FD)
2031 << false /*show bit size*/ << 0 << Ty << true /*return*/
2032 << Context.getTargetInfo().getTriple().str()) {
2033 if (D)
2034 D->setInvalidDecl();
2035 }
2036 if (D)
2037 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2038 }
2039
2040     // Don't allow SVE types in functions without an SVE target.
2041 if (Ty->isSVESizelessBuiltinType() && FD && FD->hasBody()) {
2042 llvm::StringMap<bool> CallerFeatureMap;
2043 Context.getFunctionFeatureMap(CallerFeatureMap, FD);
2044 if (!Builtin::evaluateRequiredTargetFeatures(
2045 "sve", CallerFeatureMap))
2046 Diag(D->getLocation(), diag::err_sve_vector_in_non_sve_target) << Ty;
2047 }
2048 };
2049
2050 CheckType(Ty);
2051 if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) {
2052 for (const auto &ParamTy : FPTy->param_types())
2053 CheckType(ParamTy);
2054 CheckType(FPTy->getReturnType(), /*IsRetTy=*/true);
2055 }
2056 if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
2057 CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
2058 }
2059
2060 /// Looks through the macro-expansion chain for the given
2061 /// location, looking for a macro expansion with the given name.
2062 /// If one is found, returns true and sets the location to that
2063 /// expansion loc.
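/// For example (illustrative macro name), if 'locref' points inside the
/// expansion of a macro 'MY_ASSERT', calling findMacroSpelling(locref,
/// "MY_ASSERT") moves 'locref' to where 'MY_ASSERT' was written and returns
/// true.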
2064 bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
2065 SourceLocation loc = locref;
2066 if (!loc.isMacroID()) return false;
2067
2068 // There's no good way right now to look at the intermediate
2069 // expansions, so just jump to the expansion location.
2070 loc = getSourceManager().getExpansionLoc(loc);
2071
2072 // If that's written with the name, stop here.
2073 SmallString<16> buffer;
2074 if (getPreprocessor().getSpelling(loc, buffer) == name) {
2075 locref = loc;
2076 return true;
2077 }
2078 return false;
2079 }
2080
2081 /// Determines the active Scope associated with the given declaration
2082 /// context.
2083 ///
2084 /// This routine maps a declaration context to the active Scope object that
2085 /// represents that declaration context in the parser. It is typically used
2086 /// from "scope-less" code (e.g., template instantiation, lazy creation of
2087 /// declarations) that injects a name for name-lookup purposes and, therefore,
2088 /// must update the Scope.
2089 ///
2090 /// \returns The scope corresponding to the given declaration context, or NULL
2091 /// if no such scope is open.
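///
/// For example (a sketch of one use), code instantiating a declaration into a
/// namespace can call getScopeForContext on that namespace's DeclContext to
/// find the Scope to update, and must be prepared for a null result when the
/// parser has already left that namespace.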
2092 Scope *Sema::getScopeForContext(DeclContext *Ctx) {
2093
2094 if (!Ctx)
2095 return nullptr;
2096
2097 Ctx = Ctx->getPrimaryContext();
2098 for (Scope *S = getCurScope(); S; S = S->getParent()) {
2099 // Ignore scopes that cannot have declarations. This is important for
2100 // out-of-line definitions of static class members.
2101 if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
2102 if (DeclContext *Entity = S->getEntity())
2103 if (Ctx == Entity->getPrimaryContext())
2104 return S;
2105 }
2106
2107 return nullptr;
2108 }
2109
2110 /// Enter a new function scope
2111 void Sema::PushFunctionScope() {
2112 if (FunctionScopes.empty() && CachedFunctionScope) {
2113 // Use CachedFunctionScope to avoid allocating memory when possible.
2114 CachedFunctionScope->Clear();
2115 FunctionScopes.push_back(CachedFunctionScope.release());
2116 } else {
2117 FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
2118 }
2119 if (LangOpts.OpenMP)
2120 pushOpenMPFunctionRegion();
2121 }
2122
2123 void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
2124 FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
2125 BlockScope, Block));
2126 }
2127
2128 LambdaScopeInfo *Sema::PushLambdaScope() {
2129 LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
2130 FunctionScopes.push_back(LSI);
2131 return LSI;
2132 }
2133
2134 void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
2135 if (LambdaScopeInfo *const LSI = getCurLambda()) {
2136 LSI->AutoTemplateParameterDepth = Depth;
2137 return;
2138 }
2139 llvm_unreachable(
2140 "Remove assertion if intentionally called in a non-lambda context.");
2141 }
2142
2143 // Check that the type of the VarDecl has an accessible copy constructor and
2144 // resolve its destructor's exception specification.
2145 // This also performs initialization of block variables when they are moved
2146 // to the heap. It uses the same rules as applicable for implicit moves
2147 // according to the C++ standard in effect ([class.copy.elision]p3).
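// As an illustration (hypothetical code), a __block variable of class type
// captured by an escaping block needs this copy-initialization so that it can
// be moved to the heap:
//
//   __block SomeClass Obj;        // 'SomeClass' and 'use' are placeholders
//   void (^B)() = ^{ use(Obj); }; // escaping capture of Obj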
2148 static void checkEscapingByref(VarDecl *VD, Sema &S) {
2149 QualType T = VD->getType();
2150 EnterExpressionEvaluationContext scope(
2151 S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
2152 SourceLocation Loc = VD->getLocation();
2153 Expr *VarRef =
2154 new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
2155 ExprResult Result;
2156 auto IE = InitializedEntity::InitializeBlock(Loc, T);
2157 if (S.getLangOpts().CPlusPlus2b) {
2158 auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr,
2159 VK_XValue, FPOptionsOverride());
2160 Result = S.PerformCopyInitialization(IE, SourceLocation(), E);
2161 } else {
2162 Result = S.PerformMoveOrCopyInitialization(
2163 IE, Sema::NamedReturnInfo{VD, Sema::NamedReturnInfo::MoveEligible},
2164 VarRef);
2165 }
2166
2167 if (!Result.isInvalid()) {
2168 Result = S.MaybeCreateExprWithCleanups(Result);
2169 Expr *Init = Result.getAs<Expr>();
2170 S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init));
2171 }
2172
2173 // The destructor's exception specification is needed when IRGen generates
2174 // block copy/destroy functions. Resolve it here.
2175 if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
2176 if (CXXDestructorDecl *DD = RD->getDestructor()) {
2177 auto *FPT = DD->getType()->getAs<FunctionProtoType>();
2178 S.ResolveExceptionSpec(Loc, FPT);
2179 }
2180 }
2181
2182 static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
2183 // Set the EscapingByref flag of __block variables captured by
2184 // escaping blocks.
2185 for (const BlockDecl *BD : FSI.Blocks) {
2186 for (const BlockDecl::Capture &BC : BD->captures()) {
2187 VarDecl *VD = BC.getVariable();
2188 if (VD->hasAttr<BlocksAttr>()) {
2189 // Nothing to do if this is a __block variable captured by a
2190 // non-escaping block.
2191 if (BD->doesNotEscape())
2192 continue;
2193 VD->setEscapingByref();
2194 }
2195 // Check whether the captured variable is or contains an object of
2196 // non-trivial C union type.
2197 QualType CapType = BC.getVariable()->getType();
2198 if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
2199 CapType.hasNonTrivialToPrimitiveCopyCUnion())
2200 S.checkNonTrivialCUnion(BC.getVariable()->getType(),
2201 BD->getCaretLocation(),
2202 Sema::NTCUC_BlockCapture,
2203 Sema::NTCUK_Destruct|Sema::NTCUK_Copy);
2204 }
2205 }
2206
2207 for (VarDecl *VD : FSI.ByrefBlockVars) {
2208 // __block variables might require us to capture a copy-initializer.
2209 if (!VD->isEscapingByref())
2210 continue;
2211 // It's currently invalid to ever have a __block variable with an
2212 // array type; should we diagnose that here?
2213 // Regardless, we don't want to ignore array nesting when
2214 // constructing this copy.
2215 if (VD->getType()->isStructureOrClassType())
2216 checkEscapingByref(VD, S);
2217 }
2218 }
2219
2220 /// Pop a function (or block or lambda or captured region) scope from the stack.
2221 ///
2222 /// \param WP The warning policy to use for CFG-based warnings, or null if such
2223 /// warnings should not be produced.
2224 /// \param D The declaration corresponding to this function scope, if producing
2225 /// CFG-based warnings.
2226 /// \param BlockType The type of the block expression, if D is a BlockDecl.
2227 Sema::PoppedFunctionScopePtr
2228 Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
2229 const Decl *D, QualType BlockType) {
2230 assert(!FunctionScopes.empty() && "mismatched push/pop!");
2231
2232 markEscapingByrefs(*FunctionScopes.back(), *this);
2233
2234 PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
2235 PoppedFunctionScopeDeleter(this));
2236
2237 if (LangOpts.OpenMP)
2238 popOpenMPFunctionRegion(Scope.get());
2239
2240 // Issue any analysis-based warnings.
2241 if (WP && D)
2242 AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType);
2243 else
2244 for (const auto &PUD : Scope->PossiblyUnreachableDiags)
2245 Diag(PUD.Loc, PUD.PD);
2246
2247 return Scope;
2248 }
2249
2250 void Sema::PoppedFunctionScopeDeleter::
2251 operator()(sema::FunctionScopeInfo *Scope) const {
2252 // Stash the function scope for later reuse if it's for a normal function.
2253 if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
2254 Self->CachedFunctionScope.reset(Scope);
2255 else
2256 delete Scope;
2257 }
2258
2259 void Sema::PushCompoundScope(bool IsStmtExpr) {
2260 getCurFunction()->CompoundScopes.push_back(
2261 CompoundScopeInfo(IsStmtExpr, getCurFPFeatures()));
2262 }
2263
2264 void Sema::PopCompoundScope() {
2265 FunctionScopeInfo *CurFunction = getCurFunction();
2266 assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
2267
2268 CurFunction->CompoundScopes.pop_back();
2269 }
2270
2271 /// Determine whether any errors occurred within this function/method/
2272 /// block.
2273 bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
2274 return getCurFunction()->hasUnrecoverableErrorOccurred();
2275 }
2276
2277 void Sema::setFunctionHasBranchIntoScope() {
2278 if (!FunctionScopes.empty())
2279 FunctionScopes.back()->setHasBranchIntoScope();
2280 }
2281
2282 void Sema::setFunctionHasBranchProtectedScope() {
2283 if (!FunctionScopes.empty())
2284 FunctionScopes.back()->setHasBranchProtectedScope();
2285 }
2286
2287 void Sema::setFunctionHasIndirectGoto() {
2288 if (!FunctionScopes.empty())
2289 FunctionScopes.back()->setHasIndirectGoto();
2290 }
2291
2292 void Sema::setFunctionHasMustTail() {
2293 if (!FunctionScopes.empty())
2294 FunctionScopes.back()->setHasMustTail();
2295 }
2296
2297 BlockScopeInfo *Sema::getCurBlock() {
2298 if (FunctionScopes.empty())
2299 return nullptr;
2300
2301 auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back());
2302 if (CurBSI && CurBSI->TheDecl &&
2303 !CurBSI->TheDecl->Encloses(CurContext)) {
2304 // We have switched contexts due to template instantiation.
2305 assert(!CodeSynthesisContexts.empty());
2306 return nullptr;
2307 }
2308
2309 return CurBSI;
2310 }
2311
2312 FunctionScopeInfo *Sema::getEnclosingFunction() const {
2313 if (FunctionScopes.empty())
2314 return nullptr;
2315
2316 for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
2317 if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
2318 continue;
2319 return FunctionScopes[e];
2320 }
2321 return nullptr;
2322 }
2323
2324 LambdaScopeInfo *Sema::getEnclosingLambda() const {
2325 for (auto *Scope : llvm::reverse(FunctionScopes)) {
2326 if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) {
2327 if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext)) {
2328 // We have switched contexts due to template instantiation.
2329 // FIXME: We should swap out the FunctionScopes during code synthesis
2330 // so that we don't need to check for this.
2331 assert(!CodeSynthesisContexts.empty());
2332 return nullptr;
2333 }
2334 return LSI;
2335 }
2336 }
2337 return nullptr;
2338 }
2339
2340 LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
2341 if (FunctionScopes.empty())
2342 return nullptr;
2343
2344 auto I = FunctionScopes.rbegin();
2345 if (IgnoreNonLambdaCapturingScope) {
2346 auto E = FunctionScopes.rend();
2347 while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I))
2348 ++I;
2349 if (I == E)
2350 return nullptr;
2351 }
2352 auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I);
2353 if (CurLSI && CurLSI->Lambda &&
2354 !CurLSI->Lambda->Encloses(CurContext)) {
2355 // We have switched contexts due to template instantiation.
2356 assert(!CodeSynthesisContexts.empty());
2357 return nullptr;
2358 }
2359
2360 return CurLSI;
2361 }
2362
2363 // We have a generic lambda if we parsed auto parameters, or we have
2364 // an associated template parameter list.
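// For example, [](auto x) { return x; } is generic via its auto parameter,
// and []<typename T>(T x) { return x; } is generic via its explicit template
// parameter list.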
2365 LambdaScopeInfo *Sema::getCurGenericLambda() {
2366 if (LambdaScopeInfo *LSI = getCurLambda()) {
2367 return (LSI->TemplateParams.size() ||
2368 LSI->GLTemplateParameterList) ? LSI : nullptr;
2369 }
2370 return nullptr;
2371 }
2372
2373
2374 void Sema::ActOnComment(SourceRange Comment) {
2375 if (!LangOpts.RetainCommentsFromSystemHeaders &&
2376 SourceMgr.isInSystemHeader(Comment.getBegin()))
2377 return;
2378 RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
2379 if (RC.isAlmostTrailingComment()) {
2380 SourceRange MagicMarkerRange(Comment.getBegin(),
2381 Comment.getBegin().getLocWithOffset(3));
2382 StringRef MagicMarkerText;
2383 switch (RC.getKind()) {
2384 case RawComment::RCK_OrdinaryBCPL:
2385 MagicMarkerText = "///<";
2386 break;
2387 case RawComment::RCK_OrdinaryC:
2388 MagicMarkerText = "/**<";
2389 break;
2390 default:
2391 llvm_unreachable("if this is an almost Doxygen comment, "
2392 "it should be ordinary");
2393 }
2394 Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) <<
2395 FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
2396 }
2397 Context.addComment(RC);
2398 }
2399
2400 // Pin this vtable to this file.
2401 ExternalSemaSource::~ExternalSemaSource() {}
2402 char ExternalSemaSource::ID;
2403
2404 void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
2405 void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }
2406
2407 void ExternalSemaSource::ReadKnownNamespaces(
2408 SmallVectorImpl<NamespaceDecl *> &Namespaces) {
2409 }
2410
2411 void ExternalSemaSource::ReadUndefinedButUsed(
2412 llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}
2413
2414 void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
2415 FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
2416
2417 /// Figure out if an expression could be turned into a call.
2418 ///
2419 /// Use this when trying to recover from an error where the programmer may have
2420 /// written just the name of a function instead of actually calling it.
2421 ///
2422 /// \param E - The expression to examine.
2423 /// \param ZeroArgCallReturnTy - If the expression can be turned into a call
2424 /// with no arguments, this parameter is set to the type returned by such a
2425 /// call; otherwise, it is set to an empty QualType.
2426 /// \param OverloadSet - If the expression is an overloaded function
2427 /// name, this parameter is populated with the decls of the various overloads.
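///
/// For example (illustrative), given 'int f();', recovery for the erroneous
/// 'int x = f;' can use this to learn that the zero-argument call 'f()' would
/// return 'int' and then suggest appending '()'.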
2428 bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
2429 UnresolvedSetImpl &OverloadSet) {
2430 ZeroArgCallReturnTy = QualType();
2431 OverloadSet.clear();
2432
2433 const OverloadExpr *Overloads = nullptr;
2434 bool IsMemExpr = false;
2435 if (E.getType() == Context.OverloadTy) {
2436 OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E));
2437
2438 // Ignore overloads that are pointer-to-member constants.
2439 if (FR.HasFormOfMemberPointer)
2440 return false;
2441
2442 Overloads = FR.Expression;
2443 } else if (E.getType() == Context.BoundMemberTy) {
2444 Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens());
2445 IsMemExpr = true;
2446 }
2447
2448 bool Ambiguous = false;
2449 bool IsMV = false;
2450
2451 if (Overloads) {
2452 for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
2453 DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
2454 OverloadSet.addDecl(*it);
2455
2456 // Check whether the function is a non-template, non-member which takes no
2457 // arguments.
2458 if (IsMemExpr)
2459 continue;
2460 if (const FunctionDecl *OverloadDecl
2461 = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) {
2462 if (OverloadDecl->getMinRequiredArguments() == 0) {
2463 if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
2464 (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
2465 OverloadDecl->isCPUSpecificMultiVersion()))) {
2466 ZeroArgCallReturnTy = QualType();
2467 Ambiguous = true;
2468 } else {
2469 ZeroArgCallReturnTy = OverloadDecl->getReturnType();
2470 IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
2471 OverloadDecl->isCPUSpecificMultiVersion();
2472 }
2473 }
2474 }
2475 }
2476
2477 // If it's not a member, use better machinery to try to resolve the call
2478 if (!IsMemExpr)
2479 return !ZeroArgCallReturnTy.isNull();
2480 }
2481
2482 // Attempt to call the member with no arguments - this will correctly handle
2483 // member templates with defaults/deduction of template arguments, overloads
2484 // with default arguments, etc.
2485 if (IsMemExpr && !E.isTypeDependent()) {
2486 Sema::TentativeAnalysisScope Trap(*this);
2487 ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(),
2488 std::nullopt, SourceLocation());
2489 if (R.isUsable()) {
2490 ZeroArgCallReturnTy = R.get()->getType();
2491 return true;
2492 }
2493 return false;
2494 }
2495
2496 if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
2497 if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
2498 if (Fun->getMinRequiredArguments() == 0)
2499 ZeroArgCallReturnTy = Fun->getReturnType();
2500 return true;
2501 }
2502 }
2503
2504 // We don't have an expression that's convenient to get a FunctionDecl from,
2505 // but we can at least check if the type is "function of 0 arguments".
2506 QualType ExprTy = E.getType();
2507 const FunctionType *FunTy = nullptr;
2508 QualType PointeeTy = ExprTy->getPointeeType();
2509 if (!PointeeTy.isNull())
2510 FunTy = PointeeTy->getAs<FunctionType>();
2511 if (!FunTy)
2512 FunTy = ExprTy->getAs<FunctionType>();
2513
2514 if (const FunctionProtoType *FPT =
2515 dyn_cast_or_null<FunctionProtoType>(FunTy)) {
2516 if (FPT->getNumParams() == 0)
2517 ZeroArgCallReturnTy = FunTy->getReturnType();
2518 return true;
2519 }
2520 return false;
2521 }
2522
2523 /// Give notes for a set of overloads.
2524 ///
2525 /// A companion to tryExprAsCall. In cases when the name that the programmer
2526 /// wrote was an overloaded function, we may be able to make some guesses about
2527 /// plausible overloads based on their return types; such guesses can be handed
2528 /// off to this method to be emitted as notes.
2529 ///
2530 /// \param Overloads - The overloads to note.
2531 /// \param FinalNoteLoc - If we've suppressed printing some overloads due to
2532 /// -fshow-overloads=best, this is the location to attach to the note about too
2533 /// many candidates. Typically this will be the location of the original
2534 /// ill-formed expression.
2535 static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
2536 const SourceLocation FinalNoteLoc) {
2537 unsigned ShownOverloads = 0;
2538 unsigned SuppressedOverloads = 0;
2539 for (UnresolvedSetImpl::iterator It = Overloads.begin(),
2540 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2541 if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
2542 ++SuppressedOverloads;
2543 continue;
2544 }
2545
2546 NamedDecl *Fn = (*It)->getUnderlyingDecl();
2547 // Don't print overloads for non-default multiversioned functions.
2548 if (const auto *FD = Fn->getAsFunction()) {
2549 if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
2550 !FD->getAttr<TargetAttr>()->isDefaultVersion())
2551 continue;
2552 if (FD->isMultiVersion() && FD->hasAttr<TargetVersionAttr>() &&
2553 !FD->getAttr<TargetVersionAttr>()->isDefaultVersion())
2554 continue;
2555 }
2556 S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
2557 ++ShownOverloads;
2558 }
2559
2560 S.Diags.overloadCandidatesShown(ShownOverloads);
2561
2562 if (SuppressedOverloads)
2563 S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
2564 << SuppressedOverloads;
2565 }
2566
2567 static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
2568 const UnresolvedSetImpl &Overloads,
2569 bool (*IsPlausibleResult)(QualType)) {
2570 if (!IsPlausibleResult)
2571 return noteOverloads(S, Overloads, Loc);
2572
2573 UnresolvedSet<2> PlausibleOverloads;
2574 for (OverloadExpr::decls_iterator It = Overloads.begin(),
2575 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2576 const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It);
2577 QualType OverloadResultTy = OverloadDecl->getReturnType();
2578 if (IsPlausibleResult(OverloadResultTy))
2579 PlausibleOverloads.addDecl(It.getDecl());
2580 }
2581 noteOverloads(S, PlausibleOverloads, Loc);
2582 }
2583
2584 /// Determine whether the given expression can be called by just
2585 /// putting parentheses after it. Notably, expressions with unary
2586 /// operators can't be because the unary operator will start parsing
2587 /// outside the call.
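///
/// For example (illustrative), appending parentheses to '-func' yields
/// '-func()', which calls 'func' and then negates the result instead of
/// calling the original expression, so unary-operator expressions are
/// rejected here.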
2588 static bool IsCallableWithAppend(Expr *E) {
2589 E = E->IgnoreImplicit();
2590 return (!isa<CStyleCastExpr>(E) &&
2591 !isa<UnaryOperator>(E) &&
2592 !isa<BinaryOperator>(E) &&
2593 !isa<CXXOperatorCallExpr>(E));
2594 }
2595
2596 static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
2597 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2598 E = UO->getSubExpr();
2599
2600 if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
2601 if (ULE->getNumDecls() == 0)
2602 return false;
2603
2604 const NamedDecl *ND = *ULE->decls_begin();
2605 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
2606 return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
2607 }
2608 return false;
2609 }
2610
2611 bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
2612 bool ForceComplain,
2613 bool (*IsPlausibleResult)(QualType)) {
2614 SourceLocation Loc = E.get()->getExprLoc();
2615 SourceRange Range = E.get()->getSourceRange();
2616 UnresolvedSet<4> Overloads;
2617
2618 // If this is a SFINAE context, don't try anything that might trigger ADL
2619 // prematurely.
2620 if (!isSFINAEContext()) {
2621 QualType ZeroArgCallTy;
2622 if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) &&
2623 !ZeroArgCallTy.isNull() &&
2624 (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
2625 // At this point, we know E is potentially callable with 0
2626 // arguments and that it returns something of a reasonable type,
2627 // so we can emit a fixit and carry on pretending that E was
2628 // actually a CallExpr.
2629 SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
2630 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
2631 Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
2632 << (IsCallableWithAppend(E.get())
2633 ? FixItHint::CreateInsertion(ParenInsertionLoc,
2634 "()")
2635 : FixItHint());
2636 if (!IsMV)
2637 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
2638
2639 // FIXME: Try this before emitting the fixit, and suppress diagnostics
2640 // while doing so.
2641 E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), std::nullopt,
2642 Range.getEnd().getLocWithOffset(1));
2643 return true;
2644 }
2645 }
2646 if (!ForceComplain) return false;
2647
2648 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
2649 Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
2650 if (!IsMV)
2651 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
2652 E = ExprError();
2653 return true;
2654 }
2655
2656 IdentifierInfo *Sema::getSuperIdentifier() const {
2657 if (!Ident_super)
2658 Ident_super = &Context.Idents.get("super");
2659 return Ident_super;
2660 }
2661
2662 IdentifierInfo *Sema::getFloat128Identifier() const {
2663 if (!Ident___float128)
2664 Ident___float128 = &Context.Idents.get("__float128");
2665 return Ident___float128;
2666 }
2667
2668 void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
2669 CapturedRegionKind K,
2670 unsigned OpenMPCaptureLevel) {
2671 auto *CSI = new CapturedRegionScopeInfo(
2672 getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
2673 (getLangOpts().OpenMP && K == CR_OpenMP) ? getOpenMPNestingLevel() : 0,
2674 OpenMPCaptureLevel);
2675 CSI->ReturnType = Context.VoidTy;
2676 FunctionScopes.push_back(CSI);
2677 }
2678
2679 CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
2680 if (FunctionScopes.empty())
2681 return nullptr;
2682
2683 return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back());
2684 }
2685
2686 const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
2687 Sema::getMismatchingDeleteExpressions() const {
2688 return DeleteExprs;
2689 }
2690
2691 Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
2692 : S(S), OldFPFeaturesState(S.CurFPFeatures),
2693 OldOverrides(S.FpPragmaStack.CurrentValue),
2694 OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
2695 OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}
2696
2697 Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
2698 S.CurFPFeatures = OldFPFeaturesState;
2699 S.FpPragmaStack.CurrentValue = OldOverrides;
2700 S.PP.setCurrentFPEvalMethod(OldFPPragmaLocation, OldEvalMethod);
2701 }
2702
2703 bool Sema::isDeclaratorFunctionLike(Declarator &D) {
2704 assert(D.getCXXScopeSpec().isSet() &&
2705 "can only be called for qualified names");
2706
2707 auto LR = LookupResult(*this, D.getIdentifier(), D.getBeginLoc(),
2708 LookupOrdinaryName, forRedeclarationInCurContext());
2709 DeclContext *DC = computeDeclContext(D.getCXXScopeSpec(),
2710 !D.getDeclSpec().isFriendSpecified());
2711 if (!DC)
2712 return false;
2713
2714 LookupQualifiedName(LR, DC);
2715 bool Result = std::all_of(LR.begin(), LR.end(), [](Decl *Dcl) {
2716 if (NamedDecl *ND = dyn_cast<NamedDecl>(Dcl)) {
2717 ND = ND->getUnderlyingDecl();
2718 return isa<FunctionDecl>(ND) || isa<FunctionTemplateDecl>(ND) ||
2719 isa<UsingDecl>(ND);
2720 }
2721 return false;
2722 });
2723 return Result;
2724 }
2725