//===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements semantic analysis for CUDA constructs.
///
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/Cuda.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/SmallVector.h"
#include <optional>
using namespace clang;

template <typename AttrT> static bool hasExplicitAttr(const VarDecl *D) {
  if (!D)
    return false;
  if (auto *A = D->getAttr<AttrT>())
    return !A->isImplicit();
  return false;
}

void Sema::PushForceCUDAHostDevice() {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  ForceCUDAHostDeviceDepth++;
}

bool Sema::PopForceCUDAHostDevice() {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  if (ForceCUDAHostDeviceDepth == 0)
    return false;
  ForceCUDAHostDeviceDepth--;
  return true;
}

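// A rough sketch of what the function below handles: for a kernel launch
// written in CUDA source as
//   kernel<<<Grid, Block, SharedMem, Stream>>>(Args...);
// the execution-configuration arguments between '<<<' (at LLLLoc) and '>>>'
// (at GGGLoc) are turned into a call to the configuration function named by
// getCudaConfigureFuncName().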
ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                         MultiExprArg ExecConfig,
                                         SourceLocation GGGLoc) {
  FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
  if (!ConfigDecl)
    return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
                     << getCudaConfigureFuncName());
  QualType ConfigQTy = ConfigDecl->getType();

  DeclRefExpr *ConfigDR = new (Context)
      DeclRefExpr(Context, ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
  MarkFunctionReferenced(LLLLoc, ConfigDecl);

  return BuildCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
                       /*IsExecConfig=*/true);
}

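// For example, a declaration carrying both __host__ and __device__ parsed
// attributes is classified as CFT_HostDevice; __global__ takes precedence
// over host/device, and an invalid-target marker trumps everything.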
Sema::CUDAFunctionTarget
Sema::IdentifyCUDATarget(const ParsedAttributesView &Attrs) {
  bool HasHostAttr = false;
  bool HasDeviceAttr = false;
  bool HasGlobalAttr = false;
  bool HasInvalidTargetAttr = false;
  for (const ParsedAttr &AL : Attrs) {
    switch (AL.getKind()) {
    case ParsedAttr::AT_CUDAGlobal:
      HasGlobalAttr = true;
      break;
    case ParsedAttr::AT_CUDAHost:
      HasHostAttr = true;
      break;
    case ParsedAttr::AT_CUDADevice:
      HasDeviceAttr = true;
      break;
    case ParsedAttr::AT_CUDAInvalidTarget:
      HasInvalidTargetAttr = true;
      break;
    default:
      break;
    }
  }

  if (HasInvalidTargetAttr)
    return CFT_InvalidTarget;

  if (HasGlobalAttr)
    return CFT_Global;

  if (HasHostAttr && HasDeviceAttr)
    return CFT_HostDevice;

  if (HasDeviceAttr)
    return CFT_Device;

  return CFT_Host;
}

template <typename A>
static bool hasAttr(const FunctionDecl *D, bool IgnoreImplicitAttr) {
  return D->hasAttrs() && llvm::any_of(D->getAttrs(), [&](Attr *Attribute) {
           return isa<A>(Attribute) &&
                  !(IgnoreImplicitAttr && Attribute->isImplicit());
         });
}

/// IdentifyCUDATarget - Determine the CUDA compilation target for this
/// function.
Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D,
                                                  bool IgnoreImplicitHDAttr) {
  // Code that lives outside a function is run on the host.
  if (D == nullptr)
    return CFT_Host;

  if (D->hasAttr<CUDAInvalidTargetAttr>())
    return CFT_InvalidTarget;

  if (D->hasAttr<CUDAGlobalAttr>())
    return CFT_Global;

  if (hasAttr<CUDADeviceAttr>(D, IgnoreImplicitHDAttr)) {
    if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr))
      return CFT_HostDevice;
    return CFT_Device;
  } else if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr)) {
    return CFT_Host;
  } else if ((D->isImplicit() || !D->isUserProvided()) &&
             !IgnoreImplicitHDAttr) {
    // Some implicit declarations (like intrinsic functions) are not marked.
    // Set the most lenient target on them for maximal flexibility.
    return CFT_HostDevice;
  }

  return CFT_Host;
}

/// IdentifyCUDATarget - Determine the CUDA compilation target for this
/// variable.
Sema::CUDAVariableTarget Sema::IdentifyCUDATarget(const VarDecl *Var) {
  if (Var->hasAttr<HIPManagedAttr>())
    return CVT_Unified;
  // Only constexpr and const variables with an implicit constant attribute
  // are emitted on both sides. Such variables are promoted to the device side
  // only if they have static constant initializers on the device side.
  if ((Var->isConstexpr() || Var->getType().isConstQualified()) &&
      Var->hasAttr<CUDAConstantAttr>() &&
      !hasExplicitAttr<CUDAConstantAttr>(Var))
    return CVT_Both;
  if (Var->hasAttr<CUDADeviceAttr>() || Var->hasAttr<CUDAConstantAttr>() ||
      Var->hasAttr<CUDASharedAttr>() ||
      Var->getType()->isCUDADeviceBuiltinSurfaceType() ||
      Var->getType()->isCUDADeviceBuiltinTextureType())
    return CVT_Device;
  // Function-scope static variables without an explicit device or constant
  // attribute are emitted
  // - on both sides in host device functions
  // - on device side in device or global functions
  if (auto *FD = dyn_cast<FunctionDecl>(Var->getDeclContext())) {
    switch (IdentifyCUDATarget(FD)) {
    case CFT_HostDevice:
      return CVT_Both;
    case CFT_Device:
    case CFT_Global:
      return CVT_Device;
    default:
      return CVT_Host;
    }
  }
  return CVT_Host;
}

// * CUDA Call preference table
//
// F - from,
// T - to
// Ph - preference in host mode
// Pd - preference in device mode
// H - handled in (x)
// Preferences: N:native, SS:same side, HD:host-device, WS:wrong side, --:never.
//
// | F  | T  | Ph  | Pd  |  H  |
// |----+----+-----+-----+-----+
// | d  | d  | N   | N   | (c) |
// | d  | g  | --  | --  | (a) |
// | d  | h  | --  | --  | (e) |
// | d  | hd | HD  | HD  | (b) |
// | g  | d  | N   | N   | (c) |
// | g  | g  | --  | --  | (a) |
// | g  | h  | --  | --  | (e) |
// | g  | hd | HD  | HD  | (b) |
// | h  | d  | --  | --  | (e) |
// | h  | g  | N   | N   | (c) |
// | h  | h  | N   | N   | (c) |
// | h  | hd | HD  | HD  | (b) |
// | hd | d  | WS  | SS  | (d) |
// | hd | g  | SS  | --  |(d/a)|
// | hd | h  | SS  | WS  | (d) |
// | hd | hd | HD  | HD  | (b) |
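//
// Reading the table: for example, the "| hd | d |" row says a call from a
// __host__ __device__ function to a __device__ function is wrong-side (WS)
// when compiling for the host but same-side (SS) when compiling for the
// device; case (d) below implements that row.
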
Sema::CUDAFunctionPreference
Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
                             const FunctionDecl *Callee) {
  assert(Callee && "Callee must be valid.");
  CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller);
  CUDAFunctionTarget CalleeTarget = IdentifyCUDATarget(Callee);

  // If one of the targets is invalid, the check always fails, no matter what
  // the other target is.
  if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget)
    return CFP_Never;

  // (a) Can't call global from some contexts until we support CUDA's
  // dynamic parallelism.
  if (CalleeTarget == CFT_Global &&
      (CallerTarget == CFT_Global || CallerTarget == CFT_Device))
    return CFP_Never;

  // (b) Calling HostDevice is OK for everyone.
  if (CalleeTarget == CFT_HostDevice)
    return CFP_HostDevice;

  // (c) Best case scenarios
  if (CalleeTarget == CallerTarget ||
      (CallerTarget == CFT_Host && CalleeTarget == CFT_Global) ||
      (CallerTarget == CFT_Global && CalleeTarget == CFT_Device))
    return CFP_Native;

  // (d) HostDevice behavior depends on compilation mode.
  if (CallerTarget == CFT_HostDevice) {
    // It's OK to call a compilation-mode matching function from an HD one.
    if ((getLangOpts().CUDAIsDevice && CalleeTarget == CFT_Device) ||
        (!getLangOpts().CUDAIsDevice &&
         (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global)))
      return CFP_SameSide;

    // Calls from HD to non-mode-matching functions (i.e., to host functions
    // when compiling in device mode or to device functions when compiling in
    // host mode) are allowed at the sema level, but eventually rejected if
    // they're ever codegened. TODO: Reject said calls earlier.
    return CFP_WrongSide;
  }

  // (e) Calling across device/host boundary is not something you should do.
  if ((CallerTarget == CFT_Host && CalleeTarget == CFT_Device) ||
      (CallerTarget == CFT_Device && CalleeTarget == CFT_Host) ||
      (CallerTarget == CFT_Global && CalleeTarget == CFT_Host))
    return CFP_Never;

  llvm_unreachable("All cases should've been handled by now.");
}

template <typename AttrT> static bool hasImplicitAttr(const FunctionDecl *D) {
  if (!D)
    return false;
  if (auto *A = D->getAttr<AttrT>())
    return A->isImplicit();
  return D->isImplicit();
}

bool Sema::isCUDAImplicitHostDeviceFunction(const FunctionDecl *D) {
  bool IsImplicitDevAttr = hasImplicitAttr<CUDADeviceAttr>(D);
  bool IsImplicitHostAttr = hasImplicitAttr<CUDAHostAttr>(D);
  return IsImplicitDevAttr && IsImplicitHostAttr;
}

void Sema::EraseUnwantedCUDAMatches(
    const FunctionDecl *Caller,
    SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches) {
  if (Matches.size() <= 1)
    return;

  using Pair = std::pair<DeclAccessPair, FunctionDecl *>;

  // Gets the CUDA function preference for a call from Caller to Match.
  auto GetCFP = [&](const Pair &Match) {
    return IdentifyCUDAPreference(Caller, Match.second);
  };

  // Find the best call preference among the functions in Matches.
  CUDAFunctionPreference BestCFP = GetCFP(*std::max_element(
      Matches.begin(), Matches.end(),
      [&](const Pair &M1, const Pair &M2) { return GetCFP(M1) < GetCFP(M2); }));

  // Erase all functions with lower priority.
  llvm::erase_if(Matches,
                 [&](const Pair &Match) { return GetCFP(Match) < BestCFP; });
}

/// When an implicitly-declared special member has to invoke more than one
/// base/field special member, conflicts may occur in the targets of these
/// members. For example, if one base's member is __host__ and another's is
/// __device__, it's a conflict.
/// This function figures out if the given targets \param Target1 and
/// \param Target2 conflict, and if they do not it fills in
/// \param ResolvedTarget with a target that resolves for both calls.
/// \return true if there's a conflict, false otherwise.
static bool
resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1,
                                Sema::CUDAFunctionTarget Target2,
                                Sema::CUDAFunctionTarget *ResolvedTarget) {
  // Only free functions and static member functions may be global.
  assert(Target1 != Sema::CFT_Global);
  assert(Target2 != Sema::CFT_Global);

  if (Target1 == Sema::CFT_HostDevice) {
    *ResolvedTarget = Target2;
  } else if (Target2 == Sema::CFT_HostDevice) {
    *ResolvedTarget = Target1;
  } else if (Target1 != Target2) {
    return true;
  } else {
    *ResolvedTarget = Target1;
  }

  return false;
}

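// A sketch of the kind of conflict resolved above and diagnosed below:
//   struct A { __device__ A(); };
//   struct B { __host__ B(); };
//   struct C : A { B Member; };  // implicit C() must call A() and B()
// The implicit default constructor of C cannot be given a single target, so
// it is marked with CUDAInvalidTargetAttr and any use of it is diagnosed.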
bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                                   CXXSpecialMember CSM,
                                                   CXXMethodDecl *MemberDecl,
                                                   bool ConstRHS,
                                                   bool Diagnose) {
  // If the defaulted special member is defined lexically outside of its
  // owning class, or the special member already has explicit device or host
  // attributes, do not infer.
  bool InClass = MemberDecl->getLexicalParent() == MemberDecl->getParent();
  bool HasH = MemberDecl->hasAttr<CUDAHostAttr>();
  bool HasD = MemberDecl->hasAttr<CUDADeviceAttr>();
  bool HasExplicitAttr =
      (HasD && !MemberDecl->getAttr<CUDADeviceAttr>()->isImplicit()) ||
      (HasH && !MemberDecl->getAttr<CUDAHostAttr>()->isImplicit());
  if (!InClass || HasExplicitAttr)
    return false;

  std::optional<CUDAFunctionTarget> InferredTarget;

  // We're going to invoke special member lookup; mark that these special
  // members are called from this one, and not from its caller.
  ContextRAII MethodContext(*this, MemberDecl);

  // Look for special members in base classes that should be invoked from here.
  // Infer the target of this member based on the ones it should call.
  // Skip direct and indirect virtual bases for abstract classes.
  llvm::SmallVector<const CXXBaseSpecifier *, 16> Bases;
  for (const auto &B : ClassDecl->bases()) {
    if (!B.isVirtual()) {
      Bases.push_back(&B);
    }
  }

  if (!ClassDecl->isAbstract()) {
    llvm::append_range(Bases, llvm::make_pointer_range(ClassDecl->vbases()));
  }

  for (const auto *B : Bases) {
    const RecordType *BaseType = B->getType()->getAs<RecordType>();
    if (!BaseType) {
      continue;
    }

    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
    Sema::SpecialMemberOverloadResult SMOR =
        LookupSpecialMember(BaseClassDecl, CSM,
                            /* ConstArg */ ConstRHS,
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR.getMethod())
      continue;

    CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR.getMethod());
    if (!InferredTarget) {
      InferredTarget = BaseMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          *InferredTarget, BaseMethodTarget, &*InferredTarget);
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << *InferredTarget << BaseMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  // Same as for bases, but now for special members of fields.
  for (const auto *F : ClassDecl->fields()) {
    if (F->isInvalidDecl()) {
      continue;
    }

    const RecordType *FieldType =
        Context.getBaseElementType(F->getType())->getAs<RecordType>();
    if (!FieldType) {
      continue;
    }

    CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl());
    Sema::SpecialMemberOverloadResult SMOR =
        LookupSpecialMember(FieldRecDecl, CSM,
                            /* ConstArg */ ConstRHS && !F->isMutable(),
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR.getMethod())
      continue;

    CUDAFunctionTarget FieldMethodTarget =
        IdentifyCUDATarget(SMOR.getMethod());
    if (!InferredTarget) {
      InferredTarget = FieldMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          *InferredTarget, FieldMethodTarget, &*InferredTarget);
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << *InferredTarget << FieldMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  // If no target was inferred, mark this member as __host__ __device__;
  // it's the least restrictive option that can be invoked from any target.
  bool NeedsH = true, NeedsD = true;
  if (InferredTarget) {
    if (*InferredTarget == CFT_Device)
      NeedsH = false;
    else if (*InferredTarget == CFT_Host)
      NeedsD = false;
  }

  // We are either setting the attributes for the first time, or the inferred
  // ones must match the previously set ones.
  if (NeedsD && !HasD)
    MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
  if (NeedsH && !HasH)
    MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));

  return false;
}

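// Examples of constructors considered "empty" under the rules below:
//   struct S { S() {} };                 // empty body, nothing to initialize
//   union U { int a; float b; };         // union ctor inits no members
// and one that is not:
//   struct T { int x; T() : x(42) {} };  // has a member initializer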
bool Sema::isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD) {
  if (!CD->isDefined() && CD->isTemplateInstantiation())
    InstantiateFunctionDefinition(Loc, CD->getFirstDecl());

  // (E.2.3.1, CUDA 7.5) A constructor for a class type is considered
  // empty at a point in the translation unit, if it is either a
  // trivial constructor
  if (CD->isTrivial())
    return true;

  // ... or it satisfies all of the following conditions:
  // The constructor function has been defined.
  // The constructor function has no parameters,
  // and the function body is an empty compound statement.
  if (!(CD->hasTrivialBody() && CD->getNumParams() == 0))
    return false;

  // Its class has no virtual functions and no virtual base classes.
  if (CD->getParent()->isDynamicClass())
    return false;

  // Union ctor does not call ctors of its data members.
  if (CD->getParent()->isUnion())
    return true;

  // The only form of initializer allowed is an empty constructor.
  // This will recursively check all base classes and member initializers.
  if (!llvm::all_of(CD->inits(), [&](const CXXCtorInitializer *CI) {
        if (const CXXConstructExpr *CE =
                dyn_cast<CXXConstructExpr>(CI->getInit()))
          return isEmptyCudaConstructor(Loc, CE->getConstructor());
        return false;
      }))
    return false;

  return true;
}

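// The destructor-side analogue: `struct S { ~S() {} };` has an empty
// destructor, while any class with a virtual function, a virtual base, or a
// base/member with a non-empty destructor does not.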
bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
  // No destructor -> no problem.
  if (!DD)
    return true;

  if (!DD->isDefined() && DD->isTemplateInstantiation())
    InstantiateFunctionDefinition(Loc, DD->getFirstDecl());

  // (E.2.3.1, CUDA 7.5) A destructor for a class type is considered
  // empty at a point in the translation unit, if it is either a
  // trivial destructor
  if (DD->isTrivial())
    return true;

  // ... or it satisfies all of the following conditions:
  // The destructor function has been defined.
  // and the function body is an empty compound statement.
  if (!DD->hasTrivialBody())
    return false;

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Its class has no virtual functions and no virtual base classes.
  if (ClassDecl->isDynamicClass())
    return false;

  // Union does not have base class and union dtor does not call dtors of its
  // data members.
  if (DD->getParent()->isUnion())
    return true;

  // Only empty destructors are allowed. This will recursively check
  // destructors for all base classes...
  if (!llvm::all_of(ClassDecl->bases(), [&](const CXXBaseSpecifier &BS) {
        if (CXXRecordDecl *RD = BS.getType()->getAsCXXRecordDecl())
          return isEmptyCudaDestructor(Loc, RD->getDestructor());
        return true;
      }))
    return false;

  // ... and member fields.
  if (!llvm::all_of(ClassDecl->fields(), [&](const FieldDecl *Field) {
        if (CXXRecordDecl *RD = Field->getType()
                                    ->getBaseElementTypeUnsafe()
                                    ->getAsCXXRecordDecl())
          return isEmptyCudaDestructor(Loc, RD->getDestructor());
        return true;
      }))
    return false;

  return true;
}

namespace {
enum CUDAInitializerCheckKind {
  CICK_DeviceOrConstant, // Check initializer for device/constant variable
  CICK_Shared,           // Check initializer for shared variable
};

bool IsDependentVar(VarDecl *VD) {
  if (VD->getType()->isDependentType())
    return true;
  if (const auto *Init = VD->getInit())
    return Init->isValueDependent();
  return false;
}

// Check whether a variable has an allowed initializer for a CUDA device side
// variable with global storage. \p VD may be a host variable to be checked for
// potential promotion to device side variable.
//
// CUDA/HIP allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
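//
// For instance, `__device__ int X = 42;` is allowed (constant initializer)
// and `__shared__ float Buf[256];` is allowed (no initializer), but a
// __device__ variable whose initializer calls a non-constexpr function is
// rejected unless -fgpu-allow-device-init is in effect.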
bool HasAllowedCUDADeviceStaticInitializer(Sema &S, VarDecl *VD,
                                           CUDAInitializerCheckKind CheckKind) {
  assert(!VD->isInvalidDecl() && VD->hasGlobalStorage());
  assert(!IsDependentVar(VD) && "do not check dependent var");
  const Expr *Init = VD->getInit();
  auto IsEmptyInit = [&](const Expr *Init) {
    if (!Init)
      return true;
    if (const auto *CE = dyn_cast<CXXConstructExpr>(Init)) {
      return S.isEmptyCudaConstructor(VD->getLocation(), CE->getConstructor());
    }
    return false;
  };
  auto IsConstantInit = [&](const Expr *Init) {
    assert(Init);
    ASTContext::CUDAConstantEvalContextRAII EvalCtx(S.Context,
                                                    /*NoWrongSidedVars=*/true);
    return Init->isConstantInitializer(S.Context,
                                       VD->getType()->isReferenceType());
  };
  auto HasEmptyDtor = [&](VarDecl *VD) {
    if (const auto *RD = VD->getType()->getAsCXXRecordDecl())
      return S.isEmptyCudaDestructor(VD->getLocation(), RD->getDestructor());
    return true;
  };
  if (CheckKind == CICK_Shared)
    return IsEmptyInit(Init) && HasEmptyDtor(VD);
  return S.LangOpts.GPUAllowDeviceInit ||
         ((IsEmptyInit(Init) || IsConstantInit(Init)) && HasEmptyDtor(VD));
}
} // namespace

void Sema::checkAllowedCUDAInitializer(VarDecl *VD) {
  // Do not check dependent variables since the ctor/dtor/initializer are not
  // determined. Do it after instantiation.
  if (VD->isInvalidDecl() || !VD->hasInit() || !VD->hasGlobalStorage() ||
      IsDependentVar(VD))
    return;
  const Expr *Init = VD->getInit();
  bool IsSharedVar = VD->hasAttr<CUDASharedAttr>();
  bool IsDeviceOrConstantVar =
      !IsSharedVar &&
      (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>());
  if (IsDeviceOrConstantVar || IsSharedVar) {
    if (HasAllowedCUDADeviceStaticInitializer(
            *this, VD, IsSharedVar ? CICK_Shared : CICK_DeviceOrConstant))
      return;
    Diag(VD->getLocation(),
         IsSharedVar ? diag::err_shared_var_init : diag::err_dynamic_var_init)
        << Init->getSourceRange();
    VD->setInvalidDecl();
  } else {
    // This is a host-side global variable. Check that the initializer is
    // callable from the host side.
    const FunctionDecl *InitFn = nullptr;
    if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Init)) {
      InitFn = CE->getConstructor();
    } else if (const CallExpr *CE = dyn_cast<CallExpr>(Init)) {
      InitFn = CE->getDirectCallee();
    }
    if (InitFn) {
      CUDAFunctionTarget InitFnTarget = IdentifyCUDATarget(InitFn);
      if (InitFnTarget != CFT_Host && InitFnTarget != CFT_HostDevice) {
        Diag(VD->getLocation(), diag::err_ref_bad_target_global_initializer)
            << InitFnTarget << InitFn;
        Diag(InitFn->getLocation(), diag::note_previous_decl) << InitFn;
        VD->setInvalidDecl();
      }
    }
  }
}

// With -fcuda-host-device-constexpr, an unattributed constexpr function is
// treated as implicitly __host__ __device__, unless:
// * it is a variadic function (device-side variadic functions are not
//   allowed), or
// * a __device__ function with this signature was already declared, in which
//   case we output an error, unless the __device__ decl is in a system
//   header, in which case we leave the constexpr function unattributed.
//
// In addition, all function decls are treated as __host__ __device__ when
// ForceCUDAHostDeviceDepth > 0 (corresponding to code within a
//   #pragma clang force_cuda_host_device_begin/end
// pair).
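//
// For example, with -fcuda-host-device-constexpr:
//   constexpr int twice(int x) { return 2 * x; }  // becomes __host__ __device__
//   constexpr int bad(int n, ...) { return n; }   // variadic: stays host-only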
void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
                                       const LookupResult &Previous) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");

  if (ForceCUDAHostDeviceDepth > 0) {
    if (!NewD->hasAttr<CUDAHostAttr>())
      NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
    if (!NewD->hasAttr<CUDADeviceAttr>())
      NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    return;
  }

  if (!getLangOpts().CUDAHostDeviceConstexpr || !NewD->isConstexpr() ||
      NewD->isVariadic() || NewD->hasAttr<CUDAHostAttr>() ||
      NewD->hasAttr<CUDADeviceAttr>() || NewD->hasAttr<CUDAGlobalAttr>())
    return;

  // Is D a __device__ function with the same signature as NewD, ignoring CUDA
  // attributes?
  auto IsMatchingDeviceFn = [&](NamedDecl *D) {
    if (UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(D))
      D = Using->getTargetDecl();
    FunctionDecl *OldD = D->getAsFunction();
    return OldD && OldD->hasAttr<CUDADeviceAttr>() &&
           !OldD->hasAttr<CUDAHostAttr>() &&
           !IsOverload(NewD, OldD, /* UseMemberUsingDeclRules = */ false,
                       /* ConsiderCudaAttrs = */ false);
  };
  auto It = llvm::find_if(Previous, IsMatchingDeviceFn);
  if (It != Previous.end()) {
    // We found a __device__ function with the same name and signature as NewD
    // (ignoring CUDA attrs). This is an error unless that function is defined
    // in a system header, in which case we simply return without making NewD
    // host+device.
    NamedDecl *Match = *It;
    if (!getSourceManager().isInSystemHeader(Match->getLocation())) {
      Diag(NewD->getLocation(),
           diag::err_cuda_unattributed_constexpr_cannot_overload_device)
          << NewD;
      Diag(Match->getLocation(),
           diag::note_cuda_conflicting_device_function_declared_here);
    }
    return;
  }

  NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
  NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
}

// TODO: `__constant__` memory may be a limited resource for certain targets.
// A safeguard may be needed at the end of the compilation pipeline if
// `__constant__` memory usage goes beyond the limit.
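//
// For example, in device compilation `constexpr float Pi = 3.14f;` at file
// scope gets an implicit __constant__ attribute here, so device code may
// reference it directly.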
void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) {
  // Do not promote dependent variables since the ctor/dtor/initializer are
  // not determined. Do it after instantiation.
  if (getLangOpts().CUDAIsDevice && !VD->hasAttr<CUDAConstantAttr>() &&
      !VD->hasAttr<CUDASharedAttr>() &&
      (VD->isFileVarDecl() || VD->isStaticDataMember()) &&
      !IsDependentVar(VD) &&
      ((VD->isConstexpr() || VD->getType().isConstQualified()) &&
       HasAllowedCUDADeviceStaticInitializer(*this, VD,
                                             CICK_DeviceOrConstant))) {
    VD->addAttr(CUDAConstantAttr::CreateImplicit(getASTContext()));
  }
}

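// Typical usage: `CUDADiagIfDeviceCode(Loc, DiagID) << Args;`. The returned
// builder emits immediately inside __device__/__global__ code, is a no-op in
// host-only code, and for __host__ __device__ code in device mode defers the
// diagnostic until the function is known to be emitted.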
Sema::SemaDiagnosticBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
                                                       unsigned DiagID) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  FunctionDecl *CurFunContext = getCurFunctionDecl(/*AllowLambda=*/true);
  SemaDiagnosticBuilder::Kind DiagKind = [&] {
    if (!CurFunContext)
      return SemaDiagnosticBuilder::K_Nop;
    switch (CurrentCUDATarget()) {
    case CFT_Global:
    case CFT_Device:
      return SemaDiagnosticBuilder::K_Immediate;
    case CFT_HostDevice:
      // An HD function counts as host code if we're compiling for host, and
      // device code if we're compiling for device. Defer any errors in device
      // mode until the function is known-emitted.
      if (!getLangOpts().CUDAIsDevice)
        return SemaDiagnosticBuilder::K_Nop;
      if (IsLastErrorImmediate &&
          Diags.getDiagnosticIDs()->isBuiltinNote(DiagID))
        return SemaDiagnosticBuilder::K_Immediate;
      return (getEmissionStatus(CurFunContext) ==
              FunctionEmissionStatus::Emitted)
                 ? SemaDiagnosticBuilder::K_ImmediateWithCallStack
                 : SemaDiagnosticBuilder::K_Deferred;
    default:
      return SemaDiagnosticBuilder::K_Nop;
    }
  }();
  return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, *this);
}

Sema::SemaDiagnosticBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
                                                     unsigned DiagID) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  FunctionDecl *CurFunContext = getCurFunctionDecl(/*AllowLambda=*/true);
  SemaDiagnosticBuilder::Kind DiagKind = [&] {
    if (!CurFunContext)
      return SemaDiagnosticBuilder::K_Nop;
    switch (CurrentCUDATarget()) {
    case CFT_Host:
      return SemaDiagnosticBuilder::K_Immediate;
    case CFT_HostDevice:
      // An HD function counts as host code if we're compiling for host, and
      // device code if we're compiling for device. Defer any errors in host
      // mode until the function is known-emitted.
      if (getLangOpts().CUDAIsDevice)
        return SemaDiagnosticBuilder::K_Nop;
      if (IsLastErrorImmediate &&
          Diags.getDiagnosticIDs()->isBuiltinNote(DiagID))
        return SemaDiagnosticBuilder::K_Immediate;
      return (getEmissionStatus(CurFunContext) ==
              FunctionEmissionStatus::Emitted)
                 ? SemaDiagnosticBuilder::K_ImmediateWithCallStack
                 : SemaDiagnosticBuilder::K_Deferred;
    default:
      return SemaDiagnosticBuilder::K_Nop;
    }
  }();
  return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, *this);
}

bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  assert(Callee && "Callee may not be null.");

  auto &ExprEvalCtx = ExprEvalContexts.back();
  if (ExprEvalCtx.isUnevaluated() || ExprEvalCtx.isConstantEvaluated())
    return true;

  // FIXME: Is bailing out early correct here? Should we instead assume that
  // the caller is a global initializer?
  FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
  if (!Caller)
    return true;

  // If the caller is known-emitted, mark the callee as known-emitted.
  // Otherwise, mark the call in our call graph so we can traverse it later.
  bool CallerKnownEmitted =
      getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted;
  SemaDiagnosticBuilder::Kind DiagKind = [this, Caller, Callee,
                                          CallerKnownEmitted] {
    switch (IdentifyCUDAPreference(Caller, Callee)) {
    case CFP_Never:
    case CFP_WrongSide:
      assert(Caller && "Never/wrongSide calls require a non-null caller");
      // If we know the caller will be emitted, we know this wrong-side call
      // will be emitted, so it's an immediate error. Otherwise, defer the
      // error until we know the caller is emitted.
      return CallerKnownEmitted
                 ? SemaDiagnosticBuilder::K_ImmediateWithCallStack
                 : SemaDiagnosticBuilder::K_Deferred;
    default:
      return SemaDiagnosticBuilder::K_Nop;
    }
  }();

  if (DiagKind == SemaDiagnosticBuilder::K_Nop) {
    // For -fgpu-rdc, keep track of external kernels used by host functions.
    if (LangOpts.CUDAIsDevice && LangOpts.GPURelocatableDeviceCode &&
        Callee->hasAttr<CUDAGlobalAttr>() && !Callee->isDefined())
      getASTContext().CUDAExternalDeviceDeclODRUsedByHost.insert(Callee);
    return true;
  }

  // Avoid emitting this error twice for the same location. Using a hashtable
  // like this is unfortunate, but because we must continue parsing as normal
  // after encountering a deferred error, it's otherwise very tricky for us to
  // ensure that we only emit this deferred error once.
  if (!LocsWithCUDACallDiags.insert({Caller, Loc}).second)
    return true;

  SemaDiagnosticBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, *this)
      << IdentifyCUDATarget(Callee) << /*function*/ 0 << Callee
      << IdentifyCUDATarget(Caller);
  if (!Callee->getBuiltinID())
    SemaDiagnosticBuilder(DiagKind, Callee->getLocation(),
                          diag::note_previous_decl, Caller, *this)
        << Callee;
  return DiagKind != SemaDiagnosticBuilder::K_Immediate &&
         DiagKind != SemaDiagnosticBuilder::K_ImmediateWithCallStack;
}

// Check the wrong-sided reference capture of lambda for CUDA/HIP.
// A lambda function may capture a stack variable by reference when it is
// defined and then use that capture when the lambda is called. When
// the capture and the use happen on different sides, the capture is invalid
// and should be diagnosed.
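//
// A sketch of the problematic pattern (seen in device compilation):
//   void host_fn() {
//     int x = 0;
//     auto l = [&x] __device__ () { return x; };  // by-reference capture
//     // Passing 'l' to device code would dereference host stack memory on
//     // the device, so the capture is diagnosed below (deferred until the
//     // lambda is known to be emitted).
//   }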
void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee,
                                  const sema::Capture &Capture) {
  // In host compilation we only need to check lambda functions emitted on the
  // host side. In such lambda functions, a reference capture is invalid only
  // if the lambda structure is populated by a device function or kernel and
  // then passed to and called by a host function. However, that is impossible,
  // since a device function or kernel can only call a device function; also, a
  // kernel cannot pass a lambda back to a host function, since we cannot
  // define a kernel argument type which can hold the lambda before the lambda
  // itself is defined.
  if (!LangOpts.CUDAIsDevice)
    return;

  // File-scope lambda can only do init captures for global variables, which
  // results in passing by value for these global variables.
  FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
  if (!Caller)
    return;

  // In device compilation, we only need to check lambda functions which are
  // emitted on the device side. For such lambdas, a reference capture is
  // invalid only if the lambda structure is populated by a host function then
  // passed to and called in a device function or kernel.
  bool CalleeIsDevice = Callee->hasAttr<CUDADeviceAttr>();
  bool CallerIsHost =
      !Caller->hasAttr<CUDAGlobalAttr>() && !Caller->hasAttr<CUDADeviceAttr>();
  bool ShouldCheck = CalleeIsDevice && CallerIsHost;
  if (!ShouldCheck || !Capture.isReferenceCapture())
    return;
  auto DiagKind = SemaDiagnosticBuilder::K_Deferred;
  if (Capture.isVariableCapture()) {
    SemaDiagnosticBuilder(DiagKind, Capture.getLocation(),
                          diag::err_capture_bad_target, Callee, *this)
        << Capture.getVariable();
  } else if (Capture.isThisCapture()) {
    // Capture of the 'this' pointer is allowed, since it may be pointing to
    // managed memory which is accessible on both the device and host sides.
    // It only results in an invalid memory access if the pointer points to
    // memory not accessible on the device side.
    SemaDiagnosticBuilder(DiagKind, Capture.getLocation(),
                          diag::warn_maybe_capture_bad_target_this_ptr, Callee,
                          *this);
  }
}

void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  if (Method->hasAttr<CUDAHostAttr>() || Method->hasAttr<CUDADeviceAttr>())
    return;
  Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
  Method->addAttr(CUDAHostAttr::CreateImplicit(Context));
}

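// For example, overloading on target alone is permitted for:
//   __host__ void f();  __device__ void f();  // distinct host/device impls
// but rejected for a same-signature pair such as:
//   __host__ __device__ void g();  __device__ void g();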
void Sema::checkCUDATargetOverload(FunctionDecl *NewFD,
                                   const LookupResult &Previous) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  CUDAFunctionTarget NewTarget = IdentifyCUDATarget(NewFD);
  for (NamedDecl *OldND : Previous) {
    FunctionDecl *OldFD = OldND->getAsFunction();
    if (!OldFD)
      continue;

    CUDAFunctionTarget OldTarget = IdentifyCUDATarget(OldFD);
    // Don't allow HD and global functions to overload other functions with the
    // same signature. We allow overloading based on CUDA attributes so that
    // functions can have different implementations on the host and device, but
    // HD/global functions "exist" in some sense on both the host and device,
    // so should have the same implementation on both sides.
    if (NewTarget != OldTarget &&
        ((NewTarget == CFT_HostDevice) || (OldTarget == CFT_HostDevice) ||
         (NewTarget == CFT_Global) || (OldTarget == CFT_Global)) &&
        !IsOverload(NewFD, OldFD, /* UseMemberUsingDeclRules = */ false,
                    /* ConsiderCudaAttrs = */ false)) {
      Diag(NewFD->getLocation(), diag::err_cuda_ovl_target)
          << NewTarget << NewFD->getDeclName() << OldTarget << OldFD;
      Diag(OldFD->getLocation(), diag::note_previous_declaration);
      NewFD->setInvalidDecl();
      break;
    }
  }
}

template <typename AttrTy>
static void copyAttrIfPresent(Sema &S, FunctionDecl *FD,
                              const FunctionDecl &TemplateFD) {
  if (AttrTy *Attribute = TemplateFD.getAttr<AttrTy>()) {
    AttrTy *Clone = Attribute->clone(S.Context);
    Clone->setInherited(true);
    FD->addAttr(Clone);
  }
}

void Sema::inheritCUDATargetAttrs(FunctionDecl *FD,
                                  const FunctionTemplateDecl &TD) {
  const FunctionDecl &TemplateFD = *TD.getTemplatedDecl();
  copyAttrIfPresent<CUDAGlobalAttr>(*this, FD, TemplateFD);
  copyAttrIfPresent<CUDAHostAttr>(*this, FD, TemplateFD);
  copyAttrIfPresent<CUDADeviceAttr>(*this, FD, TemplateFD);
}

std::string Sema::getCudaConfigureFuncName() const {
  if (getLangOpts().HIP)
    return getLangOpts().HIPUseNewLaunchAPI ? "__hipPushCallConfiguration"
                                            : "hipConfigureCall";

  // New CUDA kernel launch sequence.
  if (CudaFeatureEnabled(Context.getTargetInfo().getSDKVersion(),
                         CudaFeature::CUDA_USES_NEW_LAUNCH))
    return "__cudaPushCallConfiguration";

  // Legacy CUDA kernel configuration call.
  return "cudaConfigureCall";
}