//===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements semantic analysis for CUDA constructs.
///
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/Cuda.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang;

template <typename AttrT> static bool hasExplicitAttr(const VarDecl *D) {
  if (!D)
    return false;
  if (auto *A = D->getAttr<AttrT>())
    return !A->isImplicit();
  return false;
}

void Sema::PushForceCUDAHostDevice() {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  ForceCUDAHostDeviceDepth++;
}

bool Sema::PopForceCUDAHostDevice() {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  if (ForceCUDAHostDeviceDepth == 0)
    return false;
  ForceCUDAHostDeviceDepth--;
  return true;
}

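// Builds the implicit runtime call that carries an execution configuration.
// For example (hypothetical source), `kernel<<<Grid, Block>>>(Args)` is
// checked by building a call to cudaConfigureCall (or its HIP / new-launch
// equivalent, see getCudaConfigureFuncName()); LLLLoc and GGGLoc denote the
// locations of the '<<<' and '>>>' tokens.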
ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                         MultiExprArg ExecConfig,
                                         SourceLocation GGGLoc) {
  FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
  if (!ConfigDecl)
    return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
                     << getCudaConfigureFuncName());
  QualType ConfigQTy = ConfigDecl->getType();

  DeclRefExpr *ConfigDR = new (Context)
      DeclRefExpr(Context, ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
  MarkFunctionReferenced(LLLLoc, ConfigDecl);

  return BuildCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
                       /*IsExecConfig=*/true);
}

Sema::CUDAFunctionTarget
Sema::IdentifyCUDATarget(const ParsedAttributesView &Attrs) {
  bool HasHostAttr = false;
  bool HasDeviceAttr = false;
  bool HasGlobalAttr = false;
  bool HasInvalidTargetAttr = false;
  for (const ParsedAttr &AL : Attrs) {
    switch (AL.getKind()) {
    case ParsedAttr::AT_CUDAGlobal:
      HasGlobalAttr = true;
      break;
    case ParsedAttr::AT_CUDAHost:
      HasHostAttr = true;
      break;
    case ParsedAttr::AT_CUDADevice:
      HasDeviceAttr = true;
      break;
    case ParsedAttr::AT_CUDAInvalidTarget:
      HasInvalidTargetAttr = true;
      break;
    default:
      break;
    }
  }

  if (HasInvalidTargetAttr)
    return CFT_InvalidTarget;

  if (HasGlobalAttr)
    return CFT_Global;

  if (HasHostAttr && HasDeviceAttr)
    return CFT_HostDevice;

  if (HasDeviceAttr)
    return CFT_Device;

  return CFT_Host;
}

template <typename A>
static bool hasAttr(const FunctionDecl *D, bool IgnoreImplicitAttr) {
  return D->hasAttrs() && llvm::any_of(D->getAttrs(), [&](Attr *Attribute) {
           return isa<A>(Attribute) &&
                  !(IgnoreImplicitAttr && Attribute->isImplicit());
         });
}

/// IdentifyCUDATarget - Determine the CUDA compilation target for this function
Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D,
                                                  bool IgnoreImplicitHDAttr) {
  // Code that lives outside a function is run on the host.
  if (D == nullptr)
    return CFT_Host;

  if (D->hasAttr<CUDAInvalidTargetAttr>())
    return CFT_InvalidTarget;

  if (D->hasAttr<CUDAGlobalAttr>())
    return CFT_Global;

  if (hasAttr<CUDADeviceAttr>(D, IgnoreImplicitHDAttr)) {
    if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr))
      return CFT_HostDevice;
    return CFT_Device;
  } else if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr)) {
    return CFT_Host;
  } else if ((D->isImplicit() || !D->isUserProvided()) &&
             !IgnoreImplicitHDAttr) {
    // Some implicit declarations (like intrinsic functions) are not marked.
    // Set the most lenient target on them for maximal flexibility.
    return CFT_HostDevice;
  }

  return CFT_Host;
}
/// IdentifyCUDATarget - Determine the CUDA compilation target for this
/// variable.
Sema::CUDAVariableTarget Sema::IdentifyCUDATarget(const VarDecl *Var) {
  if (Var->hasAttr<HIPManagedAttr>())
    return CVT_Unified;
  if (Var->isConstexpr() && !hasExplicitAttr<CUDAConstantAttr>(Var))
    return CVT_Both;
  if (Var->getType().isConstQualified() && Var->hasAttr<CUDAConstantAttr>() &&
      !hasExplicitAttr<CUDAConstantAttr>(Var))
    return CVT_Both;
  if (Var->hasAttr<CUDADeviceAttr>() || Var->hasAttr<CUDAConstantAttr>() ||
      Var->hasAttr<CUDASharedAttr>() ||
      Var->getType()->isCUDADeviceBuiltinSurfaceType() ||
      Var->getType()->isCUDADeviceBuiltinTextureType())
    return CVT_Device;
  // Function-scope static variables without an explicit device or constant
  // attribute are emitted
  // - on both sides in host device functions
  // - on the device side in device or global functions
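  // For example (hypothetical):
  //   __host__ __device__ int F() { static int X; return ++X; }  // CVT_Both
  //   __device__ int G() { static int Y; return ++Y; }           // CVT_Device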
  if (auto *FD = dyn_cast<FunctionDecl>(Var->getDeclContext())) {
    switch (IdentifyCUDATarget(FD)) {
    case CFT_HostDevice:
      return CVT_Both;
    case CFT_Device:
    case CFT_Global:
      return CVT_Device;
    default:
      return CVT_Host;
    }
  }
  return CVT_Host;
}

// * CUDA Call preference table
//
// F - from,
// T - to
// Ph - preference in host mode
// Pd - preference in device mode
// H  - handled in (x)
// Preferences: N:native, SS:same side, HD:host-device, WS:wrong side, --:never.
//
// | F  | T  | Ph  | Pd  |  H  |
// |----+----+-----+-----+-----+
// | d  | d  | N   | N   | (c) |
// | d  | g  | --  | --  | (a) |
// | d  | h  | --  | --  | (e) |
// | d  | hd | HD  | HD  | (b) |
// | g  | d  | N   | N   | (c) |
// | g  | g  | --  | --  | (a) |
// | g  | h  | --  | --  | (e) |
// | g  | hd | HD  | HD  | (b) |
// | h  | d  | --  | --  | (e) |
// | h  | g  | N   | N   | (c) |
// | h  | h  | N   | N   | (c) |
// | h  | hd | HD  | HD  | (b) |
// | hd | d  | WS  | SS  | (d) |
// | hd | g  | SS  | --  |(d/a)|
// | hd | h  | SS  | WS  | (d) |
// | hd | hd | HD  | HD  | (b) |
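//
// For example (hypothetical): in device mode, an HD function calling a plain
// __host__ function is a wrong-side (WS) call; it is accepted here in Sema
// but rejected later if the HD function is actually codegened for device:
//   __host__ void H();
//   __host__ __device__ void HD() { H(); }  // WS when compiling for device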

Sema::CUDAFunctionPreference
Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
                             const FunctionDecl *Callee) {
  assert(Callee && "Callee must be valid.");
  CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller);
  CUDAFunctionTarget CalleeTarget = IdentifyCUDATarget(Callee);

  // If one of the targets is invalid, the check always fails, no matter what
  // the other target is.
  if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget)
    return CFP_Never;

  // (a) Can't call global from some contexts until we support CUDA's
  // dynamic parallelism.
  if (CalleeTarget == CFT_Global &&
      (CallerTarget == CFT_Global || CallerTarget == CFT_Device))
    return CFP_Never;

  // (b) Calling HostDevice is OK for everyone.
  if (CalleeTarget == CFT_HostDevice)
    return CFP_HostDevice;

  // (c) Best case scenarios
  if (CalleeTarget == CallerTarget ||
      (CallerTarget == CFT_Host && CalleeTarget == CFT_Global) ||
      (CallerTarget == CFT_Global && CalleeTarget == CFT_Device))
    return CFP_Native;

  // (d) HostDevice behavior depends on compilation mode.
  if (CallerTarget == CFT_HostDevice) {
    // It's OK to call a compilation-mode matching function from an HD one.
    if ((getLangOpts().CUDAIsDevice && CalleeTarget == CFT_Device) ||
        (!getLangOpts().CUDAIsDevice &&
         (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global)))
      return CFP_SameSide;

    // Calls from HD to non-mode-matching functions (i.e., to host functions
    // when compiling in device mode or to device functions when compiling in
    // host mode) are allowed at the sema level, but eventually rejected if
    // they're ever codegened.  TODO: Reject said calls earlier.
    return CFP_WrongSide;
  }

  // (e) Calling across device/host boundary is not something you should do.
  if ((CallerTarget == CFT_Host && CalleeTarget == CFT_Device) ||
      (CallerTarget == CFT_Device && CalleeTarget == CFT_Host) ||
      (CallerTarget == CFT_Global && CalleeTarget == CFT_Host))
    return CFP_Never;

  llvm_unreachable("All cases should've been handled by now.");
}

template <typename AttrT> static bool hasImplicitAttr(const FunctionDecl *D) {
  if (!D)
    return false;
  if (auto *A = D->getAttr<AttrT>())
    return A->isImplicit();
  return D->isImplicit();
}

bool Sema::isCUDAImplicitHostDeviceFunction(const FunctionDecl *D) {
  bool IsImplicitDevAttr = hasImplicitAttr<CUDADeviceAttr>(D);
  bool IsImplicitHostAttr = hasImplicitAttr<CUDAHostAttr>(D);
  return IsImplicitDevAttr && IsImplicitHostAttr;
}

void Sema::EraseUnwantedCUDAMatches(
    const FunctionDecl *Caller,
    SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches) {
  if (Matches.size() <= 1)
    return;

  using Pair = std::pair<DeclAccessPair, FunctionDecl *>;

  // Gets the CUDA function preference for a call from Caller to Match.
  auto GetCFP = [&](const Pair &Match) {
    return IdentifyCUDAPreference(Caller, Match.second);
  };

  // Find the best call preference among the functions in Matches.
  CUDAFunctionPreference BestCFP = GetCFP(*std::max_element(
      Matches.begin(), Matches.end(),
      [&](const Pair &M1, const Pair &M2) { return GetCFP(M1) < GetCFP(M2); }));

  // Erase all functions with lower priority.
  llvm::erase_if(Matches,
                 [&](const Pair &Match) { return GetCFP(Match) < BestCFP; });
}
/// When an implicitly-declared special member has to invoke more than one
/// base/field special member, conflicts may occur in the targets of these
/// members. For example, if one base's member is __host__ and another's is
/// __device__, it's a conflict.
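/// A minimal sketch (hypothetical):
/// \code
///   struct A { __host__ A(); };
///   struct B { __device__ B(); };
///   struct C : A, B {};  // implicit C() must call both ctors: conflict
/// \endcode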
/// This function figures out if the given targets \param Target1 and
/// \param Target2 conflict, and if they do not it fills in
/// \param ResolvedTarget with a target that resolves for both calls.
/// \return true if there's a conflict, false otherwise.
static bool
resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1,
                                Sema::CUDAFunctionTarget Target2,
                                Sema::CUDAFunctionTarget *ResolvedTarget) {
  // Only free functions and static member functions may be global.
  assert(Target1 != Sema::CFT_Global);
  assert(Target2 != Sema::CFT_Global);

  if (Target1 == Sema::CFT_HostDevice) {
    *ResolvedTarget = Target2;
  } else if (Target2 == Sema::CFT_HostDevice) {
    *ResolvedTarget = Target1;
  } else if (Target1 != Target2) {
    return true;
  } else {
    *ResolvedTarget = Target1;
  }

  return false;
}

bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                                   CXXSpecialMember CSM,
                                                   CXXMethodDecl *MemberDecl,
                                                   bool ConstRHS,
                                                   bool Diagnose) {
  // If the defaulted special member is defined lexically outside of its
  // owning class, or the special member already has explicit device or host
  // attributes, do not infer.
  bool InClass = MemberDecl->getLexicalParent() == MemberDecl->getParent();
  bool HasH = MemberDecl->hasAttr<CUDAHostAttr>();
  bool HasD = MemberDecl->hasAttr<CUDADeviceAttr>();
  bool HasExplicitAttr =
      (HasD && !MemberDecl->getAttr<CUDADeviceAttr>()->isImplicit()) ||
      (HasH && !MemberDecl->getAttr<CUDAHostAttr>()->isImplicit());
  if (!InClass || HasExplicitAttr)
    return false;

  llvm::Optional<CUDAFunctionTarget> InferredTarget;

  // We're going to invoke special member lookup; mark that these special
  // members are called from this one, and not from its caller.
  ContextRAII MethodContext(*this, MemberDecl);

  // Look for special members in base classes that should be invoked from here.
  // Infer the target of this member based on the ones it should call.
  // Skip direct and indirect virtual bases for abstract classes.
  llvm::SmallVector<const CXXBaseSpecifier *, 16> Bases;
  for (const auto &B : ClassDecl->bases()) {
    if (!B.isVirtual()) {
      Bases.push_back(&B);
    }
  }

  if (!ClassDecl->isAbstract()) {
    for (const auto &VB : ClassDecl->vbases()) {
      Bases.push_back(&VB);
    }
  }

  for (const auto *B : Bases) {
    const RecordType *BaseType = B->getType()->getAs<RecordType>();
    if (!BaseType) {
      continue;
    }

    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
    Sema::SpecialMemberOverloadResult SMOR =
        LookupSpecialMember(BaseClassDecl, CSM,
                            /* ConstArg */ ConstRHS,
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR.getMethod())
      continue;

    CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR.getMethod());
    if (!InferredTarget.hasValue()) {
      InferredTarget = BaseMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.getValue(), BaseMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.getValue() << BaseMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  // Same as for bases, but now for special members of fields.
  for (const auto *F : ClassDecl->fields()) {
    if (F->isInvalidDecl()) {
      continue;
    }

    const RecordType *FieldType =
        Context.getBaseElementType(F->getType())->getAs<RecordType>();
    if (!FieldType) {
      continue;
    }

    CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl());
    Sema::SpecialMemberOverloadResult SMOR =
        LookupSpecialMember(FieldRecDecl, CSM,
                            /* ConstArg */ ConstRHS && !F->isMutable(),
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR.getMethod())
      continue;

    CUDAFunctionTarget FieldMethodTarget =
        IdentifyCUDATarget(SMOR.getMethod());
    if (!InferredTarget.hasValue()) {
      InferredTarget = FieldMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.getValue(), FieldMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.getValue()
              << FieldMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  // If no target was inferred, mark this member as __host__ __device__;
  // it's the least restrictive option that can be invoked from any target.
  bool NeedsH = true, NeedsD = true;
  if (InferredTarget.hasValue()) {
    if (InferredTarget.getValue() == CFT_Device)
      NeedsH = false;
    else if (InferredTarget.getValue() == CFT_Host)
      NeedsD = false;
  }

  // Either we are setting the attributes for the first time, or the inferred
  // ones must match the previously set ones.
  if (NeedsD && !HasD)
    MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
  if (NeedsH && !HasH)
    MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));

  return false;
}

bool Sema::isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD) {
  if (!CD->isDefined() && CD->isTemplateInstantiation())
    InstantiateFunctionDefinition(Loc, CD->getFirstDecl());

  // (E.2.3.1, CUDA 7.5) A constructor for a class type is considered
  // empty at a point in the translation unit, if it is either a
  // trivial constructor
  if (CD->isTrivial())
    return true;

  // ... or it satisfies all of the following conditions:
  // The constructor function has been defined.
  // The constructor function has no parameters,
  // and the function body is an empty compound statement.
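  // For example (hypothetical), `struct S { S() {} };` satisfies these
  // conditions, while `struct T { T(int X) : N(X) {} int N; };` does not.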
  if (!(CD->hasTrivialBody() && CD->getNumParams() == 0))
    return false;

  // Its class has no virtual functions and no virtual base classes.
  if (CD->getParent()->isDynamicClass())
    return false;

  // Union ctor does not call ctors of its data members.
  if (CD->getParent()->isUnion())
    return true;

  // The only form of initializer allowed is an empty constructor.
  // This will recursively check all base classes and member initializers.
  if (!llvm::all_of(CD->inits(), [&](const CXXCtorInitializer *CI) {
        if (const CXXConstructExpr *CE =
                dyn_cast<CXXConstructExpr>(CI->getInit()))
          return isEmptyCudaConstructor(Loc, CE->getConstructor());
        return false;
      }))
    return false;

  return true;
}

bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
  // No destructor -> no problem.
  if (!DD)
    return true;

  if (!DD->isDefined() && DD->isTemplateInstantiation())
    InstantiateFunctionDefinition(Loc, DD->getFirstDecl());

  // (E.2.3.1, CUDA 7.5) A destructor for a class type is considered
  // empty at a point in the translation unit, if it is either a
  // trivial destructor
  if (DD->isTrivial())
    return true;

  // ... or it satisfies all of the following conditions:
  // The destructor function has been defined.
  // and the function body is an empty compound statement.
  if (!DD->hasTrivialBody())
    return false;

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Its class has no virtual functions and no virtual base classes.
  if (ClassDecl->isDynamicClass())
    return false;

  // Union does not have base class and union dtor does not call dtors of its
  // data members.
  if (DD->getParent()->isUnion())
    return true;

  // Only empty destructors are allowed. This will recursively check
  // destructors for all base classes...
  if (!llvm::all_of(ClassDecl->bases(), [&](const CXXBaseSpecifier &BS) {
        if (CXXRecordDecl *RD = BS.getType()->getAsCXXRecordDecl())
          return isEmptyCudaDestructor(Loc, RD->getDestructor());
        return true;
      }))
    return false;

  // ... and member fields.
  if (!llvm::all_of(ClassDecl->fields(), [&](const FieldDecl *Field) {
        if (CXXRecordDecl *RD = Field->getType()
                                    ->getBaseElementTypeUnsafe()
                                    ->getAsCXXRecordDecl())
          return isEmptyCudaDestructor(Loc, RD->getDestructor());
        return true;
      }))
    return false;

  return true;
}

namespace {
enum CUDAInitializerCheckKind {
  CICK_DeviceOrConstant, // Check initializer for device/constant variable
  CICK_Shared,           // Check initializer for shared variable
};

bool IsDependentVar(VarDecl *VD) {
  if (VD->getType()->isDependentType())
    return true;
  if (const auto *Init = VD->getInit())
    return Init->isValueDependent();
  return false;
}

// Check whether a variable has an allowed initializer for a CUDA device side
// variable with global storage. \p VD may be a host variable to be checked for
// potential promotion to device side variable.
//
// CUDA/HIP allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
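//
// For example (hypothetical):
//   __device__ int A;        // OK: no initializer
//   __device__ int B = 42;   // OK: constant initializer
//   __shared__ int C = 42;   // rejected: __shared__ requires an empty init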
bool HasAllowedCUDADeviceStaticInitializer(Sema &S, VarDecl *VD,
                                           CUDAInitializerCheckKind CheckKind) {
  assert(!VD->isInvalidDecl() && VD->hasGlobalStorage());
  assert(!IsDependentVar(VD) && "do not check dependent var");
  const Expr *Init = VD->getInit();
  auto IsEmptyInit = [&](const Expr *Init) {
    if (!Init)
      return true;
    if (const auto *CE = dyn_cast<CXXConstructExpr>(Init)) {
      return S.isEmptyCudaConstructor(VD->getLocation(), CE->getConstructor());
    }
    return false;
  };
  auto IsConstantInit = [&](const Expr *Init) {
    assert(Init);
    return Init->isConstantInitializer(S.Context,
                                       VD->getType()->isReferenceType());
  };
  auto HasEmptyDtor = [&](VarDecl *VD) {
    if (const auto *RD = VD->getType()->getAsCXXRecordDecl())
      return S.isEmptyCudaDestructor(VD->getLocation(), RD->getDestructor());
    return true;
  };
  if (CheckKind == CICK_Shared)
    return IsEmptyInit(Init) && HasEmptyDtor(VD);
  return S.LangOpts.GPUAllowDeviceInit ||
         ((IsEmptyInit(Init) || IsConstantInit(Init)) && HasEmptyDtor(VD));
}
} // namespace

void Sema::checkAllowedCUDAInitializer(VarDecl *VD) {
  // Do not check dependent variables since the ctor/dtor/initializer are not
  // determined. Do it after instantiation.
  if (VD->isInvalidDecl() || !VD->hasInit() || !VD->hasGlobalStorage() ||
      IsDependentVar(VD))
    return;
  const Expr *Init = VD->getInit();
  bool IsSharedVar = VD->hasAttr<CUDASharedAttr>();
  bool IsDeviceOrConstantVar =
      !IsSharedVar &&
      (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>());
  if (IsDeviceOrConstantVar || IsSharedVar) {
    if (HasAllowedCUDADeviceStaticInitializer(
            *this, VD, IsSharedVar ? CICK_Shared : CICK_DeviceOrConstant))
      return;
    Diag(VD->getLocation(),
         IsSharedVar ? diag::err_shared_var_init : diag::err_dynamic_var_init)
        << Init->getSourceRange();
    VD->setInvalidDecl();
  } else {
    // This is a host-side global variable. Check that the initializer is
    // callable from the host side.
    const FunctionDecl *InitFn = nullptr;
    if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Init)) {
      InitFn = CE->getConstructor();
    } else if (const CallExpr *CE = dyn_cast<CallExpr>(Init)) {
      InitFn = CE->getDirectCallee();
    }
    if (InitFn) {
      CUDAFunctionTarget InitFnTarget = IdentifyCUDATarget(InitFn);
      if (InitFnTarget != CFT_Host && InitFnTarget != CFT_HostDevice) {
        Diag(VD->getLocation(), diag::err_ref_bad_target_global_initializer)
            << InitFnTarget << InitFn;
        Diag(InitFn->getLocation(), diag::note_previous_decl) << InitFn;
        VD->setInvalidDecl();
      }
    }
  }
}

// With -fcuda-host-device-constexpr, an unattributed constexpr function is
// treated as implicitly __host__ __device__, unless:
// * it is a variadic function (device-side variadic functions are not
//   allowed), or
// * a __device__ function with this signature was already declared, in which
//   case we output an error, unless the __device__ decl is in a system
//   header, in which case we leave the constexpr function unattributed.
//
// In addition, all function decls are treated as __host__ __device__ when
// ForceCUDAHostDeviceDepth > 0 (corresponding to code within a
//   #pragma clang force_cuda_host_device_begin/end
// pair).
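//
// For example (hypothetical), with -fcuda-host-device-constexpr:
//   constexpr int Square(int X) { return X * X; }     // implicitly HD
//   __device__ int Cube(int X);
//   constexpr int Cube(int X) { return X * X * X; }   // error: clashes with
//                                                     // the __device__ decl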
void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
                                       const LookupResult &Previous) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");

  if (ForceCUDAHostDeviceDepth > 0) {
    if (!NewD->hasAttr<CUDAHostAttr>())
      NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
    if (!NewD->hasAttr<CUDADeviceAttr>())
      NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    return;
  }

  if (!getLangOpts().CUDAHostDeviceConstexpr || !NewD->isConstexpr() ||
      NewD->isVariadic() || NewD->hasAttr<CUDAHostAttr>() ||
      NewD->hasAttr<CUDADeviceAttr>() || NewD->hasAttr<CUDAGlobalAttr>())
    return;

  // Is D a __device__ function with the same signature as NewD, ignoring CUDA
  // attributes?
  auto IsMatchingDeviceFn = [&](NamedDecl *D) {
    if (UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(D))
      D = Using->getTargetDecl();
    FunctionDecl *OldD = D->getAsFunction();
    return OldD && OldD->hasAttr<CUDADeviceAttr>() &&
           !OldD->hasAttr<CUDAHostAttr>() &&
           !IsOverload(NewD, OldD, /* UseMemberUsingDeclRules = */ false,
                       /* ConsiderCudaAttrs = */ false);
  };
  auto It = llvm::find_if(Previous, IsMatchingDeviceFn);
  if (It != Previous.end()) {
    // We found a __device__ function with the same name and signature as NewD
    // (ignoring CUDA attrs). This is an error unless that function is defined
    // in a system header, in which case we simply return without making NewD
    // host+device.
    NamedDecl *Match = *It;
    if (!getSourceManager().isInSystemHeader(Match->getLocation())) {
      Diag(NewD->getLocation(),
           diag::err_cuda_unattributed_constexpr_cannot_overload_device)
          << NewD;
      Diag(Match->getLocation(),
           diag::note_cuda_conflicting_device_function_declared_here);
    }
    return;
  }

  NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
  NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
}

// TODO: `__constant__` memory may be a limited resource for certain targets.
// A safeguard may be needed at the end of compilation pipeline if
// `__constant__` memory usage goes beyond limit.
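//
// For example (hypothetical), in device compilation the following may be
// implicitly promoted to `__constant__`:
//   constexpr float Pi = 3.14159f;
//   const int Table[] = {1, 2, 3};  // const-qualified, allowed initializer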
void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) {
  // Do not promote dependent variables since the ctor/dtor/initializer are
  // not determined. Do it after instantiation.
  if (getLangOpts().CUDAIsDevice && !VD->hasAttr<CUDAConstantAttr>() &&
      !VD->hasAttr<CUDASharedAttr>() &&
      (VD->isFileVarDecl() || VD->isStaticDataMember()) &&
      !IsDependentVar(VD) &&
      (VD->isConstexpr() || (VD->getType().isConstQualified() &&
                             HasAllowedCUDADeviceStaticInitializer(
                                 *this, VD, CICK_DeviceOrConstant)))) {
    VD->addAttr(CUDAConstantAttr::CreateImplicit(getASTContext()));
  }
}

Sema::SemaDiagnosticBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
                                                       unsigned DiagID) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  SemaDiagnosticBuilder::Kind DiagKind = [&] {
    if (!isa<FunctionDecl>(CurContext))
      return SemaDiagnosticBuilder::K_Nop;
    switch (CurrentCUDATarget()) {
    case CFT_Global:
    case CFT_Device:
      return SemaDiagnosticBuilder::K_Immediate;
    case CFT_HostDevice:
      // An HD function counts as host code if we're compiling for host, and
      // device code if we're compiling for device.  Defer any errors in device
      // mode until the function is known-emitted.
      if (!getLangOpts().CUDAIsDevice)
        return SemaDiagnosticBuilder::K_Nop;
      if (IsLastErrorImmediate &&
          Diags.getDiagnosticIDs()->isBuiltinNote(DiagID))
        return SemaDiagnosticBuilder::K_Immediate;
      return (getEmissionStatus(cast<FunctionDecl>(CurContext)) ==
              FunctionEmissionStatus::Emitted)
                 ? SemaDiagnosticBuilder::K_ImmediateWithCallStack
                 : SemaDiagnosticBuilder::K_Deferred;
    default:
      return SemaDiagnosticBuilder::K_Nop;
    }
  }();
  return SemaDiagnosticBuilder(DiagKind, Loc, DiagID,
                               dyn_cast<FunctionDecl>(CurContext), *this);
}

Sema::SemaDiagnosticBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
                                                     unsigned DiagID) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  SemaDiagnosticBuilder::Kind DiagKind = [&] {
    if (!isa<FunctionDecl>(CurContext))
      return SemaDiagnosticBuilder::K_Nop;
    switch (CurrentCUDATarget()) {
    case CFT_Host:
      return SemaDiagnosticBuilder::K_Immediate;
    case CFT_HostDevice:
      // An HD function counts as host code if we're compiling for host, and
      // device code if we're compiling for device.  Defer any errors in host
      // mode until the function is known-emitted.
      if (getLangOpts().CUDAIsDevice)
        return SemaDiagnosticBuilder::K_Nop;
      if (IsLastErrorImmediate &&
          Diags.getDiagnosticIDs()->isBuiltinNote(DiagID))
        return SemaDiagnosticBuilder::K_Immediate;
      return (getEmissionStatus(cast<FunctionDecl>(CurContext)) ==
              FunctionEmissionStatus::Emitted)
                 ? SemaDiagnosticBuilder::K_ImmediateWithCallStack
                 : SemaDiagnosticBuilder::K_Deferred;
    default:
      return SemaDiagnosticBuilder::K_Nop;
    }
  }();
  return SemaDiagnosticBuilder(DiagKind, Loc, DiagID,
                               dyn_cast<FunctionDecl>(CurContext), *this);
}

bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  assert(Callee && "Callee may not be null.");

  auto &ExprEvalCtx = ExprEvalContexts.back();
  if (ExprEvalCtx.isUnevaluated() || ExprEvalCtx.isConstantEvaluated())
    return true;

  // FIXME: Is bailing out early correct here? Should we instead assume that
  // the caller is a global initializer?
  FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
  if (!Caller)
    return true;

  // If the caller is known-emitted, mark the callee as known-emitted.
  // Otherwise, mark the call in our call graph so we can traverse it later.
  bool CallerKnownEmitted =
      getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted;
  SemaDiagnosticBuilder::Kind DiagKind = [this, Caller, Callee,
                                          CallerKnownEmitted] {
    switch (IdentifyCUDAPreference(Caller, Callee)) {
    case CFP_Never:
    case CFP_WrongSide:
      assert(Caller && "Never/wrongSide calls require a non-null caller");
      // If we know the caller will be emitted, we know this wrong-side call
      // will be emitted, so it's an immediate error.  Otherwise, defer the
      // error until we know the caller is emitted.
      return CallerKnownEmitted
                 ? SemaDiagnosticBuilder::K_ImmediateWithCallStack
                 : SemaDiagnosticBuilder::K_Deferred;
    default:
      return SemaDiagnosticBuilder::K_Nop;
    }
  }();

  if (DiagKind == SemaDiagnosticBuilder::K_Nop)
    return true;

  // Avoid emitting this error twice for the same location.  Using a hashtable
  // like this is unfortunate, but because we must continue parsing as normal
  // after encountering a deferred error, it's otherwise very tricky for us to
  // ensure that we only emit this deferred error once.
  if (!LocsWithCUDACallDiags.insert({Caller, Loc}).second)
    return true;

  SemaDiagnosticBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, *this)
      << IdentifyCUDATarget(Callee) << /*function*/ 0 << Callee
      << IdentifyCUDATarget(Caller);
  if (!Callee->getBuiltinID())
    SemaDiagnosticBuilder(DiagKind, Callee->getLocation(),
                          diag::note_previous_decl, Caller, *this)
        << Callee;
  return DiagKind != SemaDiagnosticBuilder::K_Immediate &&
         DiagKind != SemaDiagnosticBuilder::K_ImmediateWithCallStack;
}

// Check the wrong-sided reference capture of a lambda for CUDA/HIP.
// A lambda function may capture a stack variable by reference when it is
// defined and then use that capture when the lambda is called. When
// the capture and the use happen on different sides, the capture is invalid
// and should be diagnosed.
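// For example (hypothetical; launch_kernel is a placeholder):
//   void host_fn() {
//     int X = 42;
//     auto L = [&X] __device__ () { return X; };  // bad: X lives on the
//     launch_kernel(L);                           // host stack
//   }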
void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee,
                                  const sema::Capture &Capture) {
  // In host compilation we only need to check lambda functions emitted on the
  // host side. In such lambda functions, a reference capture is invalid only
  // if the lambda structure is populated by a device function or kernel and
  // then passed to and called by a host function. However, that is impossible:
  // a device function or kernel can only call a device function, and a kernel
  // cannot pass a lambda back to a host function, since we cannot define a
  // kernel argument type able to hold the lambda before the lambda itself is
  // defined.
  if (!LangOpts.CUDAIsDevice)
    return;

  // A file-scope lambda can only do init captures for global variables, which
  // results in passing those variables by value.
  FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
  if (!Caller)
    return;

  // In device compilation, we only need to check lambda functions which are
  // emitted on device side. For such lambdas, a reference capture is invalid
  // only if the lambda structure is populated by a host function then passed
  // to and called in a device function or kernel.
  bool CalleeIsDevice = Callee->hasAttr<CUDADeviceAttr>();
  bool CallerIsHost =
      !Caller->hasAttr<CUDAGlobalAttr>() && !Caller->hasAttr<CUDADeviceAttr>();
  bool ShouldCheck = CalleeIsDevice && CallerIsHost;
  if (!ShouldCheck || !Capture.isReferenceCapture())
    return;
  auto DiagKind = SemaDiagnosticBuilder::K_Deferred;
  if (Capture.isVariableCapture()) {
    SemaDiagnosticBuilder(DiagKind, Capture.getLocation(),
                          diag::err_capture_bad_target, Callee, *this)
        << Capture.getVariable();
  } else if (Capture.isThisCapture()) {
    SemaDiagnosticBuilder(DiagKind, Capture.getLocation(),
                          diag::err_capture_bad_target_this_ptr, Callee,
                          *this);
  }
  return;
}

void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  if (Method->hasAttr<CUDAHostAttr>() || Method->hasAttr<CUDADeviceAttr>())
    return;
  Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
  Method->addAttr(CUDAHostAttr::CreateImplicit(Context));
}

void Sema::checkCUDATargetOverload(FunctionDecl *NewFD,
                                   const LookupResult &Previous) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  CUDAFunctionTarget NewTarget = IdentifyCUDATarget(NewFD);
  for (NamedDecl *OldND : Previous) {
    FunctionDecl *OldFD = OldND->getAsFunction();
    if (!OldFD)
      continue;

    CUDAFunctionTarget OldTarget = IdentifyCUDATarget(OldFD);
    // Don't allow HD and global functions to overload other functions with the
    // same signature.  We allow overloading based on CUDA attributes so that
    // functions can have different implementations on the host and device, but
    // HD/global functions "exist" in some sense on both the host and device,
    // so should have the same implementation on both sides.
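    // For example (hypothetical):
    //   __host__ void F();
    //   __host__ __device__ void F();  // error: same signature as host F()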
    if (NewTarget != OldTarget &&
        ((NewTarget == CFT_HostDevice) || (OldTarget == CFT_HostDevice) ||
         (NewTarget == CFT_Global) || (OldTarget == CFT_Global)) &&
        !IsOverload(NewFD, OldFD, /* UseMemberUsingDeclRules = */ false,
                    /* ConsiderCudaAttrs = */ false)) {
      Diag(NewFD->getLocation(), diag::err_cuda_ovl_target)
          << NewTarget << NewFD->getDeclName() << OldTarget << OldFD;
      Diag(OldFD->getLocation(), diag::note_previous_declaration);
      NewFD->setInvalidDecl();
      break;
    }
  }
}

template <typename AttrTy>
static void copyAttrIfPresent(Sema &S, FunctionDecl *FD,
                              const FunctionDecl &TemplateFD) {
  if (AttrTy *Attribute = TemplateFD.getAttr<AttrTy>()) {
    AttrTy *Clone = Attribute->clone(S.Context);
    Clone->setInherited(true);
    FD->addAttr(Clone);
  }
}

void Sema::inheritCUDATargetAttrs(FunctionDecl *FD,
                                  const FunctionTemplateDecl &TD) {
  const FunctionDecl &TemplateFD = *TD.getTemplatedDecl();
  copyAttrIfPresent<CUDAGlobalAttr>(*this, FD, TemplateFD);
  copyAttrIfPresent<CUDAHostAttr>(*this, FD, TemplateFD);
  copyAttrIfPresent<CUDADeviceAttr>(*this, FD, TemplateFD);
}

std::string Sema::getCudaConfigureFuncName() const {
  if (getLangOpts().HIP)
    return getLangOpts().HIPUseNewLaunchAPI ? "__hipPushCallConfiguration"
                                            : "hipConfigureCall";

  // New CUDA kernel launch sequence.
  if (CudaFeatureEnabled(Context.getTargetInfo().getSDKVersion(),
                         CudaFeature::CUDA_USES_NEW_LAUNCH))
    return "__cudaPushCallConfiguration";

  // Legacy CUDA kernel configuration call
  return "cudaConfigureCall";
}