//===---- CGOpenMPRuntimeNVPTX.cpp - Interface to OpenMP NVPTX Runtimes ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation specialized to NVPTX
// targets.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntimeNVPTX.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Cuda.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/IntrinsicsNVPTX.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;
namespace {
enum OpenMPRTLFunctionNVPTX {
  /// Call to void __kmpc_kernel_init(kmp_int32 thread_limit,
  /// int16_t RequiresOMPRuntime);
  OMPRTL_NVPTX__kmpc_kernel_init,
  /// Call to void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
  OMPRTL_NVPTX__kmpc_kernel_deinit,
  /// Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
  /// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
  OMPRTL_NVPTX__kmpc_spmd_kernel_init,
  /// Call to void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
  OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2,
  /// Call to void __kmpc_kernel_prepare_parallel(void
  /// *outlined_function);
  OMPRTL_NVPTX__kmpc_kernel_prepare_parallel,
  /// Call to bool __kmpc_kernel_parallel(void **outlined_function);
  OMPRTL_NVPTX__kmpc_kernel_parallel,
  /// Call to void __kmpc_kernel_end_parallel();
  OMPRTL_NVPTX__kmpc_kernel_end_parallel,
  /// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_serialized_parallel,
  /// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_end_serialized_parallel,
  /// Call to int32_t __kmpc_shuffle_int32(int32_t element,
  /// int16_t lane_offset, int16_t warp_size);
  OMPRTL_NVPTX__kmpc_shuffle_int32,
  /// Call to int64_t __kmpc_shuffle_int64(int64_t element,
  /// int16_t lane_offset, int16_t warp_size);
  OMPRTL_NVPTX__kmpc_shuffle_int64,
  /// Call to __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
  /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
  /// lane_offset, int16_t shortCircuit),
  /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
  OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2,
  /// Call to __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc, kmp_int32
  /// global_tid, void *global_buffer, int32_t num_of_records, void*
  /// reduce_data,
  /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
  /// lane_offset, int16_t shortCircuit),
  /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
  /// (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
  /// void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
  /// void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
  /// int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
  /// *buffer, int idx, void *reduce_data));
  OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2,
  /// Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
  OMPRTL_NVPTX__kmpc_end_reduce_nowait,
  /// Call to void __kmpc_data_sharing_init_stack();
  OMPRTL_NVPTX__kmpc_data_sharing_init_stack,
  /// Call to void __kmpc_data_sharing_init_stack_spmd();
  OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd,
  /// Call to void* __kmpc_data_sharing_coalesced_push_stack(size_t size,
  /// int16_t UseSharedMemory);
  OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack,
  /// Call to void* __kmpc_data_sharing_push_stack(size_t size, int16_t
  /// UseSharedMemory);
  OMPRTL_NVPTX__kmpc_data_sharing_push_stack,
  /// Call to void __kmpc_data_sharing_pop_stack(void *a);
  OMPRTL_NVPTX__kmpc_data_sharing_pop_stack,
  /// Call to void __kmpc_begin_sharing_variables(void ***args,
  /// size_t n_args);
  OMPRTL_NVPTX__kmpc_begin_sharing_variables,
  /// Call to void __kmpc_end_sharing_variables();
  OMPRTL_NVPTX__kmpc_end_sharing_variables,
  /// Call to void __kmpc_get_shared_variables(void ***GlobalArgs);
  OMPRTL_NVPTX__kmpc_get_shared_variables,
  /// Call to uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_parallel_level,
  /// Call to int8_t __kmpc_is_spmd_exec_mode();
  OMPRTL_NVPTX__kmpc_is_spmd_exec_mode,
  /// Call to void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
  /// const void *buf, size_t size, int16_t is_shared, const void **res);
  OMPRTL_NVPTX__kmpc_get_team_static_memory,
  /// Call to void __kmpc_restore_team_static_memory(int16_t
  /// isSPMDExecutionMode, int16_t is_shared);
  OMPRTL_NVPTX__kmpc_restore_team_static_memory,
  /// Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_barrier,
  /// Call to void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL__kmpc_barrier_simple_spmd,
  /// Call to int32_t __kmpc_warp_active_thread_mask(void);
  OMPRTL_NVPTX__kmpc_warp_active_thread_mask,
  /// Call to void __kmpc_syncwarp(int32_t Mask);
  OMPRTL_NVPTX__kmpc_syncwarp,
};
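
// Each enumerator above is resolved to an llvm::FunctionCallee by
// createNVPTXRuntimeFunction (defined later in this file) and invoked through
// CGF.EmitRuntimeCall. A minimal sketch of the pattern, assuming an ident_t
// location and a thread id are already available:
//   llvm::Value *Args[] = {RTLoc, ThreadID};
//   CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier),
//                       Args);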

/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
  llvm::FunctionCallee EnterCallee = nullptr;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::FunctionCallee ExitCallee = nullptr;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional = false;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  NVPTXActionTy(llvm::FunctionCallee EnterCallee,
                ArrayRef<llvm::Value *> EnterArgs,
                llvm::FunctionCallee ExitCallee,
                ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};
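
// A typical use brackets region codegen with paired runtime calls; roughly
// (a sketch, mirroring how serialized parallel regions are emitted):
//   llvm::Value *Args[] = {RTLoc, ThreadID};
//   NVPTXActionTy Action(
//       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
//       Args,
//       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
//       Args);
//   CodeGen.setAction(Action);
//   CodeGen(CGF);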

/// A class to track the execution mode when codegening directives within
/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by containing directives such as 'parallel'
/// to emit optimized code.
class ExecutionRuntimeModesRAII {
private:
  CGOpenMPRuntimeNVPTX::ExecutionMode SavedExecMode =
      CGOpenMPRuntimeNVPTX::EM_Unknown;
  CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode;
  bool SavedRuntimeMode = false;
  bool *RuntimeMode = nullptr;

public:
  /// Constructor for Non-SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode)
      : ExecMode(ExecMode) {
    SavedExecMode = ExecMode;
    ExecMode = CGOpenMPRuntimeNVPTX::EM_NonSPMD;
  }
  /// Constructor for SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode,
                            bool &RuntimeMode, bool FullRuntimeMode)
      : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
    SavedExecMode = ExecMode;
    SavedRuntimeMode = RuntimeMode;
    ExecMode = CGOpenMPRuntimeNVPTX::EM_SPMD;
    RuntimeMode = FullRuntimeMode;
  }
  ~ExecutionRuntimeModesRAII() {
    ExecMode = SavedExecMode;
    if (RuntimeMode)
      *RuntimeMode = SavedRuntimeMode;
  }
};

/// GPU Configuration: This information can be derived from CUDA special
/// registers; however, providing compile-time constants helps generate more
/// efficient code. For all practical purposes this is fine because the
/// configuration is the same for all known NVPTX architectures.
enum MachineConfiguration : unsigned {
  WarpSize = 32,
  /// Number of bits required to represent a lane identifier, which is
  /// computed as log_2(WarpSize).
  LaneIDBits = 5,
  LaneIDMask = WarpSize - 1,

  /// Global memory alignment for performance.
  GlobalMemoryAlignment = 128,

  /// Maximal size of the shared memory buffer.
  SharedMemorySize = 128,
};
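
// For example, with WarpSize == 32 a thread with id 70 belongs to warp
// 70 >> LaneIDBits == 2 and occupies lane 70 & LaneIDMask == 6; see
// getNVPTXWarpID/getNVPTXLaneID below.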

static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
  RefExpr = RefExpr->IgnoreParens();
  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
    const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
    const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
      Base = TempOASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  }
  RefExpr = RefExpr->IgnoreParenImpCasts();
  if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
    return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
  const auto *ME = cast<MemberExpr>(RefExpr);
  return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
}
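
// E.g. for a clause item spelled 'a[0][2:4]', getPrivateItem strips the array
// subscripts and the array section and returns the canonical ValueDecl of 'a'.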

static RecordDecl *buildRecordForGlobalizedVars(
    ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
    ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields, int BufSize) {
  using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
  if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
    return nullptr;
  SmallVector<VarsDataTy, 4> GlobalizedVars;
  for (const ValueDecl *D : EscapedDecls)
    GlobalizedVars.emplace_back(
        CharUnits::fromQuantity(std::max(
            C.getDeclAlign(D).getQuantity(),
            static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
        D);
  for (const ValueDecl *D : EscapedDeclsForTeams)
    GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
  llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
    return L.first > R.first;
  });

  // Build struct _globalized_locals_ty {
  //         /* globalized vars */[WarpSize] align (max(decl_align,
  //         GlobalMemoryAlignment))
  //         /* globalized vars */ for EscapedDeclsForTeams
  //       };
  RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
  GlobalizedRD->startDefinition();
  llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
      EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
  for (const auto &Pair : GlobalizedVars) {
    const ValueDecl *VD = Pair.second;
    QualType Type = VD->getType();
    if (Type->isLValueReferenceType())
      Type = C.getPointerType(Type.getNonReferenceType());
    else
      Type = Type.getNonReferenceType();
    SourceLocation Loc = VD->getLocation();
    FieldDecl *Field;
    if (SingleEscaped.count(VD)) {
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (VD->hasAttrs()) {
        for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
             E(VD->getAttrs().end());
             I != E; ++I)
          Field->addAttr(*I);
      }
    } else {
      llvm::APInt ArraySize(32, BufSize);
      Type = C.getConstantArrayType(Type, ArraySize, nullptr,
                                    ArrayType::Normal, /*IndexTypeQuals=*/0);
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
                                     static_cast<CharUnits::QuantityType>(
                                         GlobalMemoryAlignment)));
      Field->addAttr(AlignedAttr::CreateImplicit(
          C, /*IsAlignmentExpr=*/true,
          IntegerLiteral::Create(C, Align,
                                 C.getIntTypeForBitwidth(32, /*Signed=*/0),
                                 SourceLocation()),
          {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
    }
    GlobalizedRD->addDecl(Field);
    MappedDeclsFields.try_emplace(VD, Field);
  }
  GlobalizedRD->completeDefinition();
  return GlobalizedRD;
}
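
// Roughly, globalizing an escaped 'int x' from a parallel region (an entry in
// EscapedDecls, with BufSize == WarpSize) produces a field
//   int x[32] __attribute__((aligned(128)));
// whereas a teams-level escapee (EscapedDeclsForTeams) keeps its scalar type
// and its original alignment attributes.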

/// Get the list of variables that can escape their declaration context.
class CheckVarsEscapingDeclContext final
    : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
  CodeGenFunction &CGF;
  llvm::SetVector<const ValueDecl *> EscapedDecls;
  llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
  llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
  RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  bool AllEscaped = false;
  bool IsForCombinedParallelRegion = false;

  void markAsEscaped(const ValueDecl *VD) {
    // Do not globalize declare target variables.
    if (!isa<VarDecl>(VD) ||
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
      return;
    VD = cast<ValueDecl>(VD->getCanonicalDecl());
    // Use user-specified allocation.
    if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
      return;
    // Variables captured by value must be globalized.
    if (auto *CSI = CGF.CapturedStmtInfo) {
      if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
        // Check if we need to capture the variable that was already captured
        // by value in the outer region.
        if (!IsForCombinedParallelRegion) {
          if (!FD->hasAttrs())
            return;
          const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
          if (!Attr)
            return;
          if (((Attr->getCaptureKind() != OMPC_map) &&
               !isOpenMPPrivate(Attr->getCaptureKind())) ||
              ((Attr->getCaptureKind() == OMPC_map) &&
               !FD->getType()->isAnyPointerType()))
            return;
        }
        if (!FD->getType()->isReferenceType()) {
          assert(!VD->getType()->isVariablyModifiedType() &&
                 "Parameter captured by value with variably modified type");
          EscapedParameters.insert(VD);
        } else if (!IsForCombinedParallelRegion) {
          return;
        }
      }
    }
    if ((!CGF.CapturedStmtInfo ||
         (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
        VD->getType()->isReferenceType())
      // Do not globalize variables with reference type.
      return;
    if (VD->getType()->isVariablyModifiedType())
      EscapedVariableLengthDecls.insert(VD);
    else
      EscapedDecls.insert(VD);
  }

  void VisitValueDecl(const ValueDecl *VD) {
    if (VD->getType()->isLValueReferenceType())
      markAsEscaped(VD);
    if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
      if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = VD->getType()->isLValueReferenceType();
        Visit(VarD->getInit());
        AllEscaped = SavedAllEscaped;
      }
    }
  }
  void VisitOpenMPCapturedStmt(const CapturedStmt *S,
                               ArrayRef<OMPClause *> Clauses,
                               bool IsCombinedParallelRegion) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
        if (IsCombinedParallelRegion) {
          // Check if the variable is privatized in the combined construct and
          // those private copies must be shared in the inner parallel
          // directive.
          IsForCombinedParallelRegion = false;
          for (const OMPClause *C : Clauses) {
            if (!isOpenMPPrivate(C->getClauseKind()) ||
                C->getClauseKind() == OMPC_reduction ||
                C->getClauseKind() == OMPC_linear ||
                C->getClauseKind() == OMPC_private)
              continue;
            ArrayRef<const Expr *> Vars;
            if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
              Vars = PC->getVarRefs();
            else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
              Vars = PC->getVarRefs();
            else
              llvm_unreachable("Unexpected clause.");
            for (const auto *E : Vars) {
              const Decl *D =
                  cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
              if (D == VD->getCanonicalDecl()) {
                IsForCombinedParallelRegion = true;
                break;
              }
            }
            if (IsForCombinedParallelRegion)
              break;
          }
        }
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
        IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
      }
    }
  }

  void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
    assert(!GlobalizedRD &&
           "Record for globalized variables is built already.");
    ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
    if (IsInTTDRegion)
      EscapedDeclsForTeams = EscapedDecls.getArrayRef();
    else
      EscapedDeclsForParallel = EscapedDecls.getArrayRef();
    GlobalizedRD = ::buildRecordForGlobalizedVars(
        CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
        MappedDeclsFields, WarpSize);
  }

public:
  CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
                               ArrayRef<const ValueDecl *> TeamsReductions)
      : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
  }
  virtual ~CheckVarsEscapingDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    if (!S)
      return;
    for (const Decl *D : S->decls())
      if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
        VisitValueDecl(VD);
  }
  void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
    if (!D)
      return;
    if (!D->hasAssociatedStmt())
      return;
    if (const auto *S =
            dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
      // Do not analyze directives that do not actually require capturing,
      // like `omp for` or `omp simd` directives.
      llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
      getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
      if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
        VisitStmt(S->getCapturedStmt());
        return;
      }
      VisitOpenMPCapturedStmt(
          S, D->clauses(),
          CaptureRegions.back() == OMPD_parallel &&
              isOpenMPDistributeDirective(D->getDirectiveKind()));
    }
  }
  void VisitCapturedStmt(const CapturedStmt *S) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
      }
    }
  }
  void VisitLambdaExpr(const LambdaExpr *E) {
    if (!E)
      return;
    for (const LambdaCapture &C : E->captures()) {
      if (C.capturesVariable()) {
        if (C.getCaptureKind() == LCK_ByRef) {
          const ValueDecl *VD = C.getCapturedVar();
          markAsEscaped(VD);
          if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
            VisitValueDecl(VD);
        }
      }
    }
  }
  void VisitBlockExpr(const BlockExpr *E) {
    if (!E)
      return;
    for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
      if (C.isByRef()) {
        const VarDecl *VD = C.getVariable();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
          VisitValueDecl(VD);
      }
    }
  }
  void VisitCallExpr(const CallExpr *E) {
    if (!E)
      return;
    for (const Expr *Arg : E->arguments()) {
      if (!Arg)
        continue;
      if (Arg->isLValue()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = true;
        Visit(Arg);
        AllEscaped = SavedAllEscaped;
      } else {
        Visit(Arg);
      }
    }
    Visit(E->getCallee());
  }
  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (!E)
      return;
    const ValueDecl *VD = E->getDecl();
    if (AllEscaped)
      markAsEscaped(VD);
    if (isa<OMPCapturedExprDecl>(VD))
      VisitValueDecl(VD);
    else if (const auto *VarD = dyn_cast<VarDecl>(VD))
      if (VarD->isInitCapture())
        VisitValueDecl(VD);
  }
  void VisitUnaryOperator(const UnaryOperator *E) {
    if (!E)
      return;
    if (E->getOpcode() == UO_AddrOf) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
    if (!E)
      return;
    if (E->getCastKind() == CK_ArrayToPointerDecay) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitExpr(const Expr *E) {
    if (!E)
      return;
    bool SavedAllEscaped = AllEscaped;
    if (!E->isLValue())
      AllEscaped = false;
    for (const Stmt *Child : E->children())
      if (Child)
        Visit(Child);
    AllEscaped = SavedAllEscaped;
  }
  void VisitStmt(const Stmt *S) {
    if (!S)
      return;
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }

  /// Returns the record that handles all the escaped local variables and is
  /// used instead of their original storage.
  const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
    if (!GlobalizedRD)
      buildRecordForGlobalizedVars(IsInTTDRegion);
    return GlobalizedRD;
  }

  /// Returns the field in the globalized record for the escaped variable.
  const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
    assert(GlobalizedRD &&
           "Record for globalized variables must be generated already.");
    auto I = MappedDeclsFields.find(VD);
    if (I == MappedDeclsFields.end())
      return nullptr;
    return I->getSecond();
  }

  /// Returns the list of the escaped local variables/parameters.
  ArrayRef<const ValueDecl *> getEscapedDecls() const {
    return EscapedDecls.getArrayRef();
  }

  /// Returns the set of escaped local variables that are actually parameters
  /// passed by value.
  const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
    return EscapedParameters;
  }

  /// Returns the list of the escaped variables with the variably modified
  /// types.
  ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
    return EscapedVariableLengthDecls.getArrayRef();
  }
};
} // anonymous namespace

/// Get the GPU warp size.
static llvm::Value *getNVPTXWarpSize(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize),
      "nvptx_warp_size");
}

/// Get the id of the current thread on the GPU.
static llvm::Value *getNVPTXThreadID(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x),
      "nvptx_tid");
}

/// Get the id of the warp in the block.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  return Bld.CreateAShr(getNVPTXThreadID(CGF), LaneIDBits, "nvptx_warp_id");
}

/// Get the id of the current lane in the Warp.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  return Bld.CreateAnd(getNVPTXThreadID(CGF), Bld.getInt32(LaneIDMask),
                       "nvptx_lane_id");
}

/// Get the maximum number of threads in a block of the GPU.
static llvm::Value *getNVPTXNumThreads(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x),
      "nvptx_num_threads");
}

/// Get the value of the thread_limit clause in the teams directive.
/// For the 'generic' execution mode, the runtime encodes thread_limit in
/// the launch parameters, always starting thread_limit+warpSize threads per
/// CTA. The threads in the last warp are reserved for master execution.
/// For the 'spmd' execution mode, all threads in a CTA are part of the team.
static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
                                   bool IsInSPMDExecutionMode = false) {
  CGBuilderTy &Bld = CGF.Builder;
  return IsInSPMDExecutionMode
             ? getNVPTXNumThreads(CGF)
             : Bld.CreateNUWSub(getNVPTXNumThreads(CGF), getNVPTXWarpSize(CGF),
                                "thread_limit");
}

/// Get the thread id of the OMP master thread.
/// The master thread id is the first thread (lane) of the last warp in the
/// GPU block. Warp size is assumed to be some power of 2.
/// Thread id is 0 indexed.
/// E.g: If NumThreads is 33, master id is 32.
///      If NumThreads is 64, master id is 32.
///      If NumThreads is 1024, master id is 992.
static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  llvm::Value *NumThreads = getNVPTXNumThreads(CGF);

  // We assume that the warp size is a power of 2.
  llvm::Value *Mask = Bld.CreateNUWSub(getNVPTXWarpSize(CGF), Bld.getInt32(1));

  return Bld.CreateAnd(Bld.CreateNUWSub(NumThreads, Bld.getInt32(1)),
                       Bld.CreateNot(Mask), "master_tid");
}
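
// The emitted computation is (NumThreads - 1) & ~(WarpSize - 1); e.g. for
// NumThreads == 33 this is 32 & ~31 == 32, and for NumThreads == 1024 it is
// 1023 & ~31 == 992, matching the examples above.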

CGOpenMPRuntimeNVPTX::WorkerFunctionState::WorkerFunctionState(
    CodeGenModule &CGM, SourceLocation Loc)
    : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()),
      Loc(Loc) {
  createWorkerFunction(CGM);
}

void CGOpenMPRuntimeNVPTX::WorkerFunctionState::createWorkerFunction(
    CodeGenModule &CGM) {
  // Create a worker function with no arguments.

  WorkerFn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      /*placeholder=*/"_worker", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI);
  WorkerFn->setDoesNotRecurse();
}

CGOpenMPRuntimeNVPTX::ExecutionMode
CGOpenMPRuntimeNVPTX::getExecutionMode() const {
  return CurrentExecutionMode;
}

static CGOpenMPRuntimeNVPTX::DataSharingMode
getDataSharingMode(CodeGenModule &CGM) {
  return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeNVPTX::CUDA
                                          : CGOpenMPRuntimeNVPTX::Generic;
}

/// Check for inner (nested) SPMD construct, if any.
static bool hasNestedSPMDDirective(ASTContext &Ctx,
                                   const OMPExecutableDirective &D) {
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind))
        return true;
      if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind))
            return true;
        }
      }
      return false;
    case OMPD_target_teams:
      return isOpenMPParallelDirective(DKind);
    case OMPD_target_simd:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}
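
// For instance, the following target region is classified as SPMD-capable
// because a parallel directive is the single construct nested in the teams:
//   #pragma omp target
//   #pragma omp teams
//   #pragma omp parallel for
//   for (int i = 0; i < N; ++i) ...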

static bool supportsSPMDExecutionMode(ASTContext &Ctx,
                                      const OMPExecutableDirective &D) {
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
    return hasNestedSPMDDirective(Ctx, D);
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}

/// Check if the directive is a loop-based directive with no 'ordered' clause
/// and either no 'schedule' clause at all or static scheduling.
static bool hasStaticScheduling(const OMPExecutableDirective &D) {
  assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
         isOpenMPLoopDirective(D.getDirectiveKind()) &&
         "Expected loop-based directive.");
  return !D.hasClausesOfKind<OMPOrderedClause>() &&
         (!D.hasClausesOfKind<OMPScheduleClause>() ||
          llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
                       [](const OMPScheduleClause *C) {
                         return C->getScheduleKind() == OMPC_SCHEDULE_static;
                       }));
}
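
// E.g. '#pragma omp parallel for' (no schedule clause) and
// '#pragma omp parallel for schedule(static)' qualify, while a
// 'schedule(dynamic)' or an 'ordered' clause makes this return false.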

/// Check for inner (nested) lightweight runtime construct, if any.
static bool hasNestedLightweightDirective(ASTContext &Ctx,
                                          const OMPExecutableDirective &D) {
  assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      } else if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind) &&
              isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
          if (DKind == OMPD_parallel) {
            Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true);
            if (!Body)
              return false;
            ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
            if (const auto *NND =
                    dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
              DKind = NND->getDirectiveKind();
              if (isOpenMPWorksharingDirective(DKind) &&
                  isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
                return true;
            }
          }
        }
      }
      return false;
    case OMPD_target_teams:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      }
      return false;
    case OMPD_target_parallel:
      if (DKind == OMPD_simd)
        return true;
      return isOpenMPWorksharingDirective(DKind) &&
             isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
    case OMPD_target_teams_distribute:
    case OMPD_target_simd:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}
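
// For example, under '#pragma omp target teams' a nested
// '#pragma omp distribute parallel for schedule(static)' permits the
// lightweight runtime, as does a nested 'simd' or 'distribute simd' directive.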

/// Checks if the construct supports lightweight runtime. It must be SPMD
/// construct + inner loop-based construct with static scheduling.
static bool supportsLightweightRuntime(ASTContext &Ctx,
                                       const OMPExecutableDirective &D) {
  if (!supportsSPMDExecutionMode(Ctx, D))
    return false;
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
  case OMPD_target_parallel:
    return hasNestedLightweightDirective(Ctx, D);
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
    // (Last|First)-privates must be shared in parallel region.
    return hasStaticScheduling(D);
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}

void CGOpenMPRuntimeNVPTX::emitNonSPMDKernel(const OMPExecutableDirective &D,
                                             StringRef ParentName,
                                             llvm::Function *&OutlinedFn,
                                             llvm::Constant *&OutlinedFnID,
                                             bool IsOffloadEntry,
                                             const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
  EntryFunctionState EST;
  WorkerFunctionState WST(CGM, D.getBeginLoc());
  Work.clear();
  WrapperFunctionsMap.clear();

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
    CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
                         CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
        : EST(EST), WST(WST) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
      RT.emitNonSPMDEntryHeader(CGF, EST, WST);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitNonSPMDEntryFooter(CGF, EST);
    }
  } Action(EST, WST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  // Reserve place for the globalized memory.
  GlobalizedRecords.emplace_back();
  if (!KernelStaticGlobalized) {
    KernelStaticGlobalized = new llvm::GlobalVariable(
        CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
        llvm::GlobalValue::InternalLinkage,
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
        "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
        llvm::GlobalValue::NotThreadLocal,
        CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
  }
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;

  // Now change the name of the worker function to correspond to this target
  // region's entry function.
  WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker"));

  // Create the worker function.
  emitWorkerFunction(WST);
}

// Set up NVPTX threads for the master-worker OpenMP scheme.
void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryHeader(CodeGenFunction &CGF,
                                                  EntryFunctionState &EST,
                                                  WorkerFunctionState &WST) {
  CGBuilderTy &Bld = CGF.Builder;

  llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
  llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
  llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::Value *IsWorker =
      Bld.CreateICmpULT(getNVPTXThreadID(CGF), getThreadLimit(CGF));
  Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);

  CGF.EmitBlock(WorkerBB);
  emitCall(CGF, WST.Loc, WST.WorkerFn);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(MasterCheckBB);
  llvm::Value *IsMaster =
      Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
  Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);

  CGF.EmitBlock(MasterBB);
  IsInTargetMasterThreadRegion = true;
  // SEQUENTIAL (MASTER) REGION START
  // First action in sequential region:
  // Initialize the state of the OpenMP runtime library on the GPU.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {getThreadLimit(CGF),
                         Bld.getInt16(/*RequiresOMPRuntime=*/1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);

  // For data sharing, we need to initialize the stack.
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(
          OMPRTL_NVPTX__kmpc_data_sharing_init_stack));

  emitGenericVarsProlog(CGF, WST.Loc);
}
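
// Schematically, the emitted entry header behaves like:
//   if (tid < thread_limit) { worker(); goto exit; }   // .worker
//   if (tid != master_tid) goto exit;                  // .mastercheck
//   __kmpc_kernel_init(thread_limit, /*RequiresOMPRuntime=*/1);  // .master
//   __kmpc_data_sharing_init_stack();
//   ... master (sequential) region ...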

void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryFooter(CodeGenFunction &CGF,
                                                  EntryFunctionState &EST) {
  IsInTargetMasterThreadRegion = false;
  if (!CGF.HaveInsertPoint())
    return;

  emitGenericVarsEpilog(CGF);

  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
  CGF.EmitBranch(TerminateBB);

  CGF.EmitBlock(TerminateBB);
  // Signal termination condition.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_deinit), Args);
  // Barrier to terminate worker threads.
  syncCTAThreads(CGF);
  // Master thread jumps to exit point.
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(EST.ExitBB);
  EST.ExitBB = nullptr;
}

void CGOpenMPRuntimeNVPTX::emitSPMDKernel(const OMPExecutableDirective &D,
                                          StringRef ParentName,
                                          llvm::Function *&OutlinedFn,
                                          llvm::Constant *&OutlinedFnID,
                                          bool IsOffloadEntry,
                                          const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(
      CurrentExecutionMode, RequiresFullRuntime,
      CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
          !supportsLightweightRuntime(CGM.getContext(), D));
  EntryFunctionState EST;

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeNVPTX &RT;
    CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
    const OMPExecutableDirective &D;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
                         CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
                         const OMPExecutableDirective &D)
        : RT(RT), EST(EST), D(D) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitSPMDEntryHeader(CGF, EST, D);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitSPMDEntryFooter(CGF, EST);
    }
  } Action(*this, EST, D);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  // Reserve place for the globalized memory.
  GlobalizedRecords.emplace_back();
  if (!KernelStaticGlobalized) {
    KernelStaticGlobalized = new llvm::GlobalVariable(
        CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
        llvm::GlobalValue::InternalLinkage,
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
        "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
        llvm::GlobalValue::NotThreadLocal,
        CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
  }
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}

void CGOpenMPRuntimeNVPTX::emitSPMDEntryHeader(
    CodeGenFunction &CGF, EntryFunctionState &EST,
    const OMPExecutableDirective &D) {
  CGBuilderTy &Bld = CGF.Builder;

  // Set up BBs in entry function.
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
                         /*RequiresOMPRuntime=*/
                         Bld.getInt16(RequiresFullRuntime ? 1 : 0),
                         /*RequiresDataSharing=*/Bld.getInt16(0)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_init), Args);

  if (RequiresFullRuntime) {
    // For data sharing, we need to initialize the stack.
    CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
        OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd));
  }

  CGF.EmitBranch(ExecuteBB);

  CGF.EmitBlock(ExecuteBB);

  IsInTargetMasterThreadRegion = true;
}

void CGOpenMPRuntimeNVPTX::emitSPMDEntryFooter(CodeGenFunction &CGF,
                                               EntryFunctionState &EST) {
  IsInTargetMasterThreadRegion = false;
  if (!CGF.HaveInsertPoint())
    return;

  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
  CGF.EmitBranch(OMPDeInitBB);

  CGF.EmitBlock(OMPDeInitBB);
  // DeInitialize the OMP state in the runtime; called by all active threads.
  llvm::Value *Args[] = {/*RequiresOMPRuntime=*/
                         CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 0)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(
          OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2), Args);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(EST.ExitBB);
  EST.ExitBB = nullptr;
}

// Create a unique global variable to indicate the execution mode of this
// target region. The execution mode is either 'generic' or 'spmd', depending
// on the target directive. This variable is picked up by the offload library
// to set up the device appropriately before kernel launch. If the execution
// mode is 'generic', the runtime reserves one warp for the master; otherwise,
// all warps participate in parallel work.
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
                                     bool Mode) {
  auto *GVMode =
      new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
                               llvm::GlobalValue::WeakAnyLinkage,
                               llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1),
                               Twine(Name, "_exec_mode"));
  CGM.addCompilerUsedGlobal(GVMode);
}
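
// E.g. for a kernel entry function named '__omp_offloading_<...>_foo' (the
// exact mangling is illustrative), this emits a weak constant i8 global
// '__omp_offloading_<...>_foo_exec_mode' holding 0 for SPMD mode and 1 for
// generic mode.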

void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) {
  ASTContext &Ctx = CGM.getContext();

  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {},
                    WST.Loc, WST.Loc);
  emitWorkerLoop(CGF, WST);
  CGF.FinishFunction();
}

void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
                                          WorkerFunctionState &WST) {
  //
  // The workers enter this loop and wait for parallel work from the master.
  // When the master encounters a parallel region it sets up the work + variable
  // arguments, and wakes up the workers. The workers first check to see if
  // they are required for the parallel region, i.e., within the # of requested
  // parallel threads. The activated workers load the variable arguments and
  // execute the parallel work.
  //
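  // In pseudo-code, each worker roughly executes:
  //   for (;;) {
  //     barrier();                                    // .await.work
  //     active = __kmpc_kernel_parallel(&work_fn);
  //     if (!work_fn) break;                          // master signalled exit
  //     if (active) {
  //       work_fn(/*ParallelLevel=*/0, thread_id);
  //       __kmpc_kernel_end_parallel();
  //     }
  //     barrier();                                    // .barrier.parallel
  //   }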

  CGBuilderTy &Bld = CGF.Builder;

  llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
  llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
  llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");

  CGF.EmitBranch(AwaitBB);

  // Workers wait for work from master.
  CGF.EmitBlock(AwaitBB);
  // Wait for parallel work.
  syncCTAThreads(CGF);

  Address WorkFn =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
  Address ExecStatus =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
  CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
  CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));

  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {WorkFn.getPointer()};
  llvm::Value *Ret = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_parallel), Args);
  Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);

  // On termination condition (workid == 0), exit loop.
  llvm::Value *WorkID = Bld.CreateLoad(WorkFn);
  llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate");
  Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);

  // Activate requested workers.
  CGF.EmitBlock(SelectWorkersBB);
  llvm::Value *IsActive =
      Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
  Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);

  // Signal start of parallel region.
  CGF.EmitBlock(ExecuteBB);
  // Skip initialization.
  setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);

  // Process work items: outlined parallel functions.
  for (llvm::Function *W : Work) {
    // Try to match this outlined function.
    llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);

    llvm::Value *WorkFnMatch =
        Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");

    llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
    llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
    Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);

    // Execute this outlined function.
    CGF.EmitBlock(ExecuteFNBB);

    // Insert call to work function via shared wrapper. The shared
    // wrapper takes two arguments:
    //   - the parallelism level;
    //   - the thread ID.
    emitCall(CGF, WST.Loc, W,
             {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});

    // Go to end of parallel region.
    CGF.EmitBranch(TerminateBB);

    CGF.EmitBlock(CheckNextBB);
  }
  // Default case: call to outlined function through pointer if the target
  // region makes a declare target call that may contain an orphaned parallel
  // directive.
  auto *ParallelFnTy =
      llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
                              /*isVarArg=*/false);
  llvm::Value *WorkFnCast =
      Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo());
  // Insert call to work function via shared wrapper. The shared
  // wrapper takes two arguments:
  //   - the parallelism level;
  //   - the thread ID.
  emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast},
           {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
  // Go to end of parallel region.
  CGF.EmitBranch(TerminateBB);

  // Signal end of parallel region.
  CGF.EmitBlock(TerminateBB);
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_end_parallel),
      llvm::None);
  CGF.EmitBranch(BarrierBB);

  // All active and inactive workers wait at a barrier after parallel region.
  CGF.EmitBlock(BarrierBB);
  // Barrier after parallel region.
  syncCTAThreads(CGF);
  CGF.EmitBranch(AwaitBB);

  // Exit target region.
  CGF.EmitBlock(ExitBB);
  // Skip initialization.
  clearLocThreadIdInsertPt(CGF);
}
1550
1551 /// Returns specified OpenMP runtime function for the current OpenMP
1552 /// implementation. Specialized for the NVPTX device.
1553 /// \param Function OpenMP runtime function.
1554 /// \return Specified function.
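/// A typical call site elsewhere in this file looks like:
/// \code
///   CGF.EmitRuntimeCall(
///       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_end_parallel),
///       llvm::None);
/// \endcode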
1555 llvm::FunctionCallee
1556 CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
1557 llvm::FunctionCallee RTLFn = nullptr;
1558 unsigned DefaultAS = CGM.getTargetCodeGenInfo().getDefaultAS();
1559 switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
1560 case OMPRTL_NVPTX__kmpc_kernel_init: {
1561 // Build void __kmpc_kernel_init(kmp_int32 thread_limit, int16_t
1562 // RequiresOMPRuntime);
1563 llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty};
1564 auto *FnTy =
1565 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1566 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
1567 break;
1568 }
1569 case OMPRTL_NVPTX__kmpc_kernel_deinit: {
1570 // Build void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
1571 llvm::Type *TypeParams[] = {CGM.Int16Ty};
1572 auto *FnTy =
1573 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1574 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
1575 break;
1576 }
1577 case OMPRTL_NVPTX__kmpc_spmd_kernel_init: {
1578 // Build void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
1579 // int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
1580 llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
1581 auto *FnTy =
1582 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1583 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init");
1584 break;
1585 }
1586 case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2: {
1587 // Build void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
1588 llvm::Type *TypeParams[] = {CGM.Int16Ty};
1589 auto *FnTy =
1590 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1591 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit_v2");
1592 break;
1593 }
1594 case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
1595 /// Build void __kmpc_kernel_prepare_parallel(
1596 /// void *outlined_function);
1597 llvm::Type *TypeParams[] = {CGM.Int8PtrTy};
1598 auto *FnTy =
1599 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1600 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel");
1601 break;
1602 }
1603 case OMPRTL_NVPTX__kmpc_kernel_parallel: {
1604 /// Build bool __kmpc_kernel_parallel(void **outlined_function);
1605 llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy};
1606 llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
1607 auto *FnTy =
1608 llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false);
1609 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_parallel");
1610 break;
1611 }
1612 case OMPRTL_NVPTX__kmpc_kernel_end_parallel: {
1613 /// Build void __kmpc_kernel_end_parallel();
1614 auto *FnTy =
1615 llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
1616 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_end_parallel");
1617 break;
1618 }
1619 case OMPRTL_NVPTX__kmpc_serialized_parallel: {
1620 // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
1621 // global_tid);
1622 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1623 auto *FnTy =
1624 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1625 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
1626 break;
1627 }
1628 case OMPRTL_NVPTX__kmpc_end_serialized_parallel: {
1629 // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
1630 // global_tid);
1631 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1632 auto *FnTy =
1633 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1634 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
1635 break;
1636 }
1637 case OMPRTL_NVPTX__kmpc_shuffle_int32: {
1638 // Build int32_t __kmpc_shuffle_int32(int32_t element,
1639 // int16_t lane_offset, int16_t warp_size);
1640 llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
1641 auto *FnTy =
1642 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1643 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int32");
1644 break;
1645 }
1646 case OMPRTL_NVPTX__kmpc_shuffle_int64: {
1647 // Build int64_t __kmpc_shuffle_int64(int64_t element,
1648 // int16_t lane_offset, int16_t warp_size);
1649 llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int16Ty, CGM.Int16Ty};
1650 auto *FnTy =
1651 llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
1652 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int64");
1653 break;
1654 }
1655 case OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2: {
1656 // Build int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc,
1657 // kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void*
1658 // reduce_data, void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t
1659 // lane_id, int16_t lane_offset, int16_t shortCircuit), void
1660 // (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
1661 llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
1662 CGM.Int16Ty, CGM.Int16Ty};
1663 auto *ShuffleReduceFnTy =
1664 llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
1665 /*isVarArg=*/false);
1666 llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
1667 auto *InterWarpCopyFnTy =
1668 llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
1669 /*isVarArg=*/false);
1670 llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
1671 CGM.Int32Ty,
1672 CGM.Int32Ty,
1673 CGM.SizeTy,
1674 CGM.VoidPtrTy,
1675 ShuffleReduceFnTy->getPointerTo(DefaultAS),
1676 InterWarpCopyFnTy->getPointerTo(DefaultAS)};
1677 auto *FnTy =
1678 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1679 RTLFn = CGM.CreateRuntimeFunction(
1680 FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait_v2");
1681 break;
1682 }
1683 case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
1684 // Build void __kmpc_nvptx_end_reduce_nowait(kmp_int32 global_tid);
1685 llvm::Type *TypeParams[] = {CGM.Int32Ty};
1686 auto *FnTy =
1687 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1688 RTLFn = CGM.CreateRuntimeFunction(
1689 FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
1690 break;
1691 }
1692 case OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2: {
1693 // Build int32_t __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc, kmp_int32
1694 // global_tid, void *global_buffer, int32_t num_of_records, void*
1695 // reduce_data,
1696 // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
1697 // lane_offset, int16_t shortCircuit),
1698 // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
1699 // (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
1700 // void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
1701 // void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
1702 // int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
1703 // *buffer, int idx, void *reduce_data));
1704 llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
1705 CGM.Int16Ty, CGM.Int16Ty};
1706 auto *ShuffleReduceFnTy =
1707 llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
1708 /*isVarArg=*/false);
1709 llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
1710 auto *InterWarpCopyFnTy =
1711 llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
1712 /*isVarArg=*/false);
1713 llvm::Type *GlobalListTypeParams[] = {CGM.VoidPtrTy, CGM.IntTy,
1714 CGM.VoidPtrTy};
1715 auto *GlobalListFnTy =
1716 llvm::FunctionType::get(CGM.VoidTy, GlobalListTypeParams,
1717 /*isVarArg=*/false);
1718 llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
1719 CGM.Int32Ty,
1720 CGM.VoidPtrTy,
1721 CGM.Int32Ty,
1722 CGM.VoidPtrTy,
1723 ShuffleReduceFnTy->getPointerTo(DefaultAS),
1724 InterWarpCopyFnTy->getPointerTo(DefaultAS),
1725 GlobalListFnTy->getPointerTo(DefaultAS),
1726 GlobalListFnTy->getPointerTo(DefaultAS),
1727 GlobalListFnTy->getPointerTo(DefaultAS),
1728 GlobalListFnTy->getPointerTo(DefaultAS)};
1729 auto *FnTy =
1730 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1731 RTLFn = CGM.CreateRuntimeFunction(
1732 FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait_v2");
1733 break;
1734 }
1735 case OMPRTL_NVPTX__kmpc_data_sharing_init_stack: {
1736 /// Build void __kmpc_data_sharing_init_stack();
1737 auto *FnTy =
1738 llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
1739 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack");
1740 break;
1741 }
1742 case OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd: {
1743 /// Build void __kmpc_data_sharing_init_stack_spmd();
1744 auto *FnTy =
1745 llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
1746 RTLFn =
1747 CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack_spmd");
1748 break;
1749 }
1750 case OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack: {
1751 // Build void *__kmpc_data_sharing_coalesced_push_stack(size_t size,
1752 // int16_t UseSharedMemory);
1753 llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
1754 auto *FnTy =
1755 llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1756 RTLFn = CGM.CreateRuntimeFunction(
1757 FnTy, /*Name=*/"__kmpc_data_sharing_coalesced_push_stack");
1758 break;
1759 }
1760 case OMPRTL_NVPTX__kmpc_data_sharing_push_stack: {
1761 // Build void *__kmpc_data_sharing_push_stack(size_t size, int16_t
1762 // UseSharedMemory);
1763 llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
1764 auto *FnTy =
1765 llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1766 RTLFn = CGM.CreateRuntimeFunction(
1767 FnTy, /*Name=*/"__kmpc_data_sharing_push_stack");
1768 break;
1769 }
1770 case OMPRTL_NVPTX__kmpc_data_sharing_pop_stack: {
1771 // Build void __kmpc_data_sharing_pop_stack(void *a);
1772 llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
1773 auto *FnTy =
1774 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1775 RTLFn = CGM.CreateRuntimeFunction(FnTy,
1776 /*Name=*/"__kmpc_data_sharing_pop_stack");
1777 break;
1778 }
1779 case OMPRTL_NVPTX__kmpc_begin_sharing_variables: {
1780 /// Build void __kmpc_begin_sharing_variables(void ***args,
1781 /// size_t n_args);
1782 llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo(), CGM.SizeTy};
1783 auto *FnTy =
1784 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1785 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_begin_sharing_variables");
1786 break;
1787 }
1788 case OMPRTL_NVPTX__kmpc_end_sharing_variables: {
1789 /// Build void __kmpc_end_sharing_variables();
1790 auto *FnTy =
1791 llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
1792 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_sharing_variables");
1793 break;
1794 }
1795 case OMPRTL_NVPTX__kmpc_get_shared_variables: {
1796 /// Build void __kmpc_get_shared_variables(void ***GlobalArgs);
1797 llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo()};
1798 auto *FnTy =
1799 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1800 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_shared_variables");
1801 break;
1802 }
1803 case OMPRTL_NVPTX__kmpc_parallel_level: {
1804 // Build uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32 global_tid);
1805 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1806 auto *FnTy =
1807 llvm::FunctionType::get(CGM.Int16Ty, TypeParams, /*isVarArg*/ false);
1808 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_parallel_level");
1809 break;
1810 }
1811 case OMPRTL_NVPTX__kmpc_is_spmd_exec_mode: {
1812 // Build int8_t __kmpc_is_spmd_exec_mode();
1813 auto *FnTy = llvm::FunctionType::get(CGM.Int8Ty, /*isVarArg=*/false);
1814 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_is_spmd_exec_mode");
1815 break;
1816 }
1817 case OMPRTL_NVPTX__kmpc_get_team_static_memory: {
1818 // Build void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
1819 // const void *buf, size_t size, int16_t is_shared, const void **res);
1820 llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.VoidPtrTy, CGM.SizeTy,
1821 CGM.Int16Ty, CGM.VoidPtrPtrTy};
1822 auto *FnTy =
1823 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1824 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_team_static_memory");
1825 break;
1826 }
1827 case OMPRTL_NVPTX__kmpc_restore_team_static_memory: {
1828 // Build void __kmpc_restore_team_static_memory(int16_t isSPMDExecutionMode,
1829 // int16_t is_shared);
1830 llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.Int16Ty};
1831 auto *FnTy =
1832 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1833 RTLFn =
1834 CGM.CreateRuntimeFunction(FnTy, "__kmpc_restore_team_static_memory");
1835 break;
1836 }
1837 case OMPRTL__kmpc_barrier: {
1838 // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
1839 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1840 auto *FnTy =
1841 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1842 RTLFn =
1843 CGM.CreateConvergentRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
1844 break;
1845 }
1846 case OMPRTL__kmpc_barrier_simple_spmd: {
1847 // Build void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
1848 // global_tid);
1849 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1850 auto *FnTy =
1851 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1852 RTLFn = CGM.CreateConvergentRuntimeFunction(
1853 FnTy, /*Name*/ "__kmpc_barrier_simple_spmd");
1854 break;
1855 }
1856 case OMPRTL_NVPTX__kmpc_warp_active_thread_mask: {
1857 // Build int32_t __kmpc_warp_active_thread_mask(void);
1858 auto *FnTy =
1859 llvm::FunctionType::get(CGM.Int32Ty, llvm::None, /*isVarArg=*/false);
1860 RTLFn = CGM.CreateConvergentRuntimeFunction(FnTy, "__kmpc_warp_active_thread_mask");
1861 break;
1862 }
1863 case OMPRTL_NVPTX__kmpc_syncwarp: {
1864 // Build void __kmpc_syncwarp(kmp_int32 Mask);
1865 auto *FnTy =
1866 llvm::FunctionType::get(CGM.VoidTy, CGM.Int32Ty, /*isVarArg=*/false);
1867 RTLFn = CGM.CreateConvergentRuntimeFunction(FnTy, "__kmpc_syncwarp");
1868 break;
1869 }
1870 }
1871 return RTLFn;
1872 }
1873
1874 void CGOpenMPRuntimeNVPTX::createOffloadEntry(llvm::Constant *ID,
1875 llvm::Constant *Addr,
1876 uint64_t Size, int32_t,
1877 llvm::GlobalValue::LinkageTypes) {
1878 // TODO: Add support for global variables on the device after declare target
1879 // support.
1880 if (!isa<llvm::Function>(Addr))
1881 return;
1882 llvm::Module &M = CGM.getModule();
1883 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
1884
1885 // Get "nvvm.annotations" metadata node
1886 llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
1887
1888 llvm::Metadata *MDVals[] = {
1889 llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
1890 llvm::ConstantAsMetadata::get(
1891 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
1892 // Append metadata to nvvm.annotations
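// e.g. !{void ()* @target_kernel, !"kernel", i32 1} (illustrative; the
// function operand is the actual kernel in Addr).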
1893 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
1894 }
1895
1896 void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
1897 const OMPExecutableDirective &D, StringRef ParentName,
1898 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
1899 bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
1900 if (!IsOffloadEntry) // Nothing to do.
1901 return;
1902
1903 assert(!ParentName.empty() && "Invalid target region parent name!");
1904
1905 bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
1906 if (Mode)
1907 emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1908 CodeGen);
1909 else
1910 emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1911 CodeGen);
1912
1913 setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
1914 }
1915
1916 namespace {
1917 LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
1918 /// Enum for accessing the reserved_2 field of the ident_t struct.
1919 enum ModeFlagsTy : unsigned {
1920 /// Bit set to 1 when in SPMD mode.
1921 KMP_IDENT_SPMD_MODE = 0x01,
1922 /// Bit set to 1 when a simplified runtime is used.
1923 KMP_IDENT_SIMPLE_RT_MODE = 0x02,
1924 LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
1925 };
1926
1927 /// Special mode Undefined: the combination of non-SPMD mode + simple runtime.
1928 static const ModeFlagsTy UndefinedMode =
1929 (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
1930 } // anonymous namespace
1931
1932 unsigned CGOpenMPRuntimeNVPTX::getDefaultLocationReserved2Flags() const {
1933 switch (getExecutionMode()) {
1934 case EM_SPMD:
1935 if (requiresFullRuntime())
1936 return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
1937 return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
1938 case EM_NonSPMD:
1939 assert(requiresFullRuntime() && "Expected full runtime.");
1940 return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
1941 case EM_Unknown:
1942 return UndefinedMode;
1943 }
1944 llvm_unreachable("Unknown flags are requested.");
1945 }
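// The reserved_2 encodings produced by the cases above are:
//   0x0: generic (non-SPMD) mode, full runtime
//   0x1: SPMD mode, full runtime
//   0x3: SPMD mode, simple (lightweight) runtime
//   0x2: unknown mode (UndefinedMode)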
1946
1947 CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
1948 : CGOpenMPRuntime(CGM, "_", "$") {
1949 if (!CGM.getLangOpts().OpenMPIsDevice)
1950 llvm_unreachable("OpenMP NVPTX can only handle device code.");
1951 }
1952
1953 void CGOpenMPRuntimeNVPTX::emitProcBindClause(CodeGenFunction &CGF,
1954 ProcBindKind ProcBind,
1955 SourceLocation Loc) {
1956 // Do nothing in case of SPMD mode and L0 parallel.
1957 if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
1958 return;
1959
1960 CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
1961 }
1962
1963 void CGOpenMPRuntimeNVPTX::emitNumThreadsClause(CodeGenFunction &CGF,
1964 llvm::Value *NumThreads,
1965 SourceLocation Loc) {
1966 // Do nothing in case of SPMD mode and L0 parallel.
1967 if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
1968 return;
1969
1970 CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
1971 }
1972
1973 void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF,
1974 const Expr *NumTeams,
1975 const Expr *ThreadLimit,
1976 SourceLocation Loc) {}
1977
1978 llvm::Function *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
1979 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1980 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1981 // Emit target region as a standalone region.
1982 class NVPTXPrePostActionTy : public PrePostActionTy {
1983 bool &IsInParallelRegion;
1984 bool PrevIsInParallelRegion;
1985
1986 public:
1987 NVPTXPrePostActionTy(bool &IsInParallelRegion)
1988 : IsInParallelRegion(IsInParallelRegion) {}
1989 void Enter(CodeGenFunction &CGF) override {
1990 PrevIsInParallelRegion = IsInParallelRegion;
1991 IsInParallelRegion = true;
1992 }
1993 void Exit(CodeGenFunction &CGF) override {
1994 IsInParallelRegion = PrevIsInParallelRegion;
1995 }
1996 } Action(IsInParallelRegion);
1997 CodeGen.setAction(Action);
1998 bool PrevIsInTTDRegion = IsInTTDRegion;
1999 IsInTTDRegion = false;
2000 bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
2001 IsInTargetMasterThreadRegion = false;
2002 auto *OutlinedFun =
2003 cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
2004 D, ThreadIDVar, InnermostKind, CodeGen));
2005 if (CGM.getLangOpts().Optimize) {
2006 OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
2007 OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
2008 OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
2009 }
2010 IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
2011 IsInTTDRegion = PrevIsInTTDRegion;
2012 if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD &&
2013 !IsInParallelRegion) {
2014 llvm::Function *WrapperFun =
2015 createParallelDataSharingWrapper(OutlinedFun, D);
2016 WrapperFunctionsMap[OutlinedFun] = WrapperFun;
2017 }
2018
2019 return OutlinedFun;
2020 }
2021
2022 /// Get list of lastprivate variables from the teams distribute ... or
2023 /// teams {distribute ...} directives.
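/// For example, for
///   #pragma omp target teams distribute lastprivate(a)
/// the declaration of 'a' is appended to \p Vars.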
2024 static void
2025 getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
2026 llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
2027 assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
2028 "expected teams directive.");
2029 const OMPExecutableDirective *Dir = &D;
2030 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
2031 if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
2032 Ctx,
2033 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
2034 /*IgnoreCaptured=*/true))) {
2035 Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
2036 if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
2037 Dir = nullptr;
2038 }
2039 }
2040 if (!Dir)
2041 return;
2042 for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
2043 for (const Expr *E : C->getVarRefs())
2044 Vars.push_back(getPrivateItem(E));
2045 }
2046 }
2047
2048 /// Get list of reduction variables from the teams ... directives.
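/// For example, for '#pragma omp target teams reduction(+ : s)' the private
/// copy of 's' is appended to \p Vars so that it can be globalized.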
2049 static void
2050 getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
2051 llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
2052 assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
2053 "expected teams directive.");
2054 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
2055 for (const Expr *E : C->privates())
2056 Vars.push_back(getPrivateItem(E));
2057 }
2058 }
2059
2060 llvm::Function *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
2061 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
2062 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
2063 SourceLocation Loc = D.getBeginLoc();
2064
2065 const RecordDecl *GlobalizedRD = nullptr;
2066 llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
2067 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
2068 // Globalize team reduction variables unconditionally in all modes.
2069 if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
2070 getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
2071 if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
2072 getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
2073 if (!LastPrivatesReductions.empty()) {
2074 GlobalizedRD = ::buildRecordForGlobalizedVars(
2075 CGM.getContext(), llvm::None, LastPrivatesReductions,
2076 MappedDeclsFields, WarpSize);
2077 }
2078 } else if (!LastPrivatesReductions.empty()) {
2079 assert(!TeamAndReductions.first &&
2080 "Previous team declaration is not expected.");
2081 TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
2082 std::swap(TeamAndReductions.second, LastPrivatesReductions);
2083 }
2084
2085 // Emit target region as a standalone region.
2086 class NVPTXPrePostActionTy : public PrePostActionTy {
2087 SourceLocation &Loc;
2088 const RecordDecl *GlobalizedRD;
2089 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2090 &MappedDeclsFields;
2091
2092 public:
2093 NVPTXPrePostActionTy(
2094 SourceLocation &Loc, const RecordDecl *GlobalizedRD,
2095 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2096 &MappedDeclsFields)
2097 : Loc(Loc), GlobalizedRD(GlobalizedRD),
2098 MappedDeclsFields(MappedDeclsFields) {}
2099 void Enter(CodeGenFunction &CGF) override {
2100 auto &Rt =
2101 static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
2102 if (GlobalizedRD) {
2103 auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
2104 I->getSecond().GlobalRecord = GlobalizedRD;
2105 I->getSecond().MappedParams =
2106 std::make_unique<CodeGenFunction::OMPMapVars>();
2107 DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
2108 for (const auto &Pair : MappedDeclsFields) {
2109 assert(Pair.getFirst()->isCanonicalDecl() &&
2110 "Expected canonical declaration");
2111 Data.insert(std::make_pair(Pair.getFirst(),
2112 MappedVarData(Pair.getSecond(),
2113 /*IsOnePerTeam=*/true)));
2114 }
2115 }
2116 Rt.emitGenericVarsProlog(CGF, Loc);
2117 }
2118 void Exit(CodeGenFunction &CGF) override {
2119 static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
2120 .emitGenericVarsEpilog(CGF);
2121 }
2122 } Action(Loc, GlobalizedRD, MappedDeclsFields);
2123 CodeGen.setAction(Action);
2124 llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
2125 D, ThreadIDVar, InnermostKind, CodeGen);
2126 if (CGM.getLangOpts().Optimize) {
2127 OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
2128 OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
2129 OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
2130 }
2131
2132 return OutlinedFun;
2133 }
2134
2135 void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
2136 SourceLocation Loc,
2137 bool WithSPMDCheck) {
2138 if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic &&
2139 getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
2140 return;
2141
2142 CGBuilderTy &Bld = CGF.Builder;
2143
2144 const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
2145 if (I == FunctionGlobalizedDecls.end())
2146 return;
2147 if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
2148 QualType GlobalRecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
2149 QualType SecGlobalRecTy;
2150
2151 // Recover pointer to this function's global record. The runtime will
2152 // handle the specifics of the allocation of the memory.
2153 // Use actual memory size of the record including the padding
2154 // for alignment purposes.
2155 unsigned Alignment =
2156 CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
2157 unsigned GlobalRecordSize =
2158 CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
2159 GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
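// E.g. a 12-byte record with 8-byte alignment yields a 16-byte allocation.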
2160
2161 llvm::PointerType *GlobalRecPtrTy =
2162 CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
2163 llvm::Value *GlobalRecCastAddr;
2164 llvm::Value *IsTTD = nullptr;
2165 if (!IsInTTDRegion &&
2166 (WithSPMDCheck ||
2167 getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
2168 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2169 llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
2170 llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
2171 if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
2172 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2173 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2174 llvm::Value *PL = CGF.EmitRuntimeCall(
2175 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
2176 {RTLoc, ThreadID});
2177 IsTTD = Bld.CreateIsNull(PL);
2178 }
2179 llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
2180 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
2181 Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
2182 // There is no need to emit line number for unconditional branch.
2183 (void)ApplyDebugLocation::CreateEmpty(CGF);
2184 CGF.EmitBlock(SPMDBB);
2185 Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
2186 CharUnits::fromQuantity(Alignment));
2187 CGF.EmitBranch(ExitBB);
2188 // There is no need to emit line number for unconditional branch.
2189 (void)ApplyDebugLocation::CreateEmpty(CGF);
2190 CGF.EmitBlock(NonSPMDBB);
2191 llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
2192 if (const RecordDecl *SecGlobalizedVarsRecord =
2193 I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
2194 SecGlobalRecTy =
2195 CGM.getContext().getRecordType(SecGlobalizedVarsRecord);
2196
2197 // Recover pointer to this function's global record. The runtime will
2198 // handle the specifics of the allocation of the memory.
2199 // Use actual memory size of the record including the padding
2200 // for alignment purposes.
2201 unsigned Alignment =
2202 CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
2203 unsigned GlobalRecordSize =
2204 CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
2205 GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
2206 Size = Bld.CreateSelect(
2207 IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
2208 }
2209 // TODO: allow the usage of shared memory to be controlled by
2210 // the user; for now, default to global.
2211 llvm::Value *GlobalRecordSizeArg[] = {
2212 Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
2213 llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
2214 createNVPTXRuntimeFunction(
2215 OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
2216 GlobalRecordSizeArg);
2217 GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2218 GlobalRecValue, GlobalRecPtrTy);
2219 CGF.EmitBlock(ExitBB);
2220 auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
2221 /*NumReservedValues=*/2, "_select_stack");
2222 Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
2223 Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
2224 GlobalRecCastAddr = Phi;
2225 I->getSecond().GlobalRecordAddr = Phi;
2226 I->getSecond().IsInSPMDModeFlag = IsSPMD;
2227 } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
2228 assert(GlobalizedRecords.back().Records.size() < 2 &&
2229 "Expected less than 2 globalized records: one for target and one "
2230 "for teams.");
2231 unsigned Offset = 0;
2232 for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
2233 QualType RDTy = CGM.getContext().getRecordType(RD);
2234 unsigned Alignment =
2235 CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
2236 unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
2237 Offset =
2238 llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
2239 }
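// E.g. previous records of 24 and 16 bytes with 8-byte alignment leave
// Offset == 40 as the start of the record being added here.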
2240 unsigned Alignment =
2241 CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
2242 Offset = llvm::alignTo(Offset, Alignment);
2243 GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
2244 ++GlobalizedRecords.back().RegionCounter;
2245 if (GlobalizedRecords.back().Records.size() == 1) {
2246 assert(KernelStaticGlobalized &&
2247 "Kernel static pointer must be initialized already.");
2248 auto *UseSharedMemory = new llvm::GlobalVariable(
2249 CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
2250 llvm::GlobalValue::InternalLinkage, nullptr,
2251 "_openmp_static_kernel$is_shared");
2252 UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2253 QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
2254 /*DestWidth=*/16, /*Signed=*/0);
2255 llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
2256 Address(UseSharedMemory,
2257 CGM.getContext().getTypeAlignInChars(Int16Ty)),
2258 /*Volatile=*/false, Int16Ty, Loc);
2259 auto *StaticGlobalized = new llvm::GlobalVariable(
2260 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2261 llvm::GlobalValue::CommonLinkage, nullptr);
2262 auto *RecSize = new llvm::GlobalVariable(
2263 CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
2264 llvm::GlobalValue::InternalLinkage, nullptr,
2265 "_openmp_static_kernel$size");
2266 RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2267 llvm::Value *Ld = CGF.EmitLoadOfScalar(
2268 Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
2269 CGM.getContext().getSizeType(), Loc);
2270 llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2271 KernelStaticGlobalized, CGM.VoidPtrPtrTy);
2272 llvm::Value *GlobalRecordSizeArg[] = {
2273 llvm::ConstantInt::get(
2274 CGM.Int16Ty,
2275 getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD ? 1 : 0),
2276 StaticGlobalized, Ld, IsInSharedMemory, ResAddr};
2277 CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
2278 OMPRTL_NVPTX__kmpc_get_team_static_memory),
2279 GlobalRecordSizeArg);
2280 GlobalizedRecords.back().Buffer = StaticGlobalized;
2281 GlobalizedRecords.back().RecSize = RecSize;
2282 GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
2283 GlobalizedRecords.back().Loc = Loc;
2284 }
2285 assert(KernelStaticGlobalized && "Global address must be set already.");
2286 Address FrameAddr = CGF.EmitLoadOfPointer(
2287 Address(KernelStaticGlobalized, CGM.getPointerAlign()),
2288 CGM.getContext()
2289 .getPointerType(CGM.getContext().VoidPtrTy)
2290 .castAs<PointerType>());
2291 llvm::Value *GlobalRecValue =
2292 Bld.CreateConstInBoundsGEP(FrameAddr, Offset).getPointer();
2293 I->getSecond().GlobalRecordAddr = GlobalRecValue;
2294 I->getSecond().IsInSPMDModeFlag = nullptr;
2295 GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2296 GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
2297 } else {
2298 // TODO: allow the usage of shared memory to be controlled by
2299 // the user; for now, default to global.
2300 bool UseSharedMemory =
2301 IsInTTDRegion && GlobalRecordSize <= SharedMemorySize;
2302 llvm::Value *GlobalRecordSizeArg[] = {
2303 llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
2304 CGF.Builder.getInt16(UseSharedMemory ? 1 : 0)};
2305 llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
2306 createNVPTXRuntimeFunction(
2307 IsInTTDRegion
2308 ? OMPRTL_NVPTX__kmpc_data_sharing_push_stack
2309 : OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
2310 GlobalRecordSizeArg);
2311 GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2312 GlobalRecValue, GlobalRecPtrTy);
2313 I->getSecond().GlobalRecordAddr = GlobalRecValue;
2314 I->getSecond().IsInSPMDModeFlag = nullptr;
2315 }
2316 LValue Base =
2317 CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);
2318
2319 // Emit the "global alloca" which is a GEP from the global declaration
2320 // record using the pointer returned by the runtime.
2321 LValue SecBase;
2322 decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
2323 if (IsTTD) {
2324 SecIt = I->getSecond().SecondaryLocalVarData->begin();
2325 llvm::PointerType *SecGlobalRecPtrTy =
2326 CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
2327 SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
2328 Bld.CreatePointerBitCastOrAddrSpaceCast(
2329 I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
2330 SecGlobalRecTy);
2331 }
2332 for (auto &Rec : I->getSecond().LocalVarData) {
2333 bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
2334 llvm::Value *ParValue;
2335 if (EscapedParam) {
2336 const auto *VD = cast<VarDecl>(Rec.first);
2337 LValue ParLVal =
2338 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
2339 ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
2340 }
2341 LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
2342 // Emit VarAddr based on the lane id if required.
2343 QualType VarTy;
2344 if (Rec.second.IsOnePerTeam) {
2345 VarTy = Rec.second.FD->getType();
2346 } else {
2347 llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
2348 VarAddr.getAddress(CGF).getPointer(),
2349 {Bld.getInt32(0), getNVPTXLaneID(CGF)});
2350 VarTy =
2351 Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
2352 VarAddr = CGF.MakeAddrLValue(
2353 Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
2354 AlignmentSource::Decl);
2355 }
2356 Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
2357 if (!IsInTTDRegion &&
2358 (WithSPMDCheck ||
2359 getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
2360 assert(I->getSecond().IsInSPMDModeFlag &&
2361 "Expected unknown execution mode or required SPMD check.");
2362 if (IsTTD) {
2363 assert(SecIt->second.IsOnePerTeam &&
2364 "Secondary glob data must be one per team.");
2365 LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
2366 VarAddr.setAddress(
2367 Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(CGF),
2368 VarAddr.getPointer(CGF)),
2369 VarAddr.getAlignment()));
2370 Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
2371 }
2372 Address GlobalPtr = Rec.second.PrivateAddr;
2373 Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
2374 Rec.second.PrivateAddr = Address(
2375 Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
2376 LocalAddr.getPointer(), GlobalPtr.getPointer()),
2377 LocalAddr.getAlignment());
2378 }
2379 if (EscapedParam) {
2380 const auto *VD = cast<VarDecl>(Rec.first);
2381 CGF.EmitStoreOfScalar(ParValue, VarAddr);
2382 I->getSecond().MappedParams->setVarAddr(CGF, VD,
2383 VarAddr.getAddress(CGF));
2384 }
2385 if (IsTTD)
2386 ++SecIt;
2387 }
2388 }
2389 for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
2390 // Recover pointer to this function's global record. The runtime will
2391 // handle the specifics of the allocation of the memory.
2392 // Use actual memory size of the record including the padding
2393 // for alignment purposes.
2394 CGBuilderTy &Bld = CGF.Builder;
2395 llvm::Value *Size = CGF.getTypeSize(VD->getType());
2396 CharUnits Align = CGM.getContext().getDeclAlign(VD);
2397 Size = Bld.CreateNUWAdd(
2398 Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
2399 llvm::Value *AlignVal =
2400 llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
2401 Size = Bld.CreateUDiv(Size, AlignVal);
2402 Size = Bld.CreateNUWMul(Size, AlignVal);
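// The three statements above round Size up to a multiple of the alignment,
// e.g. a 10-byte type with 8-byte alignment is allocated as 16 bytes.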
2403 // TODO: allow the usage of shared memory to be controlled by
2404 // the user; for now, default to global.
2405 llvm::Value *GlobalRecordSizeArg[] = {
2406 Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
2407 llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
2408 createNVPTXRuntimeFunction(
2409 OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
2410 GlobalRecordSizeArg);
2411 llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2412 GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
2413 LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
2414 CGM.getContext().getDeclAlign(VD),
2415 AlignmentSource::Decl);
2416 I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
2417 Base.getAddress(CGF));
2418 I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
2419 }
2420 I->getSecond().MappedParams->apply(CGF);
2421 }
2422
2423 void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF,
2424 bool WithSPMDCheck) {
2425 if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic &&
2426 getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
2427 return;
2428
2429 const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
2430 if (I != FunctionGlobalizedDecls.end()) {
2431 I->getSecond().MappedParams->restore(CGF);
2432 if (!CGF.HaveInsertPoint())
2433 return;
2434 for (llvm::Value *Addr :
2435 llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
2436 CGF.EmitRuntimeCall(
2437 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
2438 Addr);
2439 }
2440 if (I->getSecond().GlobalRecordAddr) {
2441 if (!IsInTTDRegion &&
2442 (WithSPMDCheck ||
2443 getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
2444 CGBuilderTy &Bld = CGF.Builder;
2445 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2446 llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
2447 Bld.CreateCondBr(I->getSecond().IsInSPMDModeFlag, ExitBB, NonSPMDBB);
2448 // There is no need to emit line number for unconditional branch.
2449 (void)ApplyDebugLocation::CreateEmpty(CGF);
2450 CGF.EmitBlock(NonSPMDBB);
2451 CGF.EmitRuntimeCall(
2452 createNVPTXRuntimeFunction(
2453 OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
2454 CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
2455 CGF.EmitBlock(ExitBB);
2456 } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
2457 assert(GlobalizedRecords.back().RegionCounter > 0 &&
2458 "region counter must be > 0.");
2459 --GlobalizedRecords.back().RegionCounter;
2460 // Emit the restore function only in the target region.
2461 if (GlobalizedRecords.back().RegionCounter == 0) {
2462 QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
2463 /*DestWidth=*/16, /*Signed=*/0);
2464 llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
2465 Address(GlobalizedRecords.back().UseSharedMemory,
2466 CGM.getContext().getTypeAlignInChars(Int16Ty)),
2467 /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
2468 llvm::Value *Args[] = {
2469 llvm::ConstantInt::get(
2470 CGM.Int16Ty,
2471 getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD ? 1 : 0),
2472 IsInSharedMemory};
2473 CGF.EmitRuntimeCall(
2474 createNVPTXRuntimeFunction(
2475 OMPRTL_NVPTX__kmpc_restore_team_static_memory),
2476 Args);
2477 }
2478 } else {
2479 CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
2480 OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
2481 I->getSecond().GlobalRecordAddr);
2482 }
2483 }
2484 }
2485 }
2486
2487 void CGOpenMPRuntimeNVPTX::emitTeamsCall(CodeGenFunction &CGF,
2488 const OMPExecutableDirective &D,
2489 SourceLocation Loc,
2490 llvm::Function *OutlinedFn,
2491 ArrayRef<llvm::Value *> CapturedVars) {
2492 if (!CGF.HaveInsertPoint())
2493 return;
2494
2495 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2496 /*Name=*/".zero.addr");
2497 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2498 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2499 OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
2500 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2501 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2502 emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2503 }
2504
2505 void CGOpenMPRuntimeNVPTX::emitParallelCall(
2506 CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
2507 ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2508 if (!CGF.HaveInsertPoint())
2509 return;
2510
2511 if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
2512 emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
2513 else
2514 emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
2515 }
2516
2517 void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
2518 CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
2519 ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2520 llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);
2521
2522 // Force inline this outlined function at its call site.
2523 Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
2524
2525 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2526 /*Name=*/".zero.addr");
2527 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2528 // ThreadId for serialized parallels is 0.
2529 Address ThreadIDAddr = ZeroAddr;
2530 auto &&CodeGen = [this, Fn, CapturedVars, Loc, &ThreadIDAddr](
2531 CodeGenFunction &CGF, PrePostActionTy &Action) {
2532 Action.Enter(CGF);
2533
2534 Address ZeroAddr =
2535 CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2536 /*Name=*/".bound.zero.addr");
2537 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2538 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2539 OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2540 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2541 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2542 emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
2543 };
2544 auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
2545 PrePostActionTy &) {
2546
2547 RegionCodeGenTy RCG(CodeGen);
2548 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2549 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2550 llvm::Value *Args[] = {RTLoc, ThreadID};
2551
2552 NVPTXActionTy Action(
2553 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
2554 Args,
2555 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
2556 Args);
2557 RCG.setAction(Action);
2558 RCG(CGF);
2559 };
2560
2561 auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
2562 PrePostActionTy &Action) {
2563 CGBuilderTy &Bld = CGF.Builder;
2564 llvm::Function *WFn = WrapperFunctionsMap[Fn];
2565 assert(WFn && "Wrapper function does not exist!");
2566 llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
2567
2568 // Prepare for parallel region. Indicate the outlined function.
2569 llvm::Value *Args[] = {ID};
2570 CGF.EmitRuntimeCall(
2571 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
2572 Args);
2573
2574 // Create a private scope that will globalize the arguments
2575 // passed from the outside of the target region.
2576 CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
2577
2578 // There's something to share.
2579 if (!CapturedVars.empty()) {
2580 // Allocate storage to hold the list of shared argument references.
2581 Address SharedArgs =
2582 CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
2583 llvm::Value *SharedArgsPtr = SharedArgs.getPointer();
2584
2585 llvm::Value *DataSharingArgs[] = {
2586 SharedArgsPtr,
2587 llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
2588 CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
2589 OMPRTL_NVPTX__kmpc_begin_sharing_variables),
2590 DataSharingArgs);
2591
2592 // Store variable address in a list of references to pass to workers.
2593 unsigned Idx = 0;
2594 ASTContext &Ctx = CGF.getContext();
2595 Address SharedArgListAddress = CGF.EmitLoadOfPointer(
2596 SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
2597 .castAs<PointerType>());
2598 for (llvm::Value *V : CapturedVars) {
2599 Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
2600 llvm::Value *PtrV;
2601 if (V->getType()->isIntegerTy())
2602 PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
2603 else
2604 PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
2605 CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
2606 Ctx.getPointerType(Ctx.VoidPtrTy));
2607 ++Idx;
2608 }
2609 }
2610
2611 // Activate workers. This barrier is used by the master to signal
2612 // work for the workers.
2613 syncCTAThreads(CGF);
2614
2615 // OpenMP [2.5, Parallel Construct, p.49]
2616 // There is an implied barrier at the end of a parallel region. After the
2617 // end of a parallel region, only the master thread of the team resumes
2618 // execution of the enclosing task region.
2619 //
2620 // The master waits at this barrier until all workers are done.
2621 syncCTAThreads(CGF);
2622
2623 if (!CapturedVars.empty())
2624 CGF.EmitRuntimeCall(
2625 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_sharing_variables));
2626
2627 // Remember for post-processing in worker loop.
2628 Work.emplace_back(WFn);
2629 };
2630
2631 auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
2632 CodeGenFunction &CGF, PrePostActionTy &Action) {
2633 if (IsInParallelRegion) {
2634 SeqGen(CGF, Action);
2635 } else if (IsInTargetMasterThreadRegion) {
2636 L0ParallelGen(CGF, Action);
2637 } else {
2638 // Check for master and then parallelism:
2639 // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
2640 // Serialized execution.
2641 // } else {
2642 // Worker call.
2643 // }
2644 CGBuilderTy &Bld = CGF.Builder;
2645 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2646 llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
2647 llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
2648 llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
2649 llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
2650 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
2651 Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
2652 // There is no need to emit line number for unconditional branch.
2653 (void)ApplyDebugLocation::CreateEmpty(CGF);
2654 CGF.EmitBlock(ParallelCheckBB);
2655 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2656 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2657 llvm::Value *PL = CGF.EmitRuntimeCall(
2658 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
2659 {RTLoc, ThreadID});
2660 llvm::Value *Res = Bld.CreateIsNotNull(PL);
2661 Bld.CreateCondBr(Res, SeqBB, MasterBB);
2662 CGF.EmitBlock(SeqBB);
2663 SeqGen(CGF, Action);
2664 CGF.EmitBranch(ExitBB);
2665 // There is no need to emit line number for unconditional branch.
2666 (void)ApplyDebugLocation::CreateEmpty(CGF);
2667 CGF.EmitBlock(MasterBB);
2668 L0ParallelGen(CGF, Action);
2669 CGF.EmitBranch(ExitBB);
2670 // There is no need to emit line number for unconditional branch.
2671 (void)ApplyDebugLocation::CreateEmpty(CGF);
2672 // Emit the continuation block for code after the if.
2673 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2674 }
2675 };
2676
2677 if (IfCond) {
2678 emitIfClause(CGF, IfCond, LNParallelGen, SeqGen);
2679 } else {
2680 CodeGenFunction::RunCleanupsScope Scope(CGF);
2681 RegionCodeGenTy ThenRCG(LNParallelGen);
2682 ThenRCG(CGF);
2683 }
2684 }
2685
2686 void CGOpenMPRuntimeNVPTX::emitSPMDParallelCall(
2687 CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
2688 ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2689 // Just call the outlined function to execute the parallel region.
2690 // OutlinedFn(&GTid, &zero, CapturedStruct);
2691 //
2692 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2693
2694 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2695 /*Name=*/".zero.addr");
2696 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2697 // ThreadId for serialized parallels is 0.
2698 Address ThreadIDAddr = ZeroAddr;
2699 auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, &ThreadIDAddr](
2700 CodeGenFunction &CGF, PrePostActionTy &Action) {
2701 Action.Enter(CGF);
2702
2703 Address ZeroAddr =
2704 CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2705 /*Name=*/".bound.zero.addr");
2706 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2707 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2708 OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2709 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2710 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2711 emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2712 };
2713 auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
2714 PrePostActionTy &) {
2715
2716 RegionCodeGenTy RCG(CodeGen);
2717 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2718 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2719 llvm::Value *Args[] = {RTLoc, ThreadID};
2720
2721 NVPTXActionTy Action(
2722 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
2723 Args,
2724 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
2725 Args);
2726 RCG.setAction(Action);
2727 RCG(CGF);
2728 };
2729
2730 if (IsInTargetMasterThreadRegion) {
2731 // In the worker we need to use the real thread id.
2732 ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
2733 RegionCodeGenTy RCG(CodeGen);
2734 RCG(CGF);
2735 } else {
2736 // If we are not in the target region, it is definitely L2 parallelism or
2737 // more, because in SPMD mode we always have an L1 parallel level, so we
2738 // don't need to check for orphaned directives.
2739 RegionCodeGenTy RCG(SeqGen);
2740 RCG(CGF);
2741 }
2742 }
2743
2744 void CGOpenMPRuntimeNVPTX::syncCTAThreads(CodeGenFunction &CGF) {
2745 // Always emit simple barriers!
2746 if (!CGF.HaveInsertPoint())
2747 return;
2748 // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
2749 // This function does not use parameters, so we can emit just default values.
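// The emitted call is effectively (illustrative):
//   __kmpc_barrier_simple_spmd(/*loc=*/nullptr, /*global_tid=*/0);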
2750 llvm::Value *Args[] = {
2751 llvm::ConstantPointerNull::get(
2752 cast<llvm::PointerType>(getIdentTyPointerTy())),
2753 llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
2754 llvm::CallInst *Call = CGF.EmitRuntimeCall(
2755 createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier_simple_spmd), Args);
2756 Call->setConvergent();
2757 }
2758
2759 void CGOpenMPRuntimeNVPTX::emitBarrierCall(CodeGenFunction &CGF,
2760 SourceLocation Loc,
2761 OpenMPDirectiveKind Kind, bool,
2762 bool) {
2763 // Always emit simple barriers!
2764 if (!CGF.HaveInsertPoint())
2765 return;
2766 // Build call __kmpc_cancel_barrier(loc, thread_id);
2767 unsigned Flags = getDefaultFlagsForBarriers(Kind);
2768 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
2769 getThreadID(CGF, Loc)};
2770 llvm::CallInst *Call = CGF.EmitRuntimeCall(
2771 createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier), Args);
2772 Call->setConvergent();
2773 }
2774
2775 void CGOpenMPRuntimeNVPTX::emitCriticalRegion(
2776 CodeGenFunction &CGF, StringRef CriticalName,
2777 const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
2778 const Expr *Hint) {
2779 llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
2780 llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
2781 llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
2782 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
2783 llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
2784
2785 // Get the mask of active threads in the warp.
2786 llvm::Value *Mask = CGF.EmitRuntimeCall(
2787 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_warp_active_thread_mask));
2788 // Fetch team-local id of the thread.
2789 llvm::Value *ThreadID = getNVPTXThreadID(CGF);
2790
2791 // Get the width of the team.
2792 llvm::Value *TeamWidth = getNVPTXNumThreads(CGF);
2793
2794 // Initialize the counter variable for the loop.
2795 QualType Int32Ty =
2796 CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
2797 Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
2798 LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
2799 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
2800 /*isInit=*/true);
2801
2802 // This block checks whether the loop counter exceeds the upper bound.
2803 CGF.EmitBlock(LoopBB);
2804 llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
2805 llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
2806 CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
2807
2808 // This block tests which single thread should execute the region, and
2809 // which threads should go straight to the synchronisation point.
2810 CGF.EmitBlock(TestBB);
2811 CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
2812 llvm::Value *CmpThreadToCounter =
2813 CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
2814 CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
2815
2816 // Block emits the body of the critical region.
2817 CGF.EmitBlock(BodyBB);
2818
2819 // Output the critical statement.
2820 CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
2821 Hint);
2822
2823 // After executing the body of the critical region, the single executing
2824 // thread will jump to the synchronisation point.
2825 // This block waits for all threads in the current team to finish, then
2826 // increments the counter variable and returns to the loop.
2827 CGF.EmitBlock(SyncBB);
2828 // Reconverge active threads in the warp.
2829 (void)CGF.EmitRuntimeCall(
2830 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_syncwarp), Mask);
2831
2832 llvm::Value *IncCounterVal =
2833 CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
2834 CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
2835 CGF.EmitBranch(LoopBB);
2836
2837 // Block that is reached when all threads in the team complete the region.
2838 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2839 }
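
// A pseudocode sketch of the code emitted above (not the literal IR):
//
//   mask = __kmpc_warp_active_thread_mask();
//   for (counter = 0; counter < team_width; ++counter) {
//     if (thread_id == counter)
//       <critical region body>; // still guarded by the base runtime's lock
//     __kmpc_syncwarp(mask);
//   }
//
// Serializing the region over the team width avoids the deadlocks that
// lockstep (SIMT) execution can cause when divergent threads contend for the
// same critical-section lock.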
2840
2841 /// Cast value to the specified type.
2842 static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
2843 QualType ValTy, QualType CastTy,
2844 SourceLocation Loc) {
2845 assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
2846 "Cast type must be sized.");
2847 assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
2848 "Val type must be sized.");
2849 llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
2850 if (ValTy == CastTy)
2851 return Val;
2852 if (CGF.getContext().getTypeSizeInChars(ValTy) ==
2853 CGF.getContext().getTypeSizeInChars(CastTy))
2854 return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
2855 if (CastTy->isIntegerType() && ValTy->isIntegerType())
2856 return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
2857 CastTy->hasSignedIntegerRepresentation());
2858 Address CastItem = CGF.CreateMemTemp(CastTy);
2859 Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2860 CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
2861 CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy);
2862 return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc);
2863 }
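
// Examples of the cases handled above (illustrative only):
//   float  -> int32_t : equal size, lowered to a plain bitcast;
//   int8_t -> int32_t : both integral, lowered to an integer cast honoring
//                       the signedness of the destination type;
//   otherwise         : the value round-trips through a stack temporary,
//                       stored as ValTy and reloaded as CastTy.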
2864
2865 /// This function creates calls to one of two shuffle functions to copy
2866 /// variables between lanes in a warp.
2867 static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
2868 llvm::Value *Elem,
2869 QualType ElemType,
2870 llvm::Value *Offset,
2871 SourceLocation Loc) {
2872 CodeGenModule &CGM = CGF.CGM;
2873 CGBuilderTy &Bld = CGF.Builder;
2874 CGOpenMPRuntimeNVPTX &RT =
2875 *(static_cast<CGOpenMPRuntimeNVPTX *>(&CGM.getOpenMPRuntime()));
2876
2877 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
2878 assert(Size.getQuantity() <= 8 &&
2879 "Unsupported bitwidth in shuffle instruction.");
2880
2881 OpenMPRTLFunctionNVPTX ShuffleFn = Size.getQuantity() <= 4
2882 ? OMPRTL_NVPTX__kmpc_shuffle_int32
2883 : OMPRTL_NVPTX__kmpc_shuffle_int64;
2884
2885 // Cast all types to 32- or 64-bit values before calling shuffle routines.
2886 QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
2887 Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
2888 llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
2889 llvm::Value *WarpSize =
2890 Bld.CreateIntCast(getNVPTXWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
2891
2892 llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
2893 RT.createNVPTXRuntimeFunction(ShuffleFn), {ElemCast, Offset, WarpSize});
2894
2895 return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
2896 }
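
// For example (a sketch, assuming a 'double' reduction element), the helper
// emits roughly:
//
//   int64_t tmp = bitcast double -> int64_t;
//   int64_t res = __kmpc_shuffle_int64(tmp, lane_offset, warp_size);
//   double  out = bitcast int64_t -> double;
//
// Elements of 4 bytes or fewer take the __kmpc_shuffle_int32 path instead.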
2897
2898 static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
2899 Address DestAddr, QualType ElemType,
2900 llvm::Value *Offset, SourceLocation Loc) {
2901 CGBuilderTy &Bld = CGF.Builder;
2902
2903 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
2904 // Create the loop over the big sized data.
2905 // ptr = (void*)Elem;
2906 // ptrEnd = (void*)(Elem + 1);
2907 // Step = 8;
2908 // while (ptr + Step < ptrEnd)
2909 // shuffle((int64_t)*ptr);
2910 // Step = 4;
2911 // while (ptr + Step < ptrEnd)
2912 // shuffle((int32_t)*ptr);
2913 // ...
2914 Address ElemPtr = DestAddr;
2915 Address Ptr = SrcAddr;
2916 Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
2917 Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
2918 for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
2919 if (Size < CharUnits::fromQuantity(IntSize))
2920 continue;
2921 QualType IntType = CGF.getContext().getIntTypeForBitwidth(
2922 CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
2923 /*Signed=*/1);
2924 llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
2925 Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
2926 ElemPtr =
2927 Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
2928 if (Size.getQuantity() / IntSize > 1) {
2929 llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
2930 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
2931 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
2932 llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
2933 CGF.EmitBlock(PreCondBB);
2934 llvm::PHINode *PhiSrc =
2935 Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
2936 PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
2937 llvm::PHINode *PhiDest =
2938 Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
2939 PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
2940 Ptr = Address(PhiSrc, Ptr.getAlignment());
2941 ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
2942 llvm::Value *PtrDiff = Bld.CreatePtrDiff(
2943 PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
2944 Ptr.getPointer(), CGF.VoidPtrTy));
2945 Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
2946 ThenBB, ExitBB);
2947 CGF.EmitBlock(ThenBB);
2948 llvm::Value *Res = createRuntimeShuffleFunction(
2949 CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
2950 IntType, Offset, Loc);
2951 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
2952 Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
2953 Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
2954 PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
2955 PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
2956 CGF.EmitBranch(PreCondBB);
2957 CGF.EmitBlock(ExitBB);
2958 } else {
2959 llvm::Value *Res = createRuntimeShuffleFunction(
2960 CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
2961 IntType, Offset, Loc);
2962 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
2963 Ptr = Bld.CreateConstGEP(Ptr, 1);
2964 ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
2965 }
2966 Size = Size % IntSize;
2967 }
2968 }
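
// Worked example (illustrative): for a 7-byte element the loop above emits a
// single 4-byte shuffle (leaving a remainder of 3 bytes), then a 2-byte
// shuffle (remainder 1), then a 1-byte shuffle, advancing Ptr/ElemPtr after
// each step. A 16-byte element instead takes the PHI-based loop branch,
// since 16 / 8 > 1 requires two 8-byte shuffle iterations.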
2969
2970 namespace {
2971 enum CopyAction : unsigned {
2972 // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
2973 // the warp using shuffle instructions.
2974 RemoteLaneToThread,
2975 // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
2976 ThreadCopy,
2977 // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
2978 ThreadToScratchpad,
2979 // ScratchpadToThread: Copy from a scratchpad array in global memory
2980 // containing team-reduced data to a thread's stack.
2981 ScratchpadToThread,
2982 };
2983 } // namespace
2984
2985 struct CopyOptionsTy {
2986 llvm::Value *RemoteLaneOffset;
2987 llvm::Value *ScratchpadIndex;
2988 llvm::Value *ScratchpadWidth;
2989 };
2990
2991 /// Emit instructions to copy a Reduce list, which contains partially
2992 /// aggregated values, in the specified direction.
2993 static void emitReductionListCopy(
2994 CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
2995 ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
2996 CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
2997
2998 CodeGenModule &CGM = CGF.CGM;
2999 ASTContext &C = CGM.getContext();
3000 CGBuilderTy &Bld = CGF.Builder;
3001
3002 llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
3003 llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
3004 llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
3005
3006 // Iterate, element by element, through the source Reduce list and
3007 // make a copy.
3008 unsigned Idx = 0;
3009 unsigned Size = Privates.size();
3010 for (const Expr *Private : Privates) {
3011 Address SrcElementAddr = Address::invalid();
3012 Address DestElementAddr = Address::invalid();
3013 Address DestElementPtrAddr = Address::invalid();
3014 // Should we shuffle in an element from a remote lane?
3015 bool ShuffleInElement = false;
3016 // Set to true to update the pointer in the dest Reduce list to a
3017 // newly created element.
3018 bool UpdateDestListPtr = false;
3019 // Increment the src or dest pointer to the scratchpad, for each
3020 // new element.
3021 bool IncrScratchpadSrc = false;
3022 bool IncrScratchpadDest = false;
3023
3024 switch (Action) {
3025 case RemoteLaneToThread: {
3026 // Step 1.1: Get the address for the src element in the Reduce list.
3027 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
3028 SrcElementAddr = CGF.EmitLoadOfPointer(
3029 SrcElementPtrAddr,
3030 C.getPointerType(Private->getType())->castAs<PointerType>());
3031
3032 // Step 1.2: Create a temporary to store the element in the destination
3033 // Reduce list.
3034 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
3035 DestElementAddr =
3036 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
3037 ShuffleInElement = true;
3038 UpdateDestListPtr = true;
3039 break;
3040 }
3041 case ThreadCopy: {
3042 // Step 1.1: Get the address for the src element in the Reduce list.
3043 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
3044 SrcElementAddr = CGF.EmitLoadOfPointer(
3045 SrcElementPtrAddr,
3046 C.getPointerType(Private->getType())->castAs<PointerType>());
3047
3048 // Step 1.2: Get the address for dest element. The destination
3049 // element has already been created on the thread's stack.
3050 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
3051 DestElementAddr = CGF.EmitLoadOfPointer(
3052 DestElementPtrAddr,
3053 C.getPointerType(Private->getType())->castAs<PointerType>());
3054 break;
3055 }
3056 case ThreadToScratchpad: {
3057 // Step 1.1: Get the address for the src element in the Reduce list.
3058 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
3059 SrcElementAddr = CGF.EmitLoadOfPointer(
3060 SrcElementPtrAddr,
3061 C.getPointerType(Private->getType())->castAs<PointerType>());
3062
3063 // Step 1.2: Get the address for dest element:
3064 // address = base + index * ElementSizeInChars.
3065 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
3066 llvm::Value *CurrentOffset =
3067 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
3068 llvm::Value *ScratchPadElemAbsolutePtrVal =
3069 Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
3070 ScratchPadElemAbsolutePtrVal =
3071 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
3072 DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
3073 C.getTypeAlignInChars(Private->getType()));
3074 IncrScratchpadDest = true;
3075 break;
3076 }
3077 case ScratchpadToThread: {
3078 // Step 1.1: Get the address for the src element in the scratchpad.
3079 // address = base + index * ElementSizeInChars.
3080 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
3081 llvm::Value *CurrentOffset =
3082 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
3083 llvm::Value *ScratchPadElemAbsolutePtrVal =
3084 Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
3085 ScratchPadElemAbsolutePtrVal =
3086 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
3087 SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
3088 C.getTypeAlignInChars(Private->getType()));
3089 IncrScratchpadSrc = true;
3090
3091 // Step 1.2: Create a temporary to store the element in the destination
3092 // Reduce list.
3093 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
3094 DestElementAddr =
3095 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
3096 UpdateDestListPtr = true;
3097 break;
3098 }
3099 }
3100
3101 // Regardless of the src and dest of the copy, we emit the load of the src
3102 // element first, as it is required for all copy directions.
3103 SrcElementAddr = Bld.CreateElementBitCast(
3104 SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
3105 DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
3106 SrcElementAddr.getElementType());
3107
3108 // Now that all active lanes have read the element in the
3109 // Reduce list, shuffle over the value from the remote lane.
3110 if (ShuffleInElement) {
3111 shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
3112 RemoteLaneOffset, Private->getExprLoc());
3113 } else {
3114 switch (CGF.getEvaluationKind(Private->getType())) {
3115 case TEK_Scalar: {
3116 llvm::Value *Elem =
3117 CGF.EmitLoadOfScalar(SrcElementAddr, /*Volatile=*/false,
3118 Private->getType(), Private->getExprLoc());
3119 // Store the source element value to the dest element address.
3120 CGF.EmitStoreOfScalar(Elem, DestElementAddr, /*Volatile=*/false,
3121 Private->getType());
3122 break;
3123 }
3124 case TEK_Complex: {
3125 CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
3126 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
3127 Private->getExprLoc());
3128 CGF.EmitStoreOfComplex(
3129 Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
3130 /*isInit=*/false);
3131 break;
3132 }
3133 case TEK_Aggregate:
3134 CGF.EmitAggregateCopy(
3135 CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
3136 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
3137 Private->getType(), AggValueSlot::DoesNotOverlap);
3138 break;
3139 }
3140 }
3141
3142 // Step 3.1: Modify reference in dest Reduce list as needed.
3143 // Modifying the reference in Reduce list to point to the newly
3144 // created element. The element is live in the current function
3145 // scope and that of functions it invokes (i.e., reduce_function).
3146 // RemoteReduceData[i] = (void*)&RemoteElem
3147 if (UpdateDestListPtr) {
3148 CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
3149 DestElementAddr.getPointer(), CGF.VoidPtrTy),
3150 DestElementPtrAddr, /*Volatile=*/false,
3151 C.VoidPtrTy);
3152 }
3153
3154 // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
3155 // address of the next element in scratchpad memory, unless we're currently
3156 // processing the last one. Memory alignment is also taken care of here.
3157 if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
3158 llvm::Value *ScratchpadBasePtr =
3159 IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
3160 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
3161 ScratchpadBasePtr = Bld.CreateNUWAdd(
3162 ScratchpadBasePtr,
3163 Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
3164
3165 // Take care of global memory alignment for performance
3166 ScratchpadBasePtr = Bld.CreateNUWSub(
3167 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
3168 ScratchpadBasePtr = Bld.CreateUDiv(
3169 ScratchpadBasePtr,
3170 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
3171 ScratchpadBasePtr = Bld.CreateNUWAdd(
3172 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
3173 ScratchpadBasePtr = Bld.CreateNUWMul(
3174 ScratchpadBasePtr,
3175 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
3176
3177 if (IncrScratchpadDest)
3178 DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
3179 else /* IncrScratchpadSrc = true */
3180 SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
3181 }
3182
3183 ++Idx;
3184 }
3185 }
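
// For instance (a sketch), with Action == RemoteLaneToThread and a Reduce
// list of {float, double}, each iteration loads the source element pointer,
// creates a fresh stack temporary, shuffles the remote lane's bytes into it
// via shuffleAndStore, and finally redirects the dest Reduce list slot:
//
//   RemoteReduceData[i] = (void *)&RemoteElem; // the UpdateDestListPtr case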
3186
3187 /// This function emits a helper that gathers Reduce lists from the first
3188 /// lane of every active warp to lanes in the first warp.
3189 ///
3190 /// void inter_warp_copy_func(void* reduce_data, num_warps)
3191 /// shared smem[warp_size];
3192 /// For all data entries D in reduce_data:
3193 /// sync
3194 /// If (I am the first lane in each warp)
3195 /// Copy my local D to smem[warp_id]
3196 /// sync
3197 /// if (I am the first warp)
3198 /// Copy smem[thread_id] to my local D
3199 static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
3200 ArrayRef<const Expr *> Privates,
3201 QualType ReductionArrayTy,
3202 SourceLocation Loc) {
3203 ASTContext &C = CGM.getContext();
3204 llvm::Module &M = CGM.getModule();
3205
3206 // ReduceList: thread local Reduce list.
3207 // At the stage of the computation when this function is called, partially
3208 // aggregated values reside in the first lane of every active warp.
3209 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3210 C.VoidPtrTy, ImplicitParamDecl::Other);
3211 // NumWarps: number of warps active in the parallel region. This could
3212 // be smaller than 32 (max warps in a CTA) for partial block reduction.
3213 ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3214 C.getIntTypeForBitwidth(32, /* Signed */ true),
3215 ImplicitParamDecl::Other);
3216 FunctionArgList Args;
3217 Args.push_back(&ReduceListArg);
3218 Args.push_back(&NumWarpsArg);
3219
3220 const CGFunctionInfo &CGFI =
3221 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3222 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
3223 llvm::GlobalValue::InternalLinkage,
3224 "_omp_reduction_inter_warp_copy_func", &M);
3225 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3226 Fn->setDoesNotRecurse();
3227 CodeGenFunction CGF(CGM);
3228 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3229
3230 CGBuilderTy &Bld = CGF.Builder;
3231
3232 // This array is used as a medium to transfer, one reduce element at a time,
3233 // the data from the first lane of every warp to lanes in the first warp
3234 // in order to perform the final step of a reduction in a parallel region
3235 // (reduction across warps). The array is placed in NVPTX __shared__ memory
3236 // for reduced latency, as well as to have a distinct copy for concurrently
3237 // executing target regions. The array is declared with common linkage so
3238 // as to be shared across compilation units.
3239 StringRef TransferMediumName =
3240 "__openmp_nvptx_data_transfer_temporary_storage";
3241 llvm::GlobalVariable *TransferMedium =
3242 M.getGlobalVariable(TransferMediumName);
3243 if (!TransferMedium) {
3244 auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
3245 unsigned SharedAddressSpace = CGM.getTargetAddressSpace(LangAS::cuda_shared);
3246 TransferMedium = new llvm::GlobalVariable(
3247 M, Ty, /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage,
3248 llvm::Constant::getNullValue(Ty), TransferMediumName,
3249 /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
3250 SharedAddressSpace);
3251 CGM.addCompilerUsedGlobal(TransferMedium);
3252 }
3253
3254 // Get the CUDA thread id of the current OpenMP thread on the GPU.
3255 llvm::Value *ThreadID = getNVPTXThreadID(CGF);
3256 // nvptx_lane_id = nvptx_id % warpsize
3257 llvm::Value *LaneID = getNVPTXLaneID(CGF);
3258 // nvptx_warp_id = nvptx_id / warpsize
3259 llvm::Value *WarpID = getNVPTXWarpID(CGF);
3260
3261 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3262 unsigned DefaultAS = CGF.CGM.getTargetCodeGenInfo().getDefaultAS();
3263 Address LocalReduceList(
3264 Bld.CreatePointerBitCastOrAddrSpaceCast(
3265 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3266 C.VoidPtrTy, Loc),
3267 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(DefaultAS)),
3268 CGF.getPointerAlign());
3269
3270 unsigned Idx = 0;
3271 for (const Expr *Private : Privates) {
3272 //
3273 // Warp master copies reduce element to transfer medium in __shared__
3274 // memory.
3275 //
3276 unsigned RealTySize =
3277 C.getTypeSizeInChars(Private->getType())
3278 .alignTo(C.getTypeAlignInChars(Private->getType()))
3279 .getQuantity();
3280 for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) {
3281 unsigned NumIters = RealTySize / TySize;
3282 if (NumIters == 0)
3283 continue;
3284 QualType CType = C.getIntTypeForBitwidth(
3285 C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
3286 llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
3287 CharUnits Align = CharUnits::fromQuantity(TySize);
3288 llvm::Value *Cnt = nullptr;
3289 Address CntAddr = Address::invalid();
3290 llvm::BasicBlock *PrecondBB = nullptr;
3291 llvm::BasicBlock *ExitBB = nullptr;
3292 if (NumIters > 1) {
3293 CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
3294 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
3295 /*Volatile=*/false, C.IntTy);
3296 PrecondBB = CGF.createBasicBlock("precond");
3297 ExitBB = CGF.createBasicBlock("exit");
3298 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
3299 // There is no need to emit line number for unconditional branch.
3300 (void)ApplyDebugLocation::CreateEmpty(CGF);
3301 CGF.EmitBlock(PrecondBB);
3302 Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
3303 llvm::Value *Cmp =
3304 Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
3305 Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
3306 CGF.EmitBlock(BodyBB);
3307 }
3308 // kmpc_barrier.
3309 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
3310 /*EmitChecks=*/false,
3311 /*ForceSimpleCall=*/true);
3312 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
3313 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
3314 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
3315
3316 // if (lane_id == 0)
3317 llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
3318 Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
3319 CGF.EmitBlock(ThenBB);
3320
3321 // Reduce element = LocalReduceList[i]
3322 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3323 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3324 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3325 // elemptr = ((CopyType*)(elemptrptr)) + I
3326 Address ElemPtr = Address(ElemPtrPtr, Align);
3327 ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
3328 if (NumIters > 1) {
3329 ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt),
3330 ElemPtr.getAlignment());
3331 }
3332
3333 // Get pointer to location in transfer medium.
3334 // MediumPtr = &medium[warp_id]
3335 llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
3336 TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
3337 Address MediumPtr(MediumPtrVal, Align);
3338 // Casting to actual data type.
3339 // MediumPtr = (CopyType*)MediumPtrAddr;
3340 MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
3341
3342 // elem = *elemptr
3343 // *MediumPtr = elem
3344 llvm::Value *Elem =
3345 CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false, CType, Loc);
3346 // Store the source element value to the dest element address.
3347 CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType);
3348
3349 Bld.CreateBr(MergeBB);
3350
3351 CGF.EmitBlock(ElseBB);
3352 Bld.CreateBr(MergeBB);
3353
3354 CGF.EmitBlock(MergeBB);
3355
3356 // kmpc_barrier.
3357 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
3358 /*EmitChecks=*/false,
3359 /*ForceSimpleCall=*/true);
3360
3361 //
3362 // Warp 0 copies reduce element from transfer medium.
3363 //
3364 llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
3365 llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
3366 llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
3367
3368 Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
3369 llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
3370 AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
3371
3372 // Up to 32 threads in warp 0 are active.
3373 llvm::Value *IsActiveThread =
3374 Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
3375 Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
3376
3377 CGF.EmitBlock(W0ThenBB);
3378
3379 // SrcMediumPtr = &medium[tid]
3380 llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
3381 TransferMedium,
3382 {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
3383 Address SrcMediumPtr(SrcMediumPtrVal, Align);
3384 // SrcMediumVal = *SrcMediumPtr;
3385 SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
3386
3387 // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
3388 Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3389 llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
3390 TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
3391 Address TargetElemPtr = Address(TargetElemPtrVal, Align);
3392 TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
3393 if (NumIters > 1) {
3394 TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt),
3395 TargetElemPtr.getAlignment());
3396 }
3397
3398 // *TargetElemPtr = SrcMediumVal;
3399 llvm::Value *SrcMediumValue =
3400 CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
3401 CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
3402 CType);
3403 Bld.CreateBr(W0MergeBB);
3404
3405 CGF.EmitBlock(W0ElseBB);
3406 Bld.CreateBr(W0MergeBB);
3407
3408 CGF.EmitBlock(W0MergeBB);
3409
3410 if (NumIters > 1) {
3411 Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
3412 CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
3413 CGF.EmitBranch(PrecondBB);
3414 (void)ApplyDebugLocation::CreateEmpty(CGF);
3415 CGF.EmitBlock(ExitBB);
3416 }
3417 RealTySize %= TySize;
3418 }
3419 ++Idx;
3420 }
3421
3422 CGF.FinishFunction();
3423 return Fn;
3424 }
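
// Example of the chunked transfer above (illustrative): a reduce element
// with RealTySize == 12 moves as NumIters == 3 four-byte transfers
// (TySize == 4), each bracketed by the two kmpc_barrier calls; a 6-byte
// element moves as one 4-byte transfer followed by one 2-byte transfer
// (RealTySize %= TySize leaves 2, then 0).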
3425
3426 /// Emit a helper that reduces data across two OpenMP threads (lanes)
3427 /// in the same warp. It uses shuffle instructions to copy over data from
3428 /// a remote lane's stack. The reduction algorithm performed is specified
3429 /// by the fourth parameter.
3430 ///
3431 /// Algorithm Versions.
3432 /// Full Warp Reduce (argument value 0):
3433 /// This algorithm assumes that all 32 lanes are active and gathers
3434 /// data from these 32 lanes, producing a single resultant value.
3435 /// Contiguous Partial Warp Reduce (argument value 1):
3436 /// This algorithm assumes that only a *contiguous* subset of lanes
3437 /// are active. This happens for the last warp in a parallel region
3438 /// when the user specified num_threads is not an integer multiple of
3439 /// 32. This contiguous subset always starts with the zeroth lane.
3440 /// Partial Warp Reduce (argument value 2):
3441 /// This algorithm gathers data from any number of lanes at any position.
3442 /// All reduced values are stored in the lowest possible lane. The set
3443 /// of problems every algorithm addresses is a superset of those
3444 /// addressable by algorithms with a lower version number. Overhead
3445 /// increases as algorithm version increases.
3446 ///
3447 /// Terminology
3448 /// Reduce element:
3449 /// Reduce element refers to the individual data field with primitive
3450 /// data types to be combined and reduced across threads.
3451 /// Reduce list:
3452 /// Reduce list refers to a collection of local, thread-private
3453 /// reduce elements.
3454 /// Remote Reduce list:
3455 /// Remote Reduce list refers to a collection of remote (relative to
3456 /// the current thread) reduce elements.
3457 ///
3458 /// We distinguish between three states of threads that are important to
3459 /// the implementation of this function.
3460 /// Alive threads:
3461 /// Threads in a warp executing the SIMT instruction, as distinguished from
3462 /// threads that are inactive due to divergent control flow.
3463 /// Active threads:
3464 /// The minimal set of threads that has to be alive upon entry to this
3465 /// function. The computation is correct iff active threads are alive.
3466 /// Some threads are alive but they are not active because they do not
3467 /// contribute to the computation in any useful manner. Turning them off
3468 /// may introduce control flow overheads without any tangible benefits.
3469 /// Effective threads:
3470 /// In order to comply with the argument requirements of the shuffle
3471 /// function, we must keep all lanes holding data alive. But at most
3472 /// half of them perform value aggregation; we refer to this half of
3473 /// threads as effective. The other half is simply handing off their
3474 /// data.
3475 ///
3476 /// Procedure
3477 /// Value shuffle:
3478 /// In this step active threads transfer data from higher lane positions
3479 /// in the warp to lower lane positions, creating Remote Reduce list.
3480 /// Value aggregation:
3481 /// In this step, effective threads combine their thread local Reduce list
3482 /// with Remote Reduce list and store the result in the thread local
3483 /// Reduce list.
3484 /// Value copy:
3485 /// In this step, we deal with the assumption made by algorithm 2
3486 /// (i.e. contiguity assumption). When we have an odd number of lanes
3487 /// active, say 2k+1, only k threads will be effective and therefore k
3488 /// new values will be produced. However, the Reduce list owned by the
3489 /// (2k+1)th thread is ignored in the value aggregation. Therefore
3490 /// we copy the Reduce list from the (2k+1)th lane to the (k+1)th lane so
3491 /// that the contiguity assumption still holds.
3492 static llvm::Function *emitShuffleAndReduceFunction(
3493 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3494 QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
3495 ASTContext &C = CGM.getContext();
3496
3497 // Thread local Reduce list used to host the values of data to be reduced.
3498 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3499 C.VoidPtrTy, ImplicitParamDecl::Other);
3500 // Current lane id; could be logical.
3501 ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
3502 ImplicitParamDecl::Other);
3503 // Offset of the remote source lane relative to the current lane.
3504 ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3505 C.ShortTy, ImplicitParamDecl::Other);
3506 // Algorithm version. This is expected to be known at compile time.
3507 ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3508 C.ShortTy, ImplicitParamDecl::Other);
3509 FunctionArgList Args;
3510 Args.push_back(&ReduceListArg);
3511 Args.push_back(&LaneIDArg);
3512 Args.push_back(&RemoteLaneOffsetArg);
3513 Args.push_back(&AlgoVerArg);
3514
3515 const CGFunctionInfo &CGFI =
3516 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3517 auto *Fn = llvm::Function::Create(
3518 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3519 "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
3520 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3521 Fn->setDoesNotRecurse();
3522 if (CGM.getLangOpts().Optimize) {
3523 Fn->removeFnAttr(llvm::Attribute::NoInline);
3524 Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
3525 Fn->addFnAttr(llvm::Attribute::AlwaysInline);
3526 }
3527
3528 CodeGenFunction CGF(CGM);
3529 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3530
3531 CGBuilderTy &Bld = CGF.Builder;
3532
3533 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3534 unsigned DefaultAS = CGM.getTargetCodeGenInfo().getDefaultAS();
3535 Address LocalReduceList(
3536 Bld.CreatePointerBitCastOrAddrSpaceCast(
3537 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3538 C.VoidPtrTy, SourceLocation()),
3539 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(DefaultAS)),
3540 CGF.getPointerAlign());
3541
3542 Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
3543 llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
3544 AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3545
3546 Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
3547 llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
3548 AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3549
3550 Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
3551 llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
3552 AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3553
3554 // Create a local thread-private variable to host the Reduce list
3555 // from a remote lane.
3556 Address RemoteReduceList =
3557 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
3558
3559 // This loop iterates through the list of reduce elements and copies,
3560 // element by element, from a remote lane in the warp to RemoteReduceList,
3561 // hosted on the thread's stack.
3562 emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
3563 LocalReduceList, RemoteReduceList,
3564 {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
3565 /*ScratchpadIndex=*/nullptr,
3566 /*ScratchpadWidth=*/nullptr});
3567
3568 // The action to be performed on the Remote Reduce list depends on the
3569 // algorithm version.
3570 //
3571 // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
3572 // LaneId % 2 == 0 && Offset > 0):
3573 // do the reduction value aggregation
3574 //
3575 // The thread local variable Reduce list is mutated in place to host the
3576 // reduced data, which is the aggregated value produced from local and
3577 // remote lanes.
3578 //
3579 // Note that AlgoVer is expected to be a constant integer known at compile
3580 // time.
3581 // When AlgoVer==0, the first conjunction evaluates to true, making
3582 // the entire predicate true at compile time.
3583 // When AlgoVer==1, only the second part of the second conjunction must
3584 // be evaluated at runtime; the other conjunctions fold to false at
3585 // compile time.
3586 // When AlgoVer==2, only the second part of the third conjunction must
3587 // be evaluated at runtime; the other conjunctions fold to false at
3588 // compile time.
3589 llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
3590
3591 llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3592 llvm::Value *CondAlgo1 = Bld.CreateAnd(
3593 Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
3594
3595 llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
3596 llvm::Value *CondAlgo2 = Bld.CreateAnd(
3597 Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
3598 CondAlgo2 = Bld.CreateAnd(
3599 CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
3600
3601 llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
3602 CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
3603
3604 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
3605 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
3606 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
3607 Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
3608
3609 CGF.EmitBlock(ThenBB);
3610 // reduce_function(LocalReduceList, RemoteReduceList)
3611 llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3612 LocalReduceList.getPointer(), CGF.VoidPtrTy);
3613 llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3614 RemoteReduceList.getPointer(), CGF.VoidPtrTy);
3615 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3616 CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
3617 Bld.CreateBr(MergeBB);
3618
3619 CGF.EmitBlock(ElseBB);
3620 Bld.CreateBr(MergeBB);
3621
3622 CGF.EmitBlock(MergeBB);
3623
3624 // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
3625 // Reduce list.
3626 Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3627 llvm::Value *CondCopy = Bld.CreateAnd(
3628 Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
3629
3630 llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
3631 llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
3632 llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
3633 Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
3634
3635 CGF.EmitBlock(CpyThenBB);
3636 emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
3637 RemoteReduceList, LocalReduceList);
3638 Bld.CreateBr(CpyMergeBB);
3639
3640 CGF.EmitBlock(CpyElseBB);
3641 Bld.CreateBr(CpyMergeBB);
3642
3643 CGF.EmitBlock(CpyMergeBB);
3644
3645 CGF.FinishFunction();
3646 return Fn;
3647 }
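
// Worked example of the value-copy step (a sketch): with 5 contiguous
// active lanes (2k+1 where k == 2) and Offset == 2 under AlgoVer == 1,
// lanes 0 and 1 aggregate the values shuffled in from lanes 2 and 3, while
// the CondCopy branch (LaneId >= Offset) makes lane 2 adopt lane 4's Remote
// Reduce list, so the surviving partial results again occupy the contiguous
// lanes 0..2 for the next round.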
3648
3649 /// This function emits a helper that copies all the reduction variables from
3650 /// the team into the provided global buffer for the reduction variables.
3651 ///
3652 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
3653 /// For all data entries D in reduce_data:
3654 /// Copy local D to buffer.D[Idx]
3655 static llvm::Value *emitListToGlobalCopyFunction(
3656 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3657 QualType ReductionArrayTy, SourceLocation Loc,
3658 const RecordDecl *TeamReductionRec,
3659 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3660 &VarFieldMap) {
3661 ASTContext &C = CGM.getContext();
3662
3663 // Buffer: global reduction buffer.
3664 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3665 C.VoidPtrTy, ImplicitParamDecl::Other);
3666 // Idx: index of the buffer.
3667 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3668 ImplicitParamDecl::Other);
3669 // ReduceList: thread local Reduce list.
3670 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3671 C.VoidPtrTy, ImplicitParamDecl::Other);
3672 FunctionArgList Args;
3673 Args.push_back(&BufferArg);
3674 Args.push_back(&IdxArg);
3675 Args.push_back(&ReduceListArg);
3676
3677 const CGFunctionInfo &CGFI =
3678 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3679 auto *Fn = llvm::Function::Create(
3680 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3681 "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
3682 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3683 Fn->setDoesNotRecurse();
3684 CodeGenFunction CGF(CGM);
3685 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3686
3687 CGBuilderTy &Bld = CGF.Builder;
3688
3689 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3690 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3691 Address LocalReduceList(
3692 Bld.CreatePointerBitCastOrAddrSpaceCast(
3693 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3694 C.VoidPtrTy, Loc),
3695 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3696 CGF.getPointerAlign());
3697 QualType StaticTy = C.getRecordType(TeamReductionRec);
3698 llvm::Type *LLVMReductionsBufferTy =
3699 CGM.getTypes().ConvertTypeForMem(StaticTy);
3700 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3701 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3702 LLVMReductionsBufferTy->getPointerTo());
3703 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3704 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3705 /*Volatile=*/false, C.IntTy,
3706 Loc)};
3707 unsigned Idx = 0;
3708 for (const Expr *Private : Privates) {
3709 // Reduce element = LocalReduceList[i]
3710 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3711 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3712 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3713 // elemptr = ((CopyType*)(elemptrptr)) + I
3714 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3715 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
3716 Address ElemPtr =
3717 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
3718 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
3719 // Global = Buffer.VD[Idx];
3720 const FieldDecl *FD = VarFieldMap.lookup(VD);
3721 LValue GlobLVal = CGF.EmitLValueForField(
3722 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3723 llvm::Value *BufferPtr =
3724 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3725 GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
3726 switch (CGF.getEvaluationKind(Private->getType())) {
3727 case TEK_Scalar: {
3728 llvm::Value *V = CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false,
3729 Private->getType(), Loc);
3730 CGF.EmitStoreOfScalar(V, GlobLVal);
3731 break;
3732 }
3733 case TEK_Complex: {
3734 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
3735 CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
3736 CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
3737 break;
3738 }
3739 case TEK_Aggregate:
3740 CGF.EmitAggregateCopy(GlobLVal,
3741 CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3742 Private->getType(), AggValueSlot::DoesNotOverlap);
3743 break;
3744 }
3745 ++Idx;
3746 }
3747
3748 CGF.FinishFunction();
3749 return Fn;
3750 }
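
// A sketch of the assumed global buffer layout: for
// reduction(+:foo) reduction(*:bar) with 'float foo' and 'double bar',
// TeamReductionRec corresponds to something like
//
//   struct reduction_buffer_ty { // hypothetical name, for illustration
//     float foo[num_of_records];
//     double bar[num_of_records];
//   };
//
// so the stores above write the local values into buffer->foo[Idx] and
// buffer->bar[Idx].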
3751
3752 /// This function emits a helper that reduces all the reduction variables from
3753 /// the team into the provided global buffer for the reduction variables.
3754 ///
3755 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
3756 /// void *GlobPtrs[];
3757 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
3758 /// ...
3759 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
3760 /// reduce_function(GlobPtrs, reduce_data);
3761 static llvm::Value *emitListToGlobalReduceFunction(
3762 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3763 QualType ReductionArrayTy, SourceLocation Loc,
3764 const RecordDecl *TeamReductionRec,
3765 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3766 &VarFieldMap,
3767 llvm::Function *ReduceFn) {
3768 ASTContext &C = CGM.getContext();
3769
3770 // Buffer: global reduction buffer.
3771 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3772 C.VoidPtrTy, ImplicitParamDecl::Other);
3773 // Idx: index of the buffer.
3774 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3775 ImplicitParamDecl::Other);
3776 // ReduceList: thread local Reduce list.
3777 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3778 C.VoidPtrTy, ImplicitParamDecl::Other);
3779 FunctionArgList Args;
3780 Args.push_back(&BufferArg);
3781 Args.push_back(&IdxArg);
3782 Args.push_back(&ReduceListArg);
3783
3784 const CGFunctionInfo &CGFI =
3785 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3786 auto *Fn = llvm::Function::Create(
3787 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3788 "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
3789 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3790 Fn->setDoesNotRecurse();
3791 CodeGenFunction CGF(CGM);
3792 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3793
3794 CGBuilderTy &Bld = CGF.Builder;
3795
3796 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3797 QualType StaticTy = C.getRecordType(TeamReductionRec);
3798 llvm::Type *LLVMReductionsBufferTy =
3799 CGM.getTypes().ConvertTypeForMem(StaticTy);
3800 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3801 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3802 LLVMReductionsBufferTy->getPointerTo());
3803
3804 // 1. Build a list of reduction variables.
3805 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3806 Address ReductionList =
3807 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3808 auto IPriv = Privates.begin();
3809 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3810 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3811 /*Volatile=*/false, C.IntTy,
3812 Loc)};
3813 unsigned Idx = 0;
3814 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
3815 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3816 // Global = Buffer.VD[Idx];
3817 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
3818 const FieldDecl *FD = VarFieldMap.lookup(VD);
3819 LValue GlobLVal = CGF.EmitLValueForField(
3820 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3821 llvm::Value *BufferPtr =
3822 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3823 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
3824 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
3825 if ((*IPriv)->getType()->isVariablyModifiedType()) {
3826 // Store array size.
3827 ++Idx;
3828 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3829 llvm::Value *Size = CGF.Builder.CreateIntCast(
3830 CGF.getVLASize(
3831 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3832 .NumElts,
3833 CGF.SizeTy, /*isSigned=*/false);
3834 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3835 Elem);
3836 }
3837 }
3838
3839 // Call reduce_function(GlobalReduceList, ReduceList)
3840 llvm::Value *GlobalReduceList =
3841 CGF.EmitCastToVoidPtr(ReductionList.getPointer());
3842 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3843 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
3844 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
3845 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3846 CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
3847 CGF.FinishFunction();
3848 return Fn;
3849 }
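
// Usage sketch (illustrative): the runtime calls this helper once per
// global buffer slot taking part in the reduction, and the helper forwards
// to the same outlined reduce_function used within the team:
//
//   list_to_global_reduce_func(buffer, Idx, my_reduce_data);
//   // ~> reduce_function({&buffer.D0[Idx], ...}, my_reduce_data)
//
// i.e. the global slot acts as the LHS accumulator and the thread's reduce
// list as the RHS.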
3850
3851 /// This function emits a helper that copies all the reduction variables from
3852 /// the provided global buffer back into the team's local reduce list.
3853 ///
3854 /// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
3855 /// For all data entries D in reduce_data:
3856 /// Copy buffer.D[Idx] to local D;
3857 static llvm::Value *emitGlobalToListCopyFunction(
3858 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3859 QualType ReductionArrayTy, SourceLocation Loc,
3860 const RecordDecl *TeamReductionRec,
3861 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3862 &VarFieldMap) {
3863 ASTContext &C = CGM.getContext();
3864
3865 // Buffer: global reduction buffer.
3866 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3867 C.VoidPtrTy, ImplicitParamDecl::Other);
3868 // Idx: index of the buffer.
3869 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3870 ImplicitParamDecl::Other);
3871 // ReduceList: thread local Reduce list.
3872 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3873 C.VoidPtrTy, ImplicitParamDecl::Other);
3874 FunctionArgList Args;
3875 Args.push_back(&BufferArg);
3876 Args.push_back(&IdxArg);
3877 Args.push_back(&ReduceListArg);
3878
3879 const CGFunctionInfo &CGFI =
3880 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3881 auto *Fn = llvm::Function::Create(
3882 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3883 "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
3884 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3885 Fn->setDoesNotRecurse();
3886 CodeGenFunction CGF(CGM);
3887 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3888
3889 CGBuilderTy &Bld = CGF.Builder;
3890
3891 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3892 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3893 Address LocalReduceList(
3894 Bld.CreatePointerBitCastOrAddrSpaceCast(
3895 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3896 C.VoidPtrTy, Loc),
3897 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3898 CGF.getPointerAlign());
3899 QualType StaticTy = C.getRecordType(TeamReductionRec);
3900 llvm::Type *LLVMReductionsBufferTy =
3901 CGM.getTypes().ConvertTypeForMem(StaticTy);
3902 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3903 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3904 LLVMReductionsBufferTy->getPointerTo());
3905
3906 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3907 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3908 /*Volatile=*/false, C.IntTy,
3909 Loc)};
3910 unsigned Idx = 0;
3911 for (const Expr *Private : Privates) {
3912 // Reduce element = LocalReduceList[i]
3913 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3914 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3915 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3916 // elemptr = ((CopyType*)(elemptrptr)) + I
3917 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3918 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
3919 Address ElemPtr =
3920 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
3921 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
3922 // Global = Buffer.VD[Idx];
3923 const FieldDecl *FD = VarFieldMap.lookup(VD);
3924 LValue GlobLVal = CGF.EmitLValueForField(
3925 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3926 llvm::Value *BufferPtr =
3927 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3928 GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
3929 switch (CGF.getEvaluationKind(Private->getType())) {
3930 case TEK_Scalar: {
3931 llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
3932 CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType());
3933 break;
3934 }
3935 case TEK_Complex: {
3936 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
3937 CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3938 /*isInit=*/false);
3939 break;
3940 }
3941 case TEK_Aggregate:
3942 CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3943 GlobLVal, Private->getType(),
3944 AggValueSlot::DoesNotOverlap);
3945 break;
3946 }
3947 ++Idx;
3948 }
3949
3950 CGF.FinishFunction();
3951 return Fn;
3952 }
3953
3954 /// This function emits a helper that reduces the reduction variables in the
3955 /// provided global buffer into the team's local reduce list.
3956 ///
3957 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
3958 /// void *GlobPtrs[];
3959 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
3960 /// ...
3961 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
3962 /// reduce_function(reduce_data, GlobPtrs);
3963 static llvm::Value *emitGlobalToListReduceFunction(
3964 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3965 QualType ReductionArrayTy, SourceLocation Loc,
3966 const RecordDecl *TeamReductionRec,
3967 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3968 &VarFieldMap,
3969 llvm::Function *ReduceFn) {
3970 ASTContext &C = CGM.getContext();
3971
3972 // Buffer: global reduction buffer.
3973 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3974 C.VoidPtrTy, ImplicitParamDecl::Other);
3975 // Idx: index of the buffer.
3976 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3977 ImplicitParamDecl::Other);
3978 // ReduceList: thread local Reduce list.
3979 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3980 C.VoidPtrTy, ImplicitParamDecl::Other);
3981 FunctionArgList Args;
3982 Args.push_back(&BufferArg);
3983 Args.push_back(&IdxArg);
3984 Args.push_back(&ReduceListArg);
3985
3986 const CGFunctionInfo &CGFI =
3987 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3988 auto *Fn = llvm::Function::Create(
3989 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3990 "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
3991 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3992 Fn->setDoesNotRecurse();
3993 CodeGenFunction CGF(CGM);
3994 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3995
3996 CGBuilderTy &Bld = CGF.Builder;
3997
3998 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3999 QualType StaticTy = C.getRecordType(TeamReductionRec);
4000 llvm::Type *LLVMReductionsBufferTy =
4001 CGM.getTypes().ConvertTypeForMem(StaticTy);
4002 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
4003 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
4004 LLVMReductionsBufferTy->getPointerTo());
4005
4006 // 1. Build a list of reduction variables.
4007 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
4008 Address ReductionList =
4009 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
4010 auto IPriv = Privates.begin();
4011 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
4012 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
4013 /*Volatile=*/false, C.IntTy,
4014 Loc)};
4015 unsigned Idx = 0;
4016 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
4017 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4018 // Global = Buffer.VD[Idx];
4019 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
4020 const FieldDecl *FD = VarFieldMap.lookup(VD);
4021 LValue GlobLVal = CGF.EmitLValueForField(
4022 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
4023 llvm::Value *BufferPtr =
4024 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
4025 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
4026 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
4027 if ((*IPriv)->getType()->isVariablyModifiedType()) {
4028 // Store array size.
4029 ++Idx;
4030 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4031 llvm::Value *Size = CGF.Builder.CreateIntCast(
4032 CGF.getVLASize(
4033 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
4034 .NumElts,
4035 CGF.SizeTy, /*isSigned=*/false);
4036 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
4037 Elem);
4038 }
4039 }
4040
4041 // Call reduce_function(ReduceList, GlobalReduceList)
4042 llvm::Value *GlobalReduceList =
4043 CGF.EmitCastToVoidPtr(ReductionList.getPointer());
4044 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
4045 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
4046 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
4047 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
4048 CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
4049 CGF.FinishFunction();
4050 return Fn;
4051 }
4052
4053 ///
4054 /// Design of OpenMP reductions on the GPU
4055 ///
4056 /// Consider a typical OpenMP program with one or more reduction
4057 /// clauses:
4058 ///
4059 /// float foo;
4060 /// double bar;
4061 /// #pragma omp target teams distribute parallel for \
4062 /// reduction(+:foo) reduction(*:bar)
4063 /// for (int i = 0; i < N; i++) {
4064 /// foo += A[i]; bar *= B[i];
4065 /// }
4066 ///
4067 /// where 'foo' and 'bar' are reduced across all OpenMP threads in
4068 /// all teams. In our OpenMP implementation on the NVPTX device an
4069 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
4070 /// within a team are mapped to CUDA threads within a threadblock.
4071 /// Our goal is to efficiently aggregate values across all OpenMP
4072 /// threads such that:
4073 ///
4074 /// - the compiler and runtime are logically concise, and
4075 /// - the reduction is performed efficiently in a hierarchical
4076 /// manner as follows: within OpenMP threads in the same warp,
4077 /// across warps in a threadblock, and finally across teams on
4078 /// the NVPTX device.
4079 ///
4080 /// Introduction to Decoupling
4081 ///
4082 /// We would like to decouple the compiler and the runtime so that the
4083 /// latter is ignorant of the reduction variables (number, data types)
4084 /// and the reduction operators. This allows a simpler interface
4085 /// and implementation while still attaining good performance.
4086 ///
4087 /// Pseudocode for the aforementioned OpenMP program generated by the
4088 /// compiler is as follows:
4089 ///
4090 /// 1. Create private copies of reduction variables on each OpenMP
4091 /// thread: 'foo_private', 'bar_private'
4092 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
4093 /// to it and writes the result in 'foo_private' and 'bar_private'
4094 /// respectively.
4095 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
4096 /// and store the result on the team master:
4097 ///
4098 /// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
4099 /// reduceData, shuffleReduceFn, interWarpCpyFn)
4100 ///
4101 /// where:
4102 /// struct ReduceData {
4103 /// double *foo;
4104 /// double *bar;
4105 /// } reduceData
4106 /// reduceData.foo = &foo_private
4107 /// reduceData.bar = &bar_private
4108 ///
4109 /// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
4110 /// auxiliary functions generated by the compiler that operate on
4111 /// variables of type 'ReduceData'. They help the runtime perform
4112 /// algorithmic steps in a data-agnostic manner.
4113 ///
4114 /// 'shuffleReduceFn' is a pointer to a function that reduces data
4115 /// of type 'ReduceData' across two OpenMP threads (lanes) in the
4116 /// same warp. It takes the following arguments as input:
4117 ///
4118 /// a. variable of type 'ReduceData' on the calling lane,
4119 /// b. its lane_id,
4120 /// c. an offset relative to the current lane_id to generate a
4121 /// remote_lane_id. The remote lane contains the second
4122 /// variable of type 'ReduceData' that is to be reduced.
4123 /// d. an algorithm version parameter determining which reduction
4124 /// algorithm to use.
4125 ///
4126 /// 'shuffleReduceFn' retrieves data from the remote lane using
4127 /// efficient GPU shuffle intrinsics and reduces, using the
4128 /// algorithm specified by the 4th parameter, the two operands
4129 /// element-wise. The result is written to the first operand.
4130 ///
4131 /// Different reduction algorithms are implemented in different
4132 /// runtime functions, all calling 'shuffleReduceFn' to perform
4133 /// the essential reduction step. Therefore, based on the 4th
4134 /// parameter, this function behaves slightly differently so as to
4135 /// cooperate with the runtime and ensure correctness under different
4136 /// circumstances; a sketch of such a function follows this step list.
4137 ///
4138 /// 'InterWarpCpyFn' is a pointer to a function that transfers
4139 /// reduced variables across warps. It tunnels, through CUDA
4140 /// shared memory, the thread-private data of type 'ReduceData'
4141 /// from lane 0 of each warp to a lane in the first warp.
4142 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
4143 /// The last team writes the global reduced value to memory.
4144 ///
4145 /// ret = __kmpc_nvptx_teams_reduce_nowait(...,
4146 /// reduceData, shuffleReduceFn, interWarpCpyFn,
4147 /// scratchpadCopyFn, loadAndReduceFn)
4148 ///
4149 /// 'scratchpadCopyFn' is a helper that stores reduced
4150 /// data from the team master to a scratchpad array in
4151 /// global memory.
4152 ///
4153 /// 'loadAndReduceFn' is a helper that loads data from
4154 /// the scratchpad array and reduces it with the input
4155 /// operand.
4156 ///
4157 /// These compiler generated functions hide address
4158 /// calculation and alignment information from the runtime.
4159 /// 5. if ret == 1:
4160 /// The team master of the last team stores the reduced
4161 /// result to the globals in memory.
4162 /// foo += reduceData.foo; bar *= reduceData.bar
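///
/// As an illustration only -- a hand-written sketch for the example
/// above, not the exact code the compiler emits -- a shuffle-reduce
/// function could look roughly as follows (using the same pseudocode
/// 'shuffle_down' primitive as the algorithms below):
///
///   void shuffleReduceFn(ReduceData *data, int16_t lane_id,
///                        int16_t offset, int16_t algo_version) {
///     // Fetch the remote lane's elements with warp shuffles.
///     double foo_remote = shuffle_down(*data->foo, offset, WARPSIZE);
///     double bar_remote = shuffle_down(*data->bar, offset, WARPSIZE);
///     // Version 0 (full warp reduction): combine unconditionally;
///     // other versions guard the combine as described below.
///     *data->foo += foo_remote;
///     *data->bar *= bar_remote;
///   }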
4163 ///
4164 ///
4165 /// Warp Reduction Algorithms
4166 ///
4167 /// On the warp level, we have three algorithms implemented in the
4168 /// OpenMP runtime depending on the number of active lanes:
4169 ///
4170 /// Full Warp Reduction
4171 ///
4172 /// The reduce algorithm within a warp where all lanes are active
4173 /// is implemented in the runtime as follows:
4174 ///
4175 /// full_warp_reduce(void *reduce_data,
4176 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
4177 /// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
4178 /// ShuffleReduceFn(reduce_data, 0, offset, 0);
4179 /// }
4180 ///
4181 /// The algorithm completes in log(2, WARPSIZE) steps.
4182 ///
4183 /// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
4184 /// not used; we therefore save instructions by not retrieving lane_id
4185 /// from the corresponding special registers. The 4th parameter, which
4186 /// represents the version of the algorithm being used, is set to 0 to
4187 /// signify full warp reduction.
4188 ///
4189 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4190 ///
4191 /// #reduce_elem refers to an element in the local lane's data structure
4192 /// #remote_elem is retrieved from a remote lane
4193 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4194 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
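///
/// For WARPSIZE = 32 this takes five steps, with offsets 16, 8, 4, 2
/// and 1; lane 0 then holds the reduction of the whole warp. A minimal
/// CUDA sketch of the same idea for a single double (illustrative, not
/// the runtime's code):
///
///   __device__ double full_warp_sum(double v) {
///     for (int offset = 32 / 2; offset > 0; offset /= 2)
///       v += __shfl_down_sync(0xffffffffu, v, offset);
///     return v; // lane 0 holds the sum of all 32 lanes
///   }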
4195 ///
4196 /// Contiguous Partial Warp Reduction
4197 ///
4198 /// This reduce algorithm is used within a warp where only the first
4199 /// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
4200 /// number of OpenMP threads in a parallel region is not a multiple of
4201 /// WARPSIZE. The algorithm is implemented in the runtime as follows:
4202 ///
4203 /// void
4204 /// contiguous_partial_reduce(void *reduce_data,
4205 /// kmp_ShuffleReductFctPtr ShuffleReduceFn,
4206 /// int size, int lane_id) {
4207 /// int curr_size;
4208 /// int offset;
4209 /// curr_size = size;
4210 /// offset = curr_size/2;
4211 /// while (offset>0) {
4212 /// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
4213 /// curr_size = (curr_size+1)/2;
4214 /// offset = curr_size/2;
4215 /// }
4216 /// }
4217 ///
4218 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4219 ///
4220 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4221 /// if (lane_id < offset)
4222 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
4223 /// else
4224 /// reduce_elem = remote_elem
4225 ///
4226 /// This algorithm assumes that the data to be reduced are located in a
4227 /// contiguous subset of lanes starting from the first. When there is
4228 /// an odd number of active lanes, the data in the last lane is not
4229 /// aggregated with any other lane's data but is instead copied over.
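///
/// For instance, with size = 7 the offset sequence is 3, 2, 1: lanes
/// 0-2 combine with lanes 3-5 while lane 6's value is copied down to
/// lane 3; then lanes 0-1 combine with lanes 2-3; finally lane 0
/// combines with lane 1, leaving the result in lane 0.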
4230 ///
4231 /// Dispersed Partial Warp Reduction
4232 ///
4233 /// This algorithm is used within a warp when any discontiguous subset of
4234 /// lanes are active. It is used to implement the reduction operation
4235 /// across lanes in an OpenMP simd region or in a nested parallel region.
4236 ///
4237 /// void
4238 /// dispersed_partial_reduce(void *reduce_data,
4239 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
4240 /// int size, remote_id;
4241 /// int logical_lane_id = number_of_active_lanes_before_me() * 2;
4242 /// do {
4243 /// remote_id = next_active_lane_id_right_after_me();
4244 /// # the above function returns 0 if no active lane
4245 /// # is present right after the current lane.
4246 /// size = number_of_active_lanes_in_this_warp();
4247 /// logical_lane_id /= 2;
4248 /// ShuffleReduceFn(reduce_data, logical_lane_id,
4249 /// remote_id-1-threadIdx.x, 2);
4250 /// } while (logical_lane_id % 2 == 0 && size > 1);
4251 /// }
4252 ///
4253 /// There is no assumption made about the initial state of the reduction.
4254 /// Any number of lanes (>=1) could be active at any position. The reduction
4255 /// result is returned in the first active lane.
4256 ///
4257 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4258 ///
4259 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4260 /// if (lane_id % 2 == 0 && offset > 0)
4261 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
4262 /// else
4263 /// reduce_elem = remote_elem
4264 ///
4265 ///
4266 /// Intra-Team Reduction
4267 ///
4268 /// This function, as implemented in the runtime call
4269 /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
4270 /// threads in a team. It first reduces within a warp using the
4271 /// aforementioned algorithms. We then proceed to gather all such
4272 /// reduced values at the first warp.
4273 ///
4274 /// The runtime makes use of the function 'InterWarpCpyFn', which copies
4275 /// data from each of the "warp master" (zeroth lane of each warp, where
4276 /// warp-reduced data is held) to the zeroth warp. This step reduces (in
4277 /// a mathematical sense) the problem of reduction across warp masters in
4278 /// a block to the problem of warp reduction.
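///
/// Putting the two phases together, the intra-team flow is roughly the
/// following (illustrative pseudocode, not the actual runtime source):
///
///   warp_reduce(reduce_data, ShuffleReduceFn);   // within each warp
///   InterWarpCpyFn(reduce_data, num_warps);      // warp masters -> warp 0
///   if (warp_id == 0)
///     warp_reduce(reduce_data, ShuffleReduceFn); // reduce warp masters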
4279 ///
4280 ///
4281 /// Inter-Team Reduction
4282 ///
4283 /// Once a team has reduced its data to a single value, it is stored in
4284 /// a global scratchpad array. Since each team has a distinct slot, this
4285 /// can be done without locking.
4286 ///
4287 /// The last team to write to the scratchpad array proceeds to reduce the
4288 /// scratchpad array. One or more workers in the last team use the helper
4289 /// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
4290 /// the k'th worker reduces every k'th element.
4291 ///
4292 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
4293 /// reduce across workers and compute a globally reduced value.
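///
/// Note that in the '_v2' entry point emitted below, the scratchpad is a
/// statically allocated global buffer ('_openmp_teams_reductions_buffer_$_')
/// holding 'OpenMPCUDAReductionBufNum' records, and the copy and reduce
/// steps are performed by the list-to-global and global-to-list helper
/// functions emitted above.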
4294 ///
4295 void CGOpenMPRuntimeNVPTX::emitReduction(
4296 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
4297 ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
4298 ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
4299 if (!CGF.HaveInsertPoint())
4300 return;
4301
4302 bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
4303 #ifndef NDEBUG
4304 bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
4305 #endif
4306
4307 if (Options.SimpleReduction) {
4308 assert(!TeamsReduction && !ParallelReduction &&
4309 "Invalid reduction selection in emitReduction.");
4310 CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
4311 ReductionOps, Options);
4312 return;
4313 }
4314
4315 assert((TeamsReduction || ParallelReduction) &&
4316 "Invalid reduction selection in emitReduction.");
4317
4318 // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
4319 // RedList, shuffle_reduce_func, interwarp_copy_func);
4320 // or
4321 // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
4322 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
4323 llvm::Value *ThreadId = getThreadID(CGF, Loc);
4324
4325 llvm::Value *Res;
4326 ASTContext &C = CGM.getContext();
4327 // 1. Build a list of reduction variables.
4328 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
4329 auto Size = RHSExprs.size();
4330 for (const Expr *E : Privates) {
4331 if (E->getType()->isVariablyModifiedType())
4332 // Reserve place for array size.
4333 ++Size;
4334 }
4335 llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
4336 QualType ReductionArrayTy =
4337 C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
4338 /*IndexTypeQuals=*/0);
4339 Address ReductionList =
4340 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
4341 auto IPriv = Privates.begin();
4342 unsigned Idx = 0;
4343 for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
4344 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4345 CGF.Builder.CreateStore(
4346 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4347 CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
4348 Elem);
4349 if ((*IPriv)->getType()->isVariablyModifiedType()) {
4350 // Store array size.
4351 ++Idx;
4352 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4353 llvm::Value *Size = CGF.Builder.CreateIntCast(
4354 CGF.getVLASize(
4355 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
4356 .NumElts,
4357 CGF.SizeTy, /*isSigned=*/false);
4358 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
4359 Elem);
4360 }
4361 }
4362
4363 llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4364 ReductionList.getPointer(), CGF.VoidPtrTy);
4365 unsigned DefaultAS = CGM.getTargetCodeGenInfo().getDefaultAS();
4366 llvm::Function *ReductionFn = emitReductionFunction(
4367 Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(DefaultAS),
4368 Privates, LHSExprs, RHSExprs, ReductionOps);
4369 llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
4370 llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
4371 CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
4372 llvm::Value *InterWarpCopyFn =
4373 emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
4374
4375 if (ParallelReduction) {
4376 llvm::Value *Args[] = {RTLoc,
4377 ThreadId,
4378 CGF.Builder.getInt32(RHSExprs.size()),
4379 ReductionArrayTySize,
4380 RL,
4381 ShuffleAndReduceFn,
4382 InterWarpCopyFn};
4383
4384 Res = CGF.EmitRuntimeCall(
4385 createNVPTXRuntimeFunction(
4386 OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2),
4387 Args);
4388 } else {
4389 assert(TeamsReduction && "expected teams reduction.");
4390 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
4391 llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
4392 int Cnt = 0;
4393 for (const Expr *DRE : Privates) {
4394 PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
4395 ++Cnt;
4396 }
4397 const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
4398 CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
4399 C.getLangOpts().OpenMPCUDAReductionBufNum);
4400 TeamsReductions.push_back(TeamReductionRec);
4401 if (!KernelTeamsReductionPtr) {
4402 KernelTeamsReductionPtr = new llvm::GlobalVariable(
4403 CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
4404 llvm::GlobalValue::InternalLinkage, nullptr,
4405 "_openmp_teams_reductions_buffer_$_$ptr");
4406 }
4407 llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
4408 Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
4409 /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
4410 llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
4411 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
4412 llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
4413 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
4414 ReductionFn);
4415 llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
4416 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
4417 llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
4418 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
4419 ReductionFn);
4420
4421 llvm::Value *Args[] = {
4422 RTLoc,
4423 ThreadId,
4424 GlobalBufferPtr,
4425 CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
4426 RL,
4427 ShuffleAndReduceFn,
4428 InterWarpCopyFn,
4429 GlobalToBufferCpyFn,
4430 GlobalToBufferRedFn,
4431 BufferToGlobalCpyFn,
4432 BufferToGlobalRedFn};
4433
4434 Res = CGF.EmitRuntimeCall(
4435 createNVPTXRuntimeFunction(
4436 OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2),
4437 Args);
4438 }
4439
4440 // 5. Build if (res == 1)
4441 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
4442 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
4443 llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
4444 Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
4445 CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
4446
4447 // 6. Build then branch: where we have reduced values in the master
4448 // thread in each team.
4449 // __kmpc_end_reduce{_nowait}(<gtid>);
4450 // break;
4451 CGF.EmitBlock(ThenBB);
4452
4453 // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
4454 auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
4455 this](CodeGenFunction &CGF, PrePostActionTy &Action) {
4456 auto IPriv = Privates.begin();
4457 auto ILHS = LHSExprs.begin();
4458 auto IRHS = RHSExprs.begin();
4459 for (const Expr *E : ReductionOps) {
4460 emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
4461 cast<DeclRefExpr>(*IRHS));
4462 ++IPriv;
4463 ++ILHS;
4464 ++IRHS;
4465 }
4466 };
4467 llvm::Value *EndArgs[] = {ThreadId};
4468 RegionCodeGenTy RCG(CodeGen);
4469 NVPTXActionTy Action(
4470 nullptr, llvm::None,
4471 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
4472 EndArgs);
4473 RCG.setAction(Action);
4474 RCG(CGF);
4475 // There is no need to emit line number for unconditional branch.
4476 (void)ApplyDebugLocation::CreateEmpty(CGF);
4477 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
4478 }
4479
4480 const VarDecl *
4481 CGOpenMPRuntimeNVPTX::translateParameter(const FieldDecl *FD,
4482 const VarDecl *NativeParam) const {
4483 if (!NativeParam->getType()->isReferenceType())
4484 return NativeParam;
4485 QualType ArgType = NativeParam->getType();
4486 QualifierCollector QC;
4487 const Type *NonQualTy = QC.strip(ArgType);
4488 QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
4489 if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
4490 if (Attr->getCaptureKind() == OMPC_map) {
4491 PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
4492 LangAS::opencl_global);
4493 } else if (Attr->getCaptureKind() == OMPC_firstprivate &&
4494 PointeeTy.isConstant(CGM.getContext())) {
4495 PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
4496 LangAS::opencl_generic);
4497 }
4498 }
4499 ArgType = CGM.getContext().getPointerType(PointeeTy);
4500 QC.addRestrict();
4501 enum { NVPTX_local_addr = 5 };
4502 QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
4503 ArgType = QC.apply(CGM.getContext(), ArgType);
4504 if (isa<ImplicitParamDecl>(NativeParam))
4505 return ImplicitParamDecl::Create(
4506 CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
4507 NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
4508 return ParmVarDecl::Create(
4509 CGM.getContext(),
4510 const_cast<DeclContext *>(NativeParam->getDeclContext()),
4511 NativeParam->getBeginLoc(), NativeParam->getLocation(),
4512 NativeParam->getIdentifier(), ArgType,
4513 /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
4514 }
4515
4516 Address
4517 CGOpenMPRuntimeNVPTX::getParameterAddress(CodeGenFunction &CGF,
4518 const VarDecl *NativeParam,
4519 const VarDecl *TargetParam) const {
4520 assert(NativeParam != TargetParam &&
4521 NativeParam->getType()->isReferenceType() &&
4522 "Native arg must not be the same as target arg.");
4523 Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
4524 QualType NativeParamType = NativeParam->getType();
4525 QualifierCollector QC;
4526 const Type *NonQualTy = QC.strip(NativeParamType);
4527 QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
4528 unsigned NativePointeeAddrSpace =
4529 CGF.CGM.getTargetAddressSpace(NativePointeeTy.getAddressSpace());
4530 QualType TargetTy = TargetParam->getType();
4531 llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
4532 LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
4533 // First cast to generic.
4534 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4535 TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
4536 /*AddrSpace=*/0));
4537 // Cast from generic to native address space.
4538 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4539 TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
4540 NativePointeeAddrSpace));
4541 Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
4542 CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
4543 NativeParamType);
4544 return NativeParamAddr;
4545 }
4546
4547 void CGOpenMPRuntimeNVPTX::emitOutlinedFunctionCall(
4548 CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
4549 ArrayRef<llvm::Value *> Args) const {
4550 SmallVector<llvm::Value *, 4> TargetArgs;
4551 TargetArgs.reserve(Args.size());
4552 auto *FnType = OutlinedFn.getFunctionType();
4553 for (unsigned I = 0, E = Args.size(); I < E; ++I) {
4554 if (FnType->isVarArg() && FnType->getNumParams() <= I) {
4555 TargetArgs.append(std::next(Args.begin(), I), Args.end());
4556 break;
4557 }
4558 llvm::Type *TargetType = FnType->getParamType(I);
4559 llvm::Value *NativeArg = Args[I];
4560 if (!TargetType->isPointerTy()) {
4561 TargetArgs.emplace_back(NativeArg);
4562 continue;
4563 }
4564 llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4565 NativeArg,
4566 NativeArg->getType()->getPointerElementType()->getPointerTo());
4567 TargetArgs.emplace_back(
4568 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
4569 }
4570 CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
4571 }
4572
4573 /// Emit function which wraps the outline parallel region
4574 /// and controls the arguments which are passed to this function.
4575 /// The wrapper ensures that the outlined function is called
4576 /// with the correct arguments when data is shared.
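///
/// A rough sketch of the wrapper this emits (illustrative names; the
/// trailing arguments are the values loaded from the shared-args list):
///
///   void outlined_fn_wrapper(int16_t parallel_level, int32_t tid) {
///     int32_t zero = 0;
///     void **shared_args;
///     __kmpc_get_shared_variables(&shared_args);
///     outlined_fn(&tid, &zero, shared_args[0], ..., shared_args[n-1]);
///   }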
4577 llvm::Function *CGOpenMPRuntimeNVPTX::createParallelDataSharingWrapper(
4578 llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
4579 ASTContext &Ctx = CGM.getContext();
4580 const auto &CS = *D.getCapturedStmt(OMPD_parallel);
4581
4582 // Create a function that takes as argument the source thread.
4583 FunctionArgList WrapperArgs;
4584 QualType Int16QTy =
4585 Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
4586 QualType Int32QTy =
4587 Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
4588 ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
4589 /*Id=*/nullptr, Int16QTy,
4590 ImplicitParamDecl::Other);
4591 ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
4592 /*Id=*/nullptr, Int32QTy,
4593 ImplicitParamDecl::Other);
4594 WrapperArgs.emplace_back(&ParallelLevelArg);
4595 WrapperArgs.emplace_back(&WrapperArg);
4596
4597 const CGFunctionInfo &CGFI =
4598 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
4599
4600 auto *Fn = llvm::Function::Create(
4601 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
4602 Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
4603 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
4604 Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
4605 Fn->setDoesNotRecurse();
4606
4607 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
4608 CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
4609 D.getBeginLoc(), D.getBeginLoc());
4610
4611 const auto *RD = CS.getCapturedRecordDecl();
4612 auto CurField = RD->field_begin();
4613
4614 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
4615 /*Name=*/".zero.addr");
4616 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
4617 // Get the array of arguments.
4618 SmallVector<llvm::Value *, 8> Args;
4619
4620 Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
4621 Args.emplace_back(ZeroAddr.getPointer());
4622
4623 CGBuilderTy &Bld = CGF.Builder;
4624 auto CI = CS.capture_begin();
4625
4626 // Use global memory for data sharing.
4627 // Handle passing of global args to workers.
4628 Address GlobalArgs =
4629 CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
4630 llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
4631 llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
4632 CGF.EmitRuntimeCall(
4633 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_get_shared_variables),
4634 DataSharingArgs);
4635
4636 // Retrieve the shared variables from the list of references returned
4637 // by the runtime. Pass the variables to the outlined function.
4638 Address SharedArgListAddress = Address::invalid();
4639 if (CS.capture_size() > 0 ||
4640 isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
4641 SharedArgListAddress = CGF.EmitLoadOfPointer(
4642 GlobalArgs, CGF.getContext()
4643 .getPointerType(CGF.getContext().getPointerType(
4644 CGF.getContext().VoidPtrTy))
4645 .castAs<PointerType>());
4646 }
4647 unsigned Idx = 0;
4648 if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
4649 Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
4650 Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4651 Src, CGF.SizeTy->getPointerTo());
4652 llvm::Value *LB = CGF.EmitLoadOfScalar(
4653 TypedAddress,
4654 /*Volatile=*/false,
4655 CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
4656 cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
4657 Args.emplace_back(LB);
4658 ++Idx;
4659 Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
4660 TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4661 Src, CGF.SizeTy->getPointerTo());
4662 llvm::Value *UB = CGF.EmitLoadOfScalar(
4663 TypedAddress,
4664 /*Volatile=*/false,
4665 CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
4666 cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
4667 Args.emplace_back(UB);
4668 ++Idx;
4669 }
4670 if (CS.capture_size() > 0) {
4671 ASTContext &CGFContext = CGF.getContext();
4672 for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
4673 QualType ElemTy = CurField->getType();
4674 Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
4675 Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4676 Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
4677 llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
4678 /*Volatile=*/false,
4679 CGFContext.getPointerType(ElemTy),
4680 CI->getLocation());
4681 if (CI->capturesVariableByCopy() &&
4682 !CI->getCapturedVar()->getType()->isAnyPointerType()) {
4683 Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
4684 CI->getLocation());
4685 }
4686 Args.emplace_back(Arg);
4687 }
4688 }
4689
4690 emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
4691 CGF.FinishFunction();
4692 return Fn;
4693 }
4694
4695 void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
4696 const Decl *D) {
4697 if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
4698 return;
4699
4700 assert(D && "Expected function or captured|block decl.");
4701 assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
4702 "Function is registered already.");
4703 assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
4704 "Team is set but not processed.");
4705 const Stmt *Body = nullptr;
4706 bool NeedToDelayGlobalization = false;
4707 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
4708 Body = FD->getBody();
4709 } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
4710 Body = BD->getBody();
4711 } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
4712 Body = CD->getBody();
4713 NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
4714 if (NeedToDelayGlobalization &&
4715 getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
4716 return;
4717 }
4718 if (!Body)
4719 return;
4720 CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
4721 VarChecker.Visit(Body);
4722 const RecordDecl *GlobalizedVarsRecord =
4723 VarChecker.getGlobalizedRecord(IsInTTDRegion);
4724 TeamAndReductions.first = nullptr;
4725 TeamAndReductions.second.clear();
4726 ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
4727 VarChecker.getEscapedVariableLengthDecls();
4728 if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
4729 return;
4730 auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
4731 I->getSecond().MappedParams =
4732 std::make_unique<CodeGenFunction::OMPMapVars>();
4733 I->getSecond().GlobalRecord = GlobalizedVarsRecord;
4734 I->getSecond().EscapedParameters.insert(
4735 VarChecker.getEscapedParameters().begin(),
4736 VarChecker.getEscapedParameters().end());
4737 I->getSecond().EscapedVariableLengthDecls.append(
4738 EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
4739 DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
4740 for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
4741 assert(VD->isCanonicalDecl() && "Expected canonical declaration");
4742 const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
4743 Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
4744 }
4745 if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
4746 CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
4747 VarChecker.Visit(Body);
4748 I->getSecond().SecondaryGlobalRecord =
4749 VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
4750 I->getSecond().SecondaryLocalVarData.emplace();
4751 DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
4752 for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
4753 assert(VD->isCanonicalDecl() && "Expected canonical declaration");
4754 const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
4755 Data.insert(
4756 std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
4757 }
4758 }
4759 if (!NeedToDelayGlobalization) {
4760 emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
4761 struct GlobalizationScope final : EHScopeStack::Cleanup {
4762 GlobalizationScope() = default;
4763
4764 void Emit(CodeGenFunction &CGF, Flags flags) override {
4765 static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
4766 .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
4767 }
4768 };
4769 CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
4770 }
4771 }
4772
4773 Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
4774 const VarDecl *VD) {
4775 if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
4776 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
4777 switch (A->getAllocatorType()) {
4778 // Use the default allocator here as by default local vars are
4779 // threadlocal.
4780 case OMPAllocateDeclAttr::OMPNullMemAlloc:
4781 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
4782 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
4783 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
4784 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
4785 // Follow the user decision - use default allocation.
4786 return Address::invalid();
4787 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
4788 // TODO: implement support for user-defined allocators.
4789 return Address::invalid();
4790 case OMPAllocateDeclAttr::OMPConstMemAlloc: {
4791 llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
4792 auto *GV = new llvm::GlobalVariable(
4793 CGM.getModule(), VarTy, /*isConstant=*/false,
4794 llvm::GlobalValue::InternalLinkage,
4795 llvm::Constant::getNullValue(VarTy), VD->getName(),
4796 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
4797 CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant));
4798 CharUnits Align = CGM.getContext().getDeclAlign(VD);
4799 GV->setAlignment(Align.getAsAlign());
4800 return Address(GV, Align);
4801 }
4802 case OMPAllocateDeclAttr::OMPPTeamMemAlloc: {
4803 llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
4804 auto *GV = new llvm::GlobalVariable(
4805 CGM.getModule(), VarTy, /*isConstant=*/false,
4806 llvm::GlobalValue::InternalLinkage,
4807 llvm::Constant::getNullValue(VarTy), VD->getName(),
4808 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
4809 CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
4810 CharUnits Align = CGM.getContext().getDeclAlign(VD);
4811 GV->setAlignment(Align.getAsAlign());
4812 return Address(GV, Align);
4813 }
4814 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
4815 case OMPAllocateDeclAttr::OMPCGroupMemAlloc: {
4816 llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
4817 auto *GV = new llvm::GlobalVariable(
4818 CGM.getModule(), VarTy, /*isConstant=*/false,
4819 llvm::GlobalValue::InternalLinkage,
4820 llvm::Constant::getNullValue(VarTy), VD->getName());
4821 CharUnits Align = CGM.getContext().getDeclAlign(VD);
4822 GV->setAlignment(Align.getAsAlign());
4823 return Address(GV, Align);
4824 }
4825 }
4826 }
4827
4828 if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
4829 return Address::invalid();
4830
4831 VD = VD->getCanonicalDecl();
4832 auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
4833 if (I == FunctionGlobalizedDecls.end())
4834 return Address::invalid();
4835 auto VDI = I->getSecond().LocalVarData.find(VD);
4836 if (VDI != I->getSecond().LocalVarData.end())
4837 return VDI->second.PrivateAddr;
4838 if (VD->hasAttrs()) {
4839 for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
4840 E(VD->attr_end());
4841 IT != E; ++IT) {
4842 auto VDI = I->getSecond().LocalVarData.find(
4843 cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
4844 ->getCanonicalDecl());
4845 if (VDI != I->getSecond().LocalVarData.end())
4846 return VDI->second.PrivateAddr;
4847 }
4848 }
4849
4850 return Address::invalid();
4851 }
4852
4853 void CGOpenMPRuntimeNVPTX::functionFinished(CodeGenFunction &CGF) {
4854 FunctionGlobalizedDecls.erase(CGF.CurFn);
4855 CGOpenMPRuntime::functionFinished(CGF);
4856 }
4857
4858 void CGOpenMPRuntimeNVPTX::getDefaultDistScheduleAndChunk(
4859 CodeGenFunction &CGF, const OMPLoopDirective &S,
4860 OpenMPDistScheduleClauseKind &ScheduleKind,
4861 llvm::Value *&Chunk) const {
4862 if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
4863 ScheduleKind = OMPC_DIST_SCHEDULE_static;
4864 Chunk = CGF.EmitScalarConversion(getNVPTXNumThreads(CGF),
4865 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
4866 S.getIterationVariable()->getType(), S.getBeginLoc());
4867 return;
4868 }
4869 CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
4870 CGF, S, ScheduleKind, Chunk);
4871 }
4872
4873 void CGOpenMPRuntimeNVPTX::getDefaultScheduleAndChunk(
4874 CodeGenFunction &CGF, const OMPLoopDirective &S,
4875 OpenMPScheduleClauseKind &ScheduleKind,
4876 const Expr *&ChunkExpr) const {
4877 ScheduleKind = OMPC_SCHEDULE_static;
4878 // Chunk size is 1 in this case.
4879 llvm::APInt ChunkSize(32, 1);
4880 ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
4881 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
4882 SourceLocation());
4883 }
4884
4885 void CGOpenMPRuntimeNVPTX::adjustTargetSpecificDataForLambdas(
4886 CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
4887 assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
4888 " Expected target-based directive.");
4889 const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
4890 for (const CapturedStmt::Capture &C : CS->captures()) {
4891 // Capture variables captured by reference in lambdas for target-based
4892 // directives.
4893 if (!C.capturesVariable())
4894 continue;
4895 const VarDecl *VD = C.getCapturedVar();
4896 const auto *RD = VD->getType()
4897 .getCanonicalType()
4898 .getNonReferenceType()
4899 ->getAsCXXRecordDecl();
4900 if (!RD || !RD->isLambda())
4901 continue;
4902 Address VDAddr = CGF.GetAddrOfLocalVar(VD);
4903 LValue VDLVal;
4904 if (VD->getType().getCanonicalType()->isReferenceType())
4905 VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
4906 else
4907 VDLVal = CGF.MakeAddrLValue(
4908 VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
4909 llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
4910 FieldDecl *ThisCapture = nullptr;
4911 RD->getCaptureFields(Captures, ThisCapture);
4912 if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
4913 LValue ThisLVal =
4914 CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
4915 llvm::Value *CXXThis = CGF.LoadCXXThis();
4916 CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
4917 }
4918 for (const LambdaCapture &LC : RD->captures()) {
4919 if (LC.getCaptureKind() != LCK_ByRef)
4920 continue;
4921 const VarDecl *VD = LC.getCapturedVar();
4922 if (!CS->capturesVariable(VD))
4923 continue;
4924 auto It = Captures.find(VD);
4925 assert(It != Captures.end() && "Found lambda capture without field.");
4926 LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
4927 Address VDAddr = CGF.GetAddrOfLocalVar(VD);
4928 if (VD->getType().getCanonicalType()->isReferenceType())
4929 VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
4930 VD->getType().getCanonicalType())
4931 .getAddress(CGF);
4932 CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
4933 }
4934 }
4935 }
4936
4937 unsigned CGOpenMPRuntimeNVPTX::getDefaultFirstprivateAddressSpace() const {
4938 return CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
4939 }
4940
4941 bool CGOpenMPRuntimeNVPTX::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
4942 LangAS &AS) {
4943 if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
4944 return false;
4945 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
4946 switch (A->getAllocatorType()) {
4947 case OMPAllocateDeclAttr::OMPNullMemAlloc:
4948 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
4949 // Not supported, fallback to the default mem space.
4950 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
4951 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
4952 case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
4953 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
4954 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
4955 AS = LangAS::Default;
4956 return true;
4957 case OMPAllocateDeclAttr::OMPConstMemAlloc:
4958 AS = LangAS::cuda_constant;
4959 return true;
4960 case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
4961 AS = LangAS::cuda_shared;
4962 return true;
4963 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
4964 llvm_unreachable("Expected predefined allocator for the variables with the "
4965 "static storage.");
4966 }
4967 return false;
4968 }
4969
4970 // Get current CudaArch and ignore any unknown values
4971 static CudaArch getCudaArch(CodeGenModule &CGM) {
4972 if (!CGM.getTarget().hasFeature("ptx"))
4973 return CudaArch::UNKNOWN;
4974 llvm::StringMap<bool> Features;
4975 CGM.getTarget().initFeatureMap(Features, CGM.getDiags(),
4976 CGM.getTarget().getTargetOpts().CPU,
4977 CGM.getTarget().getTargetOpts().Features);
4978 for (const auto &Feature : Features) {
4979 if (Feature.getValue()) {
4980 CudaArch Arch = StringToCudaArch(Feature.getKey());
4981 if (Arch != CudaArch::UNKNOWN)
4982 return Arch;
4983 }
4984 }
4985 return CudaArch::UNKNOWN;
4986 }
4987
4988 /// Check to see if the target architecture supports unified addressing,
4989 /// which is required by the OpenMP requires clause "unified_shared_memory".
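/// For example (illustrative), a translation unit containing
///   #pragma omp requires unified_shared_memory
/// is diagnosed when compiled for sm_62 or older and accepted for sm_70
/// and newer.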
4990 void CGOpenMPRuntimeNVPTX::processRequiresDirective(
4991 const OMPRequiresDecl *D) {
4992 for (const OMPClause *Clause : D->clauselists()) {
4993 if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
4994 CudaArch Arch = getCudaArch(CGM);
4995 switch (Arch) {
4996 case CudaArch::SM_20:
4997 case CudaArch::SM_21:
4998 case CudaArch::SM_30:
4999 case CudaArch::SM_32:
5000 case CudaArch::SM_35:
5001 case CudaArch::SM_37:
5002 case CudaArch::SM_50:
5003 case CudaArch::SM_52:
5004 case CudaArch::SM_53:
5005 case CudaArch::SM_60:
5006 case CudaArch::SM_61:
5007 case CudaArch::SM_62: {
5008 SmallString<256> Buffer;
5009 llvm::raw_svector_ostream Out(Buffer);
5010 Out << "Target architecture " << CudaArchToString(Arch)
5011 << " does not support unified addressing";
5012 CGM.Error(Clause->getBeginLoc(), Out.str());
5013 return;
5014 }
5015 case CudaArch::SM_70:
5016 case CudaArch::SM_72:
5017 case CudaArch::SM_75:
5018 case CudaArch::SM_80:
5019 case CudaArch::GFX600:
5020 case CudaArch::GFX601:
5021 case CudaArch::GFX700:
5022 case CudaArch::GFX701:
5023 case CudaArch::GFX702:
5024 case CudaArch::GFX703:
5025 case CudaArch::GFX704:
5026 case CudaArch::GFX801:
5027 case CudaArch::GFX802:
5028 case CudaArch::GFX803:
5029 case CudaArch::GFX810:
5030 case CudaArch::GFX900:
5031 case CudaArch::GFX902:
5032 case CudaArch::GFX904:
5033 case CudaArch::GFX906:
5034 case CudaArch::GFX908:
5035 case CudaArch::GFX909:
5036 case CudaArch::GFX1010:
5037 case CudaArch::GFX1011:
5038 case CudaArch::GFX1012:
5039 case CudaArch::GFX1030:
5040 case CudaArch::UNKNOWN:
5041 break;
5042 case CudaArch::LAST:
5043 llvm_unreachable("Unexpected Cuda arch.");
5044 }
5045 }
5046 }
5047 CGOpenMPRuntime::processRequiresDirective(D);
5048 }
5049
5050 /// Get number of SMs and number of blocks per SM.
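/// The OpenMPCUDANumSMs and OpenMPCUDABlocksPerSM language options take
/// precedence when both are set; otherwise the architecture supplies
/// defaults, e.g. {16, 16} for sm_35, {56, 32} for sm_61, and {84, 32}
/// for sm_70.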
5051 static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
5052 std::pair<unsigned, unsigned> Data;
5053 if (CGM.getLangOpts().OpenMPCUDANumSMs)
5054 Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
5055 if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
5056 Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
5057 if (Data.first && Data.second)
5058 return Data;
5059 switch (getCudaArch(CGM)) {
5060 case CudaArch::SM_20:
5061 case CudaArch::SM_21:
5062 case CudaArch::SM_30:
5063 case CudaArch::SM_32:
5064 case CudaArch::SM_35:
5065 case CudaArch::SM_37:
5066 case CudaArch::SM_50:
5067 case CudaArch::SM_52:
5068 case CudaArch::SM_53:
5069 return {16, 16};
5070 case CudaArch::SM_60:
5071 case CudaArch::SM_61:
5072 case CudaArch::SM_62:
5073 return {56, 32};
5074 case CudaArch::SM_70:
5075 case CudaArch::SM_72:
5076 case CudaArch::SM_75:
5077 case CudaArch::SM_80:
5078 return {84, 32};
5079 case CudaArch::GFX600:
5080 case CudaArch::GFX601:
5081 case CudaArch::GFX700:
5082 case CudaArch::GFX701:
5083 case CudaArch::GFX702:
5084 case CudaArch::GFX703:
5085 case CudaArch::GFX704:
5086 case CudaArch::GFX801:
5087 case CudaArch::GFX802:
5088 case CudaArch::GFX803:
5089 case CudaArch::GFX810:
5090 case CudaArch::GFX900:
5091 case CudaArch::GFX902:
5092 case CudaArch::GFX904:
5093 case CudaArch::GFX906:
5094 case CudaArch::GFX908:
5095 case CudaArch::GFX909:
5096 case CudaArch::GFX1010:
5097 case CudaArch::GFX1011:
5098 case CudaArch::GFX1012:
5099 case CudaArch::GFX1030:
5100 case CudaArch::UNKNOWN:
5101 break;
5102 case CudaArch::LAST:
5103 llvm_unreachable("Unexpected Cuda arch.");
5104 }
5105 llvm_unreachable("Unexpected NVPTX target without ptx feature.");
5106 }
5107
5108 void CGOpenMPRuntimeNVPTX::clear() {
5109 if (!GlobalizedRecords.empty() &&
5110 !CGM.getLangOpts().OpenMPCUDATargetParallel) {
5111 ASTContext &C = CGM.getContext();
5112 llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
5113 llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
5114 RecordDecl *StaticRD = C.buildImplicitRecord(
5115 "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
5116 StaticRD->startDefinition();
5117 RecordDecl *SharedStaticRD = C.buildImplicitRecord(
5118 "_shared_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
5119 SharedStaticRD->startDefinition();
5120 for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
5121 if (Records.Records.empty())
5122 continue;
5123 unsigned Size = 0;
5124 unsigned RecAlignment = 0;
5125 for (const RecordDecl *RD : Records.Records) {
5126 QualType RDTy = C.getRecordType(RD);
5127 unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
5128 RecAlignment = std::max(RecAlignment, Alignment);
5129 unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
5130 Size =
5131 llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize, Alignment);
5132 }
5133 Size = llvm::alignTo(Size, RecAlignment);
5134 llvm::APInt ArySize(/*numBits=*/64, Size);
5135 QualType SubTy = C.getConstantArrayType(
5136 C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
5137 const bool UseSharedMemory = Size <= SharedMemorySize;
5138 auto *Field =
5139 FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
5140 SourceLocation(), SourceLocation(), nullptr, SubTy,
5141 C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
5142 /*BW=*/nullptr, /*Mutable=*/false,
5143 /*InitStyle=*/ICIS_NoInit);
5144 Field->setAccess(AS_public);
5145 if (UseSharedMemory) {
5146 SharedStaticRD->addDecl(Field);
5147 SharedRecs.push_back(&Records);
5148 } else {
5149 StaticRD->addDecl(Field);
5150 GlobalRecs.push_back(&Records);
5151 }
5152 Records.RecSize->setInitializer(llvm::ConstantInt::get(CGM.SizeTy, Size));
5153 Records.UseSharedMemory->setInitializer(
5154 llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
5155 }
5156 // Allocate SharedMemorySize buffer for the shared memory.
5157 // FIXME: nvlink does not handle weak linkage correctly (objects with
5158 // different sizes are reported as erroneous).
5159 // Restore this code as soon as nvlink is fixed.
5160 if (!SharedStaticRD->field_empty()) {
5161 llvm::APInt ArySize(/*numBits=*/64, SharedMemorySize);
5162 QualType SubTy = C.getConstantArrayType(
5163 C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
5164 auto *Field = FieldDecl::Create(
5165 C, SharedStaticRD, SourceLocation(), SourceLocation(), nullptr, SubTy,
5166 C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
5167 /*BW=*/nullptr, /*Mutable=*/false,
5168 /*InitStyle=*/ICIS_NoInit);
5169 Field->setAccess(AS_public);
5170 SharedStaticRD->addDecl(Field);
5171 }
5172 SharedStaticRD->completeDefinition();
5173 if (!SharedStaticRD->field_empty()) {
5174 QualType StaticTy = C.getRecordType(SharedStaticRD);
5175 llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
5176 auto *GV = new llvm::GlobalVariable(
5177 CGM.getModule(), LLVMStaticTy,
5178 /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
5179 llvm::Constant::getNullValue(LLVMStaticTy),
5180 "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
5181 llvm::GlobalValue::NotThreadLocal,
5182 C.getTargetAddressSpace(LangAS::cuda_shared));
5183 auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
5184 GV, CGM.VoidPtrTy);
5185 for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
5186 Rec->Buffer->replaceAllUsesWith(Replacement);
5187 Rec->Buffer->eraseFromParent();
5188 }
5189 }
5190 StaticRD->completeDefinition();
5191 if (!StaticRD->field_empty()) {
5192 QualType StaticTy = C.getRecordType(StaticRD);
5193 std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
5194 llvm::APInt Size1(32, SMsBlockPerSM.second);
5195 QualType Arr1Ty =
5196 C.getConstantArrayType(StaticTy, Size1, nullptr, ArrayType::Normal,
5197 /*IndexTypeQuals=*/0);
5198 llvm::APInt Size2(32, SMsBlockPerSM.first);
5199 QualType Arr2Ty =
5200 C.getConstantArrayType(Arr1Ty, Size2, nullptr, ArrayType::Normal,
5201 /*IndexTypeQuals=*/0);
5202 llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
5203 // FIXME: nvlink does not handle weak linkage correctly (objects with
5204 // different sizes are reported as erroneous).
5205 // Restore CommonLinkage as soon as nvlink is fixed.
5206 auto *GV = new llvm::GlobalVariable(
5207 CGM.getModule(), LLVMArr2Ty,
5208 /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
5209 llvm::Constant::getNullValue(LLVMArr2Ty),
5210 "_openmp_static_glob_rd_$_");
5211 auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
5212 GV, CGM.VoidPtrTy);
5213 for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
5214 Rec->Buffer->replaceAllUsesWith(Replacement);
5215 Rec->Buffer->eraseFromParent();
5216 }
5217 }
5218 }
5219 if (!TeamsReductions.empty()) {
5220 ASTContext &C = CGM.getContext();
5221 RecordDecl *StaticRD = C.buildImplicitRecord(
5222 "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
5223 StaticRD->startDefinition();
5224 for (const RecordDecl *TeamReductionRec : TeamsReductions) {
5225 QualType RecTy = C.getRecordType(TeamReductionRec);
5226 auto *Field = FieldDecl::Create(
5227 C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
5228 C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
5229 /*BW=*/nullptr, /*Mutable=*/false,
5230 /*InitStyle=*/ICIS_NoInit);
5231 Field->setAccess(AS_public);
5232 StaticRD->addDecl(Field);
5233 }
5234 StaticRD->completeDefinition();
5235 QualType StaticTy = C.getRecordType(StaticRD);
5236 llvm::Type *LLVMReductionsBufferTy =
5237 CGM.getTypes().ConvertTypeForMem(StaticTy);
5238 // FIXME: nvlink does not handle weak linkage correctly (objects with
5239 // different sizes are reported as erroneous).
5240 // Restore CommonLinkage as soon as nvlink is fixed.
5241 auto *GV = new llvm::GlobalVariable(
5242 CGM.getModule(), LLVMReductionsBufferTy,
5243 /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
5244 llvm::Constant::getNullValue(LLVMReductionsBufferTy),
5245 "_openmp_teams_reductions_buffer_$_");
5246 KernelTeamsReductionPtr->setInitializer(
5247 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
5248 CGM.VoidPtrTy));
5249 }
5250 CGOpenMPRuntime::clear();
5251 }
5252