//===------ PPCGCodeGeneration.cpp - Polly Accelerator Code Generation. ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Take a scop created by ScopInfo and map it to GPU code using the ppcg
// GPU mapping strategy.
//
//===----------------------------------------------------------------------===//

#include "polly/CodeGen/PPCGCodeGeneration.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/IslAst.h"
#include "polly/CodeGen/IslNodeBuilder.h"
#include "polly/CodeGen/PerfMonitor.h"
#include "polly/CodeGen/Utils.h"
#include "polly/DependenceInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/ScopDetection.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/InitializePasses.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "isl/union_map.h"
#include <algorithm>

extern "C" {
#include "ppcg/cuda.h"
#include "ppcg/gpu.h"
#include "ppcg/ppcg.h"
}

#include "llvm/Support/Debug.h"

using namespace polly;
using namespace llvm;

#define DEBUG_TYPE "polly-codegen-ppcg"

static cl::opt<bool> DumpSchedule("polly-acc-dump-schedule",
                                  cl::desc("Dump the computed GPU Schedule"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool>
    DumpCode("polly-acc-dump-code",
             cl::desc("Dump C code describing the GPU mapping"), cl::Hidden,
             cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelIR("polly-acc-dump-kernel-ir",
                                  cl::desc("Dump the kernel LLVM-IR"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelASM("polly-acc-dump-kernel-asm",
                                   cl::desc("Dump the kernel assembly code"),
                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

static cl::opt<bool> FastMath("polly-acc-fastmath",
                              cl::desc("Allow unsafe math optimizations"),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));
static cl::opt<bool> SharedMemory("polly-acc-use-shared",
                                  cl::desc("Use shared memory"), cl::Hidden,
                                  cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));
static cl::opt<bool> PrivateMemory("polly-acc-use-private",
                                   cl::desc("Use private memory"), cl::Hidden,
                                   cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

bool polly::PollyManagedMemory;
static cl::opt<bool, true>
    XManagedMemory("polly-acc-codegen-managed-memory",
                   cl::desc("Generate Host kernel code assuming"
                            " that all memory has been"
                            " declared as managed memory"),
                   cl::location(PollyManagedMemory), cl::Hidden,
                   cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool>
    FailOnVerifyModuleFailure("polly-acc-fail-on-verify-module-failure",
                              cl::desc("Fail and generate a backtrace if"
                                       " verifyModule fails on the GPU "
                                       " kernel module."),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));

static cl::opt<std::string> CUDALibDevice(
    "polly-acc-libdevice", cl::desc("Path to CUDA libdevice"), cl::Hidden,
    cl::init("/usr/local/cuda/nvvm/libdevice/libdevice.compute_20.10.ll"),
    cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string>
    CudaVersion("polly-acc-cuda-version",
                cl::desc("The CUDA version to compile for"), cl::Hidden,
                cl::init("sm_30"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int>
    MinCompute("polly-acc-mincompute",
               cl::desc("Minimal number of compute statements to run on GPU."),
               cl::Hidden, cl::init(10 * 512 * 512));

GPURuntime polly::GPURuntimeChoice;
static cl::opt<GPURuntime, true> XGPURuntimeChoice(
    "polly-gpu-runtime", cl::desc("The GPU Runtime API to target"),
    cl::values(clEnumValN(GPURuntime::CUDA, "libcudart",
                          "use the CUDA Runtime API"),
               clEnumValN(GPURuntime::OpenCL, "libopencl",
                          "use the OpenCL Runtime API")),
    cl::location(polly::GPURuntimeChoice), cl::init(GPURuntime::CUDA),
    cl::ZeroOrMore, cl::cat(PollyCategory));

GPUArch polly::GPUArchChoice;
static cl::opt<GPUArch, true>
    XGPUArchChoice("polly-gpu-arch", cl::desc("The GPU Architecture to target"),
                   cl::values(clEnumValN(GPUArch::NVPTX64, "nvptx64",
                                         "target NVIDIA 64-bit architecture"),
                              clEnumValN(GPUArch::SPIR32, "spir32",
                                         "target SPIR 32-bit architecture"),
                              clEnumValN(GPUArch::SPIR64, "spir64",
                                         "target SPIR 64-bit architecture")),
                   cl::location(polly::GPUArchChoice),
                   cl::init(GPUArch::NVPTX64), cl::ZeroOrMore,
                   cl::cat(PollyCategory));

extern bool polly::PerfMonitoring;

/// Return a unique name for a Scop, which is the scop region with the
/// function name.
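/// e.g. "Scop Region: entry.split---for.end | Function: gemm" (region and
/// function names are illustrative).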
std::string getUniqueScopName(const Scop *S) {
  return "Scop Region: " + S->getNameStr() +
         " | Function: " + std::string(S->getFunction().getName());
}

/// Used to store information PPCG wants for kills. This information is
/// used by live range reordering.
///
/// @see computeLiveRangeReordering
/// @see GPUNodeBuilder::createPPCGScop
/// @see GPUNodeBuilder::createPPCGProg
struct MustKillsInfo {
  /// Collection of all kill statements that will be sequenced at the end of
  /// PPCGScop->schedule.
  ///
  /// The nodes in `KillsSchedule` will be merged using `isl_schedule_set`,
  /// which merges schedules in *arbitrary* order.
  /// (We don't care about the order of the kills anyway.)
  isl::schedule KillsSchedule;
  /// Map from kill statement instances to scalars that need to be
  /// killed.
  ///
  /// We currently derive kill information for:
  ///  1. PHI nodes. PHI nodes are not alive outside the scop and can
  ///     consequently all be killed.
  ///  2. Scalar arrays that are not used outside the Scop. This is
  ///     checked by `isScalarUsesContainedInScop`.
  /// [params] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
  isl::union_map TaggedMustKills;

  /// Tagged must kills stripped of the tags.
  /// [params] -> { Stmt_phantom[] -> scalar_to_kill[] }
  isl::union_map MustKills;

  MustKillsInfo() : KillsSchedule() {}
};

/// Check if SAI's uses are entirely contained within Scop S.
/// If a scalar is used only within a Scop, we are free to kill it, as no data
/// can flow in/out of the value any more.
/// @see computeMustKillsInfo
static bool isScalarUsesContainedInScop(const Scop &S,
                                        const ScopArrayInfo *SAI) {
  assert(SAI->isValueKind() && "this function only deals with scalars."
                               " Dealing with arrays requires alias analysis");

  const Region &R = S.getRegion();
  for (User *U : SAI->getBasePtr()->users()) {
    Instruction *I = dyn_cast<Instruction>(U);
    assert(I && "invalid user of scop array info");
    if (!R.contains(I))
      return false;
  }
  return true;
}

/// Compute must-kills needed to enable live range reordering with PPCG.
///
/// @param S The Scop to compute live range reordering information for.
/// @returns live range reordering information that can be used to set up
/// PPCG.
static MustKillsInfo computeMustKillsInfo(const Scop &S) {
  const isl::space ParamSpace = S.getParamSpace();
  MustKillsInfo Info;

  // 1. Collect all ScopArrayInfo that satisfy *any* of the criteria:
  //      1.1 phi nodes in scop.
  //      1.2 scalars that are only used within the scop.
  SmallVector<isl::id, 4> KillMemIds;
  for (ScopArrayInfo *SAI : S.arrays()) {
    if (SAI->isPHIKind() ||
        (SAI->isValueKind() && isScalarUsesContainedInScop(S, SAI)))
      KillMemIds.push_back(isl::manage(SAI->getBasePtrId().release()));
  }

  Info.TaggedMustKills = isl::union_map::empty(ParamSpace.ctx());
  Info.MustKills = isl::union_map::empty(ParamSpace.ctx());

  // Initialising KillsSchedule to `isl_set_empty` creates an empty node in the
  // schedule:
  //     - filter: "[control] -> { }"
  // So, we choose to not create this to keep the output a little nicer,
  // at the cost of some code complexity.
  Info.KillsSchedule = {};

  for (isl::id &ToKillId : KillMemIds) {
    isl::id KillStmtId = isl::id::alloc(
        S.getIslCtx(),
        std::string("SKill_phantom_").append(ToKillId.get_name()), nullptr);

    // NOTE: construction of tagged_must_kill:
    // 2. We need to construct a map:
    //     [param] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
    // To construct this, we use `isl_map_domain_product` on 2 maps:
    // 2a. StmtToScalar:
    //         [param] -> { Stmt_phantom[] -> scalar_to_kill[] }
    // 2b. PhantomRefToScalar:
    //         [param] -> { ref_phantom[] -> scalar_to_kill[] }
    //
    // Combining these with `isl_map_domain_product` gives us
    // TaggedMustKill:
    //     [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }

    // 2a. [param] -> { Stmt[] -> scalar_to_kill[] }
    isl::map StmtToScalar = isl::map::universe(ParamSpace);
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::in, isl::id(KillStmtId));
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::out, isl::id(ToKillId));

    isl::id PhantomRefId = isl::id::alloc(
        S.getIslCtx(), std::string("ref_phantom") + ToKillId.get_name(),
        nullptr);

    // 2b. [param] -> { phantom_ref[] -> scalar_to_kill[] }
    isl::map PhantomRefToScalar = isl::map::universe(ParamSpace);
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::in, PhantomRefId);
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::out, ToKillId);

    // 2. [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }
    isl::map TaggedMustKill = StmtToScalar.domain_product(PhantomRefToScalar);
    Info.TaggedMustKills = Info.TaggedMustKills.unite(TaggedMustKill);

    // 2. [param] -> { Stmt[] -> scalar_to_kill[] }
    Info.MustKills = Info.TaggedMustKills.domain_factor_domain();

    // 3. Create the kill schedule of the form:
    //     "[param] -> { Stmt_phantom[] }"
    // Then add this to Info.KillsSchedule.
    isl::space KillStmtSpace = ParamSpace;
    KillStmtSpace = KillStmtSpace.set_tuple_id(isl::dim::set, KillStmtId);
    isl::union_set KillStmtDomain = isl::set::universe(KillStmtSpace);

    isl::schedule KillSchedule = isl::schedule::from_domain(KillStmtDomain);
    if (!Info.KillsSchedule.is_null())
      Info.KillsSchedule = isl::manage(
          isl_schedule_set(Info.KillsSchedule.release(), KillSchedule.copy()));
    else
      Info.KillsSchedule = KillSchedule;
  }

  return Info;
}

/// Create the ast expressions for a ScopStmt.
///
/// This function is a callback used to generate the ast expressions for each
/// of the scheduled ScopStmts.
static __isl_give isl_id_to_ast_expr *pollyBuildAstExprForStmt(
    void *StmtT, __isl_take isl_ast_build *Build_C,
    isl_multi_pw_aff *(*FunctionIndex)(__isl_take isl_multi_pw_aff *MPA,
                                       isl_id *Id, void *User),
    void *UserIndex,
    isl_ast_expr *(*FunctionExpr)(isl_ast_expr *Expr, isl_id *Id, void *User),
    void *UserExpr) {

  ScopStmt *Stmt = (ScopStmt *)StmtT;

  if (!Stmt || !Build_C)
    return NULL;

  isl::ast_build Build = isl::manage_copy(Build_C);
  isl::ctx Ctx = Build.ctx();
  isl::id_to_ast_expr RefToExpr = isl::id_to_ast_expr::alloc(Ctx, 0);

  Stmt->setAstBuild(Build);

  for (MemoryAccess *Acc : *Stmt) {
    isl::map AddrFunc = Acc->getAddressFunction();
    AddrFunc = AddrFunc.intersect_domain(Stmt->getDomain());

    isl::id RefId = Acc->getId();
    isl::pw_multi_aff PMA = isl::pw_multi_aff::from_map(AddrFunc);

    isl::multi_pw_aff MPA = isl::multi_pw_aff(PMA);
    MPA = MPA.coalesce();
    MPA = isl::manage(FunctionIndex(MPA.release(), RefId.get(), UserIndex));

    isl::ast_expr Access = Build.access_from(MPA);
    Access = isl::manage(FunctionExpr(Access.release(), RefId.get(), UserExpr));
    RefToExpr = RefToExpr.set(RefId, Access);
  }

  return RefToExpr.release();
}

/// Given an LLVM Type, compute its size in bytes.
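/// e.g. an i32 yields 4 and a double yields 8.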
static int computeSizeInBytes(const Type *T) {
  int bytes = T->getPrimitiveSizeInBits() / 8;
  if (bytes == 0)
    bytes = T->getScalarSizeInBits() / 8;
  return bytes;
}

/// Generate code for a GPU specific isl AST.
///
/// The GPUNodeBuilder augments the general existing IslNodeBuilder, which
/// generates code for general-purpose AST nodes, with special functionality
/// for generating GPU specific user nodes.
///
/// @see GPUNodeBuilder::createUser
class GPUNodeBuilder : public IslNodeBuilder {
public:
  GPUNodeBuilder(PollyIRBuilder &Builder, ScopAnnotator &Annotator,
                 const DataLayout &DL, LoopInfo &LI, ScalarEvolution &SE,
                 DominatorTree &DT, Scop &S, BasicBlock *StartBlock,
                 gpu_prog *Prog, GPURuntime Runtime, GPUArch Arch)
      : IslNodeBuilder(Builder, Annotator, DL, LI, SE, DT, S, StartBlock),
        Prog(Prog), Runtime(Runtime), Arch(Arch) {
    getExprBuilder().setIDToSAI(&IDToSAI);
  }

  /// Create after-run-time-check initialization code.
  void initializeAfterRTH();

  /// Finalize the generated scop.
  void finalize() override;

  /// Track if the full build process was successful.
  ///
  /// This value is set to false if, at any point during the build process, an
  /// error occurred that prevents us from generating valid GPU code.
  bool BuildSuccessful = true;

  /// The maximal number of loops surrounding a sequential kernel.
  unsigned DeepestSequential = 0;

  /// The maximal number of loops surrounding a parallel kernel.
  unsigned DeepestParallel = 0;

  /// Return the name to set for the ptx_kernel.
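  /// e.g. "FUNC_gemm_SCOP_0_KERNEL_0" (function name and IDs illustrative).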
  std::string getKernelFuncName(int Kernel_id);

private:
  /// A vector of array base pointers for which a new ScopArrayInfo was created.
  ///
  /// This vector is used to delete the ScopArrayInfo when it is not needed any
  /// more.
  std::vector<Value *> LocalArrays;

  /// A map from ScopArrays to their corresponding device allocations.
  std::map<ScopArrayInfo *, Value *> DeviceAllocations;

  /// The current GPU context.
  Value *GPUContext;

  /// The set of isl_ids allocated in the kernel.
  std::vector<isl_id *> KernelIds;

  /// A module containing GPU code.
  ///
  /// This pointer is only set in case we are currently generating GPU code.
  std::unique_ptr<Module> GPUModule;

  /// The GPU program we generate code for.
  gpu_prog *Prog;

  /// The GPU Runtime implementation to use (OpenCL or CUDA).
  GPURuntime Runtime;

  /// The GPU Architecture to target.
  GPUArch Arch;

  /// Class to free isl_ids.
  class IslIdDeleter {
  public:
    void operator()(__isl_take isl_id *Id) { isl_id_free(Id); };
  };

  /// A set containing all isl_ids allocated in a GPU kernel.
  ///
  /// By releasing this set all isl_ids will be freed.
  std::set<std::unique_ptr<isl_id, IslIdDeleter>> KernelIDs;

  IslExprBuilder::IDToScopArrayInfoTy IDToSAI;

  /// Create code for user-defined AST nodes.
  ///
  /// These AST nodes can be of type:
  ///
  ///   - ScopStmt: A computational statement (TODO)
  ///   - Kernel: A GPU kernel call (TODO)
  ///   - Data-Transfer: A GPU <-> CPU data-transfer
  ///   - In-kernel synchronization
  ///   - In-kernel memory copy statement
  ///
  /// @param UserStmt The ast node to generate code for.
  void createUser(__isl_take isl_ast_node *UserStmt) override;

  void createFor(__isl_take isl_ast_node *Node) override;

  enum DataDirection { HOST_TO_DEVICE, DEVICE_TO_HOST };

  /// Create code for a data transfer statement.
  ///
  /// @param TransferStmt The data transfer statement.
  /// @param Direction    The direction in which to transfer data.
  void createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                          enum DataDirection Direction);

  /// Find llvm::Values referenced in GPU kernel.
  ///
  /// @param Kernel The kernel to scan for llvm::Values.
  ///
  /// @returns A tuple, whose:
  ///          - First element contains the set of values referenced by the
  ///            kernel
  ///          - Second element contains the set of functions referenced by the
  ///            kernel. All functions in the set satisfy
  ///            `isValidFunctionInKernel`.
  ///          - Third element contains loops that have induction variables
  ///            which are used in the kernel, *and* these loops are *neither*
  ///            in the scop, nor do they immediately surround the Scop.
  ///            See [Code generation of induction variables of loops outside
  ///            Scops]
  std::tuple<SetVector<Value *>, SetVector<Function *>, SetVector<const Loop *>,
             isl::space>
  getReferencesInKernel(ppcg_kernel *Kernel);

  /// Compute the sizes of the execution grid for a given kernel.
  ///
  /// @param Kernel The kernel to compute grid sizes for.
  ///
  /// @returns A tuple with grid sizes for the X and Y dimensions.
  std::tuple<Value *, Value *> getGridSizes(ppcg_kernel *Kernel);

  /// Get the managed array pointer for sending host pointers to the device.
  /// \note
  /// This is to be used only with managed memory.
  Value *getManagedDeviceArray(gpu_array_info *Array, ScopArrayInfo *ArrayInfo);

  /// Compute the sizes of the thread blocks for a given kernel.
  ///
  /// @param Kernel The kernel to compute thread block sizes for.
  ///
  /// @returns A tuple with thread block sizes for the X, Y, and Z dimensions.
  std::tuple<Value *, Value *, Value *> getBlockSizes(ppcg_kernel *Kernel);

  /// Store a specific kernel launch parameter in the array of kernel launch
  /// parameters.
  ///
  /// @param Parameters The list of parameters in which to store.
  /// @param Param      The kernel launch parameter to store.
  /// @param Index      The index in the parameter list, at which to store the
  ///                   parameter.
  void insertStoreParameter(Instruction *Parameters, Instruction *Param,
                            int Index);

  /// Create kernel launch parameters.
  ///
  /// @param Kernel        The kernel to create parameters for.
  /// @param F             The kernel function that has been created.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns A stack allocated array with pointers to the parameter
  ///          values that are passed to the kernel.
  Value *createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                SetVector<Value *> SubtreeValues);

  /// Create declarations for kernel variables.
  ///
  /// This includes shared memory declarations.
  ///
  /// @param Kernel The kernel definition to create variables for.
  /// @param FN     The function into which to generate the variables.
  void createKernelVariables(ppcg_kernel *Kernel, Function *FN);

  /// Add CUDA annotations to module.
  ///
  /// Add a set of CUDA annotations that declares the maximal block dimensions
  /// that will be used to execute the CUDA kernel. This allows the NVIDIA
  /// PTX compiler to bound the number of allocated registers to ensure the
  /// resulting kernel is known to run with up to as many block dimensions
  /// as specified here.
  ///
  /// @param M         The module to add the annotations to.
  /// @param BlockDimX The size of block dimension X.
  /// @param BlockDimY The size of block dimension Y.
  /// @param BlockDimZ The size of block dimension Z.
  void addCUDAAnnotations(Module *M, Value *BlockDimX, Value *BlockDimY,
                          Value *BlockDimZ);

  /// Create GPU kernel.
  ///
  /// Code generate the kernel described by @p KernelStmt.
  ///
  /// @param KernelStmt The ast node to generate kernel code for.
  void createKernel(__isl_take isl_ast_node *KernelStmt);

  /// Generate code that computes the size of an array.
  ///
  /// @param Array The array for which to compute a size.
  Value *getArraySize(gpu_array_info *Array);

  /// Generate code to compute the minimal offset at which an array is accessed.
  ///
  /// The offset of an array is the minimal array location accessed in a scop.
  ///
  /// Example:
  ///
  ///   for (long i = 0; i < 100; i++)
  ///     A[i + 42] += ...
  ///
  ///   getArrayOffset(A) results in 42.
  ///
  /// @param Array The array for which to compute the offset.
  /// @returns An llvm::Value that contains the offset of the array.
  Value *getArrayOffset(gpu_array_info *Array);

  /// Prepare the kernel arguments for kernel code generation.
  ///
  /// @param Kernel The kernel to generate code for.
  /// @param FN     The function created for the kernel.
  void prepareKernelArguments(ppcg_kernel *Kernel, Function *FN);

  /// Create kernel function.
  ///
  /// Create a kernel function located in a newly created module that can serve
  /// as target for device code generation. Set the Builder to point to the
  /// start block of this newly created function.
  ///
  /// @param Kernel           The kernel to generate code for.
  /// @param SubtreeValues    The set of llvm::Values referenced by this kernel.
  /// @param SubtreeFunctions The set of llvm::Functions referenced by this
  ///                         kernel.
  void createKernelFunction(ppcg_kernel *Kernel,
                            SetVector<Value *> &SubtreeValues,
                            SetVector<Function *> &SubtreeFunctions);

  /// Create the declaration of a kernel function.
  ///
  /// The kernel function takes as arguments:
  ///
  ///   - One i8 pointer for each external array reference used in the kernel.
  ///   - Host iterators
  ///   - Parameters
  ///   - Other LLVM Value references (TODO)
  ///
  /// @param Kernel        The kernel to generate the function declaration for.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns The newly declared function.
  Function *createKernelFunctionDecl(ppcg_kernel *Kernel,
                                     SetVector<Value *> &SubtreeValues);

  /// Insert intrinsic functions to obtain thread and block ids.
  ///
  /// @param Kernel The kernel to generate the intrinsic functions for.
  void insertKernelIntrinsics(ppcg_kernel *Kernel);

  /// Insert function calls to retrieve the SPIR group/local ids.
  ///
  /// @param Kernel          The kernel to generate the function calls for.
  /// @param SizeTypeIs64bit Whether size_t of the OpenCL device is 64 bit.
  void insertKernelCallsSPIR(ppcg_kernel *Kernel, bool SizeTypeIs64bit);

  /// Setup the creation of functions referenced by the GPU kernel.
  ///
  /// 1. Create new function declarations in GPUModule which are the same as
  ///    SubtreeFunctions.
  ///
  /// 2. Populate IslNodeBuilder::ValueMap with mappings from
  ///    old functions (that come from the original module) to new functions
  ///    (that are created within GPUModule). That way, we generate references
  ///    to the correct function (in GPUModule) in BlockGenerator.
  ///
  /// @see IslNodeBuilder::ValueMap
  /// @see BlockGenerator::GlobalMap
  /// @see BlockGenerator::getNewValue
  /// @see GPUNodeBuilder::getReferencesInKernel.
  ///
  /// @param SubtreeFunctions The set of llvm::Functions referenced by
  ///                         this kernel.
  void setupKernelSubtreeFunctions(SetVector<Function *> SubtreeFunctions);

  /// Create a global-to-shared or shared-to-global copy statement.
  ///
  /// @param CopyStmt The copy statement to generate code for.
  void createKernelCopy(ppcg_kernel_stmt *CopyStmt);

  /// Create code for a ScopStmt called in @p Expr.
  ///
  /// @param Expr       The expression containing the call.
  /// @param KernelStmt The kernel statement referenced in the call.
  void createScopStmt(isl_ast_expr *Expr, ppcg_kernel_stmt *KernelStmt);

  /// Create an in-kernel synchronization call.
  void createKernelSync();

  /// Create a PTX assembly string for the current GPU kernel.
  ///
  /// @returns A string containing the corresponding PTX assembly code.
  std::string createKernelASM();

  /// Remove references from the dominator tree to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearDominators(Function *F);

  /// Remove references from scalar evolution to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearScalarEvolution(Function *F);

  /// Remove references from loop info to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearLoops(Function *F);

  /// Check if the scop requires to be linked with CUDA's libdevice.
  bool requiresCUDALibDevice();

  /// Link with the NVIDIA libdevice library (if needed and available).
  void addCUDALibDevice();

  /// Finalize the generation of the kernel function.
  ///
  /// Free the LLVM-IR module corresponding to the kernel and -- if requested --
  /// dump its IR to stderr.
  ///
  /// @returns The Assembly string of the kernel.
  std::string finalizeKernelFunction();

  /// Finalize the generation of the kernel arguments.
  ///
  /// This function ensures that not-read-only scalars used in a kernel are
  /// stored back to the global memory location they are backed with before
  /// the kernel terminates.
  ///
  /// @param Kernel The kernel to finalize kernel arguments for.
  void finalizeKernelArguments(ppcg_kernel *Kernel);

  /// Create code that allocates memory to store arrays on device.
  void allocateDeviceArrays();

  /// Create code to prepare the managed device pointers.
  void prepareManagedDeviceArrays();

  /// Free all allocated device arrays.
  void freeDeviceArrays();

  /// Create a call to initialize the GPU context.
  ///
  /// @returns A pointer to the newly initialized context.
  Value *createCallInitContext();

  /// Create a call to get the device pointer for a kernel allocation.
  ///
  /// @param Allocation The Polly GPU allocation.
  ///
  /// @returns The device parameter corresponding to this allocation.
  Value *createCallGetDevicePtr(Value *Allocation);

  /// Create a call to free the GPU context.
  ///
  /// @param Context A pointer to an initialized GPU context.
  void createCallFreeContext(Value *Context);

  /// Create a call to allocate memory on the device.
  ///
  /// @param Size The size of memory to allocate.
  ///
  /// @returns A pointer that identifies this allocation.
  Value *createCallAllocateMemoryForDevice(Value *Size);

  /// Create a call to free a device array.
  ///
  /// @param Array The device array to free.
  void createCallFreeDeviceMemory(Value *Array);

  /// Create a call to copy data from host to device.
  ///
  /// @param HostPtr   A pointer to the host data that should be copied.
  /// @param DevicePtr A device pointer specifying the location to copy to.
  void createCallCopyFromHostToDevice(Value *HostPtr, Value *DevicePtr,
                                      Value *Size);

  /// Create a call to copy data from device to host.
  ///
  /// @param DevicePtr A pointer to the device data that should be copied.
  /// @param HostPtr   A host pointer specifying the location to copy to.
  void createCallCopyFromDeviceToHost(Value *DevicePtr, Value *HostPtr,
                                      Value *Size);

  /// Create a call to synchronize Host & Device.
  /// \note
  /// This is to be used only with managed memory.
  void createCallSynchronizeDevice();

  /// Create a call to get a kernel from an assembly string.
  ///
  /// @param Buffer The string describing the kernel.
  /// @param Entry  The name of the kernel function to call.
  ///
  /// @returns A pointer to a kernel object.
  Value *createCallGetKernel(Value *Buffer, Value *Entry);

  /// Create a call to free a GPU kernel.
  ///
  /// @param GPUKernel The kernel to free.
  void createCallFreeKernel(Value *GPUKernel);

  /// Create a call to launch a GPU kernel.
  ///
  /// @param GPUKernel  The kernel to launch.
  /// @param GridDimX   The size of the first grid dimension.
  /// @param GridDimY   The size of the second grid dimension.
  /// @param BlockDimX  The size of the first block dimension.
  /// @param BlockDimY  The size of the second block dimension.
  /// @param BlockDimZ  The size of the third block dimension.
  /// @param Parameters A pointer to an array that itself contains pointers to
  ///                   the parameter values passed for each kernel argument.
  void createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                              Value *GridDimY, Value *BlockDimX,
                              Value *BlockDimY, Value *BlockDimZ,
                              Value *Parameters);
};

std::string GPUNodeBuilder::getKernelFuncName(int Kernel_id) {
  return "FUNC_" + S.getFunction().getName().str() + "_SCOP_" +
         std::to_string(S.getID()) + "_KERNEL_" + std::to_string(Kernel_id);
}

void GPUNodeBuilder::initializeAfterRTH() {
  BasicBlock *NewBB = SplitBlock(Builder.GetInsertBlock(),
                                 &*Builder.GetInsertPoint(), &DT, &LI);
  NewBB->setName("polly.acc.initialize");
  Builder.SetInsertPoint(&NewBB->front());

  GPUContext = createCallInitContext();

  if (!PollyManagedMemory)
    allocateDeviceArrays();
  else
    prepareManagedDeviceArrays();
}

void GPUNodeBuilder::finalize() {
  if (!PollyManagedMemory)
    freeDeviceArrays();

  createCallFreeContext(GPUContext);
  IslNodeBuilder::finalize();
}

void GPUNodeBuilder::allocateDeviceArrays() {
  assert(!PollyManagedMemory &&
         "Managed memory will directly send host pointers "
         "to the kernel. There is no need for device arrays");
  isl_ast_build *Build = isl_ast_build_from_context(S.getContext().release());

  for (int i = 0; i < Prog->n_array; ++i) {
    gpu_array_info *Array = &Prog->array[i];
    auto *ScopArray = (ScopArrayInfo *)Array->user;
    std::string DevArrayName("p_dev_array_");
    DevArrayName.append(Array->name);

    Value *ArraySize = getArraySize(Array);
    Value *Offset = getArrayOffset(Array);
    if (Offset)
      ArraySize = Builder.CreateSub(
          ArraySize,
          Builder.CreateMul(Offset,
                            Builder.getInt64(ScopArray->getElemSizeInBytes())));
    const SCEV *SizeSCEV = SE.getSCEV(ArraySize);
    // It makes no sense to have an array of size 0. The CUDA API will
    // throw an error anyway if we invoke `cuMallocManaged` with size `0`. We
    // choose to be defensive and catch this at the compile phase. It is
    // most likely that we are doing something wrong with size computation.
    if (SizeSCEV->isZero()) {
      errs() << getUniqueScopName(&S)
             << " has computed array size 0: " << *ArraySize
             << " | for array: " << *(ScopArray->getBasePtr())
             << ". This is illegal, exiting.\n";
      report_fatal_error("array size was computed to be 0");
    }

    Value *DevArray = createCallAllocateMemoryForDevice(ArraySize);
    DevArray->setName(DevArrayName);
    DeviceAllocations[ScopArray] = DevArray;
  }

  isl_ast_build_free(Build);
}

void GPUNodeBuilder::prepareManagedDeviceArrays() {
  assert(PollyManagedMemory &&
         "Device arrays must only be prepared in managed-memory mode");
  for (int i = 0; i < Prog->n_array; ++i) {
    gpu_array_info *Array = &Prog->array[i];
    ScopArrayInfo *ScopArray = (ScopArrayInfo *)Array->user;
    Value *HostPtr;

    if (gpu_array_is_scalar(Array))
      HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
    else
      HostPtr = ScopArray->getBasePtr();
    HostPtr = getLatestValue(HostPtr);

    Value *Offset = getArrayOffset(Array);
    if (Offset) {
      HostPtr = Builder.CreatePointerCast(
          HostPtr, ScopArray->getElementType()->getPointerTo());
      HostPtr = Builder.CreateGEP(ScopArray->getElementType(), HostPtr, Offset);
    }

    HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());
    DeviceAllocations[ScopArray] = HostPtr;
  }
}

void GPUNodeBuilder::addCUDAAnnotations(Module *M, Value *BlockDimX,
                                        Value *BlockDimY, Value *BlockDimZ) {
  auto AnnotationNode = M->getOrInsertNamedMetadata("nvvm.annotations");

  for (auto &F : *M) {
    if (F.getCallingConv() != CallingConv::PTX_Kernel)
      continue;

    Value *V[] = {BlockDimX, BlockDimY, BlockDimZ};

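    // The annotation for each kernel is a metadata node of the form:
    //   !{&F, "maxntidx", BlockDimX, "maxntidy", BlockDimY,
    //     "maxntidz", BlockDimZ}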
    Metadata *Elements[] = {
        ValueAsMetadata::get(&F), MDString::get(M->getContext(), "maxntidx"),
        ValueAsMetadata::get(V[0]), MDString::get(M->getContext(), "maxntidy"),
        ValueAsMetadata::get(V[1]), MDString::get(M->getContext(), "maxntidz"),
        ValueAsMetadata::get(V[2]),
    };
    MDNode *Node = MDNode::get(M->getContext(), Elements);
    AnnotationNode->addOperand(Node);
  }
}

void GPUNodeBuilder::freeDeviceArrays() {
  assert(!PollyManagedMemory && "Managed memory does not use device arrays");
  for (auto &Array : DeviceAllocations)
    createCallFreeDeviceMemory(Array.second);
}

Value *GPUNodeBuilder::createCallGetKernel(Value *Buffer, Value *Entry) {
  const char *Name = "polly_getKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Buffer, Entry});
}

Value *GPUNodeBuilder::createCallGetDevicePtr(Value *Allocation) {
  const char *Name = "polly_getDevicePtr";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Allocation});
}

void GPUNodeBuilder::createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                                            Value *GridDimY, Value *BlockDimX,
                                            Value *BlockDimY, Value *BlockDimZ,
                                            Value *Parameters) {
  const char *Name = "polly_launchKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
                         BlockDimZ, Parameters});
}

void GPUNodeBuilder::createCallFreeKernel(Value *GPUKernel) {
  const char *Name = "polly_freeKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel});
}

void GPUNodeBuilder::createCallFreeDeviceMemory(Value *Array) {
  assert(!PollyManagedMemory &&
         "Managed memory does not allocate or free memory "
         "for device");
  const char *Name = "polly_freeDeviceMemory";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Array});
}

Value *GPUNodeBuilder::createCallAllocateMemoryForDevice(Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not allocate or free memory "
         "for device");
  const char *Name = "polly_allocateMemoryForDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Size});
}

void GPUNodeBuilder::createCallCopyFromHostToDevice(Value *HostData,
                                                    Value *DeviceData,
                                                    Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not transfer memory between "
         "device and host");
  const char *Name = "polly_copyFromHostToDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {HostData, DeviceData, Size});
}

void GPUNodeBuilder::createCallCopyFromDeviceToHost(Value *DeviceData,
                                                    Value *HostData,
                                                    Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not transfer memory between "
         "device and host");
  const char *Name = "polly_copyFromDeviceToHost";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {DeviceData, HostData, Size});
}

void GPUNodeBuilder::createCallSynchronizeDevice() {
  assert(PollyManagedMemory && "explicit synchronization is only necessary for "
                               "managed memory");
  const char *Name = "polly_synchronizeDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F);
}

Value *GPUNodeBuilder::createCallInitContext() {
  const char *Name;

  switch (Runtime) {
  case GPURuntime::CUDA:
    Name = "polly_initContextCUDA";
    break;
  case GPURuntime::OpenCL:
    Name = "polly_initContextCL";
    break;
  }

  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {});
}

void GPUNodeBuilder::createCallFreeContext(Value *Context) {
  const char *Name = "polly_freeContext";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Context});
}

/// Check if one string is a prefix of another.
///
/// @param String The string in which to look for the prefix.
/// @param Prefix The prefix to look for.
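/// e.g. isPrefix("to_device_A", "to_device") holds, while
/// isPrefix("A_to_device", "to_device") does not.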
static bool isPrefix(std::string String, std::string Prefix) {
  return String.find(Prefix) == 0;
}

Value *GPUNodeBuilder::getArraySize(gpu_array_info *Array) {
  isl::ast_build Build = isl::ast_build::from_context(S.getContext());
  Value *ArraySize = ConstantInt::get(Builder.getInt64Ty(), Array->size);

  if (!gpu_array_is_scalar(Array)) {
    isl::multi_pw_aff ArrayBound = isl::manage_copy(Array->bound);

    isl::pw_aff OffsetDimZero = ArrayBound.get_pw_aff(0);
    isl::ast_expr Res = Build.expr_from(OffsetDimZero);

    for (unsigned int i = 1; i < Array->n_index; i++) {
      isl::pw_aff Bound_I = ArrayBound.get_pw_aff(i);
      isl::ast_expr Expr = Build.expr_from(Bound_I);
      Res = Res.mul(Expr);
    }

    Value *NumElements = ExprBuilder.create(Res.release());
    if (NumElements->getType() != ArraySize->getType())
      NumElements = Builder.CreateSExt(NumElements, ArraySize->getType());
    ArraySize = Builder.CreateMul(ArraySize, NumElements);
  }
  return ArraySize;
}

Value *GPUNodeBuilder::getArrayOffset(gpu_array_info *Array) {
  if (gpu_array_is_scalar(Array))
    return nullptr;

  isl::ast_build Build = isl::ast_build::from_context(S.getContext());

  isl::set Min = isl::manage_copy(Array->extent).lexmin();

  isl::set ZeroSet = isl::set::universe(Min.get_space());

  for (long i = 0, n = Min.tuple_dim(); i < n; i++)
    ZeroSet = ZeroSet.fix_si(isl::dim::set, i, 0);

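  // If the minimal accessed element is the origin in every dimension, the
  // array needs no offset.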
  if (Min.is_subset(ZeroSet)) {
    return nullptr;
  }

  isl::ast_expr Result = isl::ast_expr::from_val(isl::val(Min.ctx(), 0));

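  // Linearize the minimal access location: for each dimension, multiply the
  // result accumulated so far by the bound of the previous dimension, then
  // add this dimension's minimal index.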
  for (long i = 0, n = Min.tuple_dim(); i < n; i++) {
    if (i > 0) {
      isl::pw_aff Bound_I =
          isl::manage(isl_multi_pw_aff_get_pw_aff(Array->bound, i - 1));
      isl::ast_expr BExpr = Build.expr_from(Bound_I);
      Result = Result.mul(BExpr);
    }
    isl::pw_aff DimMin = Min.dim_min(i);
    isl::ast_expr MExpr = Build.expr_from(DimMin);
    Result = Result.add(MExpr);
  }

  return ExprBuilder.create(Result.release());
}

Value *GPUNodeBuilder::getManagedDeviceArray(gpu_array_info *Array,
                                             ScopArrayInfo *ArrayInfo) {
  assert(PollyManagedMemory && "Only used when you wish to get a host "
                               "pointer for sending data to the kernel, "
                               "with managed memory");
  std::map<ScopArrayInfo *, Value *>::iterator it;
  it = DeviceAllocations.find(ArrayInfo);
  assert(it != DeviceAllocations.end() &&
         "Device array expected to be available");
  return it->second;
}

void GPUNodeBuilder::createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                                        enum DataDirection Direction) {
  assert(!PollyManagedMemory && "Managed memory needs no data transfers");
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(TransferStmt);
  isl_ast_expr *Arg = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(Arg);
  auto Array = (gpu_array_info *)isl_id_get_user(Id);
  auto ScopArray = (ScopArrayInfo *)(Array->user);

  Value *Size = getArraySize(Array);
  Value *Offset = getArrayOffset(Array);
  Value *DevPtr = DeviceAllocations[ScopArray];

  Value *HostPtr;

  if (gpu_array_is_scalar(Array))
    HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
  else
    HostPtr = ScopArray->getBasePtr();
  HostPtr = getLatestValue(HostPtr);

  if (Offset) {
    HostPtr = Builder.CreatePointerCast(
        HostPtr, ScopArray->getElementType()->getPointerTo());
    HostPtr = Builder.CreateGEP(ScopArray->getElementType(), HostPtr, Offset);
  }

  HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());

  if (Offset) {
    Size = Builder.CreateSub(
        Size, Builder.CreateMul(
                  Offset, Builder.getInt64(ScopArray->getElemSizeInBytes())));
  }

  if (Direction == HOST_TO_DEVICE)
    createCallCopyFromHostToDevice(HostPtr, DevPtr, Size);
  else
    createCallCopyFromDeviceToHost(DevPtr, HostPtr, Size);

  isl_id_free(Id);
  isl_ast_expr_free(Arg);
  isl_ast_expr_free(Expr);
  isl_ast_node_free(TransferStmt);
}

void GPUNodeBuilder::createUser(__isl_take isl_ast_node *UserStmt) {
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(UserStmt);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);

  const char *Str = isl_id_get_name(Id);
  if (!strcmp(Str, "kernel")) {
    createKernel(UserStmt);
    if (PollyManagedMemory)
      createCallSynchronizeDevice();
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "init_device")) {
    initializeAfterRTH();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "clear_device")) {
    finalize();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (isPrefix(Str, "to_device")) {
    if (!PollyManagedMemory)
      createDataTransfer(UserStmt, HOST_TO_DEVICE);
    else
      isl_ast_node_free(UserStmt);

    isl_ast_expr_free(Expr);
    return;
  }

  if (isPrefix(Str, "from_device")) {
    if (!PollyManagedMemory) {
      createDataTransfer(UserStmt, DEVICE_TO_HOST);
    } else {
      isl_ast_node_free(UserStmt);
    }
    isl_ast_expr_free(Expr);
    return;
  }

  isl_id *Anno = isl_ast_node_get_annotation(UserStmt);
  struct ppcg_kernel_stmt *KernelStmt =
      (struct ppcg_kernel_stmt *)isl_id_get_user(Anno);
  isl_id_free(Anno);

  switch (KernelStmt->type) {
  case ppcg_kernel_domain:
    createScopStmt(Expr, KernelStmt);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_copy:
    createKernelCopy(KernelStmt);
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_sync:
    createKernelSync();
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  }

  isl_ast_expr_free(Expr);
  isl_ast_node_free(UserStmt);
}

void GPUNodeBuilder::createFor(__isl_take isl_ast_node *Node) {
  createForSequential(isl::manage(Node), false);
}

void GPUNodeBuilder::createKernelCopy(ppcg_kernel_stmt *KernelStmt) {
  isl_ast_expr *LocalIndex = isl_ast_expr_copy(KernelStmt->u.c.local_index);
  LocalIndex = isl_ast_expr_address_of(LocalIndex);
  Value *LocalAddr = ExprBuilder.create(LocalIndex);
  isl_ast_expr *Index = isl_ast_expr_copy(KernelStmt->u.c.index);
  Index = isl_ast_expr_address_of(Index);
  Value *GlobalAddr = ExprBuilder.create(Index);
  Type *IndexTy = cast<PointerType>(GlobalAddr->getType())->getElementType();

  if (KernelStmt->u.c.read) {
    LoadInst *Load = Builder.CreateLoad(IndexTy, GlobalAddr, "shared.read");
    Builder.CreateStore(Load, LocalAddr);
  } else {
    LoadInst *Load = Builder.CreateLoad(IndexTy, LocalAddr, "shared.write");
    Builder.CreateStore(Load, GlobalAddr);
  }
}

void GPUNodeBuilder::createScopStmt(isl_ast_expr *Expr,
                                    ppcg_kernel_stmt *KernelStmt) {
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_to_ast_expr *Indexes = KernelStmt->u.d.ref2expr;

  LoopToScevMapT LTS;
  LTS.insert(OutsideLoopIterations.begin(), OutsideLoopIterations.end());

  createSubstitutions(Expr, Stmt, LTS);

  if (Stmt->isBlockStmt())
    BlockGen.copyStmt(*Stmt, LTS, Indexes);
  else
    RegionGen.copyStmt(*Stmt, LTS, Indexes);
}

void GPUNodeBuilder::createKernelSync() {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  const char *SpirName = "__gen_ocl_barrier_global";

  Function *Sync;

  switch (Arch) {
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    Sync = M->getFunction(SpirName);

    // If Sync is not available, declare it.
    if (!Sync) {
      GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
      std::vector<Type *> Args;
      FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
      Sync = Function::Create(Ty, Linkage, SpirName, M);
      Sync->setCallingConv(CallingConv::SPIR_FUNC);
    }
    break;
  case GPUArch::NVPTX64:
    Sync = Intrinsic::getDeclaration(M, Intrinsic::nvvm_barrier0);
    break;
  }

  Builder.CreateCall(Sync, {});
}

/// Collect llvm::Values referenced from @p Node.
///
/// This function only applies to isl_ast_nodes that are user_nodes referring
/// to a ScopStmt. All other node types are ignored.
///
/// @param Node The node to collect references for.
/// @param User A user pointer used as storage for the data that is collected.
///
/// @returns isl_bool_true if data could be collected successfully.
isl_bool collectReferencesInGPUStmt(__isl_keep isl_ast_node *Node, void *User) {
  if (isl_ast_node_get_type(Node) != isl_ast_node_user)
    return isl_bool_true;

  isl_ast_expr *Expr = isl_ast_node_user_get_expr(Node);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  const char *Str = isl_id_get_name(Id);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);
  isl_ast_expr_free(Expr);

  if (!isPrefix(Str, "Stmt"))
    return isl_bool_true;

  Id = isl_ast_node_get_annotation(Node);
  auto *KernelStmt = (ppcg_kernel_stmt *)isl_id_get_user(Id);
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_free(Id);

  addReferencesFromStmt(Stmt, User, false /* CreateScalarRefs */);

  return isl_bool_true;
}

/// A list of functions that are available in NVIDIA's libdevice.
const std::set<std::string> CUDALibDeviceFunctions = {
    "exp", "expf", "expl", "cos", "cosf", "sqrt", "sqrtf",
    "copysign", "copysignf", "copysignl", "log", "logf", "powi", "powif"};

// A map from intrinsics to their corresponding libdevice functions.
const std::map<std::string, std::string> IntrinsicToLibdeviceFunc = {
    {"llvm.exp.f64", "exp"},
    {"llvm.exp.f32", "expf"},
    {"llvm.powi.f64.i32", "powi"},
    {"llvm.powi.f32.i32", "powif"}};

/// Return the corresponding CUDA libdevice function name for @p Name.
/// Note that this function will try to convert intrinsics in the list
/// IntrinsicToLibdeviceFunc into libdevice functions.
/// This is because some intrinsics such as `exp`
/// are not supported by the NVPTX backend.
/// If this restriction of the backend is lifted, we should refactor our code
/// so that we use intrinsics whenever possible.
///
/// Return "" if we are not compiling for CUDA.
getCUDALibDeviceFuntion(StringRef NameRef)1430 std::string getCUDALibDeviceFuntion(StringRef NameRef) {
1431 std::string Name = NameRef.str();
1432 auto It = IntrinsicToLibdeviceFunc.find(Name);
1433 if (It != IntrinsicToLibdeviceFunc.end())
1434 return getCUDALibDeviceFuntion(It->second);
1435
1436 if (CUDALibDeviceFunctions.count(Name))
1437 return ("__nv_" + Name);
1438
1439 return "";
1440 }
1441
1442 /// Check if F is a function that we can code-generate in a GPU kernel.
isValidFunctionInKernel(llvm::Function * F,bool AllowLibDevice)1443 static bool isValidFunctionInKernel(llvm::Function *F, bool AllowLibDevice) {
1444 assert(F && "F is an invalid pointer");
1445 // We string compare against the name of the function to allow
1446 // all variants of the intrinsic "llvm.sqrt.*", "llvm.fabs", and
1447 // "llvm.copysign".
  const StringRef Name = F->getName();

  if (AllowLibDevice && getCUDALibDeviceFunction(Name).length() > 0)
    return true;

  return F->isIntrinsic() &&
         (Name.startswith("llvm.sqrt") || Name.startswith("llvm.fabs") ||
          Name.startswith("llvm.copysign"));
}

/// Do not take `Function` as a subtree value.
///
/// We try to take the reference of all subtree values and pass them along
/// to the kernel from the host. Taking the address of any function and
/// trying to pass it along is nonsensical. Only allow `Value`s that are not
/// `Function`s.
static bool isValidSubtreeValue(llvm::Value *V) { return !isa<Function>(V); }

/// Return `Function`s from `RawSubtreeValues`.
static SetVector<Function *>
getFunctionsFromRawSubtreeValues(SetVector<Value *> RawSubtreeValues,
                                 bool AllowCUDALibDevice) {
  SetVector<Function *> SubtreeFunctions;
  for (Value *It : RawSubtreeValues) {
    Function *F = dyn_cast<Function>(It);
    if (F) {
      assert(isValidFunctionInKernel(F, AllowCUDALibDevice) &&
             "Code should have bailed out by "
             "this point if an invalid function "
             "were present in a kernel.");
      SubtreeFunctions.insert(F);
    }
  }
  return SubtreeFunctions;
}

std::tuple<SetVector<Value *>, SetVector<Function *>, SetVector<const Loop *>,
           isl::space>
GPUNodeBuilder::getReferencesInKernel(ppcg_kernel *Kernel) {
  SetVector<Value *> SubtreeValues;
  SetVector<const SCEV *> SCEVs;
  SetVector<const Loop *> Loops;
  isl::space ParamSpace = isl::space(S.getIslCtx(), 0, 0).params();
  SubtreeReferences References = {
      LI, SE, S, ValueMap, SubtreeValues, SCEVs, getBlockGenerator(),
      &ParamSpace};

  for (const auto &I : IDToValue)
    SubtreeValues.insert(I.second);

  // NOTE: this is populated in IslNodeBuilder::addParameters
  // See [Code generation of induction variables of loops outside Scops].
  for (const auto &I : OutsideLoopIterations)
    SubtreeValues.insert(cast<SCEVUnknown>(I.second)->getValue());

  isl_ast_node_foreach_descendant_top_down(
      Kernel->tree, collectReferencesInGPUStmt, &References);

  for (const SCEV *Expr : SCEVs) {
    findValues(Expr, SE, SubtreeValues);
    findLoops(Expr, Loops);
  }

  Loops.remove_if([this](const Loop *L) {
    return S.contains(L) || L->contains(S.getEntry());
  });

  for (auto &SAI : S.arrays())
    SubtreeValues.remove(SAI->getBasePtr());

  isl_space *Space = S.getParamSpace().release();
  for (long i = 0, n = isl_space_dim(Space, isl_dim_param); i < n; i++) {
    isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }
  isl_space_free(Space);

  for (long i = 0, n = isl_space_dim(Kernel->space, isl_dim_set); i < n; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }

  // Note: { ValidSubtreeValues, ValidSubtreeFunctions } partitions
  // SubtreeValues. This is important, because we should not lose any
  // SubtreeValues in the process of constructing the
  // ValidSubtree{Values, Functions} sets, nor should these two sets
  // have any element in common.
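  // Illustrative example: a SubtreeValues set {%n, @expf} is split into
  // ValidSubtreeValues = {%n} and ValidSubtreeFunctions = {@expf}.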
  auto ValidSubtreeValuesIt =
      make_filter_range(SubtreeValues, isValidSubtreeValue);
  SetVector<Value *> ValidSubtreeValues(ValidSubtreeValuesIt.begin(),
                                        ValidSubtreeValuesIt.end());

  bool AllowCUDALibDevice = Arch == GPUArch::NVPTX64;

  SetVector<Function *> ValidSubtreeFunctions(
      getFunctionsFromRawSubtreeValues(SubtreeValues, AllowCUDALibDevice));

  // @see IslNodeBuilder::getReferencesInSubtree
  SetVector<Value *> ReplacedValues;
  for (Value *V : ValidSubtreeValues) {
    auto It = ValueMap.find(V);
    if (It == ValueMap.end())
      ReplacedValues.insert(V);
    else
      ReplacedValues.insert(It->second);
  }
  return std::make_tuple(ReplacedValues, ValidSubtreeFunctions, Loops,
                         ParamSpace);
}

void GPUNodeBuilder::clearDominators(Function *F) {
  DomTreeNode *N = DT.getNode(&F->getEntryBlock());
  std::vector<BasicBlock *> Nodes;
  for (po_iterator<DomTreeNode *> I = po_begin(N), E = po_end(N); I != E; ++I)
    Nodes.push_back(I->getBlock());

  for (BasicBlock *BB : Nodes)
    DT.eraseNode(BB);
}

void GPUNodeBuilder::clearScalarEvolution(Function *F) {
  for (BasicBlock &BB : *F) {
    Loop *L = LI.getLoopFor(&BB);
    if (L)
      SE.forgetLoop(L);
  }
}

void GPUNodeBuilder::clearLoops(Function *F) {
  SmallSet<Loop *, 1> WorkList;
  for (BasicBlock &BB : *F) {
    Loop *L = LI.getLoopFor(&BB);
    if (L)
      WorkList.insert(L);
  }
  for (auto *L : WorkList)
    LI.erase(L);
}

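/// Illustrative sketch: for a kernel with n_grid == 1 and a grid-size
/// expression {N}, getGridSizes returns the pair (trunc N to i32, 1), since
/// missing grid dimensions are padded with a constant size of 1.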
std::tuple<Value *, Value *> GPUNodeBuilder::getGridSizes(ppcg_kernel *Kernel) {
  std::vector<Value *> Sizes;
  isl::ast_build Context = isl::ast_build::from_context(S.getContext());

  isl::multi_pw_aff GridSizePwAffs = isl::manage_copy(Kernel->grid_size);
  for (long i = 0; i < Kernel->n_grid; i++) {
    isl::pw_aff Size = GridSizePwAffs.get_pw_aff(i);
    isl::ast_expr GridSize = Context.expr_from(Size);
    Value *Res = ExprBuilder.create(GridSize.release());
    Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
    Sizes.push_back(Res);
  }

  for (long i = Kernel->n_grid; i < 3; i++)
    Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));

  return std::make_tuple(Sizes[0], Sizes[1]);
}

std::tuple<Value *, Value *, Value *>
GPUNodeBuilder::getBlockSizes(ppcg_kernel *Kernel) {
  std::vector<Value *> Sizes;

  for (long i = 0; i < Kernel->n_block; i++) {
    Value *Res = ConstantInt::get(Builder.getInt32Ty(), Kernel->block_dim[i]);
    Sizes.push_back(Res);
  }

  for (long i = Kernel->n_block; i < 3; i++)
    Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));

  return std::make_tuple(Sizes[0], Sizes[1], Sizes[2]);
}

void GPUNodeBuilder::insertStoreParameter(Instruction *Parameters,
                                          Instruction *Param, int Index) {
  Value *Slot = Builder.CreateGEP(
      Parameters->getType()->getPointerElementType(), Parameters,
      {Builder.getInt64(0), Builder.getInt64(Index)});
  Value *ParamTyped = Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
  Builder.CreateStore(ParamTyped, Slot);
}

Value *
GPUNodeBuilder::createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                       SetVector<Value *> SubtreeValues) {
  const int NumArgs = F->arg_size();
  std::vector<int> ArgSizes(NumArgs);

  // If we are using the OpenCL Runtime, we need to add the kernel argument
  // sizes to the end of the launch-parameter list, so OpenCL can determine
  // how big the respective kernel arguments are.
  // Here we need to reserve adequate space for that.
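  // Illustrative layout for a kernel with two arguments: under OpenCL the
  // parameter array holds { arg0, arg1, size(arg0), size(arg1) }, whereas
  // for CUDA only { arg0, arg1 } is allocated.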
  Type *ArrayTy;
  if (Runtime == GPURuntime::OpenCL)
    ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), 2 * NumArgs);
  else
    ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumArgs);

  BasicBlock *EntryBlock =
      &Builder.GetInsertBlock()->getParent()->getEntryBlock();
  auto AddressSpace = F->getParent()->getDataLayout().getAllocaAddrSpace();
  std::string Launch = "polly_launch_" + std::to_string(Kernel->id);
  Instruction *Parameters = new AllocaInst(
      ArrayTy, AddressSpace, Launch + "_params", EntryBlock->getTerminator());

  int Index = 0;
  for (long i = 0; i < Prog->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));

    if (Runtime == GPURuntime::OpenCL)
      ArgSizes[Index] = SAI->getElemSizeInBytes();

    Value *DevArray = nullptr;
    if (PollyManagedMemory) {
      DevArray = getManagedDeviceArray(&Prog->array[i],
                                       const_cast<ScopArrayInfo *>(SAI));
    } else {
      DevArray = DeviceAllocations[const_cast<ScopArrayInfo *>(SAI)];
      DevArray = createCallGetDevicePtr(DevArray);
    }
    assert(DevArray != nullptr && "Array to be offloaded to device not "
                                  "initialized");
    Value *Offset = getArrayOffset(&Prog->array[i]);

    if (Offset) {
      DevArray = Builder.CreatePointerCast(
          DevArray, SAI->getElementType()->getPointerTo());
      DevArray = Builder.CreateGEP(SAI->getElementType(), DevArray,
                                   Builder.CreateNeg(Offset));
      DevArray = Builder.CreatePointerCast(DevArray, Builder.getInt8PtrTy());
    }
    Value *Slot = Builder.CreateGEP(
        ArrayTy, Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Value *ValPtr = nullptr;
      if (PollyManagedMemory)
        ValPtr = DevArray;
      else
        ValPtr = BlockGen.getOrCreateAlloca(SAI);

      assert(ValPtr != nullptr && "ValPtr that should point to a valid object"
                                  " to be stored into Parameters");
      Value *ValPtrCast =
          Builder.CreatePointerCast(ValPtr, Builder.getInt8PtrTy());
      Builder.CreateStore(ValPtrCast, Slot);
    } else {
      Instruction *Param =
          new AllocaInst(Builder.getInt8PtrTy(), AddressSpace,
                         Launch + "_param_" + std::to_string(Index),
                         EntryBlock->getTerminator());
      Builder.CreateStore(DevArray, Param);
      Value *ParamTyped =
          Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
      Builder.CreateStore(ParamTyped, Slot);
    }
    Index++;
  }

  int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);

  for (long i = 0; i < NumHostIters; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    Value *Val = IDToValue[Id];
    isl_id_free(Id);

    if (Runtime == GPURuntime::OpenCL)
      ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  int NumVars = isl_space_dim(Kernel->space, isl_dim_param);

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Value *Val = IDToValue[Id];
    if (ValueMap.count(Val))
      Val = ValueMap[Val];
    isl_id_free(Id);

    if (Runtime == GPURuntime::OpenCL)
      ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  for (auto Val : SubtreeValues) {
    if (Runtime == GPURuntime::OpenCL)
      ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  if (Runtime == GPURuntime::OpenCL) {
    for (int i = 0; i < NumArgs; i++) {
      Value *Val = ConstantInt::get(Builder.getInt32Ty(), ArgSizes[i]);
      Instruction *Param =
          new AllocaInst(Builder.getInt32Ty(), AddressSpace,
                         Launch + "_param_size_" + std::to_string(i),
                         EntryBlock->getTerminator());
      Builder.CreateStore(Val, Param);
      insertStoreParameter(Parameters, Param, Index);
      Index++;
    }
  }

  auto Location = EntryBlock->getTerminator();
  return new BitCastInst(Parameters, Builder.getInt8PtrTy(),
                         Launch + "_params_i8ptr", Location);
}

void GPUNodeBuilder::setupKernelSubtreeFunctions(
    SetVector<Function *> SubtreeFunctions) {
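  // Illustrative example: a host-side reference to expf leads to an external
  // declaration of expf with the same function type inside the GPU module;
  // ValueMap then redirects kernel-side uses to that declaration.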
  for (auto Fn : SubtreeFunctions) {
    const std::string ClonedFnName = Fn->getName().str();
    Function *Clone = GPUModule->getFunction(ClonedFnName);
    if (!Clone)
      Clone =
          Function::Create(Fn->getFunctionType(), GlobalValue::ExternalLinkage,
                           ClonedFnName, GPUModule.get());
    assert(Clone && "Expected cloned function to be initialized.");
    assert(ValueMap.find(Fn) == ValueMap.end() &&
           "Fn already present in ValueMap");
    ValueMap[Fn] = Clone;
  }
}

void GPUNodeBuilder::createKernel(__isl_take isl_ast_node *KernelStmt) {
  isl_id *Id = isl_ast_node_get_annotation(KernelStmt);
  ppcg_kernel *Kernel = (ppcg_kernel *)isl_id_get_user(Id);
  isl_id_free(Id);
  isl_ast_node_free(KernelStmt);

  if (Kernel->n_grid > 1)
    DeepestParallel = std::max(
        DeepestParallel, (unsigned)isl_space_dim(Kernel->space, isl_dim_set));
  else
    DeepestSequential = std::max(
        DeepestSequential, (unsigned)isl_space_dim(Kernel->space, isl_dim_set));

  Value *BlockDimX, *BlockDimY, *BlockDimZ;
  std::tie(BlockDimX, BlockDimY, BlockDimZ) = getBlockSizes(Kernel);

  SetVector<Value *> SubtreeValues;
  SetVector<Function *> SubtreeFunctions;
  SetVector<const Loop *> Loops;
  isl::space ParamSpace;
  std::tie(SubtreeValues, SubtreeFunctions, Loops, ParamSpace) =
      getReferencesInKernel(Kernel);

  // Add parameters that appear only in the access function to the kernel
  // space. This is important to make sure that all isl_ids are passed as
  // parameters to the kernel, even though, to improve compile time, we may
  // not have all parameters in the context.
  Kernel->space = isl_space_align_params(Kernel->space, ParamSpace.release());

  assert(Kernel->tree && "Device AST of kernel node is empty");

  Instruction &HostInsertPoint = *Builder.GetInsertPoint();
  IslExprBuilder::IDToValueTy HostIDs = IDToValue;
  ValueMapT HostValueMap = ValueMap;
  BlockGenerator::AllocaMapTy HostScalarMap = ScalarMap;
  ScalarMap.clear();
  BlockGenerator::EscapeUsersAllocaMapTy HostEscapeMap = EscapeMap;
  EscapeMap.clear();

  // For all loops we depend on, create values that contain the current loop
  // iteration. These values are necessary to generate code for SCEVs that
  // depend on such loops. As a result we need to pass them to the subfunction.
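  // Illustrative example: for an outer loop L, the canonical induction
  // variable {0,+,1}<L> is materialized on the host; a (hypothetical) SCEV
  // such as {%a,+,4}<L> can then be expressed in terms of the passed value.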
  for (const Loop *L : Loops) {
    const SCEV *OuterLIV = SE.getAddRecExpr(SE.getUnknown(Builder.getInt64(0)),
                                            SE.getUnknown(Builder.getInt64(1)),
                                            L, SCEV::FlagAnyWrap);
    Value *V = generateSCEV(OuterLIV);
    OutsideLoopIterations[L] = SE.getUnknown(V);
    SubtreeValues.insert(V);
  }

  createKernelFunction(Kernel, SubtreeValues, SubtreeFunctions);
  setupKernelSubtreeFunctions(SubtreeFunctions);

  create(isl_ast_node_copy(Kernel->tree));

  finalizeKernelArguments(Kernel);
  Function *F = Builder.GetInsertBlock()->getParent();
  if (Arch == GPUArch::NVPTX64)
    addCUDAAnnotations(F->getParent(), BlockDimX, BlockDimY, BlockDimZ);
  clearDominators(F);
  clearScalarEvolution(F);
  clearLoops(F);

  IDToValue = HostIDs;

  ValueMap = std::move(HostValueMap);
  ScalarMap = std::move(HostScalarMap);
  EscapeMap = std::move(HostEscapeMap);
  IDToSAI.clear();
  Annotator.resetAlternativeAliasBases();
  for (auto &BasePtr : LocalArrays)
    S.invalidateScopArrayInfo(BasePtr, MemoryKind::Array);
  LocalArrays.clear();

  std::string ASMString = finalizeKernelFunction();
  Builder.SetInsertPoint(&HostInsertPoint);
  Value *Parameters = createLaunchParameters(Kernel, F, SubtreeValues);

  std::string Name = getKernelFuncName(Kernel->id);
  Value *KernelString = Builder.CreateGlobalStringPtr(ASMString, Name);
  Value *NameString = Builder.CreateGlobalStringPtr(Name, Name + "_name");
  Value *GPUKernel = createCallGetKernel(KernelString, NameString);

  Value *GridDimX, *GridDimY;
  std::tie(GridDimX, GridDimY) = getGridSizes(Kernel);

  createCallLaunchKernel(GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
                         BlockDimZ, Parameters);
  createCallFreeKernel(GPUKernel);

  for (auto Id : KernelIds)
    isl_id_free(Id);

  KernelIds.clear();
}

/// Compute the DataLayout string for the NVPTX backend.
///
/// @param is64Bit Are we looking for a 64 bit architecture?
static std::string computeNVPTXDataLayout(bool is64Bit) {
  std::string Ret = "";

  if (!is64Bit) {
    Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
           "64-v128:128:128-n16:32:64";
  } else {
    Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
           "64-v128:128:128-n16:32:64";
  }

  return Ret;
}

/// Compute the DataLayout string for a SPIR kernel.
///
/// @param is64Bit Are we looking for a 64 bit architecture?
static std::string computeSPIRDataLayout(bool is64Bit) {
  std::string Ret = "";

  if (!is64Bit) {
    Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
           "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
           "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
  } else {
    Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
           "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
           "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
  }

  return Ret;
}

Function *
GPUNodeBuilder::createKernelFunctionDecl(ppcg_kernel *Kernel,
                                         SetVector<Value *> &SubtreeValues) {
  std::vector<Type *> Args;
  std::string Identifier = getKernelFuncName(Kernel->id);

  std::vector<Metadata *> MemoryType;

  for (long i = 0; i < Prog->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
      const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));
      Args.push_back(SAI->getElementType());
      MemoryType.push_back(
          ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
    } else {
      static const int UseGlobalMemory = 1;
      Args.push_back(Builder.getInt8PtrTy(UseGlobalMemory));
      MemoryType.push_back(
          ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 1)));
    }
  }

  int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);

  for (long i = 0; i < NumHostIters; i++) {
    Args.push_back(Builder.getInt64Ty());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  int NumVars = isl_space_dim(Kernel->space, isl_dim_param);

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Value *Val = IDToValue[Id];
    isl_id_free(Id);
    Args.push_back(Val->getType());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  for (auto *V : SubtreeValues) {
    Args.push_back(V->getType());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  auto *FT = FunctionType::get(Builder.getVoidTy(), Args, false);
  auto *FN = Function::Create(FT, Function::ExternalLinkage, Identifier,
                              GPUModule.get());

  std::vector<Metadata *> EmptyStrings;

  for (unsigned int i = 0; i < MemoryType.size(); i++) {
    EmptyStrings.push_back(MDString::get(FN->getContext(), ""));
  }

  if (Arch == GPUArch::SPIR32 || Arch == GPUArch::SPIR64) {
    FN->setMetadata("kernel_arg_addr_space",
                    MDNode::get(FN->getContext(), MemoryType));
    FN->setMetadata("kernel_arg_name",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_access_qual",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_type",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_type_qual",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_base_type",
                    MDNode::get(FN->getContext(), EmptyStrings));
  }

  switch (Arch) {
  case GPUArch::NVPTX64:
    FN->setCallingConv(CallingConv::PTX_Kernel);
    break;
  case GPUArch::SPIR32:
  case GPUArch::SPIR64:
    FN->setCallingConv(CallingConv::SPIR_KERNEL);
    break;
  }

  auto Arg = FN->arg_begin();
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    Arg->setName(Kernel->array[i].array->name);

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id));
    Type *EleTy = SAI->getElementType();
    Value *Val = &*Arg;
    SmallVector<const SCEV *, 4> Sizes;
    isl_ast_build *Build =
        isl_ast_build_from_context(isl_set_copy(Prog->context));
    Sizes.push_back(nullptr);
    for (long j = 1, n = Kernel->array[i].array->n_index; j < n; j++) {
      isl_ast_expr *DimSize = isl_ast_build_expr_from_pw_aff(
          Build, isl_multi_pw_aff_get_pw_aff(Kernel->array[i].array->bound, j));
      auto V = ExprBuilder.create(DimSize);
      Sizes.push_back(SE.getSCEV(V));
    }
    const ScopArrayInfo *SAIRep =
        S.getOrCreateScopArrayInfo(Val, EleTy, Sizes, MemoryKind::Array);
    LocalArrays.push_back(Val);

    isl_ast_build_free(Build);
    KernelIds.push_back(Id);
    IDToSAI[Id] = SAIRep;
    Arg++;
  }

  for (long i = 0; i < NumHostIters; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    Arg->setName(isl_id_get_name(Id));
    IDToValue[Id] = &*Arg;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
    Arg++;
  }

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Arg->setName(isl_id_get_name(Id));
    Value *Val = IDToValue[Id];
    ValueMap[Val] = &*Arg;
    IDToValue[Id] = &*Arg;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
    Arg++;
  }

  for (auto *V : SubtreeValues) {
    Arg->setName(V->getName());
    ValueMap[V] = &*Arg;
    Arg++;
  }

  return FN;
}

void GPUNodeBuilder::insertKernelIntrinsics(ppcg_kernel *Kernel) {
  Intrinsic::ID IntrinsicsBID[2];
  Intrinsic::ID IntrinsicsTID[3];

  switch (Arch) {
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    llvm_unreachable("Cannot generate NVVM intrinsics for SPIR");
  case GPUArch::NVPTX64:
    IntrinsicsBID[0] = Intrinsic::nvvm_read_ptx_sreg_ctaid_x;
    IntrinsicsBID[1] = Intrinsic::nvvm_read_ptx_sreg_ctaid_y;

    IntrinsicsTID[0] = Intrinsic::nvvm_read_ptx_sreg_tid_x;
    IntrinsicsTID[1] = Intrinsic::nvvm_read_ptx_sreg_tid_y;
    IntrinsicsTID[2] = Intrinsic::nvvm_read_ptx_sreg_tid_z;
    break;
  }

  auto addId = [this](__isl_take isl_id *Id, Intrinsic::ID Intr) mutable {
    std::string Name = isl_id_get_name(Id);
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *IntrinsicFn = Intrinsic::getDeclaration(M, Intr);
    Value *Val = Builder.CreateCall(IntrinsicFn, {});
    Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
    IDToValue[Id] = Val;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
  };

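  // Illustrative result for a 2D grid with 3D blocks: the loops below bind
  // the kernel's block ids to ctaid.x/ctaid.y and its thread ids to
  // tid.x/tid.y/tid.z, each read zero-extended to i64.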
  for (int i = 0; i < Kernel->n_grid; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->block_ids, i);
    addId(Id, IntrinsicsBID[i]);
  }

  for (int i = 0; i < Kernel->n_block; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->thread_ids, i);
    addId(Id, IntrinsicsTID[i]);
  }
}

void GPUNodeBuilder::insertKernelCallsSPIR(ppcg_kernel *Kernel,
                                           bool SizeTypeIs64bit) {
  const char *GroupName[3] = {"__gen_ocl_get_group_id0",
                              "__gen_ocl_get_group_id1",
                              "__gen_ocl_get_group_id2"};

  const char *LocalName[3] = {"__gen_ocl_get_local_id0",
                              "__gen_ocl_get_local_id1",
                              "__gen_ocl_get_local_id2"};
  IntegerType *SizeT =
      SizeTypeIs64bit ? Builder.getInt64Ty() : Builder.getInt32Ty();

  auto createFunc = [this](const char *Name, __isl_take isl_id *Id,
                           IntegerType *SizeT) mutable {
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *FN = M->getFunction(Name);

    // If FN is not available, declare it.
    if (!FN) {
      GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
      std::vector<Type *> Args;
      FunctionType *Ty = FunctionType::get(SizeT, Args, false);
      FN = Function::Create(Ty, Linkage, Name, M);
      FN->setCallingConv(CallingConv::SPIR_FUNC);
    }

    Value *Val = Builder.CreateCall(FN, {});
    if (SizeT == Builder.getInt32Ty())
      Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
    IDToValue[Id] = Val;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
  };

  for (int i = 0; i < Kernel->n_grid; ++i)
    createFunc(GroupName[i], isl_id_list_get_id(Kernel->block_ids, i), SizeT);

  for (int i = 0; i < Kernel->n_block; ++i)
    createFunc(LocalName[i], isl_id_list_get_id(Kernel->thread_ids, i), SizeT);
}

void GPUNodeBuilder::prepareKernelArguments(ppcg_kernel *Kernel, Function *FN) {
  auto Arg = FN->arg_begin();
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id));
    isl_id_free(Id);

    if (SAI->getNumberOfDimensions() > 0) {
      Arg++;
      continue;
    }

    Value *Val = &*Arg;

    if (!gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Type *TypePtr = SAI->getElementType()->getPointerTo();
      Value *TypedArgPtr = Builder.CreatePointerCast(Val, TypePtr);
      Val = Builder.CreateLoad(SAI->getElementType(), TypedArgPtr);
    }

    Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
    Builder.CreateStore(Val, Alloca);

    Arg++;
  }
}

void GPUNodeBuilder::finalizeKernelArguments(ppcg_kernel *Kernel) {
  auto *FN = Builder.GetInsertBlock()->getParent();
  auto Arg = FN->arg_begin();

  bool StoredScalar = false;
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id));
    isl_id_free(Id);

    if (SAI->getNumberOfDimensions() > 0) {
      Arg++;
      continue;
    }

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Arg++;
      continue;
    }

    Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
    Value *ArgPtr = &*Arg;
    Type *TypePtr = SAI->getElementType()->getPointerTo();
    Value *TypedArgPtr = Builder.CreatePointerCast(ArgPtr, TypePtr);
    Value *Val = Builder.CreateLoad(SAI->getElementType(), Alloca);
    Builder.CreateStore(Val, TypedArgPtr);
    StoredScalar = true;

    Arg++;
  }

  if (StoredScalar) {
    /// In case more than one thread contains scalar stores, the generated
    /// code might be incorrect if we only store at the end of the kernel.
    /// To support this case we need to store these scalars back at each
    /// memory store, or at least before each kernel barrier.
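    /// Illustrative example: if every thread of a parallel kernel wrote to
    /// the same scalar, a single store-back at kernel exit would race, so we
    /// bail out below whenever the kernel is blocked or gridded.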
    if (Kernel->n_block != 0 || Kernel->n_grid != 0) {
      BuildSuccessful = false;
      LLVM_DEBUG(
          dbgs() << getUniqueScopName(&S)
                 << " has a store to a scalar value that"
                    " would be undefined to run in parallel. Bailing out.\n";);
    }
  }
}

void GPUNodeBuilder::createKernelVariables(ppcg_kernel *Kernel, Function *FN) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();

  for (int i = 0; i < Kernel->n_var; ++i) {
    struct ppcg_kernel_var &Var = Kernel->var[i];
    isl_id *Id = isl_space_get_tuple_id(Var.array->space, isl_dim_set);
    Type *EleTy = ScopArrayInfo::getFromId(isl::manage(Id))->getElementType();

    Type *ArrayTy = EleTy;
    SmallVector<const SCEV *, 4> Sizes;

    Sizes.push_back(nullptr);
    for (unsigned int j = 1; j < Var.array->n_index; ++j) {
      isl_val *Val = isl_vec_get_element_val(Var.size, j);
      long Bound = isl_val_get_num_si(Val);
      isl_val_free(Val);
      Sizes.push_back(S.getSE()->getConstant(Builder.getInt64Ty(), Bound));
    }

    for (int j = Var.array->n_index - 1; j >= 0; --j) {
      isl_val *Val = isl_vec_get_element_val(Var.size, j);
      long Bound = isl_val_get_num_si(Val);
      isl_val_free(Val);
      ArrayTy = ArrayType::get(ArrayTy, Bound);
    }

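    // Illustrative example: a variable with element type float and sizes
    // {32, 32} becomes [32 x [32 x float]], allocated either as a global in
    // address space 3 (shared) or as a private alloca below.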
    const ScopArrayInfo *SAI;
    Value *Allocation;
    if (Var.type == ppcg_access_shared) {
      auto GlobalVar = new GlobalVariable(
          *M, ArrayTy, false, GlobalValue::InternalLinkage, 0, Var.name,
          nullptr, GlobalValue::ThreadLocalMode::NotThreadLocal, 3);
      GlobalVar->setAlignment(llvm::Align(EleTy->getPrimitiveSizeInBits() / 8));
      GlobalVar->setInitializer(Constant::getNullValue(ArrayTy));

      Allocation = GlobalVar;
    } else if (Var.type == ppcg_access_private) {
      Allocation = Builder.CreateAlloca(ArrayTy, 0, "private_array");
    } else {
      llvm_unreachable("unknown variable type");
    }
    SAI =
        S.getOrCreateScopArrayInfo(Allocation, EleTy, Sizes, MemoryKind::Array);
    Id = isl_id_alloc(S.getIslCtx().get(), Var.name, nullptr);
    IDToValue[Id] = Allocation;
    LocalArrays.push_back(Allocation);
    KernelIds.push_back(Id);
    IDToSAI[Id] = SAI;
  }
}

void GPUNodeBuilder::createKernelFunction(
    ppcg_kernel *Kernel, SetVector<Value *> &SubtreeValues,
    SetVector<Function *> &SubtreeFunctions) {
  std::string Identifier = getKernelFuncName(Kernel->id);
  GPUModule.reset(new Module(Identifier, Builder.getContext()));

  switch (Arch) {
  case GPUArch::NVPTX64:
    if (Runtime == GPURuntime::CUDA)
      GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
    else if (Runtime == GPURuntime::OpenCL)
      GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-nvcl"));
    GPUModule->setDataLayout(computeNVPTXDataLayout(true /* is64Bit */));
    break;
  case GPUArch::SPIR32:
    GPUModule->setTargetTriple(Triple::normalize("spir-unknown-unknown"));
    GPUModule->setDataLayout(computeSPIRDataLayout(false /* is64Bit */));
    break;
  case GPUArch::SPIR64:
    GPUModule->setTargetTriple(Triple::normalize("spir64-unknown-unknown"));
    GPUModule->setDataLayout(computeSPIRDataLayout(true /* is64Bit */));
    break;
  }

  Function *FN = createKernelFunctionDecl(Kernel, SubtreeValues);

  BasicBlock *PrevBlock = Builder.GetInsertBlock();
  auto EntryBlock = BasicBlock::Create(Builder.getContext(), "entry", FN);

  DT.addNewBlock(EntryBlock, PrevBlock);

  Builder.SetInsertPoint(EntryBlock);
  Builder.CreateRetVoid();
  Builder.SetInsertPoint(EntryBlock, EntryBlock->begin());

  ScopDetection::markFunctionAsInvalid(FN);

  prepareKernelArguments(Kernel, FN);
  createKernelVariables(Kernel, FN);

  switch (Arch) {
  case GPUArch::NVPTX64:
    insertKernelIntrinsics(Kernel);
    break;
  case GPUArch::SPIR32:
    insertKernelCallsSPIR(Kernel, false);
    break;
  case GPUArch::SPIR64:
    insertKernelCallsSPIR(Kernel, true);
    break;
  }
}

std::string GPUNodeBuilder::createKernelASM() {
  llvm::Triple GPUTriple;

  switch (Arch) {
  case GPUArch::NVPTX64:
    switch (Runtime) {
    case GPURuntime::CUDA:
      GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-cuda"));
      break;
    case GPURuntime::OpenCL:
      GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-nvcl"));
      break;
    }
    break;
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    std::string SPIRAssembly;
    raw_string_ostream IROstream(SPIRAssembly);
    IROstream << *GPUModule;
    IROstream.flush();
    return SPIRAssembly;
  }

  std::string ErrMsg;
  auto GPUTarget = TargetRegistry::lookupTarget(GPUTriple.getTriple(), ErrMsg);

  if (!GPUTarget) {
    errs() << ErrMsg << "\n";
    return "";
  }

  TargetOptions Options;
  Options.UnsafeFPMath = FastMath;

  std::string subtarget;

  switch (Arch) {
  case GPUArch::NVPTX64:
    subtarget = CudaVersion;
    break;
  case GPUArch::SPIR32:
  case GPUArch::SPIR64:
    llvm_unreachable("No subtarget for SPIR architecture");
  }

  std::unique_ptr<TargetMachine> TargetM(GPUTarget->createTargetMachine(
      GPUTriple.getTriple(), subtarget, "", Options, Optional<Reloc::Model>()));

  SmallString<0> ASMString;
  raw_svector_ostream ASMStream(ASMString);
  llvm::legacy::PassManager PM;

  PM.add(createTargetTransformInfoWrapperPass(TargetM->getTargetIRAnalysis()));

  if (TargetM->addPassesToEmitFile(PM, ASMStream, nullptr, CGFT_AssemblyFile,
                                   true /* verify */)) {
    errs() << "The target does not support generation of this file type!\n";
    return "";
  }

  PM.run(*GPUModule);

  return ASMStream.str().str();
}

bool GPUNodeBuilder::requiresCUDALibDevice() {
  bool RequiresLibDevice = false;
  for (Function &F : GPUModule->functions()) {
    if (!F.isDeclaration())
      continue;

    const std::string CUDALibDeviceFunc =
        getCUDALibDeviceFunction(F.getName());
    if (CUDALibDeviceFunc.length() != 0) {
      // We need to handle the case where a module looks like this:
      // @expf(..)
      // @llvm.exp.f64(..)
      // Both of these functions would be renamed to `__nv_expf`.
      //
      // So, we must first check for the existence of the libdevice function.
      // If this exists, we replace our current function with it.
      //
      // If it does not exist, we rename the current function to the
      // libdevice function name.
      if (Function *Replacement =
              F.getParent()->getFunction(CUDALibDeviceFunc))
        F.replaceAllUsesWith(Replacement);
      else
        F.setName(CUDALibDeviceFunc);
      RequiresLibDevice = true;
    }
  }

  return RequiresLibDevice;
}

void GPUNodeBuilder::addCUDALibDevice() {
  if (Arch != GPUArch::NVPTX64)
    return;

  if (requiresCUDALibDevice()) {
    SMDiagnostic Error;

    errs() << CUDALibDevice << "\n";
    auto LibDeviceModule =
        parseIRFile(CUDALibDevice, Error, GPUModule->getContext());

    if (!LibDeviceModule) {
      BuildSuccessful = false;
      report_fatal_error("Could not find or load libdevice. Skipping GPU "
                         "kernel generation. Please set -polly-acc-libdevice "
                         "accordingly.\n");
      return;
    }

    Linker L(*GPUModule);

    // Set an nvptx64 target triple to avoid linker warnings. The original
    // triple of the libdevice files is nvptx-unknown-unknown.
    LibDeviceModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
    L.linkInModule(std::move(LibDeviceModule), Linker::LinkOnlyNeeded);
  }
}

std::string GPUNodeBuilder::finalizeKernelFunction() {

  if (verifyModule(*GPUModule)) {
    LLVM_DEBUG(dbgs() << "verifyModule failed on module:\n";
               GPUModule->print(dbgs(), nullptr); dbgs() << "\n";);
    LLVM_DEBUG(dbgs() << "verifyModule Error:\n";
               verifyModule(*GPUModule, &dbgs()););

    if (FailOnVerifyModuleFailure)
      llvm_unreachable("VerifyModule failed.");

    BuildSuccessful = false;
    return "";
  }

  addCUDALibDevice();

  if (DumpKernelIR)
    outs() << *GPUModule << "\n";

  if (Arch != GPUArch::SPIR32 && Arch != GPUArch::SPIR64) {
    // Optimize module.
    llvm::legacy::PassManager OptPasses;
    PassManagerBuilder PassBuilder;
    PassBuilder.OptLevel = 3;
    PassBuilder.SizeLevel = 0;
    PassBuilder.populateModulePassManager(OptPasses);
    OptPasses.run(*GPUModule);
  }

  std::string Assembly = createKernelASM();

  if (DumpKernelASM)
    outs() << Assembly << "\n";

  GPUModule.release();
  KernelIDs.clear();

  return Assembly;
}

/// Construct an `isl_pw_aff_list` from a vector of `isl_pw_aff`s.
///
/// @param PwAffs The list of piecewise affine functions to create an
///               `isl_pw_aff_list` from. We expect an rvalue ref because
///               all the isl_pw_aff are used up by this function.
///
/// @returns The `isl_pw_aff_list`.
__isl_give isl_pw_aff_list *
createPwAffList(isl_ctx *Context,
                const std::vector<__isl_take isl_pw_aff *> &&PwAffs) {
  isl_pw_aff_list *List = isl_pw_aff_list_alloc(Context, PwAffs.size());

  for (unsigned i = 0; i < PwAffs.size(); i++) {
    List = isl_pw_aff_list_insert(List, i, PwAffs[i]);
  }
  return List;
}

/// Align all the `PwAffs` such that they have the same parameter dimensions.
///
/// We loop over all `pw_aff` and align all of their spaces together to
/// create a common space for all the `pw_aff`. This common space is the
/// `AlignSpace`. We then align all the `pw_aff` to this space. We start
/// with the given `SeedSpace`.
///
/// @param PwAffs    The list of piecewise affine functions we want to align.
///                  This is an rvalue reference because the entire vector is
///                  used up by the end of the operation.
/// @param SeedSpace The space to start the alignment process with.
/// @returns         A std::pair whose first element is the aligned space and
///                  whose second element is the vector of aligned piecewise
///                  affines.
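///
/// Illustrative example: aligning { [n] -> [(n)] } and { [m] -> [(m)] }
/// yields a common parameter space [n, m], with both pw_affs rewritten to
/// refer to it.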
static std::pair<__isl_give isl_space *, std::vector<__isl_give isl_pw_aff *>>
alignPwAffs(const std::vector<__isl_take isl_pw_aff *> &&PwAffs,
            __isl_take isl_space *SeedSpace) {
  assert(SeedSpace && "Invalid seed space given.");

  isl_space *AlignSpace = SeedSpace;
  for (isl_pw_aff *PwAff : PwAffs) {
    isl_space *PwAffSpace = isl_pw_aff_get_domain_space(PwAff);
    AlignSpace = isl_space_align_params(AlignSpace, PwAffSpace);
  }
  std::vector<isl_pw_aff *> AdjustedPwAffs;

  for (unsigned i = 0; i < PwAffs.size(); i++) {
    isl_pw_aff *Adjusted = PwAffs[i];
    assert(Adjusted && "Invalid pw_aff given.");
    Adjusted = isl_pw_aff_align_params(Adjusted, isl_space_copy(AlignSpace));
    AdjustedPwAffs.push_back(Adjusted);
  }
  return std::make_pair(AlignSpace, AdjustedPwAffs);
}

namespace {
class PPCGCodeGeneration : public ScopPass {
public:
  static char ID;

  GPURuntime Runtime = GPURuntime::CUDA;

  GPUArch Architecture = GPUArch::NVPTX64;

  /// The scop that is currently processed.
  Scop *S;

  LoopInfo *LI;
  DominatorTree *DT;
  ScalarEvolution *SE;
  const DataLayout *DL;
  RegionInfo *RI;

  PPCGCodeGeneration() : ScopPass(ID) {
    // Apply defaults.
    Runtime = GPURuntimeChoice;
    Architecture = GPUArchChoice;
  }

  /// Construct compilation options for PPCG.
  ///
  /// @returns The compilation options.
  ppcg_options *createPPCGOptions() {
    auto DebugOptions =
        (ppcg_debug_options *)malloc(sizeof(ppcg_debug_options));
    auto Options = (ppcg_options *)malloc(sizeof(ppcg_options));

    DebugOptions->dump_schedule_constraints = false;
    DebugOptions->dump_schedule = false;
    DebugOptions->dump_final_schedule = false;
    DebugOptions->dump_sizes = false;
    DebugOptions->verbose = false;

    Options->debug = DebugOptions;

    Options->group_chains = false;
    Options->reschedule = true;
    Options->scale_tile_loops = false;
    Options->wrap = false;

    Options->non_negative_parameters = false;
    Options->ctx = nullptr;
    Options->sizes = nullptr;

    Options->tile = true;
    Options->tile_size = 32;

    Options->isolate_full_tiles = false;

    Options->use_private_memory = PrivateMemory;
    Options->use_shared_memory = SharedMemory;
    Options->max_shared_memory = 48 * 1024;

    Options->target = PPCG_TARGET_CUDA;
    Options->openmp = false;
    Options->linearize_device_arrays = true;
    Options->allow_gnu_extensions = false;

    Options->unroll_copy_shared = false;
    Options->unroll_gpu_tile = false;
    Options->live_range_reordering = true;

    Options->hybrid = false;
    Options->opencl_compiler_options = nullptr;
    Options->opencl_use_gpu = false;
    Options->opencl_n_include_file = 0;
    Options->opencl_include_files = nullptr;
    Options->opencl_print_kernel_types = false;
    Options->opencl_embed_kernel_code = false;

    Options->save_schedule_file = nullptr;
    Options->load_schedule_file = nullptr;

    return Options;
  }

  /// Get a tagged access relation containing all accesses of type @p AccessTy.
  ///
  /// Instead of a normal access of the form:
  ///
  /// Stmt[i,j,k] -> Array[f_0(i,j,k), f_1(i,j,k)]
  ///
  /// a tagged access has the form
  ///
  /// [Stmt[i,j,k] -> id[]] -> Array[f_0(i,j,k), f_1(i,j,k)]
  ///
  /// where 'id' is an additional space that references the memory access that
  /// triggered the access.
  ///
  /// @param AccessTy The type of the memory accesses to collect.
  ///
  /// @return The relation describing all tagged memory accesses.
  isl_union_map *getTaggedAccesses(enum MemoryAccess::AccessType AccessTy) {
    isl_union_map *Accesses =
        isl_union_map_empty(S->getParamSpace().release());

    for (auto &Stmt : *S)
      for (auto &Acc : Stmt)
        if (Acc->getType() == AccessTy) {
          isl_map *Relation = Acc->getAccessRelation().release();
          Relation =
              isl_map_intersect_domain(Relation, Stmt.getDomain().release());

          isl_space *Space = isl_map_get_space(Relation);
          Space = isl_space_range(Space);
          Space = isl_space_from_range(Space);
          Space = isl_space_set_tuple_id(Space, isl_dim_in,
                                         Acc->getId().release());
          isl_map *Universe = isl_map_universe(Space);
          Relation = isl_map_domain_product(Relation, Universe);
          Accesses = isl_union_map_add_map(Accesses, Relation);
        }

    return Accesses;
  }

  /// Get the set of all read accesses, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedReads() {
    return getTaggedAccesses(MemoryAccess::READ);
  }

  /// Get the set of all may (and must) write accesses, tagged with the
  /// access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedMayWrites() {
    return isl_union_map_union(getTaggedAccesses(MemoryAccess::MAY_WRITE),
                               getTaggedAccesses(MemoryAccess::MUST_WRITE));
  }

  /// Get the set of all must-write accesses, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedMustWrites() {
    return getTaggedAccesses(MemoryAccess::MUST_WRITE);
  }

  /// Collect parameter and array names as isl_ids.
  ///
  /// To reason about the different parameters and arrays used, ppcg requires
  /// a list of all isl_ids in use. As PPCG traditionally performs
  /// source-to-source compilation, each of these isl_ids is mapped to the
  /// expression that represents it. As we do not have a corresponding
  /// expression in Polly, we just map each id to a 'zero' expression to match
  /// the data format that ppcg expects.
  ///
  /// @returns Return a map from collected ids to 'zero' ast expressions.
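  ///
  /// Illustrative example: for a parameter n and an array A, the returned
  /// map contains the entries { n -> 0, A -> 0 }.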
  __isl_give isl_id_to_ast_expr *getNames() {
    auto *Names = isl_id_to_ast_expr_alloc(
        S->getIslCtx().get(),
        S->getNumParams() + std::distance(S->array_begin(), S->array_end()));
    auto *Zero = isl_ast_expr_from_val(isl_val_zero(S->getIslCtx().get()));

    for (const SCEV *P : S->parameters()) {
      isl_id *Id = S->getIdForParam(P).release();
      Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
    }

    for (auto &Array : S->arrays()) {
      auto Id = Array->getBasePtrId().release();
      Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
    }

    isl_ast_expr_free(Zero);

    return Names;
  }

  /// Create a new PPCG scop from the current scop.
  ///
  /// The PPCG scop is initialized with data from the current polly::Scop. From
  /// this initial data, the data-dependences in the PPCG scop are initialized.
  /// We do not use Polly's dependence analysis for now, to ensure we match
  /// the PPCG default behaviour more closely.
  ///
  /// @returns A new ppcg scop.
  ppcg_scop *createPPCGScop() {
    MustKillsInfo KillsInfo = computeMustKillsInfo(*S);

    auto PPCGScop = (ppcg_scop *)malloc(sizeof(ppcg_scop));

    PPCGScop->options = createPPCGOptions();
    // Enable live range reordering.
    PPCGScop->options->live_range_reordering = 1;

    PPCGScop->start = 0;
    PPCGScop->end = 0;

    PPCGScop->context = S->getContext().release();
    PPCGScop->domain = S->getDomains().release();
    // TODO: investigate this further. PPCG calls collect_call_domains.
    PPCGScop->call = isl_union_set_from_set(S->getContext().release());
    PPCGScop->tagged_reads = getTaggedReads();
    PPCGScop->reads = S->getReads().release();
    PPCGScop->live_in = nullptr;
    PPCGScop->tagged_may_writes = getTaggedMayWrites();
    PPCGScop->may_writes = S->getWrites().release();
    PPCGScop->tagged_must_writes = getTaggedMustWrites();
    PPCGScop->must_writes = S->getMustWrites().release();
    PPCGScop->live_out = nullptr;
    PPCGScop->tagged_must_kills = KillsInfo.TaggedMustKills.release();
    PPCGScop->must_kills = KillsInfo.MustKills.release();

    PPCGScop->tagger = nullptr;
    PPCGScop->independence =
        isl_union_map_empty(isl_set_get_space(PPCGScop->context));
    PPCGScop->dep_flow = nullptr;
    PPCGScop->tagged_dep_flow = nullptr;
    PPCGScop->dep_false = nullptr;
    PPCGScop->dep_forced = nullptr;
    PPCGScop->dep_order = nullptr;
    PPCGScop->tagged_dep_order = nullptr;

    PPCGScop->schedule = S->getScheduleTree().release();
    // If we have something non-trivial to kill, add it to the schedule.
    if (KillsInfo.KillsSchedule.get())
      PPCGScop->schedule = isl_schedule_sequence(
          PPCGScop->schedule, KillsInfo.KillsSchedule.release());

    PPCGScop->names = getNames();
    PPCGScop->pet = nullptr;

    compute_tagger(PPCGScop);
    compute_dependences(PPCGScop);
    eliminate_dead_code(PPCGScop);

    return PPCGScop;
  }

  /// Collect the array accesses in a statement.
  ///
  /// @param Stmt The statement for which to collect the accesses.
  ///
  /// @returns A list of array accesses.
  gpu_stmt_access *getStmtAccesses(ScopStmt &Stmt) {
    gpu_stmt_access *Accesses = nullptr;

    for (MemoryAccess *Acc : Stmt) {
      auto Access =
          isl_alloc_type(S->getIslCtx().get(), struct gpu_stmt_access);
      Access->read = Acc->isRead();
      Access->write = Acc->isWrite();
      Access->access = Acc->getAccessRelation().release();
      isl_space *Space = isl_map_get_space(Access->access);
      Space = isl_space_range(Space);
      Space = isl_space_from_range(Space);
      Space =
          isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release());
      isl_map *Universe = isl_map_universe(Space);
      Access->tagged_access =
          isl_map_domain_product(Acc->getAccessRelation().release(), Universe);
      Access->exact_write = !Acc->isMayWrite();
      Access->ref_id = Acc->getId().release();
      Access->next = Accesses;
      Access->n_index = Acc->getScopArrayInfo()->getNumberOfDimensions();
      // TODO: Also mark one-element accesses to arrays as fixed-element.
      Access->fixed_element =
          Acc->isLatestScalarKind() ? isl_bool_true : isl_bool_false;
      Accesses = Access;
    }

    return Accesses;
  }

  /// Collect the list of GPU statements.
  ///
  /// Each statement has an id, a pointer to the underlying data structure,
  /// as well as a list with all memory accesses.
  ///
  /// TODO: Initialize the list of memory accesses.
  ///
  /// @returns A linked-list of statements.
  gpu_stmt *getStatements() {
    gpu_stmt *Stmts = isl_calloc_array(S->getIslCtx().get(), struct gpu_stmt,
                                       std::distance(S->begin(), S->end()));

    int i = 0;
    for (auto &Stmt : *S) {
      gpu_stmt *GPUStmt = &Stmts[i];

      GPUStmt->id = Stmt.getDomainId().release();

      // We use the pet stmt pointer to keep track of the Polly statements.
      GPUStmt->stmt = (pet_stmt *)&Stmt;
      GPUStmt->accesses = getStmtAccesses(Stmt);
      i++;
    }

    return Stmts;
  }

  /// Derive the extent of an array.
  ///
  /// The extent of an array is the set of elements that are within the
  /// accessed array. For the inner dimensions, the extent constraints are
  /// 0 and the size of the corresponding array dimension. For the first
  /// (outermost) dimension, the extent constraints are the minimal and maximal
  /// subscript value for the first dimension.
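  ///
  /// Illustrative example: for accesses A[i][j] with 5 <= i <= N and an
  /// array of size {*, 128}, the extent is
  /// { A[o0, o1] : 5 <= o0 <= N and 0 <= o1 < 128 }.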
2862 ///
2863 /// @param Array The array to derive the extent for.
2864 ///
2865 /// @returns An isl_set describing the extent of the array.
getExtent(ScopArrayInfo * Array)2866 isl::set getExtent(ScopArrayInfo *Array) {
2867 unsigned NumDims = Array->getNumberOfDimensions();
2868
2869 if (Array->getNumberOfDimensions() == 0)
2870 return isl::set::universe(Array->getSpace());
2871
2872 isl::union_map Accesses = S->getAccesses(Array);
2873 isl::union_set AccessUSet = Accesses.range();
2874 AccessUSet = AccessUSet.coalesce();
2875 AccessUSet = AccessUSet.detect_equalities();
2876 AccessUSet = AccessUSet.coalesce();
2877
2878 if (AccessUSet.is_empty())
2879 return isl::set::empty(Array->getSpace());
2880
2881 isl::set AccessSet = AccessUSet.extract_set(Array->getSpace());
2882
2883 isl::local_space LS = isl::local_space(Array->getSpace());
2884
2885 isl::pw_aff Val = isl::aff::var_on_domain(LS, isl::dim::set, 0);
2886 isl::pw_aff OuterMin = AccessSet.dim_min(0);
2887 isl::pw_aff OuterMax = AccessSet.dim_max(0);
2888 OuterMin = OuterMin.add_dims(isl::dim::in, Val.dim(isl::dim::in));
2889 OuterMax = OuterMax.add_dims(isl::dim::in, Val.dim(isl::dim::in));
2890 OuterMin = OuterMin.set_tuple_id(isl::dim::in, Array->getBasePtrId());
2891 OuterMax = OuterMax.set_tuple_id(isl::dim::in, Array->getBasePtrId());
2892
2893 isl::set Extent = isl::set::universe(Array->getSpace());
2894
2895 Extent = Extent.intersect(OuterMin.le_set(Val));
2896 Extent = Extent.intersect(OuterMax.ge_set(Val));
2897
2898 for (unsigned i = 1; i < NumDims; ++i)
2899 Extent = Extent.lower_bound_si(isl::dim::set, i, 0);
2900
2901 for (unsigned i = 0; i < NumDims; ++i) {
2902 isl::pw_aff PwAff = Array->getDimensionSizePw(i);
2903
2904 // isl_pw_aff can be NULL for zero dimension. Only in the case of a
2905 // Fortran array will we have a legitimate dimension.
2906 if (PwAff.is_null()) {
2907 assert(i == 0 && "invalid dimension isl_pw_aff for nonzero dimension");
2908 continue;
2909 }
2910
2911 isl::pw_aff Val = isl::aff::var_on_domain(
2912 isl::local_space(Array->getSpace()), isl::dim::set, i);
2913 PwAff = PwAff.add_dims(isl::dim::in, Val.dim(isl::dim::in));
2914 PwAff = PwAff.set_tuple_id(isl::dim::in, Val.get_tuple_id(isl::dim::in));
2915 isl::set Set = PwAff.gt_set(Val);
2916 Extent = Set.intersect(Extent);
2917 }
2918
2919 return Extent;
2920 }
2921
2922 /// Derive the bounds of an array.
2923 ///
2924 /// For the first dimension we derive the bound of the array from the extent
2925 /// of this dimension. For inner dimensions we obtain their size directly from
2926 /// ScopArrayInfo.
2927 ///
2928 /// @param PPCGArray The array to compute bounds for.
2929 /// @param Array The polly array from which to take the information.
setArrayBounds(gpu_array_info & PPCGArray,ScopArrayInfo * Array)2930 void setArrayBounds(gpu_array_info &PPCGArray, ScopArrayInfo *Array) {
2931 std::vector<isl_pw_aff *> Bounds;
2932
2933 if (PPCGArray.n_index > 0) {
2934 if (isl_set_is_empty(PPCGArray.extent)) {
2935 isl_set *Dom = isl_set_copy(PPCGArray.extent);
2936 isl_local_space *LS = isl_local_space_from_space(
2937 isl_space_params(isl_set_get_space(Dom)));
2938 isl_set_free(Dom);
2939 isl_pw_aff *Zero = isl_pw_aff_from_aff(isl_aff_zero_on_domain(LS));
2940 Bounds.push_back(Zero);
2941 } else {
        isl_set *Dom = isl_set_copy(PPCGArray.extent);
        Dom = isl_set_project_out(Dom, isl_dim_set, 1, PPCGArray.n_index - 1);
        isl_pw_aff *Bound = isl_set_dim_max(isl_set_copy(Dom), 0);
        isl_set_free(Dom);
        Dom = isl_pw_aff_domain(isl_pw_aff_copy(Bound));
        isl_local_space *LS =
            isl_local_space_from_space(isl_set_get_space(Dom));
        isl_aff *One = isl_aff_zero_on_domain(LS);
        One = isl_aff_add_constant_si(One, 1);
        Bound = isl_pw_aff_add(Bound, isl_pw_aff_alloc(Dom, One));
        Bound = isl_pw_aff_gist(Bound, S->getContext().release());
        Bounds.push_back(Bound);
      }
    }

    for (unsigned i = 1; i < PPCGArray.n_index; ++i) {
      isl_pw_aff *Bound = Array->getDimensionSizePw(i).release();
      auto LS = isl_pw_aff_get_domain_space(Bound);
      auto Aff = isl_multi_aff_zero(LS);

      // We need types to work out, which is why we perform this weird dance
      // with `Aff` and `Bound`. Consider this example:
      //
      // LS:    [p] -> { [] }
      // Zero:  [p] -> { [] } | Implicitly, this is [p] -> { ~ -> [] }.
      // This `~` is used to denote a "null space" (which is different from
      // a *zero-dimensional* space), which is something that ISL does not
      // show you when pretty printing.
      //
      // Bound: [p] -> { [] -> [(10p)] } | Here, the [] is a *zero-dimensional*
      // space, not a "null space", which does not exist at all.
      //
      // When we pullback (precompose) `Bound` with `Zero`, we get:
      // Bound . Zero =
      //     ([p] -> { [] -> [(10p)] }) . ([p] -> { ~ -> [] }) =
      //     [p] -> { ~ -> [(10p)] } =
      //     [p] -> [(10p)] (as ISL pretty prints it)
      // Bound Pullback: [p] -> { [(10p)] }
      //
      // We want this kind of an expression for Bound, without a
      // zero-dimensional input, but with a "null space" input, for the types
      // to work out later on, as far as I (Siddharth Bhat) understand.
      // I was unable to find a reference to this in the ISL manual.
      // References: Tobias Grosser.

      Bound = isl_pw_aff_pullback_multi_aff(Bound, Aff);
      Bounds.push_back(Bound);
    }

    /// To construct an `isl_multi_pw_aff`, we need all the individual
    /// `pw_aff`s to have the same parameter dimensions. So, we need to align
    /// them to an appropriate space.
    /// Scop::Context is _not_ an appropriate space, because when
    /// `-polly-ignore-parameter-bounds` is enabled, the Scop::Context does
    /// not contain all parameter dimensions.
    /// So, use the helper `alignPwAffs` to align all the `isl_pw_aff` together.
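    // For illustration (hypothetical pw_affs): given
    //   Bound0: [n] -> { [(n)] }  and  Bound1: [m] -> { [(m)] },
    // aligning both to a common parameter space gives
    //   Bound0: [n, m] -> { [(n)] }  and  Bound1: [n, m] -> { [(m)] },
    // which can then be combined into a single isl_multi_pw_aff.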
    isl_space *SeedAlignSpace = S->getParamSpace().release();
    SeedAlignSpace = isl_space_add_dims(SeedAlignSpace, isl_dim_set, 1);

    isl_space *AlignSpace = nullptr;
    std::vector<isl_pw_aff *> AlignedBounds;
    std::tie(AlignSpace, AlignedBounds) =
        alignPwAffs(std::move(Bounds), SeedAlignSpace);

    assert(AlignSpace && "alignPwAffs did not initialise AlignSpace");

    isl_pw_aff_list *BoundsList =
        createPwAffList(S->getIslCtx().get(), std::move(AlignedBounds));

    isl_space *BoundsSpace = isl_set_get_space(PPCGArray.extent);
    BoundsSpace = isl_space_align_params(BoundsSpace, AlignSpace);

    assert(BoundsSpace && "Unable to access space of array.");
    assert(BoundsList && "Unable to access list of bounds.");

    PPCGArray.bound =
        isl_multi_pw_aff_from_pw_aff_list(BoundsSpace, BoundsList);
    assert(PPCGArray.bound && "PPCGArray.bound was not constructed correctly.");
  }

  /// Create the arrays for @p PPCGProg.
  ///
  /// @param PPCGProg  The program to compute the arrays for.
  /// @param ValidSAIs The ScopArrayInfo objects to create gpu_array_info
  ///                  descriptions for.
  void createArrays(gpu_prog *PPCGProg,
                    const SmallVector<ScopArrayInfo *, 4> &ValidSAIs) {
    int i = 0;
    for (auto &Array : ValidSAIs) {
      std::string TypeName;
      raw_string_ostream OS(TypeName);

      OS << *Array->getElementType();
      TypeName = OS.str();

      gpu_array_info &PPCGArray = PPCGProg->array[i];

      PPCGArray.space = Array->getSpace().release();
      PPCGArray.type = strdup(TypeName.c_str());
      PPCGArray.size = DL->getTypeAllocSize(Array->getElementType());
      PPCGArray.name = strdup(Array->getName().c_str());
      PPCGArray.extent = nullptr;
      PPCGArray.n_index = Array->getNumberOfDimensions();
      PPCGArray.extent = getExtent(Array).release();
      PPCGArray.n_ref = 0;
      PPCGArray.refs = nullptr;
      PPCGArray.accessed = true;
      PPCGArray.read_only_scalar =
          Array->isReadOnly() && Array->getNumberOfDimensions() == 0;
      PPCGArray.has_compound_element = false;
      PPCGArray.local = false;
      PPCGArray.declare_local = false;
      PPCGArray.global = false;
      PPCGArray.linearize = false;
      PPCGArray.dep_order = nullptr;
      PPCGArray.user = Array;

      PPCGArray.bound = nullptr;
      setArrayBounds(PPCGArray, Array);
      i++;

      collect_references(PPCGProg, &PPCGArray);
      PPCGArray.only_fixed_element = only_fixed_element_accessed(&PPCGArray);
    }
  }

  /// Create an identity map between the arrays in the scop.
  ///
  /// @returns An identity map between the arrays in the scop.
  isl_union_map *getArrayIdentity() {
    isl_union_map *Maps = isl_union_map_empty(S->getParamSpace().release());

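    // For illustration: for a 2D array A and a 1D array B, this builds the
    // union map { A[i0, i1] -> A[i0, i1]; B[i0] -> B[i0] }.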
    for (auto &Array : S->arrays()) {
      isl_space *Space = Array->getSpace().release();
      Space = isl_space_map_from_set(Space);
      isl_map *Identity = isl_map_identity(Space);
      Maps = isl_union_map_add_map(Maps, Identity);
    }

    return Maps;
  }

  /// Create a default-initialized PPCG GPU program.
  ///
  /// @param PPCGScop The ppcg scop from which to build the program.
  ///
  /// @returns A new gpu program description.
  gpu_prog *createPPCGProg(ppcg_scop *PPCGScop) {

    if (!PPCGScop)
      return nullptr;

    auto PPCGProg = isl_calloc_type(S->getIslCtx().get(), struct gpu_prog);

    PPCGProg->ctx = S->getIslCtx().get();
    PPCGProg->scop = PPCGScop;
    PPCGProg->context = isl_set_copy(PPCGScop->context);
    PPCGProg->read = isl_union_map_copy(PPCGScop->reads);
    PPCGProg->may_write = isl_union_map_copy(PPCGScop->may_writes);
    PPCGProg->must_write = isl_union_map_copy(PPCGScop->must_writes);
    PPCGProg->tagged_must_kill =
        isl_union_map_copy(PPCGScop->tagged_must_kills);
    PPCGProg->to_inner = getArrayIdentity();
    PPCGProg->to_outer = getArrayIdentity();
    // TODO: verify that this assignment is correct.
    PPCGProg->any_to_outer = nullptr;
    PPCGProg->n_stmts = std::distance(S->begin(), S->end());
    PPCGProg->stmts = getStatements();

    // Only consider arrays that have a non-empty extent. Otherwise, we would
    // also consider the following kinds of empty arrays:
    // 1. Invariant loads that are represented by SAI objects.
    // 2. Arrays with statically known zero size.
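    // For instance, an SAI object that exists only to represent a hoisted
    // invariant load has an empty extent here, so no gpu_array_info (and
    // hence no device allocation) is created for it.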
    auto ValidSAIsRange =
        make_filter_range(S->arrays(), [this](ScopArrayInfo *SAI) -> bool {
          return !getExtent(SAI).is_empty();
        });
    SmallVector<ScopArrayInfo *, 4> ValidSAIs(ValidSAIsRange.begin(),
                                              ValidSAIsRange.end());

    PPCGProg->n_array = ValidSAIs.size();
    PPCGProg->array = isl_calloc_array(
        S->getIslCtx().get(), struct gpu_array_info, PPCGProg->n_array);

    createArrays(PPCGProg, ValidSAIs);

    PPCGProg->array_order = nullptr;
    collect_order_dependences(PPCGProg);

    PPCGProg->may_persist = compute_may_persist(PPCGProg);
    return PPCGProg;
  }

  struct PrintGPUUserData {
    struct cuda_info *CudaInfo;
    struct gpu_prog *PPCGProg;
    std::vector<ppcg_kernel *> Kernels;
  };

  /// Print a user statement node in the host code.
  ///
  /// We use ppcg's printing facilities to print the actual statement and
  /// additionally build up a list of all kernels that are encountered in the
  /// host ast.
  ///
  /// @param P       The printer to print to.
  /// @param Options The printing options to use.
  /// @param Node    The node to print.
  /// @param User    A user pointer to carry additional data. This pointer is
  ///                expected to be of type PrintGPUUserData.
  ///
  /// @returns A printer to which the output has been printed.
  static __isl_give isl_printer *
  printHostUser(__isl_take isl_printer *P,
                __isl_take isl_ast_print_options *Options,
                __isl_take isl_ast_node *Node, void *User) {
    auto Data = (struct PrintGPUUserData *)User;
    auto Id = isl_ast_node_get_annotation(Node);

    if (Id) {
      bool IsUser = !strcmp(isl_id_get_name(Id), "user");

      // If this is a user statement, format it ourselves, as ppcg would
      // otherwise try to call pet functionality that is not available in
      // Polly.
      if (IsUser) {
        P = isl_printer_start_line(P);
        P = isl_printer_print_ast_node(P, Node);
        P = isl_printer_end_line(P);
        isl_id_free(Id);
        isl_ast_print_options_free(Options);
        return P;
      }

      auto Kernel = (struct ppcg_kernel *)isl_id_get_user(Id);
      isl_id_free(Id);
      Data->Kernels.push_back(Kernel);
    }

    return print_host_user(P, Options, Node, User);
  }

  /// Print C code corresponding to the control flow in @p Kernel.
  ///
  /// @param Kernel The kernel to print.
  void printKernel(ppcg_kernel *Kernel) {
    auto *P = isl_printer_to_str(S->getIslCtx().get());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);
    auto *Options = isl_ast_print_options_alloc(S->getIslCtx().get());
    P = isl_ast_node_print(Kernel->tree, P, Options);
    char *String = isl_printer_get_str(P);
    outs() << String << "\n";
    free(String);
    isl_printer_free(P);
  }

  /// Print C code corresponding to the GPU code described by @p Tree.
  ///
  /// @param Tree     An AST describing GPU code.
  /// @param PPCGProg The PPCG program from which @p Tree has been constructed.
  void printGPUTree(isl_ast_node *Tree, gpu_prog *PPCGProg) {
    auto *P = isl_printer_to_str(S->getIslCtx().get());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);

    PrintGPUUserData Data;
    Data.PPCGProg = PPCGProg;

    auto *Options = isl_ast_print_options_alloc(S->getIslCtx().get());
    Options =
        isl_ast_print_options_set_print_user(Options, printHostUser, &Data);
    P = isl_ast_node_print(Tree, P, Options);
    char *String = isl_printer_get_str(P);
    outs() << "# host\n";
    outs() << String << "\n";
    free(String);
    isl_printer_free(P);

    for (auto Kernel : Data.Kernels) {
      outs() << "# kernel" << Kernel->id << "\n";
      printKernel(Kernel);
    }
  }

  // Generate a GPU program using PPCG.
  //
  // GPU mapping consists of multiple steps:
  //
  //  1) Compute a new schedule for the program.
  //  2) Map the schedule to the GPU.
  //  3) Generate code for the new schedule.
  //
  // We do not use the Polly ScheduleOptimizer here, as the schedule optimizer
  // is mostly CPU specific. Instead, we use PPCG's GPU code generation
  // strategy directly from this pass.
  gpu_gen *generateGPU(ppcg_scop *PPCGScop, gpu_prog *PPCGProg) {

    auto PPCGGen = isl_calloc_type(S->getIslCtx().get(), struct gpu_gen);

    PPCGGen->ctx = S->getIslCtx().get();
    PPCGGen->options = PPCGScop->options;
    PPCGGen->print = nullptr;
    PPCGGen->print_user = nullptr;
    PPCGGen->build_ast_expr = &pollyBuildAstExprForStmt;
    PPCGGen->prog = PPCGProg;
    PPCGGen->tree = nullptr;
    PPCGGen->types.n = 0;
    PPCGGen->types.name = nullptr;
    PPCGGen->sizes = nullptr;
    PPCGGen->used_sizes = nullptr;
    PPCGGen->kernel_id = 0;

    // Set the scheduling strategy to the same strategy PPCG is using.
    isl_options_set_schedule_outer_coincidence(PPCGGen->ctx, true);
    isl_options_set_schedule_maximize_band_depth(PPCGGen->ctx, true);
    isl_options_set_schedule_whole_component(PPCGGen->ctx, false);
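    // These options bias the isl scheduler towards outermost bands that are
    // coincident (parallel) and as deep as possible, since such bands are
    // what the GPU mapping later turns into block and thread dimensions.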

    isl_schedule *Schedule = get_schedule(PPCGGen);

    int has_permutable = has_any_permutable_node(Schedule);

    Schedule =
        isl_schedule_align_params(Schedule, S->getFullParamSpace().release());

    if (!has_permutable || has_permutable < 0) {
      Schedule = isl_schedule_free(Schedule);
      LLVM_DEBUG(dbgs() << getUniqueScopName(S)
                        << " does not have permutable bands. Bailing out\n";);
    } else {
      const bool CreateTransferToFromDevice = !PollyManagedMemory;
      Schedule = map_to_device(PPCGGen, Schedule, CreateTransferToFromDevice);
      PPCGGen->tree = generate_code(PPCGGen, isl_schedule_copy(Schedule));
    }

    if (DumpSchedule) {
      isl_printer *P = isl_printer_to_str(S->getIslCtx().get());
      P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
      P = isl_printer_print_str(P, "Schedule\n");
      P = isl_printer_print_str(P, "========\n");
      if (Schedule)
        P = isl_printer_print_schedule(P, Schedule);
      else
        P = isl_printer_print_str(P, "No schedule found\n");

      outs() << isl_printer_get_str(P) << "\n";
      isl_printer_free(P);
    }

    if (DumpCode) {
      outs() << "Code\n";
      outs() << "====\n";
      if (PPCGGen->tree)
        printGPUTree(PPCGGen->tree, PPCGProg);
      else
        outs() << "No code generated\n";
    }

    isl_schedule_free(Schedule);

    return PPCGGen;
  }

  /// Free a gpu_gen structure.
  ///
  /// @param PPCGGen The gpu_gen object to free.
  void freePPCGGen(gpu_gen *PPCGGen) {
    isl_ast_node_free(PPCGGen->tree);
    isl_union_map_free(PPCGGen->sizes);
    isl_union_map_free(PPCGGen->used_sizes);
    free(PPCGGen);
  }

  /// Free the options in the ppcg scop structure.
  ///
  /// ppcg is not freeing these options for us. To avoid leaks we do this
  /// ourselves.
  ///
  /// @param PPCGScop The scop referencing the options to free.
  void freeOptions(ppcg_scop *PPCGScop) {
    free(PPCGScop->options->debug);
    PPCGScop->options->debug = nullptr;
    free(PPCGScop->options);
    PPCGScop->options = nullptr;
  }

  /// Approximate the number of points in the set.
  ///
  /// This function returns an ast expression that overapproximates the number
  /// of points in an isl set through the rectangular hull surrounding this
  /// set.
  ///
  /// @param Set   The set to count.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An approximation of the number of points in the set.
  __isl_give isl_ast_expr *approxPointsInSet(__isl_take isl_set *Set,
                                             __isl_keep isl_ast_build *Build) {

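    // For illustration: for the triangular set
    //   [n] -> { S[i, j] : 0 <= i < n and 0 <= j <= i }
    // the per-dimension (max - min + 1) products computed below yield n * n,
    // which overapproximates the exact point count n * (n + 1) / 2.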
    isl_val *One = isl_val_int_from_si(isl_set_get_ctx(Set), 1);
    auto *Expr = isl_ast_expr_from_val(isl_val_copy(One));

    isl_space *Space = isl_set_get_space(Set);
    Space = isl_space_params(Space);
    auto *Univ = isl_set_universe(Space);
    isl_pw_aff *OneAff = isl_pw_aff_val_on_domain(Univ, One);

    for (long i = 0, n = isl_set_dim(Set, isl_dim_set); i < n; i++) {
      isl_pw_aff *Max = isl_set_dim_max(isl_set_copy(Set), i);
      isl_pw_aff *Min = isl_set_dim_min(isl_set_copy(Set), i);
      isl_pw_aff *DimSize = isl_pw_aff_sub(Max, Min);
      DimSize = isl_pw_aff_add(DimSize, isl_pw_aff_copy(OneAff));
      auto DimSizeExpr = isl_ast_build_expr_from_pw_aff(Build, DimSize);
      Expr = isl_ast_expr_mul(Expr, DimSizeExpr);
    }

    isl_set_free(Set);
    isl_pw_aff_free(OneAff);

    return Expr;
  }

  /// Approximate the number of dynamic instructions executed by a given
  /// statement.
  ///
  /// @param Stmt  The statement for which to compute the number of dynamic
  ///              instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An approximation of the number of dynamic instructions executed
  ///          by @p Stmt.
  __isl_give isl_ast_expr *approxDynamicInst(ScopStmt &Stmt,
                                             __isl_keep isl_ast_build *Build) {
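    // Illustrative example: a block statement containing 5 LLVM-IR
    // instructions with domain [n] -> { Stmt[i] : 0 <= i < n } is
    // approximated as 5 * n dynamic instructions.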
    auto Iterations = approxPointsInSet(Stmt.getDomain().release(), Build);

    long InstCount = 0;

    if (Stmt.isBlockStmt()) {
      auto *BB = Stmt.getBasicBlock();
      InstCount = std::distance(BB->begin(), BB->end());
    } else {
      auto *R = Stmt.getRegion();

      for (auto *BB : R->blocks()) {
        InstCount += std::distance(BB->begin(), BB->end());
      }
    }

    isl_val *InstVal = isl_val_int_from_si(S->getIslCtx().get(), InstCount);
    auto *InstExpr = isl_ast_expr_from_val(InstVal);
    return isl_ast_expr_mul(InstExpr, Iterations);
  }

  /// Approximate the number of dynamic instructions executed in the scop.
  ///
  /// @param S     The scop for which to approximate dynamic instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An approximation of the number of dynamic instructions executed
  ///          in @p S.
  __isl_give isl_ast_expr *
  getNumberOfIterations(Scop &S, __isl_keep isl_ast_build *Build) {
    isl_ast_expr *Instructions;

    isl_val *Zero = isl_val_int_from_si(S.getIslCtx().get(), 0);
    Instructions = isl_ast_expr_from_val(Zero);

    for (ScopStmt &Stmt : S) {
      isl_ast_expr *StmtInstructions = approxDynamicInst(Stmt, Build);
      Instructions = isl_ast_expr_add(Instructions, StmtInstructions);
    }
    return Instructions;
  }

  /// Create a check that ensures sufficient compute in the scop.
  ///
  /// @param S     The scop for which to ensure sufficient compute.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An expression that evaluates to TRUE in case of sufficient
  ///          compute and to FALSE otherwise.
  __isl_give isl_ast_expr *
  createSufficientComputeCheck(Scop &S, __isl_keep isl_ast_build *Build) {
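    // Illustrative example (hypothetical numbers): with a MinCompute
    // threshold of 10^6 and an approximated workload of 100 * n * n dynamic
    // instructions, the emitted run-time check is `100 * n * n >= 10^6`, so
    // small problem sizes fall back to the original CPU code.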
    auto Iterations = getNumberOfIterations(S, Build);
    auto *MinComputeVal = isl_val_int_from_si(S.getIslCtx().get(), MinCompute);
    auto *MinComputeExpr = isl_ast_expr_from_val(MinComputeVal);
    return isl_ast_expr_ge(Iterations, MinComputeExpr);
  }

  /// Check if the basic block contains a function we cannot codegen for GPU
  /// kernels.
  ///
  /// If this basic block does something with a `Function` other than calling
  /// a function that we support in a kernel, return true.
  bool containsInvalidKernelFunctionInBlock(const BasicBlock *BB,
                                            bool AllowCUDALibDevice) {
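    // Illustrative examples: a direct call to a function that
    // isValidFunctionInKernel accepts is fine, whereas passing a function
    // pointer as an operand, as in `foo(&bar)`, cannot be materialized in a
    // GPU kernel and is rejected below.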
    for (const Instruction &Inst : *BB) {
      const CallInst *Call = dyn_cast<CallInst>(&Inst);
      if (Call && isValidFunctionInKernel(Call->getCalledFunction(),
                                          AllowCUDALibDevice))
        continue;

      for (Value *Op : Inst.operands())
        // Look for (<func-type>*) among the operands of Inst.
        if (auto PtrTy = dyn_cast<PointerType>(Op->getType())) {
          if (isa<FunctionType>(PtrTy->getElementType())) {
            LLVM_DEBUG(dbgs()
                       << Inst << " has illegal use of function in kernel.\n");
            return true;
          }
        }
    }
    return false;
  }

  /// Return whether the Scop @p S uses functions in a way that we do not
  /// support.
  bool containsInvalidKernelFunction(const Scop &S, bool AllowCUDALibDevice) {
    for (auto &Stmt : S) {
      if (Stmt.isBlockStmt()) {
        if (containsInvalidKernelFunctionInBlock(Stmt.getBasicBlock(),
                                                 AllowCUDALibDevice))
          return true;
      } else {
        assert(Stmt.isRegionStmt() &&
               "Stmt was neither block nor region statement");
        for (const BasicBlock *BB : Stmt.getRegion()->blocks())
          if (containsInvalidKernelFunctionInBlock(BB, AllowCUDALibDevice))
            return true;
      }
    }
    return false;
  }

  /// Generate code for a given GPU AST described by @p Root.
  ///
  /// @param Root An isl_ast_node pointing to the root of the GPU AST.
  /// @param Prog The GPU Program to generate code for.
  void generateCode(__isl_take isl_ast_node *Root, gpu_prog *Prog) {
    ScopAnnotator Annotator;
    Annotator.buildAliasScopes(*S);

    Region *R = &S->getRegion();

    simplifyRegion(R, DT, LI, RI);

    BasicBlock *EnteringBB = R->getEnteringBlock();

    PollyIRBuilder Builder(EnteringBB->getContext(), ConstantFolder(),
                           IRInserter(Annotator));
    Builder.SetInsertPoint(EnteringBB->getTerminator());

    // Only build the run-time condition and parameters _after_ having
    // introduced the conditional branch. This is important as the conditional
    // branch will guard the original scop from new induction variables that
    // the SCEVExpander may introduce while code generating the parameters and
    // which may introduce scalar dependences that prevent us from correctly
    // code generating this scop.
    BBPair StartExitBlocks;
    BranchInst *CondBr = nullptr;
    std::tie(StartExitBlocks, CondBr) =
        executeScopConditionally(*S, Builder.getTrue(), *DT, *RI, *LI);
    BasicBlock *StartBlock = std::get<0>(StartExitBlocks);

    assert(CondBr && "CondBr not initialized by executeScopConditionally");

    GPUNodeBuilder NodeBuilder(Builder, Annotator, *DL, *LI, *SE, *DT, *S,
                               StartBlock, Prog, Runtime, Architecture);

    // TODO: Handle LICM.
    auto SplitBlock = StartBlock->getSinglePredecessor();
    Builder.SetInsertPoint(SplitBlock->getTerminator());

    isl_ast_build *Build = isl_ast_build_alloc(S->getIslCtx().get());
    isl::ast_expr Condition =
        IslAst::buildRunCondition(*S, isl::manage_copy(Build));
    isl_ast_expr *SufficientCompute = createSufficientComputeCheck(*S, Build);
    Condition =
        isl::manage(isl_ast_expr_and(Condition.release(), SufficientCompute));
    isl_ast_build_free(Build);

    // Preload invariant loads. Note: This should happen before the RTC
    // because the RTC may depend on values that are invariant load hoisted.
    if (!NodeBuilder.preloadInvariantLoads()) {
      // Patch the introduced branch condition to ensure that we always
      // execute the original SCoP.
      auto *FalseI1 = Builder.getFalse();
      auto *SplitBBTerm = Builder.GetInsertBlock()->getTerminator();
      SplitBBTerm->setOperand(0, FalseI1);

      LLVM_DEBUG(dbgs() << "preloading invariant loads failed in function: " +
                               S->getFunction().getName() +
                               " | Scop Region: " + S->getNameStr());
      // Adjust the dominator tree accordingly.
      auto *ExitingBlock = StartBlock->getUniqueSuccessor();
      assert(ExitingBlock);
      auto *MergeBlock = ExitingBlock->getUniqueSuccessor();
      assert(MergeBlock);
      polly::markBlockUnreachable(*StartBlock, Builder);
      polly::markBlockUnreachable(*ExitingBlock, Builder);
      auto *ExitingBB = S->getExitingBlock();
      assert(ExitingBB);

      DT->changeImmediateDominator(MergeBlock, ExitingBB);
      DT->eraseNode(ExitingBlock);
      isl_ast_node_free(Root);
    } else {

      if (polly::PerfMonitoring) {
        PerfMonitor P(*S, EnteringBB->getParent()->getParent());
        P.initialize();
        P.insertRegionStart(SplitBlock->getTerminator());

        // TODO: Check whether this is the correct exiting block to place
        // the `end` performance marker. Invariant load hoisting changes
        // the CFG in a way that I do not precisely understand, so I
        // (Siddharth<siddu.druid@gmail.com>) should come back to this and
        // think about which exiting block to use.
        auto *ExitingBlock = StartBlock->getUniqueSuccessor();
        assert(ExitingBlock);
        BasicBlock *MergeBlock = ExitingBlock->getUniqueSuccessor();
        P.insertRegionEnd(MergeBlock->getTerminator());
      }

      NodeBuilder.addParameters(S->getContext().release());
      Value *RTC = NodeBuilder.createRTC(Condition.release());
      Builder.GetInsertBlock()->getTerminator()->setOperand(0, RTC);

      Builder.SetInsertPoint(&*StartBlock->begin());

      NodeBuilder.create(Root);
    }

    /// In case a sequential kernel has more surrounding loops than any
    /// parallel kernel, the SCoP is probably mostly sequential. Hence, there
    /// is no point in running it on a GPU.
    if (NodeBuilder.DeepestSequential > NodeBuilder.DeepestParallel)
      CondBr->setOperand(0, Builder.getFalse());

    if (!NodeBuilder.BuildSuccessful)
      CondBr->setOperand(0, Builder.getFalse());
  }

  bool runOnScop(Scop &CurrentScop) override {
    S = &CurrentScop;
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    DL = &S->getRegion().getEntry()->getModule()->getDataLayout();
    RI = &getAnalysis<RegionInfoPass>().getRegionInfo();

    LLVM_DEBUG(dbgs() << "PPCGCodeGen running on: " << getUniqueScopName(S)
                      << " | loop depth: " << S->getMaxLoopDepth() << "\n");

    // We currently do not support functions other than intrinsics inside
    // kernels, as code generation will need to offload function calls to the
    // kernel. This may lead to a kernel trying to call a function on the
    // host. This also allows us to prevent codegen from trying to take the
    // address of an intrinsic function to send to the kernel.
    if (containsInvalidKernelFunction(CurrentScop,
                                      Architecture == GPUArch::NVPTX64)) {
      LLVM_DEBUG(
          dbgs() << getUniqueScopName(S)
                 << " contains a function which cannot be materialized in a "
                    "GPU kernel. Bailing out.\n";);
      return false;
    }

    auto PPCGScop = createPPCGScop();
    auto PPCGProg = createPPCGProg(PPCGScop);
    auto PPCGGen = generateGPU(PPCGScop, PPCGProg);

    if (PPCGGen->tree) {
      generateCode(isl_ast_node_copy(PPCGGen->tree), PPCGProg);
      CurrentScop.markAsToBeSkipped();
    } else {
      LLVM_DEBUG(dbgs() << getUniqueScopName(S)
                        << " has empty PPCGGen->tree. Bailing out.\n");
    }

    freeOptions(PPCGScop);
    freePPCGGen(PPCGGen);
    gpu_prog_free(PPCGProg);
    ppcg_scop_free(PPCGScop);

    return true;
  }

  void printScop(raw_ostream &, Scop &) const override {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ScopPass::getAnalysisUsage(AU);

    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<RegionInfoPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<ScopDetectionWrapperPass>();
    AU.addRequired<ScopInfoRegionPass>();
    AU.addRequired<LoopInfoWrapperPass>();

    // FIXME: We do not yet add regions for the newly generated code to the
    // region tree.
  }
};
} // namespace

char PPCGCodeGeneration::ID = 1;

Pass *polly::createPPCGCodeGenerationPass(GPUArch Arch, GPURuntime Runtime) {
  PPCGCodeGeneration *generator = new PPCGCodeGeneration();
  generator->Runtime = Runtime;
  generator->Architecture = Arch;
  return generator;
}

INITIALIZE_PASS_BEGIN(PPCGCodeGeneration, "polly-codegen-ppcg",
                      "Polly - Apply PPCG translation to SCOP", false, false)
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_END(PPCGCodeGeneration, "polly-codegen-ppcg",
                    "Polly - Apply PPCG translation to SCOP", false, false)