1 //===- cuda-runtime-wrappers.cpp - MLIR CUDA runner wrapper library -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Implements C wrappers around the CUDA library for easy linking in ORC jit.
10 // Also adds some debugging helpers that are helpful when writing MLIR code to
11 // run on GPUs.
12 //
13 //===----------------------------------------------------------------------===//
14 
#include <algorithm>
#include <cassert>
#include <functional>
#include <numeric>

#include "mlir/ExecutionEngine/CRunnerUtils.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/raw_ostream.h"

#include "cuda.h"
23 
// Evaluates `expr` (a CUDA Driver API call returning CUresult) exactly once
// and, if the result is not CUDA_SUCCESS (0), prints the stringified
// expression together with the driver's error name to stderr. Errors are
// reported but not propagated; execution continues.
// Note: no '//' comments may appear inside the macro body below because they
// would swallow the backslash line continuations.
#define CUDA_REPORT_IF_ERROR(expr)                                             \
  [](CUresult result) {                                                        \
    if (!result)                                                               \
      return;                                                                  \
    const char *name = nullptr;                                                \
    cuGetErrorName(result, &name);                                             \
    if (!name)                                                                 \
      name = "<unknown>";                                                      \
    llvm::errs() << "'" << #expr << "' failed with '" << name << "'\n";        \
  }(expr)
34 
35 // Static reference to CUDA primary context for device ordinal 0.
__anon3e11b6920102null36 static CUcontext Context = [] {
37   CUDA_REPORT_IF_ERROR(cuInit(/*flags=*/0));
38   CUdevice device;
39   CUDA_REPORT_IF_ERROR(cuDeviceGet(&device, /*ordinal=*/0));
40   CUcontext context;
41   CUDA_REPORT_IF_ERROR(cuDevicePrimaryCtxRetain(&context, device));
42   return context;
43 }();
44 
45 // Sets the `Context` for the duration of the instance and restores the previous
46 // context on destruction.
47 class ScopedContext {
48 public:
ScopedContext()49   ScopedContext() {
50     CUDA_REPORT_IF_ERROR(cuCtxGetCurrent(&previous));
51     CUDA_REPORT_IF_ERROR(cuCtxSetCurrent(Context));
52   }
53 
~ScopedContext()54   ~ScopedContext() { CUDA_REPORT_IF_ERROR(cuCtxSetCurrent(previous)); }
55 
56 private:
57   CUcontext previous;
58 };
59 
mgpuModuleLoad(void * data)60 extern "C" CUmodule mgpuModuleLoad(void *data) {
61   ScopedContext scopedContext;
62   CUmodule module = nullptr;
63   CUDA_REPORT_IF_ERROR(cuModuleLoadData(&module, data));
64   return module;
65 }
66 
// Unloads a module previously returned by mgpuModuleLoad; errors are reported
// to stderr but not propagated.
extern "C" void mgpuModuleUnload(CUmodule module) {
  CUDA_REPORT_IF_ERROR(cuModuleUnload(module));
}
70 
mgpuModuleGetFunction(CUmodule module,const char * name)71 extern "C" CUfunction mgpuModuleGetFunction(CUmodule module, const char *name) {
72   CUfunction function = nullptr;
73   CUDA_REPORT_IF_ERROR(cuModuleGetFunction(&function, module, name));
74   return function;
75 }
76 
// Launches `function` on `stream` with the given grid/block dimensions,
// `smem` bytes of dynamic shared memory, and the kernel parameter arrays
// `params`/`extra` forwarded verbatim to cuLaunchKernel. The launch is
// asynchronous with respect to the host.
//
// The wrapper uses intptr_t instead of CUDA's unsigned int to match
// the type of MLIR's index type. This avoids the need for casts in the
// generated MLIR code.
extern "C" void mgpuLaunchKernel(CUfunction function, intptr_t gridX,
                                 intptr_t gridY, intptr_t gridZ,
                                 intptr_t blockX, intptr_t blockY,
                                 intptr_t blockZ, int32_t smem, CUstream stream,
                                 void **params, void **extra) {
  ScopedContext scopedContext;
  CUDA_REPORT_IF_ERROR(cuLaunchKernel(function, gridX, gridY, gridZ, blockX,
                                      blockY, blockZ, smem, stream, params,
                                      extra));
}
90 
mgpuStreamCreate()91 extern "C" CUstream mgpuStreamCreate() {
92   ScopedContext scopedContext;
93   CUstream stream = nullptr;
94   CUDA_REPORT_IF_ERROR(cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING));
95   return stream;
96 }
97 
// Destroys a stream created by mgpuStreamCreate; errors are reported to
// stderr but not propagated.
extern "C" void mgpuStreamDestroy(CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuStreamDestroy(stream));
}
101 
// Blocks the host until all work previously enqueued on `stream` completes.
extern "C" void mgpuStreamSynchronize(CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuStreamSynchronize(stream));
}
105 
// Makes future work submitted to `stream` wait until `event` completes,
// without blocking the host.
extern "C" void mgpuStreamWaitEvent(CUstream stream, CUevent event) {
  CUDA_REPORT_IF_ERROR(cuStreamWaitEvent(stream, event, /*flags=*/0));
}
109 
mgpuEventCreate()110 extern "C" CUevent mgpuEventCreate() {
111   ScopedContext scopedContext;
112   CUevent event = nullptr;
113   CUDA_REPORT_IF_ERROR(cuEventCreate(&event, CU_EVENT_DISABLE_TIMING));
114   return event;
115 }
116 
// Destroys an event created by mgpuEventCreate; errors are reported to stderr
// but not propagated.
extern "C" void mgpuEventDestroy(CUevent event) {
  CUDA_REPORT_IF_ERROR(cuEventDestroy(event));
}
120 
// Blocks the host until `event` has completed.
extern "C" void mgpuEventSynchronize(CUevent event) {
  CUDA_REPORT_IF_ERROR(cuEventSynchronize(event));
}
124 
// Records `event` on `stream`: the event completes once all work enqueued on
// the stream before this call has finished.
extern "C" void mgpuEventRecord(CUevent event, CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuEventRecord(event, stream));
}
128 
mgpuMemAlloc(uint64_t sizeBytes,CUstream)129 extern "C" void *mgpuMemAlloc(uint64_t sizeBytes, CUstream /*stream*/) {
130   ScopedContext scopedContext;
131   CUdeviceptr ptr;
132   CUDA_REPORT_IF_ERROR(cuMemAlloc(&ptr, sizeBytes));
133   return reinterpret_cast<void *>(ptr);
134 }
135 
// Frees device memory previously returned by mgpuMemAlloc. The stream
// argument is unused: cuMemFree is not stream-ordered.
extern "C" void mgpuMemFree(void *ptr, CUstream /*stream*/) {
  CUDA_REPORT_IF_ERROR(cuMemFree(reinterpret_cast<CUdeviceptr>(ptr)));
}
139 
// Asynchronously copies `sizeBytes` bytes from `src` to `dst` on `stream`.
// cuMemcpyAsync infers the copy direction from the pointer values, so either
// argument may be a host or device pointer.
extern "C" void mgpuMemcpy(void *dst, void *src, uint64_t sizeBytes,
                           CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuMemcpyAsync(reinterpret_cast<CUdeviceptr>(dst),
                                     reinterpret_cast<CUdeviceptr>(src),
                                     sizeBytes, stream));
}
146 
147 /// Helper functions for writing mlir example code
148 
// Allows to register byte array with the CUDA runtime. Helpful until we have
// transfer functions implemented.
// Page-locks (pins) the host range [ptr, ptr + sizeBytes) so the driver can
// DMA to/from it directly. Errors are reported to stderr but not propagated.
extern "C" void mgpuMemHostRegister(void *ptr, uint64_t sizeBytes) {
  CUDA_REPORT_IF_ERROR(cuMemHostRegister(ptr, sizeBytes, /*flags=*/0));
}
154 
155 // Allows to register a MemRef with the CUDA runtime. Helpful until we have
156 // transfer functions implemented.
157 extern "C" void
mgpuMemHostRegisterMemRef(int64_t rank,StridedMemRefType<char,1> * descriptor,int64_t elementSizeBytes)158 mgpuMemHostRegisterMemRef(int64_t rank, StridedMemRefType<char, 1> *descriptor,
159                           int64_t elementSizeBytes) {
160 
161   llvm::SmallVector<int64_t, 4> denseStrides(rank);
162   llvm::ArrayRef<int64_t> sizes(descriptor->sizes, rank);
163   llvm::ArrayRef<int64_t> strides(sizes.end(), rank);
164 
165   std::partial_sum(sizes.rbegin(), sizes.rend(), denseStrides.rbegin(),
166                    std::multiplies<int64_t>());
167   auto sizeBytes = denseStrides.front() * elementSizeBytes;
168 
169   // Only densely packed tensors are currently supported.
170   std::rotate(denseStrides.begin(), denseStrides.begin() + 1,
171               denseStrides.end());
172   denseStrides.back() = 1;
173   assert(strides == llvm::makeArrayRef(denseStrides));
174 
175   auto ptr = descriptor->data + descriptor->offset * elementSizeBytes;
176   mgpuMemHostRegister(ptr, sizeBytes);
177 }
178