1 //===--- amdgpu/src/rtl.cpp --------------------------------------- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // RTL for hsa machine
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include <algorithm>
14 #include <assert.h>
15 #include <cstdio>
16 #include <cstdlib>
17 #include <cstring>
18 #include <functional>
19 #include <libelf.h>
20 #include <list>
21 #include <memory>
22 #include <mutex>
23 #include <shared_mutex>
24 #include <unordered_map>
25 #include <vector>
26
27 // Header from ATMI interface
28 #include "atmi_interop_hsa.h"
29 #include "atmi_runtime.h"
30
31 #include "internal.h"
32
33 #include "Debug.h"
34 #include "get_elf_mach_gfx_name.h"
35 #include "machine.h"
36 #include "omptargetplugin.h"
37 #include "print_tracing.h"
38
39 #include "llvm/Frontend/OpenMP/OMPGridValues.h"
40
41 #ifndef TARGET_NAME
42 #define TARGET_NAME AMDHSA
43 #endif
44 #define DEBUG_PREFIX "Target " GETNAME(TARGET_NAME) " RTL"
45
46 // hostrpc interface. FIXME: consider moving to its own include. These are
47 // statically linked into the amdgpu plugin, if present, from hostrpc_services.a,
48 // linked as --whole-archive to override the weak symbols that are used to
49 // implement a fallback for toolchains that do not yet have a hostrpc library.
50 extern "C" {
51 unsigned long hostrpc_assign_buffer(hsa_agent_t agent, hsa_queue_t *this_Q,
52 uint32_t device_id);
53 hsa_status_t hostrpc_init();
54 hsa_status_t hostrpc_terminate();
55
56 __attribute__((weak)) hsa_status_t hostrpc_init() { return HSA_STATUS_SUCCESS; }
57 __attribute__((weak)) hsa_status_t hostrpc_terminate() {
58 return HSA_STATUS_SUCCESS;
59 }
60 __attribute__((weak)) unsigned long
61 hostrpc_assign_buffer(hsa_agent_t, hsa_queue_t *, uint32_t device_id) {
62 DP("Warning: Attempting to assign hostrpc to device %u, but hostrpc library "
63 "missing\n",
64 device_id);
65 return 0;
66 }
67 }
68
69 // Heuristic parameters used for kernel launch
70 // Number of teams per CU to allow scheduling flexibility
71 static const unsigned DefaultTeamsPerCU = 4;
72
73 int print_kernel_trace;
74
75 #ifdef OMPTARGET_DEBUG
76 #define check(msg, status) \
77 if (status != HSA_STATUS_SUCCESS) { \
78 DP(#msg " failed\n"); \
79 } else { \
80 DP(#msg " succeeded\n"); \
81 }
82 #else
83 #define check(msg, status) \
84 {}
85 #endif
86
87 #include "elf_common.h"
88
89 namespace core {
90 hsa_status_t RegisterModuleFromMemory(
91 std::map<std::string, atl_kernel_info_t> &KernelInfo,
92 std::map<std::string, atl_symbol_info_t> &SymbolInfoTable, void *, size_t,
93 hsa_agent_t agent,
94 hsa_status_t (*on_deserialized_data)(void *data, size_t size,
95 void *cb_state),
96 void *cb_state, std::vector<hsa_executable_t> &HSAExecutables);
97 }
98
99 namespace hsa {
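// Thin wrappers over the HSA C iteration APIs: a captureless lambda is passed
// as the C callback and the real callable travels through the void *user-data
// pointer, so callers can use capturing lambdas.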
100 template <typename C> hsa_status_t iterate_agents(C cb) {
101 auto L = [](hsa_agent_t agent, void *data) -> hsa_status_t {
102 C *unwrapped = static_cast<C *>(data);
103 return (*unwrapped)(agent);
104 };
105 return hsa_iterate_agents(L, static_cast<void *>(&cb));
106 }
107
108 template <typename C>
109 hsa_status_t amd_agent_iterate_memory_pools(hsa_agent_t Agent, C cb) {
110 auto L = [](hsa_amd_memory_pool_t MemoryPool, void *data) -> hsa_status_t {
111 C *unwrapped = static_cast<C *>(data);
112 return (*unwrapped)(MemoryPool);
113 };
114
115 return hsa_amd_agent_iterate_memory_pools(Agent, L, static_cast<void *>(&cb));
116 }
117
118 } // namespace hsa
119
120 /// Keep entries table per device
121 struct FuncOrGblEntryTy {
122 __tgt_target_table Table;
123 std::vector<__tgt_offload_entry> Entries;
124 };
125
126 enum ExecutionModeType {
127 SPMD, // constructors, destructors,
128 // combined constructs (`teams distribute parallel for [simd]`)
129 GENERIC, // everything else
130 SPMD_GENERIC, // Generic kernel with SPMD execution
131 NONE
132 };
133
134 struct KernelArgPool {
135 private:
136 static pthread_mutex_t mutex;
137
138 public:
139 uint32_t kernarg_segment_size;
140 void *kernarg_region = nullptr;
141 std::queue<int> free_kernarg_segments;
142
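// Per-dispatch slot size: the kernel's explicit arguments plus the implicit
// trailing arguments (atmi_implicit_args_t) expected by the runtime.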
143 uint32_t kernarg_size_including_implicit() {
144 return kernarg_segment_size + sizeof(atmi_implicit_args_t);
145 }
146
147 ~KernelArgPool() {
148 if (kernarg_region) {
149 auto r = hsa_amd_memory_pool_free(kernarg_region);
150 if (r != HSA_STATUS_SUCCESS) {
151 DP("hsa_amd_memory_pool_free failed: %s\n", get_error_string(r));
152 }
153 }
154 }
155
156 // Can't really copy or move a mutex
157 KernelArgPool() = default;
158 KernelArgPool(const KernelArgPool &) = delete;
159 KernelArgPool(KernelArgPool &&) = delete;
160
161 KernelArgPool(uint32_t kernarg_segment_size,
162 hsa_amd_memory_pool_t &memory_pool)
163 : kernarg_segment_size(kernarg_segment_size) {
164
165 // atmi uses one pool per kernel for all gpus, with a fixed upper size
166 // preserving that exact scheme here, including the queue<int>
167
168 hsa_status_t err = hsa_amd_memory_pool_allocate(
169 memory_pool, kernarg_size_including_implicit() * MAX_NUM_KERNELS, 0,
170 &kernarg_region);
171
172 if (err != HSA_STATUS_SUCCESS) {
173 DP("hsa_amd_memory_pool_allocate failed: %s\n", get_error_string(err));
174 kernarg_region = nullptr; // paranoid
175 return;
176 }
177
178 err = core::allow_access_to_all_gpu_agents(kernarg_region);
179 if (err != HSA_STATUS_SUCCESS) {
180 DP("hsa allow_access_to_all_gpu_agents failed: %s\n",
181 get_error_string(err));
182 auto r = hsa_amd_memory_pool_free(kernarg_region);
183 if (r != HSA_STATUS_SUCCESS) {
184 // if the free failed, there is nothing more we can do to resolve it
185 DP("hsa memory pool free failed: %s\n", get_error_string(r));
186 }
187 kernarg_region = nullptr;
188 return;
189 }
190
191 for (int i = 0; i < MAX_NUM_KERNELS; i++) {
192 free_kernarg_segments.push(i);
193 }
194 }
195
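// Hand out one fixed-size kernarg slot, or nullptr if all MAX_NUM_KERNELS
// slots are currently in use. Callers return slots via deallocate().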
196 void *allocate(uint64_t arg_num) {
197 assert((arg_num * sizeof(void *)) == kernarg_segment_size);
198 lock l(&mutex);
199 void *res = nullptr;
200 if (!free_kernarg_segments.empty()) {
201
202 int free_idx = free_kernarg_segments.front();
203 res = static_cast<void *>(static_cast<char *>(kernarg_region) +
204 (free_idx * kernarg_size_including_implicit()));
205 assert(free_idx == pointer_to_index(res));
206 free_kernarg_segments.pop();
207 }
208 return res;
209 }
210
211 void deallocate(void *ptr) {
212 lock l(&mutex);
213 int idx = pointer_to_index(ptr);
214 free_kernarg_segments.push(idx);
215 }
216
217 private:
218 int pointer_to_index(void *ptr) {
219 ptrdiff_t bytes =
220 static_cast<char *>(ptr) - static_cast<char *>(kernarg_region);
221 assert(bytes >= 0);
222 assert(bytes % kernarg_size_including_implicit() == 0);
223 return bytes / kernarg_size_including_implicit();
224 }
225 struct lock {
226 lock(pthread_mutex_t *m) : m(m) { pthread_mutex_lock(m); }
227 ~lock() { pthread_mutex_unlock(m); }
228 pthread_mutex_t *m;
229 };
230 };
231 pthread_mutex_t KernelArgPool::mutex = PTHREAD_MUTEX_INITIALIZER;
232
233 std::unordered_map<std::string /*kernel*/, std::unique_ptr<KernelArgPool>>
234 KernelArgPoolMap;
235
236 /// Use a single entity to encode a kernel and a set of flags
237 struct KernelTy {
238 // execution mode of kernel
239 // 0 - SPMD mode (without master warp)
240 // 1 - Generic mode (with master warp)
241 // 2 - SPMD mode execution with Generic mode semantics.
242 int8_t ExecutionMode;
243 int16_t ConstWGSize;
244 int32_t device_id;
245 void *CallStackAddr = nullptr;
246 const char *Name;
247
248 KernelTy(int8_t _ExecutionMode, int16_t _ConstWGSize, int32_t _device_id,
249 void *_CallStackAddr, const char *_Name,
250 uint32_t _kernarg_segment_size,
251 hsa_amd_memory_pool_t &KernArgMemoryPool)
252 : ExecutionMode(_ExecutionMode), ConstWGSize(_ConstWGSize),
253 device_id(_device_id), CallStackAddr(_CallStackAddr), Name(_Name) {
254 DP("Construct kernelinfo: ExecMode %d\n", ExecutionMode);
255
256 std::string N(_Name);
257 if (KernelArgPoolMap.find(N) == KernelArgPoolMap.end()) {
258 KernelArgPoolMap.insert(
259 std::make_pair(N, std::unique_ptr<KernelArgPool>(new KernelArgPool(
260 _kernarg_segment_size, KernArgMemoryPool))));
261 }
262 }
263 };
264
265 /// List that contains all the kernels.
266 /// FIXME: we may need this to be per device and per library.
267 std::list<KernelTy> KernelsList;
268
269 template <typename Callback> static hsa_status_t FindAgents(Callback CB) {
270
271 hsa_status_t err =
272 hsa::iterate_agents([&](hsa_agent_t agent) -> hsa_status_t {
273 hsa_device_type_t device_type;
274 // get_info fails iff HSA runtime not yet initialized
275 hsa_status_t err =
276 hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
277 if (print_kernel_trace > 0 && err != HSA_STATUS_SUCCESS)
278 printf("rtl.cpp: err %d\n", err);
279 assert(err == HSA_STATUS_SUCCESS);
280
281 CB(device_type, agent);
282 return HSA_STATUS_SUCCESS;
283 });
284
285 // iterate_agents fails iff HSA runtime not yet initialized
286 if (print_kernel_trace > 0 && err != HSA_STATUS_SUCCESS) {
287 printf("rtl.cpp: err %d\n", err);
288 }
289
290 return err;
291 }
292
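// Installed as the queue error handler when each HSA queue is created; any
// asynchronous queue error is treated as fatal.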
293 static void callbackQueue(hsa_status_t status, hsa_queue_t *source,
294 void *data) {
295 if (status != HSA_STATUS_SUCCESS) {
296 const char *status_string;
297 if (hsa_status_string(status, &status_string) != HSA_STATUS_SUCCESS) {
298 status_string = "unavailable";
299 }
300 fprintf(stderr, "[%s:%d] GPU error in queue %p %d (%s)\n", __FILE__,
301 __LINE__, source, status, status_string);
302 abort();
303 }
304 }
305
306 namespace core {
307 namespace {
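// Write the 32-bit header+setup word last, with release semantics, so the
// packet processor never observes a partially initialized dispatch packet.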
308 void packet_store_release(uint32_t *packet, uint16_t header, uint16_t rest) {
309 __atomic_store_n(packet, header | (rest << 16), __ATOMIC_RELEASE);
310 }
311
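// Kernel dispatch header with system-scope acquire and release fences.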
312 uint16_t create_header() {
313 uint16_t header = HSA_PACKET_TYPE_KERNEL_DISPATCH << HSA_PACKET_HEADER_TYPE;
314 header |= HSA_FENCE_SCOPE_SYSTEM << HSA_PACKET_HEADER_ACQUIRE_FENCE_SCOPE;
315 header |= HSA_FENCE_SCOPE_SYSTEM << HSA_PACKET_HEADER_RELEASE_FENCE_SCOPE;
316 return header;
317 }
318
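// hsa_amd_agent_iterate_memory_pools callback: collect pools that permit
// runtime allocation, are fine grained, kernarg-init capable and non-empty,
// i.e. candidates for kernarg storage.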
319 hsa_status_t addKernArgPool(hsa_amd_memory_pool_t MemoryPool, void *Data) {
320 std::vector<hsa_amd_memory_pool_t> *Result =
321 static_cast<std::vector<hsa_amd_memory_pool_t> *>(Data);
322 bool AllocAllowed = false;
323 hsa_status_t err = hsa_amd_memory_pool_get_info(
324 MemoryPool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
325 &AllocAllowed);
326 if (err != HSA_STATUS_SUCCESS) {
327 fprintf(stderr, "Alloc allowed in memory pool check failed: %s\n",
328 get_error_string(err));
329 return err;
330 }
331
332 if (!AllocAllowed) {
333 // nothing needs to be done here.
334 return HSA_STATUS_SUCCESS;
335 }
336
337 uint32_t GlobalFlags = 0;
338 err = hsa_amd_memory_pool_get_info(
339 MemoryPool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &GlobalFlags);
340 if (err != HSA_STATUS_SUCCESS) {
341 fprintf(stderr, "Get memory pool info failed: %s\n", get_error_string(err));
342 return err;
343 }
344
345 size_t size = 0;
346 err = hsa_amd_memory_pool_get_info(MemoryPool, HSA_AMD_MEMORY_POOL_INFO_SIZE,
347 &size);
348 if (err != HSA_STATUS_SUCCESS) {
349 fprintf(stderr, "Get memory pool size failed: %s\n", get_error_string(err));
350 return err;
351 }
352
353 if ((GlobalFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED) &&
354 (GlobalFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_KERNARG_INIT) &&
355 size > 0) {
356 Result->push_back(MemoryPool);
357 }
358
359 return HSA_STATUS_SUCCESS;
360 }
361
362 std::pair<hsa_status_t, bool>
363 isValidMemoryPool(hsa_amd_memory_pool_t MemoryPool) {
364 bool AllocAllowed = false;
365 hsa_status_t Err = hsa_amd_memory_pool_get_info(
366 MemoryPool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
367 &AllocAllowed);
368 if (Err != HSA_STATUS_SUCCESS) {
369 fprintf(stderr, "Alloc allowed in memory pool check failed: %s\n",
370 get_error_string(Err));
371 return {Err, false};
372 }
373
374 return {HSA_STATUS_SUCCESS, AllocAllowed};
375 }
376
377 template <typename AccumulatorFunc>
378 hsa_status_t collectMemoryPools(const std::vector<hsa_agent_t> &Agents,
379 AccumulatorFunc Func) {
380 for (int DeviceId = 0; DeviceId < Agents.size(); DeviceId++) {
381 hsa_status_t Err = hsa::amd_agent_iterate_memory_pools(
382 Agents[DeviceId], [&](hsa_amd_memory_pool_t MemoryPool) {
383 hsa_status_t Err;
384 bool Valid = false;
385 std::tie(Err, Valid) = isValidMemoryPool(MemoryPool);
386 if (Err != HSA_STATUS_SUCCESS) {
387 return Err;
388 }
389 if (Valid)
390 Func(MemoryPool, DeviceId);
391 return HSA_STATUS_SUCCESS;
392 });
393
394 if (Err != HSA_STATUS_SUCCESS) {
395 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
396 "Iterate all memory pools", get_error_string(Err));
397 return Err;
398 }
399 }
400
401 return HSA_STATUS_SUCCESS;
402 }
403
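// Scan the given agents for kernarg-capable memory pools and return the first
// one found; it is used for all kernarg allocations.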
404 std::pair<hsa_status_t, hsa_amd_memory_pool_t>
405 FindKernargPool(const std::vector<hsa_agent_t> &HSAAgents) {
406 std::vector<hsa_amd_memory_pool_t> KernArgPools;
407 for (const auto &Agent : HSAAgents) {
408 hsa_status_t err = HSA_STATUS_SUCCESS;
409 err = hsa_amd_agent_iterate_memory_pools(
410 Agent, addKernArgPool, static_cast<void *>(&KernArgPools));
411 if (err != HSA_STATUS_SUCCESS) {
412 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
413 "Iterate all memory pools", get_error_string(err));
414 return {err, hsa_amd_memory_pool_t{}};
415 }
416 }
417
418 if (KernArgPools.empty()) {
419 fprintf(stderr, "Unable to find any valid kernarg pool\n");
420 return {HSA_STATUS_ERROR, hsa_amd_memory_pool_t{}};
421 }
422
423 return {HSA_STATUS_SUCCESS, KernArgPools[0]};
424 }
425
426 } // namespace
427 } // namespace core
428
429 struct EnvironmentVariables {
430 int NumTeams;
431 int TeamLimit;
432 int TeamThreadLimit;
433 int MaxTeamsDefault;
434 };
435
436 /// Class containing all the device information
437 class RTLDeviceInfoTy {
438 std::vector<std::list<FuncOrGblEntryTy>> FuncGblEntries;
439 bool HSAInitializeSucceeded = false;
440
441 public:
442 // load binary populates symbol tables and mutates various global state
443 // run uses those symbol tables
444 std::shared_timed_mutex load_run_lock;
445
446 int NumberOfDevices = 0;
447
448 // GPU devices
449 std::vector<hsa_agent_t> HSAAgents;
450 std::vector<hsa_queue_t *> HSAQueues; // one per gpu
451
452 // CPUs
453 std::vector<hsa_agent_t> CPUAgents;
454
455 // Device properties
456 std::vector<int> ComputeUnits;
457 std::vector<int> GroupsPerDevice;
458 std::vector<int> ThreadsPerGroup;
459 std::vector<int> WarpSize;
460 std::vector<std::string> GPUName;
461
462 // OpenMP properties
463 std::vector<int> NumTeams;
464 std::vector<int> NumThreads;
465
466 // OpenMP Environment properties
467 EnvironmentVariables Env;
468
469 // OpenMP Requires Flags
470 int64_t RequiresFlags;
471
472 // Resource pools
473 SignalPoolT FreeSignalPool;
474
475 bool hostcall_required = false;
476
477 std::vector<hsa_executable_t> HSAExecutables;
478
479 std::vector<std::map<std::string, atl_kernel_info_t>> KernelInfoTable;
480 std::vector<std::map<std::string, atl_symbol_info_t>> SymbolInfoTable;
481
482 hsa_amd_memory_pool_t KernArgPool;
483
484 // fine grained memory pool for host allocations
485 hsa_amd_memory_pool_t HostFineGrainedMemoryPool;
486
487 // fine and coarse-grained memory pools per offloading device
488 std::vector<hsa_amd_memory_pool_t> DeviceFineGrainedMemoryPools;
489 std::vector<hsa_amd_memory_pool_t> DeviceCoarseGrainedMemoryPools;
490
491 struct atmiFreePtrDeletor {
492 void operator()(void *p) {
493 core::Runtime::Memfree(p); // ignore failure to free
494 }
495 };
496
497 // device_State shared across loaded binaries, error if inconsistent size
498 std::vector<std::pair<std::unique_ptr<void, atmiFreePtrDeletor>, uint64_t>>
499 deviceStateStore;
500
501 static const unsigned HardTeamLimit =
502 (1 << 16) - 1; // 64K - 1, so the value fits in a uint16_t
503 static const int DefaultNumTeams = 128;
504 static const int Max_Teams =
505 llvm::omp::AMDGPUGpuGridValues[llvm::omp::GVIDX::GV_Max_Teams];
506 static const int Warp_Size =
507 llvm::omp::AMDGPUGpuGridValues[llvm::omp::GVIDX::GV_Warp_Size];
508 static const int Max_WG_Size =
509 llvm::omp::AMDGPUGpuGridValues[llvm::omp::GVIDX::GV_Max_WG_Size];
510 static const int Default_WG_Size =
511 llvm::omp::AMDGPUGpuGridValues[llvm::omp::GVIDX::GV_Default_WG_Size];
512
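// Memcpy helper: borrow a completion signal from the pool, run the ATMI copy
// routine with it, and return the signal to the pool afterwards.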
513 using MemcpyFunc = hsa_status_t (*)(hsa_signal_t, void *, const void *,
514 size_t size, hsa_agent_t);
515 hsa_status_t freesignalpool_memcpy(void *dest, const void *src, size_t size,
516 MemcpyFunc Func, int32_t deviceId) {
517 hsa_agent_t agent = HSAAgents[deviceId];
518 hsa_signal_t s = FreeSignalPool.pop();
519 if (s.handle == 0) {
520 return HSA_STATUS_ERROR;
521 }
522 hsa_status_t r = Func(s, dest, src, size, agent);
523 FreeSignalPool.push(s);
524 return r;
525 }
526
527 hsa_status_t freesignalpool_memcpy_d2h(void *dest, const void *src,
528 size_t size, int32_t deviceId) {
529 return freesignalpool_memcpy(dest, src, size, atmi_memcpy_d2h, deviceId);
530 }
531
532 hsa_status_t freesignalpool_memcpy_h2d(void *dest, const void *src,
533 size_t size, int32_t deviceId) {
534 return freesignalpool_memcpy(dest, src, size, atmi_memcpy_h2d, deviceId);
535 }
536
537 // Record entry point associated with device
538 void addOffloadEntry(int32_t device_id, __tgt_offload_entry entry) {
539 assert(device_id < (int32_t)FuncGblEntries.size() &&
540 "Unexpected device id!");
541 FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();
542
543 E.Entries.push_back(entry);
544 }
545
546 // Return true if the entry is associated with device
547 bool findOffloadEntry(int32_t device_id, void *addr) {
548 assert(device_id < (int32_t)FuncGblEntries.size() &&
549 "Unexpected device id!");
550 FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();
551
552 for (auto &it : E.Entries) {
553 if (it.addr == addr)
554 return true;
555 }
556
557 return false;
558 }
559
560 // Return the pointer to the target entries table
561 __tgt_target_table *getOffloadEntriesTable(int32_t device_id) {
562 assert(device_id < (int32_t)FuncGblEntries.size() &&
563 "Unexpected device id!");
564 FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();
565
566 int32_t size = E.Entries.size();
567
568 // Table is empty
569 if (!size)
570 return 0;
571
572 __tgt_offload_entry *begin = &E.Entries[0];
573 __tgt_offload_entry *end = &E.Entries[size - 1];
574
575 // Update table info according to the entries and return the pointer
576 E.Table.EntriesBegin = begin;
577 E.Table.EntriesEnd = ++end;
578
579 return &E.Table;
580 }
581
582 // Clear entries table for a device
583 void clearOffloadEntriesTable(int device_id) {
584 assert(device_id < (int32_t)FuncGblEntries.size() &&
585 "Unexpected device id!");
586 FuncGblEntries[device_id].emplace_back();
587 FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();
588 // KernelArgPoolMap.clear();
589 E.Entries.clear();
590 E.Table.EntriesBegin = E.Table.EntriesEnd = 0;
591 }
592
593 hsa_status_t addDeviceMemoryPool(hsa_amd_memory_pool_t MemoryPool,
594 int DeviceId) {
595 assert(DeviceId < DeviceFineGrainedMemoryPools.size() &&
"DeviceId out of range");
596 uint32_t GlobalFlags = 0;
597 hsa_status_t Err = hsa_amd_memory_pool_get_info(
598 MemoryPool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &GlobalFlags);
599
600 if (Err != HSA_STATUS_SUCCESS) {
601 return Err;
602 }
603
604 if (GlobalFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED) {
605 DeviceFineGrainedMemoryPools[DeviceId] = MemoryPool;
606 } else if (GlobalFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_COARSE_GRAINED) {
607 DeviceCoarseGrainedMemoryPools[DeviceId] = MemoryPool;
608 }
609
610 return HSA_STATUS_SUCCESS;
611 }
612
613 hsa_status_t addHostMemoryPool(hsa_amd_memory_pool_t MemoryPool,
614 int DeviceId) {
615 uint32_t GlobalFlags = 0;
616 hsa_status_t Err = hsa_amd_memory_pool_get_info(
617 MemoryPool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &GlobalFlags);
618
619 if (Err != HSA_STATUS_SUCCESS) {
620 return Err;
621 }
622
623 uint32_t Size;
624 Err = hsa_amd_memory_pool_get_info(MemoryPool,
625 HSA_AMD_MEMORY_POOL_INFO_SIZE, &Size);
626 if (Err != HSA_STATUS_SUCCESS) {
627 return Err;
628 }
629
630 if (GlobalFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED &&
631 Size > 0) {
632 HostFineGrainedMemoryPool = MemoryPool;
633 }
634
635 return HSA_STATUS_SUCCESS;
636 }
637
638 hsa_status_t setupMemoryPools() {
639 using namespace std::placeholders;
640 hsa_status_t Err;
641 Err = core::collectMemoryPools(
642 CPUAgents, std::bind(&RTLDeviceInfoTy::addHostMemoryPool, this, _1, _2));
643 if (Err != HSA_STATUS_SUCCESS) {
644 fprintf(stderr, "HSA error in collecting memory pools for CPU: %s\n",
645 get_error_string(Err));
646 return Err;
647 }
648 Err = core::collectMemoryPools(
649 HSAAgents, std::bind(&RTLDeviceInfoTy::addDeviceMemoryPool, this, _1, _2));
650 if (Err != HSA_STATUS_SUCCESS) {
651 fprintf(stderr,
652 "HSA error in collecting memory pools for offload devices: %s\n",
653 get_error_string(Err));
654 return Err;
655 }
656 return HSA_STATUS_SUCCESS;
657 }
658
659 hsa_amd_memory_pool_t getDeviceMemoryPool(int DeviceId) {
660 assert(DeviceId >= 0 && DeviceId < DeviceCoarseGrainedMemoryPools.size() &&
661 "Invalid device Id");
662 return DeviceCoarseGrainedMemoryPools[DeviceId];
663 }
664
665 hsa_amd_memory_pool_t getHostMemoryPool() {
666 return HostFineGrainedMemoryPool;
667 }
668
669 static int readEnvElseMinusOne(const char *Env) {
670 const char *envStr = getenv(Env);
671 int res = -1;
672 if (envStr) {
673 res = std::stoi(envStr);
674 DP("Parsed %s=%d\n", Env, res);
675 }
676 return res;
677 }
678
679 RTLDeviceInfoTy() {
680 // LIBOMPTARGET_KERNEL_TRACE provides a kernel launch trace to stderr
681 // at any time; it does not require a debug build of the library.
682 // 0 => no tracing
683 // 1 => tracing dispatch only
684 // >1 => verbosity increase
685 if (char *envStr = getenv("LIBOMPTARGET_KERNEL_TRACE"))
686 print_kernel_trace = atoi(envStr);
687 else
688 print_kernel_trace = 0;
689
690 DP("Start initializing HSA-ATMI\n");
691 hsa_status_t err = core::atl_init_gpu_context();
692 if (err == HSA_STATUS_SUCCESS) {
693 HSAInitializeSucceeded = true;
694 } else {
695 DP("Error when initializing HSA-ATMI\n");
696 return;
697 }
698
699 // Init hostcall soon after initializing ATMI
700 hostrpc_init();
701
702 err = FindAgents([&](hsa_device_type_t DeviceType, hsa_agent_t Agent) {
703 if (DeviceType == HSA_DEVICE_TYPE_CPU) {
704 CPUAgents.push_back(Agent);
705 } else {
706 HSAAgents.push_back(Agent);
707 }
708 });
709 if (err != HSA_STATUS_SUCCESS)
710 return;
711
712 NumberOfDevices = (int)HSAAgents.size();
713
714 if (NumberOfDevices == 0) {
715 DP("There are no devices supporting HSA.\n");
716 return;
717 } else {
718 DP("There are %d devices supporting HSA.\n", NumberOfDevices);
719 }
720 std::tie(err, KernArgPool) = core::FindKernargPool(CPUAgents);
721 if (err != HSA_STATUS_SUCCESS) {
722 DP("Error when reading memory pools\n");
723 return;
724 }
725
726 // Init the device info
727 HSAQueues.resize(NumberOfDevices);
728 FuncGblEntries.resize(NumberOfDevices);
729 ThreadsPerGroup.resize(NumberOfDevices);
730 ComputeUnits.resize(NumberOfDevices);
731 GPUName.resize(NumberOfDevices);
732 GroupsPerDevice.resize(NumberOfDevices);
733 WarpSize.resize(NumberOfDevices);
734 NumTeams.resize(NumberOfDevices);
735 NumThreads.resize(NumberOfDevices);
736 deviceStateStore.resize(NumberOfDevices);
737 KernelInfoTable.resize(NumberOfDevices);
738 SymbolInfoTable.resize(NumberOfDevices);
739 DeviceCoarseGrainedMemoryPools.resize(NumberOfDevices);
740 DeviceFineGrainedMemoryPools.resize(NumberOfDevices);
741
742 err = setupMemoryPools();
743 if (err != HSA_STATUS_SUCCESS) {
744 DP("Error when setting up memory pools");
745 return;
746 }
747
748 for (int i = 0; i < NumberOfDevices; i++) {
749 HSAQueues[i] = nullptr;
750 }
751
752 for (int i = 0; i < NumberOfDevices; i++) {
753 uint32_t queue_size = 0;
754 {
755 hsa_status_t err = hsa_agent_get_info(
756 HSAAgents[i], HSA_AGENT_INFO_QUEUE_MAX_SIZE, &queue_size);
757 if (err != HSA_STATUS_SUCCESS) {
758 DP("HSA query QUEUE_MAX_SIZE failed for agent %d\n", i);
759 return;
760 }
761 if (queue_size > core::Runtime::getInstance().getMaxQueueSize()) {
762 queue_size = core::Runtime::getInstance().getMaxQueueSize();
763 }
764 }
765
766 hsa_status_t rc = hsa_queue_create(
767 HSAAgents[i], queue_size, HSA_QUEUE_TYPE_MULTI, callbackQueue, NULL,
768 UINT32_MAX, UINT32_MAX, &HSAQueues[i]);
769 if (rc != HSA_STATUS_SUCCESS) {
770 DP("Failed to create HSA queue %d\n", i);
771 return;
772 }
773
774 deviceStateStore[i] = {nullptr, 0};
775 }
776
777 for (int i = 0; i < NumberOfDevices; i++) {
778 ThreadsPerGroup[i] = RTLDeviceInfoTy::Default_WG_Size;
779 GroupsPerDevice[i] = RTLDeviceInfoTy::DefaultNumTeams;
780 ComputeUnits[i] = 1;
781 DP("Device %d: Initial groupsPerDevice %d & threadsPerGroup %d\n", i,
782 GroupsPerDevice[i], ThreadsPerGroup[i]);
783 }
784
785 // Get environment variables regarding teams
786 Env.TeamLimit = readEnvElseMinusOne("OMP_TEAM_LIMIT");
787 Env.NumTeams = readEnvElseMinusOne("OMP_NUM_TEAMS");
788 Env.MaxTeamsDefault = readEnvElseMinusOne("OMP_MAX_TEAMS_DEFAULT");
789 Env.TeamThreadLimit = readEnvElseMinusOne("OMP_TEAMS_THREAD_LIMIT");
790
791 // Default state.
792 RequiresFlags = OMP_REQ_UNDEFINED;
793 }
794
795 ~RTLDeviceInfoTy() {
796 DP("Finalizing the HSA-ATMI DeviceInfo.\n");
797 if (!HSAInitializeSucceeded) {
798 // Then none of these can have been set up and they can't be torn down
799 return;
800 }
801 // Run destructors on types that use HSA before
802 // atmi_finalize removes access to it
803 deviceStateStore.clear();
804 KernelArgPoolMap.clear();
805 // Terminate hostrpc before finalizing ATMI
806 hostrpc_terminate();
807
808 hsa_status_t Err;
809 for (uint32_t I = 0; I < HSAExecutables.size(); I++) {
810 Err = hsa_executable_destroy(HSAExecutables[I]);
811 if (Err != HSA_STATUS_SUCCESS) {
812 DP("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
813 "Destroying executable", get_error_string(Err));
814 }
815 }
816
817 Err = hsa_shut_down();
818 if (Err != HSA_STATUS_SUCCESS) {
819 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, "Shutting down HSA",
820 get_error_string(Err));
821 }
822 }
823 };
824
825 pthread_mutex_t SignalPoolT::mutex = PTHREAD_MUTEX_INITIALIZER;
826
827 // TODO: May need to drop the trailing two fields until deviceRTL is updated
828 struct omptarget_device_environmentTy {
829 int32_t debug_level; // gets value of envvar LIBOMPTARGET_DEVICE_RTL_DEBUG
830 // only useful for Debug build of deviceRTLs
831 int32_t num_devices; // gets number of active offload devices
832 int32_t device_num; // gets a value 0 to num_devices-1
833 };
834
835 static RTLDeviceInfoTy DeviceInfo;
836
837 namespace {
838
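// Copy Size bytes from device memory at TgtPtr back to host memory at HstPtr;
// a null HstPtr means there is nothing to copy back.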
839 int32_t dataRetrieve(int32_t DeviceId, void *HstPtr, void *TgtPtr, int64_t Size,
840 __tgt_async_info *AsyncInfo) {
841 assert(AsyncInfo && "AsyncInfo is nullptr");
842 assert(DeviceId < DeviceInfo.NumberOfDevices && "Device ID too large");
843 // Return success if we are not copying back to host from target.
844 if (!HstPtr)
845 return OFFLOAD_SUCCESS;
846 hsa_status_t err;
847 DP("Retrieve data %ld bytes, (tgt:%016llx) -> (hst:%016llx).\n", Size,
848 (long long unsigned)(Elf64_Addr)TgtPtr,
849 (long long unsigned)(Elf64_Addr)HstPtr);
850
851 err = DeviceInfo.freesignalpool_memcpy_d2h(HstPtr, TgtPtr, (size_t)Size,
852 DeviceId);
853
854 if (err != HSA_STATUS_SUCCESS) {
855 DP("Error when copying data from device to host. Pointers: "
856 "host = 0x%016lx, device = 0x%016lx, size = %lld\n",
857 (Elf64_Addr)HstPtr, (Elf64_Addr)TgtPtr, (unsigned long long)Size);
858 return OFFLOAD_FAIL;
859 }
860 DP("DONE Retrieve data %ld bytes, (tgt:%016llx) -> (hst:%016llx).\n", Size,
861 (long long unsigned)(Elf64_Addr)TgtPtr,
862 (long long unsigned)(Elf64_Addr)HstPtr);
863 return OFFLOAD_SUCCESS;
864 }
865
866 int32_t dataSubmit(int32_t DeviceId, void *TgtPtr, void *HstPtr, int64_t Size,
867 __tgt_async_info *AsyncInfo) {
868 assert(AsyncInfo && "AsyncInfo is nullptr");
869 hsa_status_t err;
870 assert(DeviceId < DeviceInfo.NumberOfDevices && "Device ID too large");
871 // Return success if we are not doing host to target.
872 if (!HstPtr)
873 return OFFLOAD_SUCCESS;
874
875 DP("Submit data %ld bytes, (hst:%016llx) -> (tgt:%016llx).\n", Size,
876 (long long unsigned)(Elf64_Addr)HstPtr,
877 (long long unsigned)(Elf64_Addr)TgtPtr);
878 err = DeviceInfo.freesignalpool_memcpy_h2d(TgtPtr, HstPtr, (size_t)Size,
879 DeviceId);
880 if (err != HSA_STATUS_SUCCESS) {
881 DP("Error when copying data from host to device. Pointers: "
882 "host = 0x%016lx, device = 0x%016lx, size = %lld\n",
883 (Elf64_Addr)HstPtr, (Elf64_Addr)TgtPtr, (unsigned long long)Size);
884 return OFFLOAD_FAIL;
885 }
886 return OFFLOAD_SUCCESS;
887 }
888
889 // Async.
890 // The implementation was written with cuda streams in mind. The semantics of
891 // that are to execute kernels on a queue in order of insertion. A synchronise
892 // call then makes writes visible between host and device. This means a series
893 // of N data_submit_async calls are expected to execute serially. HSA offers
894 // various options to run the data copies concurrently. This may require changes
895 // to libomptarget.
896
897 // __tgt_async_info* contains a void * Queue. Queue = 0 is used to indicate that
898 // there are no outstanding kernels that need to be synchronized. Any async call
899 // may be passed a Queue==0, at which point the cuda implementation will set it
900 // to non-null (see getStream). The cuda streams are per-device. Upstream may
901 // change this interface to explicitly initialize the AsyncInfo_pointer, but
902 // until then hsa lazily initializes it as well.
903
904 void initAsyncInfo(__tgt_async_info *AsyncInfo) {
905 // set non-null while using async calls, return to null to indicate completion
906 assert(AsyncInfo);
907 if (!AsyncInfo->Queue) {
908 AsyncInfo->Queue = reinterpret_cast<void *>(UINT64_MAX);
909 }
910 }
911 void finiAsyncInfo(__tgt_async_info *AsyncInfo) {
912 assert(AsyncInfo);
913 assert(AsyncInfo->Queue);
914 AsyncInfo->Queue = 0;
915 }
916
917 bool elf_machine_id_is_amdgcn(__tgt_device_image *image) {
918 const uint16_t amdgcnMachineID = 224; // EM_AMDGPU may not be in system elf.h
919 int32_t r = elf_check_machine(image, amdgcnMachineID);
920 if (!r) {
921 DP("Supported machine ID not found\n");
922 }
923 return r;
924 }
925
926 uint32_t elf_e_flags(__tgt_device_image *image) {
927 char *img_begin = (char *)image->ImageStart;
928 size_t img_size = (char *)image->ImageEnd - img_begin;
929
930 Elf *e = elf_memory(img_begin, img_size);
931 if (!e) {
932 DP("Unable to get ELF handle: %s!\n", elf_errmsg(-1));
933 return 0;
934 }
935
936 Elf64_Ehdr *eh64 = elf64_getehdr(e);
937
938 if (!eh64) {
939 DP("Unable to get machine ID from ELF file!\n");
940 elf_end(e);
941 return 0;
942 }
943
944 uint32_t Flags = eh64->e_flags;
945
946 elf_end(e);
947 DP("ELF Flags: 0x%x\n", Flags);
948 return Flags;
949 }
950 } // namespace
951
952 int32_t __tgt_rtl_is_valid_binary(__tgt_device_image *image) {
953 return elf_machine_id_is_amdgcn(image);
954 }
955
956 int __tgt_rtl_number_of_devices() { return DeviceInfo.NumberOfDevices; }
957
958 int64_t __tgt_rtl_init_requires(int64_t RequiresFlags) {
959 DP("Init requires flags to %ld\n", RequiresFlags);
960 DeviceInfo.RequiresFlags = RequiresFlags;
961 return RequiresFlags;
962 }
963
964 namespace {
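// Clamp *value to upper; returns true when clamping occurred so callers can
// report which limit was applied.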
965 template <typename T> bool enforce_upper_bound(T *value, T upper) {
966 bool changed = *value > upper;
967 if (changed) {
968 *value = upper;
969 }
970 return changed;
971 }
972 } // namespace
973
974 int32_t __tgt_rtl_init_device(int device_id) {
975 hsa_status_t err;
976
977 // this is per device id init
978 DP("Initialize the device id: %d\n", device_id);
979
980 hsa_agent_t agent = DeviceInfo.HSAAgents[device_id];
981
982 // Get number of Compute Unit
983 uint32_t compute_units = 0;
984 err = hsa_agent_get_info(
985 agent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_COMPUTE_UNIT_COUNT,
986 &compute_units);
987 if (err != HSA_STATUS_SUCCESS) {
988 DeviceInfo.ComputeUnits[device_id] = 1;
989 DP("Error getting compute units : settiing to 1\n");
990 } else {
991 DeviceInfo.ComputeUnits[device_id] = compute_units;
992 DP("Using %d compute unis per grid\n", DeviceInfo.ComputeUnits[device_id]);
993 }
994
995 char GetInfoName[64]; // 64 max size returned by get info
996 err = hsa_agent_get_info(agent, (hsa_agent_info_t)HSA_AGENT_INFO_NAME,
997 (void *)GetInfoName);
998 if (err)
999 DeviceInfo.GPUName[device_id] = "--unknown gpu--";
1000 else {
1001 DeviceInfo.GPUName[device_id] = GetInfoName;
1002 }
1003
1004 if (print_kernel_trace & STARTUP_DETAILS)
1005 fprintf(stderr, "Device#%-2d CU's: %2d %s\n", device_id,
1006 DeviceInfo.ComputeUnits[device_id],
1007 DeviceInfo.GPUName[device_id].c_str());
1008
1009 // Query attributes to determine number of threads/block and blocks/grid.
1010 uint16_t workgroup_max_dim[3];
1011 err = hsa_agent_get_info(agent, HSA_AGENT_INFO_WORKGROUP_MAX_DIM,
1012 &workgroup_max_dim);
1013 if (err != HSA_STATUS_SUCCESS) {
1014 DeviceInfo.GroupsPerDevice[device_id] = RTLDeviceInfoTy::DefaultNumTeams;
1015 DP("Error getting grid dims: num groups : %d\n",
1016 RTLDeviceInfoTy::DefaultNumTeams);
1017 } else if (workgroup_max_dim[0] <= RTLDeviceInfoTy::HardTeamLimit) {
1018 DeviceInfo.GroupsPerDevice[device_id] = workgroup_max_dim[0];
1019 DP("Using %d ROCm blocks per grid\n",
1020 DeviceInfo.GroupsPerDevice[device_id]);
1021 } else {
1022 DeviceInfo.GroupsPerDevice[device_id] = RTLDeviceInfoTy::HardTeamLimit;
1023 DP("Max ROCm blocks per grid %d exceeds the hard team limit %d, capping "
1024 "at the hard limit\n",
1025 workgroup_max_dim[0], RTLDeviceInfoTy::HardTeamLimit);
1026 }
1027
1028 // Get thread limit
1029 hsa_dim3_t grid_max_dim;
1030 err = hsa_agent_get_info(agent, HSA_AGENT_INFO_GRID_MAX_DIM, &grid_max_dim);
1031 if (err == HSA_STATUS_SUCCESS) {
1032 DeviceInfo.ThreadsPerGroup[device_id] =
1033 reinterpret_cast<uint32_t *>(&grid_max_dim)[0] /
1034 DeviceInfo.GroupsPerDevice[device_id];
1035
1036 if (DeviceInfo.ThreadsPerGroup[device_id] == 0) {
1037 DeviceInfo.ThreadsPerGroup[device_id] = RTLDeviceInfoTy::Max_WG_Size;
1038 DP("Default thread limit: %d\n", RTLDeviceInfoTy::Max_WG_Size);
1039 } else if (enforce_upper_bound(&DeviceInfo.ThreadsPerGroup[device_id],
1040 RTLDeviceInfoTy::Max_WG_Size)) {
1041 DP("Capped thread limit: %d\n", RTLDeviceInfoTy::Max_WG_Size);
1042 } else {
1043 DP("Using ROCm Queried thread limit: %d\n",
1044 DeviceInfo.ThreadsPerGroup[device_id]);
1045 }
1046 } else {
1047 DeviceInfo.ThreadsPerGroup[device_id] = RTLDeviceInfoTy::Max_WG_Size;
1048 DP("Error getting max block dimension, use default:%d \n",
1049 RTLDeviceInfoTy::Max_WG_Size);
1050 }
1051
1052 // Get wavefront size
1053 uint32_t wavefront_size = 0;
1054 err =
1055 hsa_agent_get_info(agent, HSA_AGENT_INFO_WAVEFRONT_SIZE, &wavefront_size);
1056 if (err == HSA_STATUS_SUCCESS) {
1057 DP("Queried wavefront size: %d\n", wavefront_size);
1058 DeviceInfo.WarpSize[device_id] = wavefront_size;
1059 } else {
1060 DP("Default wavefront size: %d\n",
1061 llvm::omp::AMDGPUGpuGridValues[llvm::omp::GVIDX::GV_Warp_Size]);
1062 DeviceInfo.WarpSize[device_id] =
1063 llvm::omp::AMDGPUGpuGridValues[llvm::omp::GVIDX::GV_Warp_Size];
1064 }
1065
1066 // Adjust teams to the env variables
1067
1068 if (DeviceInfo.Env.TeamLimit > 0 &&
1069 (enforce_upper_bound(&DeviceInfo.GroupsPerDevice[device_id],
1070 DeviceInfo.Env.TeamLimit))) {
1071 DP("Capping max groups per device to OMP_TEAM_LIMIT=%d\n",
1072 DeviceInfo.Env.TeamLimit);
1073 }
1074
1075 // Set default number of teams
1076 if (DeviceInfo.Env.NumTeams > 0) {
1077 DeviceInfo.NumTeams[device_id] = DeviceInfo.Env.NumTeams;
1078 DP("Default number of teams set according to environment %d\n",
1079 DeviceInfo.Env.NumTeams);
1080 } else {
1081 char *TeamsPerCUEnvStr = getenv("OMP_TARGET_TEAMS_PER_PROC");
1082 int TeamsPerCU = DefaultTeamsPerCU;
1083 if (TeamsPerCUEnvStr) {
1084 TeamsPerCU = std::stoi(TeamsPerCUEnvStr);
1085 }
1086
1087 DeviceInfo.NumTeams[device_id] =
1088 TeamsPerCU * DeviceInfo.ComputeUnits[device_id];
1089 DP("Default number of teams = %d * number of compute units %d\n",
1090 TeamsPerCU, DeviceInfo.ComputeUnits[device_id]);
1091 }
1092
1093 if (enforce_upper_bound(&DeviceInfo.NumTeams[device_id],
1094 DeviceInfo.GroupsPerDevice[device_id])) {
1095 DP("Default number of teams exceeds device limit, capping at %d\n",
1096 DeviceInfo.GroupsPerDevice[device_id]);
1097 }
1098
1099 // Adjust threads to the env variables
1100 if (DeviceInfo.Env.TeamThreadLimit > 0 &&
1101 (enforce_upper_bound(&DeviceInfo.NumThreads[device_id],
1102 DeviceInfo.Env.TeamThreadLimit))) {
1103 DP("Capping max number of threads to OMP_TEAMS_THREAD_LIMIT=%d\n",
1104 DeviceInfo.Env.TeamThreadLimit);
1105 }
1106
1107 // Set default number of threads
1108 DeviceInfo.NumThreads[device_id] = RTLDeviceInfoTy::Default_WG_Size;
1109 DP("Default number of threads set according to library's default %d\n",
1110 RTLDeviceInfoTy::Default_WG_Size);
1111 if (enforce_upper_bound(&DeviceInfo.NumThreads[device_id],
1112 DeviceInfo.ThreadsPerGroup[device_id])) {
1113 DP("Default number of threads exceeds device limit, capping at %d\n",
1114 DeviceInfo.ThreadsPerGroup[device_id]);
1115 }
1116
1117 DP("Device %d: default limit for groupsPerDevice %d & threadsPerGroup %d\n",
1118 device_id, DeviceInfo.GroupsPerDevice[device_id],
1119 DeviceInfo.ThreadsPerGroup[device_id]);
1120
1121 DP("Device %d: wavefront size %d, total threads %d x %d = %d\n", device_id,
1122 DeviceInfo.WarpSize[device_id], DeviceInfo.ThreadsPerGroup[device_id],
1123 DeviceInfo.GroupsPerDevice[device_id],
1124 DeviceInfo.GroupsPerDevice[device_id] *
1125 DeviceInfo.ThreadsPerGroup[device_id]);
1126
1127 return OFFLOAD_SUCCESS;
1128 }
1129
1130 namespace {
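// Return the image's single SHT_HASH section header, or nullptr if there are
// zero or several hash sections.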
1131 Elf64_Shdr *find_only_SHT_HASH(Elf *elf) {
1132 size_t N;
1133 int rc = elf_getshdrnum(elf, &N);
1134 if (rc != 0) {
1135 return nullptr;
1136 }
1137
1138 Elf64_Shdr *result = nullptr;
1139 for (size_t i = 0; i < N; i++) {
1140 Elf_Scn *scn = elf_getscn(elf, i);
1141 if (scn) {
1142 Elf64_Shdr *shdr = elf64_getshdr(scn);
1143 if (shdr) {
1144 if (shdr->sh_type == SHT_HASH) {
1145 if (result == nullptr) {
1146 result = shdr;
1147 } else {
1148 // multiple SHT_HASH sections not handled
1149 return nullptr;
1150 }
1151 }
1152 }
1153 }
1154 }
1155 return result;
1156 }
1157
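// Look up a symbol through the SysV hash section without loading the image,
// walking the hash table's bucket/chain arrays (layout documented inside).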
1158 const Elf64_Sym *elf_lookup(Elf *elf, char *base, Elf64_Shdr *section_hash,
1159 const char *symname) {
1160
1161 assert(section_hash);
1162 size_t section_symtab_index = section_hash->sh_link;
1163 Elf64_Shdr *section_symtab =
1164 elf64_getshdr(elf_getscn(elf, section_symtab_index));
1165 size_t section_strtab_index = section_symtab->sh_link;
1166
1167 const Elf64_Sym *symtab =
1168 reinterpret_cast<const Elf64_Sym *>(base + section_symtab->sh_offset);
1169
1170 const uint32_t *hashtab =
1171 reinterpret_cast<const uint32_t *>(base + section_hash->sh_offset);
1172
1173 // Layout:
1174 // nbucket
1175 // nchain
1176 // bucket[nbucket]
1177 // chain[nchain]
1178 uint32_t nbucket = hashtab[0];
1179 const uint32_t *bucket = &hashtab[2];
1180 const uint32_t *chain = &hashtab[nbucket + 2];
1181
1182 const size_t max = strlen(symname) + 1;
1183 const uint32_t hash = elf_hash(symname);
1184 for (uint32_t i = bucket[hash % nbucket]; i != 0; i = chain[i]) {
1185 char *n = elf_strptr(elf, section_strtab_index, symtab[i].st_name);
1186 if (strncmp(symname, n, max) == 0) {
1187 return &symtab[i];
1188 }
1189 }
1190
1191 return nullptr;
1192 }
1193
1194 struct symbol_info {
1195 void *addr = nullptr;
1196 uint32_t size = UINT32_MAX;
1197 uint32_t sh_type = SHT_NULL;
1198 };
1199
1200 int get_symbol_info_without_loading(Elf *elf, char *base, const char *symname,
1201 symbol_info *res) {
1202 if (elf_kind(elf) != ELF_K_ELF) {
1203 return 1;
1204 }
1205
1206 Elf64_Shdr *section_hash = find_only_SHT_HASH(elf);
1207 if (!section_hash) {
1208 return 1;
1209 }
1210
1211 const Elf64_Sym *sym = elf_lookup(elf, base, section_hash, symname);
1212 if (!sym) {
1213 return 1;
1214 }
1215
1216 if (sym->st_size > UINT32_MAX) {
1217 return 1;
1218 }
1219
1220 if (sym->st_shndx == SHN_UNDEF) {
1221 return 1;
1222 }
1223
1224 Elf_Scn *section = elf_getscn(elf, sym->st_shndx);
1225 if (!section) {
1226 return 1;
1227 }
1228
1229 Elf64_Shdr *header = elf64_getshdr(section);
1230 if (!header) {
1231 return 1;
1232 }
1233
1234 res->addr = sym->st_value + base;
1235 res->size = static_cast<uint32_t>(sym->st_size);
1236 res->sh_type = header->sh_type;
1237 return 0;
1238 }
1239
1240 int get_symbol_info_without_loading(char *base, size_t img_size,
1241 const char *symname, symbol_info *res) {
1242 Elf *elf = elf_memory(base, img_size);
1243 if (elf) {
1244 int rc = get_symbol_info_without_loading(elf, base, symname, res);
1245 elf_end(elf);
1246 return rc;
1247 }
1248 return 1;
1249 }
1250
1251 hsa_status_t interop_get_symbol_info(char *base, size_t img_size,
1252 const char *symname, void **var_addr,
1253 uint32_t *var_size) {
1254 symbol_info si;
1255 int rc = get_symbol_info_without_loading(base, img_size, symname, &si);
1256 if (rc == 0) {
1257 *var_addr = si.addr;
1258 *var_size = si.size;
1259 return HSA_STATUS_SUCCESS;
1260 } else {
1261 return HSA_STATUS_ERROR;
1262 }
1263 }
1264
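// Same trampoline pattern as the hsa:: wrappers above: adapt an arbitrary
// callable to the C callback expected by core::RegisterModuleFromMemory.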
1265 template <typename C>
1266 hsa_status_t module_register_from_memory_to_place(
1267 std::map<std::string, atl_kernel_info_t> &KernelInfoTable,
1268 std::map<std::string, atl_symbol_info_t> &SymbolInfoTable,
1269 void *module_bytes, size_t module_size, int DeviceId, C cb,
1270 std::vector<hsa_executable_t> &HSAExecutables) {
1271 auto L = [](void *data, size_t size, void *cb_state) -> hsa_status_t {
1272 C *unwrapped = static_cast<C *>(cb_state);
1273 return (*unwrapped)(data, size);
1274 };
1275 return core::RegisterModuleFromMemory(
1276 KernelInfoTable, SymbolInfoTable, module_bytes, module_size,
1277 DeviceInfo.HSAAgents[DeviceId], L, static_cast<void *>(&cb),
1278 HSAExecutables);
1279 }
1280 } // namespace
1281
1282 static uint64_t get_device_State_bytes(char *ImageStart, size_t img_size) {
1283 uint64_t device_State_bytes = 0;
1284 {
1285 // If this is the deviceRTL, get the state variable size
1286 symbol_info size_si;
1287 int rc = get_symbol_info_without_loading(
1288 ImageStart, img_size, "omptarget_nvptx_device_State_size", &size_si);
1289
1290 if (rc == 0) {
1291 if (size_si.size != sizeof(uint64_t)) {
1292 DP("Found device_State_size variable with wrong size\n");
1293 return 0;
1294 }
1295
1296 // Read number of bytes directly from the elf
1297 memcpy(&device_State_bytes, size_si.addr, sizeof(uint64_t));
1298 }
1299 }
1300 return device_State_bytes;
1301 }
1302
1303 static __tgt_target_table *
1304 __tgt_rtl_load_binary_locked(int32_t device_id, __tgt_device_image *image);
1308
1309 __tgt_target_table *__tgt_rtl_load_binary(int32_t device_id,
1310 __tgt_device_image *image) {
1311 DeviceInfo.load_run_lock.lock();
1312 __tgt_target_table *res = __tgt_rtl_load_binary_locked(device_id, image);
1313 DeviceInfo.load_run_lock.unlock();
1314 return res;
1315 }
1316
1317 struct device_environment {
1318 // Initialises an omptarget_device_environmentTy in the deviceRTL and
1319 // patches around differences in the deviceRTL between trunk, aomp and
1320 // rocmcc. Over time these differences will tend to zero and this class
1321 // can be simplified.
1322 // Symbol may be in .data or .bss, and may be missing fields:
1323 // - aomp has debug_level, num_devices, device_num
1324 // - trunk has debug_level
1325 // - under review in trunk is debug_level, device_num
1326 // - rocmcc matches aomp, patch to swap num_devices and device_num
1327
1328 // The symbol may also have been deadstripped because the device side
1329 // accessors were unused.
1330
1331 // If the symbol is in .data (aomp, rocm) it can be written directly.
1332 // If it is in .bss, we must wait for it to be allocated space on the
1333 // gpu (trunk) and initialize after loading.
1334 const char *sym() { return "omptarget_device_environment"; }
1335
1336 omptarget_device_environmentTy host_device_env;
1337 symbol_info si;
1338 bool valid = false;
1339
1340 __tgt_device_image *image;
1341 const size_t img_size;
1342
1343 device_environment(int device_id, int number_devices,
1344 __tgt_device_image *image, const size_t img_size)
1345 : image(image), img_size(img_size) {
1346
1347 host_device_env.num_devices = number_devices;
1348 host_device_env.device_num = device_id;
1349 host_device_env.debug_level = 0;
1350 #ifdef OMPTARGET_DEBUG
1351 if (char *envStr = getenv("LIBOMPTARGET_DEVICE_RTL_DEBUG")) {
1352 host_device_env.debug_level = std::stoi(envStr);
1353 }
1354 #endif
1355
1356 int rc = get_symbol_info_without_loading((char *)image->ImageStart,
1357 img_size, sym(), &si);
1358 if (rc != 0) {
1359 DP("Finding global device environment '%s' - symbol missing.\n", sym());
1360 return;
1361 }
1362
1363 if (si.size > sizeof(host_device_env)) {
1364 DP("Symbol '%s' has size %u, expected at most %zu.\n", sym(), si.size,
1365 sizeof(host_device_env));
1366 return;
1367 }
1368
1369 valid = true;
1370 }
1371
1372 bool in_image() { return si.sh_type != SHT_NOBITS; }
1373
1374 hsa_status_t before_loading(void *data, size_t size) {
1375 if (valid) {
1376 if (in_image()) {
1377 DP("Setting global device environment before load (%u bytes)\n",
1378 si.size);
1379 uint64_t offset = (char *)si.addr - (char *)image->ImageStart;
1380 void *pos = (char *)data + offset;
1381 memcpy(pos, &host_device_env, si.size);
1382 }
1383 }
1384 return HSA_STATUS_SUCCESS;
1385 }
1386
1387 hsa_status_t after_loading() {
1388 if (valid) {
1389 if (!in_image()) {
1390 DP("Setting global device environment after load (%u bytes)\n",
1391 si.size);
1392 int device_id = host_device_env.device_num;
1393 auto &SymbolInfo = DeviceInfo.SymbolInfoTable[device_id];
1394 void *state_ptr;
1395 uint32_t state_ptr_size;
1396 hsa_status_t err = atmi_interop_hsa_get_symbol_info(
1397 SymbolInfo, device_id, sym(), &state_ptr, &state_ptr_size);
1398 if (err != HSA_STATUS_SUCCESS) {
1399 DP("failed to find %s in loaded image\n", sym());
1400 return err;
1401 }
1402
1403 if (state_ptr_size != si.size) {
1404 DP("Symbol had size %u before loading, %u after\n", state_ptr_size,
1405 si.size);
1406 return HSA_STATUS_ERROR;
1407 }
1408
1409 return DeviceInfo.freesignalpool_memcpy_h2d(state_ptr, &host_device_env,
1410 state_ptr_size, device_id);
1411 }
1412 }
1413 return HSA_STATUS_SUCCESS;
1414 }
1415 };
1416
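// Allocate device memory rounded up to a multiple of four bytes and zero it;
// hsa_amd_memory_fill writes 32-bit words, hence the rounding and the /4.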
1417 static hsa_status_t atmi_calloc(void **ret_ptr, size_t size, int DeviceId) {
1418 uint64_t rounded = 4 * ((size + 3) / 4);
1419 void *ptr;
1420 hsa_status_t err = core::Runtime::DeviceMalloc(&ptr, rounded, DeviceId);
1421 if (err != HSA_STATUS_SUCCESS) {
1422 return err;
1423 }
1424
1425 hsa_status_t rc = hsa_amd_memory_fill(ptr, 0, rounded / 4);
1426 if (rc != HSA_STATUS_SUCCESS) {
1427 fprintf(stderr, "zero fill device_state failed with %u\n", rc);
1428 core::Runtime::Memfree(ptr);
1429 return HSA_STATUS_ERROR;
1430 }
1431
1432 *ret_ptr = ptr;
1433 return HSA_STATUS_SUCCESS;
1434 }
1435
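// True when the image defines the symbol with a non-null address; used below
// to detect whether the deviceRTL needs a hostcall buffer.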
1436 static bool image_contains_symbol(void *data, size_t size, const char *sym) {
1437 symbol_info si;
1438 int rc = get_symbol_info_without_loading((char *)data, size, sym, &si);
1439 return (rc == 0) && (si.addr != nullptr);
1440 }
1441
1442 __tgt_target_table *__tgt_rtl_load_binary_locked(int32_t device_id,
1443 __tgt_device_image *image) {
1444 // This function loads the device image onto gpu[device_id] and does other
1445 // per-image initialization work. Specifically:
1446 //
1447 // - Initialize an omptarget_device_environmentTy instance embedded in the
1448 // image at the symbol "omptarget_device_environment"
1449 // Fields debug_level, device_num, num_devices. Used by the deviceRTL.
1450 //
1451 // - Allocate a large array per-gpu (could be moved to init_device)
1452 // - Read a uint64_t at symbol omptarget_nvptx_device_State_size
1453 // - Allocate at least that many bytes of gpu memory
1454 // - Zero initialize it
1455 // - Write the pointer to the symbol omptarget_nvptx_device_State
1456 //
1457 // - Pulls some per-kernel information together from various sources and
1458 // records it in the KernelsList for quicker access later
1459 //
1460 // The initialization can be done before or after loading the image onto the
1461 // gpu. This function presently does a mixture. Using the hsa api to get/set
1462 // the information is simpler to implement, in exchange for more complicated
1463 // runtime behaviour. E.g. launching a kernel or using dma to get eight bytes
1464 // back from the gpu vs a hashtable lookup on the host.
1465
1466 const size_t img_size = (char *)image->ImageEnd - (char *)image->ImageStart;
1467
1468 DeviceInfo.clearOffloadEntriesTable(device_id);
1469
1470 // We do not need to set the ELF version because the caller of this function
1471 // had to do that to decide the right runtime to use
1472
1473 if (!elf_machine_id_is_amdgcn(image)) {
1474 return NULL;
1475 }
1476
1477 {
1478 auto env = device_environment(device_id, DeviceInfo.NumberOfDevices, image,
1479 img_size);
1480
1481 auto &KernelInfo = DeviceInfo.KernelInfoTable[device_id];
1482 auto &SymbolInfo = DeviceInfo.SymbolInfoTable[device_id];
1483 hsa_status_t err = module_register_from_memory_to_place(
1484 KernelInfo, SymbolInfo, (void *)image->ImageStart, img_size, device_id,
1485 [&](void *data, size_t size) {
1486 if (image_contains_symbol(data, size, "needs_hostcall_buffer")) {
1487 __atomic_store_n(&DeviceInfo.hostcall_required, true,
1488 __ATOMIC_RELEASE);
1489 }
1490 return env.before_loading(data, size);
1491 },
1492 DeviceInfo.HSAExecutables);
1493
1494 check("Module registering", err);
1495 if (err != HSA_STATUS_SUCCESS) {
1496 fprintf(stderr,
1497 "Possible gpu arch mismatch: device:%s, image:%s please check"
1498 " compiler flag: -march=<gpu>\n",
1499 DeviceInfo.GPUName[device_id].c_str(),
1500 get_elf_mach_gfx_name(elf_e_flags(image)));
1501 return NULL;
1502 }
1503
1504 err = env.after_loading();
1505 if (err != HSA_STATUS_SUCCESS) {
1506 return NULL;
1507 }
1508 }
1509
1510 DP("ATMI module successfully loaded!\n");
1511
1512 {
1513 // the device_State array is either large value in bss or a void* that
1514 // needs to be assigned to a pointer to an array of size device_state_bytes
1515 // If absent, it has been deadstripped and needs no setup.
1516
1517 void *state_ptr;
1518 uint32_t state_ptr_size;
1519 auto &SymbolInfoMap = DeviceInfo.SymbolInfoTable[device_id];
1520 hsa_status_t err = atmi_interop_hsa_get_symbol_info(
1521 SymbolInfoMap, device_id, "omptarget_nvptx_device_State", &state_ptr,
1522 &state_ptr_size);
1523
1524 if (err != HSA_STATUS_SUCCESS) {
1525 DP("No device_state symbol found, skipping initialization\n");
1526 } else {
1527 if (state_ptr_size < sizeof(void *)) {
1528 DP("unexpected size of state_ptr %u != %zu\n", state_ptr_size,
1529 sizeof(void *));
1530 return NULL;
1531 }
1532
1533 // if it's larger than a void*, assume it's a bss array and no further
1534 // initialization is required. Only try to set up a pointer for
1535 // sizeof(void*)
1536 if (state_ptr_size == sizeof(void *)) {
1537 uint64_t device_State_bytes =
1538 get_device_State_bytes((char *)image->ImageStart, img_size);
1539 if (device_State_bytes == 0) {
1540 DP("Can't initialize device_State, missing size information\n");
1541 return NULL;
1542 }
1543
1544 auto &dss = DeviceInfo.deviceStateStore[device_id];
1545 if (dss.first.get() == nullptr) {
1546 assert(dss.second == 0);
1547 void *ptr = NULL;
1548 hsa_status_t err = atmi_calloc(&ptr, device_State_bytes, device_id);
1549 if (err != HSA_STATUS_SUCCESS) {
1550 DP("Failed to allocate device_state array\n");
1551 return NULL;
1552 }
1553 dss = {
1554 std::unique_ptr<void, RTLDeviceInfoTy::atmiFreePtrDeletor>{ptr},
1555 device_State_bytes,
1556 };
1557 }
1558
1559 void *ptr = dss.first.get();
1560 if (device_State_bytes != dss.second) {
1561 DP("Inconsistent sizes of device_State unsupported\n");
1562 return NULL;
1563 }
1564
1565 // write ptr to device memory so it can be used by later kernels
1566 err = DeviceInfo.freesignalpool_memcpy_h2d(state_ptr, &ptr,
1567 sizeof(void *), device_id);
1568 if (err != HSA_STATUS_SUCCESS) {
1569 DP("memcpy install of state_ptr failed\n");
1570 return NULL;
1571 }
1572 }
1573 }
1574 }
1575
1576 // Here, we take advantage of the data that is appended after img_end to get
1577 // the names of the symbols we need to load. This data consists of the host
1578 // entries begin and end as well as the target name (see the offloading linker
1579 // script creation in the clang compiler).
1580 
1581 // Find the symbols in the module by name. The name can be obtained by
1582 // concatenating the host entry name with the target name.
1583
1584 __tgt_offload_entry *HostBegin = image->EntriesBegin;
1585 __tgt_offload_entry *HostEnd = image->EntriesEnd;
1586
1587 for (__tgt_offload_entry *e = HostBegin; e != HostEnd; ++e) {
1588
1589 if (!e->addr) {
1590 // The host should have always something in the address to
1591 // uniquely identify the target region.
1592 fprintf(stderr, "Analyzing host entry '<null>' (size = %lld)...\n",
1593 (unsigned long long)e->size);
1594 return NULL;
1595 }
1596
1597 if (e->size) {
1598 __tgt_offload_entry entry = *e;
1599
1600 void *varptr;
1601 uint32_t varsize;
1602
1603 auto &SymbolInfoMap = DeviceInfo.SymbolInfoTable[device_id];
1604 hsa_status_t err = atmi_interop_hsa_get_symbol_info(
1605 SymbolInfoMap, device_id, e->name, &varptr, &varsize);
1606
1607 if (err != HSA_STATUS_SUCCESS) {
1608 // Inform the user what symbol prevented offloading
1609 DP("Loading global '%s' (Failed)\n", e->name);
1610 return NULL;
1611 }
1612
1613 if (varsize != e->size) {
1614 DP("Loading global '%s' - size mismatch (%u != %lu)\n", e->name,
1615 varsize, e->size);
1616 return NULL;
1617 }
1618
1619 DP("Entry point " DPxMOD " maps to global %s (" DPxMOD ")\n",
1620 DPxPTR(e - HostBegin), e->name, DPxPTR(varptr));
1621 entry.addr = (void *)varptr;
1622
1623 DeviceInfo.addOffloadEntry(device_id, entry);
1624
1625 if (DeviceInfo.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
1626 e->flags & OMP_DECLARE_TARGET_LINK) {
1627 // If unified memory is present any target link variables
1628 // can access host addresses directly. There is no longer a
1629 // need for device copies.
1630 err = DeviceInfo.freesignalpool_memcpy_h2d(varptr, e->addr,
1631 sizeof(void *), device_id);
1632 if (err != HSA_STATUS_SUCCESS)
1633 DP("Error when copying USM\n");
1634 DP("Copy linked variable host address (" DPxMOD ")"
1635 "to device address (" DPxMOD ")\n",
1636 DPxPTR(*((void **)e->addr)), DPxPTR(varptr));
1637 }
1638
1639 continue;
1640 }
1641
1642 DP("to find the kernel name: %s size: %lu\n", e->name, strlen(e->name));
1643
1644 uint32_t kernarg_segment_size;
1645 auto &KernelInfoMap = DeviceInfo.KernelInfoTable[device_id];
1646 hsa_status_t err = atmi_interop_hsa_get_kernel_info(
1647 KernelInfoMap, device_id, e->name,
1648 HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_KERNARG_SEGMENT_SIZE,
1649 &kernarg_segment_size);
1650
1651 // each arg is a void * in this openmp implementation
1652 uint32_t arg_num = kernarg_segment_size / sizeof(void *);
1653 std::vector<size_t> arg_sizes(arg_num);
1654 for (std::vector<size_t>::iterator it = arg_sizes.begin();
1655 it != arg_sizes.end(); it++) {
1656 *it = sizeof(void *);
1657 }
1658
1659 // default value GENERIC (in case symbol is missing from cubin file)
1660 int8_t ExecModeVal = ExecutionModeType::GENERIC;
1661
1662 // get flat group size if present, else Default_WG_Size
1663 int16_t WGSizeVal = RTLDeviceInfoTy::Default_WG_Size;
1664
1665 // get Kernel Descriptor if present.
1666 // Keep struct in sync with getTgtAttributeStructQTy in CGOpenMPRuntime.cpp
1667 struct KernDescValType {
1668 uint16_t Version;
1669 uint16_t TSize;
1670 uint16_t WG_Size;
1671 uint8_t Mode;
1672 };
1673 struct KernDescValType KernDescVal;
1674 std::string KernDescNameStr(e->name);
1675 KernDescNameStr += "_kern_desc";
1676 const char *KernDescName = KernDescNameStr.c_str();
1677
1678 void *KernDescPtr;
1679 uint32_t KernDescSize;
1680 void *CallStackAddr = nullptr;
1681 err = interop_get_symbol_info((char *)image->ImageStart, img_size,
1682 KernDescName, &KernDescPtr, &KernDescSize);
1683
1684 if (err == HSA_STATUS_SUCCESS) {
1685 if ((size_t)KernDescSize != sizeof(KernDescVal))
1686 DP("Loading global computation properties '%s' - size mismatch (%u != "
1687 "%lu)\n",
1688 KernDescName, KernDescSize, sizeof(KernDescVal));
1689
1690 memcpy(&KernDescVal, KernDescPtr, (size_t)KernDescSize);
1691
1692 // Check structure size against recorded size.
1693 if ((size_t)KernDescSize != KernDescVal.TSize)
1694 DP("KernDescVal size %lu does not match advertized size %d for '%s'\n",
1695 sizeof(KernDescVal), KernDescVal.TSize, KernDescName);
1696
1697 DP("After loading global for %s KernDesc \n", KernDescName);
1698 DP("KernDesc: Version: %d\n", KernDescVal.Version);
1699 DP("KernDesc: TSize: %d\n", KernDescVal.TSize);
1700 DP("KernDesc: WG_Size: %d\n", KernDescVal.WG_Size);
1701 DP("KernDesc: Mode: %d\n", KernDescVal.Mode);
1702
1703 // Get ExecMode
1704 ExecModeVal = KernDescVal.Mode;
1705 DP("ExecModeVal %d\n", ExecModeVal);
1706 if (KernDescVal.WG_Size == 0) {
1707 KernDescVal.WG_Size = RTLDeviceInfoTy::Default_WG_Size;
1708 DP("Setting KernDescVal.WG_Size to default %d\n", KernDescVal.WG_Size);
1709 }
1710 WGSizeVal = KernDescVal.WG_Size;
1711 DP("WGSizeVal %d\n", WGSizeVal);
1712 check("Loading KernDesc computation property", err);
    } else {
      DP("Warning: Loading KernDesc '%s' - symbol not found\n", KernDescName);
1715
      // Fall back to the separate exec_mode symbol.
1717 std::string ExecModeNameStr(e->name);
1718 ExecModeNameStr += "_exec_mode";
1719 const char *ExecModeName = ExecModeNameStr.c_str();
1720
1721 void *ExecModePtr;
1722 uint32_t varsize;
1723 err = interop_get_symbol_info((char *)image->ImageStart, img_size,
1724 ExecModeName, &ExecModePtr, &varsize);
1725
1726 if (err == HSA_STATUS_SUCCESS) {
1727 if ((size_t)varsize != sizeof(int8_t)) {
1728 DP("Loading global computation properties '%s' - size mismatch(%u != "
1729 "%lu)\n",
1730 ExecModeName, varsize, sizeof(int8_t));
1731 return NULL;
1732 }
1733
1734 memcpy(&ExecModeVal, ExecModePtr, (size_t)varsize);
1735
1736 DP("After loading global for %s ExecMode = %d\n", ExecModeName,
1737 ExecModeVal);
1738
1739 if (ExecModeVal < 0 || ExecModeVal > 2) {
1740 DP("Error wrong exec_mode value specified in HSA code object file: "
1741 "%d\n",
1742 ExecModeVal);
1743 return NULL;
1744 }
1745 } else {
1746 DP("Loading global exec_mode '%s' - symbol missing, using default "
1747 "value "
1748 "GENERIC (1)\n",
1749 ExecModeName);
1750 }
1751 check("Loading computation property", err);
1752
1753 // Flat group size
1754 std::string WGSizeNameStr(e->name);
1755 WGSizeNameStr += "_wg_size";
1756 const char *WGSizeName = WGSizeNameStr.c_str();
1757
1758 void *WGSizePtr;
1759 uint32_t WGSize;
1760 err = interop_get_symbol_info((char *)image->ImageStart, img_size,
1761 WGSizeName, &WGSizePtr, &WGSize);
1762
1763 if (err == HSA_STATUS_SUCCESS) {
1764 if ((size_t)WGSize != sizeof(int16_t)) {
1765 DP("Loading global computation properties '%s' - size mismatch (%u "
1766 "!= "
1767 "%lu)\n",
1768 WGSizeName, WGSize, sizeof(int16_t));
1769 return NULL;
1770 }
1771
1772 memcpy(&WGSizeVal, WGSizePtr, (size_t)WGSize);
1773
1774 DP("After loading global for %s WGSize = %d\n", WGSizeName, WGSizeVal);
1775
1776 if (WGSizeVal < RTLDeviceInfoTy::Default_WG_Size ||
1777 WGSizeVal > RTLDeviceInfoTy::Max_WG_Size) {
1778 DP("Error wrong WGSize value specified in HSA code object file: "
1779 "%d\n",
1780 WGSizeVal);
1781 WGSizeVal = RTLDeviceInfoTy::Default_WG_Size;
1782 }
1783 } else {
1784 DP("Warning: Loading WGSize '%s' - symbol not found, "
1785 "using default value %d\n",
1786 WGSizeName, WGSizeVal);
1787 }
1788
1789 check("Loading WGSize computation property", err);
1790 }
1791
1792 KernelsList.push_back(KernelTy(ExecModeVal, WGSizeVal, device_id,
1793 CallStackAddr, e->name, kernarg_segment_size,
1794 DeviceInfo.KernArgPool));
1795 __tgt_offload_entry entry = *e;
1796 entry.addr = (void *)&KernelsList.back();
1797 DeviceInfo.addOffloadEntry(device_id, entry);
1798 DP("Entry point %ld maps to %s\n", e - HostBegin, e->name);
1799 }
1800
1801 return DeviceInfo.getOffloadEntriesTable(device_id);
1802 }
1803
1804 void *__tgt_rtl_data_alloc(int device_id, int64_t size, void *, int32_t kind) {
1805 void *ptr = NULL;
1806 assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1807
1808 if (kind != TARGET_ALLOC_DEFAULT) {
1809 REPORT("Invalid target data allocation kind or requested allocator not "
1810 "implemented yet\n");
1811 return NULL;
1812 }
1813
1814 hsa_status_t err = core::Runtime::DeviceMalloc(&ptr, size, device_id);
1815 DP("Tgt alloc data %ld bytes, (tgt:%016llx).\n", size,
1816 (long long unsigned)(Elf64_Addr)ptr);
1817 ptr = (err == HSA_STATUS_SUCCESS) ? ptr : NULL;
1818 return ptr;
1819 }
1820
1821 int32_t __tgt_rtl_data_submit(int device_id, void *tgt_ptr, void *hst_ptr,
1822 int64_t size) {
1823 assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
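  // The synchronous entry point issues the asynchronous copy on a locally
  // scoped AsyncInfo and then waits on it via __tgt_rtl_synchronize below.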
1824 __tgt_async_info AsyncInfo;
1825 int32_t rc = dataSubmit(device_id, tgt_ptr, hst_ptr, size, &AsyncInfo);
1826 if (rc != OFFLOAD_SUCCESS)
1827 return OFFLOAD_FAIL;
1828
1829 return __tgt_rtl_synchronize(device_id, &AsyncInfo);
1830 }
1831
1832 int32_t __tgt_rtl_data_submit_async(int device_id, void *tgt_ptr, void *hst_ptr,
1833 int64_t size, __tgt_async_info *AsyncInfo) {
1834 assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1835 if (AsyncInfo) {
1836 initAsyncInfo(AsyncInfo);
1837 return dataSubmit(device_id, tgt_ptr, hst_ptr, size, AsyncInfo);
1838 } else {
1839 return __tgt_rtl_data_submit(device_id, tgt_ptr, hst_ptr, size);
1840 }
1841 }
1842
1843 int32_t __tgt_rtl_data_retrieve(int device_id, void *hst_ptr, void *tgt_ptr,
1844 int64_t size) {
1845 assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1846 __tgt_async_info AsyncInfo;
1847 int32_t rc = dataRetrieve(device_id, hst_ptr, tgt_ptr, size, &AsyncInfo);
1848 if (rc != OFFLOAD_SUCCESS)
1849 return OFFLOAD_FAIL;
1850
1851 return __tgt_rtl_synchronize(device_id, &AsyncInfo);
1852 }
1853
1854 int32_t __tgt_rtl_data_retrieve_async(int device_id, void *hst_ptr,
1855 void *tgt_ptr, int64_t size,
1856 __tgt_async_info *AsyncInfo) {
1857 assert(AsyncInfo && "AsyncInfo is nullptr");
1858 assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1859 initAsyncInfo(AsyncInfo);
1860 return dataRetrieve(device_id, hst_ptr, tgt_ptr, size, AsyncInfo);
1861 }
1862
1863 int32_t __tgt_rtl_data_delete(int device_id, void *tgt_ptr) {
1864 assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1865 hsa_status_t err;
1866 DP("Tgt free data (tgt:%016llx).\n", (long long unsigned)(Elf64_Addr)tgt_ptr);
1867 err = core::Runtime::Memfree(tgt_ptr);
1868 if (err != HSA_STATUS_SUCCESS) {
1869 DP("Error when freeing CUDA memory\n");
1870 return OFFLOAD_FAIL;
1871 }
1872 return OFFLOAD_SUCCESS;
1873 }
1874
1875 // Determine launch values for kernel.
1876 struct launchVals {
1877 int WorkgroupSize;
1878 int GridSize;
1879 };
1880 launchVals getLaunchVals(EnvironmentVariables Env, int ConstWGSize,
1881 int ExecutionMode, int num_teams, int thread_limit,
1882 uint64_t loop_tripcount, int DeviceNumTeams) {
1883
1884 int threadsPerGroup = RTLDeviceInfoTy::Default_WG_Size;
1885 int num_groups = 0;
1886
1887 int Max_Teams =
1888 Env.MaxTeamsDefault > 0 ? Env.MaxTeamsDefault : DeviceNumTeams;
1889 if (Max_Teams > RTLDeviceInfoTy::HardTeamLimit)
1890 Max_Teams = RTLDeviceInfoTy::HardTeamLimit;
1891
1892 if (print_kernel_trace & STARTUP_DETAILS) {
1893 fprintf(stderr, "RTLDeviceInfoTy::Max_Teams: %d\n",
1894 RTLDeviceInfoTy::Max_Teams);
1895 fprintf(stderr, "Max_Teams: %d\n", Max_Teams);
1896 fprintf(stderr, "RTLDeviceInfoTy::Warp_Size: %d\n",
1897 RTLDeviceInfoTy::Warp_Size);
1898 fprintf(stderr, "RTLDeviceInfoTy::Max_WG_Size: %d\n",
1899 RTLDeviceInfoTy::Max_WG_Size);
1900 fprintf(stderr, "RTLDeviceInfoTy::Default_WG_Size: %d\n",
1901 RTLDeviceInfoTy::Default_WG_Size);
1902 fprintf(stderr, "thread_limit: %d\n", thread_limit);
1903 fprintf(stderr, "threadsPerGroup: %d\n", threadsPerGroup);
1904 fprintf(stderr, "ConstWGSize: %d\n", ConstWGSize);
1905 }
1906 // check for thread_limit() clause
1907 if (thread_limit > 0) {
1908 threadsPerGroup = thread_limit;
1909 DP("Setting threads per block to requested %d\n", thread_limit);
1910 if (ExecutionMode == GENERIC) { // Add master warp for GENERIC
1911 threadsPerGroup += RTLDeviceInfoTy::Warp_Size;
1912 DP("Adding master wavefront: +%d threads\n", RTLDeviceInfoTy::Warp_Size);
1913 }
1914 if (threadsPerGroup > RTLDeviceInfoTy::Max_WG_Size) { // limit to max
1915 threadsPerGroup = RTLDeviceInfoTy::Max_WG_Size;
1916 DP("Setting threads per block to maximum %d\n", threadsPerGroup);
1917 }
1918 }
1919 // check flat_max_work_group_size attr here
1920 if (threadsPerGroup > ConstWGSize) {
1921 threadsPerGroup = ConstWGSize;
1922 DP("Reduced threadsPerGroup to flat-attr-group-size limit %d\n",
1923 threadsPerGroup);
1924 }
1925 if (print_kernel_trace & STARTUP_DETAILS)
1926 fprintf(stderr, "threadsPerGroup: %d\n", threadsPerGroup);
1927 DP("Preparing %d threads\n", threadsPerGroup);
1928
1929 // Set default num_groups (teams)
1930 if (Env.TeamLimit > 0)
1931 num_groups = (Max_Teams < Env.TeamLimit) ? Max_Teams : Env.TeamLimit;
1932 else
1933 num_groups = Max_Teams;
1934 DP("Set default num of groups %d\n", num_groups);
1935
1936 if (print_kernel_trace & STARTUP_DETAILS) {
1937 fprintf(stderr, "num_groups: %d\n", num_groups);
1938 fprintf(stderr, "num_teams: %d\n", num_teams);
1939 }
1940
  // Reduce num_groups if threadsPerGroup exceeds
  // RTLDeviceInfoTy::Default_WG_Size. This reduction is typical for the
  // default case (no thread_limit clause), or when the user requests an
  // excessive num_teams value.
  // FIXME: We can't distinguish between a constant and a variable thread
  // limit, so we only handle constant thread_limits.
1946 if (threadsPerGroup >
1947 RTLDeviceInfoTy::Default_WG_Size) // 256 < threadsPerGroup <= 1024
1948 // Should we round threadsPerGroup up to nearest RTLDeviceInfoTy::Warp_Size
1949 // here?
1950 num_groups = (Max_Teams * RTLDeviceInfoTy::Max_WG_Size) / threadsPerGroup;
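  // The scaling above (Max_WG_Size / threadsPerGroup) keeps the total number
  // of work-items roughly constant as the workgroup size grows.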
1951
1952 // check for num_teams() clause
1953 if (num_teams > 0) {
1954 num_groups = (num_teams < num_groups) ? num_teams : num_groups;
1955 }
1956 if (print_kernel_trace & STARTUP_DETAILS) {
1957 fprintf(stderr, "num_groups: %d\n", num_groups);
1958 fprintf(stderr, "Env.NumTeams %d\n", Env.NumTeams);
1959 fprintf(stderr, "Env.TeamLimit %d\n", Env.TeamLimit);
1960 }
1961
1962 if (Env.NumTeams > 0) {
1963 num_groups = (Env.NumTeams < num_groups) ? Env.NumTeams : num_groups;
1964 DP("Modifying teams based on Env.NumTeams %d\n", Env.NumTeams);
1965 } else if (Env.TeamLimit > 0) {
1966 num_groups = (Env.TeamLimit < num_groups) ? Env.TeamLimit : num_groups;
1967 DP("Modifying teams based on Env.TeamLimit%d\n", Env.TeamLimit);
1968 } else {
1969 if (num_teams <= 0) {
1970 if (loop_tripcount > 0) {
1971 if (ExecutionMode == SPMD) {
1972 // round up to the nearest integer
1973 num_groups = ((loop_tripcount - 1) / threadsPerGroup) + 1;
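          // e.g. a trip count of 10000 with 256 threads per group gives
          // ((10000 - 1) / 256) + 1 = 40 teams.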
1974 } else if (ExecutionMode == GENERIC) {
1975 num_groups = loop_tripcount;
1976 } else /* ExecutionMode == SPMD_GENERIC */ {
1977 // This is a generic kernel that was transformed to use SPMD-mode
1978 // execution but uses Generic-mode semantics for scheduling.
1979 num_groups = loop_tripcount;
1980 }
1981 DP("Using %d teams due to loop trip count %" PRIu64 " and number of "
1982 "threads per block %d\n",
1983 num_groups, loop_tripcount, threadsPerGroup);
1984 }
1985 } else {
1986 num_groups = num_teams;
1987 }
1988 if (num_groups > Max_Teams) {
1989 num_groups = Max_Teams;
1990 if (print_kernel_trace & STARTUP_DETAILS)
1991 fprintf(stderr, "Limiting num_groups %d to Max_Teams %d \n", num_groups,
1992 Max_Teams);
1993 }
1994 if (num_groups > num_teams && num_teams > 0) {
1995 num_groups = num_teams;
1996 if (print_kernel_trace & STARTUP_DETAILS)
1997 fprintf(stderr, "Limiting num_groups %d to clause num_teams %d \n",
1998 num_groups, num_teams);
1999 }
2000 }
2001
  // An explicit num_teams clause is always honored, capped only by
  // Env.MaxTeamsDefault below.
2003 if (num_teams > 0) {
2004 num_groups = num_teams;
2005 // Cap num_groups to EnvMaxTeamsDefault if set.
2006 if (Env.MaxTeamsDefault > 0 && num_groups > Env.MaxTeamsDefault)
2007 num_groups = Env.MaxTeamsDefault;
2008 }
2009 if (print_kernel_trace & STARTUP_DETAILS) {
2010 fprintf(stderr, "threadsPerGroup: %d\n", threadsPerGroup);
2011 fprintf(stderr, "num_groups: %d\n", num_groups);
2012 fprintf(stderr, "loop_tripcount: %ld\n", loop_tripcount);
2013 }
2014 DP("Final %d num_groups and %d threadsPerGroup\n", num_groups,
2015 threadsPerGroup);
2016
2017 launchVals res;
2018 res.WorkgroupSize = threadsPerGroup;
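  // HSA expresses grid size in work-items, so the grid is the workgroup size
  // times the number of groups.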
2019 res.GridSize = threadsPerGroup * num_groups;
2020 return res;
2021 }
2022
2023 static uint64_t acquire_available_packet_id(hsa_queue_t *queue) {
2024 uint64_t packet_id = hsa_queue_add_write_index_relaxed(queue, 1);
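  // The write index was just bumped to reserve a slot; spin until the read
  // index advances far enough that the reserved slot lies within the ring
  // buffer.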
2025 bool full = true;
2026 while (full) {
2027 full =
2028 packet_id >= (queue->size + hsa_queue_load_read_index_scacquire(queue));
2029 }
2030 return packet_id;
2031 }
2032
2033 static int32_t __tgt_rtl_run_target_team_region_locked(
2034 int32_t device_id, void *tgt_entry_ptr, void **tgt_args,
2035 ptrdiff_t *tgt_offsets, int32_t arg_num, int32_t num_teams,
2036 int32_t thread_limit, uint64_t loop_tripcount);
2037
2038 int32_t __tgt_rtl_run_target_team_region(int32_t device_id, void *tgt_entry_ptr,
2039 void **tgt_args,
2040 ptrdiff_t *tgt_offsets,
2041 int32_t arg_num, int32_t num_teams,
2042 int32_t thread_limit,
2043 uint64_t loop_tripcount) {
2044
2045 DeviceInfo.load_run_lock.lock_shared();
2046 int32_t res = __tgt_rtl_run_target_team_region_locked(
2047 device_id, tgt_entry_ptr, tgt_args, tgt_offsets, arg_num, num_teams,
2048 thread_limit, loop_tripcount);
2049
2050 DeviceInfo.load_run_lock.unlock_shared();
2051 return res;
2052 }
2053
2054 int32_t __tgt_rtl_run_target_team_region_locked(
2055 int32_t device_id, void *tgt_entry_ptr, void **tgt_args,
2056 ptrdiff_t *tgt_offsets, int32_t arg_num, int32_t num_teams,
2057 int32_t thread_limit, uint64_t loop_tripcount) {
  // Update the thread limit in GPU memory if it is uninitialized or was
  // specified by the host.
2061
2062 DP("Run target team region thread_limit %d\n", thread_limit);
2063
2064 // All args are references.
2065 std::vector<void *> args(arg_num);
2066 std::vector<void *> ptrs(arg_num);
2067
2068 DP("Arg_num: %d\n", arg_num);
2069 for (int32_t i = 0; i < arg_num; ++i) {
2070 ptrs[i] = (void *)((intptr_t)tgt_args[i] + tgt_offsets[i]);
2071 args[i] = &ptrs[i];
2072 DP("Offseted base: arg[%d]:" DPxMOD "\n", i, DPxPTR(ptrs[i]));
2073 }
2074
2075 KernelTy *KernelInfo = (KernelTy *)tgt_entry_ptr;
2076
2077 std::string kernel_name = std::string(KernelInfo->Name);
2078 auto &KernelInfoTable = DeviceInfo.KernelInfoTable;
2079 if (KernelInfoTable[device_id].find(kernel_name) ==
2080 KernelInfoTable[device_id].end()) {
2081 DP("Kernel %s not found\n", kernel_name.c_str());
2082 return OFFLOAD_FAIL;
2083 }
2084
2085 const atl_kernel_info_t KernelInfoEntry =
2086 KernelInfoTable[device_id][kernel_name];
2087 const uint32_t group_segment_size = KernelInfoEntry.group_segment_size;
2088 const uint32_t sgpr_count = KernelInfoEntry.sgpr_count;
2089 const uint32_t vgpr_count = KernelInfoEntry.vgpr_count;
2090 const uint32_t sgpr_spill_count = KernelInfoEntry.sgpr_spill_count;
2091 const uint32_t vgpr_spill_count = KernelInfoEntry.vgpr_spill_count;
2092
2093 assert(arg_num == (int)KernelInfoEntry.num_args);
2094
2095 /*
2096 * Set limit based on ThreadsPerGroup and GroupsPerDevice
2097 */
2098 launchVals LV = getLaunchVals(DeviceInfo.Env, KernelInfo->ConstWGSize,
2099 KernelInfo->ExecutionMode,
2100 num_teams, // From run_region arg
2101 thread_limit, // From run_region arg
2102 loop_tripcount, // From run_region arg
2103 DeviceInfo.NumTeams[KernelInfo->device_id]);
2104 const int GridSize = LV.GridSize;
2105 const int WorkgroupSize = LV.WorkgroupSize;
2106
2107 if (print_kernel_trace >= LAUNCH) {
2108 int num_groups = GridSize / WorkgroupSize;
2109 // enum modes are SPMD, GENERIC, NONE 0,1,2
    // If doing RTL timing, print to stderr unless stdout was requested.
2111 bool traceToStdout = print_kernel_trace & (RTL_TO_STDOUT | RTL_TIMING);
2112 fprintf(traceToStdout ? stdout : stderr,
2113 "DEVID:%2d SGN:%1d ConstWGSize:%-4d args:%2d teamsXthrds:(%4dX%4d) "
2114 "reqd:(%4dX%4d) lds_usage:%uB sgpr_count:%u vgpr_count:%u "
2115 "sgpr_spill_count:%u vgpr_spill_count:%u tripcount:%lu n:%s\n",
2116 device_id, KernelInfo->ExecutionMode, KernelInfo->ConstWGSize,
2117 arg_num, num_groups, WorkgroupSize, num_teams, thread_limit,
2118 group_segment_size, sgpr_count, vgpr_count, sgpr_spill_count,
2119 vgpr_spill_count, loop_tripcount, KernelInfo->Name);
2120 }
2121
2122 // Run on the device.
2123 {
2124 hsa_queue_t *queue = DeviceInfo.HSAQueues[device_id];
2125 if (!queue) {
2126 return OFFLOAD_FAIL;
2127 }
2128 uint64_t packet_id = acquire_available_packet_id(queue);
2129
2130 const uint32_t mask = queue->size - 1; // size is a power of 2
2131 hsa_kernel_dispatch_packet_t *packet =
2132 (hsa_kernel_dispatch_packet_t *)queue->base_address +
2133 (packet_id & mask);
2134
2135 // packet->header is written last
2136 packet->setup = UINT16_C(1) << HSA_KERNEL_DISPATCH_PACKET_SETUP_DIMENSIONS;
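    // This is a 1-D dispatch, so only the x dimension carries the real sizes.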
2137 packet->workgroup_size_x = WorkgroupSize;
2138 packet->workgroup_size_y = 1;
2139 packet->workgroup_size_z = 1;
2140 packet->reserved0 = 0;
2141 packet->grid_size_x = GridSize;
2142 packet->grid_size_y = 1;
2143 packet->grid_size_z = 1;
2144 packet->private_segment_size = KernelInfoEntry.private_segment_size;
2145 packet->group_segment_size = KernelInfoEntry.group_segment_size;
2146 packet->kernel_object = KernelInfoEntry.kernel_object;
2147 packet->kernarg_address = 0; // use the block allocator
2148 packet->reserved2 = 0; // atmi writes id_ here
2149 packet->completion_signal = {0}; // may want a pool of signals
2150
2151 KernelArgPool *ArgPool = nullptr;
2152 {
2153 auto it = KernelArgPoolMap.find(std::string(KernelInfo->Name));
2154 if (it != KernelArgPoolMap.end()) {
2155 ArgPool = (it->second).get();
2156 }
2157 }
2158 if (!ArgPool) {
2159 DP("Warning: No ArgPool for %s on device %d\n", KernelInfo->Name,
2160 device_id);
2161 }
2162 {
2163 void *kernarg = nullptr;
2164 if (ArgPool) {
2165 assert(ArgPool->kernarg_segment_size == (arg_num * sizeof(void *)));
2166 kernarg = ArgPool->allocate(arg_num);
2167 }
2168 if (!kernarg) {
2169 DP("Allocate kernarg failed\n");
2170 return OFFLOAD_FAIL;
2171 }
2172
2173 // Copy explicit arguments
2174 for (int i = 0; i < arg_num; i++) {
2175 memcpy((char *)kernarg + sizeof(void *) * i, args[i], sizeof(void *));
2176 }
2177
2178 // Initialize implicit arguments. ATMI seems to leave most fields
2179 // uninitialized
2180 atmi_implicit_args_t *impl_args =
2181 reinterpret_cast<atmi_implicit_args_t *>(
2182 static_cast<char *>(kernarg) + ArgPool->kernarg_segment_size);
2183 memset(impl_args, 0,
2184 sizeof(atmi_implicit_args_t)); // may not be necessary
2185 impl_args->offset_x = 0;
2186 impl_args->offset_y = 0;
2187 impl_args->offset_z = 0;
2188
      // Assign a hostcall buffer for the selected queue.
2190 if (__atomic_load_n(&DeviceInfo.hostcall_required, __ATOMIC_ACQUIRE)) {
2191 // hostrpc_assign_buffer is not thread safe, and this function is
2192 // under a multiple reader lock, not a writer lock.
2193 static pthread_mutex_t hostcall_init_lock = PTHREAD_MUTEX_INITIALIZER;
2194 pthread_mutex_lock(&hostcall_init_lock);
2195 impl_args->hostcall_ptr = hostrpc_assign_buffer(
2196 DeviceInfo.HSAAgents[device_id], queue, device_id);
2197 pthread_mutex_unlock(&hostcall_init_lock);
2198 if (!impl_args->hostcall_ptr) {
2199 DP("hostrpc_assign_buffer failed, gpu would dereference null and "
2200 "error\n");
2201 return OFFLOAD_FAIL;
2202 }
2203 }
2204
2205 packet->kernarg_address = kernarg;
2206 }
2207
2208 {
2209 hsa_signal_t s = DeviceInfo.FreeSignalPool.pop();
2210 if (s.handle == 0) {
2211 DP("Failed to get signal instance\n");
2212 return OFFLOAD_FAIL;
2213 }
2214 packet->completion_signal = s;
2215 hsa_signal_store_relaxed(packet->completion_signal, 1);
2216 }
2217
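    // Publish the packet: the header and setup fields are written last with a
    // single release store so the packet processor never observes a partially
    // initialized packet, then the doorbell is rung with the packet id.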
2218 core::packet_store_release(reinterpret_cast<uint32_t *>(packet),
2219 core::create_header(), packet->setup);
2220
2221 hsa_signal_store_relaxed(queue->doorbell_signal, packet_id);
2222
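    // Block until the GPU decrements the completion signal to zero, i.e. the
    // kernel has finished executing.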
2223 while (hsa_signal_wait_scacquire(packet->completion_signal,
2224 HSA_SIGNAL_CONDITION_EQ, 0, UINT64_MAX,
2225 HSA_WAIT_STATE_BLOCKED) != 0)
2226 ;
2227
2228 assert(ArgPool);
2229 ArgPool->deallocate(packet->kernarg_address);
2230 DeviceInfo.FreeSignalPool.push(packet->completion_signal);
2231 }
2232
2233 DP("Kernel completed\n");
2234 return OFFLOAD_SUCCESS;
2235 }
2236
2237 int32_t __tgt_rtl_run_target_region(int32_t device_id, void *tgt_entry_ptr,
2238 void **tgt_args, ptrdiff_t *tgt_offsets,
2239 int32_t arg_num) {
  // Use a single team and let the runtime pick the default thread count.
2242 int32_t team_num = 1;
2243 int32_t thread_limit = 0; // use default
2244 return __tgt_rtl_run_target_team_region(device_id, tgt_entry_ptr, tgt_args,
2245 tgt_offsets, arg_num, team_num,
2246 thread_limit, 0);
2247 }
2248
2249 int32_t __tgt_rtl_run_target_region_async(int32_t device_id,
2250 void *tgt_entry_ptr, void **tgt_args,
2251 ptrdiff_t *tgt_offsets,
2252 int32_t arg_num,
2253 __tgt_async_info *AsyncInfo) {
2254 assert(AsyncInfo && "AsyncInfo is nullptr");
2255 initAsyncInfo(AsyncInfo);
2256
  // Use a single team and let the runtime pick the default thread count.
2259 int32_t team_num = 1;
2260 int32_t thread_limit = 0; // use default
2261 return __tgt_rtl_run_target_team_region(device_id, tgt_entry_ptr, tgt_args,
2262 tgt_offsets, arg_num, team_num,
2263 thread_limit, 0);
2264 }
2265
2266 int32_t __tgt_rtl_synchronize(int32_t device_id, __tgt_async_info *AsyncInfo) {
2267 assert(AsyncInfo && "AsyncInfo is nullptr");
2268
  // The CUDA plugin asserts that AsyncInfo->Queue is non-null, but this
  // invariant is not guaranteed by devices.cpp for amdgcn.
  // assert(AsyncInfo->Queue && "AsyncInfo->Queue is nullptr");
2272 if (AsyncInfo->Queue) {
2273 finiAsyncInfo(AsyncInfo);
2274 }
2275 return OFFLOAD_SUCCESS;
2276 }
2277
2278 namespace core {
2279 hsa_status_t allow_access_to_all_gpu_agents(void *ptr) {
2280 return hsa_amd_agents_allow_access(DeviceInfo.HSAAgents.size(),
2281 &DeviceInfo.HSAAgents[0], NULL, ptr);
2282 }
2283
2284 } // namespace core
2285