/*===--------------------------------------------------------------------------
 *              ATMI (Asynchronous Task and Memory Interface)
 *
 * This file is distributed under the MIT License. See LICENSE.txt for details.
 *===------------------------------------------------------------------------*/
#include <gelf.h>
#include <libelf.h>

#include <cassert>
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "internal.h"
#include "machine.h"
#include "rt.h"

#include "msgpack.h"

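// Convenience wrapper used by the metadata parsing code below: if a msgpack
// lookup reports a non-zero status, log the failing step and bail out of the
// enclosing function with an invalid-code-object error. The empty else branch
// keeps the macro safe to use inside surrounding if/else chains.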
#define msgpackErrorCheck(msg, status)                                         \
  if (status != 0) {                                                           \
    printf("[%s:%d] %s failed\n", __FILE__, __LINE__, #msg);                   \
    return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;                               \
  } else {                                                                     \
  }

typedef unsigned char *address;
/*
 * Note descriptors.
 */
typedef struct {
  uint32_t n_namesz; /* Length of note's name. */
  uint32_t n_descsz; /* Length of note's value. */
  uint32_t n_type;   /* Type of note. */
  // then name
  // then padding, optional
  // then desc, at 4 byte alignment (not 8, despite being elf64)
} Elf_Note;

// The include file and the structs/enums below have been replicated on a
// per-use basis. For example, llvm::AMDGPU::HSAMD::Kernel::Metadata has
// several fields, but we may care only about kernargSegmentSize_ for now, so
// our KernelMD implementation includes just that field. We chose to replicate
// rather than include so that compiling the runtime does not force a
// dependency on LLVM_INCLUDE_DIR.
// #include "llvm/Support/AMDGPUMetadata.h"
// typedef llvm::AMDGPU::HSAMD::Metadata CodeObjectMD;
// typedef llvm::AMDGPU::HSAMD::Kernel::Metadata KernelMD;
// typedef llvm::AMDGPU::HSAMD::Kernel::Arg::Metadata KernelArgMD;
// using llvm::AMDGPU::HSAMD::AccessQualifier;
// using llvm::AMDGPU::HSAMD::AddressSpaceQualifier;
// using llvm::AMDGPU::HSAMD::ValueKind;
// using llvm::AMDGPU::HSAMD::ValueType;

class KernelArgMD {
public:
  enum class ValueKind {
    HiddenGlobalOffsetX,
    HiddenGlobalOffsetY,
    HiddenGlobalOffsetZ,
    HiddenNone,
    HiddenPrintfBuffer,
    HiddenDefaultQueue,
    HiddenCompletionAction,
    HiddenMultiGridSyncArg,
    HiddenHostcallBuffer,
    Unknown
  };

  KernelArgMD()
      : name_(std::string()), typeName_(std::string()), size_(0), offset_(0),
        align_(0), valueKind_(ValueKind::Unknown) {}

  // fields
  std::string name_;
  std::string typeName_;
  uint32_t size_;
  uint32_t offset_;
  uint32_t align_;
  ValueKind valueKind_;
};

class KernelMD {
public:
  KernelMD() : kernargSegmentSize_(0ull) {}

  // fields
  uint64_t kernargSegmentSize_;
};

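// Maps the .value_kind strings found in code object metadata (both the older
// CamelCase spellings and the v3 snake_case spellings) to the subset of
// argument kinds the runtime cares about.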
static const std::map<std::string, KernelArgMD::ValueKind> ArgValueKind = {
    //    Including only those fields that are relevant to the runtime.
    //    {"ByValue", KernelArgMD::ValueKind::ByValue},
    //    {"GlobalBuffer", KernelArgMD::ValueKind::GlobalBuffer},
    //    {"DynamicSharedPointer",
    //    KernelArgMD::ValueKind::DynamicSharedPointer},
    //    {"Sampler", KernelArgMD::ValueKind::Sampler},
    //    {"Image", KernelArgMD::ValueKind::Image},
    //    {"Pipe", KernelArgMD::ValueKind::Pipe},
    //    {"Queue", KernelArgMD::ValueKind::Queue},
    {"HiddenGlobalOffsetX", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
    {"HiddenGlobalOffsetY", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
    {"HiddenGlobalOffsetZ", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
    {"HiddenNone", KernelArgMD::ValueKind::HiddenNone},
    {"HiddenPrintfBuffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
    {"HiddenDefaultQueue", KernelArgMD::ValueKind::HiddenDefaultQueue},
    {"HiddenCompletionAction", KernelArgMD::ValueKind::HiddenCompletionAction},
    {"HiddenMultiGridSyncArg", KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
    {"HiddenHostcallBuffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
    // v3
    //    {"by_value", KernelArgMD::ValueKind::ByValue},
    //    {"global_buffer", KernelArgMD::ValueKind::GlobalBuffer},
    //    {"dynamic_shared_pointer",
    //    KernelArgMD::ValueKind::DynamicSharedPointer},
    //    {"sampler", KernelArgMD::ValueKind::Sampler},
    //    {"image", KernelArgMD::ValueKind::Image},
    //    {"pipe", KernelArgMD::ValueKind::Pipe},
    //    {"queue", KernelArgMD::ValueKind::Queue},
    {"hidden_global_offset_x", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
    {"hidden_global_offset_y", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
    {"hidden_global_offset_z", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
    {"hidden_none", KernelArgMD::ValueKind::HiddenNone},
    {"hidden_printf_buffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
    {"hidden_default_queue", KernelArgMD::ValueKind::HiddenDefaultQueue},
    {"hidden_completion_action",
     KernelArgMD::ValueKind::HiddenCompletionAction},
    {"hidden_multigrid_sync_arg",
     KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
    {"hidden_hostcall_buffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
};

// global variables. TODO: Get rid of these
atmi_machine_t g_atmi_machine;
ATLMachine g_atl_machine;

hsa_region_t atl_gpu_kernarg_region;
std::vector<hsa_amd_memory_pool_t> atl_gpu_kernarg_pools;
hsa_region_t atl_cpu_kernarg_region;

static std::vector<hsa_executable_t> g_executables;

std::map<std::string, std::string> KernelNameMap;
std::vector<std::map<std::string, atl_kernel_info_t>> KernelInfoTable;
std::vector<std::map<std::string, atl_symbol_info_t>> SymbolInfoTable;

bool g_atmi_initialized = false;
bool g_atmi_hostcall_required = false;

struct timespec context_init_time;
int context_init_time_init = 0;

/*
 * atlc holds all of the runtime's internal global state.
 * The structure atl_context_t is defined in atl_internal.h.
 * Most references use the global structure atlc directly;
 * dereferencing the pointer atlc_p is equivalent to using atlc.
 */

atl_context_t atlc = {.struct_initialized = false};
atl_context_t *atlc_p = NULL;

namespace core {
/* Machine Info */
atmi_machine_t *Runtime::GetMachineInfo() {
  if (!atlc.g_hsa_initialized)
    return NULL;
  return &g_atmi_machine;
}

void atl_set_atmi_initialized() {
  // FIXME: thread safe? locks?
  g_atmi_initialized = true;
}

void atl_reset_atmi_initialized() {
  // FIXME: thread safe? locks?
  g_atmi_initialized = false;
}

bool atl_is_atmi_initialized() { return g_atmi_initialized; }

void allow_access_to_all_gpu_agents(void *ptr) {
  hsa_status_t err;
  std::vector<ATLGPUProcessor> &gpu_procs =
      g_atl_machine.processors<ATLGPUProcessor>();
  std::vector<hsa_agent_t> agents;
  for (uint32_t i = 0; i < gpu_procs.size(); i++) {
    agents.push_back(gpu_procs[i].agent());
  }
  err = hsa_amd_agents_allow_access(agents.size(), &agents[0], NULL, ptr);
  ErrorCheck(Allow agents ptr access, err);
}

atmi_status_t Runtime::Initialize() {
  atmi_devtype_t devtype = ATMI_DEVTYPE_GPU;
  if (atl_is_atmi_initialized())
    return ATMI_STATUS_SUCCESS;

  if (devtype == ATMI_DEVTYPE_ALL || devtype & ATMI_DEVTYPE_GPU) {
    ATMIErrorCheck(GPU context init, atl_init_gpu_context());
  }

  atl_set_atmi_initialized();
  return ATMI_STATUS_SUCCESS;
}

atmi_status_t Runtime::Finalize() {
  hsa_status_t err;

  for (uint32_t i = 0; i < g_executables.size(); i++) {
    err = hsa_executable_destroy(g_executables[i]);
    ErrorCheck(Destroying executable, err);
  }

  for (uint32_t i = 0; i < SymbolInfoTable.size(); i++) {
    SymbolInfoTable[i].clear();
  }
  SymbolInfoTable.clear();
  for (uint32_t i = 0; i < KernelInfoTable.size(); i++) {
    KernelInfoTable[i].clear();
  }
  KernelInfoTable.clear();

  atl_reset_atmi_initialized();
  err = hsa_shut_down();
  ErrorCheck(Shutting down HSA, err);

  return ATMI_STATUS_SUCCESS;
}

void atmi_init_context_structs() {
  atlc_p = &atlc;
  atlc.struct_initialized = true; /* This only gets called one time */
  atlc.g_hsa_initialized = false;
  atlc.g_gpu_initialized = false;
  atlc.g_tasks_initialized = false;
}

// Implement memory_pool iteration function
static hsa_status_t get_memory_pool_info(hsa_amd_memory_pool_t memory_pool,
                                         void *data) {
  ATLProcessor *proc = reinterpret_cast<ATLProcessor *>(data);
  hsa_status_t err = HSA_STATUS_SUCCESS;
  // Check if the memory_pool is allowed to allocate, i.e. do not return group
  // memory
  bool alloc_allowed = false;
  err = hsa_amd_memory_pool_get_info(
      memory_pool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
      &alloc_allowed);
  ErrorCheck(Alloc allowed in memory pool check, err);
  if (alloc_allowed) {
    uint32_t global_flag = 0;
    err = hsa_amd_memory_pool_get_info(
        memory_pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &global_flag);
    ErrorCheck(Get memory pool info, err);
    if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED & global_flag) {
      ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_FINE_GRAINED);
      proc->addMemory(new_mem);
      if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_KERNARG_INIT & global_flag) {
        DEBUG_PRINT("GPU kernel args pool handle: %lu\n", memory_pool.handle);
        atl_gpu_kernarg_pools.push_back(memory_pool);
      }
    } else {
      ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_COARSE_GRAINED);
      proc->addMemory(new_mem);
    }
  }

  return err;
}

static hsa_status_t get_agent_info(hsa_agent_t agent, void *data) {
  hsa_status_t err = HSA_STATUS_SUCCESS;
  hsa_device_type_t device_type;
  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
  ErrorCheck(Get device type info, err);
  switch (device_type) {
  case HSA_DEVICE_TYPE_CPU: {
    ATLCPUProcessor new_proc(agent);
    err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
                                             &new_proc);
    ErrorCheck(Iterate all memory pools, err);
    g_atl_machine.addProcessor(new_proc);
  } break;
  case HSA_DEVICE_TYPE_GPU: {
    hsa_profile_t profile;
    err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &profile);
    ErrorCheck(Query the agent profile, err);
    atmi_devtype_t gpu_type;
    gpu_type =
        (profile == HSA_PROFILE_FULL) ? ATMI_DEVTYPE_iGPU : ATMI_DEVTYPE_dGPU;
    ATLGPUProcessor new_proc(agent, gpu_type);
    err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
                                             &new_proc);
    ErrorCheck(Iterate all memory pools, err);
    g_atl_machine.addProcessor(new_proc);
  } break;
  case HSA_DEVICE_TYPE_DSP: {
    err = HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  } break;
  }

  return err;
}

hsa_status_t get_fine_grained_region(hsa_region_t region, void *data) {
  hsa_region_segment_t segment;
  hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment);
  if (segment != HSA_REGION_SEGMENT_GLOBAL) {
    return HSA_STATUS_SUCCESS;
  }
  hsa_region_global_flag_t flags;
  hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags);
  if (flags & HSA_REGION_GLOBAL_FLAG_FINE_GRAINED) {
    hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data);
    *ret = region;
    return HSA_STATUS_INFO_BREAK;
  }
  return HSA_STATUS_SUCCESS;
}

/* Determines if a memory region can be used for kernarg allocations.  */
static hsa_status_t get_kernarg_memory_region(hsa_region_t region, void *data) {
  hsa_region_segment_t segment;
  hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment);
  if (HSA_REGION_SEGMENT_GLOBAL != segment) {
    return HSA_STATUS_SUCCESS;
  }

  hsa_region_global_flag_t flags;
  hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags);
  if (flags & HSA_REGION_GLOBAL_FLAG_KERNARG) {
    hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data);
    *ret = region;
    return HSA_STATUS_INFO_BREAK;
  }

  return HSA_STATUS_SUCCESS;
}

static hsa_status_t init_compute_and_memory() {
  hsa_status_t err;

  /* Iterate over the agents and pick the gpu agent */
  err = hsa_iterate_agents(get_agent_info, NULL);
  if (err == HSA_STATUS_INFO_BREAK) {
    err = HSA_STATUS_SUCCESS;
  }
  ErrorCheck(Getting a gpu agent, err);
  if (err != HSA_STATUS_SUCCESS)
    return err;

  /* Init all devices or individual device types? */
  std::vector<ATLCPUProcessor> &cpu_procs =
      g_atl_machine.processors<ATLCPUProcessor>();
  std::vector<ATLGPUProcessor> &gpu_procs =
      g_atl_machine.processors<ATLGPUProcessor>();
  /* For CPU memory pools, add other devices that can access them directly
   * or indirectly */
  for (auto &cpu_proc : cpu_procs) {
    for (auto &cpu_mem : cpu_proc.memories()) {
      hsa_amd_memory_pool_t pool = cpu_mem.memory();
      for (auto &gpu_proc : gpu_procs) {
        hsa_agent_t agent = gpu_proc.agent();
        hsa_amd_memory_pool_access_t access;
        hsa_amd_agent_memory_pool_get_info(
            agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
        if (access != 0) {
          // this means not NEVER, but could be YES or NO
          // add this memory pool to the proc
          gpu_proc.addMemory(cpu_mem);
        }
      }
    }
  }

  /* FIXME: are the below combinations of procs and memory pools needed?
   * all to all compare procs with their memory pools and add those memory
   * pools that are accessible by the target procs */
  for (auto &gpu_proc : gpu_procs) {
    for (auto &gpu_mem : gpu_proc.memories()) {
      hsa_amd_memory_pool_t pool = gpu_mem.memory();
      for (auto &cpu_proc : cpu_procs) {
        hsa_agent_t agent = cpu_proc.agent();
        hsa_amd_memory_pool_access_t access;
        hsa_amd_agent_memory_pool_get_info(
            agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
        if (access != 0) {
          // this means not NEVER, but could be YES or NO
          // add this memory pool to the proc
          cpu_proc.addMemory(gpu_mem);
        }
      }
    }
  }

  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_CPU] = cpu_procs.size();
  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_GPU] = gpu_procs.size();

  size_t num_procs = cpu_procs.size() + gpu_procs.size();
  // g_atmi_machine.devices = (atmi_device_t *)malloc(num_procs *
  // sizeof(atmi_device_t));
  atmi_device_t *all_devices = reinterpret_cast<atmi_device_t *>(
      malloc(num_procs * sizeof(atmi_device_t)));
  int num_iGPUs = 0;
  int num_dGPUs = 0;
  for (uint32_t i = 0; i < gpu_procs.size(); i++) {
    if (gpu_procs[i].type() == ATMI_DEVTYPE_iGPU)
      num_iGPUs++;
    else
      num_dGPUs++;
  }
  assert(num_iGPUs + num_dGPUs == gpu_procs.size() &&
         "Number of dGPUs and iGPUs do not add up");
  DEBUG_PRINT("CPU Agents: %lu\n", cpu_procs.size());
  DEBUG_PRINT("iGPU Agents: %d\n", num_iGPUs);
  DEBUG_PRINT("dGPU Agents: %d\n", num_dGPUs);
  DEBUG_PRINT("GPU Agents: %lu\n", gpu_procs.size());

  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_iGPU] = num_iGPUs;
  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_dGPU] = num_dGPUs;

  int cpus_begin = 0;
  int cpus_end = cpu_procs.size();
  int gpus_begin = cpu_procs.size();
  int gpus_end = cpu_procs.size() + gpu_procs.size();
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_CPU] = &all_devices[cpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_GPU] = &all_devices[gpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_iGPU] = &all_devices[gpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_dGPU] = &all_devices[gpus_begin];
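  // CPU devices occupy the front of all_devices and GPU devices the back; the
  // iGPU and dGPU views currently alias the start of the GPU range rather
  // than being partitioned further.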
  int proc_index = 0;
  for (int i = cpus_begin; i < cpus_end; i++) {
    all_devices[i].type = cpu_procs[proc_index].type();

    std::vector<ATLMemory> memories = cpu_procs[proc_index].memories();
    int fine_memories_size = 0;
    int coarse_memories_size = 0;
    DEBUG_PRINT("CPU memory types:\t");
    for (auto &memory : memories) {
      atmi_memtype_t type = memory.type();
      if (type == ATMI_MEMTYPE_FINE_GRAINED) {
        fine_memories_size++;
        DEBUG_PRINT("Fine\t");
      } else {
        coarse_memories_size++;
        DEBUG_PRINT("Coarse\t");
      }
    }
    DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
    DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
    proc_index++;
  }
  proc_index = 0;
  for (int i = gpus_begin; i < gpus_end; i++) {
    all_devices[i].type = gpu_procs[proc_index].type();

    std::vector<ATLMemory> memories = gpu_procs[proc_index].memories();
    int fine_memories_size = 0;
    int coarse_memories_size = 0;
    DEBUG_PRINT("GPU memory types:\t");
    for (auto &memory : memories) {
      atmi_memtype_t type = memory.type();
      if (type == ATMI_MEMTYPE_FINE_GRAINED) {
        fine_memories_size++;
        DEBUG_PRINT("Fine\t");
      } else {
        coarse_memories_size++;
        DEBUG_PRINT("Coarse\t");
      }
    }
    DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
    DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
    proc_index++;
  }
  proc_index = 0;
  atl_cpu_kernarg_region.handle = (uint64_t)-1;
  if (cpu_procs.size() > 0) {
    err = hsa_agent_iterate_regions(
        cpu_procs[0].agent(), get_fine_grained_region, &atl_cpu_kernarg_region);
    if (err == HSA_STATUS_INFO_BREAK) {
      err = HSA_STATUS_SUCCESS;
    }
    err = (atl_cpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR
                                                          : HSA_STATUS_SUCCESS;
    ErrorCheck(Finding a CPU kernarg memory region handle, err);
  }
  /* Find a memory region that supports kernel arguments.  */
  atl_gpu_kernarg_region.handle = (uint64_t)-1;
  if (gpu_procs.size() > 0) {
    hsa_agent_iterate_regions(gpu_procs[0].agent(), get_kernarg_memory_region,
                              &atl_gpu_kernarg_region);
    err = (atl_gpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR
                                                          : HSA_STATUS_SUCCESS;
    ErrorCheck(Finding a kernarg memory region, err);
  }
  if (num_procs > 0)
    return HSA_STATUS_SUCCESS;
  else
    return HSA_STATUS_ERROR_NOT_INITIALIZED;
}

hsa_status_t init_hsa() {
  if (atlc.g_hsa_initialized == false) {
    DEBUG_PRINT("Initializing HSA...");
    hsa_status_t err = hsa_init();
    ErrorCheck(Initializing the hsa runtime, err);
    if (err != HSA_STATUS_SUCCESS)
      return err;

    err = init_compute_and_memory();
    if (err != HSA_STATUS_SUCCESS)
      return err;
    ErrorCheck(After initializing compute and memory, err);

    int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>();
    KernelInfoTable.resize(gpu_count);
    SymbolInfoTable.resize(gpu_count);
    for (uint32_t i = 0; i < SymbolInfoTable.size(); i++)
      SymbolInfoTable[i].clear();
    for (uint32_t i = 0; i < KernelInfoTable.size(); i++)
      KernelInfoTable[i].clear();
    atlc.g_hsa_initialized = true;
    DEBUG_PRINT("done\n");
  }
  return HSA_STATUS_SUCCESS;
}

void init_tasks() {
  if (atlc.g_tasks_initialized != false)
    return;
  std::vector<hsa_agent_t> gpu_agents;
  int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>();
  for (int gpu = 0; gpu < gpu_count; gpu++) {
    atmi_place_t place = ATMI_PLACE_GPU(0, gpu);
    ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place);
    gpu_agents.push_back(proc.agent());
  }
  atlc.g_tasks_initialized = true;
}

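// System event handler registered by atl_init_gpu_context(); reports GPU
// memory faults on stderr and returns HSA_STATUS_ERROR to the runtime.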
hsa_status_t callbackEvent(const hsa_amd_event_t *event, void *data) {
#if (ROCM_VERSION_MAJOR >= 3) ||                                               \
    (ROCM_VERSION_MAJOR >= 2 && ROCM_VERSION_MINOR >= 3)
  if (event->event_type == HSA_AMD_GPU_MEMORY_FAULT_EVENT) {
#else
  if (event->event_type == GPU_MEMORY_FAULT_EVENT) {
#endif
    hsa_amd_gpu_memory_fault_info_t memory_fault = event->memory_fault;
    // memory_fault.agent
    // memory_fault.virtual_address
    // memory_fault.fault_reason_mask
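    // Build a human-readable description of the fault from the bits set in
    // fault_reason_mask.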
    // fprintf("[GPU Error at %p: Reason is ", memory_fault.virtual_address);
    std::stringstream stream;
    stream << std::hex << (uintptr_t)memory_fault.virtual_address;
    std::string addr("0x" + stream.str());

    std::string err_string = "[GPU Memory Error] Addr: " + addr;
    err_string += " Reason: ";
    if (!(memory_fault.fault_reason_mask & 0x00111111)) {
      err_string += "No Idea! ";
    } else {
      if (memory_fault.fault_reason_mask & 0x00000001)
        err_string += "Page not present or supervisor privilege. ";
      if (memory_fault.fault_reason_mask & 0x00000010)
        err_string += "Write access to a read-only page. ";
      if (memory_fault.fault_reason_mask & 0x00000100)
        err_string += "Execute access to a page marked NX. ";
      if (memory_fault.fault_reason_mask & 0x00001000)
        err_string += "Host access only. ";
      if (memory_fault.fault_reason_mask & 0x00010000)
        err_string += "ECC failure (if supported by HW). ";
      if (memory_fault.fault_reason_mask & 0x00100000)
        err_string += "Can't determine the exact fault address. ";
    }
    fprintf(stderr, "%s\n", err_string.c_str());
    return HSA_STATUS_ERROR;
  }
  return HSA_STATUS_SUCCESS;
}

atmi_status_t atl_init_gpu_context() {
  if (atlc.struct_initialized == false)
    atmi_init_context_structs();
  if (atlc.g_gpu_initialized != false)
    return ATMI_STATUS_SUCCESS;

  hsa_status_t err;
  err = init_hsa();
  if (err != HSA_STATUS_SUCCESS)
    return ATMI_STATUS_ERROR;

  if (context_init_time_init == 0) {
    clock_gettime(CLOCK_MONOTONIC_RAW, &context_init_time);
    context_init_time_init = 1;
  }

  err = hsa_amd_register_system_event_handler(callbackEvent, NULL);
  ErrorCheck(Registering the system for memory faults, err);

  init_tasks();
  atlc.g_gpu_initialized = true;
  return ATMI_STATUS_SUCCESS;
}

bool isImplicit(KernelArgMD::ValueKind value_kind) {
  switch (value_kind) {
  case KernelArgMD::ValueKind::HiddenGlobalOffsetX:
  case KernelArgMD::ValueKind::HiddenGlobalOffsetY:
  case KernelArgMD::ValueKind::HiddenGlobalOffsetZ:
  case KernelArgMD::ValueKind::HiddenNone:
  case KernelArgMD::ValueKind::HiddenPrintfBuffer:
  case KernelArgMD::ValueKind::HiddenDefaultQueue:
  case KernelArgMD::ValueKind::HiddenCompletionAction:
  case KernelArgMD::ValueKind::HiddenMultiGridSyncArg:
  case KernelArgMD::ValueKind::HiddenHostcallBuffer:
    return true;
  default:
    return false;
  }
}

static std::pair<unsigned char *, unsigned char *>
find_metadata(void *binary, size_t binSize) {
  std::pair<unsigned char *, unsigned char *> failure = {nullptr, nullptr};

  Elf *e = elf_memory(static_cast<char *>(binary), binSize);
  if (elf_kind(e) != ELF_K_ELF) {
    return failure;
  }

  size_t numpHdrs;
  if (elf_getphdrnum(e, &numpHdrs) != 0) {
    return failure;
  }

  for (size_t i = 0; i < numpHdrs; ++i) {
    GElf_Phdr pHdr;
    if (gelf_getphdr(e, i, &pHdr) != &pHdr) {
      continue;
    }
    // Look for the runtime metadata note
    if (pHdr.p_type == PT_NOTE && pHdr.p_align >= sizeof(int)) {
      // Iterate over the notes in this segment
      address ptr = (address)binary + pHdr.p_offset;
      address segmentEnd = ptr + pHdr.p_filesz;

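      // Each note record is the fixed 12-byte header followed by the name and
      // then the descriptor, with both the name and the descriptor padded to
      // 4-byte alignment.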
      while (ptr < segmentEnd) {
        Elf_Note *note = reinterpret_cast<Elf_Note *>(ptr);
        address name = (address)&note[1];

        if (note->n_type == 7 || note->n_type == 8) {
          return failure;
        } else if (note->n_type == 10 /* NT_AMD_AMDGPU_HSA_METADATA */ &&
                   note->n_namesz == sizeof "AMD" &&
                   !memcmp(name, "AMD", note->n_namesz)) {
          // code object v2 uses yaml metadata, no longer supported
          return failure;
        } else if (note->n_type == 32 /* NT_AMDGPU_METADATA */ &&
                   note->n_namesz == sizeof "AMDGPU" &&
                   !memcmp(name, "AMDGPU", note->n_namesz)) {

          // n_descsz = 485
          // value is padded to 4 byte alignment, may want to move end up to
          // match
          size_t offset = sizeof(uint32_t) * 3 /* fields */
                          + sizeof("AMDGPU")   /* name */
                          + 1 /* padding to 4 byte alignment */;

          // Including the trailing padding means both pointers are 4 bytes
          // aligned, which may be useful later.
          unsigned char *metadata_start = (unsigned char *)ptr + offset;
          unsigned char *metadata_end =
              metadata_start + core::alignUp(note->n_descsz, 4);
          return {metadata_start, metadata_end};
        }
        ptr += sizeof(*note) + core::alignUp(note->n_namesz, sizeof(int)) +
               core::alignUp(note->n_descsz, sizeof(int));
      }
    }
  }

  return failure;
}

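// Small helpers over the msgpack byte stream produced by find_metadata. Each
// lookup returns 0 when exactly one matching entry was found, so the results
// can be accumulated and then checked with msgpackErrorCheck.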
namespace {
int map_lookup_array(msgpack::byte_range message, const char *needle,
                     msgpack::byte_range *res, uint64_t *size) {
  unsigned count = 0;
  struct s : msgpack::functors_defaults<s> {
    s(unsigned &count, uint64_t *size) : count(count), size(size) {}
    unsigned &count;
    uint64_t *size;
    const unsigned char *handle_array(uint64_t N, msgpack::byte_range bytes) {
      count++;
      *size = N;
      return bytes.end;
    }
  };

  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           // If the message is an array, record number of
                           // elements in *size
                           msgpack::handle_msgpack<s>(value, {count, size});
                           // return the whole array
                           *res = value;
                         }
                       });
  // Only claim success if exactly one key/array pair matched
  return count != 1;
}

int map_lookup_string(msgpack::byte_range message, const char *needle,
                      std::string *res) {
  unsigned count = 0;
  struct s : public msgpack::functors_defaults<s> {
    s(unsigned &count, std::string *res) : count(count), res(res) {}
    unsigned &count;
    std::string *res;
    void handle_string(size_t N, const unsigned char *str) {
      count++;
      *res = std::string(str, str + N);
    }
  };
  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           msgpack::handle_msgpack<s>(value, {count, res});
                         }
                       });
  return count != 1;
}

int map_lookup_uint64_t(msgpack::byte_range message, const char *needle,
                        uint64_t *res) {
  unsigned count = 0;
  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           msgpack::foronly_unsigned(value, [&](uint64_t x) {
                             count++;
                             *res = x;
                           });
                         }
                       });
  return count != 1;
}

int array_lookup_element(msgpack::byte_range message, uint64_t elt,
                         msgpack::byte_range *res) {
  int rc = 1;
  uint64_t i = 0;
  msgpack::foreach_array(message, [&](msgpack::byte_range value) {
    if (i == elt) {
      *res = value;
      rc = 0;
    }
    i++;
  });
  return rc;
}

int populate_kernelArgMD(msgpack::byte_range args_element,
                         KernelArgMD *kernelarg) {
  using namespace msgpack;
  int error = 0;
  foreach_map(args_element, [&](byte_range key, byte_range value) -> void {
    if (message_is_string(key, ".name")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        kernelarg->name_ = std::string(str, str + N);
      });
    } else if (message_is_string(key, ".type_name")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        kernelarg->typeName_ = std::string(str, str + N);
      });
    } else if (message_is_string(key, ".size")) {
      foronly_unsigned(value, [&](uint64_t x) { kernelarg->size_ = x; });
    } else if (message_is_string(key, ".offset")) {
      foronly_unsigned(value, [&](uint64_t x) { kernelarg->offset_ = x; });
    } else if (message_is_string(key, ".value_kind")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        std::string s = std::string(str, str + N);
        auto itValueKind = ArgValueKind.find(s);
        if (itValueKind != ArgValueKind.end()) {
          kernelarg->valueKind_ = itValueKind->second;
        }
      });
    }
  });
  return error;
}
} // namespace

static hsa_status_t get_code_object_custom_metadata(void *binary,
                                                    size_t binSize, int gpu) {
  // parse code object with different keys from v2
  // also, the kernel name is not the same as the symbol name -- so a
  // symbol->name map is needed

  std::pair<unsigned char *, unsigned char *> metadata =
      find_metadata(binary, binSize);
  if (!metadata.first) {
    return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  }

  uint64_t kernelsSize = 0;
  int msgpack_errors = 0;
  msgpack::byte_range kernel_array;
  msgpack_errors =
      map_lookup_array({metadata.first, metadata.second}, "amdhsa.kernels",
                       &kernel_array, &kernelsSize);
  msgpackErrorCheck(kernels lookup in program metadata, msgpack_errors);

  for (size_t i = 0; i < kernelsSize; i++) {
    assert(msgpack_errors == 0);
    std::string kernelName;
    std::string languageName;
    std::string symbolName;

    msgpack::byte_range element;
    msgpack_errors += array_lookup_element(kernel_array, i, &element);
    msgpackErrorCheck(element lookup in kernel metadata, msgpack_errors);

    msgpack_errors += map_lookup_string(element, ".name", &kernelName);
    msgpack_errors += map_lookup_string(element, ".language", &languageName);
    msgpack_errors += map_lookup_string(element, ".symbol", &symbolName);
    msgpackErrorCheck(strings lookup in kernel metadata, msgpack_errors);

    atl_kernel_info_t info = {0, 0, 0, 0, 0, {}, {}, {}};
    size_t kernel_explicit_args_size = 0;
    uint64_t kernel_segment_size;
    msgpack_errors += map_lookup_uint64_t(element, ".kernarg_segment_size",
                                          &kernel_segment_size);
    msgpackErrorCheck(kernarg segment size metadata lookup in kernel metadata,
                      msgpack_errors);

    // create a map from symbol to name
    DEBUG_PRINT("Kernel symbol %s; Name: %s; Size: %lu\n", symbolName.c_str(),
                kernelName.c_str(), kernel_segment_size);
    KernelNameMap[symbolName] = kernelName;

    bool hasHiddenArgs = false;
    if (kernel_segment_size > 0) {
      uint64_t argsSize;
      size_t offset = 0;

      msgpack::byte_range args_array;
      msgpack_errors +=
          map_lookup_array(element, ".args", &args_array, &argsSize);
      msgpackErrorCheck(kernel args metadata lookup in kernel metadata,
                        msgpack_errors);

      info.num_args = argsSize;

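      // Walk the .args array, recording each argument's size and offset.
      // Padding between consecutive arguments is folded into
      // kernel_explicit_args_size so the re-computed kernarg segment keeps
      // the original alignment.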
      for (size_t i = 0; i < argsSize; ++i) {
        KernelArgMD lcArg;

        msgpack::byte_range args_element;
        msgpack_errors += array_lookup_element(args_array, i, &args_element);
        msgpackErrorCheck(iterate args map in kernel args metadata,
                          msgpack_errors);

        msgpack_errors += populate_kernelArgMD(args_element, &lcArg);
        msgpackErrorCheck(iterate args map in kernel args metadata,
                          msgpack_errors);

        // populate info with sizes and offsets
        info.arg_sizes.push_back(lcArg.size_);
        // v3 has offset field and not align field
        size_t new_offset = lcArg.offset_;
        size_t padding = new_offset - offset;
        offset = new_offset;
        info.arg_offsets.push_back(lcArg.offset_);
        DEBUG_PRINT("Arg[%lu] \"%s\" (%u, %u)\n", i, lcArg.name_.c_str(),
                    lcArg.size_, lcArg.offset_);
        offset += lcArg.size_;

        // check if the arg is a hidden/implicit arg
        // this logic assumes that all hidden args are 8-byte aligned
        if (!isImplicit(lcArg.valueKind_)) {
          kernel_explicit_args_size += lcArg.size_;
        } else {
          hasHiddenArgs = true;
        }
        kernel_explicit_args_size += padding;
      }
    }

    // Add the size of the implicit args (e.g. offsets x, y, z and the pipe
    // pointer). ATMI does not count the compiler-set implicit args; instead
    // it discounts them and appends its own atmi_implicit_args_t.
    info.kernel_segment_size =
        (hasHiddenArgs ? kernel_explicit_args_size : kernel_segment_size) +
        sizeof(atmi_implicit_args_t);
    DEBUG_PRINT("[%s: kernarg seg size] (%lu --> %u)\n", kernelName.c_str(),
                kernel_segment_size, info.kernel_segment_size);

    // kernel received, now add it to the kernel info table
    KernelInfoTable[gpu][kernelName] = info;
  }

  return HSA_STATUS_SUCCESS;
}

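// Callback for hsa_executable_iterate_symbols: records dispatch information
// for kernels and address/size information for variables in the per-GPU
// KernelInfoTable and SymbolInfoTable.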
static hsa_status_t populate_InfoTables(hsa_executable_t executable,
                                        hsa_executable_symbol_t symbol,
                                        void *data) {
  int gpu = *static_cast<int *>(data);
  hsa_symbol_kind_t type;

  uint32_t name_length;
  hsa_status_t err;
  err = hsa_executable_symbol_get_info(symbol, HSA_EXECUTABLE_SYMBOL_INFO_TYPE,
                                       &type);
  ErrorCheck(Symbol info extraction, err);
  DEBUG_PRINT("Exec Symbol type: %d\n", type);
  if (type == HSA_SYMBOL_KIND_KERNEL) {
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
    ErrorCheck(Symbol info extraction, err);
    char *name = reinterpret_cast<char *>(malloc(name_length + 1));
    err = hsa_executable_symbol_get_info(symbol,
                                         HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
    ErrorCheck(Symbol info extraction, err);
    name[name_length] = 0;

    if (KernelNameMap.find(std::string(name)) == KernelNameMap.end()) {
      // did not find kernel name in the kernel map; this can happen only
      // if the ROCr API for getting symbol info (name) is different from
      // the comgr method of getting symbol info
      ErrorCheck(Invalid kernel name, HSA_STATUS_ERROR_INVALID_CODE_OBJECT);
    }
    atl_kernel_info_t info;
    std::string kernelName = KernelNameMap[std::string(name)];
    // by now, the kernel info table should already have an entry
    // because the non-ROCr custom code object parsing is called before
    // iterating over the code object symbols using ROCr
    if (KernelInfoTable[gpu].find(kernelName) == KernelInfoTable[gpu].end()) {
      ErrorCheck(Finding the entry kernel info table,
                 HSA_STATUS_ERROR_INVALID_CODE_OBJECT);
    }
    // found, so assign and update
    info = KernelInfoTable[gpu][kernelName];

    /* Extract dispatch information from the symbol */
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_OBJECT,
        &(info.kernel_object));
    ErrorCheck(Extracting the symbol from the executable, err);
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_GROUP_SEGMENT_SIZE,
        &(info.group_segment_size));
    ErrorCheck(Extracting the group segment size from the executable, err);
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_PRIVATE_SEGMENT_SIZE,
        &(info.private_segment_size));
    ErrorCheck(Extracting the private segment from the executable, err);

    DEBUG_PRINT(
        "Kernel %s --> %lx symbol %u group segsize %u pvt segsize %u bytes "
        "kernarg\n",
        kernelName.c_str(), info.kernel_object, info.group_segment_size,
        info.private_segment_size, info.kernel_segment_size);

    // assign it back to the kernel info table
    KernelInfoTable[gpu][kernelName] = info;
    free(name);
  } else if (type == HSA_SYMBOL_KIND_VARIABLE) {
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
    ErrorCheck(Symbol info extraction, err);
    char *name = reinterpret_cast<char *>(malloc(name_length + 1));
    err = hsa_executable_symbol_get_info(symbol,
                                         HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
    ErrorCheck(Symbol info extraction, err);
    name[name_length] = 0;

    atl_symbol_info_t info;

    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_ADDRESS, &(info.addr));
    ErrorCheck(Symbol info address extraction, err);

    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_SIZE, &(info.size));
    ErrorCheck(Symbol info size extraction, err);

    atmi_mem_place_t place = ATMI_MEM_PLACE(ATMI_DEVTYPE_GPU, gpu, 0);
    DEBUG_PRINT("Symbol %s = %p (%u bytes)\n", name, (void *)info.addr,
                info.size);
    register_allocation(reinterpret_cast<void *>(info.addr), (size_t)info.size,
                        place);
    SymbolInfoTable[gpu][std::string(name)] = info;
    if (strcmp(name, "needs_hostcall_buffer") == 0)
      g_atmi_hostcall_required = true;
    free(name);
  } else {
    DEBUG_PRINT("Symbol is an indirect function\n");
  }
  return HSA_STATUS_SUCCESS;
}

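// Parses the code object's metadata, deserializes and loads it onto the
// target agent, freezes the executable, and then walks its symbols to fill
// the kernel and symbol info tables.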
atmi_status_t Runtime::RegisterModuleFromMemory(
    void *module_bytes, size_t module_size, atmi_place_t place,
    atmi_status_t (*on_deserialized_data)(void *data, size_t size,
                                          void *cb_state),
    void *cb_state) {
  hsa_status_t err;
  int gpu = place.device_id;
  assert(gpu >= 0);

  DEBUG_PRINT("Trying to load module to GPU-%d\n", gpu);
  ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place);
  hsa_agent_t agent = proc.agent();
  hsa_executable_t executable = {0};
  hsa_profile_t agent_profile;

  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &agent_profile);
  ErrorCheck(Query the agent profile, err);
  // FIXME: Assume that every profile is FULL until we understand how to build
  // GCN with base profile
  agent_profile = HSA_PROFILE_FULL;
  /* Create the empty executable.  */
  err = hsa_executable_create(agent_profile, HSA_EXECUTABLE_STATE_UNFROZEN, "",
                              &executable);
  ErrorCheck(Create the executable, err);

  bool module_load_success = false;
  do // Existing control flow used continue, preserve that for this patch
  {
    {
      // Some metadata info is not available through ROCr API, so use custom
      // code object metadata parsing to collect such metadata info

      err = get_code_object_custom_metadata(module_bytes, module_size, gpu);
      ErrorCheckAndContinue(Getting custom code object metadata, err);

      // Deserialize code object.
      hsa_code_object_t code_object = {0};
      err = hsa_code_object_deserialize(module_bytes, module_size, NULL,
                                        &code_object);
      ErrorCheckAndContinue(Code Object Deserialization, err);
      assert(0 != code_object.handle);

      // Mutating the device image here avoids another allocation & memcpy
      void *code_object_alloc_data =
          reinterpret_cast<void *>(code_object.handle);
      atmi_status_t atmi_err =
          on_deserialized_data(code_object_alloc_data, module_size, cb_state);
      ATMIErrorCheck(Error in deserialized_data callback, atmi_err);

      /* Load the code object.  */
      err =
          hsa_executable_load_code_object(executable, agent, code_object, NULL);
      ErrorCheckAndContinue(Loading the code object, err);

      // cannot iterate over symbols until executable is frozen
    }
    module_load_success = true;
  } while (0);
  DEBUG_PRINT("Module loaded successfully? %d\n", module_load_success);
  if (module_load_success) {
    /* Freeze the executable; it can now be queried for symbols.  */
    err = hsa_executable_freeze(executable, "");
    ErrorCheck(Freeze the executable, err);

    err = hsa_executable_iterate_symbols(executable, populate_InfoTables,
                                         static_cast<void *>(&gpu));
    ErrorCheck(Iterating over symbols for executable, err);

    // save the executable and destroy during finalize
    g_executables.push_back(executable);
    return ATMI_STATUS_SUCCESS;
  } else {
    return ATMI_STATUS_ERROR;
  }
}

} // namespace core