1 //===------------ libcall.cu - OpenMP GPU user calls ------------- CUDA -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the OpenMP runtime functions that can be
10 // invoked by the user in an OpenMP region
11 //
12 //===----------------------------------------------------------------------===//
13 #pragma omp declare target
14
15 #include "common/omptarget.h"
16 #include "target_impl.h"
17
omp_get_wtick(void)18 EXTERN double omp_get_wtick(void) {
19 double rc = __kmpc_impl_get_wtick();
20 PRINT(LD_IO, "omp_get_wtick() returns %g\n", rc);
21 return rc;
22 }
23
omp_get_wtime(void)24 EXTERN double omp_get_wtime(void) {
25 double rc = __kmpc_impl_get_wtime();
26 PRINT(LD_IO, "call omp_get_wtime() returns %g\n", rc);
27 return rc;
28 }
29
omp_set_num_threads(int num)30 EXTERN void omp_set_num_threads(int num) {
31 // Ignore it for SPMD mode.
32 if (__kmpc_is_spmd_exec_mode())
33 return;
34 ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime.");
35 PRINT(LD_IO, "call omp_set_num_threads(num %d)\n", num);
36 if (num <= 0) {
37 WARNING0(LW_INPUT, "expected positive num; ignore\n");
38 } else if (parallelLevel[GetWarpId()] == 0) {
39 nThreads = num;
40 }
41 }
42
omp_get_num_threads(void)43 EXTERN int omp_get_num_threads(void) {
44 int rc = GetNumberOfOmpThreads(__kmpc_is_spmd_exec_mode());
45 PRINT(LD_IO, "call omp_get_num_threads() return %d\n", rc);
46 return rc;
47 }
48
omp_get_max_threads(void)49 EXTERN int omp_get_max_threads(void) {
50 if (parallelLevel[GetWarpId()] > 0)
51 // We're already in parallel region.
52 return 1; // default is 1 thread avail
53 // Not currently in a parallel region, return what was set.
54 int rc = 1;
55 if (parallelLevel[GetWarpId()] == 0)
56 rc = nThreads;
57 ASSERT0(LT_FUSSY, rc >= 0, "bad number of threads");
58 PRINT(LD_IO, "call omp_get_max_threads() return %d\n", rc);
59 return rc;
60 }
61
omp_get_thread_limit(void)62 EXTERN int omp_get_thread_limit(void) {
63 if (__kmpc_is_spmd_exec_mode())
64 return __kmpc_get_hardware_num_threads_in_block();
65 int rc = threadLimit;
66 PRINT(LD_IO, "call omp_get_thread_limit() return %d\n", rc);
67 return rc;
68 }
69
omp_get_thread_num()70 EXTERN int omp_get_thread_num() {
71 int rc = GetOmpThreadId();
72 PRINT(LD_IO, "call omp_get_thread_num() returns %d\n", rc);
73 return rc;
74 }
75
omp_get_num_procs(void)76 EXTERN int omp_get_num_procs(void) {
77 int rc = GetNumberOfProcsInDevice(__kmpc_is_spmd_exec_mode());
78 PRINT(LD_IO, "call omp_get_num_procs() returns %d\n", rc);
79 return rc;
80 }
81
omp_in_parallel(void)82 EXTERN int omp_in_parallel(void) {
83 int rc = parallelLevel[GetWarpId()] > OMP_ACTIVE_PARALLEL_LEVEL ? 1 : 0;
84 PRINT(LD_IO, "call omp_in_parallel() returns %d\n", rc);
85 return rc;
86 }
87
omp_in_final(void)88 EXTERN int omp_in_final(void) {
89 // treat all tasks as final... Specs may expect runtime to keep
90 // track more precisely if a task was actively set by users... This
91 // is not explicitly specified; will treat as if runtime can
92 // actively decide to put a non-final task into a final one.
93 int rc = 1;
94 PRINT(LD_IO, "call omp_in_final() returns %d\n", rc);
95 return rc;
96 }
97
// Dynamic adjustment of the number of threads is not supported on the
// device; the request is logged and ignored.
EXTERN void omp_set_dynamic(int flag) {
  PRINT(LD_IO, "call omp_set_dynamic(%d) is ignored (no support)\n", flag);
}
101
omp_get_dynamic(void)102 EXTERN int omp_get_dynamic(void) {
103 int rc = 0;
104 PRINT(LD_IO, "call omp_get_dynamic() returns %d\n", rc);
105 return rc;
106 }
107
// Nested parallelism is not supported on the device; the request is
// logged and ignored.
EXTERN void omp_set_nested(int flag) {
  PRINT(LD_IO, "call omp_set_nested(%d) is ignored (no nested support)\n",
        flag);
}
112
omp_get_nested(void)113 EXTERN int omp_get_nested(void) {
114 int rc = 0;
115 PRINT(LD_IO, "call omp_get_nested() returns %d\n", rc);
116 return rc;
117 }
118
// Only one active nesting level is supported on the device; the request
// is logged and ignored.
EXTERN void omp_set_max_active_levels(int level) {
  PRINT(LD_IO,
        "call omp_set_max_active_levels(%d) is ignored (no nested support)\n",
        level);
}
124
omp_get_max_active_levels(void)125 EXTERN int omp_get_max_active_levels(void) {
126 int rc = 1;
127 PRINT(LD_IO, "call omp_get_max_active_levels() returns %d\n", rc);
128 return rc;
129 }
130
omp_get_level(void)131 EXTERN int omp_get_level(void) {
132 int level = __kmpc_parallel_level();
133 PRINT(LD_IO, "call omp_get_level() returns %d\n", level);
134 return level;
135 }
136
omp_get_active_level(void)137 EXTERN int omp_get_active_level(void) {
138 int level = parallelLevel[GetWarpId()] > OMP_ACTIVE_PARALLEL_LEVEL ? 1 : 0;
139 PRINT(LD_IO, "call omp_get_active_level() returns %d\n", level)
140 return level;
141 }
142
// Return the thread number, within its team, of the ancestor of the
// calling thread at nesting depth `level`, or -1 if no such level
// exists. Level 0 is the initial thread.
EXTERN int omp_get_ancestor_thread_num(int level) {
  // SPMD mode: exactly one (active) level; level 1 maps to the hardware
  // thread id in the block, every other level reports thread 0.
  if (__kmpc_is_spmd_exec_mode())
    return level == 1 ? __kmpc_get_hardware_thread_id_in_block() : 0;
  int rc = -1;
  // If level is 0 or all parallel regions are not active - return 0.
  unsigned parLevel = parallelLevel[GetWarpId()];
  if (level == 1 && parLevel > OMP_ACTIVE_PARALLEL_LEVEL) {
    int totLevel = omp_get_level();
    if (level <= totLevel) {
      omptarget_nvptx_TaskDescr *currTaskDescr =
          getMyTopTaskDescriptor(/*isSPMDExecutionMode=*/false);
      // Number of parallel constructs to skip while walking up the chain.
      int steps = totLevel - level;
      PRINT(LD_IO, "backtrack %d steps\n", steps);
      ASSERT0(LT_FUSSY, currTaskDescr,
              "do not expect fct to be called in a non-active thread");
      do {
        // Debug-only dump of the task descriptor being visited.
        if (DON(LD_IOD)) {
          // print current state
          omp_sched_t sched = currTaskDescr->GetRuntimeSched();
          PRINT(LD_ALL,
                "task descr %s %d: %s, in par %d, rt sched %d,"
                " chunk %" PRIu64 "; tid %d, tnum %d, nthreads %d\n",
                "ancestor", steps,
                (currTaskDescr->IsParallelConstruct() ? "par" : "task"),
                (int)currTaskDescr->InParallelRegion(), (int)sched,
                currTaskDescr->RuntimeChunkSize(),
                (int)currTaskDescr->ThreadId(), (int)threadsInTeam,
                (int)nThreads);
        }

        // Only parallel constructs count as nesting levels.
        if (currTaskDescr->IsParallelConstruct()) {
          // found the level
          if (!steps) {
            rc = currTaskDescr->ThreadId();
            break;
          }
          steps--;
        }
        currTaskDescr = currTaskDescr->GetPrevTaskDescr();
      } while (currTaskDescr);
      ASSERT0(LT_FUSSY, !steps, "expected to find all steps");
    }
  } else if (level == 0 ||
             (level > 0 && parLevel < OMP_ACTIVE_PARALLEL_LEVEL &&
              level <= parLevel) ||
             (level > 1 && parLevel > OMP_ACTIVE_PARALLEL_LEVEL &&
              level <= (parLevel - OMP_ACTIVE_PARALLEL_LEVEL))) {
    // Valid but non-active/implicit level: the ancestor is thread 0.
    rc = 0;
  }
  PRINT(LD_IO, "call omp_get_ancestor_thread_num(level %d) returns %d\n", level,
        rc)
  return rc;
}
196
omp_get_team_size(int level)197 EXTERN int omp_get_team_size(int level) {
198 if (__kmpc_is_spmd_exec_mode())
199 return level == 1 ? __kmpc_get_hardware_num_threads_in_block() : 1;
200 int rc = -1;
201 unsigned parLevel = parallelLevel[GetWarpId()];
202 // If level is 0 or all parallel regions are not active - return 1.
203 if (level == 1 && parLevel > OMP_ACTIVE_PARALLEL_LEVEL) {
204 rc = threadsInTeam;
205 } else if (level == 0 ||
206 (level > 0 && parLevel < OMP_ACTIVE_PARALLEL_LEVEL &&
207 level <= parLevel) ||
208 (level > 1 && parLevel > OMP_ACTIVE_PARALLEL_LEVEL &&
209 level <= (parLevel - OMP_ACTIVE_PARALLEL_LEVEL))) {
210 rc = 1;
211 }
212 PRINT(LD_IO, "call omp_get_team_size(level %d) returns %d\n", level, rc)
213 return rc;
214 }
215
omp_get_schedule(omp_sched_t * kind,int * modifier)216 EXTERN void omp_get_schedule(omp_sched_t *kind, int *modifier) {
217 if (isRuntimeUninitialized()) {
218 ASSERT0(LT_FUSSY, __kmpc_is_spmd_exec_mode(),
219 "Expected SPMD mode only with uninitialized runtime.");
220 *kind = omp_sched_static;
221 *modifier = 1;
222 } else {
223 omptarget_nvptx_TaskDescr *currTaskDescr =
224 getMyTopTaskDescriptor(__kmpc_is_spmd_exec_mode());
225 *kind = currTaskDescr->GetRuntimeSched();
226 *modifier = currTaskDescr->RuntimeChunkSize();
227 }
228 PRINT(LD_IO, "call omp_get_schedule returns sched %d and modif %d\n",
229 (int)*kind, *modifier);
230 }
231
omp_set_schedule(omp_sched_t kind,int modifier)232 EXTERN void omp_set_schedule(omp_sched_t kind, int modifier) {
233 PRINT(LD_IO, "call omp_set_schedule(sched %d, modif %d)\n", (int)kind,
234 modifier);
235 if (isRuntimeUninitialized()) {
236 ASSERT0(LT_FUSSY, __kmpc_is_spmd_exec_mode(),
237 "Expected SPMD mode only with uninitialized runtime.");
238 return;
239 }
240 if (kind >= omp_sched_static && kind < omp_sched_auto) {
241 omptarget_nvptx_TaskDescr *currTaskDescr =
242 getMyTopTaskDescriptor(__kmpc_is_spmd_exec_mode());
243 currTaskDescr->SetRuntimeSched(kind);
244 currTaskDescr->RuntimeChunkSize() = modifier;
245 PRINT(LD_IOD, "omp_set_schedule did set sched %d & modif %" PRIu64 "\n",
246 (int)currTaskDescr->GetRuntimeSched(),
247 currTaskDescr->RuntimeChunkSize());
248 }
249 }
250
omp_get_proc_bind(void)251 EXTERN omp_proc_bind_t omp_get_proc_bind(void) {
252 PRINT0(LD_IO, "call omp_get_proc_bin() is true, regardless on state\n");
253 return omp_proc_bind_true;
254 }
255
omp_get_num_places(void)256 EXTERN int omp_get_num_places(void) {
257 PRINT0(LD_IO, "call omp_get_num_places() returns 0\n");
258 return 0;
259 }
260
omp_get_place_num_procs(int place_num)261 EXTERN int omp_get_place_num_procs(int place_num) {
262 PRINT0(LD_IO, "call omp_get_place_num_procs() returns 0\n");
263 return 0;
264 }
265
// No places exist on the device; `ids` is left untouched.
EXTERN void omp_get_place_proc_ids(int place_num, int *ids) {
  PRINT0(LD_IO, "call to omp_get_place_proc_ids()\n");
}
269
omp_get_place_num(void)270 EXTERN int omp_get_place_num(void) {
271 PRINT0(LD_IO, "call to omp_get_place_num() returns 0\n");
272 return 0;
273 }
274
omp_get_partition_num_places(void)275 EXTERN int omp_get_partition_num_places(void) {
276 PRINT0(LD_IO, "call to omp_get_partition_num_places() returns 0\n");
277 return 0;
278 }
279
// The place partition is empty; `place_nums` is left untouched.
EXTERN void omp_get_partition_place_nums(int *place_nums) {
  PRINT0(LD_IO, "call to omp_get_partition_place_nums()\n");
}
283
omp_get_cancellation(void)284 EXTERN int omp_get_cancellation(void) {
285 int rc = 0;
286 PRINT(LD_IO, "call omp_get_cancellation() returns %d\n", rc);
287 return rc;
288 }
289
omp_set_default_device(int deviceId)290 EXTERN void omp_set_default_device(int deviceId) {
291 PRINT0(LD_IO, "call omp_get_default_device() is undef on device\n");
292 }
293
omp_get_default_device(void)294 EXTERN int omp_get_default_device(void) {
295 PRINT0(LD_IO,
296 "call omp_get_default_device() is undef on device, returns 0\n");
297 return 0;
298 }
299
omp_get_num_devices(void)300 EXTERN int omp_get_num_devices(void) {
301 PRINT0(LD_IO, "call omp_get_num_devices() is undef on device, returns 0\n");
302 return 0;
303 }
304
omp_get_num_teams(void)305 EXTERN int omp_get_num_teams(void) {
306 int rc = GetNumberOfOmpTeams();
307 PRINT(LD_IO, "call omp_get_num_teams() returns %d\n", rc);
308 return rc;
309 }
310
omp_get_team_num()311 EXTERN int omp_get_team_num() {
312 int rc = GetOmpTeamId();
313 PRINT(LD_IO, "call omp_get_team_num() returns %d\n", rc);
314 return rc;
315 }
316
317 // Unspecified on the device.
omp_get_initial_device(void)318 EXTERN int omp_get_initial_device(void) {
319 PRINT0(LD_IO, "call omp_get_initial_device() returns 0\n");
320 return 0;
321 }
322
323 // Unused for now.
omp_get_max_task_priority(void)324 EXTERN int omp_get_max_task_priority(void) {
325 PRINT0(LD_IO, "call omp_get_max_task_priority() returns 0\n");
326 return 0;
327 }
328
329 ////////////////////////////////////////////////////////////////////////////////
330 // locks
331 ////////////////////////////////////////////////////////////////////////////////
332
// Initialize a simple lock; must precede any set/unset/test on it.
EXTERN void omp_init_lock(omp_lock_t *lock) {
  __kmpc_impl_init_lock(lock);
  PRINT0(LD_IO, "call omp_init_lock()\n");
}
337
// Destroy a lock previously created with omp_init_lock.
EXTERN void omp_destroy_lock(omp_lock_t *lock) {
  __kmpc_impl_destroy_lock(lock);
  PRINT0(LD_IO, "call omp_destroy_lock()\n");
}
342
// Acquire the lock, blocking until it becomes available.
EXTERN void omp_set_lock(omp_lock_t *lock) {
  __kmpc_impl_set_lock(lock);
  PRINT0(LD_IO, "call omp_set_lock()\n");
}
347
// Release a lock held by the calling thread.
EXTERN void omp_unset_lock(omp_lock_t *lock) {
  __kmpc_impl_unset_lock(lock);
  PRINT0(LD_IO, "call omp_unset_lock()\n");
}
352
omp_test_lock(omp_lock_t * lock)353 EXTERN int omp_test_lock(omp_lock_t *lock) {
354 int rc = __kmpc_impl_test_lock(lock);
355 PRINT(LD_IO, "call omp_test_lock() return %d\n", rc);
356 return rc;
357 }
358
359 #pragma omp end declare target
360