1 /*
2  * kmp_tasking.cpp -- OpenMP 3.0 tasking support.
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "kmp.h"
14 #include "kmp_i18n.h"
15 #include "kmp_itt.h"
16 #include "kmp_stats.h"
17 #include "kmp_wait_release.h"
18 #include "kmp_taskdeps.h"
19 
20 #if OMPT_SUPPORT
21 #include "ompt-specific.h"
22 #endif
23 
24 #include "tsan_annotations.h"
25 
26 /* forward declaration */
27 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
28                                  kmp_info_t *this_thr);
29 static void __kmp_alloc_task_deque(kmp_info_t *thread,
30                                    kmp_thread_data_t *thread_data);
31 static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
32                                            kmp_task_team_t *task_team);
33 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask);
34 
35 #ifdef BUILD_TIED_TASK_STACK
36 
//  __kmp_trace_task_stack: print the tied tasks from the task stack in order
//  from top to bottom
39 //
40 //  gtid: global thread identifier for thread containing stack
41 //  thread_data: thread data for task team thread containing stack
42 //  threshold: value above which the trace statement triggers
43 //  location: string identifying call site of this function (for trace)
44 static void __kmp_trace_task_stack(kmp_int32 gtid,
45                                    kmp_thread_data_t *thread_data,
46                                    int threshold, char *location) {
47   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
48   kmp_taskdata_t **stack_top = task_stack->ts_top;
49   kmp_int32 entries = task_stack->ts_entries;
50   kmp_taskdata_t *tied_task;
51 
52   KA_TRACE(
53       threshold,
54       ("__kmp_trace_task_stack(start): location = %s, gtid = %d, entries = %d, "
55        "first_block = %p, stack_top = %p \n",
56        location, gtid, entries, task_stack->ts_first_block, stack_top));
57 
58   KMP_DEBUG_ASSERT(stack_top != NULL);
59   KMP_DEBUG_ASSERT(entries > 0);
60 
61   while (entries != 0) {
62     KMP_DEBUG_ASSERT(stack_top != &task_stack->ts_first_block.sb_block[0]);
63     // fix up ts_top if we need to pop from previous block
    if ((entries & TASK_STACK_INDEX_MASK) == 0) {
65       kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(stack_top);
66 
67       stack_block = stack_block->sb_prev;
68       stack_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
69     }
70 
71     // finish bookkeeping
72     stack_top--;
73     entries--;
74 
75     tied_task = *stack_top;
76 
77     KMP_DEBUG_ASSERT(tied_task != NULL);
78     KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
79 
80     KA_TRACE(threshold,
81              ("__kmp_trace_task_stack(%s):             gtid=%d, entry=%d, "
82               "stack_top=%p, tied_task=%p\n",
83               location, gtid, entries, stack_top, tied_task));
84   }
85   KMP_DEBUG_ASSERT(stack_top == &task_stack->ts_first_block.sb_block[0]);
86 
87   KA_TRACE(threshold,
88            ("__kmp_trace_task_stack(exit): location = %s, gtid = %d\n",
89             location, gtid));
90 }
91 
92 //  __kmp_init_task_stack: initialize the task stack for the first time
93 //  after a thread_data structure is created.
94 //  It should not be necessary to do this again (assuming the stack works).
95 //
96 //  gtid: global thread identifier of calling thread
97 //  thread_data: thread data for task team thread containing stack
98 static void __kmp_init_task_stack(kmp_int32 gtid,
99                                   kmp_thread_data_t *thread_data) {
100   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
101   kmp_stack_block_t *first_block;
102 
103   // set up the first block of the stack
104   first_block = &task_stack->ts_first_block;
105   task_stack->ts_top = (kmp_taskdata_t **)first_block;
106   memset((void *)first_block, '\0',
107          TASK_STACK_BLOCK_SIZE * sizeof(kmp_taskdata_t *));
108 
109   // initialize the stack to be empty
110   task_stack->ts_entries = TASK_STACK_EMPTY;
111   first_block->sb_next = NULL;
112   first_block->sb_prev = NULL;
113 }
114 
115 //  __kmp_free_task_stack: free the task stack when thread_data is destroyed.
116 //
117 //  gtid: global thread identifier for calling thread
118 //  thread_data: thread info for thread containing stack
static void __kmp_free_task_stack(kmp_int32 gtid,
                                  kmp_thread_data_t *thread_data) {
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
  kmp_stack_block_t *stack_block = &task_stack->ts_first_block;
123 
124   KMP_DEBUG_ASSERT(task_stack->ts_entries == TASK_STACK_EMPTY);
  // walk the blocks, freeing every block except the first (which is embedded
  // in task_stack)
126   while (stack_block != NULL) {
127     kmp_stack_block_t *next_block = (stack_block) ? stack_block->sb_next : NULL;
128 
129     stack_block->sb_next = NULL;
130     stack_block->sb_prev = NULL;
131     if (stack_block != &task_stack->ts_first_block) {
132       __kmp_thread_free(thread,
133                         stack_block); // free the block, if not the first
134     }
135     stack_block = next_block;
136   }
137   // initialize the stack to be empty
138   task_stack->ts_entries = 0;
139   task_stack->ts_top = NULL;
140 }
141 
142 //  __kmp_push_task_stack: Push the tied task onto the task stack.
143 //     Grow the stack if necessary by allocating another block.
144 //
145 //  gtid: global thread identifier for calling thread
146 //  thread: thread info for thread containing stack
147 //  tied_task: the task to push on the stack
148 static void __kmp_push_task_stack(kmp_int32 gtid, kmp_info_t *thread,
149                                   kmp_taskdata_t *tied_task) {
150   // GEH - need to consider what to do if tt_threads_data not allocated yet
151   kmp_thread_data_t *thread_data =
152       &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
153   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
154 
155   if (tied_task->td_flags.team_serial || tied_task->td_flags.tasking_ser) {
156     return; // Don't push anything on stack if team or team tasks are serialized
157   }
158 
159   KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
160   KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
161 
162   KA_TRACE(20,
163            ("__kmp_push_task_stack(enter): GTID: %d; THREAD: %p; TASK: %p\n",
164             gtid, thread, tied_task));
165   // Store entry
166   *(task_stack->ts_top) = tied_task;
167 
168   // Do bookkeeping for next push
169   task_stack->ts_top++;
170   task_stack->ts_entries++;
171 
  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
173     // Find beginning of this task block
174     kmp_stack_block_t *stack_block =
175         (kmp_stack_block_t *)(task_stack->ts_top - TASK_STACK_BLOCK_SIZE);
176 
177     // Check if we already have a block
178     if (stack_block->sb_next !=
179         NULL) { // reset ts_top to beginning of next block
180       task_stack->ts_top = &stack_block->sb_next->sb_block[0];
181     } else { // Alloc new block and link it up
182       kmp_stack_block_t *new_block = (kmp_stack_block_t *)__kmp_thread_calloc(
183           thread, sizeof(kmp_stack_block_t));
184 
185       task_stack->ts_top = &new_block->sb_block[0];
186       stack_block->sb_next = new_block;
187       new_block->sb_prev = stack_block;
188       new_block->sb_next = NULL;
189 
190       KA_TRACE(
191           30,
192           ("__kmp_push_task_stack(): GTID: %d; TASK: %p; Alloc new block: %p\n",
193            gtid, tied_task, new_block));
194     }
195   }
196   KA_TRACE(20, ("__kmp_push_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
197                 tied_task));
198 }
199 
//  __kmp_pop_task_stack: Pop the tied task from the task stack.  Don't return
//  the task, just check to make sure it matches the ending task passed in.
//
//  gtid: global thread identifier for the calling thread
//  thread: thread info structure containing stack
//  ending_task: the task that is ending (should match the task popped off the
//               stack)
207 static void __kmp_pop_task_stack(kmp_int32 gtid, kmp_info_t *thread,
208                                  kmp_taskdata_t *ending_task) {
209   // GEH - need to consider what to do if tt_threads_data not allocated yet
210   kmp_thread_data_t *thread_data =
      &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
212   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
213   kmp_taskdata_t *tied_task;
214 
215   if (ending_task->td_flags.team_serial || ending_task->td_flags.tasking_ser) {
216     // Don't pop anything from stack if team or team tasks are serialized
217     return;
218   }
219 
220   KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
221   KMP_DEBUG_ASSERT(task_stack->ts_entries > 0);
222 
223   KA_TRACE(20, ("__kmp_pop_task_stack(enter): GTID: %d; THREAD: %p\n", gtid,
224                 thread));
225 
226   // fix up ts_top if we need to pop from previous block
  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
228     kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(task_stack->ts_top);
229 
230     stack_block = stack_block->sb_prev;
231     task_stack->ts_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
232   }
233 
234   // finish bookkeeping
235   task_stack->ts_top--;
236   task_stack->ts_entries--;
237 
238   tied_task = *(task_stack->ts_top);
239 
240   KMP_DEBUG_ASSERT(tied_task != NULL);
241   KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
242   KMP_DEBUG_ASSERT(tied_task == ending_task); // If we built the stack correctly
243 
244   KA_TRACE(20, ("__kmp_pop_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
245                 tied_task));
246   return;
247 }
248 #endif /* BUILD_TIED_TASK_STACK */
249 
// returns true if the new task is allowed to execute, false otherwise
251 // checks Task Scheduling constraint (if requested) and
252 // mutexinoutset dependencies if any
253 static bool __kmp_task_is_allowed(int gtid, const kmp_int32 is_constrained,
254                                   const kmp_taskdata_t *tasknew,
255                                   const kmp_taskdata_t *taskcurr) {
256   if (is_constrained && (tasknew->td_flags.tiedness == TASK_TIED)) {
257     // Check if the candidate obeys the Task Scheduling Constraints (TSC)
258     // only descendant of all deferred tied tasks can be scheduled, checking
259     // the last one is enough, as it in turn is the descendant of all others
260     kmp_taskdata_t *current = taskcurr->td_last_tied;
261     KMP_DEBUG_ASSERT(current != NULL);
262     // check if the task is not suspended on barrier
263     if (current->td_flags.tasktype == TASK_EXPLICIT ||
264         current->td_taskwait_thread > 0) { // <= 0 on barrier
265       kmp_int32 level = current->td_level;
266       kmp_taskdata_t *parent = tasknew->td_parent;
267       while (parent != current && parent->td_level > level) {
268         // check generation up to the level of the current task
269         parent = parent->td_parent;
270         KMP_DEBUG_ASSERT(parent != NULL);
271       }
272       if (parent != current)
273         return false;
274     }
275   }
276   // Check mutexinoutset dependencies, acquire locks
277   kmp_depnode_t *node = tasknew->td_depnode;
278   if (node && (node->dn.mtx_num_locks > 0)) {
279     for (int i = 0; i < node->dn.mtx_num_locks; ++i) {
280       KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
281       if (__kmp_test_lock(node->dn.mtx_locks[i], gtid))
282         continue;
283       // could not get the lock, release previous locks
284       for (int j = i - 1; j >= 0; --j)
285         __kmp_release_lock(node->dn.mtx_locks[j], gtid);
286       return false;
287     }
288     // negative num_locks means all locks acquired successfully
289     node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
290   }
291   return true;
292 }
293 
294 // __kmp_realloc_task_deque:
295 // Re-allocates a task deque for a particular thread, copies the content from
296 // the old deque and adjusts the necessary data structures relating to the
297 // deque. This operation must be done with the deque_lock being held
298 static void __kmp_realloc_task_deque(kmp_info_t *thread,
299                                      kmp_thread_data_t *thread_data) {
300   kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
301   KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == size);
302   kmp_int32 new_size = 2 * size;
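  // Doubling keeps the deque size a power of two, so the TASK_DEQUE_MASK-based
  // index wrapping used below keeps working.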
303 
304   KE_TRACE(10, ("__kmp_realloc_task_deque: T#%d reallocating deque[from %d to "
305                 "%d] for thread_data %p\n",
306                 __kmp_gtid_from_thread(thread), size, new_size, thread_data));
307 
308   kmp_taskdata_t **new_deque =
309       (kmp_taskdata_t **)__kmp_allocate(new_size * sizeof(kmp_taskdata_t *));
310 
311   int i, j;
312   for (i = thread_data->td.td_deque_head, j = 0; j < size;
313        i = (i + 1) & TASK_DEQUE_MASK(thread_data->td), j++)
314     new_deque[j] = thread_data->td.td_deque[i];
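  // Tasks were copied in logical order starting at the old head, so the new
  // head is 0 and the new tail equals the old element count (== size).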
315 
316   __kmp_free(thread_data->td.td_deque);
317 
318   thread_data->td.td_deque_head = 0;
319   thread_data->td.td_deque_tail = size;
320   thread_data->td.td_deque = new_deque;
321   thread_data->td.td_deque_size = new_size;
322 }
323 
324 //  __kmp_push_task: Add a task to the thread's deque
325 static kmp_int32 __kmp_push_task(kmp_int32 gtid, kmp_task_t *task) {
326   kmp_info_t *thread = __kmp_threads[gtid];
327   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
328   kmp_task_team_t *task_team = thread->th.th_task_team;
329   kmp_int32 tid = __kmp_tid_from_gtid(gtid);
330   kmp_thread_data_t *thread_data;
331 
332   KA_TRACE(20,
333            ("__kmp_push_task: T#%d trying to push task %p.\n", gtid, taskdata));
334 
335   if (taskdata->td_flags.tiedness == TASK_UNTIED) {
336     // untied task needs to increment counter so that the task structure is not
337     // freed prematurely
338     kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
339     KMP_DEBUG_USE_VAR(counter);
340     KA_TRACE(
341         20,
342         ("__kmp_push_task: T#%d untied_count (%d) incremented for task %p\n",
343          gtid, counter, taskdata));
344   }
345 
346   // The first check avoids building task_team thread data if serialized
347   if (taskdata->td_flags.task_serial) {
348     KA_TRACE(20, ("__kmp_push_task: T#%d team serialized; returning "
349                   "TASK_NOT_PUSHED for task %p\n",
350                   gtid, taskdata));
351     return TASK_NOT_PUSHED;
352   }
353 
354   // Now that serialized tasks have returned, we can assume that we are not in
355   // immediate exec mode
356   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
357   if (!KMP_TASKING_ENABLED(task_team)) {
358     __kmp_enable_tasking(task_team, thread);
359   }
360   KMP_DEBUG_ASSERT(TCR_4(task_team->tt.tt_found_tasks) == TRUE);
361   KMP_DEBUG_ASSERT(TCR_PTR(task_team->tt.tt_threads_data) != NULL);
362 
363   // Find tasking deque specific to encountering thread
364   thread_data = &task_team->tt.tt_threads_data[tid];
365 
366   // No lock needed since only owner can allocate
367   if (thread_data->td.td_deque == NULL) {
368     __kmp_alloc_task_deque(thread, thread_data);
369   }
370 
371   int locked = 0;
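  // If the deque is full and throttling is enabled, prefer not to grow it:
  // returning TASK_NOT_PUSHED lets the caller run the task immediately
  // instead, provided the task may legally execute now
  // (__kmp_task_is_allowed). Otherwise the deque is reallocated so the task
  // can still be deferred.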
372   // Check if deque is full
373   if (TCR_4(thread_data->td.td_deque_ntasks) >=
374       TASK_DEQUE_SIZE(thread_data->td)) {
375     if (__kmp_enable_task_throttling &&
376         __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
377                               thread->th.th_current_task)) {
378       KA_TRACE(20, ("__kmp_push_task: T#%d deque is full; returning "
379                     "TASK_NOT_PUSHED for task %p\n",
380                     gtid, taskdata));
381       return TASK_NOT_PUSHED;
382     } else {
383       __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
384       locked = 1;
385       if (TCR_4(thread_data->td.td_deque_ntasks) >=
386           TASK_DEQUE_SIZE(thread_data->td)) {
387         // expand deque to push the task which is not allowed to execute
388         __kmp_realloc_task_deque(thread, thread_data);
389       }
390     }
391   }
392   // Lock the deque for the task push operation
393   if (!locked) {
394     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
395     // Need to recheck as we can get a proxy task from thread outside of OpenMP
396     if (TCR_4(thread_data->td.td_deque_ntasks) >=
397         TASK_DEQUE_SIZE(thread_data->td)) {
398       if (__kmp_enable_task_throttling &&
399           __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
400                                 thread->th.th_current_task)) {
401         __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
402         KA_TRACE(20, ("__kmp_push_task: T#%d deque is full on 2nd check; "
403                       "returning TASK_NOT_PUSHED for task %p\n",
404                       gtid, taskdata));
405         return TASK_NOT_PUSHED;
406       } else {
407         // expand deque to push the task which is not allowed to execute
408         __kmp_realloc_task_deque(thread, thread_data);
409       }
410     }
411   }
  // Must have room since no thread other than the calling thread can add tasks
413   KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
414                    TASK_DEQUE_SIZE(thread_data->td));
415 
416   thread_data->td.td_deque[thread_data->td.td_deque_tail] =
417       taskdata; // Push taskdata
418   // Wrap index.
419   thread_data->td.td_deque_tail =
420       (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
421   TCW_4(thread_data->td.td_deque_ntasks,
422         TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
423 
424   KA_TRACE(20, ("__kmp_push_task: T#%d returning TASK_SUCCESSFULLY_PUSHED: "
425                 "task=%p ntasks=%d head=%u tail=%u\n",
426                 gtid, taskdata, thread_data->td.td_deque_ntasks,
427                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
428 
429   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
430 
431   return TASK_SUCCESSFULLY_PUSHED;
432 }
433 
// __kmp_pop_current_task_from_thread: restore the current task of the given
// thread to its parent when a team ends
436 //
437 // this_thr: thread structure to set current_task in.
438 void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr) {
439   KF_TRACE(10, ("__kmp_pop_current_task_from_thread(enter): T#%d "
440                 "this_thread=%p, curtask=%p, "
441                 "curtask_parent=%p\n",
442                 0, this_thr, this_thr->th.th_current_task,
443                 this_thr->th.th_current_task->td_parent));
444 
445   this_thr->th.th_current_task = this_thr->th.th_current_task->td_parent;
446 
447   KF_TRACE(10, ("__kmp_pop_current_task_from_thread(exit): T#%d "
448                 "this_thread=%p, curtask=%p, "
449                 "curtask_parent=%p\n",
450                 0, this_thr, this_thr->th.th_current_task,
451                 this_thr->th.th_current_task->td_parent));
452 }
453 
454 // __kmp_push_current_task_to_thread: set up current task in called thread for a
455 // new team
456 //
457 // this_thr: thread structure to set up
458 // team: team for implicit task data
459 // tid: thread within team to set up
460 void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team,
461                                        int tid) {
  // the current task of the thread is the parent of the newly created implicit
  // tasks of the new team
464   KF_TRACE(10, ("__kmp_push_current_task_to_thread(enter): T#%d this_thread=%p "
465                 "curtask=%p "
466                 "parent_task=%p\n",
467                 tid, this_thr, this_thr->th.th_current_task,
468                 team->t.t_implicit_task_taskdata[tid].td_parent));
469 
470   KMP_DEBUG_ASSERT(this_thr != NULL);
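  // For the master thread (tid 0), the new team's implicit task is chained
  // under the thread's current task; the other threads reuse the master's
  // parent as the parent of their implicit task.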
471 
472   if (tid == 0) {
473     if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) {
474       team->t.t_implicit_task_taskdata[0].td_parent =
475           this_thr->th.th_current_task;
476       this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0];
477     }
478   } else {
479     team->t.t_implicit_task_taskdata[tid].td_parent =
480         team->t.t_implicit_task_taskdata[0].td_parent;
481     this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid];
482   }
483 
484   KF_TRACE(10, ("__kmp_push_current_task_to_thread(exit): T#%d this_thread=%p "
485                 "curtask=%p "
486                 "parent_task=%p\n",
487                 tid, this_thr, this_thr->th.th_current_task,
488                 team->t.t_implicit_task_taskdata[tid].td_parent));
489 }
490 
491 // __kmp_task_start: bookkeeping for a task starting execution
492 //
493 // GTID: global thread id of calling thread
494 // task: task starting execution
495 // current_task: task suspending
496 static void __kmp_task_start(kmp_int32 gtid, kmp_task_t *task,
497                              kmp_taskdata_t *current_task) {
498   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
499   kmp_info_t *thread = __kmp_threads[gtid];
500 
501   KA_TRACE(10,
502            ("__kmp_task_start(enter): T#%d starting task %p: current_task=%p\n",
503             gtid, taskdata, current_task));
504 
505   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
506 
507   // mark currently executing task as suspended
508   // TODO: GEH - make sure root team implicit task is initialized properly.
509   // KMP_DEBUG_ASSERT( current_task -> td_flags.executing == 1 );
510   current_task->td_flags.executing = 0;
511 
512 // Add task to stack if tied
513 #ifdef BUILD_TIED_TASK_STACK
514   if (taskdata->td_flags.tiedness == TASK_TIED) {
515     __kmp_push_task_stack(gtid, thread, taskdata);
516   }
517 #endif /* BUILD_TIED_TASK_STACK */
518 
519   // mark starting task as executing and as current task
520   thread->th.th_current_task = taskdata;
521 
522   KMP_DEBUG_ASSERT(taskdata->td_flags.started == 0 ||
523                    taskdata->td_flags.tiedness == TASK_UNTIED);
524   KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0 ||
525                    taskdata->td_flags.tiedness == TASK_UNTIED);
526   taskdata->td_flags.started = 1;
527   taskdata->td_flags.executing = 1;
528   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
529   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
530 
531   // GEH TODO: shouldn't we pass some sort of location identifier here?
532   // APT: yes, we will pass location here.
533   // need to store current thread state (in a thread or taskdata structure)
534   // before setting work_state, otherwise wrong state is set after end of task
535 
536   KA_TRACE(10, ("__kmp_task_start(exit): T#%d task=%p\n", gtid, taskdata));
537 
538   return;
539 }
540 
541 #if OMPT_SUPPORT
542 //------------------------------------------------------------------------------
543 // __ompt_task_init:
544 //   Initialize OMPT fields maintained by a task. This will only be called after
545 //   ompt_start_tool, so we already know whether ompt is enabled or not.
546 
547 static inline void __ompt_task_init(kmp_taskdata_t *task, int tid) {
548   // The calls to __ompt_task_init already have the ompt_enabled condition.
549   task->ompt_task_info.task_data.value = 0;
550   task->ompt_task_info.frame.exit_frame = ompt_data_none;
551   task->ompt_task_info.frame.enter_frame = ompt_data_none;
  task->ompt_task_info.frame.exit_frame_flags =
      ompt_frame_runtime | ompt_frame_framepointer;
  task->ompt_task_info.frame.enter_frame_flags =
      ompt_frame_runtime | ompt_frame_framepointer;
554 }
555 
556 // __ompt_task_start:
557 //   Build and trigger task-begin event
558 static inline void __ompt_task_start(kmp_task_t *task,
559                                      kmp_taskdata_t *current_task,
560                                      kmp_int32 gtid) {
561   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
562   ompt_task_status_t status = ompt_task_switch;
563   if (__kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded) {
564     status = ompt_task_yield;
565     __kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded = 0;
566   }
567   /* let OMPT know that we're about to run this task */
568   if (ompt_enabled.ompt_callback_task_schedule) {
569     ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
570         &(current_task->ompt_task_info.task_data), status,
571         &(taskdata->ompt_task_info.task_data));
572   }
573   taskdata->ompt_task_info.scheduling_parent = current_task;
574 }
575 
576 // __ompt_task_finish:
577 //   Build and trigger final task-schedule event
578 static inline void __ompt_task_finish(kmp_task_t *task,
579                                       kmp_taskdata_t *resumed_task,
580                                       ompt_task_status_t status) {
581   if (ompt_enabled.ompt_callback_task_schedule) {
582     kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
583     if (__kmp_omp_cancellation && taskdata->td_taskgroup &&
584         taskdata->td_taskgroup->cancel_request == cancel_taskgroup) {
585       status = ompt_task_cancel;
586     }
587 
588     /* let OMPT know that we're returning to the callee task */
589     ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
590         &(taskdata->ompt_task_info.task_data), status,
591         (resumed_task ? &(resumed_task->ompt_task_info.task_data) : NULL));
592   }
593 }
594 #endif
595 
596 template <bool ompt>
597 static void __kmpc_omp_task_begin_if0_template(ident_t *loc_ref, kmp_int32 gtid,
598                                                kmp_task_t *task,
599                                                void *frame_address,
600                                                void *return_address) {
601   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
602   kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
603 
604   KA_TRACE(10, ("__kmpc_omp_task_begin_if0(enter): T#%d loc=%p task=%p "
605                 "current_task=%p\n",
606                 gtid, loc_ref, taskdata, current_task));
607 
608   if (taskdata->td_flags.tiedness == TASK_UNTIED) {
609     // untied task needs to increment counter so that the task structure is not
610     // freed prematurely
611     kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
612     KMP_DEBUG_USE_VAR(counter);
613     KA_TRACE(20, ("__kmpc_omp_task_begin_if0: T#%d untied_count (%d) "
614                   "incremented for task %p\n",
615                   gtid, counter, taskdata));
616   }
617 
618   taskdata->td_flags.task_serial =
619       1; // Execute this task immediately, not deferred.
620   __kmp_task_start(gtid, task, current_task);
621 
622 #if OMPT_SUPPORT
623   if (ompt) {
624     if (current_task->ompt_task_info.frame.enter_frame.ptr == NULL) {
625       current_task->ompt_task_info.frame.enter_frame.ptr =
626           taskdata->ompt_task_info.frame.exit_frame.ptr = frame_address;
      current_task->ompt_task_info.frame.enter_frame_flags =
          taskdata->ompt_task_info.frame.exit_frame_flags =
              ompt_frame_application | ompt_frame_framepointer;
629     }
630     if (ompt_enabled.ompt_callback_task_create) {
631       ompt_task_info_t *parent_info = &(current_task->ompt_task_info);
632       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
633           &(parent_info->task_data), &(parent_info->frame),
634           &(taskdata->ompt_task_info.task_data),
635           ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(taskdata), 0,
636           return_address);
637     }
638     __ompt_task_start(task, current_task, gtid);
639   }
640 #endif // OMPT_SUPPORT
641 
642   KA_TRACE(10, ("__kmpc_omp_task_begin_if0(exit): T#%d loc=%p task=%p,\n", gtid,
643                 loc_ref, taskdata));
644 }
645 
646 #if OMPT_SUPPORT
647 OMPT_NOINLINE
648 static void __kmpc_omp_task_begin_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
649                                            kmp_task_t *task,
650                                            void *frame_address,
651                                            void *return_address) {
652   __kmpc_omp_task_begin_if0_template<true>(loc_ref, gtid, task, frame_address,
653                                            return_address);
654 }
655 #endif // OMPT_SUPPORT
656 
657 // __kmpc_omp_task_begin_if0: report that a given serialized task has started
658 // execution
659 //
660 // loc_ref: source location information; points to beginning of task block.
661 // gtid: global thread number.
662 // task: task thunk for the started task.
663 void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
664                                kmp_task_t *task) {
665 #if OMPT_SUPPORT
666   if (UNLIKELY(ompt_enabled.enabled)) {
667     OMPT_STORE_RETURN_ADDRESS(gtid);
668     __kmpc_omp_task_begin_if0_ompt(loc_ref, gtid, task,
669                                    OMPT_GET_FRAME_ADDRESS(1),
670                                    OMPT_LOAD_RETURN_ADDRESS(gtid));
671     return;
672   }
673 #endif
674   __kmpc_omp_task_begin_if0_template<false>(loc_ref, gtid, task, NULL, NULL);
675 }
676 
677 #ifdef TASK_UNUSED
678 // __kmpc_omp_task_begin: report that a given task has started execution
679 // NEVER GENERATED BY COMPILER, DEPRECATED!!!
680 void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task) {
681   kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
682 
683   KA_TRACE(
684       10,
685       ("__kmpc_omp_task_begin(enter): T#%d loc=%p task=%p current_task=%p\n",
686        gtid, loc_ref, KMP_TASK_TO_TASKDATA(task), current_task));
687 
688   __kmp_task_start(gtid, task, current_task);
689 
690   KA_TRACE(10, ("__kmpc_omp_task_begin(exit): T#%d loc=%p task=%p,\n", gtid,
691                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
692   return;
693 }
694 #endif // TASK_UNUSED
695 
696 // __kmp_free_task: free the current task space and the space for shareds
697 //
698 // gtid: Global thread ID of calling thread
699 // taskdata: task to free
700 // thread: thread data structure of caller
701 static void __kmp_free_task(kmp_int32 gtid, kmp_taskdata_t *taskdata,
702                             kmp_info_t *thread) {
703   KA_TRACE(30, ("__kmp_free_task: T#%d freeing data from task %p\n", gtid,
704                 taskdata));
705 
706   // Check to make sure all flags and counters have the correct values
707   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
708   KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0);
709   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 1);
710   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
711   KMP_DEBUG_ASSERT(taskdata->td_allocated_child_tasks == 0 ||
712                    taskdata->td_flags.task_serial == 1);
713   KMP_DEBUG_ASSERT(taskdata->td_incomplete_child_tasks == 0);
714 
715   taskdata->td_flags.freed = 1;
716   ANNOTATE_HAPPENS_BEFORE(taskdata);
717 // deallocate the taskdata and shared variable blocks associated with this task
718 #if USE_FAST_MEMORY
719   __kmp_fast_free(thread, taskdata);
720 #else /* ! USE_FAST_MEMORY */
721   __kmp_thread_free(thread, taskdata);
722 #endif
723 
724   KA_TRACE(20, ("__kmp_free_task: T#%d freed task %p\n", gtid, taskdata));
725 }
726 
727 // __kmp_free_task_and_ancestors: free the current task and ancestors without
728 // children
729 //
730 // gtid: Global thread ID of calling thread
731 // taskdata: task to free
732 // thread: thread data structure of caller
733 static void __kmp_free_task_and_ancestors(kmp_int32 gtid,
734                                           kmp_taskdata_t *taskdata,
735                                           kmp_info_t *thread) {
736   // Proxy tasks must always be allowed to free their parents
737   // because they can be run in background even in serial mode.
738   kmp_int32 team_serial =
739       (taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) &&
740       !taskdata->td_flags.proxy;
741   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
742 
743   kmp_int32 children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
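  // KMP_ATOMIC_DEC returns the value before the decrement, so the "- 1" above
  // yields the counter value after this decrement.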
744   KMP_DEBUG_ASSERT(children >= 0);
745 
746   // Now, go up the ancestor tree to see if any ancestors can now be freed.
747   while (children == 0) {
748     kmp_taskdata_t *parent_taskdata = taskdata->td_parent;
749 
750     KA_TRACE(20, ("__kmp_free_task_and_ancestors(enter): T#%d task %p complete "
751                   "and freeing itself\n",
752                   gtid, taskdata));
753 
754     // --- Deallocate my ancestor task ---
755     __kmp_free_task(gtid, taskdata, thread);
756 
757     taskdata = parent_taskdata;
758 
759     if (team_serial)
760       return;
761     // Stop checking ancestors at implicit task instead of walking up ancestor
762     // tree to avoid premature deallocation of ancestors.
763     if (taskdata->td_flags.tasktype == TASK_IMPLICIT) {
764       if (taskdata->td_dephash) { // do we need to cleanup dephash?
765         int children = KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks);
766         kmp_tasking_flags_t flags_old = taskdata->td_flags;
767         if (children == 0 && flags_old.complete == 1) {
768           kmp_tasking_flags_t flags_new = flags_old;
769           flags_new.complete = 0;
770           if (KMP_COMPARE_AND_STORE_ACQ32(
771                   RCAST(kmp_int32 *, &taskdata->td_flags),
772                   *RCAST(kmp_int32 *, &flags_old),
773                   *RCAST(kmp_int32 *, &flags_new))) {
774             KA_TRACE(100, ("__kmp_free_task_and_ancestors: T#%d cleans "
775                            "dephash of implicit task %p\n",
776                            gtid, taskdata));
777             // cleanup dephash of finished implicit task
778             __kmp_dephash_free_entries(thread, taskdata->td_dephash);
779           }
780         }
781       }
782       return;
783     }
784     // Predecrement simulated by "- 1" calculation
785     children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
786     KMP_DEBUG_ASSERT(children >= 0);
787   }
788 
789   KA_TRACE(
790       20, ("__kmp_free_task_and_ancestors(exit): T#%d task %p has %d children; "
791            "not freeing it yet\n",
792            gtid, taskdata, children));
793 }
794 
795 // __kmp_task_finish: bookkeeping to do when a task finishes execution
796 //
797 // gtid: global thread ID for calling thread
798 // task: task to be finished
799 // resumed_task: task to be resumed.  (may be NULL if task is serialized)
800 //
801 // template<ompt>: effectively ompt_enabled.enabled!=0
// the version with ompt=false is inlined, allowing the compiler to optimize
// away all OMPT code in this case
804 template <bool ompt>
805 static void __kmp_task_finish(kmp_int32 gtid, kmp_task_t *task,
806                               kmp_taskdata_t *resumed_task) {
807   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
808   kmp_info_t *thread = __kmp_threads[gtid];
809   kmp_task_team_t *task_team =
810       thread->th.th_task_team; // might be NULL for serial teams...
811   kmp_int32 children = 0;
812 
813   KA_TRACE(10, ("__kmp_task_finish(enter): T#%d finishing task %p and resuming "
814                 "task %p\n",
815                 gtid, taskdata, resumed_task));
816 
817   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
818 
819 // Pop task from stack if tied
820 #ifdef BUILD_TIED_TASK_STACK
821   if (taskdata->td_flags.tiedness == TASK_TIED) {
822     __kmp_pop_task_stack(gtid, thread, taskdata);
823   }
824 #endif /* BUILD_TIED_TASK_STACK */
825 
826   if (taskdata->td_flags.tiedness == TASK_UNTIED) {
827     // untied task needs to check the counter so that the task structure is not
828     // freed prematurely
829     kmp_int32 counter = KMP_ATOMIC_DEC(&taskdata->td_untied_count) - 1;
830     KA_TRACE(
831         20,
832         ("__kmp_task_finish: T#%d untied_count (%d) decremented for task %p\n",
833          gtid, counter, taskdata));
834     if (counter > 0) {
835       // untied task is not done, to be continued possibly by other thread, do
836       // not free it now
837       if (resumed_task == NULL) {
838         KMP_DEBUG_ASSERT(taskdata->td_flags.task_serial);
839         resumed_task = taskdata->td_parent; // In a serialized task, the resumed
840         // task is the parent
841       }
842       thread->th.th_current_task = resumed_task; // restore current_task
843       resumed_task->td_flags.executing = 1; // resume previous task
844       KA_TRACE(10, ("__kmp_task_finish(exit): T#%d partially done task %p, "
845                     "resuming task %p\n",
846                     gtid, taskdata, resumed_task));
847       return;
848     }
849   }
850 
851   // Check mutexinoutset dependencies, release locks
852   kmp_depnode_t *node = taskdata->td_depnode;
853   if (node && (node->dn.mtx_num_locks < 0)) {
854     // negative num_locks means all locks were acquired
855     node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
856     for (int i = node->dn.mtx_num_locks - 1; i >= 0; --i) {
857       KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
858       __kmp_release_lock(node->dn.mtx_locks[i], gtid);
859     }
860   }
861 
862   // bookkeeping for resuming task:
863   // GEH - note tasking_ser => task_serial
864   KMP_DEBUG_ASSERT(
865       (taskdata->td_flags.tasking_ser || taskdata->td_flags.task_serial) ==
866       taskdata->td_flags.task_serial);
867   if (taskdata->td_flags.task_serial) {
868     if (resumed_task == NULL) {
869       resumed_task = taskdata->td_parent; // In a serialized task, the resumed
870       // task is the parent
871     }
872   } else {
873     KMP_DEBUG_ASSERT(resumed_task !=
874                      NULL); // verify that resumed task is passed as argument
875   }
876 
  /* If the task's destructor thunk flag has been set, we need to invoke the
878      destructor thunk that has been generated by the compiler. The code is
879      placed here, since at this point other tasks might have been released
880      hence overlapping the destructor invocations with some other work in the
881      released tasks.  The OpenMP spec is not specific on when the destructors
882      are invoked, so we should be free to choose. */
883   if (taskdata->td_flags.destructors_thunk) {
884     kmp_routine_entry_t destr_thunk = task->data1.destructors;
885     KMP_ASSERT(destr_thunk);
886     destr_thunk(gtid, task);
887   }
888 
889   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
890   KMP_DEBUG_ASSERT(taskdata->td_flags.started == 1);
891   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
892 
893   bool detach = false;
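  // Detachable tasks: if the allow-completion event has not been fulfilled
  // yet, the task is turned into a proxy task here rather than completed; the
  // completion bookkeeping is deferred until the event is fulfilled
  // (omp_fulfill_event).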
894   if (taskdata->td_flags.detachable == TASK_DETACHABLE) {
895     if (taskdata->td_allow_completion_event.type ==
896         KMP_EVENT_ALLOW_COMPLETION) {
897       // event hasn't been fulfilled yet. Try to detach task.
898       __kmp_acquire_tas_lock(&taskdata->td_allow_completion_event.lock, gtid);
899       if (taskdata->td_allow_completion_event.type ==
900           KMP_EVENT_ALLOW_COMPLETION) {
901         // task finished execution
902         KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
903         taskdata->td_flags.executing = 0; // suspend the finishing task
904 
905 #if OMPT_SUPPORT
        // For a detached task that has not completed yet, report the switch
        // away from it here; omp_fulfill_event later signals completion.
        // Locking is necessary to avoid a race with ompt_task_late_fulfill.
909         if (ompt)
910           __ompt_task_finish(task, resumed_task, ompt_task_detach);
911 #endif
912 
913         // no access to taskdata after this point!
914         // __kmp_fulfill_event might free taskdata at any time from now
915 
916         taskdata->td_flags.proxy = TASK_PROXY; // proxify!
917         detach = true;
918       }
919       __kmp_release_tas_lock(&taskdata->td_allow_completion_event.lock, gtid);
920     }
921   }
922 
923   if (!detach) {
924     taskdata->td_flags.complete = 1; // mark the task as completed
925 
926 #if OMPT_SUPPORT
927     // This is not a detached task, we are done here
928     if (ompt)
929       __ompt_task_finish(task, resumed_task, ompt_task_complete);
930 #endif
931 
932     // Only need to keep track of count if team parallel and tasking not
933     // serialized, or task is detachable and event has already been fulfilled
934     if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) ||
935         taskdata->td_flags.detachable == TASK_DETACHABLE) {
936       // Predecrement simulated by "- 1" calculation
937       children =
938           KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
939       KMP_DEBUG_ASSERT(children >= 0);
940       if (taskdata->td_taskgroup)
941         KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
942       __kmp_release_deps(gtid, taskdata);
943     } else if (task_team && task_team->tt.tt_found_proxy_tasks) {
944       // if we found proxy tasks there could exist a dependency chain
945       // with the proxy task as origin
946       __kmp_release_deps(gtid, taskdata);
947     }
    // td_flags.executing must be marked as 0 after __kmp_release_deps has been
    // called. Otherwise, if a task is executed immediately from the
    // release_deps code, the flag will be reset to 1 again by this same
    // function
952     KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
953     taskdata->td_flags.executing = 0; // suspend the finishing task
954   }
955 
956 
957   KA_TRACE(
958       20, ("__kmp_task_finish: T#%d finished task %p, %d incomplete children\n",
959            gtid, taskdata, children));
960 
961   // Free this task and then ancestor tasks if they have no children.
962   // Restore th_current_task first as suggested by John:
963   // johnmc: if an asynchronous inquiry peers into the runtime system
964   // it doesn't see the freed task as the current task.
965   thread->th.th_current_task = resumed_task;
966   if (!detach)
967     __kmp_free_task_and_ancestors(gtid, taskdata, thread);
968 
969   // TODO: GEH - make sure root team implicit task is initialized properly.
970   // KMP_DEBUG_ASSERT( resumed_task->td_flags.executing == 0 );
971   resumed_task->td_flags.executing = 1; // resume previous task
972 
973   KA_TRACE(
974       10, ("__kmp_task_finish(exit): T#%d finished task %p, resuming task %p\n",
975            gtid, taskdata, resumed_task));
976 
977   return;
978 }
979 
980 template <bool ompt>
981 static void __kmpc_omp_task_complete_if0_template(ident_t *loc_ref,
982                                                   kmp_int32 gtid,
983                                                   kmp_task_t *task) {
984   KA_TRACE(10, ("__kmpc_omp_task_complete_if0(enter): T#%d loc=%p task=%p\n",
985                 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
986   // this routine will provide task to resume
987   __kmp_task_finish<ompt>(gtid, task, NULL);
988 
989   KA_TRACE(10, ("__kmpc_omp_task_complete_if0(exit): T#%d loc=%p task=%p\n",
990                 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
991 
992 #if OMPT_SUPPORT
993   if (ompt) {
994     ompt_frame_t *ompt_frame;
995     __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
996     ompt_frame->enter_frame = ompt_data_none;
    ompt_frame->enter_frame_flags =
        ompt_frame_runtime | ompt_frame_framepointer;
998   }
999 #endif
1000 
1001   return;
1002 }
1003 
1004 #if OMPT_SUPPORT
1005 OMPT_NOINLINE
1006 void __kmpc_omp_task_complete_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
1007                                        kmp_task_t *task) {
1008   __kmpc_omp_task_complete_if0_template<true>(loc_ref, gtid, task);
1009 }
1010 #endif // OMPT_SUPPORT
1011 
1012 // __kmpc_omp_task_complete_if0: report that a task has completed execution
1013 //
1014 // loc_ref: source location information; points to end of task block.
1015 // gtid: global thread number.
1016 // task: task thunk for the completed task.
1017 void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
1018                                   kmp_task_t *task) {
1019 #if OMPT_SUPPORT
1020   if (UNLIKELY(ompt_enabled.enabled)) {
1021     __kmpc_omp_task_complete_if0_ompt(loc_ref, gtid, task);
1022     return;
1023   }
1024 #endif
1025   __kmpc_omp_task_complete_if0_template<false>(loc_ref, gtid, task);
1026 }
1027 
1028 #ifdef TASK_UNUSED
1029 // __kmpc_omp_task_complete: report that a task has completed execution
1030 // NEVER GENERATED BY COMPILER, DEPRECATED!!!
1031 void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
1032                               kmp_task_t *task) {
1033   KA_TRACE(10, ("__kmpc_omp_task_complete(enter): T#%d loc=%p task=%p\n", gtid,
1034                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
1035 
1036   __kmp_task_finish<false>(gtid, task,
1037                            NULL); // Not sure how to find task to resume
1038 
1039   KA_TRACE(10, ("__kmpc_omp_task_complete(exit): T#%d loc=%p task=%p\n", gtid,
1040                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
1041   return;
1042 }
1043 #endif // TASK_UNUSED
1044 
1045 // __kmp_init_implicit_task: Initialize the appropriate fields in the implicit
1046 // task for a given thread
1047 //
1048 // loc_ref:  reference to source location of parallel region
1049 // this_thr:  thread data structure corresponding to implicit task
1050 // team: team for this_thr
1051 // tid: thread id of given thread within team
1052 // set_curr_task: TRUE if need to push current task to thread
// NOTE: Routine does not set up the implicit task ICVs.  This is assumed to
1054 // have already been done elsewhere.
1055 // TODO: Get better loc_ref.  Value passed in may be NULL
1056 void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
1057                               kmp_team_t *team, int tid, int set_curr_task) {
1058   kmp_taskdata_t *task = &team->t.t_implicit_task_taskdata[tid];
1059 
1060   KF_TRACE(
1061       10,
1062       ("__kmp_init_implicit_task(enter): T#:%d team=%p task=%p, reinit=%s\n",
1063        tid, team, task, set_curr_task ? "TRUE" : "FALSE"));
1064 
1065   task->td_task_id = KMP_GEN_TASK_ID();
1066   task->td_team = team;
1067   //    task->td_parent   = NULL;  // fix for CQ230101 (broken parent task info
1068   //    in debugger)
1069   task->td_ident = loc_ref;
1070   task->td_taskwait_ident = NULL;
1071   task->td_taskwait_counter = 0;
1072   task->td_taskwait_thread = 0;
1073 
1074   task->td_flags.tiedness = TASK_TIED;
1075   task->td_flags.tasktype = TASK_IMPLICIT;
1076   task->td_flags.proxy = TASK_FULL;
1077 
1078   // All implicit tasks are executed immediately, not deferred
1079   task->td_flags.task_serial = 1;
1080   task->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1081   task->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
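  // tasking_ser: the runtime is operating in immediate-execution mode (tasking
  // disabled); team_serial: the enclosing team itself is serialized.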
1082 
1083   task->td_flags.started = 1;
1084   task->td_flags.executing = 1;
1085   task->td_flags.complete = 0;
1086   task->td_flags.freed = 0;
1087 
1088   task->td_depnode = NULL;
1089   task->td_last_tied = task;
1090   task->td_allow_completion_event.type = KMP_EVENT_UNINITIALIZED;
1091 
1092   if (set_curr_task) { // only do this init first time thread is created
1093     KMP_ATOMIC_ST_REL(&task->td_incomplete_child_tasks, 0);
1094     // Not used: don't need to deallocate implicit task
1095     KMP_ATOMIC_ST_REL(&task->td_allocated_child_tasks, 0);
1096     task->td_taskgroup = NULL; // An implicit task does not have taskgroup
1097     task->td_dephash = NULL;
1098     __kmp_push_current_task_to_thread(this_thr, team, tid);
1099   } else {
1100     KMP_DEBUG_ASSERT(task->td_incomplete_child_tasks == 0);
1101     KMP_DEBUG_ASSERT(task->td_allocated_child_tasks == 0);
1102   }
1103 
1104 #if OMPT_SUPPORT
1105   if (UNLIKELY(ompt_enabled.enabled))
1106     __ompt_task_init(task, tid);
1107 #endif
1108 
1109   KF_TRACE(10, ("__kmp_init_implicit_task(exit): T#:%d team=%p task=%p\n", tid,
1110                 team, task));
1111 }
1112 
// __kmp_finish_implicit_task: Release resources associated with implicit tasks
1114 // at the end of parallel regions. Some resources are kept for reuse in the next
1115 // parallel region.
1116 //
1117 // thread:  thread data structure corresponding to implicit task
1118 void __kmp_finish_implicit_task(kmp_info_t *thread) {
1119   kmp_taskdata_t *task = thread->th.th_current_task;
1120   if (task->td_dephash) {
1121     int children;
1122     task->td_flags.complete = 1;
1123     children = KMP_ATOMIC_LD_ACQ(&task->td_incomplete_child_tasks);
1124     kmp_tasking_flags_t flags_old = task->td_flags;
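    // The CAS below flips the "complete" bit back to 0, so only the thread
    // that wins the exchange performs the dephash cleanup.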
1125     if (children == 0 && flags_old.complete == 1) {
1126       kmp_tasking_flags_t flags_new = flags_old;
1127       flags_new.complete = 0;
1128       if (KMP_COMPARE_AND_STORE_ACQ32(RCAST(kmp_int32 *, &task->td_flags),
1129                                       *RCAST(kmp_int32 *, &flags_old),
1130                                       *RCAST(kmp_int32 *, &flags_new))) {
1131         KA_TRACE(100, ("__kmp_finish_implicit_task: T#%d cleans "
1132                        "dephash of implicit task %p\n",
1133                        thread->th.th_info.ds.ds_gtid, task));
1134         __kmp_dephash_free_entries(thread, task->td_dephash);
1135       }
1136     }
1137   }
1138 }
1139 
// __kmp_free_implicit_task: Release resources associated with implicit tasks
// when these tasks are destroyed
1142 //
1143 // thread:  thread data structure corresponding to implicit task
1144 void __kmp_free_implicit_task(kmp_info_t *thread) {
1145   kmp_taskdata_t *task = thread->th.th_current_task;
1146   if (task && task->td_dephash) {
1147     __kmp_dephash_free(thread, task->td_dephash);
1148     task->td_dephash = NULL;
1149   }
1150 }
1151 
// Round up a size to a multiple of val (a power of two): used to insert padding
1153 // between structures co-allocated using a single malloc() call
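// For example, __kmp_round_up_to_val(13, 8) returns 16; a size that is already
// a multiple of val is returned unchanged.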
1154 static size_t __kmp_round_up_to_val(size_t size, size_t val) {
1155   if (size & (val - 1)) {
1156     size &= ~(val - 1);
1157     if (size <= KMP_SIZE_T_MAX - val) {
1158       size += val; // Round up if there is no overflow.
1159     }
1160   }
1161   return size;
} // __kmp_round_up_to_val
1163 
1164 // __kmp_task_alloc: Allocate the taskdata and task data structures for a task
1165 //
1166 // loc_ref: source location information
1167 // gtid: global thread number.
1168 // flags: include tiedness & task type (explicit vs. implicit) of the ''new''
1169 // task encountered. Converted from kmp_int32 to kmp_tasking_flags_t in routine.
1170 // sizeof_kmp_task_t:  Size in bytes of kmp_task_t data structure including
1171 // private vars accessed in task.
1172 // sizeof_shareds:  Size in bytes of array of pointers to shared vars accessed
1173 // in task.
1174 // task_entry: Pointer to task code entry point generated by compiler.
1175 // returns: a pointer to the allocated kmp_task_t structure (task).
1176 kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1177                              kmp_tasking_flags_t *flags,
1178                              size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1179                              kmp_routine_entry_t task_entry) {
1180   kmp_task_t *task;
1181   kmp_taskdata_t *taskdata;
1182   kmp_info_t *thread = __kmp_threads[gtid];
1183   kmp_team_t *team = thread->th.th_team;
1184   kmp_taskdata_t *parent_task = thread->th.th_current_task;
1185   size_t shareds_offset;
1186 
1187   if (!TCR_4(__kmp_init_middle))
1188     __kmp_middle_initialize();
1189 
1190   KA_TRACE(10, ("__kmp_task_alloc(enter): T#%d loc=%p, flags=(0x%x) "
1191                 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1192                 gtid, loc_ref, *((kmp_int32 *)flags), sizeof_kmp_task_t,
1193                 sizeof_shareds, task_entry));
1194 
1195   if (parent_task->td_flags.final) {
1196     if (flags->merged_if0) {
1197     }
1198     flags->final = 1;
1199   }
1200   if (flags->tiedness == TASK_UNTIED && !team->t.t_serialized) {
1201     // Untied task encountered causes the TSC algorithm to check entire deque of
1202     // the victim thread. If no untied task encountered, then checking the head
1203     // of the deque should be enough.
1204     KMP_CHECK_UPDATE(thread->th.th_task_team->tt.tt_untied_task_encountered, 1);
1205   }
1206 
  // Detachable tasks are not proxy tasks yet but could be in the future. Doing
  // the tasking setup when that happens is too late.
1210   if (flags->proxy == TASK_PROXY || flags->detachable == TASK_DETACHABLE) {
1211     if (flags->proxy == TASK_PROXY) {
1212       flags->tiedness = TASK_UNTIED;
1213       flags->merged_if0 = 1;
1214     }
    /* are we running in a serialized parallel region or in tskm_immediate_exec
       mode... we need tasking support enabled */
1217     if ((thread->th.th_task_team) == NULL) {
      /* This should only happen if the team is serialized;
         set up a task team and propagate it to the thread */
1220       KMP_DEBUG_ASSERT(team->t.t_serialized);
1221       KA_TRACE(30,
1222                ("T#%d creating task team in __kmp_task_alloc for proxy task\n",
1223                 gtid));
1224       __kmp_task_team_setup(
1225           thread, team,
1226           1); // 1 indicates setup the current team regardless of nthreads
1227       thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state];
1228     }
1229     kmp_task_team_t *task_team = thread->th.th_task_team;
1230 
1231     /* tasking must be enabled now as the task might not be pushed */
1232     if (!KMP_TASKING_ENABLED(task_team)) {
1233       KA_TRACE(
1234           30,
1235           ("T#%d enabling tasking in __kmp_task_alloc for proxy task\n", gtid));
1236       __kmp_enable_tasking(task_team, thread);
1237       kmp_int32 tid = thread->th.th_info.ds.ds_tid;
1238       kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
1239       // No lock needed since only owner can allocate
1240       if (thread_data->td.td_deque == NULL) {
1241         __kmp_alloc_task_deque(thread, thread_data);
1242       }
1243     }
1244 
1245     if (task_team->tt.tt_found_proxy_tasks == FALSE)
1246       TCW_4(task_team->tt.tt_found_proxy_tasks, TRUE);
1247   }
1248 
1249   // Calculate shared structure offset including padding after kmp_task_t struct
1250   // to align pointers in shared struct
1251   shareds_offset = sizeof(kmp_taskdata_t) + sizeof_kmp_task_t;
1252   shareds_offset = __kmp_round_up_to_val(shareds_offset, sizeof(void *));
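  // Resulting layout of the single combined allocation (a sketch):
  //   [ kmp_taskdata_t | kmp_task_t + private data | pad | shareds ]
  //   taskdata points at the start, task at the kmp_task_t part, and
  //   task->shareds at shareds_offset.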
1253 
1254   // Allocate a kmp_taskdata_t block and a kmp_task_t block.
1255   KA_TRACE(30, ("__kmp_task_alloc: T#%d First malloc size: %ld\n", gtid,
1256                 shareds_offset));
1257   KA_TRACE(30, ("__kmp_task_alloc: T#%d Second malloc size: %ld\n", gtid,
1258                 sizeof_shareds));
1259 
1260 // Avoid double allocation here by combining shareds with taskdata
1261 #if USE_FAST_MEMORY
1262   taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, shareds_offset +
1263                                                                sizeof_shareds);
1264 #else /* ! USE_FAST_MEMORY */
1265   taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, shareds_offset +
1266                                                                sizeof_shareds);
1267 #endif /* USE_FAST_MEMORY */
1268   ANNOTATE_HAPPENS_AFTER(taskdata);
1269 
1270   task = KMP_TASKDATA_TO_TASK(taskdata);
1271 
1272 // Make sure task & taskdata are aligned appropriately
1273 #if KMP_ARCH_X86 || KMP_ARCH_PPC64 || !KMP_HAVE_QUAD
1274   KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(double) - 1)) == 0);
1275   KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(double) - 1)) == 0);
1276 #else
1277   KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(_Quad) - 1)) == 0);
1278   KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(_Quad) - 1)) == 0);
1279 #endif
1280   if (sizeof_shareds > 0) {
1281     // Avoid double allocation here by combining shareds with taskdata
1282     task->shareds = &((char *)taskdata)[shareds_offset];
1283     // Make sure shareds struct is aligned to pointer size
1284     KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
1285                      0);
1286   } else {
1287     task->shareds = NULL;
1288   }
1289   task->routine = task_entry;
1290   task->part_id = 0; // AC: Always start with 0 part id
1291 
1292   taskdata->td_task_id = KMP_GEN_TASK_ID();
1293   taskdata->td_team = team;
1294   taskdata->td_alloc_thread = thread;
1295   taskdata->td_parent = parent_task;
1296   taskdata->td_level = parent_task->td_level + 1; // increment nesting level
1297   KMP_ATOMIC_ST_RLX(&taskdata->td_untied_count, 0);
1298   taskdata->td_ident = loc_ref;
1299   taskdata->td_taskwait_ident = NULL;
1300   taskdata->td_taskwait_counter = 0;
1301   taskdata->td_taskwait_thread = 0;
1302   KMP_DEBUG_ASSERT(taskdata->td_parent != NULL);
1303   // avoid copying icvs for proxy tasks
1304   if (flags->proxy == TASK_FULL)
1305     copy_icvs(&taskdata->td_icvs, &taskdata->td_parent->td_icvs);
1306 
1307   taskdata->td_flags.tiedness = flags->tiedness;
1308   taskdata->td_flags.final = flags->final;
1309   taskdata->td_flags.merged_if0 = flags->merged_if0;
1310   taskdata->td_flags.destructors_thunk = flags->destructors_thunk;
1311   taskdata->td_flags.proxy = flags->proxy;
1312   taskdata->td_flags.detachable = flags->detachable;
1313   taskdata->td_task_team = thread->th.th_task_team;
1314   taskdata->td_size_alloc = shareds_offset + sizeof_shareds;
1315   taskdata->td_flags.tasktype = TASK_EXPLICIT;
1316 
1317   // GEH - TODO: fix this to copy parent task's value of tasking_ser flag
1318   taskdata->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1319 
1320   // GEH - TODO: fix this to copy parent task's value of team_serial flag
1321   taskdata->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1322 
1323   // GEH - Note we serialize the task if the team is serialized to make sure
1324   // implicit parallel region tasks are not left until program termination to
1325   // execute. Also, it helps locality to execute immediately.
1326 
1327   taskdata->td_flags.task_serial =
1328       (parent_task->td_flags.final || taskdata->td_flags.team_serial ||
1329        taskdata->td_flags.tasking_ser || flags->merged_if0);
1330 
1331   taskdata->td_flags.started = 0;
1332   taskdata->td_flags.executing = 0;
1333   taskdata->td_flags.complete = 0;
1334   taskdata->td_flags.freed = 0;
1335 
1336   taskdata->td_flags.native = flags->native;
1337 
1338   KMP_ATOMIC_ST_RLX(&taskdata->td_incomplete_child_tasks, 0);
1339   // start at one because counts current task and children
1340   KMP_ATOMIC_ST_RLX(&taskdata->td_allocated_child_tasks, 1);
1341   taskdata->td_taskgroup =
1342       parent_task->td_taskgroup; // task inherits taskgroup from the parent task
1343   taskdata->td_dephash = NULL;
1344   taskdata->td_depnode = NULL;
1345   if (flags->tiedness == TASK_UNTIED)
1346     taskdata->td_last_tied = NULL; // will be set when the task is scheduled
1347   else
1348     taskdata->td_last_tied = taskdata;
1349   taskdata->td_allow_completion_event.type = KMP_EVENT_UNINITIALIZED;
1350 #if OMPT_SUPPORT
1351   if (UNLIKELY(ompt_enabled.enabled))
1352     __ompt_task_init(taskdata, gtid);
1353 #endif
  // Only need to keep track of child task counts if the team is parallel and
  // tasking is not serialized, or if it is a proxy or detachable task
  if (flags->proxy == TASK_PROXY || flags->detachable == TASK_DETACHABLE ||
      !(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
1360     KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
1361     if (parent_task->td_taskgroup)
1362       KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
    // Only need to keep track of allocated child tasks for explicit tasks,
    // since implicit tasks are not deallocated
1365     if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT) {
1366       KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
1367     }
1368   }
1369 
1370   KA_TRACE(20, ("__kmp_task_alloc(exit): T#%d created task %p parent=%p\n",
1371                 gtid, taskdata, taskdata->td_parent));
1372   ANNOTATE_HAPPENS_BEFORE(task);
1373 
1374   return task;
1375 }
1376 
1377 kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1378                                   kmp_int32 flags, size_t sizeof_kmp_task_t,
1379                                   size_t sizeof_shareds,
1380                                   kmp_routine_entry_t task_entry) {
1381   kmp_task_t *retval;
1382   kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
1383 
1384   input_flags->native = FALSE;
  // __kmp_task_alloc() sets up all other runtime flags
1386 
1387   KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s %s %s) "
1388                 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1389                 gtid, loc_ref, input_flags->tiedness ? "tied  " : "untied",
1390                 input_flags->proxy ? "proxy" : "",
1391                 input_flags->detachable ? "detachable" : "", sizeof_kmp_task_t,
1392                 sizeof_shareds, task_entry));
1393 
1394   retval = __kmp_task_alloc(loc_ref, gtid, input_flags, sizeof_kmp_task_t,
1395                             sizeof_shareds, task_entry);
1396 
1397   KA_TRACE(20, ("__kmpc_omp_task_alloc(exit): T#%d retval %p\n", gtid, retval));
1398 
1399   return retval;
1400 }
1401 
1402 kmp_task_t *__kmpc_omp_target_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1403                                          kmp_int32 flags,
1404                                          size_t sizeof_kmp_task_t,
1405                                          size_t sizeof_shareds,
1406                                          kmp_routine_entry_t task_entry,
1407                                          kmp_int64 device_id) {
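  // Note: device_id is currently not used here; this entry simply forwards to
  // the host task allocator.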
1408   return __kmpc_omp_task_alloc(loc_ref, gtid, flags, sizeof_kmp_task_t,
1409                                sizeof_shareds, task_entry);
1410 }
1411 
1412 /*!
1413 @ingroup TASKING
1414 @param loc_ref location of the original task directive
1415 @param gtid Global Thread ID of encountering thread
1416 @param new_task task thunk allocated by __kmpc_omp_task_alloc() for the ''new
1417 task''
1418 @param naffins Number of affinity items
1419 @param affin_list List of affinity items
@return Returns non-zero if registering affinity information was not successful.
 Returns 0 if registration was successful.
1422 This entry registers the affinity information attached to a task with the task
1423 thunk structure kmp_taskdata_t.
1424 */
1425 kmp_int32
1426 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref, kmp_int32 gtid,
1427                                   kmp_task_t *new_task, kmp_int32 naffins,
1428                                   kmp_task_affinity_info_t *affin_list) {
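  // Affinity information is currently ignored by this entry; it just reports
  // success (0).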
1429   return 0;
1430 }
1431 
1432 //  __kmp_invoke_task: invoke the specified task
1433 //
1434 // gtid: global thread ID of caller
1435 // task: the task to invoke
1436 // current_task: the task to resume after task invocation
1437 static void __kmp_invoke_task(kmp_int32 gtid, kmp_task_t *task,
1438                               kmp_taskdata_t *current_task) {
1439   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1440   kmp_info_t *thread;
1441   int discard = 0 /* false */;
1442   KA_TRACE(
1443       30, ("__kmp_invoke_task(enter): T#%d invoking task %p, current_task=%p\n",
1444            gtid, taskdata, current_task));
1445   KMP_DEBUG_ASSERT(task);
1446   if (taskdata->td_flags.proxy == TASK_PROXY &&
1447       taskdata->td_flags.complete == 1) {
1448     // This is a proxy task that was already completed but it needs to run
1449     // its bottom-half finish
1450     KA_TRACE(
1451         30,
1452         ("__kmp_invoke_task: T#%d running bottom finish for proxy task %p\n",
1453          gtid, taskdata));
1454 
1455     __kmp_bottom_half_finish_proxy(gtid, task);
1456 
1457     KA_TRACE(30, ("__kmp_invoke_task(exit): T#%d completed bottom finish for "
1458                   "proxy task %p, resuming task %p\n",
1459                   gtid, taskdata, current_task));
1460 
1461     return;
1462   }
1463 
1464 #if OMPT_SUPPORT
1465   // For untied tasks, the first task executed only calls __kmpc_omp_task and
1466   // does not execute code.
1467   ompt_thread_info_t oldInfo;
1468   if (UNLIKELY(ompt_enabled.enabled)) {
1469     // Store the threads states and restore them after the task
1470     thread = __kmp_threads[gtid];
1471     oldInfo = thread->th.ompt_thread_info;
1472     thread->th.ompt_thread_info.wait_id = 0;
1473     thread->th.ompt_thread_info.state = (thread->th.th_team_serialized)
1474                                             ? ompt_state_work_serial
1475                                             : ompt_state_work_parallel;
1476     taskdata->ompt_task_info.frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1477   }
1478 #endif
1479 
1480   // Proxy tasks are not handled by the runtime
1481   if (taskdata->td_flags.proxy != TASK_PROXY) {
1482     ANNOTATE_HAPPENS_AFTER(task);
1483     __kmp_task_start(gtid, task, current_task); // OMPT only if not discarded
1484   }
1485 
1486   // TODO: cancel tasks if the parallel region has also been cancelled
1487   // TODO: check if this sequence can be hoisted above __kmp_task_start
1488   // if cancellation has been enabled for this run ...
1489   if (__kmp_omp_cancellation) {
1490     thread = __kmp_threads[gtid];
1491     kmp_team_t *this_team = thread->th.th_team;
1492     kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
1493     if ((taskgroup && taskgroup->cancel_request) ||
1494         (this_team->t.t_cancel_request == cancel_parallel)) {
1495 #if OMPT_SUPPORT && OMPT_OPTIONAL
1496       ompt_data_t *task_data;
1497       if (UNLIKELY(ompt_enabled.ompt_callback_cancel)) {
1498         __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL, NULL);
1499         ompt_callbacks.ompt_callback(ompt_callback_cancel)(
1500             task_data,
1501             ((taskgroup && taskgroup->cancel_request) ? ompt_cancel_taskgroup
1502                                                       : ompt_cancel_parallel) |
1503                 ompt_cancel_discarded_task,
1504             NULL);
1505       }
1506 #endif
1507       KMP_COUNT_BLOCK(TASK_cancelled);
1508       // this task belongs to a task group and we need to cancel it
1509       discard = 1 /* true */;
1510     }
1511   }
1512 
1513   // Invoke the task routine and pass in relevant data.
1514   // Thunks generated by gcc take a different argument list.
1515   if (!discard) {
1516     if (taskdata->td_flags.tiedness == TASK_UNTIED) {
1517       taskdata->td_last_tied = current_task->td_last_tied;
1518       KMP_DEBUG_ASSERT(taskdata->td_last_tied);
1519     }
1520 #if KMP_STATS_ENABLED
1521     KMP_COUNT_BLOCK(TASK_executed);
1522     switch (KMP_GET_THREAD_STATE()) {
1523     case FORK_JOIN_BARRIER:
1524       KMP_PUSH_PARTITIONED_TIMER(OMP_task_join_bar);
1525       break;
1526     case PLAIN_BARRIER:
1527       KMP_PUSH_PARTITIONED_TIMER(OMP_task_plain_bar);
1528       break;
1529     case TASKYIELD:
1530       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskyield);
1531       break;
1532     case TASKWAIT:
1533       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskwait);
1534       break;
1535     case TASKGROUP:
1536       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskgroup);
1537       break;
1538     default:
1539       KMP_PUSH_PARTITIONED_TIMER(OMP_task_immediate);
1540       break;
1541     }
1542 #endif // KMP_STATS_ENABLED
1543 
1544 // OMPT task begin
1545 #if OMPT_SUPPORT
1546     if (UNLIKELY(ompt_enabled.enabled))
1547       __ompt_task_start(task, current_task, gtid);
1548 #endif
1549 
1550 #if USE_ITT_BUILD && USE_ITT_NOTIFY
1551     kmp_uint64 cur_time;
1552     kmp_int32 kmp_itt_count_task =
1553         __kmp_forkjoin_frames_mode == 3 && !taskdata->td_flags.task_serial &&
1554         current_task->td_flags.tasktype == TASK_IMPLICIT;
1555     if (kmp_itt_count_task) {
1556       thread = __kmp_threads[gtid];
1557       // Time outer level explicit task on barrier for adjusting imbalance time
1558       if (thread->th.th_bar_arrive_time)
1559         cur_time = __itt_get_timestamp();
1560       else
1561         kmp_itt_count_task = 0; // thread is not on a barrier - skip timing
1562     }
1563 #endif
1564 
1565 #ifdef KMP_GOMP_COMPAT
1566     if (taskdata->td_flags.native) {
1567       ((void (*)(void *))(*(task->routine)))(task->shareds);
1568     } else
1569 #endif /* KMP_GOMP_COMPAT */
1570     {
1571       (*(task->routine))(gtid, task);
1572     }
1573     KMP_POP_PARTITIONED_TIMER();
1574 
1575 #if USE_ITT_BUILD && USE_ITT_NOTIFY
1576     if (kmp_itt_count_task) {
1577       // Barrier imbalance - adjust arrive time with the task duration
1578       thread->th.th_bar_arrive_time += (__itt_get_timestamp() - cur_time);
1579     }
#endif
  }

1585   // Proxy tasks are not handled by the runtime
1586   if (taskdata->td_flags.proxy != TASK_PROXY) {
1587     ANNOTATE_HAPPENS_BEFORE(taskdata->td_parent);
1588 #if OMPT_SUPPORT
1589     if (UNLIKELY(ompt_enabled.enabled)) {
1590       thread->th.ompt_thread_info = oldInfo;
1591       if (taskdata->td_flags.tiedness == TASK_TIED) {
1592         taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
1593       }
1594       __kmp_task_finish<true>(gtid, task, current_task);
1595     } else
1596 #endif
1597       __kmp_task_finish<false>(gtid, task, current_task);
1598   }
1599 
1600   KA_TRACE(
1601       30,
1602       ("__kmp_invoke_task(exit): T#%d completed task %p, resuming task %p\n",
1603        gtid, taskdata, current_task));
1604   return;
1605 }
1606 
1607 // __kmpc_omp_task_parts: Schedule a thread-switchable task for execution
1608 //
1609 // loc_ref: location of original task pragma (ignored)
1610 // gtid: Global Thread ID of encountering thread
// new_task: task thunk allocated by __kmpc_omp_task_alloc() for the
//           ''new task''
// Returns:
//    TASK_CURRENT_NOT_QUEUED (0) if the current task was not suspended and
//    queued to be resumed later.
//    TASK_CURRENT_QUEUED (1) if the current task was suspended and queued to
//    be resumed later.
1617 kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
1618                                 kmp_task_t *new_task) {
1619   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1620 
1621   KA_TRACE(10, ("__kmpc_omp_task_parts(enter): T#%d loc=%p task=%p\n", gtid,
1622                 loc_ref, new_taskdata));
1623 
1624 #if OMPT_SUPPORT
1625   kmp_taskdata_t *parent;
1626   if (UNLIKELY(ompt_enabled.enabled)) {
1627     parent = new_taskdata->td_parent;
1628     if (ompt_enabled.ompt_callback_task_create) {
1629       ompt_data_t task_data = ompt_data_none;
1630       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1631           parent ? &(parent->ompt_task_info.task_data) : &task_data,
1632           parent ? &(parent->ompt_task_info.frame) : NULL,
1633           &(new_taskdata->ompt_task_info.task_data), ompt_task_explicit, 0,
1634           OMPT_GET_RETURN_ADDRESS(0));
1635     }
1636   }
1637 #endif
1638 
1639   /* Should we execute the new task or queue it? For now, let's just always try
1640      to queue it.  If the queue fills up, then we'll execute it.  */
1641 
1642   if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1643   { // Execute this task immediately
1644     kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1645     new_taskdata->td_flags.task_serial = 1;
1646     __kmp_invoke_task(gtid, new_task, current_task);
1647   }
1648 
  KA_TRACE(
      10,
      ("__kmpc_omp_task_parts(exit): T#%d returning TASK_CURRENT_NOT_QUEUED: "
       "loc=%p task=%p\n",
       gtid, loc_ref, new_taskdata));
1654 
1655   ANNOTATE_HAPPENS_BEFORE(new_task);
1656 #if OMPT_SUPPORT
1657   if (UNLIKELY(ompt_enabled.enabled)) {
1658     parent->ompt_task_info.frame.enter_frame = ompt_data_none;
1659   }
1660 #endif
1661   return TASK_CURRENT_NOT_QUEUED;
1662 }
1663 
1664 // __kmp_omp_task: Schedule a non-thread-switchable task for execution
1665 //
1666 // gtid: Global Thread ID of encountering thread
// new_task: non-thread-switchable task thunk allocated by
//           __kmpc_omp_task_alloc()
// serialize_immediate: if TRUE and the task is executed immediately, its
//                      execution will be serialized
// Returns:
//    TASK_CURRENT_NOT_QUEUED (0) if the current task was not suspended and
//    queued to be resumed later.
//    TASK_CURRENT_QUEUED (1) if the current task was suspended and queued to
//    be resumed later.
1675 kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
1676                          bool serialize_immediate) {
1677   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1678 
1679   /* Should we execute the new task or queue it? For now, let's just always try
1680      to queue it.  If the queue fills up, then we'll execute it.  */
1681   if (new_taskdata->td_flags.proxy == TASK_PROXY ||
1682       __kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1683   { // Execute this task immediately
1684     kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1685     if (serialize_immediate)
1686       new_taskdata->td_flags.task_serial = 1;
1687     __kmp_invoke_task(gtid, new_task, current_task);
1688   }
1689 
1690   ANNOTATE_HAPPENS_BEFORE(new_task);
1691   return TASK_CURRENT_NOT_QUEUED;
1692 }
1693 
1694 // __kmpc_omp_task: Wrapper around __kmp_omp_task to schedule a
1695 // non-thread-switchable task from the parent thread only!
1696 //
1697 // loc_ref: location of original task pragma (ignored)
1698 // gtid: Global Thread ID of encountering thread
// new_task: non-thread-switchable task thunk allocated by
//           __kmpc_omp_task_alloc()
// Returns:
//    TASK_CURRENT_NOT_QUEUED (0) if the current task was not suspended and
//    queued to be resumed later.
//    TASK_CURRENT_QUEUED (1) if the current task was suspended and queued to
//    be resumed later.
1706 kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
1707                           kmp_task_t *new_task) {
1708   kmp_int32 res;
1709   KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
1710 
1711 #if KMP_DEBUG || OMPT_SUPPORT
1712   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1713 #endif
1714   KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
1715                 new_taskdata));
1716 
1717 #if OMPT_SUPPORT
1718   kmp_taskdata_t *parent = NULL;
1719   if (UNLIKELY(ompt_enabled.enabled)) {
1720     if (!new_taskdata->td_flags.started) {
1721       OMPT_STORE_RETURN_ADDRESS(gtid);
1722       parent = new_taskdata->td_parent;
1723       if (!parent->ompt_task_info.frame.enter_frame.ptr) {
1724         parent->ompt_task_info.frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1725       }
1726       if (ompt_enabled.ompt_callback_task_create) {
1727         ompt_data_t task_data = ompt_data_none;
1728         ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1729             parent ? &(parent->ompt_task_info.task_data) : &task_data,
1730             parent ? &(parent->ompt_task_info.frame) : NULL,
1731             &(new_taskdata->ompt_task_info.task_data),
1732             ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
1733             OMPT_LOAD_RETURN_ADDRESS(gtid));
1734       }
1735     } else {
1736       // We are scheduling the continuation of an UNTIED task.
1737       // Scheduling back to the parent task.
1738       __ompt_task_finish(new_task,
1739                          new_taskdata->ompt_task_info.scheduling_parent,
1740                          ompt_task_switch);
1741       new_taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
1742     }
1743   }
1744 #endif
1745 
1746   res = __kmp_omp_task(gtid, new_task, true);
1747 
1748   KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
1749                 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
1750                 gtid, loc_ref, new_taskdata));
1751 #if OMPT_SUPPORT
1752   if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
1753     parent->ompt_task_info.frame.enter_frame = ompt_data_none;
1754   }
1755 #endif
1756   return res;
1757 }
1758 
1759 // __kmp_omp_taskloop_task: Wrapper around __kmp_omp_task to schedule
1760 // a taskloop task with the correct OMPT return address
1761 //
1762 // loc_ref: location of original task pragma (ignored)
1763 // gtid: Global Thread ID of encountering thread
// new_task: non-thread-switchable task thunk allocated by
//           __kmpc_omp_task_alloc()
// codeptr_ra: return address for OMPT callback
// Returns:
//    TASK_CURRENT_NOT_QUEUED (0) if the current task was not suspended and
//    queued to be resumed later.
//    TASK_CURRENT_QUEUED (1) if the current task was suspended and queued to
//    be resumed later.
1772 kmp_int32 __kmp_omp_taskloop_task(ident_t *loc_ref, kmp_int32 gtid,
1773                                   kmp_task_t *new_task, void *codeptr_ra) {
1774   kmp_int32 res;
1775   KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
1776 
1777 #if KMP_DEBUG || OMPT_SUPPORT
1778   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1779 #endif
1780   KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
1781                 new_taskdata));
1782 
1783 #if OMPT_SUPPORT
1784   kmp_taskdata_t *parent = NULL;
1785   if (UNLIKELY(ompt_enabled.enabled && !new_taskdata->td_flags.started)) {
1786     parent = new_taskdata->td_parent;
1787     if (!parent->ompt_task_info.frame.enter_frame.ptr)
1788       parent->ompt_task_info.frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1789     if (ompt_enabled.ompt_callback_task_create) {
1790       ompt_data_t task_data = ompt_data_none;
1791       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1792           parent ? &(parent->ompt_task_info.task_data) : &task_data,
1793           parent ? &(parent->ompt_task_info.frame) : NULL,
1794           &(new_taskdata->ompt_task_info.task_data),
1795           ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
1796           codeptr_ra);
1797     }
1798   }
1799 #endif
1800 
1801   res = __kmp_omp_task(gtid, new_task, true);
1802 
1803   KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
1804                 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
1805                 gtid, loc_ref, new_taskdata));
1806 #if OMPT_SUPPORT
1807   if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
1808     parent->ompt_task_info.frame.enter_frame = ompt_data_none;
1809   }
1810 #endif
1811   return res;
1812 }
1813 
1814 template <bool ompt>
1815 static kmp_int32 __kmpc_omp_taskwait_template(ident_t *loc_ref, kmp_int32 gtid,
1816                                               void *frame_address,
1817                                               void *return_address) {
1818   kmp_taskdata_t *taskdata;
1819   kmp_info_t *thread;
1820   int thread_finished = FALSE;
1821   KMP_SET_THREAD_STATE_BLOCK(TASKWAIT);
1822 
1823   KA_TRACE(10, ("__kmpc_omp_taskwait(enter): T#%d loc=%p\n", gtid, loc_ref));
1824 
1825   if (__kmp_tasking_mode != tskm_immediate_exec) {
1826     thread = __kmp_threads[gtid];
1827     taskdata = thread->th.th_current_task;
1828 
1829 #if OMPT_SUPPORT && OMPT_OPTIONAL
1830     ompt_data_t *my_task_data;
1831     ompt_data_t *my_parallel_data;
1832 
1833     if (ompt) {
1834       my_task_data = &(taskdata->ompt_task_info.task_data);
1835       my_parallel_data = OMPT_CUR_TEAM_DATA(thread);
1836 
1837       taskdata->ompt_task_info.frame.enter_frame.ptr = frame_address;
1838 
1839       if (ompt_enabled.ompt_callback_sync_region) {
1840         ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1841             ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1842             my_task_data, return_address);
1843       }
1844 
1845       if (ompt_enabled.ompt_callback_sync_region_wait) {
1846         ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1847             ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1848             my_task_data, return_address);
1849       }
1850     }
1851 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1852 
// Debugger: The taskwait is active. Store the location and the thread that
// encountered the taskwait.
1855 #if USE_ITT_BUILD
1856 // Note: These values are used by ITT events as well.
1857 #endif /* USE_ITT_BUILD */
1858     taskdata->td_taskwait_counter += 1;
1859     taskdata->td_taskwait_ident = loc_ref;
1860     taskdata->td_taskwait_thread = gtid + 1;
1861 
1862 #if USE_ITT_BUILD
1863     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
1864     if (itt_sync_obj != NULL)
1865       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
1866 #endif /* USE_ITT_BUILD */
1867 
1868     bool must_wait =
1869         !taskdata->td_flags.team_serial && !taskdata->td_flags.final;
1870 
1871     must_wait = must_wait || (thread->th.th_task_team != NULL &&
1872                               thread->th.th_task_team->tt.tt_found_proxy_tasks);
1873     if (must_wait) {
1874       kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *,
1875                              &(taskdata->td_incomplete_child_tasks)),
1876                        0U);
1877       while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) != 0) {
1878         flag.execute_tasks(thread, gtid, FALSE,
1879                            &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1880                            __kmp_task_stealing_constraint);
1881       }
1882     }
1883 #if USE_ITT_BUILD
1884     if (itt_sync_obj != NULL)
1885       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
1886 #endif /* USE_ITT_BUILD */
1887 
1888     // Debugger:  The taskwait is completed. Location remains, but thread is
1889     // negated.
1890     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
1891 
1892 #if OMPT_SUPPORT && OMPT_OPTIONAL
1893     if (ompt) {
1894       if (ompt_enabled.ompt_callback_sync_region_wait) {
1895         ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1896             ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1897             my_task_data, return_address);
1898       }
1899       if (ompt_enabled.ompt_callback_sync_region) {
1900         ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1901             ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1902             my_task_data, return_address);
1903       }
1904       taskdata->ompt_task_info.frame.enter_frame = ompt_data_none;
1905     }
1906 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1907 
1908     ANNOTATE_HAPPENS_AFTER(taskdata);
1909   }
1910 
1911   KA_TRACE(10, ("__kmpc_omp_taskwait(exit): T#%d task %p finished waiting, "
1912                 "returning TASK_CURRENT_NOT_QUEUED\n",
1913                 gtid, taskdata));
1914 
1915   return TASK_CURRENT_NOT_QUEUED;
1916 }
1917 
1918 #if OMPT_SUPPORT && OMPT_OPTIONAL
1919 OMPT_NOINLINE
1920 static kmp_int32 __kmpc_omp_taskwait_ompt(ident_t *loc_ref, kmp_int32 gtid,
1921                                           void *frame_address,
1922                                           void *return_address) {
1923   return __kmpc_omp_taskwait_template<true>(loc_ref, gtid, frame_address,
1924                                             return_address);
1925 }
1926 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1927 
1928 // __kmpc_omp_taskwait: Wait until all tasks generated by the current task are
1929 // complete
1930 kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid) {
1931 #if OMPT_SUPPORT && OMPT_OPTIONAL
1932   if (UNLIKELY(ompt_enabled.enabled)) {
1933     OMPT_STORE_RETURN_ADDRESS(gtid);
1934     return __kmpc_omp_taskwait_ompt(loc_ref, gtid, OMPT_GET_FRAME_ADDRESS(0),
1935                                     OMPT_LOAD_RETURN_ADDRESS(gtid));
1936   }
1937 #endif
1938   return __kmpc_omp_taskwait_template<false>(loc_ref, gtid, NULL, NULL);
1939 }
1940 
1941 // __kmpc_omp_taskyield: switch to a different task
1942 kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part) {
1943   kmp_taskdata_t *taskdata;
1944   kmp_info_t *thread;
1945   int thread_finished = FALSE;
1946 
1947   KMP_COUNT_BLOCK(OMP_TASKYIELD);
1948   KMP_SET_THREAD_STATE_BLOCK(TASKYIELD);
1949 
1950   KA_TRACE(10, ("__kmpc_omp_taskyield(enter): T#%d loc=%p end_part = %d\n",
1951                 gtid, loc_ref, end_part));
1952 
1953   if (__kmp_tasking_mode != tskm_immediate_exec && __kmp_init_parallel) {
1954     thread = __kmp_threads[gtid];
1955     taskdata = thread->th.th_current_task;
1956 // Should we model this as a task wait or not?
// Debugger: The taskwait is active. Store the location and the thread that
// encountered the taskwait.
1959 #if USE_ITT_BUILD
1960 // Note: These values are used by ITT events as well.
1961 #endif /* USE_ITT_BUILD */
1962     taskdata->td_taskwait_counter += 1;
1963     taskdata->td_taskwait_ident = loc_ref;
1964     taskdata->td_taskwait_thread = gtid + 1;
1965 
1966 #if USE_ITT_BUILD
1967     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
1968     if (itt_sync_obj != NULL)
1969       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
1970 #endif /* USE_ITT_BUILD */
1971     if (!taskdata->td_flags.team_serial) {
1972       kmp_task_team_t *task_team = thread->th.th_task_team;
1973       if (task_team != NULL) {
1974         if (KMP_TASKING_ENABLED(task_team)) {
1975 #if OMPT_SUPPORT
1976           if (UNLIKELY(ompt_enabled.enabled))
1977             thread->th.ompt_thread_info.ompt_task_yielded = 1;
1978 #endif
1979           __kmp_execute_tasks_32(
1980               thread, gtid, NULL, FALSE,
1981               &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1982               __kmp_task_stealing_constraint);
1983 #if OMPT_SUPPORT
1984           if (UNLIKELY(ompt_enabled.enabled))
1985             thread->th.ompt_thread_info.ompt_task_yielded = 0;
1986 #endif
1987         }
1988       }
1989     }
1990 #if USE_ITT_BUILD
1991     if (itt_sync_obj != NULL)
1992       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
1993 #endif /* USE_ITT_BUILD */
1994 
1995     // Debugger:  The taskwait is completed. Location remains, but thread is
1996     // negated.
1997     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
1998   }
1999 
2000   KA_TRACE(10, ("__kmpc_omp_taskyield(exit): T#%d task %p resuming, "
2001                 "returning TASK_CURRENT_NOT_QUEUED\n",
2002                 gtid, taskdata));
2003 
2004   return TASK_CURRENT_NOT_QUEUED;
2005 }
2006 
2007 // Task Reduction implementation
2008 //
// Note: the initial implementation did not account for the possibility of
// specifying omp_orig for the initializer of a UDR (user-defined reduction).
// The corrected implementation takes the omp_orig object into account.
// The compiler is free to use the old implementation if omp_orig is not
// specified.
2013 
2014 /*!
2015 @ingroup BASIC_TYPES
2016 @{
2017 */
2018 
2019 /*!
2020 Flags for special info per task reduction item.
2021 */
2022 typedef struct kmp_taskred_flags {
2023   /*! 1 - use lazy alloc/init (e.g. big objects, #tasks < #threads) */
2024   unsigned lazy_priv : 1;
2025   unsigned reserved31 : 31;
2026 } kmp_taskred_flags_t;
2027 
2028 /*!
2029 Internal struct for reduction data item related info set up by compiler.
2030 */
2031 typedef struct kmp_task_red_input {
  void *reduce_shar; /**< item shared between tasks to reduce into */
2033   size_t reduce_size; /**< size of data item in bytes */
2034   // three compiler-generated routines (init, fini are optional):
2035   void *reduce_init; /**< data initialization routine (single parameter) */
2036   void *reduce_fini; /**< data finalization routine */
2037   void *reduce_comb; /**< data combiner routine */
2038   kmp_taskred_flags_t flags; /**< flags for additional info from compiler */
2039 } kmp_task_red_input_t;
2040 
2041 /*!
2042 Internal struct for reduction data item related info saved by the library.
2043 */
2044 typedef struct kmp_taskred_data {
  void *reduce_shar; /**< item shared between tasks to reduce into */
2046   size_t reduce_size; /**< size of data item */
2047   kmp_taskred_flags_t flags; /**< flags for additional info from compiler */
2048   void *reduce_priv; /**< array of thread specific items */
2049   void *reduce_pend; /**< end of private data for faster comparison op */
2050   // three compiler-generated routines (init, fini are optional):
2051   void *reduce_comb; /**< data combiner routine */
2052   void *reduce_init; /**< data initialization routine (two parameters) */
2053   void *reduce_fini; /**< data finalization routine */
2054   void *reduce_orig; /**< original item (can be used in UDR initializer) */
2055 } kmp_taskred_data_t;
2056 
2057 /*!
2058 Internal struct for reduction data item related info set up by compiler.
2059 
2060 New interface: added reduce_orig field to provide omp_orig for UDR initializer.
2061 */
2062 typedef struct kmp_taskred_input {
  void *reduce_shar; /**< item shared between tasks to reduce into */
2064   void *reduce_orig; /**< original reduction item used for initialization */
2065   size_t reduce_size; /**< size of data item */
2066   // three compiler-generated routines (init, fini are optional):
2067   void *reduce_init; /**< data initialization routine (two parameters) */
2068   void *reduce_fini; /**< data finalization routine */
2069   void *reduce_comb; /**< data combiner routine */
2070   kmp_taskred_flags_t flags; /**< flags for additional info from compiler */
2071 } kmp_taskred_input_t;
2072 /*!
2073 @}
2074 */
2075 
2076 template <typename T> void __kmp_assign_orig(kmp_taskred_data_t &item, T &src);
2077 template <>
2078 void __kmp_assign_orig<kmp_task_red_input_t>(kmp_taskred_data_t &item,
2079                                              kmp_task_red_input_t &src) {
2080   item.reduce_orig = NULL;
2081 }
2082 template <>
2083 void __kmp_assign_orig<kmp_taskred_input_t>(kmp_taskred_data_t &item,
2084                                             kmp_taskred_input_t &src) {
2085   if (src.reduce_orig != NULL) {
2086     item.reduce_orig = src.reduce_orig;
2087   } else {
2088     item.reduce_orig = src.reduce_shar;
2089   } // non-NULL reduce_orig means new interface used
2090 }
2091 
2092 template <typename T> void __kmp_call_init(kmp_taskred_data_t &item, int j);
2093 template <>
2094 void __kmp_call_init<kmp_task_red_input_t>(kmp_taskred_data_t &item,
2095                                            int offset) {
2096   ((void (*)(void *))item.reduce_init)((char *)(item.reduce_priv) + offset);
2097 }
2098 template <>
2099 void __kmp_call_init<kmp_taskred_input_t>(kmp_taskred_data_t &item,
2100                                           int offset) {
2101   ((void (*)(void *, void *))item.reduce_init)(
2102       (char *)(item.reduce_priv) + offset, item.reduce_orig);
2103 }
2104 
2105 template <typename T>
2106 void *__kmp_task_reduction_init(int gtid, int num, T *data) {
2107   kmp_info_t *thread = __kmp_threads[gtid];
2108   kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup;
2109   kmp_int32 nth = thread->th.th_team_nproc;
2110   kmp_taskred_data_t *arr;
2111 
2112   // check input data just in case
2113   KMP_ASSERT(tg != NULL);
2114   KMP_ASSERT(data != NULL);
2115   KMP_ASSERT(num > 0);
2116   if (nth == 1) {
2117     KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, tg %p, exiting nth=1\n",
2118                   gtid, tg));
2119     return (void *)tg;
2120   }
2121   KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, taskgroup %p, #items %d\n",
2122                 gtid, tg, num));
2123   arr = (kmp_taskred_data_t *)__kmp_thread_malloc(
2124       thread, num * sizeof(kmp_taskred_data_t));
2125   for (int i = 0; i < num; ++i) {
    // Round the size up to a cache line per thread-specific item; subtracting
    // 1 first keeps sizes that are already a multiple of CACHE_LINE unchanged.
    size_t size = data[i].reduce_size - 1;
    size += CACHE_LINE - size % CACHE_LINE;
2129     KMP_ASSERT(data[i].reduce_comb != NULL); // combiner is mandatory
2130     arr[i].reduce_shar = data[i].reduce_shar;
2131     arr[i].reduce_size = size;
2132     arr[i].flags = data[i].flags;
2133     arr[i].reduce_comb = data[i].reduce_comb;
2134     arr[i].reduce_init = data[i].reduce_init;
2135     arr[i].reduce_fini = data[i].reduce_fini;
2136     __kmp_assign_orig<T>(arr[i], data[i]);
2137     if (!arr[i].flags.lazy_priv) {
2138       // allocate cache-line aligned block and fill it with zeros
2139       arr[i].reduce_priv = __kmp_allocate(nth * size);
2140       arr[i].reduce_pend = (char *)(arr[i].reduce_priv) + nth * size;
2141       if (arr[i].reduce_init != NULL) {
2142         // initialize all thread-specific items
2143         for (int j = 0; j < nth; ++j) {
2144           __kmp_call_init<T>(arr[i], j * size);
2145         }
2146       }
2147     } else {
2148       // only allocate space for pointers now,
2149       // objects will be lazily allocated/initialized if/when requested
2150       // note that __kmp_allocate zeroes the allocated memory
2151       arr[i].reduce_priv = __kmp_allocate(nth * sizeof(void *));
2152     }
2153   }
2154   tg->reduce_data = (void *)arr;
2155   tg->reduce_num_data = num;
2156   return (void *)tg;
2157 }
2158 
2159 /*!
2160 @ingroup TASKING
2161 @param gtid      Global thread ID
2162 @param num       Number of data items to reduce
2163 @param data      Array of data for reduction
2164 @return The taskgroup identifier
2165 
2166 Initialize task reduction for the taskgroup.
2167 
Note: this entry assumes the optional compiler-generated initializer routine
has a single parameter - a pointer to the object to be initialized. That means
the reduction either does not use the omp_orig object, or omp_orig is
accessible without the help of the runtime library.
2172 */
2173 void *__kmpc_task_reduction_init(int gtid, int num, void *data) {
2174   return __kmp_task_reduction_init(gtid, num, (kmp_task_red_input_t *)data);
2175 }
2176 
2177 /*!
2178 @ingroup TASKING
2179 @param gtid      Global thread ID
2180 @param num       Number of data items to reduce
2181 @param data      Array of data for reduction
2182 @return The taskgroup identifier
2183 
2184 Initialize task reduction for the taskgroup.
2185 
Note: this entry assumes the optional compiler-generated initializer routine
has two parameters: a pointer to the object to be initialized and a pointer to
the omp_orig object.
2188 */
2189 void *__kmpc_taskred_init(int gtid, int num, void *data) {
2190   return __kmp_task_reduction_init(gtid, num, (kmp_taskred_input_t *)data);
2191 }
2192 
2193 // Copy task reduction data (except for shared pointers).
2194 template <typename T>
2195 void __kmp_task_reduction_init_copy(kmp_info_t *thr, int num, T *data,
2196                                     kmp_taskgroup_t *tg, void *reduce_data) {
2197   kmp_taskred_data_t *arr;
2198   KA_TRACE(20, ("__kmp_task_reduction_init_copy: Th %p, init taskgroup %p,"
2199                 " from data %p\n",
2200                 thr, tg, reduce_data));
2201   arr = (kmp_taskred_data_t *)__kmp_thread_malloc(
2202       thr, num * sizeof(kmp_taskred_data_t));
2203   // threads will share private copies, thunk routines, sizes, flags, etc.:
2204   KMP_MEMCPY(arr, reduce_data, num * sizeof(kmp_taskred_data_t));
2205   for (int i = 0; i < num; ++i) {
2206     arr[i].reduce_shar = data[i].reduce_shar; // init unique shared pointers
2207   }
2208   tg->reduce_data = (void *)arr;
2209   tg->reduce_num_data = num;
2210 }
2211 
2212 /*!
2213 @ingroup TASKING
2214 @param gtid    Global thread ID
2215 @param tskgrp  The taskgroup ID (optional)
2216 @param data    Shared location of the item
2217 @return The pointer to per-thread data
2218 
2219 Get thread-specific location of data item
2220 */
2221 void *__kmpc_task_reduction_get_th_data(int gtid, void *tskgrp, void *data) {
2222   kmp_info_t *thread = __kmp_threads[gtid];
2223   kmp_int32 nth = thread->th.th_team_nproc;
2224   if (nth == 1)
2225     return data; // nothing to do
2226 
2227   kmp_taskgroup_t *tg = (kmp_taskgroup_t *)tskgrp;
2228   if (tg == NULL)
2229     tg = thread->th.th_current_task->td_taskgroup;
2230   KMP_ASSERT(tg != NULL);
2231   kmp_taskred_data_t *arr = (kmp_taskred_data_t *)(tg->reduce_data);
2232   kmp_int32 num = tg->reduce_num_data;
2233   kmp_int32 tid = thread->th.th_info.ds.ds_tid;
2234 
2235   KMP_ASSERT(data != NULL);
2236   while (tg != NULL) {
2237     for (int i = 0; i < num; ++i) {
2238       if (!arr[i].flags.lazy_priv) {
2239         if (data == arr[i].reduce_shar ||
2240             (data >= arr[i].reduce_priv && data < arr[i].reduce_pend))
2241           return (char *)(arr[i].reduce_priv) + tid * arr[i].reduce_size;
2242       } else {
2243         // check shared location first
2244         void **p_priv = (void **)(arr[i].reduce_priv);
2245         if (data == arr[i].reduce_shar)
2246           goto found;
2247         // check if we get some thread specific location as parameter
2248         for (int j = 0; j < nth; ++j)
2249           if (data == p_priv[j])
2250             goto found;
2251         continue; // not found, continue search
2252       found:
2253         if (p_priv[tid] == NULL) {
2254           // allocate thread specific object lazily
2255           p_priv[tid] = __kmp_allocate(arr[i].reduce_size);
2256           if (arr[i].reduce_init != NULL) {
2257             if (arr[i].reduce_orig != NULL) { // new interface
2258               ((void (*)(void *, void *))arr[i].reduce_init)(
2259                   p_priv[tid], arr[i].reduce_orig);
2260             } else { // old interface (single parameter)
2261               ((void (*)(void *))arr[i].reduce_init)(p_priv[tid]);
2262             }
2263           }
2264         }
2265         return p_priv[tid];
2266       }
2267     }
2268     tg = tg->parent;
2269     arr = (kmp_taskred_data_t *)(tg->reduce_data);
2270     num = tg->reduce_num_data;
2271   }
2272   KMP_ASSERT2(0, "Unknown task reduction item");
2273   return NULL; // ERROR, this line never executed
2274 }
2275 
2276 // Finalize task reduction.
2277 // Called from __kmpc_end_taskgroup()
2278 static void __kmp_task_reduction_fini(kmp_info_t *th, kmp_taskgroup_t *tg) {
2279   kmp_int32 nth = th->th.th_team_nproc;
2280   KMP_DEBUG_ASSERT(nth > 1); // should not be called if nth == 1
2281   kmp_taskred_data_t *arr = (kmp_taskred_data_t *)tg->reduce_data;
2282   kmp_int32 num = tg->reduce_num_data;
2283   for (int i = 0; i < num; ++i) {
2284     void *sh_data = arr[i].reduce_shar;
2285     void (*f_fini)(void *) = (void (*)(void *))(arr[i].reduce_fini);
2286     void (*f_comb)(void *, void *) =
2287         (void (*)(void *, void *))(arr[i].reduce_comb);
2288     if (!arr[i].flags.lazy_priv) {
2289       void *pr_data = arr[i].reduce_priv;
2290       size_t size = arr[i].reduce_size;
2291       for (int j = 0; j < nth; ++j) {
2292         void *priv_data = (char *)pr_data + j * size;
2293         f_comb(sh_data, priv_data); // combine results
2294         if (f_fini)
2295           f_fini(priv_data); // finalize if needed
2296       }
2297     } else {
2298       void **pr_data = (void **)(arr[i].reduce_priv);
2299       for (int j = 0; j < nth; ++j) {
2300         if (pr_data[j] != NULL) {
2301           f_comb(sh_data, pr_data[j]); // combine results
2302           if (f_fini)
2303             f_fini(pr_data[j]); // finalize if needed
2304           __kmp_free(pr_data[j]);
2305         }
2306       }
2307     }
2308     __kmp_free(arr[i].reduce_priv);
2309   }
2310   __kmp_thread_free(th, arr);
2311   tg->reduce_data = NULL;
2312   tg->reduce_num_data = 0;
2313 }
2314 
2315 // Cleanup task reduction data for parallel or worksharing,
2316 // do not touch task private data other threads still working with.
2317 // Called from __kmpc_end_taskgroup()
2318 static void __kmp_task_reduction_clean(kmp_info_t *th, kmp_taskgroup_t *tg) {
2319   __kmp_thread_free(th, tg->reduce_data);
2320   tg->reduce_data = NULL;
2321   tg->reduce_num_data = 0;
2322 }
2323 
2324 template <typename T>
2325 void *__kmp_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws,
2326                                          int num, T *data) {
2327   kmp_info_t *thr = __kmp_threads[gtid];
2328   kmp_int32 nth = thr->th.th_team_nproc;
2329   __kmpc_taskgroup(loc, gtid); // form new taskgroup first
2330   if (nth == 1) {
2331     KA_TRACE(10,
2332              ("__kmpc_reduction_modifier_init: T#%d, tg %p, exiting nth=1\n",
2333               gtid, thr->th.th_current_task->td_taskgroup));
2334     return (void *)thr->th.th_current_task->td_taskgroup;
2335   }
2336   kmp_team_t *team = thr->th.th_team;
2337   void *reduce_data;
2338   kmp_taskgroup_t *tg;
2339   reduce_data = KMP_ATOMIC_LD_RLX(&team->t.t_tg_reduce_data[is_ws]);
2340   if (reduce_data == NULL &&
2341       __kmp_atomic_compare_store(&team->t.t_tg_reduce_data[is_ws], reduce_data,
2342                                  (void *)1)) {
2343     // single thread enters this block to initialize common reduction data
2344     KMP_DEBUG_ASSERT(reduce_data == NULL);
2345     // first initialize own data, then make a copy other threads can use
2346     tg = (kmp_taskgroup_t *)__kmp_task_reduction_init<T>(gtid, num, data);
2347     reduce_data = __kmp_thread_malloc(thr, num * sizeof(kmp_taskred_data_t));
2348     KMP_MEMCPY(reduce_data, tg->reduce_data, num * sizeof(kmp_taskred_data_t));
2349     // fini counters should be 0 at this point
2350     KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&team->t.t_tg_fini_counter[0]) == 0);
2351     KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&team->t.t_tg_fini_counter[1]) == 0);
2352     KMP_ATOMIC_ST_REL(&team->t.t_tg_reduce_data[is_ws], reduce_data);
2353   } else {
2354     while (
2355         (reduce_data = KMP_ATOMIC_LD_ACQ(&team->t.t_tg_reduce_data[is_ws])) ==
2356         (void *)1) { // wait for task reduction initialization
2357       KMP_CPU_PAUSE();
2358     }
2359     KMP_DEBUG_ASSERT(reduce_data > (void *)1); // should be valid pointer here
2360     tg = thr->th.th_current_task->td_taskgroup;
2361     __kmp_task_reduction_init_copy<T>(thr, num, data, tg, reduce_data);
2362   }
2363   return tg;
2364 }
2365 
2366 /*!
2367 @ingroup TASKING
2368 @param loc       Source location info
2369 @param gtid      Global thread ID
2370 @param is_ws     Is 1 if the reduction is for worksharing, 0 otherwise
2371 @param num       Number of data items to reduce
2372 @param data      Array of data for reduction
2373 @return The taskgroup identifier
2374 
2375 Initialize task reduction for a parallel or worksharing.
2376 
Note: this entry assumes the optional compiler-generated initializer routine
has a single parameter - a pointer to the object to be initialized. That means
the reduction either does not use the omp_orig object, or omp_orig is
accessible without the help of the runtime library.
2381 */
2382 void *__kmpc_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws,
2383                                           int num, void *data) {
2384   return __kmp_task_reduction_modifier_init(loc, gtid, is_ws, num,
2385                                             (kmp_task_red_input_t *)data);
2386 }
2387 
2388 /*!
2389 @ingroup TASKING
2390 @param loc       Source location info
2391 @param gtid      Global thread ID
2392 @param is_ws     Is 1 if the reduction is for worksharing, 0 otherwise
2393 @param num       Number of data items to reduce
2394 @param data      Array of data for reduction
2395 @return The taskgroup identifier
2396 
2397 Initialize task reduction for a parallel or worksharing.
2398 
Note: this entry assumes the optional compiler-generated initializer routine
has two parameters: a pointer to the object to be initialized and a pointer to
the omp_orig object.
2401 */
2402 void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int is_ws, int num,
2403                                    void *data) {
2404   return __kmp_task_reduction_modifier_init(loc, gtid, is_ws, num,
2405                                             (kmp_taskred_input_t *)data);
2406 }
2407 
2408 /*!
2409 @ingroup TASKING
2410 @param loc       Source location info
2411 @param gtid      Global thread ID
2412 @param is_ws     Is 1 if the reduction is for worksharing, 0 otherwise
2413 
2414 Finalize task reduction for a parallel or worksharing.
2415 */
2416 void __kmpc_task_reduction_modifier_fini(ident_t *loc, int gtid, int is_ws) {
2417   __kmpc_end_taskgroup(loc, gtid);
2418 }
2419 
2420 // __kmpc_taskgroup: Start a new taskgroup
2421 void __kmpc_taskgroup(ident_t *loc, int gtid) {
2422   kmp_info_t *thread = __kmp_threads[gtid];
2423   kmp_taskdata_t *taskdata = thread->th.th_current_task;
2424   kmp_taskgroup_t *tg_new =
2425       (kmp_taskgroup_t *)__kmp_thread_malloc(thread, sizeof(kmp_taskgroup_t));
2426   KA_TRACE(10, ("__kmpc_taskgroup: T#%d loc=%p group=%p\n", gtid, loc, tg_new));
2427   KMP_ATOMIC_ST_RLX(&tg_new->count, 0);
2428   KMP_ATOMIC_ST_RLX(&tg_new->cancel_request, cancel_noreq);
2429   tg_new->parent = taskdata->td_taskgroup;
2430   tg_new->reduce_data = NULL;
2431   tg_new->reduce_num_data = 0;
2432   taskdata->td_taskgroup = tg_new;
2433 
2434 #if OMPT_SUPPORT && OMPT_OPTIONAL
2435   if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2436     void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2437     if (!codeptr)
2438       codeptr = OMPT_GET_RETURN_ADDRESS(0);
2439     kmp_team_t *team = thread->th.th_team;
2440     ompt_data_t my_task_data = taskdata->ompt_task_info.task_data;
2441     // FIXME: I think this is wrong for lwt!
2442     ompt_data_t my_parallel_data = team->t.ompt_team_info.parallel_data;
2443 
2444     ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2445         ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2446         &(my_task_data), codeptr);
2447   }
2448 #endif
2449 }
2450 
2451 // __kmpc_end_taskgroup: Wait until all tasks generated by the current task
2452 //                       and its descendants are complete
2453 void __kmpc_end_taskgroup(ident_t *loc, int gtid) {
2454   kmp_info_t *thread = __kmp_threads[gtid];
2455   kmp_taskdata_t *taskdata = thread->th.th_current_task;
2456   kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
2457   int thread_finished = FALSE;
2458 
2459 #if OMPT_SUPPORT && OMPT_OPTIONAL
2460   kmp_team_t *team;
2461   ompt_data_t my_task_data;
2462   ompt_data_t my_parallel_data;
2463   void *codeptr;
2464   if (UNLIKELY(ompt_enabled.enabled)) {
2465     team = thread->th.th_team;
2466     my_task_data = taskdata->ompt_task_info.task_data;
2467     // FIXME: I think this is wrong for lwt!
2468     my_parallel_data = team->t.ompt_team_info.parallel_data;
2469     codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2470     if (!codeptr)
2471       codeptr = OMPT_GET_RETURN_ADDRESS(0);
2472   }
2473 #endif
2474 
2475   KA_TRACE(10, ("__kmpc_end_taskgroup(enter): T#%d loc=%p\n", gtid, loc));
2476   KMP_DEBUG_ASSERT(taskgroup != NULL);
2477   KMP_SET_THREAD_STATE_BLOCK(TASKGROUP);
2478 
2479   if (__kmp_tasking_mode != tskm_immediate_exec) {
    // mark the task as waiting (not on a barrier)
2481     taskdata->td_taskwait_counter += 1;
2482     taskdata->td_taskwait_ident = loc;
2483     taskdata->td_taskwait_thread = gtid + 1;
2484 #if USE_ITT_BUILD
2485     // For ITT the taskgroup wait is similar to taskwait until we need to
2486     // distinguish them
2487     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
2488     if (itt_sync_obj != NULL)
2489       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
2490 #endif /* USE_ITT_BUILD */
2491 
2492 #if OMPT_SUPPORT && OMPT_OPTIONAL
2493     if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2494       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2495           ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2496           &(my_task_data), codeptr);
2497     }
2498 #endif
2499 
2500     if (!taskdata->td_flags.team_serial ||
2501         (thread->th.th_task_team != NULL &&
2502          thread->th.th_task_team->tt.tt_found_proxy_tasks)) {
2503       kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *, &(taskgroup->count)),
2504                        0U);
2505       while (KMP_ATOMIC_LD_ACQ(&taskgroup->count) != 0) {
2506         flag.execute_tasks(thread, gtid, FALSE,
2507                            &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
2508                            __kmp_task_stealing_constraint);
2509       }
2510     }
2511     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread; // end waiting
2512 
2513 #if OMPT_SUPPORT && OMPT_OPTIONAL
2514     if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2515       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2516           ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2517           &(my_task_data), codeptr);
2518     }
2519 #endif
2520 
2521 #if USE_ITT_BUILD
2522     if (itt_sync_obj != NULL)
2523       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
2524 #endif /* USE_ITT_BUILD */
2525   }
2526   KMP_DEBUG_ASSERT(taskgroup->count == 0);
2527 
2528   if (taskgroup->reduce_data != NULL) { // need to reduce?
2529     int cnt;
2530     void *reduce_data;
2531     kmp_team_t *t = thread->th.th_team;
2532     kmp_taskred_data_t *arr = (kmp_taskred_data_t *)taskgroup->reduce_data;
    // check if the <priv> data of the first reduction variable is shared for
    // the team
2534     void *priv0 = arr[0].reduce_priv;
2535     if ((reduce_data = KMP_ATOMIC_LD_ACQ(&t->t.t_tg_reduce_data[0])) != NULL &&
2536         ((kmp_taskred_data_t *)reduce_data)[0].reduce_priv == priv0) {
2537       // finishing task reduction on parallel
2538       cnt = KMP_ATOMIC_INC(&t->t.t_tg_fini_counter[0]);
2539       if (cnt == thread->th.th_team_nproc - 1) {
2540         // we are the last thread passing __kmpc_reduction_modifier_fini()
2541         // finalize task reduction:
2542         __kmp_task_reduction_fini(thread, taskgroup);
2543         // cleanup fields in the team structure:
2544         // TODO: is relaxed store enough here (whole barrier should follow)?
2545         __kmp_thread_free(thread, reduce_data);
2546         KMP_ATOMIC_ST_REL(&t->t.t_tg_reduce_data[0], NULL);
2547         KMP_ATOMIC_ST_REL(&t->t.t_tg_fini_counter[0], 0);
2548       } else {
2549         // we are not the last thread passing __kmpc_reduction_modifier_fini(),
2550         // so do not finalize reduction, just clean own copy of the data
2551         __kmp_task_reduction_clean(thread, taskgroup);
2552       }
2553     } else if ((reduce_data = KMP_ATOMIC_LD_ACQ(&t->t.t_tg_reduce_data[1])) !=
2554                    NULL &&
2555                ((kmp_taskred_data_t *)reduce_data)[0].reduce_priv == priv0) {
2556       // finishing task reduction on worksharing
2557       cnt = KMP_ATOMIC_INC(&t->t.t_tg_fini_counter[1]);
2558       if (cnt == thread->th.th_team_nproc - 1) {
2559         // we are the last thread passing __kmpc_reduction_modifier_fini()
2560         __kmp_task_reduction_fini(thread, taskgroup);
2561         // cleanup fields in team structure:
2562         // TODO: is relaxed store enough here (whole barrier should follow)?
2563         __kmp_thread_free(thread, reduce_data);
2564         KMP_ATOMIC_ST_REL(&t->t.t_tg_reduce_data[1], NULL);
2565         KMP_ATOMIC_ST_REL(&t->t.t_tg_fini_counter[1], 0);
2566       } else {
2567         // we are not the last thread passing __kmpc_reduction_modifier_fini(),
2568         // so do not finalize reduction, just clean own copy of the data
2569         __kmp_task_reduction_clean(thread, taskgroup);
2570       }
2571     } else {
2572       // finishing task reduction on taskgroup
2573       __kmp_task_reduction_fini(thread, taskgroup);
2574     }
2575   }
2576   // Restore parent taskgroup for the current task
2577   taskdata->td_taskgroup = taskgroup->parent;
2578   __kmp_thread_free(thread, taskgroup);
2579 
2580   KA_TRACE(10, ("__kmpc_end_taskgroup(exit): T#%d task %p finished waiting\n",
2581                 gtid, taskdata));
2582   ANNOTATE_HAPPENS_AFTER(taskdata);
2583 
2584 #if OMPT_SUPPORT && OMPT_OPTIONAL
2585   if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2586     ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2587         ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2588         &(my_task_data), codeptr);
2589   }
2590 #endif
2591 }
2592 
2593 // __kmp_remove_my_task: remove a task from my own deque
2594 static kmp_task_t *__kmp_remove_my_task(kmp_info_t *thread, kmp_int32 gtid,
2595                                         kmp_task_team_t *task_team,
2596                                         kmp_int32 is_constrained) {
2597   kmp_task_t *task;
2598   kmp_taskdata_t *taskdata;
2599   kmp_thread_data_t *thread_data;
2600   kmp_uint32 tail;
2601 
2602   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2603   KMP_DEBUG_ASSERT(task_team->tt.tt_threads_data !=
2604                    NULL); // Caller should check this condition
2605 
2606   thread_data = &task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
2607 
2608   KA_TRACE(10, ("__kmp_remove_my_task(enter): T#%d ntasks=%d head=%u tail=%u\n",
2609                 gtid, thread_data->td.td_deque_ntasks,
2610                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2611 
2612   if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2613     KA_TRACE(10,
2614              ("__kmp_remove_my_task(exit #1): T#%d No tasks to remove: "
2615               "ntasks=%d head=%u tail=%u\n",
2616               gtid, thread_data->td.td_deque_ntasks,
2617               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2618     return NULL;
2619   }
2620 
2621   __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
2622 
2623   if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2624     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2625     KA_TRACE(10,
2626              ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: "
2627               "ntasks=%d head=%u tail=%u\n",
2628               gtid, thread_data->td.td_deque_ntasks,
2629               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2630     return NULL;
2631   }
2632 
2633   tail = (thread_data->td.td_deque_tail - 1) &
2634          TASK_DEQUE_MASK(thread_data->td); // Wrap index.
2635   taskdata = thread_data->td.td_deque[tail];
2636 
2637   if (!__kmp_task_is_allowed(gtid, is_constrained, taskdata,
2638                              thread->th.th_current_task)) {
    // Task scheduling constraint (TSC) prohibits taking the tail task
2640     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2641     KA_TRACE(10,
2642              ("__kmp_remove_my_task(exit #3): T#%d TSC blocks tail task: "
2643               "ntasks=%d head=%u tail=%u\n",
2644               gtid, thread_data->td.td_deque_ntasks,
2645               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2646     return NULL;
2647   }
2648 
2649   thread_data->td.td_deque_tail = tail;
2650   TCW_4(thread_data->td.td_deque_ntasks, thread_data->td.td_deque_ntasks - 1);
2651 
2652   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2653 
2654   KA_TRACE(10, ("__kmp_remove_my_task(exit #4): T#%d task %p removed: "
2655                 "ntasks=%d head=%u tail=%u\n",
2656                 gtid, taskdata, thread_data->td.td_deque_ntasks,
2657                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2658 
2659   task = KMP_TASKDATA_TO_TASK(taskdata);
2660   return task;
2661 }
2662 
2663 // __kmp_steal_task: remove a task from another thread's deque
2664 // Assume that calling thread has already checked existence of
2665 // task_team thread_data before calling this routine.
2666 static kmp_task_t *__kmp_steal_task(kmp_info_t *victim_thr, kmp_int32 gtid,
2667                                     kmp_task_team_t *task_team,
2668                                     std::atomic<kmp_int32> *unfinished_threads,
2669                                     int *thread_finished,
2670                                     kmp_int32 is_constrained) {
2671   kmp_task_t *task;
2672   kmp_taskdata_t *taskdata;
2673   kmp_taskdata_t *current;
2674   kmp_thread_data_t *victim_td, *threads_data;
2675   kmp_int32 target;
2676   kmp_int32 victim_tid;
2677 
2678   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2679 
2680   threads_data = task_team->tt.tt_threads_data;
2681   KMP_DEBUG_ASSERT(threads_data != NULL); // Caller should check this condition
2682 
2683   victim_tid = victim_thr->th.th_info.ds.ds_tid;
2684   victim_td = &threads_data[victim_tid];
2685 
2686   KA_TRACE(10, ("__kmp_steal_task(enter): T#%d try to steal from T#%d: "
2687                 "task_team=%p ntasks=%d head=%u tail=%u\n",
2688                 gtid, __kmp_gtid_from_thread(victim_thr), task_team,
2689                 victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2690                 victim_td->td.td_deque_tail));
2691 
2692   if (TCR_4(victim_td->td.td_deque_ntasks) == 0) {
2693     KA_TRACE(10, ("__kmp_steal_task(exit #1): T#%d could not steal from T#%d: "
2694                   "task_team=%p ntasks=%d head=%u tail=%u\n",
2695                   gtid, __kmp_gtid_from_thread(victim_thr), task_team,
2696                   victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2697                   victim_td->td.td_deque_tail));
2698     return NULL;
2699   }
2700 
2701   __kmp_acquire_bootstrap_lock(&victim_td->td.td_deque_lock);
2702 
2703   int ntasks = TCR_4(victim_td->td.td_deque_ntasks);
2704   // Check again after we acquire the lock
2705   if (ntasks == 0) {
2706     __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2707     KA_TRACE(10, ("__kmp_steal_task(exit #2): T#%d could not steal from T#%d: "
2708                   "task_team=%p ntasks=%d head=%u tail=%u\n",
2709                   gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2710                   victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2711     return NULL;
2712   }
2713 
2714   KMP_DEBUG_ASSERT(victim_td->td.td_deque != NULL);
2715   current = __kmp_threads[gtid]->th.th_current_task;
2716   taskdata = victim_td->td.td_deque[victim_td->td.td_deque_head];
2717   if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
2718     // Bump head pointer and Wrap.
2719     victim_td->td.td_deque_head =
2720         (victim_td->td.td_deque_head + 1) & TASK_DEQUE_MASK(victim_td->td);
2721   } else {
2722     if (!task_team->tt.tt_untied_task_encountered) {
      // Task scheduling constraint (TSC) prohibits stealing the victim task
2724       __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2725       KA_TRACE(10, ("__kmp_steal_task(exit #3): T#%d could not steal from "
2726                     "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2727                     gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2728                     victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2729       return NULL;
2730     }
2731     int i;
2732     // walk through victim's deque trying to steal any task
2733     target = victim_td->td.td_deque_head;
2734     taskdata = NULL;
2735     for (i = 1; i < ntasks; ++i) {
2736       target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2737       taskdata = victim_td->td.td_deque[target];
2738       if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
2739         break; // found victim task
2740       } else {
2741         taskdata = NULL;
2742       }
2743     }
2744     if (taskdata == NULL) {
2745       // No appropriate candidate to steal found
2746       __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2747       KA_TRACE(10, ("__kmp_steal_task(exit #4): T#%d could not steal from "
2748                     "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2749                     gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2750                     victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2751       return NULL;
2752     }
2753     int prev = target;
2754     for (i = i + 1; i < ntasks; ++i) {
2755       // shift remaining tasks in the deque left by 1
2756       target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2757       victim_td->td.td_deque[prev] = victim_td->td.td_deque[target];
2758       prev = target;
2759     }
2760     KMP_DEBUG_ASSERT(
2761         victim_td->td.td_deque_tail ==
2762         (kmp_uint32)((target + 1) & TASK_DEQUE_MASK(victim_td->td)));
    victim_td->td.td_deque_tail = target; // tail -= 1 (wrapped)
2764   }
2765   if (*thread_finished) {
2766     // We need to un-mark this victim as a finished victim.  This must be done
2767     // before releasing the lock, or else other threads (starting with the
2768     // master victim) might be prematurely released from the barrier!!!
2769     kmp_int32 count;
2770 
2771     count = KMP_ATOMIC_INC(unfinished_threads);
2772 
2773     KA_TRACE(
2774         20,
2775         ("__kmp_steal_task: T#%d inc unfinished_threads to %d: task_team=%p\n",
2776          gtid, count + 1, task_team));
2777 
2778     *thread_finished = FALSE;
2779   }
2780   TCW_4(victim_td->td.td_deque_ntasks, ntasks - 1);
2781 
2782   __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2783 
2784   KMP_COUNT_BLOCK(TASK_stolen);
2785   KA_TRACE(10,
2786            ("__kmp_steal_task(exit #5): T#%d stole task %p from T#%d: "
2787             "task_team=%p ntasks=%d head=%u tail=%u\n",
2788             gtid, taskdata, __kmp_gtid_from_thread(victim_thr), task_team,
2789             ntasks, victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2790 
2791   task = KMP_TASKDATA_TO_TASK(taskdata);
2792   return task;
2793 }
2794 
2795 // __kmp_execute_tasks_template: Choose and execute tasks until either the
// condition is satisfied (return true) or there are none left (return false).
2797 //
2798 // final_spin is TRUE if this is the spin at the release barrier.
2799 // thread_finished indicates whether the thread is finished executing all
2800 // the tasks it has on its deque, and is at the release barrier.
2801 // spinner is the location on which to spin.
2802 // spinner == NULL means only execute a single task and return.
2803 // checker is the value to check to terminate the spin.
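//
// A minimal sketch (the actual wait loops live in kmp_wait_release.h) of the
// caller-side contract, assuming 'flag' is a kmp_flag_64 being waited on at a
// release barrier and 'this_thr'/'gtid' describe the waiting thread:
//
//   int thread_finished = FALSE;
//   while (!flag.done_check()) {
//     // final_spin == TRUE: this is the release-barrier spin loop
//     if (__kmp_execute_tasks_64(this_thr, gtid, &flag, TRUE, &thread_finished
//                                    USE_ITT_BUILD_ARG(NULL), 0))
//       break; // spin condition became satisfied while executing tasks
//   }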
2804 template <class C>
2805 static inline int __kmp_execute_tasks_template(
2806     kmp_info_t *thread, kmp_int32 gtid, C *flag, int final_spin,
2807     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2808     kmp_int32 is_constrained) {
2809   kmp_task_team_t *task_team = thread->th.th_task_team;
2810   kmp_thread_data_t *threads_data;
2811   kmp_task_t *task;
2812   kmp_info_t *other_thread;
2813   kmp_taskdata_t *current_task = thread->th.th_current_task;
2814   std::atomic<kmp_int32> *unfinished_threads;
2815   kmp_int32 nthreads, victim_tid = -2, use_own_tasks = 1, new_victim = 0,
2816                       tid = thread->th.th_info.ds.ds_tid;
2817 
2818   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2819   KMP_DEBUG_ASSERT(thread == __kmp_threads[gtid]);
2820 
2821   if (task_team == NULL || current_task == NULL)
2822     return FALSE;
2823 
2824   KA_TRACE(15, ("__kmp_execute_tasks_template(enter): T#%d final_spin=%d "
2825                 "*thread_finished=%d\n",
2826                 gtid, final_spin, *thread_finished));
2827 
2828   thread->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
2829   threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
2830   KMP_DEBUG_ASSERT(threads_data != NULL);
2831 
2832   nthreads = task_team->tt.tt_nproc;
2833   unfinished_threads = &(task_team->tt.tt_unfinished_threads);
2834   KMP_DEBUG_ASSERT(nthreads > 1 || task_team->tt.tt_found_proxy_tasks);
2835   KMP_DEBUG_ASSERT(*unfinished_threads >= 0);
2836 
2837   while (1) { // Outer loop keeps trying to find tasks in case of single thread
2838     // getting tasks from target constructs
2839     while (1) { // Inner loop to find a task and execute it
2840       task = NULL;
2841       if (use_own_tasks) { // check on own queue first
2842         task = __kmp_remove_my_task(thread, gtid, task_team, is_constrained);
2843       }
2844       if ((task == NULL) && (nthreads > 1)) { // Steal a task
2845         int asleep = 1;
2846         use_own_tasks = 0;
2847         // Try to steal from the last place I stole from successfully.
2848         if (victim_tid == -2) { // haven't stolen anything yet
2849           victim_tid = threads_data[tid].td.td_deque_last_stolen;
2850           if (victim_tid !=
2851               -1) // if we have a last stolen from victim, get the thread
2852             other_thread = threads_data[victim_tid].td.td_thr;
2853         }
2854         if (victim_tid != -1) { // found last victim
2855           asleep = 0;
2856         } else if (!new_victim) { // no recent steals and we haven't already
2857           // used a new victim; select a random thread
2858           do { // Find a different thread to steal work from.
2859             // Pick a random thread. Initial plan was to cycle through all the
2860             // threads, and only return if we tried to steal from every thread,
2861             // and failed.  Arch says that's not such a great idea.
2862             victim_tid = __kmp_get_random(thread) % (nthreads - 1);
2863             if (victim_tid >= tid) {
2864               ++victim_tid; // Adjusts random distribution to exclude self
2865             }
2866             // Found a potential victim
2867             other_thread = threads_data[victim_tid].td.td_thr;
            // There is a slight chance that __kmp_enable_tasking() did not
            // wake up all threads waiting at the barrier.  If the victim is
            // sleeping, then wake it up. Since we were going to pay the cache
            // miss penalty for referencing another thread's kmp_info_t struct
            // anyway, the check shouldn't cost too much performance at this
            // point. In extra barrier mode, threads do not sleep at the
            // separate tasking barrier, so this isn't a problem.
2876             asleep = 0;
2877             if ((__kmp_tasking_mode == tskm_task_teams) &&
2878                 (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) &&
2879                 (TCR_PTR(CCAST(void *, other_thread->th.th_sleep_loc)) !=
2880                  NULL)) {
2881               asleep = 1;
2882               __kmp_null_resume_wrapper(__kmp_gtid_from_thread(other_thread),
2883                                         other_thread->th.th_sleep_loc);
              // A sleeping thread should not have any tasks on its queue.
2885               // There is a slight possibility that it resumes, steals a task
2886               // from another thread, which spawns more tasks, all in the time
2887               // that it takes this thread to check => don't write an assertion
2888               // that the victim's queue is empty.  Try stealing from a
2889               // different thread.
2890             }
2891           } while (asleep);
2892         }
2893 
2894         if (!asleep) {
2895           // We have a victim to try to steal from
2896           task = __kmp_steal_task(other_thread, gtid, task_team,
2897                                   unfinished_threads, thread_finished,
2898                                   is_constrained);
2899         }
2900         if (task != NULL) { // set last stolen to victim
2901           if (threads_data[tid].td.td_deque_last_stolen != victim_tid) {
2902             threads_data[tid].td.td_deque_last_stolen = victim_tid;
            // The pre-refactored code did not try more than 1 successful new
            // victim, unless the last one generated more local tasks;
            // new_victim keeps track of this
2906             new_victim = 1;
2907           }
2908         } else { // No tasks found; unset last_stolen
2909           KMP_CHECK_UPDATE(threads_data[tid].td.td_deque_last_stolen, -1);
2910           victim_tid = -2; // no successful victim found
2911         }
2912       }
2913 
2914       if (task == NULL) // break out of tasking loop
2915         break;
2916 
2917 // Found a task; execute it
2918 #if USE_ITT_BUILD && USE_ITT_NOTIFY
2919       if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
2920         if (itt_sync_obj == NULL) { // we are at fork barrier where we could not
2921           // get the object reliably
2922           itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
2923         }
2924         __kmp_itt_task_starting(itt_sync_obj);
2925       }
2926 #endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
2927       __kmp_invoke_task(gtid, task, current_task);
2928 #if USE_ITT_BUILD
2929       if (itt_sync_obj != NULL)
2930         __kmp_itt_task_finished(itt_sync_obj);
2931 #endif /* USE_ITT_BUILD */
2932       // If this thread is only partway through the barrier and the condition is
2933       // met, then return now, so that the barrier gather/release pattern can
2934       // proceed. If this thread is in the last spin loop in the barrier,
2935       // waiting to be released, we know that the termination condition will not
2936       // be satisfied, so don't waste any cycles checking it.
2937       if (flag == NULL || (!final_spin && flag->done_check())) {
2938         KA_TRACE(
2939             15,
2940             ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
2941              gtid));
2942         return TRUE;
2943       }
2944       if (thread->th.th_task_team == NULL) {
2945         break;
2946       }
2947       KMP_YIELD(__kmp_library == library_throughput); // Yield before next task
2948       // If execution of a stolen task results in more tasks being placed on our
2949       // run queue, reset use_own_tasks
2950       if (!use_own_tasks && TCR_4(threads_data[tid].td.td_deque_ntasks) != 0) {
2951         KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d stolen task spawned "
2952                       "other tasks, restart\n",
2953                       gtid));
2954         use_own_tasks = 1;
2955         new_victim = 0;
2956       }
2957     }
2958 
2959     // The task source has been exhausted. If in final spin loop of barrier,
2960     // check if termination condition is satisfied. The work queue may be empty
2961     // but there might be proxy tasks still executing.
2962     if (final_spin &&
2963         KMP_ATOMIC_LD_ACQ(&current_task->td_incomplete_child_tasks) == 0) {
2964       // First, decrement the #unfinished threads, if that has not already been
2965       // done.  This decrement might be to the spin location, and result in the
2966       // termination condition being satisfied.
2967       if (!*thread_finished) {
2968         kmp_int32 count;
2969 
2970         count = KMP_ATOMIC_DEC(unfinished_threads) - 1;
2971         KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d dec "
2972                       "unfinished_threads to %d task_team=%p\n",
2973                       gtid, count, task_team));
2974         *thread_finished = TRUE;
2975       }
2976 
2977       // It is now unsafe to reference thread->th.th_team !!!
2978       // Decrementing task_team->tt.tt_unfinished_threads can allow the master
2979       // thread to pass through the barrier, where it might reset each thread's
2980       // th.th_team field for the next parallel region. If we can steal more
2981       // work, we know that this has not happened yet.
2982       if (flag != NULL && flag->done_check()) {
2983         KA_TRACE(
2984             15,
2985             ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
2986              gtid));
2987         return TRUE;
2988       }
2989     }
2990 
2991     // If this thread's task team is NULL, master has recognized that there are
2992     // no more tasks; bail out
2993     if (thread->th.th_task_team == NULL) {
2994       KA_TRACE(15,
2995                ("__kmp_execute_tasks_template: T#%d no more tasks\n", gtid));
2996       return FALSE;
2997     }
2998 
2999     // We could be getting tasks from target constructs; if this is the only
3000     // thread, keep trying to execute tasks from own queue
3001     if (nthreads == 1)
3002       use_own_tasks = 1;
3003     else {
3004       KA_TRACE(15,
3005                ("__kmp_execute_tasks_template: T#%d can't find work\n", gtid));
3006       return FALSE;
3007     }
3008   }
3009 }
3010 
3011 int __kmp_execute_tasks_32(
3012     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32 *flag, int final_spin,
3013     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3014     kmp_int32 is_constrained) {
3015   return __kmp_execute_tasks_template(
3016       thread, gtid, flag, final_spin,
3017       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3018 }
3019 
3020 int __kmp_execute_tasks_64(
3021     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64 *flag, int final_spin,
3022     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3023     kmp_int32 is_constrained) {
3024   return __kmp_execute_tasks_template(
3025       thread, gtid, flag, final_spin,
3026       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3027 }
3028 
3029 int __kmp_execute_tasks_oncore(
3030     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin,
3031     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3032     kmp_int32 is_constrained) {
3033   return __kmp_execute_tasks_template(
3034       thread, gtid, flag, final_spin,
3035       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3036 }
3037 
3038 // __kmp_enable_tasking: Allocate task team and resume threads sleeping at the
3039 // next barrier so they can assist in executing enqueued tasks.
3040 // First thread in allocates the task team atomically.
3041 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
3042                                  kmp_info_t *this_thr) {
3043   kmp_thread_data_t *threads_data;
3044   int nthreads, i, is_init_thread;
3045 
3046   KA_TRACE(10, ("__kmp_enable_tasking(enter): T#%d\n",
3047                 __kmp_gtid_from_thread(this_thr)));
3048 
3049   KMP_DEBUG_ASSERT(task_team != NULL);
3050   KMP_DEBUG_ASSERT(this_thr->th.th_team != NULL);
3051 
3052   nthreads = task_team->tt.tt_nproc;
3053   KMP_DEBUG_ASSERT(nthreads > 0);
3054   KMP_DEBUG_ASSERT(nthreads == this_thr->th.th_team->t.t_nproc);
3055 
3056   // Allocate or increase the size of threads_data if necessary
3057   is_init_thread = __kmp_realloc_task_threads_data(this_thr, task_team);
3058 
3059   if (!is_init_thread) {
3060     // Some other thread already set up the array.
3061     KA_TRACE(
3062         20,
3063         ("__kmp_enable_tasking(exit): T#%d: threads array already set up.\n",
3064          __kmp_gtid_from_thread(this_thr)));
3065     return;
3066   }
3067   threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
3068   KMP_DEBUG_ASSERT(threads_data != NULL);
3069 
3070   if (__kmp_tasking_mode == tskm_task_teams &&
3071       (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME)) {
3072     // Release any threads sleeping at the barrier, so that they can steal
3073     // tasks and execute them.  In extra barrier mode, tasks do not sleep
3074     // at the separate tasking barrier, so this isn't a problem.
3075     for (i = 0; i < nthreads; i++) {
3076       volatile void *sleep_loc;
3077       kmp_info_t *thread = threads_data[i].td.td_thr;
3078 
3079       if (i == this_thr->th.th_info.ds.ds_tid) {
3080         continue;
3081       }
3082       // Since we haven't locked the thread's suspend mutex lock at this
3083       // point, there is a small window where a thread might be putting
3084       // itself to sleep, but hasn't set the th_sleep_loc field yet.
      // To work around this, __kmp_execute_tasks_template() periodically
      // checks to see if other threads are sleeping (using the same random
      // mechanism that is used for task stealing) and awakens them if they
      // are.
3088       if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
3089           NULL) {
3090         KF_TRACE(50, ("__kmp_enable_tasking: T#%d waking up thread T#%d\n",
3091                       __kmp_gtid_from_thread(this_thr),
3092                       __kmp_gtid_from_thread(thread)));
3093         __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
3094       } else {
3095         KF_TRACE(50, ("__kmp_enable_tasking: T#%d don't wake up thread T#%d\n",
3096                       __kmp_gtid_from_thread(this_thr),
3097                       __kmp_gtid_from_thread(thread)));
3098       }
3099     }
3100   }
3101 
3102   KA_TRACE(10, ("__kmp_enable_tasking(exit): T#%d\n",
3103                 __kmp_gtid_from_thread(this_thr)));
3104 }
3105 
3106 /* // TODO: Check the comment consistency
 * Utility routines for "task teams".  A task team (kmp_task_team_t) is kind
 * of like a shadow of the kmp_team_t data struct, with a different lifetime.
 * After a child thread checks into a barrier and calls __kmp_release() from
 * the particular variant of __kmp_<barrier_kind>_barrier_gather(), it can no
 * longer assume that the kmp_team_t structure is intact (at any moment, the
 * master thread may exit the barrier code and free the team data structure,
 * and return the threads to the thread pool).
 *
 * This does not work with the tasking code, as the thread is still
 * expected to participate in the execution of any tasks that may have been
 * spawned by a member of the team, and the thread still needs access to
 * each thread in the team, so that it can steal work from it.
 *
 * Enter the existence of the kmp_task_team_t struct.  It employs a reference
 * counting mechanism, and is allocated by the master thread before calling
 * __kmp_<barrier_kind>_release, and then is released by the last thread to
3123  * exit __kmp_<barrier_kind>_release at the next barrier.  I.e. the lifetimes
3124  * of the kmp_task_team_t structs for consecutive barriers can overlap
3125  * (and will, unless the master thread is the last thread to exit the barrier
3126  * release phase, which is not typical). The existence of such a struct is
3127  * useful outside the context of tasking.
3128  *
3129  * We currently use the existence of the threads array as an indicator that
3130  * tasks were spawned since the last barrier.  If the structure is to be
3131  * useful outside the context of tasking, then this will have to change, but
3132  * not setting the field minimizes the performance impact of tasking on
3133  * barriers, when no explicit tasks were spawned (pushed, actually).
3134  */
3135 
3136 static kmp_task_team_t *__kmp_free_task_teams =
3137     NULL; // Free list for task_team data structures
3138 // Lock for task team data structures
3139 kmp_bootstrap_lock_t __kmp_task_team_lock =
3140     KMP_BOOTSTRAP_LOCK_INITIALIZER(__kmp_task_team_lock);
3141 
3142 // __kmp_alloc_task_deque:
// Allocates a task deque for a particular thread, and initializes the
// necessary data structures relating to the deque.  This only happens once
// per thread per task team since task teams are recycled. No lock is needed
// during allocation since each thread allocates its own deque.
3147 static void __kmp_alloc_task_deque(kmp_info_t *thread,
3148                                    kmp_thread_data_t *thread_data) {
3149   __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
3150   KMP_DEBUG_ASSERT(thread_data->td.td_deque == NULL);
3151 
3152   // Initialize last stolen task field to "none"
3153   thread_data->td.td_deque_last_stolen = -1;
3154 
3155   KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == 0);
3156   KMP_DEBUG_ASSERT(thread_data->td.td_deque_head == 0);
3157   KMP_DEBUG_ASSERT(thread_data->td.td_deque_tail == 0);
3158 
3159   KE_TRACE(
3160       10,
3161       ("__kmp_alloc_task_deque: T#%d allocating deque[%d] for thread_data %p\n",
3162        __kmp_gtid_from_thread(thread), INITIAL_TASK_DEQUE_SIZE, thread_data));
3163   // Allocate space for task deque, and zero the deque
3164   // Cannot use __kmp_thread_calloc() because threads not around for
3165   // kmp_reap_task_team( ).
3166   thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
3167       INITIAL_TASK_DEQUE_SIZE * sizeof(kmp_taskdata_t *));
3168   thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
3169 }
3170 
3171 // __kmp_free_task_deque:
// Deallocates a task deque for a particular thread. Happens at library
// deallocation, so there is no need to reset all thread data fields.
3174 static void __kmp_free_task_deque(kmp_thread_data_t *thread_data) {
3175   if (thread_data->td.td_deque != NULL) {
3176     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3177     TCW_4(thread_data->td.td_deque_ntasks, 0);
3178     __kmp_free(thread_data->td.td_deque);
3179     thread_data->td.td_deque = NULL;
3180     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3181   }
3182 
3183 #ifdef BUILD_TIED_TASK_STACK
3184   // GEH: Figure out what to do here for td_susp_tied_tasks
3185   if (thread_data->td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY) {
3186     __kmp_free_task_stack(__kmp_thread_from_gtid(gtid), thread_data);
3187   }
3188 #endif // BUILD_TIED_TASK_STACK
3189 }
3190 
3191 // __kmp_realloc_task_threads_data:
3192 // Allocates a threads_data array for a task team, either by allocating an
3193 // initial array or enlarging an existing array.  Only the first thread to get
3194 // the lock allocs or enlarges the array and re-initializes the array elements.
3195 // That thread returns "TRUE", the rest return "FALSE".
3196 // Assumes that the new array size is given by task_team -> tt.tt_nproc.
3197 // The current size is given by task_team -> tt.tt_max_threads.
3198 static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
3199                                            kmp_task_team_t *task_team) {
3200   kmp_thread_data_t **threads_data_p;
3201   kmp_int32 nthreads, maxthreads;
3202   int is_init_thread = FALSE;
3203 
3204   if (TCR_4(task_team->tt.tt_found_tasks)) {
3205     // Already reallocated and initialized.
3206     return FALSE;
3207   }
3208 
3209   threads_data_p = &task_team->tt.tt_threads_data;
3210   nthreads = task_team->tt.tt_nproc;
3211   maxthreads = task_team->tt.tt_max_threads;
3212 
  // All threads must lock when they encounter the first task of the implicit
  // task region to make sure threads_data fields are (re)initialized before
  // they are used.
3216   __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
3217 
3218   if (!TCR_4(task_team->tt.tt_found_tasks)) {
3219     // first thread to enable tasking
3220     kmp_team_t *team = thread->th.th_team;
3221     int i;
3222 
3223     is_init_thread = TRUE;
3224     if (maxthreads < nthreads) {
3225 
3226       if (*threads_data_p != NULL) {
3227         kmp_thread_data_t *old_data = *threads_data_p;
3228         kmp_thread_data_t *new_data = NULL;
3229 
3230         KE_TRACE(
3231             10,
3232             ("__kmp_realloc_task_threads_data: T#%d reallocating "
3233              "threads data for task_team %p, new_size = %d, old_size = %d\n",
3234              __kmp_gtid_from_thread(thread), task_team, nthreads, maxthreads));
3235         // Reallocate threads_data to have more elements than current array
3236         // Cannot use __kmp_thread_realloc() because threads not around for
3237         // kmp_reap_task_team( ).  Note all new array entries are initialized
3238         // to zero by __kmp_allocate().
3239         new_data = (kmp_thread_data_t *)__kmp_allocate(
3240             nthreads * sizeof(kmp_thread_data_t));
3241         // copy old data to new data
3242         KMP_MEMCPY_S((void *)new_data, nthreads * sizeof(kmp_thread_data_t),
3243                      (void *)old_data, maxthreads * sizeof(kmp_thread_data_t));
3244 
3245 #ifdef BUILD_TIED_TASK_STACK
3246         // GEH: Figure out if this is the right thing to do
3247         for (i = maxthreads; i < nthreads; i++) {
3248           kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3249           __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3250         }
3251 #endif // BUILD_TIED_TASK_STACK
3252         // Install the new data and free the old data
3253         (*threads_data_p) = new_data;
3254         __kmp_free(old_data);
3255       } else {
3256         KE_TRACE(10, ("__kmp_realloc_task_threads_data: T#%d allocating "
3257                       "threads data for task_team %p, size = %d\n",
3258                       __kmp_gtid_from_thread(thread), task_team, nthreads));
3259         // Make the initial allocate for threads_data array, and zero entries
3260         // Cannot use __kmp_thread_calloc() because threads not around for
3261         // kmp_reap_task_team( ).
3262         ANNOTATE_IGNORE_WRITES_BEGIN();
3263         *threads_data_p = (kmp_thread_data_t *)__kmp_allocate(
3264             nthreads * sizeof(kmp_thread_data_t));
3265         ANNOTATE_IGNORE_WRITES_END();
3266 #ifdef BUILD_TIED_TASK_STACK
3267         // GEH: Figure out if this is the right thing to do
3268         for (i = 0; i < nthreads; i++) {
3269           kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3270           __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3271         }
3272 #endif // BUILD_TIED_TASK_STACK
3273       }
3274       task_team->tt.tt_max_threads = nthreads;
3275     } else {
3276       // If array has (more than) enough elements, go ahead and use it
3277       KMP_DEBUG_ASSERT(*threads_data_p != NULL);
3278     }
3279 
3280     // initialize threads_data pointers back to thread_info structures
3281     for (i = 0; i < nthreads; i++) {
3282       kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3283       thread_data->td.td_thr = team->t.t_threads[i];
3284 
3285       if (thread_data->td.td_deque_last_stolen >= nthreads) {
        // The last stolen field survives across teams / barriers, and the
        // number of threads may have changed.  It's possible (likely?) that a
        // new parallel region will exhibit the same behavior as the previous
        // region.
3289         thread_data->td.td_deque_last_stolen = -1;
3290       }
3291     }
3292 
3293     KMP_MB();
3294     TCW_SYNC_4(task_team->tt.tt_found_tasks, TRUE);
3295   }
3296 
3297   __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3298   return is_init_thread;
3299 }
3300 
3301 // __kmp_free_task_threads_data:
3302 // Deallocates a threads_data array for a task team, including any attached
3303 // tasking deques.  Only occurs at library shutdown.
3304 static void __kmp_free_task_threads_data(kmp_task_team_t *task_team) {
3305   __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
3306   if (task_team->tt.tt_threads_data != NULL) {
3307     int i;
3308     for (i = 0; i < task_team->tt.tt_max_threads; i++) {
3309       __kmp_free_task_deque(&task_team->tt.tt_threads_data[i]);
3310     }
3311     __kmp_free(task_team->tt.tt_threads_data);
3312     task_team->tt.tt_threads_data = NULL;
3313   }
3314   __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3315 }
3316 
3317 // __kmp_allocate_task_team:
3318 // Allocates a task team associated with a specific team, taking it from
3319 // the global task team free list if possible.  Also initializes data
3320 // structures.
3321 static kmp_task_team_t *__kmp_allocate_task_team(kmp_info_t *thread,
3322                                                  kmp_team_t *team) {
3323   kmp_task_team_t *task_team = NULL;
3324   int nthreads;
3325 
3326   KA_TRACE(20, ("__kmp_allocate_task_team: T#%d entering; team = %p\n",
3327                 (thread ? __kmp_gtid_from_thread(thread) : -1), team));
3328 
3329   if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3330     // Take a task team from the task team pool
3331     __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3332     if (__kmp_free_task_teams != NULL) {
3333       task_team = __kmp_free_task_teams;
3334       TCW_PTR(__kmp_free_task_teams, task_team->tt.tt_next);
3335       task_team->tt.tt_next = NULL;
3336     }
3337     __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3338   }
3339 
3340   if (task_team == NULL) {
3341     KE_TRACE(10, ("__kmp_allocate_task_team: T#%d allocating "
3342                   "task team for team %p\n",
3343                   __kmp_gtid_from_thread(thread), team));
3344     // Allocate a new task team if one is not available.
3345     // Cannot use __kmp_thread_malloc() because threads not around for
3346     // kmp_reap_task_team( ).
3347     task_team = (kmp_task_team_t *)__kmp_allocate(sizeof(kmp_task_team_t));
3348     __kmp_init_bootstrap_lock(&task_team->tt.tt_threads_lock);
3349     // AC: __kmp_allocate zeroes returned memory
3350     // task_team -> tt.tt_threads_data = NULL;
3351     // task_team -> tt.tt_max_threads = 0;
3352     // task_team -> tt.tt_next = NULL;
3353   }
3354 
3355   TCW_4(task_team->tt.tt_found_tasks, FALSE);
3356   TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3357   task_team->tt.tt_nproc = nthreads = team->t.t_nproc;
3358 
3359   KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads, nthreads);
3360   TCW_4(task_team->tt.tt_active, TRUE);
3361 
3362   KA_TRACE(20, ("__kmp_allocate_task_team: T#%d exiting; task_team = %p "
3363                 "unfinished_threads init'd to %d\n",
3364                 (thread ? __kmp_gtid_from_thread(thread) : -1), task_team,
3365                 KMP_ATOMIC_LD_RLX(&task_team->tt.tt_unfinished_threads)));
3366   return task_team;
3367 }
3368 
3369 // __kmp_free_task_team:
3370 // Frees the task team associated with a specific thread, and adds it
3371 // to the global task team free list.
3372 void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team) {
3373   KA_TRACE(20, ("__kmp_free_task_team: T#%d task_team = %p\n",
3374                 thread ? __kmp_gtid_from_thread(thread) : -1, task_team));
3375 
3376   // Put task team back on free list
3377   __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3378 
3379   KMP_DEBUG_ASSERT(task_team->tt.tt_next == NULL);
3380   task_team->tt.tt_next = __kmp_free_task_teams;
3381   TCW_PTR(__kmp_free_task_teams, task_team);
3382 
3383   __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3384 }
3385 
3386 // __kmp_reap_task_teams:
3387 // Free all the task teams on the task team free list.
3388 // Should only be done during library shutdown.
3389 // Cannot do anything that needs a thread structure or gtid since they are
3390 // already gone.
3391 void __kmp_reap_task_teams(void) {
3392   kmp_task_team_t *task_team;
3393 
3394   if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3395     // Free all task_teams on the free list
3396     __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3397     while ((task_team = __kmp_free_task_teams) != NULL) {
3398       __kmp_free_task_teams = task_team->tt.tt_next;
3399       task_team->tt.tt_next = NULL;
3400 
3401       // Free threads_data if necessary
3402       if (task_team->tt.tt_threads_data != NULL) {
3403         __kmp_free_task_threads_data(task_team);
3404       }
3405       __kmp_free(task_team);
3406     }
3407     __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3408   }
3409 }
3410 
3411 // __kmp_wait_to_unref_task_teams:
3412 // Some threads could still be in the fork barrier release code, possibly
3413 // trying to steal tasks.  Wait for each thread to unreference its task team.
3414 void __kmp_wait_to_unref_task_teams(void) {
3415   kmp_info_t *thread;
3416   kmp_uint32 spins;
3417   int done;
3418 
3419   KMP_INIT_YIELD(spins);
3420 
3421   for (;;) {
3422     done = TRUE;
3423 
    // TODO: GEH - this may be wrong because some sync would be necessary
3425     // in case threads are added to the pool during the traversal. Need to
3426     // verify that lock for thread pool is held when calling this routine.
3427     for (thread = CCAST(kmp_info_t *, __kmp_thread_pool); thread != NULL;
3428          thread = thread->th.th_next_pool) {
3429 #if KMP_OS_WINDOWS
3430       DWORD exit_val;
3431 #endif
3432       if (TCR_PTR(thread->th.th_task_team) == NULL) {
3433         KA_TRACE(10, ("__kmp_wait_to_unref_task_team: T#%d task_team == NULL\n",
3434                       __kmp_gtid_from_thread(thread)));
3435         continue;
3436       }
3437 #if KMP_OS_WINDOWS
3438       // TODO: GEH - add this check for Linux* OS / OS X* as well?
3439       if (!__kmp_is_thread_alive(thread, &exit_val)) {
3440         thread->th.th_task_team = NULL;
3441         continue;
3442       }
3443 #endif
3444 
3445       done = FALSE; // Because th_task_team pointer is not NULL for this thread
3446 
3447       KA_TRACE(10, ("__kmp_wait_to_unref_task_team: Waiting for T#%d to "
3448                     "unreference task_team\n",
3449                     __kmp_gtid_from_thread(thread)));
3450 
3451       if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
3452         volatile void *sleep_loc;
3453         // If the thread is sleeping, awaken it.
3454         if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
3455             NULL) {
3456           KA_TRACE(
3457               10,
3458               ("__kmp_wait_to_unref_task_team: T#%d waking up thread T#%d\n",
3459                __kmp_gtid_from_thread(thread), __kmp_gtid_from_thread(thread)));
3460           __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
3461         }
3462       }
3463     }
3464     if (done) {
3465       break;
3466     }
3467 
3468     // If oversubscribed or have waited a bit, yield.
3469     KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
3470   }
3471 }
3472 
3473 // __kmp_task_team_setup:  Create a task_team for the current team, but use
3474 // an already created, unused one if it already exists.
3475 void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team, int always) {
3476   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3477 
3478   // If this task_team hasn't been created yet, allocate it. It will be used in
3479   // the region after the next.
3480   // If it exists, it is the current task team and shouldn't be touched yet as
3481   // it may still be in use.
3482   if (team->t.t_task_team[this_thr->th.th_task_state] == NULL &&
3483       (always || team->t.t_nproc > 1)) {
3484     team->t.t_task_team[this_thr->th.th_task_state] =
3485         __kmp_allocate_task_team(this_thr, team);
3486     KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created new task_team %p "
3487                   "for team %d at parity=%d\n",
3488                   __kmp_gtid_from_thread(this_thr),
3489                   team->t.t_task_team[this_thr->th.th_task_state],
3490                   ((team != NULL) ? team->t.t_id : -1),
3491                   this_thr->th.th_task_state));
3492   }
3493 
3494   // After threads exit the release, they will call sync, and then point to this
3495   // other task_team; make sure it is allocated and properly initialized. As
3496   // threads spin in the barrier release phase, they will continue to use the
  // previous task_team struct (above), until they receive the signal to stop
3498   // checking for tasks (they can't safely reference the kmp_team_t struct,
3499   // which could be reallocated by the master thread). No task teams are formed
3500   // for serialized teams.
3501   if (team->t.t_nproc > 1) {
3502     int other_team = 1 - this_thr->th.th_task_state;
3503     if (team->t.t_task_team[other_team] == NULL) { // setup other team as well
3504       team->t.t_task_team[other_team] =
3505           __kmp_allocate_task_team(this_thr, team);
3506       KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created second new "
3507                     "task_team %p for team %d at parity=%d\n",
3508                     __kmp_gtid_from_thread(this_thr),
3509                     team->t.t_task_team[other_team],
3510                     ((team != NULL) ? team->t.t_id : -1), other_team));
3511     } else { // Leave the old task team struct in place for the upcoming region;
3512       // adjust as needed
3513       kmp_task_team_t *task_team = team->t.t_task_team[other_team];
3514       if (!task_team->tt.tt_active ||
3515           team->t.t_nproc != task_team->tt.tt_nproc) {
3516         TCW_4(task_team->tt.tt_nproc, team->t.t_nproc);
3517         TCW_4(task_team->tt.tt_found_tasks, FALSE);
3518         TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3519         KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads,
3520                           team->t.t_nproc);
3521         TCW_4(task_team->tt.tt_active, TRUE);
3522       }
3523       // if team size has changed, the first thread to enable tasking will
3524       // realloc threads_data if necessary
3525       KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d reset next task_team "
3526                     "%p for team %d at parity=%d\n",
3527                     __kmp_gtid_from_thread(this_thr),
3528                     team->t.t_task_team[other_team],
3529                     ((team != NULL) ? team->t.t_id : -1), other_team));
3530     }
3531   }
3532 }
3533 
3534 // __kmp_task_team_sync: Propagation of task team data from team to threads
3535 // which happens just after the release phase of a team barrier.  This may be
3536 // called by any thread, but only for teams with # threads > 1.
3537 void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team) {
3538   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3539 
3540   // Toggle the th_task_state field, to switch which task_team this thread
3541   // refers to
3542   this_thr->th.th_task_state = 1 - this_thr->th.th_task_state;
3543   // It is now safe to propagate the task team pointer from the team struct to
3544   // the current thread.
3545   TCW_PTR(this_thr->th.th_task_team,
3546           team->t.t_task_team[this_thr->th.th_task_state]);
3547   KA_TRACE(20,
3548            ("__kmp_task_team_sync: Thread T#%d task team switched to task_team "
3549             "%p from Team #%d (parity=%d)\n",
3550             __kmp_gtid_from_thread(this_thr), this_thr->th.th_task_team,
3551             ((team != NULL) ? team->t.t_id : -1), this_thr->th.th_task_state));
3552 }
3553 
3554 // __kmp_task_team_wait: Master thread waits for outstanding tasks after the
3555 // barrier gather phase. Only called by master thread if #threads in team > 1 or
3556 // if proxy tasks were created.
3557 //
3558 // wait is a flag that defaults to 1 (see kmp.h), but waiting can be turned off
3559 // by passing in 0 optionally as the last argument. When wait is zero, master
3560 // thread does not wait for unfinished_threads to reach 0.
3561 void __kmp_task_team_wait(
3562     kmp_info_t *this_thr,
3563     kmp_team_t *team USE_ITT_BUILD_ARG(void *itt_sync_obj), int wait) {
3564   kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state];
3565 
3566   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3567   KMP_DEBUG_ASSERT(task_team == this_thr->th.th_task_team);
3568 
3569   if ((task_team != NULL) && KMP_TASKING_ENABLED(task_team)) {
3570     if (wait) {
3571       KA_TRACE(20, ("__kmp_task_team_wait: Master T#%d waiting for all tasks "
3572                     "(for unfinished_threads to reach 0) on task_team = %p\n",
3573                     __kmp_gtid_from_thread(this_thr), task_team));
3574       // Worker threads may have dropped through to release phase, but could
3575       // still be executing tasks. Wait here for tasks to complete. To avoid
3576       // memory contention, only master thread checks termination condition.
3577       kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *,
3578                              &task_team->tt.tt_unfinished_threads),
3579                        0U);
3580       flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
3581     }
3582     // Deactivate the old task team, so that the worker threads will stop
3583     // referencing it while spinning.
3584     KA_TRACE(
3585         20,
3586         ("__kmp_task_team_wait: Master T#%d deactivating task_team %p: "
3587          "setting active to false, setting local and team's pointer to NULL\n",
3588          __kmp_gtid_from_thread(this_thr), task_team));
3589     KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1 ||
3590                      task_team->tt.tt_found_proxy_tasks == TRUE);
3591     TCW_SYNC_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3592     KMP_CHECK_UPDATE(task_team->tt.tt_untied_task_encountered, 0);
3593     TCW_SYNC_4(task_team->tt.tt_active, FALSE);
3594     KMP_MB();
3595 
3596     TCW_PTR(this_thr->th.th_task_team, NULL);
3597   }
3598 }
3599 
3600 // __kmp_tasking_barrier:
// This routine may only be called when
// __kmp_tasking_mode == tskm_extra_barrier.
3602 // Internal function to execute all tasks prior to a regular barrier or a join
3603 // barrier. It is a full barrier itself, which unfortunately turns regular
3604 // barriers into double barriers and join barriers into 1 1/2 barriers.
3605 void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid) {
3606   std::atomic<kmp_uint32> *spin = RCAST(
3607       std::atomic<kmp_uint32> *,
3608       &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads);
3609   int flag = FALSE;
3610   KMP_DEBUG_ASSERT(__kmp_tasking_mode == tskm_extra_barrier);
3611 
3612 #if USE_ITT_BUILD
3613   KMP_FSYNC_SPIN_INIT(spin, NULL);
3614 #endif /* USE_ITT_BUILD */
3615   kmp_flag_32 spin_flag(spin, 0U);
3616   while (!spin_flag.execute_tasks(thread, gtid, TRUE,
3617                                   &flag USE_ITT_BUILD_ARG(NULL), 0)) {
3618 #if USE_ITT_BUILD
3619     // TODO: What about itt_sync_obj??
3620     KMP_FSYNC_SPIN_PREPARE(RCAST(void *, spin));
3621 #endif /* USE_ITT_BUILD */
3622 
3623     if (TCR_4(__kmp_global.g.g_done)) {
3624       if (__kmp_global.g.g_abort)
3625         __kmp_abort_thread();
3626       break;
3627     }
3628     KMP_YIELD(TRUE);
3629   }
3630 #if USE_ITT_BUILD
3631   KMP_FSYNC_SPIN_ACQUIRED(RCAST(void *, spin));
3632 #endif /* USE_ITT_BUILD */
3633 }
3634 
3635 // __kmp_give_task puts a task into a given thread queue if:
3636 //  - the queue for that thread was created
3637 //  - there's space in that queue
3638 // Because of this, __kmp_push_task needs to check if there's space after
3639 // getting the lock
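//
// A rough worked illustration of the 'pass' throttling, based on the check
// below and on __kmpc_proxy_task_completed_ooo(), which starts with pass == 1
// and doubles it after each full sweep over the team:
//
//   pass == 1: a full deque is never grown here, since size/initial >= 1
//              always holds; another thread is tried instead
//   pass == 2: full deques still at the initial size are grown via
//              __kmp_realloc_task_deque(); larger ones are skipped
//   pass == 4: full deques smaller than 4x the initial size may be grown,
//              and so on
//
// so deques are expanded only reluctantly, after the whole team has been
// found full at the current threshold.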
3640 static bool __kmp_give_task(kmp_info_t *thread, kmp_int32 tid, kmp_task_t *task,
3641                             kmp_int32 pass) {
3642   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
3643   kmp_task_team_t *task_team = taskdata->td_task_team;
3644 
3645   KA_TRACE(20, ("__kmp_give_task: trying to give task %p to thread %d.\n",
3646                 taskdata, tid));
3647 
  // If task_team is NULL, something has gone badly wrong...
3649   KMP_DEBUG_ASSERT(task_team != NULL);
3650 
3651   bool result = false;
3652   kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
3653 
3654   if (thread_data->td.td_deque == NULL) {
3655     // There's no queue in this thread, go find another one
3656     // We're guaranteed that at least one thread has a queue
3657     KA_TRACE(30,
3658              ("__kmp_give_task: thread %d has no queue while giving task %p.\n",
3659               tid, taskdata));
3660     return result;
3661   }
3662 
3663   if (TCR_4(thread_data->td.td_deque_ntasks) >=
3664       TASK_DEQUE_SIZE(thread_data->td)) {
3665     KA_TRACE(
3666         30,
3667         ("__kmp_give_task: queue is full while giving task %p to thread %d.\n",
3668          taskdata, tid));
3669 
    // if this deque is already at least 'pass' times its initial size, give
    // another thread a chance
3672     if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3673       return result;
3674 
3675     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3676     if (TCR_4(thread_data->td.td_deque_ntasks) >=
3677         TASK_DEQUE_SIZE(thread_data->td)) {
3678       // expand deque to push the task which is not allowed to execute
3679       __kmp_realloc_task_deque(thread, thread_data);
3680     }
3681 
3682   } else {
3683 
3684     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3685 
3686     if (TCR_4(thread_data->td.td_deque_ntasks) >=
3687         TASK_DEQUE_SIZE(thread_data->td)) {
3688       KA_TRACE(30, ("__kmp_give_task: queue is full while giving task %p to "
3689                     "thread %d.\n",
3690                     taskdata, tid));
3691 
      // if this deque is already at least 'pass' times its initial size,
      // give another thread a chance
3694       if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3695         goto release_and_exit;
3696 
3697       __kmp_realloc_task_deque(thread, thread_data);
3698     }
3699   }
3700 
3701   // lock is held here, and there is space in the deque
3702 
3703   thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
3704   // Wrap index.
3705   thread_data->td.td_deque_tail =
3706       (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
3707   TCW_4(thread_data->td.td_deque_ntasks,
3708         TCR_4(thread_data->td.td_deque_ntasks) + 1);
3709 
3710   result = true;
3711   KA_TRACE(30, ("__kmp_give_task: successfully gave task %p to thread %d.\n",
3712                 taskdata, tid));
3713 
3714 release_and_exit:
3715   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3716 
3717   return result;
3718 }
3719 
/* The finish of a proxy task is divided in two pieces:
    - the top half is the one that can be done from a thread outside the team
    - the bottom half must be run from a thread within the team

   In order to run the bottom half the task gets queued back into one of the
   threads of the team. Once the td_incomplete_child_tasks counter of the
   parent is decremented, the threads can leave the barriers. So, the bottom
   half needs to be queued before the counter is decremented. The top half is
   therefore divided in two parts:
    - things that can be run before queuing the bottom half
    - things that must be run after queuing the bottom half

   This creates a second race as the bottom half can free the task before the
   second top half is executed. To avoid this we use the
   td_incomplete_child_tasks counter of the proxy task to synchronize the top
   and bottom halves. */
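
// A minimal sketch (illustration only; the real sequencing is in
// __kmpc_proxy_task_completed_ooo() below) of the required ordering when the
// completion is signalled from outside the team:
//
//   __kmp_first_top_half_finish_proxy(taskdata);  // mark complete, add the
//                                                 // imaginary child
//   __kmp_give_task(thread, tid, ptask, pass);    // queue the bottom half
//   __kmp_second_top_half_finish_proxy(taskdata); // release the parent and
//                                                 // drop the imaginary child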
3736 static void __kmp_first_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3737   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
3738   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3739   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
3740   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
3741 
3742   taskdata->td_flags.complete = 1; // mark the task as completed
3743 
3744   if (taskdata->td_taskgroup)
3745     KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
3746 
  // Create an imaginary child for this task so the bottom half cannot
3748   // release the task before we have completed the second top half
3749   KMP_ATOMIC_INC(&taskdata->td_incomplete_child_tasks);
3750 }
3751 
3752 static void __kmp_second_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3753   kmp_int32 children = 0;
3754 
3755   // Predecrement simulated by "- 1" calculation
3756   children =
3757       KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
3758   KMP_DEBUG_ASSERT(children >= 0);
3759 
  // Remove the imaginary child
3761   KMP_ATOMIC_DEC(&taskdata->td_incomplete_child_tasks);
3762 }
3763 
3764 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask) {
3765   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3766   kmp_info_t *thread = __kmp_threads[gtid];
3767 
3768   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3769   KMP_DEBUG_ASSERT(taskdata->td_flags.complete ==
3770                    1); // top half must run before bottom half
3771 
3772   // We need to wait to make sure the top half is finished
3773   // Spinning here should be ok as this should happen quickly
3774   while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) > 0)
3775     ;
3776 
3777   __kmp_release_deps(gtid, taskdata);
3778   __kmp_free_task_and_ancestors(gtid, taskdata, thread);
3779 }
3780 
3781 /*!
3782 @ingroup TASKING
3783 @param gtid Global Thread ID of encountering thread
3784 @param ptask Task which execution is completed
3785 
Execute the completion of a proxy task from a thread that is part of the
team. Runs the top and bottom halves directly.
3788 */
3789 void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask) {
3790   KMP_DEBUG_ASSERT(ptask != NULL);
3791   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3792   KA_TRACE(
3793       10, ("__kmp_proxy_task_completed(enter): T#%d proxy task %p completing\n",
3794            gtid, taskdata));
3795 
3796   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3797 
3798   __kmp_first_top_half_finish_proxy(taskdata);
3799   __kmp_second_top_half_finish_proxy(taskdata);
3800   __kmp_bottom_half_finish_proxy(gtid, ptask);
3801 
3802   KA_TRACE(10,
3803            ("__kmp_proxy_task_completed(exit): T#%d proxy task %p completing\n",
3804             gtid, taskdata));
3805 }
3806 
3807 /*!
3808 @ingroup TASKING
@param ptask Task whose execution is completed

Execute the completion of a proxy task from a thread that might not belong to
the team.
3813 */
3814 void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask) {
3815   KMP_DEBUG_ASSERT(ptask != NULL);
3816   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3817 
3818   KA_TRACE(
3819       10,
3820       ("__kmp_proxy_task_completed_ooo(enter): proxy task completing ooo %p\n",
3821        taskdata));
3822 
3823   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3824 
3825   __kmp_first_top_half_finish_proxy(taskdata);
3826 
  // Enqueue the task so that its bottom-half completion is run by a thread
  // within the corresponding team
3829   kmp_team_t *team = taskdata->td_team;
3830   kmp_int32 nthreads = team->t.t_nproc;
3831   kmp_info_t *thread;
3832 
3833   // This should be similar to start_k = __kmp_get_random( thread ) % nthreads
3834   // but we cannot use __kmp_get_random here
3835   kmp_int32 start_k = 0;
3836   kmp_int32 pass = 1;
3837   kmp_int32 k = start_k;
3838 
3839   do {
3840     // For now we're just linearly trying to find a thread
3841     thread = team->t.t_threads[k];
3842     k = (k + 1) % nthreads;
3843 
    // we did a full pass through all the threads without placing the task;
    // double 'pass' and retry
3845     if (k == start_k)
3846       pass = pass << 1;
3847 
3848   } while (!__kmp_give_task(thread, k, ptask, pass));
3849 
3850   __kmp_second_top_half_finish_proxy(taskdata);
3851 
3852   KA_TRACE(
3853       10,
3854       ("__kmp_proxy_task_completed_ooo(exit): proxy task completing ooo %p\n",
3855        taskdata));
3856 }
3857 
3858 kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref, int gtid,
3859                                                 kmp_task_t *task) {
3860   kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task);
3861   if (td->td_allow_completion_event.type == KMP_EVENT_UNINITIALIZED) {
3862     td->td_allow_completion_event.type = KMP_EVENT_ALLOW_COMPLETION;
3863     td->td_allow_completion_event.ed.task = task;
3864     __kmp_init_tas_lock(&td->td_allow_completion_event.lock);
3865   }
3866   return &td->td_allow_completion_event;
3867 }
3868 
3869 void __kmp_fulfill_event(kmp_event_t *event) {
3870   if (event->type == KMP_EVENT_ALLOW_COMPLETION) {
3871     kmp_task_t *ptask = event->ed.task;
3872     kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3873     bool detached = false;
3874     int gtid = __kmp_get_gtid();
3875 
3876     // The associated task might have completed or could be completing at this
3877     // point.
3878     // We need to take the lock to avoid races
3879     __kmp_acquire_tas_lock(&event->lock, gtid);
3880     if (taskdata->td_flags.proxy == TASK_PROXY) {
3881       detached = true;
3882     } else {
3883 #if OMPT_SUPPORT
3884       // The OMPT event must occur under mutual exclusion,
3885       // otherwise the tool might access ptask after free
3886       if (UNLIKELY(ompt_enabled.enabled))
3887         __ompt_task_finish(ptask, NULL, ompt_task_early_fulfill);
3888 #endif
3889     }
3890     event->type = KMP_EVENT_UNINITIALIZED;
3891     __kmp_release_tas_lock(&event->lock, gtid);
3892 
3893     if (detached) {
3894 #if OMPT_SUPPORT
3895       // We free ptask afterwards and know the task is finished,
3896       // so locking is not necessary
3897       if (UNLIKELY(ompt_enabled.enabled))
3898         __ompt_task_finish(ptask, NULL, ompt_task_late_fulfill);
3899 #endif
      // If the task was detached, complete the proxy task
3901       if (gtid >= 0) {
3902         kmp_team_t *team = taskdata->td_team;
3903         kmp_info_t *thread = __kmp_get_thread();
3904         if (thread->th.th_team == team) {
3905           __kmpc_proxy_task_completed(gtid, ptask);
3906           return;
3907         }
3908       }
3909 
3910       // fallback
3911       __kmpc_proxy_task_completed_ooo(ptask);
3912     }
3913   }
3914 }
3915 
// __kmp_task_dup_alloc: Allocate the taskdata and make a copy of the source
// task for taskloop
3918 //
3919 // thread:   allocating thread
3920 // task_src: pointer to source task to be duplicated
3921 // returns:  a pointer to the allocated kmp_task_t structure (task).
3922 kmp_task_t *__kmp_task_dup_alloc(kmp_info_t *thread, kmp_task_t *task_src) {
3923   kmp_task_t *task;
3924   kmp_taskdata_t *taskdata;
3925   kmp_taskdata_t *taskdata_src = KMP_TASK_TO_TASKDATA(task_src);
3926   kmp_taskdata_t *parent_task = taskdata_src->td_parent; // same parent task
3927   size_t shareds_offset;
3928   size_t task_size;
3929 
3930   KA_TRACE(10, ("__kmp_task_dup_alloc(enter): Th %p, source task %p\n", thread,
3931                 task_src));
3932   KMP_DEBUG_ASSERT(taskdata_src->td_flags.proxy ==
                   TASK_FULL); // it should not be a proxy task
3934   KMP_DEBUG_ASSERT(taskdata_src->td_flags.tasktype == TASK_EXPLICIT);
3935   task_size = taskdata_src->td_size_alloc;
3936 
3937   // Allocate a kmp_taskdata_t block and a kmp_task_t block.
3938   KA_TRACE(30, ("__kmp_task_dup_alloc: Th %p, malloc size %ld\n", thread,
3939                 task_size));
3940 #if USE_FAST_MEMORY
3941   taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, task_size);
3942 #else
3943   taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, task_size);
3944 #endif /* USE_FAST_MEMORY */
3945   KMP_MEMCPY(taskdata, taskdata_src, task_size);
3946 
3947   task = KMP_TASKDATA_TO_TASK(taskdata);
3948 
3949   // Initialize new task (only specific fields not affected by memcpy)
3950   taskdata->td_task_id = KMP_GEN_TASK_ID();
  if (task->shareds != NULL) { // need to set up the shareds pointer
3952     shareds_offset = (char *)task_src->shareds - (char *)taskdata_src;
3953     task->shareds = &((char *)taskdata)[shareds_offset];
3954     KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
3955                      0);
3956   }
3957   taskdata->td_alloc_thread = thread;
3958   taskdata->td_parent = parent_task;
3959   // task inherits the taskgroup from the parent task
3960   taskdata->td_taskgroup = parent_task->td_taskgroup;
3961   // tied task needs to initialize the td_last_tied at creation,
3962   // untied one does this when it is scheduled for execution
3963   if (taskdata->td_flags.tiedness == TASK_TIED)
3964     taskdata->td_last_tied = taskdata;
3965 
3966   // Only need to keep track of child task counts if team parallel and tasking
3967   // not serialized
3968   if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
3969     KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
3970     if (parent_task->td_taskgroup)
3971       KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
    // Only need to keep track of allocated child tasks for explicit tasks,
    // since implicit ones are not deallocated
3974     if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT)
3975       KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
3976   }
3977 
3978   KA_TRACE(20,
3979            ("__kmp_task_dup_alloc(exit): Th %p, created task %p, parent=%p\n",
3980             thread, taskdata, taskdata->td_parent));
3981 #if OMPT_SUPPORT
3982   if (UNLIKELY(ompt_enabled.enabled))
3983     __ompt_task_init(taskdata, thread->th.th_info.ds.ds_gtid);
3984 #endif
3985   return task;
3986 }
3987 
3988 // Routine optionally generated by the compiler for setting the lastprivate flag
3989 // and calling needed constructors for private/firstprivate objects
3990 // (used to form taskloop tasks from pattern task)
3991 // Parameters: dest task, src task, lastprivate flag.
3992 typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32);
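// Illustrative shape of such a routine (hypothetical sketch; the real body is
// compiler-generated and depends on the privates captured by the taskloop):
//
//   void omp_task_dup(kmp_task_t *dst, kmp_task_t *src, kmp_int32 lastpriv) {
//     // copy/construct firstprivate objects from src's privates into dst's
//     // record lastpriv in dst so the chunk writes back lastprivate values
//   }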
3993 
3994 KMP_BUILD_ASSERT(sizeof(long) == 4 || sizeof(long) == 8);
3995 
3996 // class to encapsulate manipulating loop bounds in a taskloop task.
3997 // this abstracts away the Intel vs GOMP taskloop interface for setting/getting
3998 // the loop bound variables.
3999 class kmp_taskloop_bounds_t {
4000   kmp_task_t *task;
4001   const kmp_taskdata_t *taskdata;
4002   size_t lower_offset;
4003   size_t upper_offset;
4004 
4005 public:
4006   kmp_taskloop_bounds_t(kmp_task_t *_task, kmp_uint64 *lb, kmp_uint64 *ub)
4007       : task(_task), taskdata(KMP_TASK_TO_TASKDATA(task)),
4008         lower_offset((char *)lb - (char *)task),
4009         upper_offset((char *)ub - (char *)task) {
4010     KMP_DEBUG_ASSERT((char *)lb > (char *)_task);
4011     KMP_DEBUG_ASSERT((char *)ub > (char *)_task);
4012   }
4013   kmp_taskloop_bounds_t(kmp_task_t *_task, const kmp_taskloop_bounds_t &bounds)
4014       : task(_task), taskdata(KMP_TASK_TO_TASKDATA(_task)),
4015         lower_offset(bounds.lower_offset), upper_offset(bounds.upper_offset) {}
4016   size_t get_lower_offset() const { return lower_offset; }
4017   size_t get_upper_offset() const { return upper_offset; }
4018   kmp_uint64 get_lb() const {
4019     kmp_int64 retval;
4020 #if defined(KMP_GOMP_COMPAT)
4021     // Intel task just returns the lower bound normally
4022     if (!taskdata->td_flags.native) {
4023       retval = *(kmp_int64 *)((char *)task + lower_offset);
4024     } else {
4025       // GOMP task has to take into account the sizeof(long)
4026       if (taskdata->td_size_loop_bounds == 4) {
4027         kmp_int32 *lb = RCAST(kmp_int32 *, task->shareds);
4028         retval = (kmp_int64)*lb;
4029       } else {
4030         kmp_int64 *lb = RCAST(kmp_int64 *, task->shareds);
4031         retval = (kmp_int64)*lb;
4032       }
4033     }
4034 #else
4035     retval = *(kmp_int64 *)((char *)task + lower_offset);
4036 #endif // defined(KMP_GOMP_COMPAT)
4037     return retval;
4038   }
4039   kmp_uint64 get_ub() const {
4040     kmp_int64 retval;
4041 #if defined(KMP_GOMP_COMPAT)
4042     // Intel task just returns the upper bound normally
4043     if (!taskdata->td_flags.native) {
4044       retval = *(kmp_int64 *)((char *)task + upper_offset);
4045     } else {
4046       // GOMP task has to take into account the sizeof(long)
4047       if (taskdata->td_size_loop_bounds == 4) {
4048         kmp_int32 *ub = RCAST(kmp_int32 *, task->shareds) + 1;
4049         retval = (kmp_int64)*ub;
4050       } else {
4051         kmp_int64 *ub = RCAST(kmp_int64 *, task->shareds) + 1;
4052         retval = (kmp_int64)*ub;
4053       }
4054     }
4055 #else
4056     retval = *(kmp_int64 *)((char *)task + upper_offset);
4057 #endif // defined(KMP_GOMP_COMPAT)
4058     return retval;
4059   }
4060   void set_lb(kmp_uint64 lb) {
4061 #if defined(KMP_GOMP_COMPAT)
4062     // Intel task just sets the lower bound normally
4063     if (!taskdata->td_flags.native) {
4064       *(kmp_uint64 *)((char *)task + lower_offset) = lb;
4065     } else {
4066       // GOMP task has to take into account the sizeof(long)
4067       if (taskdata->td_size_loop_bounds == 4) {
4068         kmp_uint32 *lower = RCAST(kmp_uint32 *, task->shareds);
4069         *lower = (kmp_uint32)lb;
4070       } else {
4071         kmp_uint64 *lower = RCAST(kmp_uint64 *, task->shareds);
4072         *lower = (kmp_uint64)lb;
4073       }
4074     }
4075 #else
4076     *(kmp_uint64 *)((char *)task + lower_offset) = lb;
4077 #endif // defined(KMP_GOMP_COMPAT)
4078   }
4079   void set_ub(kmp_uint64 ub) {
4080 #if defined(KMP_GOMP_COMPAT)
4081     // Intel task just sets the upper bound normally
4082     if (!taskdata->td_flags.native) {
4083       *(kmp_uint64 *)((char *)task + upper_offset) = ub;
4084     } else {
4085       // GOMP task has to take into account the sizeof(long)
4086       if (taskdata->td_size_loop_bounds == 4) {
4087         kmp_uint32 *upper = RCAST(kmp_uint32 *, task->shareds) + 1;
4088         *upper = (kmp_uint32)ub;
4089       } else {
4090         kmp_uint64 *upper = RCAST(kmp_uint64 *, task->shareds) + 1;
4091         *upper = (kmp_uint64)ub;
4092       }
4093     }
4094 #else
4095     *(kmp_uint64 *)((char *)task + upper_offset) = ub;
4096 #endif // defined(KMP_GOMP_COMPAT)
4097   }
4098 };
4099 
4100 // __kmp_taskloop_linear: Start tasks of the taskloop linearly
4101 //
4102 // loc        Source location information
4103 // gtid       Global thread ID
4104 // task       Pattern task, exposes the loop iteration range
4105 // lb         Pointer to loop lower bound in task structure
4106 // ub         Pointer to loop upper bound in task structure
4107 // st         Loop stride
4108 // ub_glob    Global upper bound (used for lastprivate check)
4109 // num_tasks  Number of tasks to execute
4110 // grainsize  Number of loop iterations per task
4111 // extras     Number of chunks with grainsize+1 iterations
4112 // tc         Iterations count
4113 // task_dup   Tasks duplication routine
4114 // codeptr_ra Return address for OMPT events
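//
// Worked example of the distribution below (numbers are illustrative): with
// tc=10, num_tasks=3, grainsize=3 and extras=1, the first 'extras' tasks get
// grainsize+1 iterations, giving chunks of 4, 3 and 3 iterations, so that
// tc == num_tasks * grainsize + extras holds.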
4115 void __kmp_taskloop_linear(ident_t *loc, int gtid, kmp_task_t *task,
4116                            kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
4117                            kmp_uint64 ub_glob, kmp_uint64 num_tasks,
4118                            kmp_uint64 grainsize, kmp_uint64 extras,
4119                            kmp_uint64 tc,
4120 #if OMPT_SUPPORT
4121                            void *codeptr_ra,
4122 #endif
4123                            void *task_dup) {
4124   KMP_COUNT_BLOCK(OMP_TASKLOOP);
4125   KMP_TIME_PARTITIONED_BLOCK(OMP_taskloop_scheduling);
4126   p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
4127   // compiler provides global bounds here
4128   kmp_taskloop_bounds_t task_bounds(task, lb, ub);
4129   kmp_uint64 lower = task_bounds.get_lb();
4130   kmp_uint64 upper = task_bounds.get_ub();
4131   kmp_uint64 i;
4132   kmp_info_t *thread = __kmp_threads[gtid];
4133   kmp_taskdata_t *current_task = thread->th.th_current_task;
4134   kmp_task_t *next_task;
4135   kmp_int32 lastpriv = 0;
4136 
4137   KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
4138   KMP_DEBUG_ASSERT(num_tasks > extras);
4139   KMP_DEBUG_ASSERT(num_tasks > 0);
4140   KA_TRACE(20, ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, "
4141                 "extras %lld, i=%lld,%lld(%d)%lld, dup %p\n",
4142                 gtid, num_tasks, grainsize, extras, lower, upper, ub_glob, st,
4143                 task_dup));
4144 
  // Launch num_tasks tasks, assigning grainsize iterations to each task
4146   for (i = 0; i < num_tasks; ++i) {
4147     kmp_uint64 chunk_minus_1;
4148     if (extras == 0) {
4149       chunk_minus_1 = grainsize - 1;
4150     } else {
4151       chunk_minus_1 = grainsize;
      --extras; // first 'extras' tasks get a bigger chunk (grainsize+1)
4153     }
4154     upper = lower + st * chunk_minus_1;
4155     if (i == num_tasks - 1) {
4156       // schedule the last task, set lastprivate flag if needed
4157       if (st == 1) { // most common case
4158         KMP_DEBUG_ASSERT(upper == *ub);
4159         if (upper == ub_glob)
4160           lastpriv = 1;
4161       } else if (st > 0) { // positive loop stride
4162         KMP_DEBUG_ASSERT((kmp_uint64)st > *ub - upper);
4163         if ((kmp_uint64)st > ub_glob - upper)
4164           lastpriv = 1;
4165       } else { // negative loop stride
4166         KMP_DEBUG_ASSERT(upper + st < *ub);
4167         if (upper - ub_glob < (kmp_uint64)(-st))
4168           lastpriv = 1;
4169       }
4170     }
4171     next_task = __kmp_task_dup_alloc(thread, task); // allocate new task
4172     kmp_taskdata_t *next_taskdata = KMP_TASK_TO_TASKDATA(next_task);
4173     kmp_taskloop_bounds_t next_task_bounds =
4174         kmp_taskloop_bounds_t(next_task, task_bounds);
4175 
4176     // adjust task-specific bounds
4177     next_task_bounds.set_lb(lower);
4178     if (next_taskdata->td_flags.native) {
4179       next_task_bounds.set_ub(upper + (st > 0 ? 1 : -1));
4180     } else {
4181       next_task_bounds.set_ub(upper);
4182     }
4183     if (ptask_dup != NULL) // set lastprivate flag, construct firstprivates,
4184                            // etc.
4185       ptask_dup(next_task, task, lastpriv);
4186     KA_TRACE(40,
4187              ("__kmp_taskloop_linear: T#%d; task #%llu: task %p: lower %lld, "
4188               "upper %lld stride %lld, (offsets %p %p)\n",
4189               gtid, i, next_task, lower, upper, st,
4190               next_task_bounds.get_lower_offset(),
4191               next_task_bounds.get_upper_offset()));
4192 #if OMPT_SUPPORT
4193     __kmp_omp_taskloop_task(NULL, gtid, next_task,
4194                            codeptr_ra); // schedule new task
4195 #else
4196     __kmp_omp_task(gtid, next_task, true); // schedule new task
4197 #endif
4198     lower = upper + st; // adjust lower bound for the next iteration
4199   }
4200   // free the pattern task and exit
  __kmp_task_start(gtid, task, current_task); // internal bookkeeping only
4202   // do not execute the pattern task, just do internal bookkeeping
4203   __kmp_task_finish<false>(gtid, task, current_task);
4204 }
4205 
// Structure holding the taskloop parameters for an auxiliary task;
// it is kept in the shareds of that task's structure.
4208 typedef struct __taskloop_params {
4209   kmp_task_t *task;
4210   kmp_uint64 *lb;
4211   kmp_uint64 *ub;
4212   void *task_dup;
4213   kmp_int64 st;
4214   kmp_uint64 ub_glob;
4215   kmp_uint64 num_tasks;
4216   kmp_uint64 grainsize;
4217   kmp_uint64 extras;
4218   kmp_uint64 tc;
4219   kmp_uint64 num_t_min;
4220 #if OMPT_SUPPORT
4221   void *codeptr_ra;
4222 #endif
4223 } __taskloop_params_t;
4224 
4225 void __kmp_taskloop_recur(ident_t *, int, kmp_task_t *, kmp_uint64 *,
4226                           kmp_uint64 *, kmp_int64, kmp_uint64, kmp_uint64,
4227                           kmp_uint64, kmp_uint64, kmp_uint64, kmp_uint64,
4228 #if OMPT_SUPPORT
4229                           void *,
4230 #endif
4231                           void *);
4232 
4233 // Execute part of the taskloop submitted as a task.
4234 int __kmp_taskloop_task(int gtid, void *ptask) {
4235   __taskloop_params_t *p =
4236       (__taskloop_params_t *)((kmp_task_t *)ptask)->shareds;
4237   kmp_task_t *task = p->task;
4238   kmp_uint64 *lb = p->lb;
4239   kmp_uint64 *ub = p->ub;
4240   void *task_dup = p->task_dup;
4241   //  p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
4242   kmp_int64 st = p->st;
4243   kmp_uint64 ub_glob = p->ub_glob;
4244   kmp_uint64 num_tasks = p->num_tasks;
4245   kmp_uint64 grainsize = p->grainsize;
4246   kmp_uint64 extras = p->extras;
4247   kmp_uint64 tc = p->tc;
4248   kmp_uint64 num_t_min = p->num_t_min;
4249 #if OMPT_SUPPORT
4250   void *codeptr_ra = p->codeptr_ra;
4251 #endif
4252 #if KMP_DEBUG
4253   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4254   KMP_DEBUG_ASSERT(task != NULL);
4255   KA_TRACE(20, ("__kmp_taskloop_task: T#%d, task %p: %lld tasks, grainsize"
4256                 " %lld, extras %lld, i=%lld,%lld(%d), dup %p\n",
4257                 gtid, taskdata, num_tasks, grainsize, extras, *lb, *ub, st,
4258                 task_dup));
4259 #endif
4260   KMP_DEBUG_ASSERT(num_tasks * 2 + 1 > num_t_min);
4261   if (num_tasks > num_t_min)
4262     __kmp_taskloop_recur(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
4263                          grainsize, extras, tc, num_t_min,
4264 #if OMPT_SUPPORT
4265                          codeptr_ra,
4266 #endif
4267                          task_dup);
4268   else
4269     __kmp_taskloop_linear(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
4270                           grainsize, extras, tc,
4271 #if OMPT_SUPPORT
4272                           codeptr_ra,
4273 #endif
4274                           task_dup);
4275 
4276   KA_TRACE(40, ("__kmp_taskloop_task(exit): T#%d\n", gtid));
4277   return 0;
4278 }
4279 
4280 // Schedule part of the taskloop as a task,
4281 // execute the rest of the taskloop.
4282 //
4283 // loc        Source location information
4284 // gtid       Global thread ID
4285 // task       Pattern task, exposes the loop iteration range
4286 // lb         Pointer to loop lower bound in task structure
4287 // ub         Pointer to loop upper bound in task structure
4288 // st         Loop stride
4289 // ub_glob    Global upper bound (used for lastprivate check)
4290 // num_tasks  Number of tasks to execute
4291 // grainsize  Number of loop iterations per task
4292 // extras     Number of chunks with grainsize+1 iterations
4293 // tc         Iterations count
4294 // num_t_min  Threshold to launch tasks recursively
4295 // task_dup   Tasks duplication routine
4296 // codeptr_ra Return address for OMPT events
4297 void __kmp_taskloop_recur(ident_t *loc, int gtid, kmp_task_t *task,
4298                           kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
4299                           kmp_uint64 ub_glob, kmp_uint64 num_tasks,
4300                           kmp_uint64 grainsize, kmp_uint64 extras,
4301                           kmp_uint64 tc, kmp_uint64 num_t_min,
4302 #if OMPT_SUPPORT
4303                           void *codeptr_ra,
4304 #endif
4305                           void *task_dup) {
4306   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4307   KMP_DEBUG_ASSERT(task != NULL);
4308   KMP_DEBUG_ASSERT(num_tasks > num_t_min);
4309   KA_TRACE(20, ("__kmp_taskloop_recur: T#%d, task %p: %lld tasks, grainsize"
4310                 " %lld, extras %lld, i=%lld,%lld(%d), dup %p\n",
4311                 gtid, taskdata, num_tasks, grainsize, extras, *lb, *ub, st,
4312                 task_dup));
4313   p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
4314   kmp_uint64 lower = *lb;
4315   kmp_info_t *thread = __kmp_threads[gtid];
4316   //  kmp_taskdata_t *current_task = thread->th.th_current_task;
4317   kmp_task_t *next_task;
4318   size_t lower_offset =
4319       (char *)lb - (char *)task; // remember offset of lb in the task structure
4320   size_t upper_offset =
4321       (char *)ub - (char *)task; // remember offset of ub in the task structure
4322 
4323   KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
4324   KMP_DEBUG_ASSERT(num_tasks > extras);
4325   KMP_DEBUG_ASSERT(num_tasks > 0);
4326 
  // split the loop into two halves
4328   kmp_uint64 lb1, ub0, tc0, tc1, ext0, ext1;
4329   kmp_uint64 gr_size0 = grainsize;
4330   kmp_uint64 n_tsk0 = num_tasks >> 1; // num_tasks/2 to execute
4331   kmp_uint64 n_tsk1 = num_tasks - n_tsk0; // to schedule as a task
4332   if (n_tsk0 <= extras) {
4333     gr_size0++; // integrate extras into grainsize
4334     ext0 = 0; // no extra iters in 1st half
4335     ext1 = extras - n_tsk0; // remaining extras
4336     tc0 = gr_size0 * n_tsk0;
4337     tc1 = tc - tc0;
4338   } else { // n_tsk0 > extras
4339     ext1 = 0; // no extra iters in 2nd half
4340     ext0 = extras;
4341     tc1 = grainsize * n_tsk1;
4342     tc0 = tc - tc1;
4343   }
4344   ub0 = lower + st * (tc0 - 1);
4345   lb1 = ub0 + st;
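  // Worked example of the split above (illustrative numbers): num_tasks=7,
  // grainsize=2, extras=3, tc=17, lower=0, st=1 gives n_tsk0=3, n_tsk1=4;
  // since n_tsk0 <= extras, the first half absorbs the extras (gr_size0=3,
  // ext0=0, tc0=9) and the second half keeps grainsize=2 (ext1=0, tc1=8);
  // ub0=8 and lb1=9, so the halves cover iterations 0..8 and 9..16.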
4346 
4347   // create pattern task for 2nd half of the loop
4348   next_task = __kmp_task_dup_alloc(thread, task); // duplicate the task
4349   // adjust lower bound (upper bound is not changed) for the 2nd half
4350   *(kmp_uint64 *)((char *)next_task + lower_offset) = lb1;
4351   if (ptask_dup != NULL) // construct firstprivates, etc.
4352     ptask_dup(next_task, task, 0);
4353   *ub = ub0; // adjust upper bound for the 1st half
4354 
4355   // create auxiliary task for 2nd half of the loop
4356   // make sure new task has same parent task as the pattern task
4357   kmp_taskdata_t *current_task = thread->th.th_current_task;
4358   thread->th.th_current_task = taskdata->td_parent;
4359   kmp_task_t *new_task =
4360       __kmpc_omp_task_alloc(loc, gtid, 1, 3 * sizeof(void *),
4361                             sizeof(__taskloop_params_t), &__kmp_taskloop_task);
4362   // restore current task
4363   thread->th.th_current_task = current_task;
4364   __taskloop_params_t *p = (__taskloop_params_t *)new_task->shareds;
4365   p->task = next_task;
4366   p->lb = (kmp_uint64 *)((char *)next_task + lower_offset);
4367   p->ub = (kmp_uint64 *)((char *)next_task + upper_offset);
4368   p->task_dup = task_dup;
4369   p->st = st;
4370   p->ub_glob = ub_glob;
4371   p->num_tasks = n_tsk1;
4372   p->grainsize = grainsize;
4373   p->extras = ext1;
4374   p->tc = tc1;
4375   p->num_t_min = num_t_min;
4376 #if OMPT_SUPPORT
4377   p->codeptr_ra = codeptr_ra;
4378 #endif
4379 
4380 #if OMPT_SUPPORT
4381   // schedule new task with correct return address for OMPT events
4382   __kmp_omp_taskloop_task(NULL, gtid, new_task, codeptr_ra);
4383 #else
4384   __kmp_omp_task(gtid, new_task, true); // schedule new task
4385 #endif
4386 
4387   // execute the 1st half of current subrange
4388   if (n_tsk0 > num_t_min)
4389     __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0, gr_size0,
4390                          ext0, tc0, num_t_min,
4391 #if OMPT_SUPPORT
4392                          codeptr_ra,
4393 #endif
4394                          task_dup);
4395   else
4396     __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0,
4397                           gr_size0, ext0, tc0,
4398 #if OMPT_SUPPORT
4399                           codeptr_ra,
4400 #endif
4401                           task_dup);
4402 
4403   KA_TRACE(40, ("__kmpc_taskloop_recur(exit): T#%d\n", gtid));
4404 }
4405 
4406 /*!
4407 @ingroup TASKING
4408 @param loc       Source location information
4409 @param gtid      Global thread ID
4410 @param task      Task structure
4411 @param if_val    Value of the if clause
4412 @param lb        Pointer to loop lower bound in task structure
4413 @param ub        Pointer to loop upper bound in task structure
4414 @param st        Loop stride
4415 @param nogroup   Flag, 1 if no taskgroup needs to be added, 0 otherwise
4416 @param sched     Schedule specified 0/1/2 for none/grainsize/num_tasks
4417 @param grainsize Schedule value if specified
4418 @param task_dup  Tasks duplication routine
4419 
4420 Execute the taskloop construct.
4421 */
4422 void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
4423                      kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup,
4424                      int sched, kmp_uint64 grainsize, void *task_dup) {
4425   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4426   KMP_DEBUG_ASSERT(task != NULL);
4427 
4428   if (nogroup == 0) {
4429 #if OMPT_SUPPORT && OMPT_OPTIONAL
4430     OMPT_STORE_RETURN_ADDRESS(gtid);
4431 #endif
4432     __kmpc_taskgroup(loc, gtid);
4433   }
4434 
4435   // =========================================================================
4436   // calculate loop parameters
4437   kmp_taskloop_bounds_t task_bounds(task, lb, ub);
4438   kmp_uint64 tc;
4439   // compiler provides global bounds here
4440   kmp_uint64 lower = task_bounds.get_lb();
4441   kmp_uint64 upper = task_bounds.get_ub();
4442   kmp_uint64 ub_glob = upper; // global upper used to calc lastprivate flag
4443   kmp_uint64 num_tasks = 0, extras = 0;
4444   kmp_uint64 num_tasks_min = __kmp_taskloop_min_tasks;
4445   kmp_info_t *thread = __kmp_threads[gtid];
4446   kmp_taskdata_t *current_task = thread->th.th_current_task;
4447 
4448   KA_TRACE(20, ("__kmpc_taskloop: T#%d, task %p, lb %lld, ub %lld, st %lld, "
4449                 "grain %llu(%d), dup %p\n",
4450                 gtid, taskdata, lower, upper, st, grainsize, sched, task_dup));
4451 
4452   // compute trip count
4453   if (st == 1) { // most common case
4454     tc = upper - lower + 1;
4455   } else if (st < 0) {
4456     tc = (lower - upper) / (-st) + 1;
4457   } else { // st > 0
4458     tc = (upper - lower) / st + 1;
4459   }
4460   if (tc == 0) {
4461     KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d zero-trip loop\n", gtid));
4462     // free the pattern task and exit
4463     __kmp_task_start(gtid, task, current_task);
4464     // do not execute anything for zero-trip loop
4465     __kmp_task_finish<false>(gtid, task, current_task);
4466     return;
4467   }
4468 
4469 #if OMPT_SUPPORT && OMPT_OPTIONAL
4470   ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
4471   ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
4472   if (ompt_enabled.ompt_callback_work) {
4473     ompt_callbacks.ompt_callback(ompt_callback_work)(
4474         ompt_work_taskloop, ompt_scope_begin, &(team_info->parallel_data),
4475         &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
4476   }
4477 #endif
4478 
4479   if (num_tasks_min == 0)
    // TODO: can we choose a better default heuristic?
4481     num_tasks_min =
4482         KMP_MIN(thread->th.th_team_nproc * 10, INITIAL_TASK_DEQUE_SIZE);
4483 
4484   // compute num_tasks/grainsize based on the input provided
4485   switch (sched) {
4486   case 0: // no schedule clause specified, we can choose the default
4487     // let's try to schedule (team_size*10) tasks
4488     grainsize = thread->th.th_team_nproc * 10;
4489     KMP_FALLTHROUGH();
4490   case 2: // num_tasks provided
4491     if (grainsize > tc) {
      num_tasks = tc; // requested num_tasks is too big, adjust values
4493       grainsize = 1;
4494       extras = 0;
4495     } else {
4496       num_tasks = grainsize;
4497       grainsize = tc / num_tasks;
4498       extras = tc % num_tasks;
4499     }
4500     break;
4501   case 1: // grainsize provided
4502     if (grainsize > tc) {
      num_tasks = 1; // requested grainsize is too big, adjust values
4504       grainsize = tc;
4505       extras = 0;
4506     } else {
4507       num_tasks = tc / grainsize;
4508       // adjust grainsize for balanced distribution of iterations
4509       grainsize = tc / num_tasks;
4510       extras = tc % num_tasks;
4511     }
4512     break;
4513   default:
4514     KMP_ASSERT2(0, "unknown scheduling of taskloop");
4515   }
4516   KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
4517   KMP_DEBUG_ASSERT(num_tasks > extras);
4518   KMP_DEBUG_ASSERT(num_tasks > 0);
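  // Worked examples of the adjustment above (illustrative numbers): tc=100
  // with grainsize=30 (sched=1) yields num_tasks=3, grainsize=33, extras=1,
  // i.e. chunks of 34, 33 and 33 iterations; tc=100 with num_tasks=8 (sched=2)
  // yields grainsize=12, extras=4, i.e. four chunks of 13 and four of 12.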
4519   // =========================================================================
4520 
  // check the value of the if clause first
  // Also, GOMP_taskloop (taskdata->td_flags.native) is forced to the linear
  // path
4523   if (if_val == 0) { // if(0) specified, mark task as serial
4524     taskdata->td_flags.task_serial = 1;
4525     taskdata->td_flags.tiedness = TASK_TIED; // AC: serial task cannot be untied
4526     // always start serial tasks linearly
4527     __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4528                           grainsize, extras, tc,
4529 #if OMPT_SUPPORT
4530                           OMPT_GET_RETURN_ADDRESS(0),
4531 #endif
4532                           task_dup);
4533     // !taskdata->td_flags.native => currently force linear spawning of tasks
4534     // for GOMP_taskloop
4535   } else if (num_tasks > num_tasks_min && !taskdata->td_flags.native) {
4536     KA_TRACE(20, ("__kmpc_taskloop: T#%d, go recursive: tc %llu, #tasks %llu"
4537                   "(%lld), grain %llu, extras %llu\n",
4538                   gtid, tc, num_tasks, num_tasks_min, grainsize, extras));
4539     __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4540                          grainsize, extras, tc, num_tasks_min,
4541 #if OMPT_SUPPORT
4542                          OMPT_GET_RETURN_ADDRESS(0),
4543 #endif
4544                          task_dup);
4545   } else {
4546     KA_TRACE(20, ("__kmpc_taskloop: T#%d, go linear: tc %llu, #tasks %llu"
4547                   "(%lld), grain %llu, extras %llu\n",
4548                   gtid, tc, num_tasks, num_tasks_min, grainsize, extras));
4549     __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4550                           grainsize, extras, tc,
4551 #if OMPT_SUPPORT
4552                           OMPT_GET_RETURN_ADDRESS(0),
4553 #endif
4554                           task_dup);
4555   }
4556 
4557 #if OMPT_SUPPORT && OMPT_OPTIONAL
4558   if (ompt_enabled.ompt_callback_work) {
4559     ompt_callbacks.ompt_callback(ompt_callback_work)(
4560         ompt_work_taskloop, ompt_scope_end, &(team_info->parallel_data),
4561         &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
4562   }
4563 #endif
4564 
4565   if (nogroup == 0) {
4566 #if OMPT_SUPPORT && OMPT_OPTIONAL
4567     OMPT_STORE_RETURN_ADDRESS(gtid);
4568 #endif
4569     __kmpc_end_taskgroup(loc, gtid);
4570   }
4571   KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d\n", gtid));
4572 }
4573