/*
 * kmp_taskdeps.h
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef KMP_TASKDEPS_H
#define KMP_TASKDEPS_H

#include "kmp.h"

// Serialize access to a dependence node, e.g. its task pointer and successor
// list.
#define KMP_ACQUIRE_DEPNODE(gtid, n) __kmp_acquire_lock(&(n)->dn.lock, (gtid))
#define KMP_RELEASE_DEPNODE(gtid, n) __kmp_release_lock(&(n)->dn.lock, (gtid))

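// Drop one reference to a dependence node and free it once the last
// reference goes away.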
static inline void __kmp_node_deref(kmp_info_t *thread, kmp_depnode_t *node) {
  if (!node)
    return;

  // KMP_ATOMIC_DEC returns the previous value, so subtract one to get the new
  // reference count.
  kmp_int32 n = KMP_ATOMIC_DEC(&node->dn.nrefs) - 1;
  KMP_DEBUG_ASSERT(n >= 0);
  if (n == 0) {
    KMP_ASSERT(node->dn.nrefs == 0);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, node);
#else
    __kmp_thread_free(thread, node);
#endif
  }
}

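// Free a dependence-node list, dereferencing each node and releasing the
// list cells themselves.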
static inline void __kmp_depnode_list_free(kmp_info_t *thread,
                                           kmp_depnode_list *list) {
  kmp_depnode_list *next;

  for (; list; list = next) {
    next = list->next;

    __kmp_node_deref(thread, list->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, list);
#else
    __kmp_thread_free(thread, list);
#endif
  }
}

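// Free every entry of a dependence hash table: the last_set/prev_set lists,
// the last_out node, and any mutexinoutset lock owned by the entry.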
static inline void __kmp_dephash_free_entries(kmp_info_t *thread,
                                              kmp_dephash_t *h) {
  for (size_t i = 0; i < h->size; i++) {
    if (h->buckets[i]) {
      kmp_dephash_entry_t *next;
      for (kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next) {
        next = entry->next_in_bucket;
        __kmp_depnode_list_free(thread, entry->last_set);
        __kmp_depnode_list_free(thread, entry->prev_set);
        __kmp_node_deref(thread, entry->last_out);
        if (entry->mtx_lock) {
          __kmp_destroy_lock(entry->mtx_lock);
          __kmp_free(entry->mtx_lock);
        }
#if USE_FAST_MEMORY
        __kmp_fast_free(thread, entry);
#else
        __kmp_thread_free(thread, entry);
#endif
      }
      h->buckets[i] = NULL;
    }
  }
}

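// Free the entries of a dependence hash table and then the table itself.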
static inline void __kmp_dephash_free(kmp_info_t *thread, kmp_dephash_t *h) {
  __kmp_dephash_free_entries(thread, h);
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, h);
#else
  __kmp_thread_free(thread, h);
#endif
}

extern void __kmpc_give_task(kmp_task_t *ptask, kmp_int32 start);

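// Called when a task completes: release any mutexinoutset locks it still
// holds, free its dependence hash, and notify its successors, scheduling any
// that become ready.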
static inline void __kmp_release_deps(kmp_int32 gtid, kmp_taskdata_t *task) {
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_depnode_t *node = task->td_depnode;

  // Check mutexinoutset dependencies, release locks
  if (UNLIKELY(node && (node->dn.mtx_num_locks < 0))) {
    // negative num_locks means all locks were acquired
    node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
    for (int i = node->dn.mtx_num_locks - 1; i >= 0; --i) {
      KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
      __kmp_release_lock(node->dn.mtx_locks[i], gtid);
    }
  }

  if (task->td_dephash) {
    KA_TRACE(
        40, ("__kmp_release_deps: T#%d freeing dependencies hash of task %p.\n",
             gtid, task));
    __kmp_dephash_free(thread, task->td_dephash);
    task->td_dephash = NULL;
  }

  if (!node)
    return;

  KA_TRACE(20, ("__kmp_release_deps: T#%d notifying successors of task %p.\n",
                gtid, task));

  KMP_ACQUIRE_DEPNODE(gtid, node);
  node->dn.task =
      NULL; // mark this task as finished, so no new dependencies are generated
  KMP_RELEASE_DEPNODE(gtid, node);

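  // Walk the successor list: each successor loses one predecessor, and any
  // successor whose predecessor count drops to zero becomes ready to run.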
  kmp_depnode_list_t *next;
  kmp_taskdata_t *next_taskdata;
  for (kmp_depnode_list_t *p = node->dn.successors; p; p = next) {
    kmp_depnode_t *successor = p->node;
    kmp_int32 npredecessors = KMP_ATOMIC_DEC(&successor->dn.npredecessors) - 1;

    // successor task can be NULL for wait_depends or because deps are still
    // being processed
    if (npredecessors == 0) {
      KMP_MB();
      if (successor->dn.task) {
        KA_TRACE(20, ("__kmp_release_deps: T#%d successor %p of %p scheduled "
                      "for execution.\n",
                      gtid, successor->dn.task, task));
        // If a regular task depends on a hidden helper task, then when the
        // hidden helper task finishes, the regular task should be executed by
        // its encountering team.
        if (KMP_HIDDEN_HELPER_THREAD(gtid)) {
          // Hidden helper thread can only execute hidden helper tasks
          KMP_ASSERT(task->td_flags.hidden_helper);
          next_taskdata = KMP_TASK_TO_TASKDATA(successor->dn.task);
          // If the dependent task is a regular task, we need to push it to its
          // encountering thread's queue; otherwise, it can be pushed to its
          // own queue.
          if (!next_taskdata->td_flags.hidden_helper) {
            __kmpc_give_task(
                successor->dn.task,
                __kmp_tid_from_gtid(next_taskdata->encountering_gtid));
          } else {
            __kmp_omp_task(gtid, successor->dn.task, false);
          }
        } else {
          __kmp_omp_task(gtid, successor->dn.task, false);
        }
      }
    }

    next = p->next;
    __kmp_node_deref(thread, p->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, p);
#else
    __kmp_thread_free(thread, p);
#endif
  }

  __kmp_node_deref(thread, node);

  KA_TRACE(
      20,
      ("__kmp_release_deps: T#%d all successors of %p notified of completion\n",
       gtid, task));
}

#endif // KMP_TASKDEPS_H