LLVM OpenMP* Runtime Library
kmp_taskdeps.h
1 /*
2  * kmp_taskdeps.h
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef KMP_TASKDEPS_H
14 #define KMP_TASKDEPS_H
15 
16 #include "kmp.h"
17 
// Serialize access to a dependence node's successor list / task pointer.
// Thin wrappers over the runtime's plain lock API keyed on the node's
// embedded dn.lock.
#define KMP_ACQUIRE_DEPNODE(gtid, n) __kmp_acquire_lock(&(n)->dn.lock, (gtid))
#define KMP_RELEASE_DEPNODE(gtid, n) __kmp_release_lock(&(n)->dn.lock, (gtid))
20 
21 static inline void __kmp_node_deref(kmp_info_t *thread, kmp_depnode_t *node) {
22  if (!node)
23  return;
24 
25  kmp_int32 n = KMP_ATOMIC_DEC(&node->dn.nrefs) - 1;
26  KMP_DEBUG_ASSERT(n >= 0);
27  if (n == 0) {
28  KMP_ASSERT(node->dn.nrefs == 0);
29 #if USE_FAST_MEMORY
30  __kmp_fast_free(thread, node);
31 #else
32  __kmp_thread_free(thread, node);
33 #endif
34  }
35 }
36 
37 static inline void __kmp_depnode_list_free(kmp_info_t *thread,
38  kmp_depnode_list *list) {
39  kmp_depnode_list *next;
40 
41  for (; list; list = next) {
42  next = list->next;
43 
44  __kmp_node_deref(thread, list->node);
45 #if USE_FAST_MEMORY
46  __kmp_fast_free(thread, list);
47 #else
48  __kmp_thread_free(thread, list);
49 #endif
50  }
51 }
52 
53 static inline void __kmp_dephash_free_entries(kmp_info_t *thread,
54  kmp_dephash_t *h) {
55  for (size_t i = 0; i < h->size; i++) {
56  if (h->buckets[i]) {
57  kmp_dephash_entry_t *next;
58  for (kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next) {
59  next = entry->next_in_bucket;
60  __kmp_depnode_list_free(thread, entry->last_set);
61  __kmp_depnode_list_free(thread, entry->prev_set);
62  __kmp_node_deref(thread, entry->last_out);
63  if (entry->mtx_lock) {
64  __kmp_destroy_lock(entry->mtx_lock);
65  __kmp_free(entry->mtx_lock);
66  }
67 #if USE_FAST_MEMORY
68  __kmp_fast_free(thread, entry);
69 #else
70  __kmp_thread_free(thread, entry);
71 #endif
72  }
73  h->buckets[i] = 0;
74  }
75  }
76  __kmp_node_deref(thread, h->last_all);
77  h->last_all = NULL;
78 }
79 
80 static inline void __kmp_dephash_free(kmp_info_t *thread, kmp_dephash_t *h) {
81  __kmp_dephash_free_entries(thread, h);
82 #if USE_FAST_MEMORY
83  __kmp_fast_free(thread, h);
84 #else
85  __kmp_thread_free(thread, h);
86 #endif
87 }
88 
89 extern void __kmpc_give_task(kmp_task_t *ptask, kmp_int32 start);
90 
// Called when task `task` completes: release its mutexinoutset locks,
// free its dependence hash, and notify/schedule every successor whose
// predecessor count drops to zero. The statement order here is
// deliberate (lock release before hash free, depnode lock around the
// task-pointer clear, KMP_MB before reading successor->dn.task) — do
// not reorder.
static inline void __kmp_release_deps(kmp_int32 gtid, kmp_taskdata_t *task) {
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_depnode_t *node = task->td_depnode;

  // Check mutexinoutset dependencies, release locks
  if (UNLIKELY(node && (node->dn.mtx_num_locks < 0))) {
    // negative num_locks means all locks were acquired
    node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
    // Release in reverse acquisition order.
    for (int i = node->dn.mtx_num_locks - 1; i >= 0; --i) {
      KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
      __kmp_release_lock(node->dn.mtx_locks[i], gtid);
    }
  }

  // The hash tracks dependences of this task's children; once the task
  // is done no more children can be created, so it can be torn down.
  if (task->td_dephash) {
    KA_TRACE(
        40, ("__kmp_release_deps: T#%d freeing dependencies hash of task %p.\n",
             gtid, task));
    __kmp_dephash_free(thread, task->td_dephash);
    task->td_dephash = NULL;
  }

  // Task had no depend clauses: nothing to notify.
  if (!node)
    return;

  KA_TRACE(20, ("__kmp_release_deps: T#%d notifying successors of task %p.\n",
                gtid, task));

  // Clear the task pointer under the node lock so threads registering
  // new dependences see this task as finished.
  KMP_ACQUIRE_DEPNODE(gtid, node);
  node->dn.task =
      NULL; // mark this task as finished, so no new dependencies are generated
  KMP_RELEASE_DEPNODE(gtid, node);

  kmp_depnode_list_t *next;
  kmp_taskdata_t *next_taskdata;
  // For each successor: drop one predecessor; the thread that takes the
  // count to zero owns scheduling that successor.
  for (kmp_depnode_list_t *p = node->dn.successors; p; p = next) {
    kmp_depnode_t *successor = p->node;
    kmp_int32 npredecessors = KMP_ATOMIC_DEC(&successor->dn.npredecessors) - 1;

    // successor task can be NULL for wait_depends or because deps are still
    // being processed
    if (npredecessors == 0) {
      KMP_MB(); // order the decrement before reading successor->dn.task
      if (successor->dn.task) {
        KA_TRACE(20, ("__kmp_release_deps: T#%d successor %p of %p scheduled "
                      "for execution.\n",
                      gtid, successor->dn.task, task));
        // If a regular task depending on a hidden helper task, when the
        // hidden helper task is done, the regular task should be executed by
        // its encountering team.
        if (KMP_HIDDEN_HELPER_THREAD(gtid)) {
          // Hidden helper thread can only execute hidden helper tasks
          KMP_ASSERT(task->td_flags.hidden_helper);
          next_taskdata = KMP_TASK_TO_TASKDATA(successor->dn.task);
          // If the dependent task is a regular task, we need to push to its
          // encountering thread's queue; otherwise, it can be pushed to its own
          // queue.
          if (!next_taskdata->td_flags.hidden_helper) {
            kmp_int32 encountering_gtid =
                next_taskdata->td_alloc_thread->th.th_info.ds.ds_gtid;
            kmp_int32 encountering_tid = __kmp_tid_from_gtid(encountering_gtid);
            __kmpc_give_task(successor->dn.task, encountering_tid);
          } else {
            __kmp_omp_task(gtid, successor->dn.task, false);
          }
        } else {
          __kmp_omp_task(gtid, successor->dn.task, false);
        }
      }
    }

    // Free this successor-list cell regardless of whether the successor
    // was scheduled.
    next = p->next;
    __kmp_node_deref(thread, p->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, p);
#else
    __kmp_thread_free(thread, p);
#endif
  }

  // Drop this task's own reference to its depnode.
  __kmp_node_deref(thread, node);

  KA_TRACE(
      20,
      ("__kmp_release_deps: T#%d all successors of %p notified of completion\n",
       gtid, task));
}
178 
179 #endif // KMP_TASKDEPS_H