#include "kmp.h"
#include "kmp_io.h"
#include "kmp_wait_release.h"
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
static kmp_int32 kmp_node_id_seed = 0;
#endif
static void
__kmp_init_node ( kmp_depnode_t *node )
{
    node->dn.task = NULL; // set once dependences have been processed
    node->dn.successors = NULL;
    __kmp_init_lock(&node->dn.lock);
    node->dn.nrefs = 1; // init creates the first reference to the node
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
    node->dn.id = KMP_TEST_THEN_INC32(&kmp_node_id_seed);
#endif
}
static inline kmp_depnode_t *
__kmp_node_ref ( kmp_depnode_t *node )
{
    KMP_TEST_THEN_INC32(&node->dn.nrefs);
    return node;
}
static inline void
__kmp_node_deref ( kmp_info_t *thread, kmp_depnode_t *node )
{
    if ( !node ) return;

    kmp_int32 n = KMP_TEST_THEN_DEC32(&node->dn.nrefs) - 1;
    if ( n == 0 ) {
        KMP_ASSERT(node->dn.nrefs == 0);
#if USE_FAST_MEMORY
        __kmp_fast_free(thread,node);
#else
        __kmp_thread_free(thread,node);
#endif
    }
}
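/* Reference-count lifecycle (summary, inferred from the code in this file): __kmp_init_node
 * creates a node holding one reference for its owning task. Each successor-list entry and
 * each dephash last_out slot takes an extra reference via __kmp_node_ref and drops it via
 * __kmp_node_deref, so a depnode is freed only after its task has finished and every list
 * that pointed at it has been cleaned up. */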
#define KMP_ACQUIRE_DEPNODE(gtid,n) __kmp_acquire_lock(&(n)->dn.lock,(gtid))
#define KMP_RELEASE_DEPNODE(gtid,n) __kmp_release_lock(&(n)->dn.lock,(gtid))
static void
__kmp_depnode_list_free ( kmp_info_t *thread, kmp_depnode_list *list );
static const kmp_int32 kmp_dephash_log2 = 6;
static const kmp_int32 kmp_dephash_size = (1 << kmp_dephash_log2);
static inline kmp_int32
__kmp_dephash_hash ( kmp_intptr_t addr )
{
    return ((addr >> kmp_dephash_log2) ^ addr) % kmp_dephash_size;
}
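/* Worked example (illustrative): with kmp_dephash_log2 == 6, the address 0x1040 hashes to
 *   ((0x1040 >> 6) ^ 0x1040) % 64 == (0x41 ^ 0x1040) % 64 == 0x1001 % 64 == 1
 * Folding the shifted-out bits back in with the XOR spreads addresses that share their low
 * 6 bits across buckets, instead of colliding as they would with a plain (addr % 64). */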
static kmp_dephash_t *
__kmp_dephash_create ( kmp_info_t *thread )
{
    kmp_dephash_t *h;

    kmp_int32 size = kmp_dephash_size * sizeof(kmp_dephash_entry_t) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
    h = (kmp_dephash_t *) __kmp_fast_allocate( thread, size );
#else
    h = (kmp_dephash_t *) __kmp_thread_malloc( thread, size );
#endif

#ifdef KMP_DEBUG
    h->nelements = 0;
    h->nconflicts = 0;
#endif
    h->buckets = (kmp_dephash_entry **)(h+1);

    for ( kmp_int32 i = 0; i < kmp_dephash_size; i++ )
        h->buckets[i] = 0;

    return h;
}
static void
__kmp_dephash_free ( kmp_info_t *thread, kmp_dephash_t *h )
{
    for ( kmp_int32 i=0; i < kmp_dephash_size; i++ ) {
        if ( h->buckets[i] ) {
            kmp_dephash_entry_t *next;
            for ( kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next ) {
                next = entry->next_in_bucket;
                __kmp_depnode_list_free(thread,entry->last_ins);
                __kmp_node_deref(thread,entry->last_out);
#if USE_FAST_MEMORY
                __kmp_fast_free(thread,entry);
#else
                __kmp_thread_free(thread,entry);
#endif
            }
        }
    }
#if USE_FAST_MEMORY
    __kmp_fast_free(thread,h);
#else
    __kmp_thread_free(thread,h);
#endif
}
static kmp_dephash_entry *
__kmp_dephash_find ( kmp_info_t *thread, kmp_dephash_t *h, kmp_intptr_t addr )
{
    kmp_int32 bucket = __kmp_dephash_hash(addr);

    kmp_dephash_entry_t *entry;
    for ( entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket )
        if ( entry->addr == addr ) break;

    if ( entry == NULL ) {
        // create a new entry; only the encountering thread touches the hash, so no locking is needed
#if USE_FAST_MEMORY
        entry = (kmp_dephash_entry_t *) __kmp_fast_allocate( thread, sizeof(kmp_dephash_entry_t) );
#else
        entry = (kmp_dephash_entry_t *) __kmp_thread_malloc( thread, sizeof(kmp_dephash_entry_t) );
#endif
        entry->addr = addr;
        entry->last_out = NULL;
        entry->last_ins = NULL;
        entry->next_in_bucket = h->buckets[bucket];
        h->buckets[bucket] = entry;
#ifdef KMP_DEBUG
        h->nelements++;
        if ( entry->next_in_bucket ) h->nconflicts++;
#endif
    }
    return entry;
}
static kmp_depnode_list_t *
__kmp_add_node ( kmp_info_t *thread, kmp_depnode_list_t *list, kmp_depnode_t *node )
{
    kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
    new_head = (kmp_depnode_list_t *) __kmp_fast_allocate(thread,sizeof(kmp_depnode_list_t));
#else
    new_head = (kmp_depnode_list_t *) __kmp_thread_malloc(thread,sizeof(kmp_depnode_list_t));
#endif

    new_head->node = __kmp_node_ref(node);
    new_head->next = list;

    return new_head;
}
static void
__kmp_depnode_list_free ( kmp_info_t *thread, kmp_depnode_list *list )
{
    kmp_depnode_list *next;

    for ( ; list ; list = next ) {
        next = list->next;

        __kmp_node_deref(thread,list->node);
#if USE_FAST_MEMORY
        __kmp_fast_free(thread,list);
#else
        __kmp_thread_free(thread,list);
#endif
    }
}
static inline void
__kmp_track_dependence ( kmp_depnode_t *source, kmp_depnode_t *sink )
{
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
    kmp_taskdata_t * task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    kmp_taskdata_t * task_sink = KMP_TASK_TO_TASKDATA(sink->dn.task);

    __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id, task_source->td_ident->psource,
                 sink->dn.id, task_sink->td_ident->psource);
#endif
}
template< bool filter >
static inline kmp_int32
__kmp_process_deps ( kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *hash,
                     bool dep_barrier, kmp_int32 ndeps, kmp_depend_info_t *dep_list )
{
    KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : dep_barrier = %d\n",
                  filter, gtid, ndeps, dep_barrier ) );

    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_int32 npredecessors = 0;
    for ( kmp_int32 i = 0; i < ndeps; i++ ) {
        const kmp_depend_info_t * dep = &dep_list[i];

        KMP_DEBUG_ASSERT(dep->flags.in);

        if ( filter && dep->base_addr == 0 ) continue; // skip entries voided by the duplicate filter

        kmp_dephash_entry_t *info = __kmp_dephash_find(thread,hash,dep->base_addr);
        kmp_depnode_t *last_out = info->last_out;

        if ( dep->flags.out && info->last_ins ) {
            // out/inout dependence: every task that read this address last is a predecessor
            for ( kmp_depnode_list_t * p = info->last_ins; p; p = p->next ) {
                kmp_depnode_t * indep = p->node;
                if ( indep->dn.task ) {
                    KMP_ACQUIRE_DEPNODE(gtid,indep);
                    if ( indep->dn.task ) {
                        __kmp_track_dependence(indep,node);
                        indep->dn.successors = __kmp_add_node(thread, indep->dn.successors, node);
                        KA_TRACE(40,("__kmp_process_deps<%d>: T#%d adding dependence from %p to %p\n",
                                     filter, gtid, KMP_TASK_TO_TASKDATA(indep->dn.task),
                                     KMP_TASK_TO_TASKDATA(node->dn.task)));
                        npredecessors++;
                    }
                    KMP_RELEASE_DEPNODE(gtid,indep);
                }
            }

            __kmp_depnode_list_free(thread,info->last_ins);
            info->last_ins = NULL;

        } else if ( last_out && last_out->dn.task ) {
            // in or out dependence: the last task that wrote this address is a predecessor
            KMP_ACQUIRE_DEPNODE(gtid,last_out);
            if ( last_out->dn.task ) {
                __kmp_track_dependence(last_out,node);
                last_out->dn.successors = __kmp_add_node(thread, last_out->dn.successors, node);
                KA_TRACE(40,("__kmp_process_deps<%d>: T#%d adding dependence from %p to %p\n",
                             filter, gtid, KMP_TASK_TO_TASKDATA(last_out->dn.task),
                             KMP_TASK_TO_TASKDATA(node->dn.task)));
                npredecessors++;
            }
            KMP_RELEASE_DEPNODE(gtid,last_out);
        }

        if ( dep_barrier ) {
            // A barrier is a sync point in the serial sequence: previous outputs are
            // guaranteed to be completed after it, so the last output node can be cleared.
            __kmp_node_deref(thread,last_out);
            info->last_out = NULL;
        } else {
            if ( dep->flags.out ) {
                __kmp_node_deref(thread,last_out);
                info->last_out = __kmp_node_ref(node);
            } else
                info->last_ins = __kmp_add_node(thread, info->last_ins, node);
        }
    }

    KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter, gtid, npredecessors ) );

    return npredecessors;
}
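/* Dependence rules implemented above (illustrative sketch, not runtime code): for three
 * tasks referencing the same address x,
 *
 *     task A depend(out: x)   // A becomes info->last_out
 *     task B depend(in:  x)   // edge A -> B; B joins info->last_ins
 *     task C depend(out: x)   // edge B -> C; last_ins is cleared; C becomes last_out
 *
 * Writers wait on every reader registered since the last writer, while readers wait only
 * on the last writer, so independent readers such as B may run concurrently. */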
#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)
// Returns true if the task has any outstanding dependences.
static bool
__kmp_check_deps ( kmp_int32 gtid, kmp_depnode_t *node, kmp_task_t *task, kmp_dephash_t *hash,
                   bool dep_barrier, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                   kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list )
{
    int i;

    kmp_taskdata_t * taskdata = KMP_TASK_TO_TASKDATA(task);
    KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependencies for task %p : %d possibly aliased "
                  "dependencies, %d non-aliased dependencies : dep_barrier=%d .\n",
                  gtid, taskdata, ndeps, ndeps_noalias, dep_barrier ) );

    // Filter duplicate addresses in dep_list: merge the flags of later duplicates into the
    // first occurrence and void them so that __kmp_process_deps<true> skips them.
    for ( i = 0; i < ndeps; i++ ) {
        if ( dep_list[i].base_addr != 0 )
            for ( int j = i+1; j < ndeps; j++ )
                if ( dep_list[i].base_addr == dep_list[j].base_addr ) {
                    dep_list[i].flags.in |= dep_list[j].flags.in;
                    dep_list[i].flags.out |= dep_list[j].flags.out;
                    dep_list[j].base_addr = 0; // mark element j as void
                }
    }

    // No atomics needed: no other thread can access this node yet. Starting at -1 ensures
    // that releasing predecessors cannot drive the counter to 0 and queue this task before
    // all dependencies have been processed.
    node->dn.npredecessors = -1;

    // Pack all predecessor additions into a single atomic operation at the end.
    int npredecessors;
    npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, ndeps, dep_list);
    npredecessors += __kmp_process_deps<false>(gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list);

    node->dn.task = task;
    KMP_MB();

    npredecessors++; // account for the initial fake value (-1)

    // Publish the count and obtain the current value to check whether there are still
    // outstanding dependences (some tasks may have finished while we processed them).
    npredecessors = KMP_TEST_THEN_ADD32(&node->dn.npredecessors, npredecessors) + npredecessors;

    KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n", gtid, npredecessors, taskdata ) );

    // Beyond this point the task could be queued (and executed) by a releasing task.
    return npredecessors > 0 ? true : false;
}
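/* Worked example of the counter trick (illustrative): suppose __kmp_process_deps found 2
 * predecessors and one of them completed while we were still processing. dn.npredecessors
 * goes -1 -> -2 (the early release can never reach 0), and the single atomic add then
 * publishes 2 + 1 = 3, returning -2 + 3 = 1: one predecessor is still outstanding, so
 * __kmp_check_deps correctly reports a blocking dependence. */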
void
__kmp_release_deps ( kmp_int32 gtid, kmp_taskdata_t *task )
{
    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_depnode_t *node = task->td_depnode;

    if ( task->td_dephash ) {
        KA_TRACE(40, ("__kmp_release_deps: T#%d freeing dependencies hash of task %p.\n", gtid, task ) );
        __kmp_dephash_free(thread,task->td_dephash);
    }

    if ( !node ) return;

    KA_TRACE(20, ("__kmp_release_deps: T#%d notifying successors of task %p.\n", gtid, task ) );

    KMP_ACQUIRE_DEPNODE(gtid,node);
    node->dn.task = NULL; // mark this task as finished, so no new dependences are added to it
    KMP_RELEASE_DEPNODE(gtid,node);

    kmp_depnode_list_t *next;
    for ( kmp_depnode_list_t *p = node->dn.successors; p; p = next ) {
        kmp_depnode_t *successor = p->node;
        kmp_int32 npredecessors = KMP_TEST_THEN_DEC32(&successor->dn.npredecessors) - 1;

        // The successor task can be NULL for wait_deps, or because its dependences are
        // still being processed.
        if ( npredecessors == 0 ) {
            KMP_MB();
            if ( successor->dn.task ) {
                KA_TRACE(20, ("__kmp_release_deps: T#%d successor %p of %p scheduled for execution.\n",
                              gtid, successor->dn.task, task ) );
                __kmp_omp_task(gtid,successor->dn.task,false);
            }
        }

        next = p->next;
        __kmp_node_deref(thread,p->node);
#if USE_FAST_MEMORY
        __kmp_fast_free(thread,p);
#else
        __kmp_thread_free(thread,p);
#endif
    }

    __kmp_node_deref(thread,node);

    KA_TRACE(20, ("__kmp_release_deps: T#%d all successors of %p notified of completion\n", gtid, task ) );
}
/*!
@ingroup TASKING
Schedule a non-thread-switchable task with dependences for execution.
@return TASK_CURRENT_NOT_QUEUED if the encountering task was not suspended and queued,
or TASK_CURRENT_QUEUED if it was.
*/
kmp_int32
__kmpc_omp_task_with_deps( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task,
                           kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                           kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list )
{
    kmp_taskdata_t * new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata ) );

    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_taskdata_t * current_task = thread->th.th_current_task;

    // Dependences are not tracked when tasks execute serially.
    bool serial = current_task->td_flags.team_serial ||
                  current_task->td_flags.tasking_ser ||
                  current_task->td_flags.final;

    if ( !serial && ( ndeps > 0 || ndeps_noalias > 0 )) {
        // If no dependences have been tracked yet, create the dependence hash.
        if ( current_task->td_dephash == NULL )
            current_task->td_dephash = __kmp_dephash_create(thread);

#if USE_FAST_MEMORY
        kmp_depnode_t *node = (kmp_depnode_t *) __kmp_fast_allocate(thread,sizeof(kmp_depnode_t));
#else
        kmp_depnode_t *node = (kmp_depnode_t *) __kmp_thread_malloc(thread,sizeof(kmp_depnode_t));
#endif

        __kmp_init_node(node);
        new_taskdata->td_depnode = node;

        if ( __kmp_check_deps( gtid, node, new_task, current_task->td_dephash, NO_DEP_BARRIER,
                               ndeps, dep_list, ndeps_noalias, noalias_dep_list ) ) {
            KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking dependencies: "
                          "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n", gtid, loc_ref,
                          new_taskdata ) );
            return TASK_CURRENT_NOT_QUEUED;
        }
    }

    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking dependencies : "
                  "loc=%p task=%p, transferring to __kmpc_omp_task\n", gtid, loc_ref,
                  new_taskdata ) );

    return __kmpc_omp_task(loc_ref,gtid,new_task);
}
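/* Usage sketch (illustrative, not part of the runtime): a compiler lowers a task with a
 * depend clause to this entry point roughly as below; the kmp_depend_info_t field names
 * are assumed from kmp.h.
 *
 *     // #pragma omp task depend(inout: x)
 *     kmp_depend_info_t dep;
 *     dep.base_addr = (kmp_intptr_t)&x;
 *     dep.len       = sizeof(x);
 *     dep.flags.in  = 1;  // inout sets both flags; __kmp_process_deps asserts flags.in
 *     dep.flags.out = 1;
 *     __kmpc_omp_task_with_deps(&loc, gtid, new_task, 1, &dep, 0, NULL);
 */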
/*!
@ingroup TASKING
Blocks the current task until all specified dependences have been fulfilled.
*/
void
__kmpc_omp_wait_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                       kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list )
{
    KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref) );

    if ( ndeps == 0 && ndeps_noalias == 0 ) {
        KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependencies to wait upon : loc=%p\n", gtid, loc_ref) );
        return;
    }

    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_taskdata_t * current_task = thread->th.th_current_task;

    // Return immediately if dependences are not tracked (serialized execution) or the
    // dephash has not been created yet (nothing to wait for).
    if ( current_task->td_flags.team_serial || current_task->td_flags.tasking_ser ||
         current_task->td_flags.final || current_task->td_dephash == NULL ) {
        KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking dependencies : loc=%p\n", gtid, loc_ref) );
        return;
    }

    kmp_depnode_t node;
    __kmp_init_node(&node);

    if (!__kmp_check_deps( gtid, &node, NULL, current_task->td_dephash, DEP_BARRIER,
                           ndeps, dep_list, ndeps_noalias, noalias_dep_list )) {
        KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking dependencies : loc=%p\n", gtid, loc_ref) );
        return;
    }

    // Execute other tasks until all predecessors of the stack-allocated node have completed.
    int thread_finished = FALSE;
    kmp_flag_32 flag((volatile kmp_uint32 *)&(node.dn.npredecessors), 0U);
    while ( node.dn.npredecessors > 0 ) {
        flag.execute_tasks(thread, gtid, FALSE, &thread_finished,
#if USE_ITT_BUILD
                           NULL,
#endif
                           __kmp_task_stealing_constraint );
    }

    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n", gtid, loc_ref) );
}