LLVM OpenMP* Runtime Library
kmp_threadprivate.cpp
/*
 * kmp_threadprivate.cpp -- OpenMP threadprivate support library
 */

//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"

#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1

#ifdef KMP_TASK_COMMON_DEBUG
// dump_list() is defined at the bottom of this file but is called from the
// lookup helpers below, so it must be declared before its first use.
static void dump_list(void);
#endif

void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);

struct shared_table __kmp_threadprivate_d_table;

static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct private_common *
    __kmp_threadprivate_find_task_common(struct common_table *tbl, int gtid,
                                         void *pc_addr) {
  struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, called with "
                "address %p\n",
                gtid, pc_addr));
  dump_list();
#endif

  // Walk the hash chain for this address; each thread's table maps the
  // address of the original (serial) variable to its private copy.
  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, found "
                    "node %p on list\n",
                    gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct shared_common *
    __kmp_find_shared_task_common(struct shared_table *tbl, int gtid,
                                  void *pc_addr) {
  struct shared_common *tn;

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(
          10,
          ("__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
           gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

// Create a template for the data-initialized storage. Either the template is
// NULL, indicating zero fill, or the template is a copy of the original data.
static struct private_data *__kmp_init_common_data(void *pc_addr,
                                                   size_t pc_size) {
  struct private_data *d;
  size_t i;
  char *p;

  d = (struct private_data *)__kmp_allocate(sizeof(struct private_data));
  /*
  d->data = 0; // AC: commented out because __kmp_allocate zeroes the memory
  d->next = 0;
  */
  d->size = pc_size;
  d->more = 1;

  p = (char *)pc_addr;

  // If every byte of the original is zero, leave d->data NULL (zero fill);
  // otherwise snapshot the original bytes as the initialization template.
  for (i = pc_size; i > 0; --i) {
    if (*p++ != '\0') {
      d->data = __kmp_allocate(pc_size);
      KMP_MEMCPY(d->data, pc_addr, pc_size);
      break;
    }
  }

  return d;
}

// Initialize the data area from the template.
static void __kmp_copy_common_data(void *pc_addr, struct private_data *d) {
  char *addr = (char *)pc_addr;
  int i, offset;

  for (offset = 0; d != 0; d = d->next) {
    for (i = d->more; i > 0; --i) {
      if (d->data == 0)
        memset(&addr[offset], '\0', d->size);
      else
        KMP_MEMCPY(&addr[offset], d->data, d->size);
      offset += d->size;
    }
  }
}

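// Illustration (not part of the runtime): taken together, the two helpers
// above implement template-based initialization of private copies. Assuming
// a hypothetical threadprivate variable
//
//   int counter = 42;
//   #pragma omp threadprivate(counter)
//
// __kmp_init_common_data() scans the bytes of `counter`, finds a nonzero
// byte, and snapshots the value 42 into the template; each thread's private
// copy is then initialized by KMP_MEMCPY in __kmp_copy_common_data(). Had
// `counter` been zero-initialized, the template's data pointer would stay
// NULL and each copy would be memset to zero instead.
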
/* We are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
void __kmp_common_initialize(void) {
  if (!TCR_4(__kmp_init_common)) {
    int q;
#ifdef KMP_DEBUG
    int gtid;
#endif

    __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
    /* Verify the uber masters were initialized */
    for (gtid = 0; gtid < __kmp_threads_capacity; gtid++)
      if (__kmp_root[gtid]) {
        KMP_DEBUG_ASSERT(__kmp_root[gtid]->r.r_uber_thread);
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
          KMP_DEBUG_ASSERT(
              !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q]);
        /* __kmp_root[ gtid ]-> r.r_uber_thread ->
         * th.th_pri_common -> data[ q ] = 0; */
      }
#endif /* KMP_DEBUG */

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
      __kmp_threadprivate_d_table.data[q] = 0;

    TCW_4(__kmp_init_common, TRUE);
  }
}

/* Call all destructors for threadprivate data belonging to all threads.
   Currently unused! */
void __kmp_common_destroy(void) {
  if (TCR_4(__kmp_init_common)) {
    int q;

    TCW_4(__kmp_init_common, FALSE);

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      int gtid;
      struct private_common *tn;
      struct shared_common *d_tn;

      /* C++ destructors need to be called once per thread before exiting.
         Don't call destructors for the master thread, though, unless we used
         a copy constructor */

      for (d_tn = __kmp_threadprivate_d_table.data[q]; d_tn;
           d_tn = d_tn->next) {
        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
            }
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtor)(tn->par_addr);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtor)(d_tn->obj_init);
            }
          }
        }
      }
      __kmp_threadprivate_d_table.data[q] = 0;
    }
  }
}

/* Call all destructors for threadprivate data belonging to this thread */
void __kmp_common_destroy_gtid(int gtid) {
  struct private_common *tn;
  struct shared_common *d_tn;

  KC_TRACE(10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid));
  if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid)) : (!KMP_UBER_GTID(gtid))) {

    if (TCR_4(__kmp_init_common)) {

      /* Cannot do this here since not all threads have destroyed their data */
      /* TCW_4(__kmp_init_common, FALSE); */

      for (tn = __kmp_threads[gtid]->th.th_pri_head; tn; tn = tn->link) {

        d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                             tn->gbl_addr);

        KMP_DEBUG_ASSERT(d_tn);

        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            (void)(*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            (void)(*d_tn->dt.dtor)(tn->par_addr);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtor)(d_tn->obj_init);
          }
        }
      }
      KC_TRACE(30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors "
                    "complete\n",
                    gtid));
    }
  }
}

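// Illustration (not part of the runtime): a worker thread that touched two
// threadprivate variables has two nodes on its th.th_pri_head list, so the
// loop above runs the registered destructor once per node on that thread's
// private copy (tn->par_addr). Uber threads are skipped by the gtid check
// because their "private" copy aliases the original global (see
// kmp_threadprivate_insert below).
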
#ifdef KMP_TASK_COMMON_DEBUG
static void dump_list(void) {
  int p, q;

  for (p = 0; p < __kmp_all_nth; ++p) {
    if (!__kmp_threads[p])
      continue;
    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      if (__kmp_threads[p]->th.th_pri_common->data[q]) {
        struct private_common *tn;

        KC_TRACE(10, ("\tdump_list: gtid:%d addresses\n", p));

        for (tn = __kmp_threads[p]->th.th_pri_common->data[q]; tn;
             tn = tn->next) {
          KC_TRACE(10,
                   ("\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                    tn->gbl_addr, tn->par_addr));
        }
      }
    }
  }
}
#endif /* KMP_TASK_COMMON_DEBUG */

// NOTE: this routine is to be called only from the serial part of the program.
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size) {
  struct shared_common **lnk_tn, *d_tn;
  KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
                   __kmp_threads[gtid]->th.th_root->r.r_active == 0);

  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                       pc_addr);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));

    d_tn->gbl_addr = pc_addr;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /*
    d_tn->obj_init = 0; // AC: commented out because __kmp_allocate zeroes
                        // the memory
    d_tn->ct.ctor = 0;
    d_tn->cct.cctor = 0;
    d_tn->dt.dtor = 0;
    d_tn->is_vec = FALSE;
    d_tn->vec_len = 0L;
    */
    d_tn->cmn_size = pc_size;

    __kmp_acquire_lock(&__kmp_global_lock, gtid);

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;

    __kmp_release_lock(&__kmp_global_lock, gtid);
  }
}

struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size) {
  struct private_common *tn, **tt;
  struct shared_common *d_tn;

  /* +++++++++ START OF CRITICAL SECTION +++++++++ */
  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  tn = (struct private_common *)__kmp_allocate(sizeof(struct private_common));

  tn->gbl_addr = pc_addr;

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, gtid,
      pc_addr); /* Only the MASTER data table exists. */

  if (d_tn != 0) {
    /* This threadprivate variable has already been seen. */

    if (d_tn->pod_init == 0 && d_tn->obj_init == 0) {
      d_tn->cmn_size = pc_size;

      if (d_tn->is_vec) {
        if (d_tn->ct.ctorv != 0) {
          /* Construct from scratch, so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctorv != 0) {
          /* Now data-initialize the prototype, since it was previously
             registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctorv)(d_tn->obj_init, pc_addr, d_tn->vec_len);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      } else {
        if (d_tn->ct.ctor != 0) {
          /* Construct from scratch, so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctor != 0) {
          /* Now data-initialize the prototype, since it was previously
             registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctor)(d_tn->obj_init, pc_addr);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      }
    }
  } else {
    struct shared_common **lnk_tn;

    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = pc_addr;
    d_tn->cmn_size = pc_size;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /*
    d_tn->obj_init = 0; // AC: commented out because __kmp_allocate zeroes
                        // the memory
    d_tn->ct.ctor = 0;
    d_tn->cct.cctor = 0;
    d_tn->dt.dtor = 0;
    d_tn->is_vec = FALSE;
    d_tn->vec_len = 0L;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }

  tn->cmn_size = d_tn->cmn_size;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid))) {
    tn->par_addr = (void *)pc_addr;
  } else {
    tn->par_addr = (void *)__kmp_allocate(tn->cmn_size);
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);
  /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
  if (pc_size > d_tn->cmn_size) {
    KC_TRACE(
        10, ("__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
             ", %" KMP_UINTPTR_SPEC ")\n",
             pc_addr, pc_size, d_tn->cmn_size));
    KMP_FATAL(TPCommonBlocksInconsist);
  }
#endif /* USE_CHECKS_COMMON */

  tt = &(__kmp_threads[gtid]->th.th_pri_common->data[KMP_HASH(pc_addr)]);

#ifdef KMP_TASK_COMMON_DEBUG
  if (*tt != 0) {
    KC_TRACE(
        10,
        ("__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
         gtid, pc_addr));
  }
#endif
  tn->next = *tt;
  *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10,
           ("__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
            gtid, pc_addr));
  dump_list();
#endif

  /* Link the node into a simple list */

  tn->link = __kmp_threads[gtid]->th.th_pri_head;
  __kmp_threads[gtid]->th.th_pri_head = tn;

#ifdef BUILD_TV
  __kmp_tv_threadprivate_store(__kmp_threads[gtid], tn->gbl_addr, tn->par_addr);
#endif

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid)))
    return tn;

  /* If a C++ object with a copy constructor, use it;
   * else if a C++ object with a constructor, use it for the non-master copies
   * only;
   * else use pod_init and memcpy.
   *
   * C++ constructors need to be called once for each non-master thread on
   * allocate.
   * C++ copy constructors need to be called once for each thread on allocate. */

  /* C++ object with constructors/destructors; don't call constructors for the
     master thread, though */
  if (d_tn->is_vec) {
    if (d_tn->ct.ctorv != 0) {
      (void)(*d_tn->ct.ctorv)(tn->par_addr, d_tn->vec_len);
    } else if (d_tn->cct.cctorv != 0) {
      (void)(*d_tn->cct.cctorv)(tn->par_addr, d_tn->obj_init, d_tn->vec_len);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  } else {
    if (d_tn->ct.ctor != 0) {
      (void)(*d_tn->ct.ctor)(tn->par_addr);
    } else if (d_tn->cct.cctor != 0) {
      (void)(*d_tn->cct.cctor)(tn->par_addr, d_tn->obj_init);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  }
  /* !BUILD_OPENMP_C
  if (tn->par_addr != tn->gbl_addr)
    __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */

  return tn;
}

/* ------------------------------------------------------------------------ */
/* We are currently parallel, and we know the thread id. */
/* ------------------------------------------------------------------------ */

void __kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor,
                                   kmpc_cctor cctor, kmpc_dtor dtor) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  /* Only the global data table exists. */
  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, -1, data);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctor = ctor;
    d_tn->cct.cctor = cctor;
    d_tn->dt.dtor = dtor;
    /*
    d_tn->is_vec = FALSE; // AC: commented out because __kmp_allocate zeroes
                          // the memory
    d_tn->vec_len = 0L;
    d_tn->obj_init = 0;
    d_tn->pod_init = 0;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}

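// Illustration (not part of the runtime; the exact lowering is
// compiler-dependent): for a C++ threadprivate object with a non-trivial
// constructor and destructor,
//
//   static Counter c;
//   #pragma omp threadprivate(c)
//
// a compiler would emit thunks and register them once, roughly:
//
//   void *c_ctor(void *p) { return new (p) Counter(); }
//   void c_dtor(void *p) { static_cast<Counter *>(p)->~Counter(); }
//   ...
//   __kmpc_threadprivate_register(&loc, &c, c_ctor, NULL /* cctor */, c_dtor);
//
// Passing NULL for cctor matches the assertion above: current code
// generation never supplies a copy constructor.
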
void *__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data,
                           size_t size) {
  void *ret;
  struct private_common *tn;

  KC_TRACE(10, ("__kmpc_threadprivate: T#%d called\n", global_tid));

#ifdef USE_CHECKS_COMMON
  if (!__kmp_init_serial)
    KMP_FATAL(RTLNotInitialized);
#endif /* USE_CHECKS_COMMON */

  if (!__kmp_threads[global_tid]->th.th_root->r.r_active && !__kmp_foreign_tp) {
    /* The parallel address will NEVER overlap with the data_address */
    /* dkp: 3rd arg to kmp_threadprivate_insert_private_data() is the
     * data_address; use data_address = data */

    KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting private data\n",
                  global_tid));
    kmp_threadprivate_insert_private_data(global_tid, data, data, size);

    ret = data;
  } else {
    KC_TRACE(
        50,
        ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
         global_tid, data));
    tn = __kmp_threadprivate_find_task_common(
        __kmp_threads[global_tid]->th.th_pri_common, global_tid, data);

    if (tn) {
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d found data\n", global_tid));
#ifdef USE_CHECKS_COMMON
      if ((size_t)size > tn->cmn_size) {
        KC_TRACE(10, ("THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
                      ", %" KMP_UINTPTR_SPEC ")\n",
                      data, size, tn->cmn_size));
        KMP_FATAL(TPCommonBlocksInconsist);
      }
#endif /* USE_CHECKS_COMMON */
    } else {
      /* The parallel address will NEVER overlap with the data_address */
      /* dkp: 3rd arg to kmp_threadprivate_insert() is the data_address; use
       * data_address = data */
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid));
      tn = kmp_threadprivate_insert(global_tid, data, data, size);
    }

    ret = tn->par_addr;
  }
  KC_TRACE(10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                global_tid, ret));

  return ret;
}

void *
__kmpc_threadprivate_cached(ident_t *loc,
                            kmp_int32 global_tid, // gtid.
                            void *data, // Pointer to original global variable.
                            size_t size, // Size of original global variable.
                            void ***cache) {
  KC_TRACE(10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, "
                "address: %p, size: %" KMP_SIZE_T_SPEC "\n",
                global_tid, *cache, data, size));

  if (TCR_PTR(*cache) == 0) {
    __kmp_acquire_lock(&__kmp_global_lock, global_tid);

    if (TCR_PTR(*cache) == 0) {
      __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
      __kmp_tp_cached = 1;
      __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
      void **my_cache;
      KMP_ITT_IGNORE(
          my_cache = (void **)__kmp_allocate(
              sizeof(void *) * __kmp_tp_capacity + sizeof(kmp_cached_addr_t)););
      // No need to zero the allocated memory; __kmp_allocate does that.
      KC_TRACE(
          50,
          ("__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
           global_tid, my_cache));

      /* TODO: free all this memory in __kmp_common_destroy using
       * __kmp_threadpriv_cache_list */
      /* Add address of my_cache to linked list for cleanup later */
      kmp_cached_addr_t *tp_cache_addr;

      tp_cache_addr = (kmp_cached_addr_t *)&my_cache[__kmp_tp_capacity];
      tp_cache_addr->addr = my_cache;
      tp_cache_addr->next = __kmp_threadpriv_cache_list;
      __kmp_threadpriv_cache_list = tp_cache_addr;

      KMP_MB();

      TCW_PTR(*cache, my_cache);

      KMP_MB();
    }

    __kmp_release_lock(&__kmp_global_lock, global_tid);
  }

  void *ret;
  if ((ret = TCR_PTR((*cache)[global_tid])) == 0) {
    ret = __kmpc_threadprivate(loc, global_tid, data, (size_t)size);

    TCW_PTR((*cache)[global_tid], ret);
  }
  KC_TRACE(10,
           ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
            global_tid, ret));

  return ret;
}

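// Illustration (not part of the runtime; the exact lowering is
// compiler-dependent): each reference to a threadprivate variable from a
// parallel region goes through a per-variable cache indexed by gtid,
// roughly:
//
//   static int x;
//   #pragma omp threadprivate(x)
//   ...
//   static void **x_cache; // one cache pointer per threadprivate variable
//   int *my_x = (int *)__kmpc_threadprivate_cached(&loc, gtid, &x, sizeof(x),
//                                                  &x_cache);
//   *my_x += 1; // operates on this thread's private copy
//
// The double-checked locking above means only the first caller allocates the
// gtid-indexed cache; every later call is a single array load.
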
void __kmpc_threadprivate_register_vec(ident_t *loc, void *data,
                                       kmpc_ctor_vec ctor, kmpc_cctor_vec cctor,
                                       kmpc_dtor_vec dtor,
                                       size_t vector_length) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register_vec: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, -1,
      data); /* Only the global data table exists. */

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctorv = ctor;
    d_tn->cct.cctorv = cctor;
    d_tn->dt.dtorv = dtor;
    d_tn->is_vec = TRUE;
    d_tn->vec_len = (size_t)vector_length;
    /*
    d_tn->obj_init = 0; // AC: commented out because __kmp_allocate zeroes
                        // the memory
    d_tn->pod_init = 0;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}
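
// Illustration (not part of the runtime; the exact lowering is
// compiler-dependent): the vector variants serve threadprivate arrays of C++
// objects, e.g.
//
//   static Counter arr[8];
//   #pragma omp threadprivate(arr)
//
// where the registered thunks receive the vector_length so construction and
// destruction can be applied per element (assuming, as in this sketch, that
// the length counts elements), roughly:
//
//   void *arr_ctor(void *p, size_t n) {
//     for (size_t i = 0; i < n; ++i)
//       new (static_cast<Counter *>(p) + i) Counter();
//     return p;
//   }
//   ...
//   __kmpc_threadprivate_register_vec(&loc, arr, arr_ctor, NULL /* cctor */,
//                                     arr_dtor, 8);
//
// (arr_dtor would be the matching per-element destructor thunk.)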