1 /*
2 * kmp_alloc.cpp -- private/shared dynamic memory allocation and management
3 */
4
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "kmp.h"
14 #include "kmp_io.h"
15 #include "kmp_wrapper_malloc.h"
16
17 // Disable bget when it is not used
18 #if KMP_USE_BGET
19
20 /* Thread private buffer management code */
21
22 typedef int (*bget_compact_t)(size_t, int);
23 typedef void *(*bget_acquire_t)(size_t);
24 typedef void (*bget_release_t)(void *);
25
26 /* NOTE: bufsize must be a signed datatype */
27
28 #if KMP_OS_WINDOWS
29 #if KMP_ARCH_X86 || KMP_ARCH_ARM
30 typedef kmp_int32 bufsize;
31 #else
32 typedef kmp_int64 bufsize;
33 #endif
34 #else
35 typedef ssize_t bufsize;
36 #endif // KMP_OS_WINDOWS
37
38 /* The three modes of operation are fifo search, lifo search, and best fit */
39
40 typedef enum bget_mode {
41 bget_mode_fifo = 0,
42 bget_mode_lifo = 1,
43 bget_mode_best = 2
44 } bget_mode_t;
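// Illustrative sketch (excluded from the build): the mode is selected per
// thread through kmpc_set_poolmode(), defined later in this file. Best fit
// packs memory tighter at the cost of a slower free-list scan; the default
// is bget_mode_fifo because set_thr_data() zero-initializes thr_data_t.
#if 0
static void example_select_best_fit(void) {
  kmpc_set_poolmode(bget_mode_best);
  void *p = kmpc_malloc(128); // served from the calling thread's pool
  kmpc_free(p);
}
#endif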
45
46 static void bpool(kmp_info_t *th, void *buffer, bufsize len);
47 static void *bget(kmp_info_t *th, bufsize size);
48 static void *bgetz(kmp_info_t *th, bufsize size);
49 static void *bgetr(kmp_info_t *th, void *buffer, bufsize newsize);
50 static void brel(kmp_info_t *th, void *buf);
51 static void bectl(kmp_info_t *th, bget_compact_t compact,
52 bget_acquire_t acquire, bget_release_t release,
53 bufsize pool_incr);
54
55 /* BGET CONFIGURATION */
56 /* Buffer allocation size quantum: all buffers allocated are a
57 multiple of this size. This MUST be a power of two. */
58
59 /* On IA-32 architecture with Linux* OS, malloc() does not
60 ensure 16 byte alignment */
61
62 #if KMP_ARCH_X86 || !KMP_HAVE_QUAD
63
64 #define SizeQuant 8
65 #define AlignType double
66
67 #else
68
69 #define SizeQuant 16
70 #define AlignType _Quad
71
72 #endif
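// A minimal sketch of the quantum rounding this power-of-two requirement
// enables (the same expression appears in bget() below): adding SizeQuant-1
// and masking rounds any request up to the next multiple of SizeQuant.
#if 0
static bufsize example_round_to_quantum(bufsize size) {
  // With SizeQuant == 8: 1..8 -> 8, 9..16 -> 16, and so on.
  return (size + (SizeQuant - 1)) & ~((bufsize)SizeQuant - 1);
}
#endif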
73
74 // Define this symbol to enable the bstats() function which calculates the
75 // total free space in the buffer pool, the largest available buffer, and the
76 // total space currently allocated.
77 #define BufStats 1
78
79 #ifdef KMP_DEBUG
80
81 // Define this symbol to enable the bpoold() function which dumps the buffers
82 // in a buffer pool.
83 #define BufDump 1
84
85 // Define this symbol to enable the bpoolv() function for validating a buffer
86 // pool.
87 #define BufValid 1
88
89 // Define this symbol to enable the bufdump() function which allows dumping the
90 // contents of an allocated or free buffer.
91 #define DumpData 1
92
93 #ifdef NOT_USED_NOW
94
95 // Wipe free buffers to a guaranteed pattern of garbage to trip up miscreants
96 // who attempt to use pointers into released buffers.
97 #define FreeWipe 1
98
99 // Use a best fit algorithm when searching for space for an allocation request.
100 // This uses memory more efficiently, but allocation will be much slower.
101 #define BestFit 1
102
103 #endif /* NOT_USED_NOW */
104 #endif /* KMP_DEBUG */
105
106 static bufsize bget_bin_size[] = {
107 0,
108 // 1 << 6, /* .5 Cache line */
109 1 << 7, /* 1 Cache line, new */
110 1 << 8, /* 2 Cache lines */
111 1 << 9, /* 4 Cache lines, new */
112 1 << 10, /* 8 Cache lines */
113 1 << 11, /* 16 Cache lines, new */
114 1 << 12, 1 << 13, /* new */
115 1 << 14, 1 << 15, /* new */
116 1 << 16, 1 << 17, 1 << 18, 1 << 19, 1 << 20, /* 1MB */
117 1 << 21, /* 2MB */
118 1 << 22, /* 4MB */
119 1 << 23, /* 8MB */
120 1 << 24, /* 16MB */
121 1 << 25, /* 32MB */
122 };
123
124 #define MAX_BGET_BINS (int)(sizeof(bget_bin_size) / sizeof(bufsize))
125
126 struct bfhead;
127
128 // Declare the interface, including the requested buffer size type, bufsize.
129
130 /* Queue links */
131 typedef struct qlinks {
132 struct bfhead *flink; /* Forward link */
133 struct bfhead *blink; /* Backward link */
134 } qlinks_t;
135
136 /* Header in allocated and free buffers */
137 typedef struct bhead2 {
138 kmp_info_t *bthr; /* The thread which owns the buffer pool */
139 bufsize prevfree; /* Relative link back to previous free buffer in memory or
140 0 if previous buffer is allocated. */
141 bufsize bsize; /* Buffer size: positive if free, negative if allocated. */
142 } bhead2_t;
143
144 /* Make sure the bhead structure is a multiple of SizeQuant in size. */
145 typedef union bhead {
146 KMP_ALIGN(SizeQuant)
147 AlignType b_align;
148 char b_pad[sizeof(bhead2_t) + (SizeQuant - (sizeof(bhead2_t) % SizeQuant))];
149 bhead2_t bb;
150 } bhead_t;
151 #define BH(p) ((bhead_t *)(p))
152
153 /* Header in directly allocated buffers (by acqfcn) */
154 typedef struct bdhead {
155 bufsize tsize; /* Total size, including overhead */
156 bhead_t bh; /* Common header */
157 } bdhead_t;
158 #define BDH(p) ((bdhead_t *)(p))
159
160 /* Header in free buffers */
161 typedef struct bfhead {
162 bhead_t bh; /* Common allocated/free header */
163 qlinks_t ql; /* Links on free list */
164 } bfhead_t;
165 #define BFH(p) ((bfhead_t *)(p))
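// A minimal sketch of how these headers relate to the pointer handed out to
// the caller: the user area begins immediately after the bhead_t, so the
// allocator recovers the header by stepping back sizeof(bhead_t) bytes (or
// sizeof(bdhead_t) for directly acquired buffers), exactly as brel() and
// bgetr() do below.
#if 0
static bhead_t *example_header_of(void *user_buf) {
  return BH(((char *)user_buf) - sizeof(bhead_t));
}
#endif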
166
167 typedef struct thr_data {
168 bfhead_t freelist[MAX_BGET_BINS];
169 #if BufStats
170 size_t totalloc; /* Total space currently allocated */
171 long numget, numrel; /* Number of bget() and brel() calls */
172 long numpblk; /* Number of pool blocks */
173 long numpget, numprel; /* Number of block gets and rels */
174 long numdget, numdrel; /* Number of direct gets and rels */
175 #endif /* BufStats */
176
177 /* Automatic expansion block management functions */
178 bget_compact_t compfcn;
179 bget_acquire_t acqfcn;
180 bget_release_t relfcn;
181
182 bget_mode_t mode; /* what allocation mode to use? */
183
184 bufsize exp_incr; /* Expansion block size */
185 bufsize pool_len; /* 0: no bpool calls have been made
186 -1: not all pool blocks are the same size
187 >0: (common) block size for all bpool calls made so far
188 */
189 bfhead_t *last_pool; /* Last pool owned by this thread (delay deallocation) */
190 } thr_data_t;
191
192 /* Minimum allocation quantum: */
193 #define QLSize (sizeof(qlinks_t))
194 #define SizeQ ((SizeQuant > QLSize) ? SizeQuant : QLSize)
195 #define MaxSize \
196 (bufsize)( \
197 ~(((bufsize)(1) << (sizeof(bufsize) * CHAR_BIT - 1)) | (SizeQuant - 1)))
198 // Maximum for the requested size.
199
200 /* End sentinel: value placed in bsize field of dummy block delimiting
201 end of pool block. The most negative number which will fit in a
202 bufsize, defined in a way that the compiler will accept. */
203
204 #define ESent \
205 ((bufsize)(-(((((bufsize)1) << ((int)sizeof(bufsize) * 8 - 2)) - 1) * 2) - 2))
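// Worked example of the ESent expression for a 64-bit bufsize: the shift
// builds 2^62, so ((2^62 - 1) * 2) == 2^63 - 2 (still representable), and
// -(2^63 - 2) - 2 == -2^63, the most negative kmp_int64. Building it this
// way avoids both the unrepresentable literal -(2^63) and signed overflow
// in every intermediate step.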
206
207 /* Thread Data management routines */
208 static int bget_get_bin(bufsize size) {
209 // binary chop bins
210 int lo = 0, hi = MAX_BGET_BINS - 1;
211
212 KMP_DEBUG_ASSERT(size > 0);
213
214 while ((hi - lo) > 1) {
215 int mid = (lo + hi) >> 1;
216 if (size < bget_bin_size[mid])
217 hi = mid - 1;
218 else
219 lo = mid;
220 }
221
222 KMP_DEBUG_ASSERT((lo >= 0) && (lo < MAX_BGET_BINS));
223
224 return lo;
225 }
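// Illustrative trace (excluded from the build): because the chop shrinks the
// range with hi = mid - 1, the result is a conservative lower bound rather
// than always the tightest bin. For size == 1000 the loop returns bin 2
// (256 bytes) even though bin 3 (512 bytes) also satisfies
// bget_bin_size[bin] <= size. That is safe here: bget() scans bins upward
// from the returned index, so an undershoot only costs extra search time.
#if 0
static void example_bin_lookup(void) {
  int bin = bget_get_bin((bufsize)1000);
  KMP_DEBUG_ASSERT(bget_bin_size[bin] <= 1000); // the invariant that matters
}
#endif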
226
227 static void set_thr_data(kmp_info_t *th) {
228 int i;
229 thr_data_t *data;
230
231 data = (thr_data_t *)((!th->th.th_local.bget_data)
232 ? __kmp_allocate(sizeof(*data))
233 : th->th.th_local.bget_data);
234
235 memset(data, '\0', sizeof(*data));
236
237 for (i = 0; i < MAX_BGET_BINS; ++i) {
238 data->freelist[i].ql.flink = &data->freelist[i];
239 data->freelist[i].ql.blink = &data->freelist[i];
240 }
241
242 th->th.th_local.bget_data = data;
243 th->th.th_local.bget_list = 0;
244 #if !USE_CMP_XCHG_FOR_BGET
245 #ifdef USE_QUEUING_LOCK_FOR_BGET
246 __kmp_init_lock(&th->th.th_local.bget_lock);
247 #else
248 __kmp_init_bootstrap_lock(&th->th.th_local.bget_lock);
249 #endif /* USE_QUEUING_LOCK_FOR_BGET */
250 #endif /* ! USE_CMP_XCHG_FOR_BGET */
251 }
252
253 static thr_data_t *get_thr_data(kmp_info_t *th) {
254 thr_data_t *data;
255
256 data = (thr_data_t *)th->th.th_local.bget_data;
257
258 KMP_DEBUG_ASSERT(data != 0);
259
260 return data;
261 }
262
263 /* Walk the free list and release the enqueued buffers */
264 static void __kmp_bget_dequeue(kmp_info_t *th) {
265 void *p = TCR_SYNC_PTR(th->th.th_local.bget_list);
266
267 if (p != 0) {
268 #if USE_CMP_XCHG_FOR_BGET
269 {
270 volatile void *old_value = TCR_SYNC_PTR(th->th.th_local.bget_list);
271 while (!KMP_COMPARE_AND_STORE_PTR(&th->th.th_local.bget_list,
272 CCAST(void *, old_value), nullptr)) {
273 KMP_CPU_PAUSE();
274 old_value = TCR_SYNC_PTR(th->th.th_local.bget_list);
275 }
276 p = CCAST(void *, old_value);
277 }
278 #else /* ! USE_CMP_XCHG_FOR_BGET */
279 #ifdef USE_QUEUING_LOCK_FOR_BGET
280 __kmp_acquire_lock(&th->th.th_local.bget_lock, __kmp_gtid_from_thread(th));
281 #else
282 __kmp_acquire_bootstrap_lock(&th->th.th_local.bget_lock);
283 #endif /* USE_QUEUING_LOCK_FOR_BGET */
284
285 p = (void *)th->th.th_local.bget_list;
286 th->th.th_local.bget_list = 0;
287
288 #ifdef USE_QUEUING_LOCK_FOR_BGET
289 __kmp_release_lock(&th->th.th_local.bget_lock, __kmp_gtid_from_thread(th));
290 #else
291 __kmp_release_bootstrap_lock(&th->th.th_local.bget_lock);
292 #endif
293 #endif /* USE_CMP_XCHG_FOR_BGET */
294
295 /* Check again to make sure the list is not empty */
296 while (p != 0) {
297 void *buf = p;
298 bfhead_t *b = BFH(((char *)p) - sizeof(bhead_t));
299
300 KMP_DEBUG_ASSERT(b->bh.bb.bsize != 0);
301 KMP_DEBUG_ASSERT(((kmp_uintptr_t)TCR_PTR(b->bh.bb.bthr) & ~1) ==
302 (kmp_uintptr_t)th); // clear possible mark
303 KMP_DEBUG_ASSERT(b->ql.blink == 0);
304
305 p = (void *)b->ql.flink;
306
307 brel(th, buf);
308 }
309 }
310 }
311
312 /* Chain together the free buffers by using the thread owner field */
313 static void __kmp_bget_enqueue(kmp_info_t *th, void *buf
314 #ifdef USE_QUEUING_LOCK_FOR_BGET
315 ,
316 kmp_int32 rel_gtid
317 #endif
318 ) {
319 bfhead_t *b = BFH(((char *)buf) - sizeof(bhead_t));
320
321 KMP_DEBUG_ASSERT(b->bh.bb.bsize != 0);
322 KMP_DEBUG_ASSERT(((kmp_uintptr_t)TCR_PTR(b->bh.bb.bthr) & ~1) ==
323 (kmp_uintptr_t)th); // clear possible mark
324
325 b->ql.blink = 0;
326
327 KC_TRACE(10, ("__kmp_bget_enqueue: moving buffer to T#%d list\n",
328 __kmp_gtid_from_thread(th)));
329
330 #if USE_CMP_XCHG_FOR_BGET
331 {
332 volatile void *old_value = TCR_PTR(th->th.th_local.bget_list);
333 /* the next pointer must be set before setting bget_list to buf to avoid
334 exposing a broken list to other threads, even for an instant. */
335 b->ql.flink = BFH(CCAST(void *, old_value));
336
337 while (!KMP_COMPARE_AND_STORE_PTR(&th->th.th_local.bget_list,
338 CCAST(void *, old_value), buf)) {
339 KMP_CPU_PAUSE();
340 old_value = TCR_PTR(th->th.th_local.bget_list);
341 /* the next pointer must be set before setting bget_list to buf to avoid
342 exposing a broken list to other threads, even for an instant. */
343 b->ql.flink = BFH(CCAST(void *, old_value));
344 }
345 }
346 #else /* ! USE_CMP_XCHG_FOR_BGET */
347 #ifdef USE_QUEUING_LOCK_FOR_BGET
348 __kmp_acquire_lock(&th->th.th_local.bget_lock, rel_gtid);
349 #else
350 __kmp_acquire_bootstrap_lock(&th->th.th_local.bget_lock);
351 #endif
352
353 b->ql.flink = BFH(th->th.th_local.bget_list);
354 th->th.th_local.bget_list = (void *)buf;
355
356 #ifdef USE_QUEUING_LOCK_FOR_BGET
357 __kmp_release_lock(&th->th.th_local.bget_lock, rel_gtid);
358 #else
359 __kmp_release_bootstrap_lock(&th->th.th_local.bget_lock);
360 #endif
361 #endif /* USE_CMP_XCHG_FOR_BGET */
362 }
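// The enqueue above is the classic lock-free "push onto a singly linked
// list" pattern. A generic sketch of the same idea in plain C++11 atomics
// (illustrative only; the runtime uses the KMP_* wrappers above instead):
#if 0
#include <atomic>
struct example_node { example_node *next; };
static void example_push(std::atomic<example_node *> &head, example_node *n) {
  example_node *old = head.load(std::memory_order_relaxed);
  do {
    n->next = old; // link first so readers never observe a broken list
  } while (!head.compare_exchange_weak(old, n, std::memory_order_release,
                                       std::memory_order_relaxed));
}
#endif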
363
364 /* insert buffer back onto a new freelist */
365 static void __kmp_bget_insert_into_freelist(thr_data_t *thr, bfhead_t *b) {
366 int bin;
367
368 KMP_DEBUG_ASSERT(((size_t)b) % SizeQuant == 0);
369 KMP_DEBUG_ASSERT(b->bh.bb.bsize % SizeQuant == 0);
370
371 bin = bget_get_bin(b->bh.bb.bsize);
372
373 KMP_DEBUG_ASSERT(thr->freelist[bin].ql.blink->ql.flink ==
374 &thr->freelist[bin]);
375 KMP_DEBUG_ASSERT(thr->freelist[bin].ql.flink->ql.blink ==
376 &thr->freelist[bin]);
377
378 b->ql.flink = &thr->freelist[bin];
379 b->ql.blink = thr->freelist[bin].ql.blink;
380
381 thr->freelist[bin].ql.blink = b;
382 b->ql.blink->ql.flink = b;
383 }
384
385 /* unlink the buffer from the old freelist */
386 static void __kmp_bget_remove_from_freelist(bfhead_t *b) {
387 KMP_DEBUG_ASSERT(b->ql.blink->ql.flink == b);
388 KMP_DEBUG_ASSERT(b->ql.flink->ql.blink == b);
389
390 b->ql.blink->ql.flink = b->ql.flink;
391 b->ql.flink->ql.blink = b->ql.blink;
392 }
393
394 /* GET STATS -- check info on free list */
395 static void bcheck(kmp_info_t *th, bufsize *max_free, bufsize *total_free) {
396 thr_data_t *thr = get_thr_data(th);
397 int bin;
398
399 *total_free = *max_free = 0;
400
401 for (bin = 0; bin < MAX_BGET_BINS; ++bin) {
402 bfhead_t *b, *best;
403
404 best = &thr->freelist[bin];
405 b = best->ql.flink;
406
407 while (b != &thr->freelist[bin]) {
408 *total_free += (b->bh.bb.bsize - sizeof(bhead_t));
409 if ((best == &thr->freelist[bin]) || (b->bh.bb.bsize < best->bh.bb.bsize))
410 best = b;
411
412 /* Link to next buffer */
413 b = b->ql.flink;
414 }
415
416 if (*max_free < best->bh.bb.bsize)
417 *max_free = best->bh.bb.bsize;
418 }
419
420 if (*max_free > (bufsize)sizeof(bhead_t))
421 *max_free -= sizeof(bhead_t);
422 }
423
424 /* BGET -- Allocate a buffer. */
425 static void *bget(kmp_info_t *th, bufsize requested_size) {
426 thr_data_t *thr = get_thr_data(th);
427 bufsize size = requested_size;
428 bfhead_t *b;
429 void *buf;
430 int compactseq = 0;
431 int use_blink = 0;
432 /* For BestFit */
433 bfhead_t *best;
434
435 if (size < 0 || size + sizeof(bhead_t) > MaxSize) {
436 return NULL;
437 }
438
439 __kmp_bget_dequeue(th); /* Release any queued buffers */
440
441 if (size < (bufsize)SizeQ) { // Need at least room for the queue links.
442 size = SizeQ;
443 }
444 #if defined(SizeQuant) && (SizeQuant > 1)
445 size = (size + (SizeQuant - 1)) & (~(SizeQuant - 1));
446 #endif
447
448 size += sizeof(bhead_t); // Add overhead in allocated buffer to size required.
449 KMP_DEBUG_ASSERT(size >= 0);
450 KMP_DEBUG_ASSERT(size % SizeQuant == 0);
451
452 use_blink = (thr->mode == bget_mode_lifo);
453
454 /* If a compact function was provided in the call to bectl(), wrap
455 a loop around the allocation process to allow compaction to
456 intervene in case we don't find a suitable buffer in the chain. */
457
458 for (;;) {
459 int bin;
460
461 for (bin = bget_get_bin(size); bin < MAX_BGET_BINS; ++bin) {
462 /* Link to next buffer */
463 b = (use_blink ? thr->freelist[bin].ql.blink
464 : thr->freelist[bin].ql.flink);
465
466 if (thr->mode == bget_mode_best) {
467 best = &thr->freelist[bin];
468
469 /* Scan the free list searching for the first buffer big enough
470 to hold the requested size buffer. */
471 while (b != &thr->freelist[bin]) {
472 if (b->bh.bb.bsize >= (bufsize)size) {
473 if ((best == &thr->freelist[bin]) ||
474 (b->bh.bb.bsize < best->bh.bb.bsize)) {
475 best = b;
476 }
477 }
478
479 /* Link to next buffer */
480 b = (use_blink ? b->ql.blink : b->ql.flink);
481 }
482 b = best;
483 }
484
485 while (b != &thr->freelist[bin]) {
486 if ((bufsize)b->bh.bb.bsize >= (bufsize)size) {
487
488 // Buffer is big enough to satisfy the request. Allocate it to the
489 // caller. We must decide whether the buffer is large enough to split
490 // into the part given to the caller and a free buffer that remains
491 // on the free list, or whether the entire buffer should be removed
492 // from the free list and given to the caller in its entirety. We
493 // only split the buffer if enough room remains for a header plus the
494 // minimum quantum of allocation.
495 if ((b->bh.bb.bsize - (bufsize)size) >
496 (bufsize)(SizeQ + (sizeof(bhead_t)))) {
497 bhead_t *ba, *bn;
498
499 ba = BH(((char *)b) + (b->bh.bb.bsize - (bufsize)size));
500 bn = BH(((char *)ba) + size);
501
502 KMP_DEBUG_ASSERT(bn->bb.prevfree == b->bh.bb.bsize);
503
504 /* Subtract size from length of free block. */
505 b->bh.bb.bsize -= (bufsize)size;
506
507 /* Link allocated buffer to the previous free buffer. */
508 ba->bb.prevfree = b->bh.bb.bsize;
509
510 /* Plug negative size into user buffer. */
511 ba->bb.bsize = -size;
512
513 /* Mark this buffer as owned by this thread. */
514 TCW_PTR(ba->bb.bthr,
515 th); // not an allocated address (do not mark it)
516 /* Mark buffer after this one not preceded by free block. */
517 bn->bb.prevfree = 0;
518
519 // unlink buffer from old freelist, and reinsert into new freelist
520 __kmp_bget_remove_from_freelist(b);
521 __kmp_bget_insert_into_freelist(thr, b);
522 #if BufStats
523 thr->totalloc += (size_t)size;
524 thr->numget++; /* Increment number of bget() calls */
525 #endif
526 buf = (void *)((((char *)ba) + sizeof(bhead_t)));
527 KMP_DEBUG_ASSERT(((size_t)buf) % SizeQuant == 0);
528 return buf;
529 } else {
530 bhead_t *ba;
531
532 ba = BH(((char *)b) + b->bh.bb.bsize);
533
534 KMP_DEBUG_ASSERT(ba->bb.prevfree == b->bh.bb.bsize);
535
536 /* The buffer isn't big enough to split. Give the whole
537 shebang to the caller and remove it from the free list. */
538
539 __kmp_bget_remove_from_freelist(b);
540 #if BufStats
541 thr->totalloc += (size_t)b->bh.bb.bsize;
542 thr->numget++; /* Increment number of bget() calls */
543 #endif
544 /* Negate size to mark buffer allocated. */
545 b->bh.bb.bsize = -(b->bh.bb.bsize);
546
547 /* Mark this buffer as owned by this thread. */
548 TCW_PTR(ba->bb.bthr, th); // not an allocated address (do not mark)
549 /* Zero the back pointer in the next buffer in memory
550 to indicate that this buffer is allocated. */
551 ba->bb.prevfree = 0;
552
553 /* Give user buffer starting at queue links. */
554 buf = (void *)&(b->ql);
555 KMP_DEBUG_ASSERT(((size_t)buf) % SizeQuant == 0);
556 return buf;
557 }
558 }
559
560 /* Link to next buffer */
561 b = (use_blink ? b->ql.blink : b->ql.flink);
562 }
563 }
564
565 /* We failed to find a buffer. If there's a compact function defined,
566 notify it of the size requested. If it returns TRUE, try the allocation
567 again. */
568
569 if ((thr->compfcn == 0) || (!(*thr->compfcn)(size, ++compactseq))) {
570 break;
571 }
572 }
573
574 /* No buffer available with requested size free. */
575
576 /* Don't give up yet -- look in the reserve supply. */
577 if (thr->acqfcn != 0) {
578 if (size > (bufsize)(thr->exp_incr - sizeof(bhead_t))) {
579 /* Request is too large to fit in a single expansion block.
580 Try to satisfy it by a direct buffer acquisition. */
581 bdhead_t *bdh;
582
583 size += sizeof(bdhead_t) - sizeof(bhead_t);
584
585 KE_TRACE(10, ("%%%%%% MALLOC( %d )\n", (int)size));
586
587 /* richryan */
588 bdh = BDH((*thr->acqfcn)((bufsize)size));
589 if (bdh != NULL) {
590
591 // Mark the buffer special by setting size field of its header to zero.
592 bdh->bh.bb.bsize = 0;
593
594 /* Mark this buffer as owned by this thread. */
595 TCW_PTR(bdh->bh.bb.bthr, th); // don't mark buffer as allocated,
596 // because direct buffer never goes to free list
597 bdh->bh.bb.prevfree = 0;
598 bdh->tsize = size;
599 #if BufStats
600 thr->totalloc += (size_t)size;
601 thr->numget++; /* Increment number of bget() calls */
602 thr->numdget++; /* Direct bget() call count */
603 #endif
604 buf = (void *)(bdh + 1);
605 KMP_DEBUG_ASSERT(((size_t)buf) % SizeQuant == 0);
606 return buf;
607 }
608
609 } else {
610
611 /* Try to obtain a new expansion block */
612 void *newpool;
613
614 KE_TRACE(10, ("%%%%%% MALLOCB( %d )\n", (int)thr->exp_incr));
615
616 /* richryan */
617 newpool = (*thr->acqfcn)((bufsize)thr->exp_incr);
618 KMP_DEBUG_ASSERT(((size_t)newpool) % SizeQuant == 0);
619 if (newpool != NULL) {
620 bpool(th, newpool, thr->exp_incr);
621 buf = bget(
622 th, requested_size); /* This can't, I say, can't get into a loop. */
623 return buf;
624 }
625 }
626 }
627
628 /* Still no buffer available */
629
630 return NULL;
631 }
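// Worked split example (assuming the 64-bit layout where sizeof(bhead_t) ==
// 32 and SizeQ == 16): a 256-byte free block b serving a rounded request of
// size == 96 (64 payload bytes + 32-byte header) leaves 256 - 96 == 160
// bytes, which exceeds SizeQ + sizeof(bhead_t) == 48, so the block splits.
// The caller's buffer ba is carved from the high end at b + 160 with
// bsize == -96, block b keeps bsize == 160 and is re-binned for its new
// size, and the header that follows at b + 256 records prevfree = 0.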
632
633 /* BGETZ -- Allocate a buffer and clear its contents to zero. We clear
634 the entire contents of the buffer to zero, not just the
635 region requested by the caller. */
636
637 static void *bgetz(kmp_info_t *th, bufsize size) {
638 char *buf = (char *)bget(th, size);
639
640 if (buf != NULL) {
641 bhead_t *b;
642 bufsize rsize;
643
644 b = BH(buf - sizeof(bhead_t));
645 rsize = -(b->bb.bsize);
646 if (rsize == 0) {
647 bdhead_t *bd;
648
649 bd = BDH(buf - sizeof(bdhead_t));
650 rsize = bd->tsize - (bufsize)sizeof(bdhead_t);
651 } else {
652 rsize -= sizeof(bhead_t);
653 }
654
655 KMP_DEBUG_ASSERT(rsize >= size);
656
657 (void)memset(buf, 0, (bufsize)rsize);
658 }
659 return ((void *)buf);
660 }
661
662 /* BGETR -- Reallocate a buffer. This is a minimal implementation,
663 simply in terms of brel() and bget(). It could be
664 enhanced to allow the buffer to grow into adjacent free
665 blocks and to avoid moving data unnecessarily. */
666
667 static void *bgetr(kmp_info_t *th, void *buf, bufsize size) {
668 void *nbuf;
669 bufsize osize; /* Old size of buffer */
670 bhead_t *b;
671
672 nbuf = bget(th, size);
673 if (nbuf == NULL) { /* Acquire new buffer */
674 return NULL;
675 }
676 if (buf == NULL) {
677 return nbuf;
678 }
679 b = BH(((char *)buf) - sizeof(bhead_t));
680 osize = -b->bb.bsize;
681 if (osize == 0) {
682 /* Buffer acquired directly through acqfcn. */
683 bdhead_t *bd;
684
685 bd = BDH(((char *)buf) - sizeof(bdhead_t));
686 osize = bd->tsize - (bufsize)sizeof(bdhead_t);
687 } else {
688 osize -= sizeof(bhead_t);
689 }
690
691 KMP_DEBUG_ASSERT(osize > 0);
692
693 (void)KMP_MEMCPY((char *)nbuf, (char *)buf, /* Copy the data */
694 (size_t)((size < osize) ? size : osize));
695 brel(th, buf);
696
697 return nbuf;
698 }
699
700 /* BREL -- Release a buffer. */
701 static void brel(kmp_info_t *th, void *buf) {
702 thr_data_t *thr = get_thr_data(th);
703 bfhead_t *b, *bn;
704 kmp_info_t *bth;
705
706 KMP_DEBUG_ASSERT(buf != NULL);
707 KMP_DEBUG_ASSERT(((size_t)buf) % SizeQuant == 0);
708
709 b = BFH(((char *)buf) - sizeof(bhead_t));
710
711 if (b->bh.bb.bsize == 0) { /* Directly-acquired buffer? */
712 bdhead_t *bdh;
713
714 bdh = BDH(((char *)buf) - sizeof(bdhead_t));
715 KMP_DEBUG_ASSERT(b->bh.bb.prevfree == 0);
716 #if BufStats
717 thr->totalloc -= (size_t)bdh->tsize;
718 thr->numdrel++; /* Number of direct releases */
719 thr->numrel++; /* Increment number of brel() calls */
720 #endif /* BufStats */
721 #ifdef FreeWipe
722 (void)memset((char *)buf, 0x55, (size_t)(bdh->tsize - sizeof(bdhead_t)));
723 #endif /* FreeWipe */
724
725 KE_TRACE(10, ("%%%%%% FREE( %p )\n", (void *)bdh));
726
727 KMP_DEBUG_ASSERT(thr->relfcn != 0);
728 (*thr->relfcn)((void *)bdh); /* Release it directly. */
729 return;
730 }
731
732 bth = (kmp_info_t *)((kmp_uintptr_t)TCR_PTR(b->bh.bb.bthr) &
733 ~1); // clear possible mark before comparison
734 if (bth != th) {
735 /* Add this buffer to be released by the owning thread later */
736 __kmp_bget_enqueue(bth, buf
737 #ifdef USE_QUEUING_LOCK_FOR_BGET
738 ,
739 __kmp_gtid_from_thread(th)
740 #endif
741 );
742 return;
743 }
744
745 /* Buffer size must be negative, indicating that the buffer is allocated. */
746 if (b->bh.bb.bsize >= 0) {
747 bn = NULL;
748 }
749 KMP_DEBUG_ASSERT(b->bh.bb.bsize < 0);
750
751 /* Back pointer in next buffer must be zero, indicating the same thing: */
752
753 KMP_DEBUG_ASSERT(BH((char *)b - b->bh.bb.bsize)->bb.prevfree == 0);
754
755 #if BufStats
756 thr->numrel++; /* Increment number of brel() calls */
757 thr->totalloc += (size_t)b->bh.bb.bsize;
758 #endif
759
760 /* If the back link is nonzero, the previous buffer is free. */
761
762 if (b->bh.bb.prevfree != 0) {
763 /* The previous buffer is free. Consolidate this buffer with it by adding
764 the length of this buffer to the previous free buffer. Note that we
765 subtract the size in the buffer being released, since it's negative to
766 indicate that the buffer is allocated. */
767 bufsize size = b->bh.bb.bsize;
768
769 /* Make the previous buffer the one we're working on. */
770 KMP_DEBUG_ASSERT(BH((char *)b - b->bh.bb.prevfree)->bb.bsize ==
771 b->bh.bb.prevfree);
772 b = BFH(((char *)b) - b->bh.bb.prevfree);
773 b->bh.bb.bsize -= size;
774
775 /* unlink the buffer from the old freelist */
776 __kmp_bget_remove_from_freelist(b);
777 } else {
778 /* The previous buffer is allocated. Mark this buffer's size as positive
779 (i.e. free) and fall through to place the buffer on the free list as an
780 isolated free block. */
781 b->bh.bb.bsize = -b->bh.bb.bsize;
782 }
783
784 /* insert buffer back onto a new freelist */
785 __kmp_bget_insert_into_freelist(thr, b);
786
787 /* Now we look at the next buffer in memory, located by advancing from
788 the start of this buffer by its size, to see if that buffer is
789 free. If it is, we combine this buffer with the next one in
790 memory, dechaining the second buffer from the free list. */
791 bn = BFH(((char *)b) + b->bh.bb.bsize);
792 if (bn->bh.bb.bsize > 0) {
793
794 /* The buffer is free. Remove it from the free list and add
795 its size to that of our buffer. */
796 KMP_DEBUG_ASSERT(BH((char *)bn + bn->bh.bb.bsize)->bb.prevfree ==
797 bn->bh.bb.bsize);
798
799 __kmp_bget_remove_from_freelist(bn);
800
801 b->bh.bb.bsize += bn->bh.bb.bsize;
802
803 /* unlink the buffer from the old freelist, and reinsert it into the new
804 * freelist */
805 __kmp_bget_remove_from_freelist(b);
806 __kmp_bget_insert_into_freelist(thr, b);
807
808 /* Finally, advance to the buffer that follows the newly
809 consolidated free block. We must set its backpointer to the
810 head of the consolidated free block. We know the next block
811 must be an allocated block because the process of recombination
812 guarantees that two free blocks will never be contiguous in
813 memory. */
814 bn = BFH(((char *)b) + b->bh.bb.bsize);
815 }
816 #ifdef FreeWipe
817 (void)memset(((char *)b) + sizeof(bfhead_t), 0x55,
818 (size_t)(b->bh.bb.bsize - sizeof(bfhead_t)));
819 #endif
820 KMP_DEBUG_ASSERT(bn->bh.bb.bsize < 0);
821
822 /* The next buffer is allocated. Set the backpointer in it to point
823 to this buffer, the previous free buffer in memory. */
824
825 bn->bh.bb.prevfree = b->bh.bb.bsize;
826
827 /* If a block-release function is defined, and this free buffer
828 constitutes the entire block, release it. Note that pool_len
829 is defined in such a way that the test will fail unless all
830 pool blocks are the same size. */
831 if (thr->relfcn != 0 &&
832 b->bh.bb.bsize == (bufsize)(thr->pool_len - sizeof(bhead_t))) {
833 #if BufStats
834 if (thr->numpblk !=
835 1) { /* Do not release the last buffer until finalization time */
836 #endif
837
838 KMP_DEBUG_ASSERT(b->bh.bb.prevfree == 0);
839 KMP_DEBUG_ASSERT(BH((char *)b + b->bh.bb.bsize)->bb.bsize == ESent);
840 KMP_DEBUG_ASSERT(BH((char *)b + b->bh.bb.bsize)->bb.prevfree ==
841 b->bh.bb.bsize);
842
843 /* Unlink the buffer from the free list */
844 __kmp_bget_remove_from_freelist(b);
845
846 KE_TRACE(10, ("%%%%%% FREE( %p )\n", (void *)b));
847
848 (*thr->relfcn)(b);
849 #if BufStats
850 thr->numprel++; /* Nr of expansion block releases */
851 thr->numpblk--; /* Total number of blocks */
852 KMP_DEBUG_ASSERT(thr->numpblk == thr->numpget - thr->numprel);
853
854 // avoid leaving stale last_pool pointer around if it is being dealloced
855 if (thr->last_pool == b)
856 thr->last_pool = 0;
857 } else {
858 thr->last_pool = b;
859 }
860 #endif /* BufStats */
861 }
862 }
863
864 /* BECTL -- Establish automatic pool expansion control */
865 static void bectl(kmp_info_t *th, bget_compact_t compact,
866 bget_acquire_t acquire, bget_release_t release,
867 bufsize pool_incr) {
868 thr_data_t *thr = get_thr_data(th);
869
870 thr->compfcn = compact;
871 thr->acqfcn = acquire;
872 thr->relfcn = release;
873 thr->exp_incr = pool_incr;
874 }
875
876 /* BPOOL -- Add a region of memory to the buffer pool. */
877 static void bpool(kmp_info_t *th, void *buf, bufsize len) {
878 /* int bin = 0; */
879 thr_data_t *thr = get_thr_data(th);
880 bfhead_t *b = BFH(buf);
881 bhead_t *bn;
882
883 __kmp_bget_dequeue(th); /* Release any queued buffers */
884
885 #ifdef SizeQuant
886 len &= ~(SizeQuant - 1);
887 #endif
888 if (thr->pool_len == 0) {
889 thr->pool_len = len;
890 } else if (len != thr->pool_len) {
891 thr->pool_len = -1;
892 }
893 #if BufStats
894 thr->numpget++; /* Number of block acquisitions */
895 thr->numpblk++; /* Number of blocks total */
896 KMP_DEBUG_ASSERT(thr->numpblk == thr->numpget - thr->numprel);
897 #endif /* BufStats */
898
899 /* Since the block is initially occupied by a single free buffer,
900 it had better not be (much) larger than the largest buffer
901 whose size we can store in bhead.bb.bsize. */
902 KMP_DEBUG_ASSERT(len - sizeof(bhead_t) <= -((bufsize)ESent + 1));
903
904 /* Clear the backpointer at the start of the block to indicate that
905 there is no free block prior to this one. That blocks
906 recombination when the first block in memory is released. */
907 b->bh.bb.prevfree = 0;
908
909 /* Create a dummy allocated buffer at the end of the pool. This dummy
910 buffer is seen when a buffer at the end of the pool is released and
911 blocks recombination of the last buffer with the dummy buffer at
912 the end. The length in the dummy buffer is set to the largest
913 negative number to denote the end of the pool for diagnostic
914 routines (this specific value is not counted on by the actual
915 allocation and release functions). */
916 len -= sizeof(bhead_t);
917 b->bh.bb.bsize = (bufsize)len;
918 /* Set the owner of this buffer */
919 TCW_PTR(b->bh.bb.bthr,
920 (kmp_info_t *)((kmp_uintptr_t)th |
921 1)); // mark the buffer as allocated address
922
923 /* Chain the new block to the free list. */
924 __kmp_bget_insert_into_freelist(thr, b);
925
926 #ifdef FreeWipe
927 (void)memset(((char *)b) + sizeof(bfhead_t), 0x55,
928 (size_t)(len - sizeof(bfhead_t)));
929 #endif
930 bn = BH(((char *)b) + len);
931 bn->bb.prevfree = (bufsize)len;
932 /* Definition of ESent assumes two's complement! */
933 KMP_DEBUG_ASSERT((~0) == -1 && (bn != 0));
934
935 bn->bb.bsize = ESent;
936 }
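// Resulting layout of a pool of len bytes (after the SizeQuant masking),
// sketched for reference:
//
//   b:        bfhead_t, bsize = len - sizeof(bhead_t), prevfree = 0
//             ... free payload (0x55-wiped when FreeWipe is defined) ...
//   b + len - sizeof(bhead_t):
//             dummy bhead_t, bsize = ESent, prevfree = len - sizeof(bhead_t)
//
// The ESent sentinel is what stops brel() from coalescing past the end of
// the pool block.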
937
938 /* BFREED -- Dump the free lists for this thread. */
939 static void bfreed(kmp_info_t *th) {
940 int bin = 0, count = 0;
941 int gtid = __kmp_gtid_from_thread(th);
942 thr_data_t *thr = get_thr_data(th);
943
944 #if BufStats
945 __kmp_printf_no_lock("__kmp_printpool: T#%d total=%" KMP_UINT64_SPEC
946 " get=%" KMP_INT64_SPEC " rel=%" KMP_INT64_SPEC
947 " pblk=%" KMP_INT64_SPEC " pget=%" KMP_INT64_SPEC
948 " prel=%" KMP_INT64_SPEC " dget=%" KMP_INT64_SPEC
949 " drel=%" KMP_INT64_SPEC "\n",
950 gtid, (kmp_uint64)thr->totalloc, (kmp_int64)thr->numget,
951 (kmp_int64)thr->numrel, (kmp_int64)thr->numpblk,
952 (kmp_int64)thr->numpget, (kmp_int64)thr->numprel,
953 (kmp_int64)thr->numdget, (kmp_int64)thr->numdrel);
954 #endif
955
956 for (bin = 0; bin < MAX_BGET_BINS; ++bin) {
957 bfhead_t *b;
958
959 for (b = thr->freelist[bin].ql.flink; b != &thr->freelist[bin];
960 b = b->ql.flink) {
961 bufsize bs = b->bh.bb.bsize;
962
963 KMP_DEBUG_ASSERT(b->ql.blink->ql.flink == b);
964 KMP_DEBUG_ASSERT(b->ql.flink->ql.blink == b);
965 KMP_DEBUG_ASSERT(bs > 0);
966
967 count += 1;
968
969 __kmp_printf_no_lock(
970 "__kmp_printpool: T#%d Free block: 0x%p size %6ld bytes.\n", gtid, b,
971 (long)bs);
972 #ifdef FreeWipe
973 {
974 char *lerr = ((char *)b) + sizeof(bfhead_t);
975 if ((bs > sizeof(bfhead_t)) &&
976 ((*lerr != 0x55) ||
977 (memcmp(lerr, lerr + 1, (size_t)(bs - (sizeof(bfhead_t) + 1))) !=
978 0))) {
979 __kmp_printf_no_lock("__kmp_printpool: T#%d (Contents of above "
980 "free block have been overstored.)\n",
981 gtid);
982 }
983 }
984 #endif
985 }
986 }
987
988 if (count == 0)
989 __kmp_printf_no_lock("__kmp_printpool: T#%d No free blocks\n", gtid);
990 }
991
992 void __kmp_initialize_bget(kmp_info_t *th) {
993 KMP_DEBUG_ASSERT(SizeQuant >= sizeof(void *) && (th != 0));
994
995 set_thr_data(th);
996
997 bectl(th, (bget_compact_t)0, (bget_acquire_t)malloc, (bget_release_t)free,
998 (bufsize)__kmp_malloc_pool_incr);
999 }
1000
1001 void __kmp_finalize_bget(kmp_info_t *th) {
1002 thr_data_t *thr;
1003 bfhead_t *b;
1004
1005 KMP_DEBUG_ASSERT(th != 0);
1006
1007 #if BufStats
1008 thr = (thr_data_t *)th->th.th_local.bget_data;
1009 KMP_DEBUG_ASSERT(thr != NULL);
1010 b = thr->last_pool;
1011
1012 /* If a block-release function is defined, and this free buffer constitutes
1013 the entire block, release it. Note that pool_len is defined in such a way
1014 that the test will fail unless all pool blocks are the same size. */
1015
1016 // Deallocate the last pool if one exists because we no longer do it in brel()
1017 if (thr->relfcn != 0 && b != 0 && thr->numpblk != 0 &&
1018 b->bh.bb.bsize == (bufsize)(thr->pool_len - sizeof(bhead_t))) {
1019 KMP_DEBUG_ASSERT(b->bh.bb.prevfree == 0);
1020 KMP_DEBUG_ASSERT(BH((char *)b + b->bh.bb.bsize)->bb.bsize == ESent);
1021 KMP_DEBUG_ASSERT(BH((char *)b + b->bh.bb.bsize)->bb.prevfree ==
1022 b->bh.bb.bsize);
1023
1024 /* Unlink the buffer from the free list */
1025 __kmp_bget_remove_from_freelist(b);
1026
1027 KE_TRACE(10, ("%%%%%% FREE( %p )\n", (void *)b));
1028
1029 (*thr->relfcn)(b);
1030 thr->numprel++; /* Nr of expansion block releases */
1031 thr->numpblk--; /* Total number of blocks */
1032 KMP_DEBUG_ASSERT(thr->numpblk == thr->numpget - thr->numprel);
1033 }
1034 #endif /* BufStats */
1035
1036 /* Deallocate bget_data */
1037 if (th->th.th_local.bget_data != NULL) {
1038 __kmp_free(th->th.th_local.bget_data);
1039 th->th.th_local.bget_data = NULL;
1040 }
1041 }
1042
1043 void kmpc_set_poolsize(size_t size) {
1044 bectl(__kmp_get_thread(), (bget_compact_t)0, (bget_acquire_t)malloc,
1045 (bget_release_t)free, (bufsize)size);
1046 }
1047
1048 size_t kmpc_get_poolsize(void) {
1049 thr_data_t *p;
1050
1051 p = get_thr_data(__kmp_get_thread());
1052
1053 return p->exp_incr;
1054 }
1055
1056 void kmpc_set_poolmode(int mode) {
1057 thr_data_t *p;
1058
1059 if (mode == bget_mode_fifo || mode == bget_mode_lifo ||
1060 mode == bget_mode_best) {
1061 p = get_thr_data(__kmp_get_thread());
1062 p->mode = (bget_mode_t)mode;
1063 }
1064 }
1065
1066 int kmpc_get_poolmode(void) {
1067 thr_data_t *p;
1068
1069 p = get_thr_data(__kmp_get_thread());
1070
1071 return p->mode;
1072 }
1073
1074 void kmpc_get_poolstat(size_t *maxmem, size_t *allmem) {
1075 kmp_info_t *th = __kmp_get_thread();
1076 bufsize a, b;
1077
1078 __kmp_bget_dequeue(th); /* Release any queued buffers */
1079
1080 bcheck(th, &a, &b);
1081
1082 *maxmem = a;
1083 *allmem = b;
1084 }
1085
1086 void kmpc_poolprint(void) {
1087 kmp_info_t *th = __kmp_get_thread();
1088
1089 __kmp_bget_dequeue(th); /* Release any queued buffers */
1090
1091 bfreed(th);
1092 }
1093
1094 #endif // #if KMP_USE_BGET
1095
1096 void *kmpc_malloc(size_t size) {
1097 void *ptr;
1098 ptr = bget(__kmp_entry_thread(), (bufsize)(size + sizeof(ptr)));
1099 if (ptr != NULL) {
1100 // save allocated pointer just before one returned to user
1101 *(void **)ptr = ptr;
1102 ptr = (void **)ptr + 1;
1103 }
1104 return ptr;
1105 }
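// A minimal sketch of the hidden-pointer scheme shared by kmpc_malloc(),
// kmpc_calloc() and kmpc_realloc() (illustrative, excluded from the build):
// one extra pointer is allocated in front of the user area so kmpc_free()
// can recover the address bget() actually returned via *((void **)ptr - 1).
#if 0
static void example_hidden_pointer(void) {
  void *p = kmpc_malloc(64); // bget() really allocates 64 + sizeof(void *)
  void *raw = ((void **)p)[-1]; // the pointer bget() returned
  KMP_DEBUG_ASSERT((char *)raw + sizeof(void *) == (char *)p);
  kmpc_free(p);
}
#endif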
1106
1107 #define IS_POWER_OF_TWO(n) (((n) & ((n)-1)) == 0)
1108
1109 void *kmpc_aligned_malloc(size_t size, size_t alignment) {
1110 void *ptr;
1111 void *ptr_allocated;
1112 KMP_DEBUG_ASSERT(alignment < 32 * 1024); // Alignment should not be too big
1113 if (!IS_POWER_OF_TWO(alignment)) {
1114 // AC: do we need to issue a warning here?
1115 errno = EINVAL;
1116 return NULL;
1117 }
1118 size = size + sizeof(void *) + alignment;
1119 ptr_allocated = bget(__kmp_entry_thread(), (bufsize)size);
1120 if (ptr_allocated != NULL) {
1121 // save allocated pointer just before one returned to user
1122 ptr = (void *)(((kmp_uintptr_t)ptr_allocated + sizeof(void *) + alignment) &
1123 ~(alignment - 1));
1124 *((void **)ptr - 1) = ptr_allocated;
1125 } else {
1126 ptr = NULL;
1127 }
1128 return ptr;
1129 }
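// Worked alignment example: for alignment == 64 and a raw bget() result of
// 0x1008, ptr = (0x1008 + 8 + 64) & ~(kmp_uintptr_t)63 == 0x1040. The gap
// in front of ptr is always at least sizeof(void *) bytes, so the slot at
// ((void **)ptr)[-1] lies inside the allocation and safely stores 0x1008
// for kmpc_free() to find.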
1130
1131 void *kmpc_calloc(size_t nelem, size_t elsize) {
1132 void *ptr;
1133 ptr = bgetz(__kmp_entry_thread(), (bufsize)(nelem * elsize + sizeof(ptr)));
1134 if (ptr != NULL) {
1135 // save allocated pointer just before one returned to user
1136 *(void **)ptr = ptr;
1137 ptr = (void **)ptr + 1;
1138 }
1139 return ptr;
1140 }
1141
1142 void *kmpc_realloc(void *ptr, size_t size) {
1143 void *result = NULL;
1144 if (ptr == NULL) {
1145 // If pointer is NULL, realloc behaves like malloc.
1146 result = bget(__kmp_entry_thread(), (bufsize)(size + sizeof(ptr)));
1147 // save allocated pointer just before one returned to user
1148 if (result != NULL) {
1149 *(void **)result = result;
1150 result = (void **)result + 1;
1151 }
1152 } else if (size == 0) {
1153 // If size is 0, realloc behaves like free.
1154 // The thread must be registered by the call to kmpc_malloc() or
1155 // kmpc_calloc() before.
1156 // So it should be safe to call __kmp_get_thread(), not
1157 // __kmp_entry_thread().
1158 KMP_ASSERT(*((void **)ptr - 1));
1159 brel(__kmp_get_thread(), *((void **)ptr - 1));
1160 } else {
1161 result = bgetr(__kmp_entry_thread(), *((void **)ptr - 1),
1162 (bufsize)(size + sizeof(ptr)));
1163 if (result != NULL) {
1164 *(void **)result = result;
1165 result = (void **)result + 1;
1166 }
1167 }
1168 return result;
1169 }
1170
1171 // NOTE: the library must have already been initialized by a previous allocate
1172 void kmpc_free(void *ptr) {
1173 if (!__kmp_init_serial) {
1174 return;
1175 }
1176 if (ptr != NULL) {
1177 kmp_info_t *th = __kmp_get_thread();
1178 __kmp_bget_dequeue(th); /* Release any queued buffers */
1179 // extract allocated pointer and free it
1180 KMP_ASSERT(*((void **)ptr - 1));
1181 brel(th, *((void **)ptr - 1));
1182 }
1183 }
1184
1185 void *___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL) {
1186 void *ptr;
1187 KE_TRACE(30, ("-> __kmp_thread_malloc( %p, %d ) called from %s:%d\n", th,
1188 (int)size KMP_SRC_LOC_PARM));
1189 ptr = bget(th, (bufsize)size);
1190 KE_TRACE(30, ("<- __kmp_thread_malloc() returns %p\n", ptr));
1191 return ptr;
1192 }
1193
1194 void *___kmp_thread_calloc(kmp_info_t *th, size_t nelem,
1195 size_t elsize KMP_SRC_LOC_DECL) {
1196 void *ptr;
1197 KE_TRACE(30, ("-> __kmp_thread_calloc( %p, %d, %d ) called from %s:%d\n", th,
1198 (int)nelem, (int)elsize KMP_SRC_LOC_PARM));
1199 ptr = bgetz(th, (bufsize)(nelem * elsize));
1200 KE_TRACE(30, ("<- __kmp_thread_calloc() returns %p\n", ptr));
1201 return ptr;
1202 }
1203
1204 void *___kmp_thread_realloc(kmp_info_t *th, void *ptr,
1205 size_t size KMP_SRC_LOC_DECL) {
1206 KE_TRACE(30, ("-> __kmp_thread_realloc( %p, %p, %d ) called from %s:%d\n", th,
1207 ptr, (int)size KMP_SRC_LOC_PARM));
1208 ptr = bgetr(th, ptr, (bufsize)size);
1209 KE_TRACE(30, ("<- __kmp_thread_realloc() returns %p\n", ptr));
1210 return ptr;
1211 }
1212
1213 void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL) {
1214 KE_TRACE(30, ("-> __kmp_thread_free( %p, %p ) called from %s:%d\n", th,
1215 ptr KMP_SRC_LOC_PARM));
1216 if (ptr != NULL) {
1217 __kmp_bget_dequeue(th); /* Release any queued buffers */
1218 brel(th, ptr);
1219 }
1220 KE_TRACE(30, ("<- __kmp_thread_free()\n"));
1221 }
1222
1223 /* OMP 5.0 Memory Management support */
1224 static const char *kmp_mk_lib_name;
1225 static void *h_memkind;
1226 /* memkind experimental API: */
1227 // memkind_alloc
1228 static void *(*kmp_mk_alloc)(void *k, size_t sz);
1229 // memkind_free
1230 static void (*kmp_mk_free)(void *kind, void *ptr);
1231 // memkind_check_available
1232 static int (*kmp_mk_check)(void *kind);
1233 // kinds we are going to use
1234 static void **mk_default;
1235 static void **mk_interleave;
1236 static void **mk_hbw;
1237 static void **mk_hbw_interleave;
1238 static void **mk_hbw_preferred;
1239 static void **mk_hugetlb;
1240 static void **mk_hbw_hugetlb;
1241 static void **mk_hbw_preferred_hugetlb;
1242 static void **mk_dax_kmem;
1243 static void **mk_dax_kmem_all;
1244 static void **mk_dax_kmem_preferred;
1245 // Preview of target memory support
1246 static void *(*kmp_target_alloc_host)(size_t size, int device);
1247 static void *(*kmp_target_alloc_shared)(size_t size, int device);
1248 static void *(*kmp_target_alloc_device)(size_t size, int device);
1249 static void *(*kmp_target_free)(void *ptr, int device);
1250 static bool __kmp_target_mem_available;
1251 #define KMP_IS_TARGET_MEM_SPACE(MS) \
1252 (MS == llvm_omp_target_host_mem_space || \
1253 MS == llvm_omp_target_shared_mem_space || \
1254 MS == llvm_omp_target_device_mem_space)
1255 #define KMP_IS_TARGET_MEM_ALLOC(MA) \
1256 (MA == llvm_omp_target_host_mem_alloc || \
1257 MA == llvm_omp_target_shared_mem_alloc || \
1258 MA == llvm_omp_target_device_mem_alloc)
1259
1260 #if KMP_OS_UNIX && KMP_DYNAMIC_LIB
1261 static inline void chk_kind(void ***pkind) {
1262 KMP_DEBUG_ASSERT(pkind);
1263 if (*pkind) // symbol found
1264 if (kmp_mk_check(**pkind)) // kind not available or error
1265 *pkind = NULL;
1266 }
1267 #endif
1268
1269 void __kmp_init_memkind() {
1270 // as of 2018-07-31 memkind does not support Windows*, exclude it for now
1271 #if KMP_OS_UNIX && KMP_DYNAMIC_LIB
1272 // use of statically linked memkind is problematic, as it depends on libnuma
1273 kmp_mk_lib_name = "libmemkind.so";
1274 h_memkind = dlopen(kmp_mk_lib_name, RTLD_LAZY);
1275 if (h_memkind) {
1276 kmp_mk_check = (int (*)(void *))dlsym(h_memkind, "memkind_check_available");
1277 kmp_mk_alloc =
1278 (void *(*)(void *, size_t))dlsym(h_memkind, "memkind_malloc");
1279 kmp_mk_free = (void (*)(void *, void *))dlsym(h_memkind, "memkind_free");
1280 mk_default = (void **)dlsym(h_memkind, "MEMKIND_DEFAULT");
1281 if (kmp_mk_check && kmp_mk_alloc && kmp_mk_free && mk_default &&
1282 !kmp_mk_check(*mk_default)) {
1283 __kmp_memkind_available = 1;
1284 mk_interleave = (void **)dlsym(h_memkind, "MEMKIND_INTERLEAVE");
1285 chk_kind(&mk_interleave);
1286 mk_hbw = (void **)dlsym(h_memkind, "MEMKIND_HBW");
1287 chk_kind(&mk_hbw);
1288 mk_hbw_interleave = (void **)dlsym(h_memkind, "MEMKIND_HBW_INTERLEAVE");
1289 chk_kind(&mk_hbw_interleave);
1290 mk_hbw_preferred = (void **)dlsym(h_memkind, "MEMKIND_HBW_PREFERRED");
1291 chk_kind(&mk_hbw_preferred);
1292 mk_hugetlb = (void **)dlsym(h_memkind, "MEMKIND_HUGETLB");
1293 chk_kind(&mk_hugetlb);
1294 mk_hbw_hugetlb = (void **)dlsym(h_memkind, "MEMKIND_HBW_HUGETLB");
1295 chk_kind(&mk_hbw_hugetlb);
1296 mk_hbw_preferred_hugetlb =
1297 (void **)dlsym(h_memkind, "MEMKIND_HBW_PREFERRED_HUGETLB");
1298 chk_kind(&mk_hbw_preferred_hugetlb);
1299 mk_dax_kmem = (void **)dlsym(h_memkind, "MEMKIND_DAX_KMEM");
1300 chk_kind(&mk_dax_kmem);
1301 mk_dax_kmem_all = (void **)dlsym(h_memkind, "MEMKIND_DAX_KMEM_ALL");
1302 chk_kind(&mk_dax_kmem_all);
1303 mk_dax_kmem_preferred =
1304 (void **)dlsym(h_memkind, "MEMKIND_DAX_KMEM_PREFERRED");
1305 chk_kind(&mk_dax_kmem_preferred);
1306 KE_TRACE(25, ("__kmp_init_memkind: memkind library initialized\n"));
1307 return; // success
1308 }
1309 dlclose(h_memkind); // failure
1310 }
1311 #else // !(KMP_OS_UNIX && KMP_DYNAMIC_LIB)
1312 kmp_mk_lib_name = "";
1313 #endif // !(KMP_OS_UNIX && KMP_DYNAMIC_LIB)
1314 h_memkind = NULL;
1315 kmp_mk_check = NULL;
1316 kmp_mk_alloc = NULL;
1317 kmp_mk_free = NULL;
1318 mk_default = NULL;
1319 mk_interleave = NULL;
1320 mk_hbw = NULL;
1321 mk_hbw_interleave = NULL;
1322 mk_hbw_preferred = NULL;
1323 mk_hugetlb = NULL;
1324 mk_hbw_hugetlb = NULL;
1325 mk_hbw_preferred_hugetlb = NULL;
1326 mk_dax_kmem = NULL;
1327 mk_dax_kmem_all = NULL;
1328 mk_dax_kmem_preferred = NULL;
1329 }
1330
1331 void __kmp_fini_memkind() {
1332 #if KMP_OS_UNIX && KMP_DYNAMIC_LIB
1333 if (__kmp_memkind_available)
1334 KE_TRACE(25, ("__kmp_fini_memkind: finalize memkind library\n"));
1335 if (h_memkind) {
1336 dlclose(h_memkind);
1337 h_memkind = NULL;
1338 }
1339 kmp_mk_check = NULL;
1340 kmp_mk_alloc = NULL;
1341 kmp_mk_free = NULL;
1342 mk_default = NULL;
1343 mk_interleave = NULL;
1344 mk_hbw = NULL;
1345 mk_hbw_interleave = NULL;
1346 mk_hbw_preferred = NULL;
1347 mk_hugetlb = NULL;
1348 mk_hbw_hugetlb = NULL;
1349 mk_hbw_preferred_hugetlb = NULL;
1350 mk_dax_kmem = NULL;
1351 mk_dax_kmem_all = NULL;
1352 mk_dax_kmem_preferred = NULL;
1353 #endif
1354 }
1355 // Preview of target memory support
1356 void __kmp_init_target_mem() {
1357 *(void **)(&kmp_target_alloc_host) = KMP_DLSYM("llvm_omp_target_alloc_host");
1358 *(void **)(&kmp_target_alloc_shared) =
1359 KMP_DLSYM("llvm_omp_target_alloc_shared");
1360 *(void **)(&kmp_target_alloc_device) =
1361 KMP_DLSYM("llvm_omp_target_alloc_device");
1362 *(void **)(&kmp_target_free) = KMP_DLSYM("omp_target_free");
1363 __kmp_target_mem_available = kmp_target_alloc_host &&
1364 kmp_target_alloc_shared &&
1365 kmp_target_alloc_device && kmp_target_free;
1366 }
1367
1368 omp_allocator_handle_t __kmpc_init_allocator(int gtid, omp_memspace_handle_t ms,
1369 int ntraits,
1370 omp_alloctrait_t traits[]) {
1371 // OpenMP 5.0 only allows predefined memspaces
1372 KMP_DEBUG_ASSERT(ms == omp_default_mem_space || ms == omp_low_lat_mem_space ||
1373 ms == omp_large_cap_mem_space || ms == omp_const_mem_space ||
1374 ms == omp_high_bw_mem_space || KMP_IS_TARGET_MEM_SPACE(ms));
1375 kmp_allocator_t *al;
1376 int i;
1377 al = (kmp_allocator_t *)__kmp_allocate(sizeof(kmp_allocator_t)); // zeroed
1378 al->memspace = ms; // not used currently
1379 for (i = 0; i < ntraits; ++i) {
1380 switch (traits[i].key) {
1381 case omp_atk_sync_hint:
1382 case omp_atk_access:
1383 case omp_atk_pinned:
1384 break;
1385 case omp_atk_alignment:
1386 __kmp_type_convert(traits[i].value, &(al->alignment));
1387 KMP_ASSERT(IS_POWER_OF_TWO(al->alignment));
1388 break;
1389 case omp_atk_pool_size:
1390 al->pool_size = traits[i].value;
1391 break;
1392 case omp_atk_fallback:
1393 al->fb = (omp_alloctrait_value_t)traits[i].value;
1394 KMP_DEBUG_ASSERT(
1395 al->fb == omp_atv_default_mem_fb || al->fb == omp_atv_null_fb ||
1396 al->fb == omp_atv_abort_fb || al->fb == omp_atv_allocator_fb);
1397 break;
1398 case omp_atk_fb_data:
1399 al->fb_data = RCAST(kmp_allocator_t *, traits[i].value);
1400 break;
1401 case omp_atk_partition:
1402 al->memkind = RCAST(void **, traits[i].value);
1403 break;
1404 default:
1405 KMP_ASSERT2(0, "Unexpected allocator trait");
1406 }
1407 }
1408 if (al->fb == 0) {
1409 // set default allocator
1410 al->fb = omp_atv_default_mem_fb;
1411 al->fb_data = (kmp_allocator_t *)omp_default_mem_alloc;
1412 } else if (al->fb == omp_atv_allocator_fb) {
1413 KMP_ASSERT(al->fb_data != NULL);
1414 } else if (al->fb == omp_atv_default_mem_fb) {
1415 al->fb_data = (kmp_allocator_t *)omp_default_mem_alloc;
1416 }
1417 if (__kmp_memkind_available) {
1418 // Let's use memkind library if available
1419 if (ms == omp_high_bw_mem_space) {
1420 if (al->memkind == (void *)omp_atv_interleaved && mk_hbw_interleave) {
1421 al->memkind = mk_hbw_interleave;
1422 } else if (mk_hbw_preferred) {
1423 // AC: do not try to use MEMKIND_HBW for now, because memkind library
1424 // cannot reliably detect exhaustion of HBW memory.
1425 // It could be possible using hbw_verify_memory_region() but memkind
1426 // manual says: "Using this function in production code may result in
1427 // serious performance penalty".
1428 al->memkind = mk_hbw_preferred;
1429 } else {
1430 // HBW is requested but not available --> return NULL allocator
1431 __kmp_free(al);
1432 return omp_null_allocator;
1433 }
1434 } else if (ms == omp_large_cap_mem_space) {
1435 if (mk_dax_kmem_all) {
1436 // All pmem nodes are used
1437 al->memkind = mk_dax_kmem_all;
1438 } else if (mk_dax_kmem) {
1439 // Only the closest pmem node is used
1440 al->memkind = mk_dax_kmem;
1441 } else {
1442 __kmp_free(al);
1443 return omp_null_allocator;
1444 }
1445 } else {
1446 if (al->memkind == (void *)omp_atv_interleaved && mk_interleave) {
1447 al->memkind = mk_interleave;
1448 } else {
1449 al->memkind = mk_default;
1450 }
1451 }
1452 } else if (KMP_IS_TARGET_MEM_SPACE(ms) && !__kmp_target_mem_available) {
1453 __kmp_free(al);
1454 return omp_null_allocator;
1455 } else {
1456 if (ms == omp_high_bw_mem_space) {
1457 // cannot detect HBW memory presence without memkind library
1458 __kmp_free(al);
1459 return omp_null_allocator;
1460 }
1461 }
1462 return (omp_allocator_handle_t)al;
1463 }
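// Illustrative OpenMP 5.0 user-side counterpart of the traits handled above
// (excluded from the build; user code would compile this against omp.h):
#if 0
#include <omp.h>
static void example_user_allocator(void) {
  omp_alloctrait_t traits[2] = {{omp_atk_alignment, 64},
                                {omp_atk_fallback, omp_atv_null_fb}};
  omp_allocator_handle_t a =
      omp_init_allocator(omp_default_mem_space, 2, traits);
  void *p = omp_alloc(1024, a); // 64-byte aligned, or NULL on exhaustion
  omp_free(p, a);
  omp_destroy_allocator(a);
}
#endif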
1464
1465 void __kmpc_destroy_allocator(int gtid, omp_allocator_handle_t allocator) {
1466 if (allocator > kmp_max_mem_alloc)
1467 __kmp_free(allocator);
1468 }
1469
1470 void __kmpc_set_default_allocator(int gtid, omp_allocator_handle_t allocator) {
1471 if (allocator == omp_null_allocator)
1472 allocator = omp_default_mem_alloc;
1473 __kmp_threads[gtid]->th.th_def_allocator = allocator;
1474 }
1475
1476 omp_allocator_handle_t __kmpc_get_default_allocator(int gtid) {
1477 return __kmp_threads[gtid]->th.th_def_allocator;
1478 }
1479
1480 typedef struct kmp_mem_desc { // Memory block descriptor
1481 void *ptr_alloc; // Pointer returned by allocator
1482 size_t size_a; // Size of allocated memory block (initial+descriptor+align)
1483 size_t size_orig; // Original size requested
1484 void *ptr_align; // Pointer to aligned memory, returned
1485 kmp_allocator_t *allocator; // allocator
1486 } kmp_mem_desc_t;
1487 static int alignment = sizeof(void *); // let's align to pointer size
1488
1489 void *__kmpc_alloc(int gtid, size_t size, omp_allocator_handle_t allocator) {
1490 void *ptr = NULL;
1491 kmp_allocator_t *al;
1492 KMP_DEBUG_ASSERT(__kmp_init_serial);
1493
1494 if (size == 0)
1495 return NULL;
1496
1497 if (allocator == omp_null_allocator)
1498 allocator = __kmp_threads[gtid]->th.th_def_allocator;
1499
1500 KE_TRACE(25, ("__kmpc_alloc: T#%d (%d, %p)\n", gtid, (int)size, allocator));
1501 al = RCAST(kmp_allocator_t *, CCAST(omp_allocator_handle_t, allocator));
1502
1503 int sz_desc = sizeof(kmp_mem_desc_t);
1504 kmp_mem_desc_t desc;
1505 kmp_uintptr_t addr; // address returned by allocator
1506 kmp_uintptr_t addr_align; // address to return to caller
1507 kmp_uintptr_t addr_descr; // address of memory block descriptor
1508 int align = alignment; // default alignment
1509 if (allocator > kmp_max_mem_alloc && al->alignment > 0) {
1510 align = al->alignment; // alignment requested by user
1511 }
1512 desc.size_orig = size;
1513 desc.size_a = size + sz_desc + align;
1514
1515 if (__kmp_memkind_available) {
1516 if (allocator < kmp_max_mem_alloc) {
1517 // pre-defined allocator
1518 if (allocator == omp_high_bw_mem_alloc && mk_hbw_preferred) {
1519 ptr = kmp_mk_alloc(*mk_hbw_preferred, desc.size_a);
1520 } else if (allocator == omp_large_cap_mem_alloc && mk_dax_kmem_all) {
1521 ptr = kmp_mk_alloc(*mk_dax_kmem_all, desc.size_a);
1522 } else {
1523 ptr = kmp_mk_alloc(*mk_default, desc.size_a);
1524 }
1525 } else if (al->pool_size > 0) {
1526 // custom allocator with pool size requested
1527 kmp_uint64 used =
1528 KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, desc.size_a);
1529 if (used + desc.size_a > al->pool_size) {
1530 // not enough space, need to take the fallback path
1531 KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, -desc.size_a);
1532 if (al->fb == omp_atv_default_mem_fb) {
1533 al = (kmp_allocator_t *)omp_default_mem_alloc;
1534 ptr = kmp_mk_alloc(*mk_default, desc.size_a);
1535 } else if (al->fb == omp_atv_abort_fb) {
1536 KMP_ASSERT(0); // abort fallback requested
1537 } else if (al->fb == omp_atv_allocator_fb) {
1538 KMP_ASSERT(al != al->fb_data);
1539 al = al->fb_data;
1540 return __kmpc_alloc(gtid, size, (omp_allocator_handle_t)al);
1541 } // else ptr == NULL;
1542 } else {
1543 // pool has enough space
1544 ptr = kmp_mk_alloc(*al->memkind, desc.size_a);
1545 if (ptr == NULL) {
1546 if (al->fb == omp_atv_default_mem_fb) {
1547 al = (kmp_allocator_t *)omp_default_mem_alloc;
1548 ptr = kmp_mk_alloc(*mk_default, desc.size_a);
1549 } else if (al->fb == omp_atv_abort_fb) {
1550 KMP_ASSERT(0); // abort fallback requested
1551 } else if (al->fb == omp_atv_allocator_fb) {
1552 KMP_ASSERT(al != al->fb_data);
1553 al = al->fb_data;
1554 return __kmpc_alloc(gtid, size, (omp_allocator_handle_t)al);
1555 }
1556 }
1557 }
1558 } else {
1559 // custom allocator, pool size not requested
1560 ptr = kmp_mk_alloc(*al->memkind, desc.size_a);
1561 if (ptr == NULL) {
1562 if (al->fb == omp_atv_default_mem_fb) {
1563 al = (kmp_allocator_t *)omp_default_mem_alloc;
1564 ptr = kmp_mk_alloc(*mk_default, desc.size_a);
1565 } else if (al->fb == omp_atv_abort_fb) {
1566 KMP_ASSERT(0); // abort fallback requested
1567 } else if (al->fb == omp_atv_allocator_fb) {
1568 KMP_ASSERT(al != al->fb_data);
1569 al = al->fb_data;
1570 return __kmpc_alloc(gtid, size, (omp_allocator_handle_t)al);
1571 }
1572 }
1573 }
1574 } else if (allocator < kmp_max_mem_alloc) {
1575 if (KMP_IS_TARGET_MEM_ALLOC(allocator)) {
1576 // Use size input directly as the memory may not be accessible on host.
1577 // Use default device for now.
1578 if (__kmp_target_mem_available) {
1579 kmp_int32 device =
1580 __kmp_threads[gtid]->th.th_current_task->td_icvs.default_device;
1581 if (allocator == llvm_omp_target_host_mem_alloc)
1582 ptr = kmp_target_alloc_host(size, device);
1583 else if (allocator == llvm_omp_target_shared_mem_alloc)
1584 ptr = kmp_target_alloc_shared(size, device);
1585 else // allocator == llvm_omp_target_device_mem_alloc
1586 ptr = kmp_target_alloc_device(size, device);
1587 }
1588 return ptr;
1589 }
1590
1591 // pre-defined allocator
1592 if (allocator == omp_high_bw_mem_alloc) {
1593 // ptr = NULL;
1594 } else if (allocator == omp_large_cap_mem_alloc) {
1595 // warnings?
1596 } else {
1597 ptr = __kmp_thread_malloc(__kmp_thread_from_gtid(gtid), desc.size_a);
1598 }
1599 } else if (KMP_IS_TARGET_MEM_SPACE(al->memspace)) {
1600 if (__kmp_target_mem_available) {
1601 kmp_int32 device =
1602 __kmp_threads[gtid]->th.th_current_task->td_icvs.default_device;
1603 if (al->memspace == llvm_omp_target_host_mem_space)
1604 ptr = kmp_target_alloc_host(size, device);
1605 else if (al->memspace == llvm_omp_target_shared_mem_space)
1606 ptr = kmp_target_alloc_shared(size, device);
1607 else // al->memspace == llvm_omp_target_device_mem_space
1608 ptr = kmp_target_alloc_device(size, device);
1609 }
1610 return ptr;
1611 } else if (al->pool_size > 0) {
1612 // custom allocator with pool size requested
1613 kmp_uint64 used =
1614 KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, desc.size_a);
1615 if (used + desc.size_a > al->pool_size) {
1616 // not enough space, need to take the fallback path
1617 KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, -desc.size_a);
1618 if (al->fb == omp_atv_default_mem_fb) {
1619 al = (kmp_allocator_t *)omp_default_mem_alloc;
1620 ptr = __kmp_thread_malloc(__kmp_thread_from_gtid(gtid), desc.size_a);
1621 } else if (al->fb == omp_atv_abort_fb) {
1622 KMP_ASSERT(0); // abort fallback requested
1623 } else if (al->fb == omp_atv_allocator_fb) {
1624 KMP_ASSERT(al != al->fb_data);
1625 al = al->fb_data;
1626 return __kmpc_alloc(gtid, size, (omp_allocator_handle_t)al);
1627 } // else ptr == NULL;
1628 } else {
1629 // pool has enough space
1630 ptr = __kmp_thread_malloc(__kmp_thread_from_gtid(gtid), desc.size_a);
1631 if (ptr == NULL && al->fb == omp_atv_abort_fb) {
1632 KMP_ASSERT(0); // abort fallback requested
1633 } // no point in trying another fallback; it would use the same internal alloc
1634 }
1635 } else {
1636 // custom allocator, pool size not requested
1637 ptr = __kmp_thread_malloc(__kmp_thread_from_gtid(gtid), desc.size_a);
1638 if (ptr == NULL && al->fb == omp_atv_abort_fb) {
1639 KMP_ASSERT(0); // abort fallback requested
1640 } // no sense to look for another fallback because of same internal alloc
1641 }
1642 KE_TRACE(10, ("__kmpc_alloc: T#%d %p=alloc(%d)\n", gtid, ptr, desc.size_a));
1643 if (ptr == NULL)
1644 return NULL;
1645
1646 addr = (kmp_uintptr_t)ptr;
1647 addr_align = (addr + sz_desc + align - 1) & ~(align - 1);
1648 addr_descr = addr_align - sz_desc;
1649
1650 desc.ptr_alloc = ptr;
1651 desc.ptr_align = (void *)addr_align;
1652 desc.allocator = al;
1653 *((kmp_mem_desc_t *)addr_descr) = desc; // save descriptor contents
1654 KMP_MB();
1655
1656 KE_TRACE(25, ("__kmpc_alloc returns %p, T#%d\n", desc.ptr_align, gtid));
1657 return desc.ptr_align;
1658 }
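
/* Example (illustrative only, not part of the runtime): a minimal user-code
   sketch of how the pool-size and fallback paths above are exercised through
   the standard OpenMP 5.0 allocator API, assuming a build where omp_alloc()
   and omp_free() forward to __kmpc_alloc() and __kmpc_free(). The 4 KB pool
   size is an arbitrary illustrative value.

     #include <omp.h>

     omp_alloctrait_t traits[] = {
         {omp_atk_pool_size, 4096}, // takes the al->pool_size > 0 path above
         {omp_atk_fallback, omp_atv_default_mem_fb}}; // fallback on exhaustion
     omp_allocator_handle_t a =
         omp_init_allocator(omp_default_mem_space, 2, traits);
     void *p = omp_alloc(256, a); // lands in __kmpc_alloc with this allocator
     omp_free(p, a);
     omp_destroy_allocator(a);
*/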

void *__kmpc_calloc(int gtid, size_t nmemb, size_t size,
                    omp_allocator_handle_t allocator) {
  void *ptr = NULL;
  kmp_allocator_t *al;
  KMP_DEBUG_ASSERT(__kmp_init_serial);

  if (allocator == omp_null_allocator)
    allocator = __kmp_threads[gtid]->th.th_def_allocator;

  KE_TRACE(25, ("__kmpc_calloc: T#%d (%d, %d, %p)\n", gtid, (int)nmemb,
                (int)size, allocator));

  al = RCAST(kmp_allocator_t *, CCAST(omp_allocator_handle_t, allocator));

  if (nmemb == 0 || size == 0)
    return ptr;

  if ((SIZE_MAX - sizeof(kmp_mem_desc_t)) / size < nmemb) {
    if (al->fb == omp_atv_abort_fb) {
      KMP_ASSERT(0);
    }
    return ptr;
  }

  ptr = __kmpc_alloc(gtid, nmemb * size, allocator);

  if (ptr) {
    memset(ptr, 0x00, nmemb * size);
  }
  KE_TRACE(25, ("__kmpc_calloc returns %p, T#%d\n", ptr, gtid));
  return ptr;
}
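
/* A worked example of the overflow guard above, assuming 64-bit size_t.
   The requested total is nmemb * size + sizeof(kmp_mem_desc_t); computing it
   directly could wrap around, so the guard is rearranged as a division:
     (SIZE_MAX - sizeof(kmp_mem_desc_t)) / size < nmemb
   For nmemb = size = 2^32: the left side is about 2^32 - 1, which is less
   than nmemb, so the request is rejected instead of silently wrapping to a
   small allocation. */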

void *__kmpc_realloc(int gtid, void *ptr, size_t size,
                     omp_allocator_handle_t allocator,
                     omp_allocator_handle_t free_allocator) {
  void *nptr = NULL;
  KMP_DEBUG_ASSERT(__kmp_init_serial);

  if (size == 0) {
    if (ptr != NULL)
      __kmpc_free(gtid, ptr, free_allocator);
    return nptr;
  }

  KE_TRACE(25, ("__kmpc_realloc: T#%d (%p, %d, %p, %p)\n", gtid, ptr, (int)size,
                allocator, free_allocator));

  nptr = __kmpc_alloc(gtid, size, allocator);

  if (nptr != NULL && ptr != NULL) {
    kmp_mem_desc_t desc;
    kmp_uintptr_t addr_align; // address to return to caller
    kmp_uintptr_t addr_descr; // address of memory block descriptor

    addr_align = (kmp_uintptr_t)ptr;
    addr_descr = addr_align - sizeof(kmp_mem_desc_t);
    desc = *((kmp_mem_desc_t *)addr_descr); // read descriptor

    KMP_DEBUG_ASSERT(desc.ptr_align == ptr);
    KMP_DEBUG_ASSERT(desc.size_orig > 0);
    KMP_DEBUG_ASSERT(desc.size_orig < desc.size_a);
    KMP_MEMCPY((char *)nptr, (char *)ptr,
               (size_t)((size < desc.size_orig) ? size : desc.size_orig));
  }

  if (nptr != NULL) {
    __kmpc_free(gtid, ptr, free_allocator);
  }

  KE_TRACE(25, ("__kmpc_realloc returns %p, T#%d\n", nptr, gtid));
  return nptr;
}
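
/* Usage note (illustrative): the standard OpenMP 5.1 entry point omp_realloc
   maps onto this routine in this build. Note the semantics above: a new block
   is always allocated and min(size, desc.size_orig) bytes are copied; there
   is no in-place growth.

     void *p = omp_alloc(100, omp_default_mem_alloc);
     p = omp_realloc(p, 200, omp_default_mem_alloc, omp_default_mem_alloc);
     omp_free(p, omp_default_mem_alloc);
*/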

void __kmpc_free(int gtid, void *ptr, const omp_allocator_handle_t allocator) {
  KE_TRACE(25, ("__kmpc_free: T#%d free(%p,%p)\n", gtid, ptr, allocator));
  if (ptr == NULL)
    return;

  kmp_allocator_t *al;
  omp_allocator_handle_t oal;
  al = RCAST(kmp_allocator_t *, CCAST(omp_allocator_handle_t, allocator));
  kmp_mem_desc_t desc;
  kmp_uintptr_t addr_align; // address to return to caller
  kmp_uintptr_t addr_descr; // address of memory block descriptor
  if (KMP_IS_TARGET_MEM_ALLOC(allocator) ||
      (allocator > kmp_max_mem_alloc &&
       KMP_IS_TARGET_MEM_SPACE(al->memspace))) {
    KMP_DEBUG_ASSERT(kmp_target_free);
    kmp_int32 device =
        __kmp_threads[gtid]->th.th_current_task->td_icvs.default_device;
    kmp_target_free(ptr, device);
    return;
  }

  addr_align = (kmp_uintptr_t)ptr;
  addr_descr = addr_align - sizeof(kmp_mem_desc_t);
  desc = *((kmp_mem_desc_t *)addr_descr); // read descriptor

  KMP_DEBUG_ASSERT(desc.ptr_align == ptr);
  if (allocator) {
    KMP_DEBUG_ASSERT(desc.allocator == al || desc.allocator == al->fb_data);
  }
  al = desc.allocator;
  oal = (omp_allocator_handle_t)al; // cast to void* for comparisons
  KMP_DEBUG_ASSERT(al);

  if (__kmp_memkind_available) {
    if (oal < kmp_max_mem_alloc) {
      // pre-defined allocator
      if (oal == omp_high_bw_mem_alloc && mk_hbw_preferred) {
        kmp_mk_free(*mk_hbw_preferred, desc.ptr_alloc);
      } else if (oal == omp_large_cap_mem_alloc && mk_dax_kmem_all) {
        kmp_mk_free(*mk_dax_kmem_all, desc.ptr_alloc);
      } else {
        kmp_mk_free(*mk_default, desc.ptr_alloc);
      }
    } else {
      if (al->pool_size > 0) { // custom allocator with pool size requested
        kmp_uint64 used =
            KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, -desc.size_a);
        (void)used; // to suppress compiler warning
        KMP_DEBUG_ASSERT(used >= desc.size_a);
      }
      kmp_mk_free(*al->memkind, desc.ptr_alloc);
    }
  } else {
    if (oal > kmp_max_mem_alloc && al->pool_size > 0) {
      kmp_uint64 used =
          KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, -desc.size_a);
      (void)used; // to suppress compiler warning
      KMP_DEBUG_ASSERT(used >= desc.size_a);
    }
    __kmp_thread_free(__kmp_thread_from_gtid(gtid), desc.ptr_alloc);
  }
  KE_TRACE(10, ("__kmpc_free: T#%d freed %p (%p)\n", gtid, desc.ptr_alloc,
                allocator));
}
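
/* Note: the descriptor written by __kmpc_alloc records the allocator that was
   actually used (including a fallback allocator), and the release path above
   is selected from desc.allocator rather than from the argument. As a result,
   a block can be freed correctly even when the caller passes
   omp_null_allocator -- except for target-memory blocks, which are routed by
   the allocator argument before the descriptor is read. */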

/* If LEAK_MEMORY is defined, __kmp_free() will *not* free memory. It causes
   memory leaks, but it may be useful for debugging memory corruptions, use of
   freed pointers, etc. */
/* #define LEAK_MEMORY */
struct kmp_mem_descr { // Memory block descriptor.
  void *ptr_allocated; // Pointer returned by malloc(), subject for free().
  size_t size_allocated; // Size of allocated memory block.
  void *ptr_aligned; // Pointer to aligned memory, to be used by client code.
  size_t size_aligned; // Size of aligned memory block.
};
typedef struct kmp_mem_descr kmp_mem_descr_t;

/* Allocate memory on requested boundary, fill allocated memory with 0x00.
   NULL is NEVER returned, __kmp_abort() is called in case of memory allocation
   error. Must use __kmp_free when freeing memory allocated by this routine! */
static void *___kmp_allocate_align(size_t size,
                                   size_t alignment KMP_SRC_LOC_DECL) {
  /* __kmp_allocate() allocates (by call to malloc()) a bigger memory block
     than requested so that it can return a properly aligned pointer. The
     original pointer returned by malloc() and the size of the allocated block
     are saved in a descriptor just before the aligned pointer. This
     information is used by __kmp_free() -- it has to pass to free() the
     original pointer, not the aligned one.

          +---------+------------+-----------------------------+---------+
          | padding | descriptor |        aligned block        | padding |
          +---------+------------+-----------------------------+---------+
          ^                      ^
          |                      |
          |                      +- Aligned pointer returned to caller
          +- Pointer returned by malloc()

     Aligned block is filled with zeros, paddings are filled with 0xEF. */

  kmp_mem_descr_t descr;
  kmp_uintptr_t addr_allocated; // Address returned by malloc().
  kmp_uintptr_t addr_aligned; // Aligned address to return to caller.
  kmp_uintptr_t addr_descr; // Address of memory block descriptor.

  KE_TRACE(25, ("-> ___kmp_allocate_align( %d, %d ) called from %s:%d\n",
                (int)size, (int)alignment KMP_SRC_LOC_PARM));

  KMP_DEBUG_ASSERT(alignment < 32 * 1024); // Alignment should not be too big.
  KMP_DEBUG_ASSERT(sizeof(void *) <= sizeof(kmp_uintptr_t));
  // Make sure kmp_uintptr_t is enough to store addresses.

  descr.size_aligned = size;
  descr.size_allocated =
      descr.size_aligned + sizeof(kmp_mem_descr_t) + alignment;

#if KMP_DEBUG
  descr.ptr_allocated = _malloc_src_loc(descr.size_allocated, _file_, _line_);
#else
  descr.ptr_allocated = malloc_src_loc(descr.size_allocated KMP_SRC_LOC_PARM);
#endif
  KE_TRACE(10, ("   malloc( %d ) returned %p\n", (int)descr.size_allocated,
                descr.ptr_allocated));
  if (descr.ptr_allocated == NULL) {
    KMP_FATAL(OutOfHeapMemory);
  }

  addr_allocated = (kmp_uintptr_t)descr.ptr_allocated;
  addr_aligned =
      (addr_allocated + sizeof(kmp_mem_descr_t) + alignment) & ~(alignment - 1);
  addr_descr = addr_aligned - sizeof(kmp_mem_descr_t);

  descr.ptr_aligned = (void *)addr_aligned;

  KE_TRACE(26, ("   ___kmp_allocate_align: "
                "ptr_allocated=%p, size_allocated=%d, "
                "ptr_aligned=%p, size_aligned=%d\n",
                descr.ptr_allocated, (int)descr.size_allocated,
                descr.ptr_aligned, (int)descr.size_aligned));

  KMP_DEBUG_ASSERT(addr_allocated <= addr_descr);
  KMP_DEBUG_ASSERT(addr_descr + sizeof(kmp_mem_descr_t) == addr_aligned);
  KMP_DEBUG_ASSERT(addr_aligned + descr.size_aligned <=
                   addr_allocated + descr.size_allocated);
  KMP_DEBUG_ASSERT(addr_aligned % alignment == 0);
#ifdef KMP_DEBUG
  memset(descr.ptr_allocated, 0xEF, descr.size_allocated);
  // Fill allocated memory block with 0xEF.
#endif
  memset(descr.ptr_aligned, 0x00, descr.size_aligned);
  // Fill the aligned memory block (which is intended for use by the caller)
  // with 0x00. Do not put this filling under a KMP_DEBUG condition! Many
  // callers expect zeroed memory. (Padding bytes remain filled with 0xEF in
  // the debugging library.)
  *((kmp_mem_descr_t *)addr_descr) = descr;

  KMP_MB();

  KE_TRACE(25, ("<- ___kmp_allocate_align() returns %p\n", descr.ptr_aligned));
  return descr.ptr_aligned;
} // func ___kmp_allocate_align
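
/* A worked example of the alignment arithmetic above (hypothetical numbers):
   suppose malloc() returns 0x1008, alignment is 64, and
   sizeof(kmp_mem_descr_t) is 32. Then
     addr_aligned = (0x1008 + 32 + 64) & ~63 = 0x1068 & ~63 = 0x1040
     addr_descr   = 0x1040 - 32 = 0x1020
   so the descriptor fits between the pointer returned by malloc() (0x1008)
   and the aligned pointer handed to the caller (0x1040), and 0x1040 is
   64-byte aligned as required. */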

/* Allocate memory on cache line boundary, fill allocated memory with 0x00.
   Do not call this func directly! Use __kmp_allocate macro instead.
   NULL is NEVER returned, __kmp_abort() is called in case of memory allocation
   error. Must use __kmp_free when freeing memory allocated by this routine! */
void *___kmp_allocate(size_t size KMP_SRC_LOC_DECL) {
  void *ptr;
  KE_TRACE(25, ("-> __kmp_allocate( %d ) called from %s:%d\n",
                (int)size KMP_SRC_LOC_PARM));
  ptr = ___kmp_allocate_align(size, __kmp_align_alloc KMP_SRC_LOC_PARM);
  KE_TRACE(25, ("<- __kmp_allocate() returns %p\n", ptr));
  return ptr;
} // func ___kmp_allocate

/* Allocate memory on page boundary, fill allocated memory with 0x00.
   Do not call this func directly! Use __kmp_page_allocate macro instead.
   NULL is NEVER returned, __kmp_abort() is called in case of memory allocation
   error. Must use __kmp_free when freeing memory allocated by this routine! */
void *___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL) {
  int page_size = 8 * 1024;
  void *ptr;

  KE_TRACE(25, ("-> __kmp_page_allocate( %d ) called from %s:%d\n",
                (int)size KMP_SRC_LOC_PARM));
  ptr = ___kmp_allocate_align(size, page_size KMP_SRC_LOC_PARM);
  KE_TRACE(25, ("<- __kmp_page_allocate( %d ) returns %p\n", (int)size, ptr));
  return ptr;
} // ___kmp_page_allocate

/* Free memory allocated by __kmp_allocate() and __kmp_page_allocate().
   In debug mode, fill the memory block with 0xEF before call to free(). */
void ___kmp_free(void *ptr KMP_SRC_LOC_DECL) {
  kmp_mem_descr_t descr;
  kmp_uintptr_t addr_allocated; // Address returned by malloc().
  kmp_uintptr_t addr_aligned; // Aligned address passed by caller.

  KE_TRACE(25,
           ("-> __kmp_free( %p ) called from %s:%d\n", ptr KMP_SRC_LOC_PARM));
  KMP_ASSERT(ptr != NULL);

  descr = *(kmp_mem_descr_t *)((kmp_uintptr_t)ptr - sizeof(kmp_mem_descr_t));

  KE_TRACE(26, ("   __kmp_free: "
                "ptr_allocated=%p, size_allocated=%d, "
                "ptr_aligned=%p, size_aligned=%d\n",
                descr.ptr_allocated, (int)descr.size_allocated,
                descr.ptr_aligned, (int)descr.size_aligned));

  addr_allocated = (kmp_uintptr_t)descr.ptr_allocated;
  addr_aligned = (kmp_uintptr_t)descr.ptr_aligned;

  KMP_DEBUG_ASSERT(addr_aligned % CACHE_LINE == 0);
  KMP_DEBUG_ASSERT(descr.ptr_aligned == ptr);
  KMP_DEBUG_ASSERT(addr_allocated + sizeof(kmp_mem_descr_t) <= addr_aligned);
  KMP_DEBUG_ASSERT(descr.size_aligned < descr.size_allocated);
  KMP_DEBUG_ASSERT(addr_aligned + descr.size_aligned <=
                   addr_allocated + descr.size_allocated);

#ifdef KMP_DEBUG
  memset(descr.ptr_allocated, 0xEF, descr.size_allocated);
  // Fill memory block with 0xEF, it helps catch using freed memory.
#endif

#ifndef LEAK_MEMORY
  KE_TRACE(10, ("   free( %p )\n", descr.ptr_allocated));
#ifdef KMP_DEBUG
  _free_src_loc(descr.ptr_allocated, _file_, _line_);
#else
  free_src_loc(descr.ptr_allocated KMP_SRC_LOC_PARM);
#endif
#endif
  KMP_MB();
  KE_TRACE(25, ("<- __kmp_free() returns\n"));
} // func ___kmp_free
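
/* Usage sketch (illustrative): internal callers are expected to go through
   the __kmp_allocate / __kmp_page_allocate / __kmp_free macros (defined in
   kmp.h), which supply the caller's file and line in debug builds:

     int *counters = (int *)__kmp_allocate(64 * sizeof(int)); // zero-filled
     // ... use counters ...
     __kmp_free(counters); // must pair with __kmp_allocate/__kmp_page_allocate
*/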

#if USE_FAST_MEMORY == 3
// Allocate fast memory by first scanning the thread's free lists.
// If a chunk the right size exists, grab it off the free list.
// Otherwise allocate normally using kmp_thread_malloc.

// AC: How to choose the limit? Just get 16 for now...
#define KMP_FREE_LIST_LIMIT 16

// Always use 128 bytes for determining buckets for caching memory blocks
#define DCACHE_LINE 128
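
// Resulting bucket map for ___kmp_fast_allocate below, derived from its index
// computation (request sizes in bytes, DCACHE_LINE = 128):
//   request size      index   block rounded up to
//      1 ...  256       0      2 lines  (256 B)
//    257 ...  512       1      4 lines  (512 B)
//    513 ... 2048       2     16 lines  (2 KB)
//   2049 ... 8192       3     64 lines  (8 KB)
//        > 8192         -     no free list, direct bget() call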

void *___kmp_fast_allocate(kmp_info_t *this_thr,
                           size_t size KMP_SRC_LOC_DECL) {
  void *ptr;
  size_t num_lines, idx;
  int index;
  void *alloc_ptr;
  size_t alloc_size;
  kmp_mem_descr_t *descr;

  KE_TRACE(25, ("-> __kmp_fast_allocate( T#%d, %d ) called from %s:%d\n",
                __kmp_gtid_from_thread(this_thr), (int)size KMP_SRC_LOC_PARM));

  num_lines = (size + DCACHE_LINE - 1) / DCACHE_LINE;
  idx = num_lines - 1;
  KMP_DEBUG_ASSERT(idx >= 0);
  if (idx < 2) {
    index = 0; // idx is [ 0, 1 ], use first free list
    num_lines = 2; // 1, 2 cache lines or less than cache line
  } else if ((idx >>= 2) == 0) {
    index = 1; // idx is [ 2, 3 ], use second free list
    num_lines = 4; // 3, 4 cache lines
  } else if ((idx >>= 2) == 0) {
    index = 2; // idx is [ 4, 15 ], use third free list
    num_lines = 16; // 5, 6, ..., 16 cache lines
  } else if ((idx >>= 2) == 0) {
    index = 3; // idx is [ 16, 63 ], use fourth free list
    num_lines = 64; // 17, 18, ..., 64 cache lines
  } else {
    goto alloc_call; // 65 or more cache lines ( > 8KB ), don't use free lists
  }

  ptr = this_thr->th.th_free_lists[index].th_free_list_self;
  if (ptr != NULL) {
    // pop the head of the no-sync free list
    this_thr->th.th_free_lists[index].th_free_list_self = *((void **)ptr);
    KMP_DEBUG_ASSERT(this_thr == ((kmp_mem_descr_t *)((kmp_uintptr_t)ptr -
                                                      sizeof(kmp_mem_descr_t)))
                                     ->ptr_aligned);
    goto end;
  }
  ptr = TCR_SYNC_PTR(this_thr->th.th_free_lists[index].th_free_list_sync);
  if (ptr != NULL) {
    // no-sync free list is empty, use sync free list (filled in by other
    // threads only)
    // pop the head of the sync free list, push NULL instead
    while (!KMP_COMPARE_AND_STORE_PTR(
        &this_thr->th.th_free_lists[index].th_free_list_sync, ptr, nullptr)) {
      KMP_CPU_PAUSE();
      ptr = TCR_SYNC_PTR(this_thr->th.th_free_lists[index].th_free_list_sync);
    }
    // push the rest of the chain into the no-sync free list (can be NULL if
    // there was only one block)
    this_thr->th.th_free_lists[index].th_free_list_self = *((void **)ptr);
    KMP_DEBUG_ASSERT(this_thr == ((kmp_mem_descr_t *)((kmp_uintptr_t)ptr -
                                                      sizeof(kmp_mem_descr_t)))
                                     ->ptr_aligned);
    goto end;
  }

alloc_call:
  // haven't found a block in the free lists, so allocate one
  size = num_lines * DCACHE_LINE;

  alloc_size = size + sizeof(kmp_mem_descr_t) + DCACHE_LINE;
  KE_TRACE(25, ("__kmp_fast_allocate: T#%d Calling bget with "
                "alloc_size %d\n",
                __kmp_gtid_from_thread(this_thr), alloc_size));
  alloc_ptr = bget(this_thr, (bufsize)alloc_size);

  // align ptr to DCACHE_LINE
  ptr = (void *)((((kmp_uintptr_t)alloc_ptr) + sizeof(kmp_mem_descr_t) +
                  DCACHE_LINE) &
                 ~(DCACHE_LINE - 1));
  descr = (kmp_mem_descr_t *)(((kmp_uintptr_t)ptr) - sizeof(kmp_mem_descr_t));

  descr->ptr_allocated = alloc_ptr; // remember allocated pointer
  // we don't need size_allocated
  descr->ptr_aligned = (void *)this_thr; // remember allocating thread
  // (it is already saved in the bget buffer,
  // but we may want to use another allocator in future)
  descr->size_aligned = size;

end:
  KE_TRACE(25, ("<- __kmp_fast_allocate( T#%d ) returns %p\n",
                __kmp_gtid_from_thread(this_thr), ptr));
  return ptr;
} // func __kmp_fast_allocate

// Free fast memory and place it on the thread's free list if it is of
// the correct size.
void ___kmp_fast_free(kmp_info_t *this_thr, void *ptr KMP_SRC_LOC_DECL) {
  kmp_mem_descr_t *descr;
  kmp_info_t *alloc_thr;
  size_t size;
  size_t idx;
  int index;

  KE_TRACE(25, ("-> __kmp_fast_free( T#%d, %p ) called from %s:%d\n",
                __kmp_gtid_from_thread(this_thr), ptr KMP_SRC_LOC_PARM));
  KMP_ASSERT(ptr != NULL);

  descr = (kmp_mem_descr_t *)(((kmp_uintptr_t)ptr) - sizeof(kmp_mem_descr_t));

  KE_TRACE(26, ("   __kmp_fast_free: size_aligned=%d\n",
                (int)descr->size_aligned));

  size = descr->size_aligned; // 2, 4, 16, 64, 65, 66, ... cache lines

  idx = DCACHE_LINE * 2; // 2 cache lines is the minimal size of a block
  if (idx == size) {
    index = 0; // 2 cache lines
  } else if ((idx <<= 1) == size) {
    index = 1; // 4 cache lines
  } else if ((idx <<= 2) == size) {
    index = 2; // 16 cache lines
  } else if ((idx <<= 2) == size) {
    index = 3; // 64 cache lines
  } else {
    KMP_DEBUG_ASSERT(size > DCACHE_LINE * 64);
    goto free_call; // 65 or more cache lines ( > 8KB )
  }

  alloc_thr = (kmp_info_t *)descr->ptr_aligned; // get thread owning the block
  if (alloc_thr == this_thr) {
    // push block onto self no-sync free list, linking previous head (LIFO)
    *((void **)ptr) = this_thr->th.th_free_lists[index].th_free_list_self;
    this_thr->th.th_free_lists[index].th_free_list_self = ptr;
  } else {
    void *head = this_thr->th.th_free_lists[index].th_free_list_other;
    if (head == NULL) {
      // Create new free list
      this_thr->th.th_free_lists[index].th_free_list_other = ptr;
      *((void **)ptr) = NULL; // mark the tail of the list
      descr->size_allocated = (size_t)1; // head of the list keeps its length
    } else {
      // need to check the existing "other" list's owner thread and queue size
      kmp_mem_descr_t *dsc =
          (kmp_mem_descr_t *)((char *)head - sizeof(kmp_mem_descr_t));
      // allocating thread, same for all queue nodes
      kmp_info_t *q_th = (kmp_info_t *)(dsc->ptr_aligned);
      size_t q_sz =
          dsc->size_allocated + 1; // new size in case we add the current block
      if (q_th == alloc_thr && q_sz <= KMP_FREE_LIST_LIMIT) {
        // we can add the current block to the "other" list, no sync needed
        *((void **)ptr) = head;
        descr->size_allocated = q_sz;
        this_thr->th.th_free_lists[index].th_free_list_other = ptr;
      } else {
        // either the queue blocks' owner is changing or the size limit was
        // exceeded; return the old queue to its allocating thread (q_th)
        // synchronously, and start a new list for alloc_thr's blocks
        void *old_ptr;
        void *tail = head;
        void *next = *((void **)head);
        while (next != NULL) {
          KMP_DEBUG_ASSERT(
              // queue size should decrease by 1 each step through the list
              ((kmp_mem_descr_t *)((char *)next - sizeof(kmp_mem_descr_t)))
                      ->size_allocated +
                  1 ==
              ((kmp_mem_descr_t *)((char *)tail - sizeof(kmp_mem_descr_t)))
                  ->size_allocated);
          tail = next; // remember tail node
          next = *((void **)next);
        }
        KMP_DEBUG_ASSERT(q_th != NULL);
        // push block to owner's sync free list
        old_ptr = TCR_PTR(q_th->th.th_free_lists[index].th_free_list_sync);
        /* the next pointer must be set before setting free_list to ptr to
           avoid exposing a broken list to other threads, even for an
           instant. */
        *((void **)tail) = old_ptr;

        while (!KMP_COMPARE_AND_STORE_PTR(
            &q_th->th.th_free_lists[index].th_free_list_sync, old_ptr, head)) {
          KMP_CPU_PAUSE();
          old_ptr = TCR_PTR(q_th->th.th_free_lists[index].th_free_list_sync);
          *((void **)tail) = old_ptr;
        }

        // start a new list of not-self blocks
        this_thr->th.th_free_lists[index].th_free_list_other = ptr;
        *((void **)ptr) = NULL;
        descr->size_allocated = (size_t)1; // head of queue keeps its length
      }
    }
  }
  goto end;

free_call:
  KE_TRACE(25, ("__kmp_fast_free: T#%d Calling brel for size %d\n",
                __kmp_gtid_from_thread(this_thr), size));
  __kmp_bget_dequeue(this_thr); /* Release any queued buffers */
  brel(this_thr, descr->ptr_allocated);

end:
  KE_TRACE(25, ("<- __kmp_fast_free() returns\n"));

} // func __kmp_fast_free
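
/* The remote-free handoff above is a lock-free LIFO ("Treiber stack") push of
   a whole chain. A self-contained C++ sketch of the same pattern, for
   illustration only -- the runtime itself uses TCR_PTR and
   KMP_COMPARE_AND_STORE_PTR rather than std::atomic:

     #include <atomic>
     struct Node { Node *next; };
     void push_chain(std::atomic<Node *> &list, Node *head, Node *tail) {
       Node *old = list.load(std::memory_order_relaxed);
       do {
         tail->next = old; // link before publishing, as in the code above
       } while (!list.compare_exchange_weak(old, head,
                                            std::memory_order_release,
                                            std::memory_order_relaxed));
     }
*/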

// Initialize the thread free lists related to fast memory
// Only do this when a thread is initially created.
void __kmp_initialize_fast_memory(kmp_info_t *this_thr) {
  KE_TRACE(10, ("__kmp_initialize_fast_memory: Called from th %p\n", this_thr));

  memset(this_thr->th.th_free_lists, 0, NUM_LISTS * sizeof(kmp_free_list_t));
}

// Free the memory in the thread free lists related to fast memory
// Only do this when a thread is being reaped (destroyed).
void __kmp_free_fast_memory(kmp_info_t *th) {
  // Assuming BGET is the underlying allocator, walk through its structures...
  int bin;
  thr_data_t *thr = get_thr_data(th);
  void **lst = NULL;

  KE_TRACE(
      5, ("__kmp_free_fast_memory: Called T#%d\n", __kmp_gtid_from_thread(th)));

  __kmp_bget_dequeue(th); // Release any queued buffers

  // Dig through free lists and extract all allocated blocks
  for (bin = 0; bin < MAX_BGET_BINS; ++bin) {
    bfhead_t *b = thr->freelist[bin].ql.flink;
    while (b != &thr->freelist[bin]) {
      // a tagged bthr pointer means the buffer is an allocated address
      if ((kmp_uintptr_t)b->bh.bb.bthr & 1) {
        // link b into lst (overwrites bthr, but keeps flink for now)
        *((void **)b) = lst;
        lst = (void **)b; // push b onto lst
      }
      b = b->ql.flink; // get next buffer
    }
  }
  while (lst != NULL) {
    void *next = *lst;
    KE_TRACE(10, ("__kmp_free_fast_memory: freeing %p, next=%p th %p (%d)\n",
                  lst, next, th, __kmp_gtid_from_thread(th)));
    (*thr->relfcn)(lst);
#if BufStats
    // count blocks to prevent problems in __kmp_finalize_bget()
    thr->numprel++; /* Nr of expansion block releases */
    thr->numpblk--; /* Total number of blocks */
#endif
    lst = (void **)next;
  }

  KE_TRACE(
      5, ("__kmp_free_fast_memory: Freed T#%d\n", __kmp_gtid_from_thread(th)));
}

#endif // USE_FAST_MEMORY