1 /* sl_malloc.c - malloc routines using a per-thread slab */
2 /* $OpenLDAP$ */
3 /* This work is part of OpenLDAP Software <http://www.openldap.org/>.
4 *
5 * Copyright 2003-2021 The OpenLDAP Foundation.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted only as authorized by the OpenLDAP
10 * Public License.
11 *
12 * A copy of this license is available in the file LICENSE in the
13 * top-level directory of the distribution or, alternatively, at
14 * <http://www.OpenLDAP.org/license.html>.
15 */
16
17 #include "portable.h"
18
19 #include <stdio.h>
20 #include <ac/string.h>
21
22 #include "slap.h"
23
24 #ifdef USE_VALGRIND
25 /* Get debugging help from Valgrind */
26 #include <valgrind/memcheck.h>
27 #define VGMEMP_MARK(m,s) VALGRIND_MAKE_MEM_NOACCESS(m,s)
28 #define VGMEMP_CREATE(h,r,z) VALGRIND_CREATE_MEMPOOL(h,r,z)
29 #define VGMEMP_TRIM(h,a,s) VALGRIND_MEMPOOL_TRIM(h,a,s)
30 #define VGMEMP_ALLOC(h,a,s) VALGRIND_MEMPOOL_ALLOC(h,a,s)
31 #define VGMEMP_CHANGE(h,a,b,s) VALGRIND_MEMPOOL_CHANGE(h,a,b,s)
32 #else
33 #define VGMEMP_MARK(m,s)
34 #define VGMEMP_CREATE(h,r,z)
35 #define VGMEMP_TRIM(h,a,s)
36 #define VGMEMP_ALLOC(h,a,s)
37 #define VGMEMP_CHANGE(h,a,b,s)
38 #endif
39
40 /*
41 * This allocator returns temporary memory from a slab in a given memory
42 * context, aligned on a 2-int boundary. It cannot be used for data
43 * which will outlive the task allocating it.
44 *
45 * A new memory context attaches to the creator's thread context, if any.
46 * Threads cannot use other threads' memory contexts; there are no locks.
47 *
48 * The caller of slap_sl_malloc, usually a thread pool task, must
49 * slap_sl_free the memory before finishing: New tasks reuse the context
50 * and normally reset it, reclaiming memory left over from last task.
51 *
52 * The allocator helps memory fragmentation, speed and memory leaks.
53 * It is not (yet) reliable as a garbage collector:
54 *
55 * It falls back to context NULL - plain ber_memalloc() - when the
56 * context's slab is full. A reset does not reclaim such memory.
57 * Conversely, free/realloc of data not from the given context assumes
58 * context NULL. The data must not belong to another memory context.
59 *
60 * Code which has lost track of the current memory context can try
61 * slap_sl_context() or ch_malloc.c:ch_free/ch_realloc().
62 *
63 * Allocations cannot yet return failure. Like ch_malloc, they succeed
64 * or abort slapd. This will change, do fix code which assumes success.
65 */
66
67 /*
68 * The stack-based allocator stores (ber_len_t)sizeof(head+block) at
69 * allocated blocks' head - and in freed blocks also at the tail, marked
70 * by ORing *next* block's head with 1. Freed blocks are only reclaimed
71 * from the last block forward. This is fast, but when a block is never
72 * freed, older blocks will not be reclaimed until the slab is reset...
73 */
74
75 #ifdef SLAP_NO_SL_MALLOC /* Useful with memory debuggers like Valgrind */
76 enum { No_sl_malloc = 1 };
77 #else
78 enum { No_sl_malloc = 0 };
79 #endif
80
81 #define SLAP_SLAB_SOBLOCK 64
82
/* Free-list bookkeeping node for pool (buddy) mode.  Nodes are recycled
 * through sh_sopool and allocated in arrays of SLAP_SLAB_SOBLOCK. */
struct slab_object {
	void *so_ptr;		/* start of the chunk this node describes */
	int so_blockhead;	/* 1 if this node heads a ch_malloc'd node array
				 * (i.e. it is the pointer to eventually free) */
	LDAP_LIST_ENTRY(slab_object) so_link;
};
88
/* Per-thread slab descriptor.  Stack mode uses only sh_base/sh_last/sh_end;
 * pool (buddy) mode additionally uses the order maps and free lists. */
struct slab_heap {
	void *sh_base;		/* start of the slab */
	void *sh_last;		/* stack mode: first unused byte in the slab */
	void *sh_end;		/* one past the end of the slab */
	int sh_stack;		/* nonzero: stack allocator; zero: buddy allocator */
	int sh_maxorder;	/* pool mode: largest buddy order in use
				 * (derived from slab size in slap_sl_mem_create) */
	unsigned char **sh_map;	/* pool mode: one allocation bitmap per order */
	LDAP_LIST_HEAD(sh_freelist, slab_object) *sh_free; /* per-order free lists */
	LDAP_LIST_HEAD(sh_so, slab_object) sh_sopool;	/* recycled slab_objects */
};
99
enum {
	/* Alignment of returned blocks: max(sizeof(ber_len_t), 2*sizeof(int)),
	 * matching the "2-int boundary" promise in the header comment */
	Align = sizeof(ber_len_t) > 2*sizeof(int)
		? sizeof(ber_len_t) : 2*sizeof(int),
	/* log2(Align); only valid for power-of-two Align <= 32,
	 * verified by the assert in slap_sl_mem_init() */
	Align_log2 = 1 + (Align>2) + (Align>4) + (Align>8) + (Align>16),
	order_start = Align_log2 - 1,	/* smallest buddy order */
	pad = Align - 1			/* mask for rounding up to Align */
};
107
108 static struct slab_object * slap_replenish_sopool(struct slab_heap* sh);
109 #ifdef SLAPD_UNUSED
110 static void print_slheap(int level, void *ctx);
111 #endif
112
/* Keep memory context in a thread-local var */
/* The function's address doubles as a unique thread-pool key */
# define memctx_key ((void *) slap_sl_mem_init)
/* Attach memctx to thrctx; kfree(key,data) runs at thread teardown */
# define SET_MEMCTX(thrctx, memctx, kfree) \
	ldap_pvt_thread_pool_setkey(thrctx,memctx_key, memctx,kfree, NULL,NULL)
/* Fetch this thread's memctx; yields NULL when none was set */
# define GET_MEMCTX(thrctx, memctxp) \
	((void) (*(memctxp) = NULL), \
	 (void) ldap_pvt_thread_pool_getkey(thrctx,memctx_key, memctxp,NULL), \
	 *(memctxp))
121
/* Destroy the context, or if key==NULL clean it up for reuse. */
void
slap_sl_mem_destroy(
	void *key,
	void *data
)
{
	struct slab_heap *sh = data;
	struct slab_object *so;
	int i;

	if (!sh)
		return;

	if (!sh->sh_stack) {
		/* Pool mode: push every free-list node back onto sh_sopool,
		 * then release the per-order bitmaps and the list arrays. */
		for (i = 0; i <= sh->sh_maxorder - order_start; i++) {
			so = LDAP_LIST_FIRST(&sh->sh_free[i]);
			while (so) {
				struct slab_object *so_tmp = so;
				so = LDAP_LIST_NEXT(so, so_link);
				LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_tmp, so_link);
			}
			ch_free(sh->sh_map[i]);
		}
		ch_free(sh->sh_free);
		ch_free(sh->sh_map);

		/* Unlink nodes that live inside another node's array, so only
		 * block heads (the ch_malloc'd pointers) remain listed... */
		so = LDAP_LIST_FIRST(&sh->sh_sopool);
		while (so) {
			struct slab_object *so_tmp = so;
			so = LDAP_LIST_NEXT(so, so_link);
			if (!so_tmp->so_blockhead) {
				LDAP_LIST_REMOVE(so_tmp, so_link);
			}
		}
		/* ...then free each remaining head, releasing its whole
		 * SLAP_SLAB_SOBLOCK array (next pointer is read before free). */
		so = LDAP_LIST_FIRST(&sh->sh_sopool);
		while (so) {
			struct slab_object *so_tmp = so;
			so = LDAP_LIST_NEXT(so, so_link);
			ch_free(so_tmp);
		}
	}

	if (key != NULL) {
		/* Full destroy (thread teardown): release slab + descriptor */
		ber_memfree_x(sh->sh_base, NULL);
		ber_memfree_x(sh, NULL);
	}
}
170
/* Allocator vtable handed to liblber by slap_sl_mem_init() */
BerMemoryFunctions slap_sl_mfuncs =
	{ slap_sl_malloc, slap_sl_calloc, slap_sl_realloc, slap_sl_free };
173
174 void
slap_sl_mem_init()175 slap_sl_mem_init()
176 {
177 assert( Align == 1 << Align_log2 );
178
179 ber_set_option( NULL, LBER_OPT_MEMORY_FNS, &slap_sl_mfuncs );
180 }
181
/* Create, reset or just return the memory context of the current thread. */
void *
slap_sl_mem_create(
	ber_len_t size,
	int stack,
	void *thrctx,
	int new
)
{
	void *memctx;
	struct slab_heap *sh;
	ber_len_t size_shift;
	struct slab_object *so;
	char *base, *newptr;
	/* Padding so that (base + block head) lands on an Align boundary */
	enum { Base_offset = (unsigned) -sizeof(ber_len_t) % Align };

	sh = GET_MEMCTX(thrctx, &memctx);
	if ( sh && !new )
		return sh;

	/* Round up to doubleword boundary, then make room for initial
	 * padding, preserving expected available size for pool version */
	size = ((size + Align-1) & -Align) + Base_offset;

	if (!sh) {
		/* First use on this thread: allocate descriptor + slab */
		sh = ch_malloc(sizeof(struct slab_heap));
		base = ch_malloc(size);
		SET_MEMCTX(thrctx, sh, slap_sl_mem_destroy);
		VGMEMP_MARK(base, size);
		VGMEMP_CREATE(sh, 0, 0);
	} else {
		/* Reset for reuse; grow the slab if the caller asked for more */
		slap_sl_mem_destroy(NULL, sh);
		base = sh->sh_base;
		if (size > (ber_len_t) ((char *) sh->sh_end - base)) {
			newptr = ch_realloc(base, size);
			if ( newptr == NULL ) return NULL;
			VGMEMP_CHANGE(sh, base, newptr, size);
			base = newptr;
		}
		VGMEMP_TRIM(sh, base, 0);
	}
	sh->sh_base = base;
	sh->sh_end = base + size;

	/* Align (base + head of first block) == first returned block */
	base += Base_offset;
	size -= Base_offset;

	sh->sh_stack = stack;
	if (stack) {
		sh->sh_last = base;

	} else {
		/* Pool mode: set up the buddy allocator */
		int i, order = -1, order_end = -1;

		/* order_end = index of the highest set bit of size */
		size_shift = size - 1;
		do {
			order_end++;
		} while (size_shift >>= 1);
		order = order_end - order_start + 1;	/* number of orders */
		sh->sh_maxorder = order_end;

		sh->sh_free = (struct sh_freelist *)
			ch_malloc(order * sizeof(struct sh_freelist));
		for (i = 0; i < order; i++) {
			LDAP_LIST_INIT(&sh->sh_free[i]);
		}

		LDAP_LIST_INIT(&sh->sh_sopool);

		if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
			slap_replenish_sopool(sh);
		}
		/* The whole slab starts as one free block of max order */
		so = LDAP_LIST_FIRST(&sh->sh_sopool);
		LDAP_LIST_REMOVE(so, so_link);
		so->so_ptr = base;

		LDAP_LIST_INSERT_HEAD(&sh->sh_free[order-1], so, so_link);

		/* One allocation bitmap per order, at least one byte each */
		sh->sh_map = (unsigned char **)
			ch_malloc(order * sizeof(unsigned char *));
		for (i = 0; i < order; i++) {
			int shiftamt = order_start + 1 + i;
			int nummaps = size >> shiftamt;	/* blocks of this order */
			assert(nummaps);
			nummaps >>= 3;			/* 8 blocks per map byte */
			if (!nummaps) nummaps = 1;
			sh->sh_map[i] = (unsigned char *) ch_malloc(nummaps);
			memset(sh->sh_map[i], 0, nummaps);
		}
	}

	return sh;
}
276
/*
 * Assign memory context to thread context. Use NULL to detach
 * current memory context from thread. Future users must
 * know the context, since ch_free/slap_sl_context() cannot find it.
 */
void
slap_sl_mem_setctx(
	void *thrctx,
	void *memctx
)
{
	/* slap_sl_mem_destroy(key,data) will run at thread teardown */
	SET_MEMCTX(thrctx, memctx, slap_sl_mem_destroy);
}
290
/* Allocate size bytes from context ctx, or from the heap (ber_memalloc)
 * when ctx is NULL or the slab cannot satisfy the request.  Aborts slapd
 * on heap-allocation failure (see the header comment). */
void *
slap_sl_malloc(
	ber_len_t size,
	void *ctx
)
{
	struct slab_heap *sh = ctx;
	ber_len_t *ptr, *newptr;

	/* ber_set_option calls us like this */
	if (No_sl_malloc || !ctx) {
		newptr = ber_memalloc_x( size, NULL );
		if ( newptr ) return newptr;
		Debug(LDAP_DEBUG_ANY, "slap_sl_malloc of %lu bytes failed\n",
			(unsigned long) size );
		assert( 0 );
		exit( EXIT_FAILURE );
	}

	/* Add room for head, ensure room for tail when freed, and
	 * round up to doubleword boundary.  (!size bumps a 0-byte
	 * request so the result is never just the bare head.) */
	size = (size + sizeof(ber_len_t) + Align-1 + !size) & -Align;

	if (sh->sh_stack) {
		/* Stack mode: bump sh_last, stash head+block size in the head */
		if (size < (ber_len_t) ((char *) sh->sh_end - (char *) sh->sh_last)) {
			newptr = sh->sh_last;
			sh->sh_last = (char *) sh->sh_last + size;
			VGMEMP_ALLOC(sh, newptr, size);
			*newptr++ = size;
			return( (void *)newptr );
		}

		/* Slab full: fall through to ch_malloc of the payload size */
		size -= sizeof(ber_len_t);

	} else {
		struct slab_object *so_new, *so_left, *so_right;
		ber_len_t size_shift;
		unsigned long diff;
		int i, j, order = -1;

		/* order = index of the highest set bit of (size-1) */
		size_shift = size - 1;
		do {
			order++;
		} while (size_shift >>= 1);

		size -= sizeof(ber_len_t);

		/* Find the smallest order >= order with a free block */
		for (i = order; i <= sh->sh_maxorder &&
			LDAP_LIST_EMPTY(&sh->sh_free[i-order_start]); i++);

		if (i == order) {
			/* Exact-fit free block: mark it allocated in the map */
			so_new = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
			LDAP_LIST_REMOVE(so_new, so_link);
			ptr = so_new->so_ptr;
			diff = (unsigned long)((char*)ptr -
				(char*)sh->sh_base) >> (order + 1);
			sh->sh_map[order-order_start][diff>>3] |= (1 << (diff & 0x7));
			*ptr++ = size;
			LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_new, so_link);
			return((void*)ptr);
		} else if (i <= sh->sh_maxorder) {
			/* Split a larger block down, buddy-style, until we
			 * reach the wanted order; halves go on free lists */
			for (j = i; j > order; j--) {
				so_left = LDAP_LIST_FIRST(&sh->sh_free[j-order_start]);
				LDAP_LIST_REMOVE(so_left, so_link);
				if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
					slap_replenish_sopool(sh);
				}
				so_right = LDAP_LIST_FIRST(&sh->sh_sopool);
				LDAP_LIST_REMOVE(so_right, so_link);
				so_right->so_ptr = (void *)((char *)so_left->so_ptr + (1 << j));
				if (j == order + 1) {
					/* Last split: hand out the left half */
					ptr = so_left->so_ptr;
					diff = (unsigned long)((char*)ptr -
						(char*)sh->sh_base) >> (order+1);
					sh->sh_map[order-order_start][diff>>3] |=
						(1 << (diff & 0x7));
					*ptr++ = size;
					LDAP_LIST_INSERT_HEAD(
						&sh->sh_free[j-1-order_start], so_right, so_link);
					LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_left, so_link);
					return((void*)ptr);
				} else {
					LDAP_LIST_INSERT_HEAD(
						&sh->sh_free[j-1-order_start], so_right, so_link);
					LDAP_LIST_INSERT_HEAD(
						&sh->sh_free[j-1-order_start], so_left, so_link);
				}
			}
		}
		/* FIXME: missing return; guessing we failed... */
	}

	/* Fallback: plain heap allocation, not reclaimed by slab reset */
	Debug(LDAP_DEBUG_TRACE,
		"sl_malloc %lu: ch_malloc\n",
		(unsigned long) size );
	return ch_malloc(size);
}
388
389 #define LIM_SQRT(t) /* some value < sqrt(max value of unsigned type t) */ \
390 ((0UL|(t)-1) >>31>>31 > 1 ? ((t)1 <<32) - 1 : \
391 (0UL|(t)-1) >>31 ? 65535U : (0UL|(t)-1) >>15 ? 255U : 15U)
392
393 void *
slap_sl_calloc(ber_len_t n,ber_len_t size,void * ctx)394 slap_sl_calloc( ber_len_t n, ber_len_t size, void *ctx )
395 {
396 void *newptr;
397 ber_len_t total = n * size;
398
399 /* The sqrt test is a slight optimization: often avoids the division */
400 if ((n | size) <= LIM_SQRT(ber_len_t) || n == 0 || total/n == size) {
401 newptr = slap_sl_malloc( total, ctx );
402 memset( newptr, 0, n*size );
403 } else {
404 Debug(LDAP_DEBUG_ANY, "slap_sl_calloc(%lu,%lu) out of range\n",
405 (unsigned long) n, (unsigned long) size );
406 assert(0);
407 exit(EXIT_FAILURE);
408 }
409 return newptr;
410 }
411
/* Resize ptr to size bytes.  NULL ptr acts as malloc, size 0 as free.
 * Memory outside the slab is handed to ber_memrealloc_x. */
void *
slap_sl_realloc(void *ptr, ber_len_t size, void *ctx)
{
	struct slab_heap *sh = ctx;
	ber_len_t oldsize, *p = (ber_len_t *) ptr, *nextp;
	void *newptr;

	if (ptr == NULL)
		return slap_sl_malloc(size, ctx);

	/* Not our memory? */
	if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
		/* Like ch_realloc(), except not trying a new context */
		newptr = ber_memrealloc_x(ptr, size, NULL);
		if (newptr) {
			return newptr;
		}
		Debug(LDAP_DEBUG_ANY, "slap_sl_realloc of %lu bytes failed\n",
			(unsigned long) size );
		assert(0);
		exit( EXIT_FAILURE );
	}

	if (size == 0) {
		slap_sl_free(ptr, ctx);
		return NULL;
	}

	/* Stored head: sizeof(head+block), possibly with bit 0 set by
	 * slap_sl_free of the preceding block */
	oldsize = p[-1];

	if (sh->sh_stack) {
		/* Add room for head, round up to doubleword boundary */
		size = (size + sizeof(ber_len_t) + Align-1) & -Align;

		p--;

		/* Never shrink blocks */
		if (size <= oldsize) {
			return ptr;
		}

		oldsize &= -2;	/* mask off the "prev block freed" bit */
		nextp = (ber_len_t *) ((char *) p + oldsize);

		/* If reallocing the last block, try to grow it */
		if (nextp == sh->sh_last) {
			if (size < (ber_len_t) ((char *) sh->sh_end - (char *) p)) {
				sh->sh_last = (char *) p + size;
				/* keep the mark bit while updating the size */
				p[0] = (p[0] & 1) | size;
				return ptr;
			}

			/* Nowhere to grow, need to alloc and copy */
		} else {
			/* Slight optimization of the final realloc variant */
			newptr = slap_sl_malloc(size-sizeof(ber_len_t), ctx);
			AC_MEMCPY(newptr, ptr, oldsize-sizeof(ber_len_t));
			/* Not last block, can just mark old region as free */
			nextp[-1] = oldsize;
			nextp[0] |= 1;
			return newptr;
		}

		size -= sizeof(ber_len_t);
		oldsize -= sizeof(ber_len_t);

	} else if (oldsize > size) {
		/* Pool mode shrink: only copy the requested amount */
		oldsize = size;
	}

	/* Generic path: allocate, copy, free the old block */
	newptr = slap_sl_malloc(size, ctx);
	AC_MEMCPY(newptr, ptr, oldsize);
	slap_sl_free(ptr, ctx);
	return newptr;
}
487
/* Free ptr back to context ctx.  Memory outside the slab goes to
 * ber_memfree_x.  Stack mode marks blocks free and reclaims only from
 * the tail; pool mode does buddy coalescing. */
void
slap_sl_free(void *ptr, void *ctx)
{
	struct slab_heap *sh = ctx;
	ber_len_t size;
	ber_len_t *p = ptr, *nextp, *tmpp;

	if (!ptr)
		return;

	if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
		ber_memfree_x(ptr, NULL);
		return;
	}

	size = *(--p);

	if (sh->sh_stack) {
		size &= -2;	/* mask off the "prev block freed" bit */
		nextp = (ber_len_t *) ((char *) p + size);
		if (sh->sh_last != nextp) {
			/* Mark it free: tail = size, head of next block |= 1 */
			nextp[-1] = size;
			nextp[0] |= 1;
			/* We can't tell Valgrind about it yet, because we
			 * still need read/write access to this block for
			 * when we eventually get to reclaim it.
			 */
		} else {
			/* Reclaim freed block(s) off tail */
			while (*p & 1) {
				p = (ber_len_t *) ((char *) p - p[-1]);
			}
			sh->sh_last = p;
			VGMEMP_TRIM(sh, sh->sh_base,
				(char *) sh->sh_last - (char *) sh->sh_base);
		}

	} else {
		/* Pool mode: buddy free with upward coalescing */
		int size_shift, order_size;
		struct slab_object *so;
		unsigned long diff;
		int i, inserted = 0, order = -1;

		/* Recover the block's order from the stored payload size */
		size_shift = size + sizeof(ber_len_t) - 1;
		do {
			order++;
		} while (size_shift >>= 1);

		/* Walk up the orders, merging with the buddy while it is free */
		for (i = order, tmpp = p; i <= sh->sh_maxorder; i++) {
			order_size = 1 << (i+1);
			diff = (unsigned long)((char*)tmpp - (char*)sh->sh_base) >> (i+1);
			/* clear this block's "allocated" bit at order i */
			sh->sh_map[i-order_start][diff>>3] &= (~(1 << (diff & 0x7)));
			if (diff == ((diff>>1)<<1)) {
				/* Even index: the buddy is the following block */
				if (!(sh->sh_map[i-order_start][(diff+1)>>3] &
					(1<<((diff+1)&0x7)))) {
					/* Buddy free too: pull it (and any stale
					 * node for tmpp) off this order's list */
					so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
					while (so) {
						if ((char*)so->so_ptr == (char*)tmpp) {
							LDAP_LIST_REMOVE( so, so_link );
						} else if ((char*)so->so_ptr ==
							(char*)tmpp + order_size) {
							LDAP_LIST_REMOVE(so, so_link);
							break;
						}
						so = LDAP_LIST_NEXT(so, so_link);
					}
					if (so) {
						if (i < sh->sh_maxorder) {
							/* promote merged block */
							inserted = 1;
							so->so_ptr = tmpp;
							LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start+1],
								so, so_link);
						}
						continue;
					} else {
						/* No buddy node found: free here */
						if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
							slap_replenish_sopool(sh);
						}
						so = LDAP_LIST_FIRST(&sh->sh_sopool);
						LDAP_LIST_REMOVE(so, so_link);
						so->so_ptr = tmpp;
						LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
							so, so_link);
						break;

						/* NOTE(review): unreachable after break */
						Debug(LDAP_DEBUG_TRACE, "slap_sl_free: "
							"free object not found while bit is clear.\n" );
						assert(so != NULL);

					}
				} else {
					/* Buddy still allocated: free at this order */
					if (!inserted) {
						if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
							slap_replenish_sopool(sh);
						}
						so = LDAP_LIST_FIRST(&sh->sh_sopool);
						LDAP_LIST_REMOVE(so, so_link);
						so->so_ptr = tmpp;
						LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
							so, so_link);
					}
					break;
				}
			} else {
				/* Odd index: the buddy is the preceding block */
				if (!(sh->sh_map[i-order_start][(diff-1)>>3] &
					(1<<((diff-1)&0x7)))) {
					so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
					while (so) {
						if ((char*)so->so_ptr == (char*)tmpp) {
							LDAP_LIST_REMOVE(so, so_link);
						} else if ((char*)tmpp == (char *)so->so_ptr + order_size) {
							LDAP_LIST_REMOVE(so, so_link);
							/* merged block starts at buddy */
							tmpp = so->so_ptr;
							break;
						}
						so = LDAP_LIST_NEXT(so, so_link);
					}
					if (so) {
						if (i < sh->sh_maxorder) {
							inserted = 1;
							LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start+1], so, so_link);
							continue;
						}
					} else {
						if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
							slap_replenish_sopool(sh);
						}
						so = LDAP_LIST_FIRST(&sh->sh_sopool);
						LDAP_LIST_REMOVE(so, so_link);
						so->so_ptr = tmpp;
						LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
							so, so_link);
						break;

						/* NOTE(review): unreachable after break */
						Debug(LDAP_DEBUG_TRACE, "slap_sl_free: "
							"free object not found while bit is clear.\n" );
						assert(so != NULL);

					}
				} else {
					/* Buddy still allocated: free at this order */
					if ( !inserted ) {
						if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
							slap_replenish_sopool(sh);
						}
						so = LDAP_LIST_FIRST(&sh->sh_sopool);
						LDAP_LIST_REMOVE(so, so_link);
						so->so_ptr = tmpp;
						LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
							so, so_link);
					}
					break;
				}
			}
		}
	}
}
645
646 void
slap_sl_release(void * ptr,void * ctx)647 slap_sl_release( void *ptr, void *ctx )
648 {
649 struct slab_heap *sh = ctx;
650 if ( sh && ptr >= sh->sh_base && ptr <= sh->sh_end )
651 sh->sh_last = ptr;
652 }
653
654 void *
slap_sl_mark(void * ctx)655 slap_sl_mark( void *ctx )
656 {
657 struct slab_heap *sh = ctx;
658 return sh->sh_last;
659 }
660
661 /*
662 * Return the memory context of the current thread if the given block of
663 * memory belongs to it, otherwise return NULL.
664 */
665 void *
slap_sl_context(void * ptr)666 slap_sl_context( void *ptr )
667 {
668 void *memctx;
669 struct slab_heap *sh;
670
671 if ( slapMode & SLAP_TOOL_MODE ) return NULL;
672
673 sh = GET_MEMCTX(ldap_pvt_thread_pool_context(), &memctx);
674 if (sh && ptr >= sh->sh_base && ptr <= sh->sh_end) {
675 return sh;
676 }
677 return NULL;
678 }
679
680 static struct slab_object *
slap_replenish_sopool(struct slab_heap * sh)681 slap_replenish_sopool(
682 struct slab_heap* sh
683 )
684 {
685 struct slab_object *so_block;
686 int i;
687
688 so_block = (struct slab_object *)ch_malloc(
689 SLAP_SLAB_SOBLOCK * sizeof(struct slab_object));
690
691 if ( so_block == NULL ) {
692 return NULL;
693 }
694
695 so_block[0].so_blockhead = 1;
696 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, &so_block[0], so_link);
697 for (i = 1; i < SLAP_SLAB_SOBLOCK; i++) {
698 so_block[i].so_blockhead = 0;
699 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, &so_block[i], so_link );
700 }
701
702 return so_block;
703 }
704
#ifdef SLAPD_UNUSED
/* Debug aid: dump a pool-mode slab's per-order bitmaps and free lists */
static void
print_slheap(int level, void *ctx)
{
	struct slab_heap *sh = ctx;
	struct slab_object *so;
	int ord, j, printed;

	if (sh == NULL) {
		Debug(level, "NULL memctx\n" );
		return;
	}

	Debug(level, "sh->sh_maxorder=%d\n", sh->sh_maxorder );

	for (ord = order_start; ord <= sh->sh_maxorder; ord++) {
		Debug(level, "order=%d\n", ord );

		printed = 0;
		for (j = 0; j < (1<<(sh->sh_maxorder-ord))/8; j++) {
			Debug(level, "%02x ", sh->sh_map[ord-order_start][j] );
			printed = 1;
		}
		if (!printed) {
			/* map smaller than a byte: show its only byte */
			Debug(level, "%02x ", sh->sh_map[ord-order_start][0] );
		}
		Debug(level, "\n" );

		Debug(level, "free list:\n" );
		for (so = LDAP_LIST_FIRST(&sh->sh_free[ord-order_start]);
			so != NULL;
			so = LDAP_LIST_NEXT(so, so_link)) {
			Debug(level, "%p\n", so->so_ptr );
		}
	}
}
#endif
740