/*-------------------------------------------------------------------------
 *
 * slab.c
 *	  SLAB allocator definitions.
 *
 * SLAB is a MemoryContext implementation designed for cases where large
 * numbers of equally-sized objects are allocated (and freed).
 *
 *
 * Portions Copyright (c) 2017, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/slab.c
 *
 *
 * NOTE:
 *	The constant allocation size allows significant simplification and various
 *	optimizations over more general purpose allocators. The blocks are carved
 *	into chunks of exactly the right size (plus alignment), not wasting any
 *	memory.
 *
 *	The information about free chunks is maintained both at the block level
 *	and at the global (context) level. This is possible as the chunk size
 *	(and thus also the number of chunks per block) is fixed.
 *
 *	On each block, free chunks are tracked in a simple linked list. The
 *	contents of each free chunk are replaced with the index of the next free
 *	chunk, forming a very simple linked list. Each block also contains a
 *	counter of free chunks. Combined with the block-level freelist, this
 *	makes it trivial to eventually free the whole block.
 *
 *	At the context level, we use 'freelist' to track blocks grouped by number
 *	of free chunks, starting with blocks having a single allocated chunk, and
 *	with completely full blocks at the tail.
 *
 *	This also allows various optimizations - for example when searching for
 *	a free chunk, the allocator reuses space from the fullest blocks first,
 *	in the hope that some of the less full blocks will become completely
 *	empty (and be returned back to the OS).
 *
 *	For each block, we maintain a pointer to the first free chunk - this is
 *	quite cheap and allows us to skip all the preceding used chunks,
 *	eliminating a significant number of lookups in many common usage
 *	patterns. In the worst case this performs as if the pointer was not
 *	maintained.
 *
 *	We cache the freelist index for the blocks with the fewest free chunks
 *	(minFreeChunks), so that we don't have to search the freelist on every
 *	SlabAlloc() call, which is quite expensive.
 *
 *-------------------------------------------------------------------------
 */
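
/*
 * A worked example of the sizing arithmetic described above, assuming a
 * 64-bit build with 8-byte MAXALIGN (exact figures vary by platform): with
 * blockSize = 8192 and chunkSize = 64, the chunk header (SlabChunk, two
 * pointers) is 16 bytes, so fullChunkSize = MAXALIGN(16 + 64) = 80. The
 * block header (SlabBlock) is 24 bytes, so a block holds
 * (8192 - 24) / 80 = 102 chunks, and the context-level freelist has 103
 * entries (free-chunk counts 0 through 102).
 */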

#include "postgres.h"

#include "lib/ilist.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"


/*
 * SlabContext is a specialized implementation of MemoryContext.
 */
typedef struct SlabContext
{
	MemoryContextData header;	/* Standard memory-context fields */
	/* Allocation parameters for this context: */
	Size		chunkSize;		/* chunk size */
	Size		fullChunkSize;	/* chunk size including header and alignment */
	Size		blockSize;		/* block size */
	int			chunksPerBlock; /* number of chunks per block */
	int			minFreeChunks;	/* min number of free chunks in any block */
	int			nblocks;		/* number of blocks allocated */
#ifdef MEMORY_CONTEXT_CHECKING
	bool	   *freechunks;		/* bitmap of free chunks in a block */
#endif
	/* blocks with free space, grouped by number of free chunks: */
	dlist_head	freelist[FLEXIBLE_ARRAY_MEMBER];
} SlabContext;
79 
80 /*
81  * SlabBlock
82  *		Structure of a single block in SLAB allocator.
83  *
84  * node: doubly-linked list of blocks in global freelist
85  * nfree: number of free chunks in this block
86  * firstFreeChunk: index of the first free chunk
87  */
88 typedef struct SlabBlock
89 {
90 	dlist_node	node;			/* doubly-linked list */
91 	int			nfree;			/* number of free chunks */
92 	int			firstFreeChunk; /* index of the first free chunk in the block */
93 } SlabBlock;
94 
95 /*
96  * SlabChunk
97  *		The prefix of each piece of memory in an SlabBlock
98  */
99 typedef struct SlabChunk
100 {
101 	/* block owning this chunk */
102 	void	   *block;
103 	SlabContext *slab;			/* owning context */
104 	/* there must not be any padding to reach a MAXALIGN boundary here! */
105 } SlabChunk;
106 
107 
#define SlabPointerGetChunk(ptr)	\
	((SlabChunk *) (((char *) (ptr)) - sizeof(SlabChunk)))
#define SlabChunkGetPointer(chk)	\
	((void *) (((char *) (chk)) + sizeof(SlabChunk)))
#define SlabBlockGetChunk(slab, block, idx) \
	((SlabChunk *) ((char *) (block) + sizeof(SlabBlock)	\
					+ ((idx) * (slab)->fullChunkSize)))
#define SlabBlockStart(block)	\
	((char *) (block) + sizeof(SlabBlock))
#define SlabChunkIndex(slab, block, chunk)	\
	(((char *) (chunk) - SlabBlockStart(block)) / (slab)->fullChunkSize)
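
/*
 * To illustrate the address arithmetic (an informal sketch of the block
 * layout, not code from this file):
 *
 *		[SlabBlock header][chunk 0][chunk 1] ... [chunk chunksPerBlock-1]
 *
 * Each chunk slot is fullChunkSize bytes, so SlabBlockGetChunk(slab, block, 2)
 * yields block + sizeof(SlabBlock) + 2 * fullChunkSize, and SlabChunkIndex()
 * inverts that computation.
 */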

/*
 * These functions implement the MemoryContext API for Slab contexts.
 */
static void *SlabAlloc(MemoryContext context, Size size);
static void SlabFree(MemoryContext context, void *pointer);
static void *SlabRealloc(MemoryContext context, void *pointer, Size size);
static void SlabInit(MemoryContext context);
static void SlabReset(MemoryContext context);
static void SlabDelete(MemoryContext context);
static Size SlabGetChunkSpace(MemoryContext context, void *pointer);
static bool SlabIsEmpty(MemoryContext context);
static void SlabStats(MemoryContext context, int level, bool print,
		  MemoryContextCounters *totals);
#ifdef MEMORY_CONTEXT_CHECKING
static void SlabCheck(MemoryContext context);
#endif

/*
 * This is the virtual function table for Slab contexts.
 */
static MemoryContextMethods SlabMethods = {
	SlabAlloc,
	SlabFree,
	SlabRealloc,
	SlabInit,
	SlabReset,
	SlabDelete,
	SlabGetChunkSpace,
	SlabIsEmpty,
	SlabStats
#ifdef MEMORY_CONTEXT_CHECKING
	,SlabCheck
#endif
};

/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
#define SlabFreeInfo(_cxt, _chunk) \
			fprintf(stderr, "SlabFree: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_cxt)->chunkSize)
#define SlabAllocInfo(_cxt, _chunk) \
			fprintf(stderr, "SlabAlloc: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_cxt)->chunkSize)
#else
#define SlabFreeInfo(_cxt, _chunk)
#define SlabAllocInfo(_cxt, _chunk)
#endif


171 
172 /*
173  * SlabContextCreate
174  *		Create a new Slab context.
175  *
176  * parent: parent context, or NULL if top-level context
177  * name: name of context (for debugging --- string will be copied)
178  * blockSize: allocation block size
179  * chunkSize: allocation chunk size
180  *
181  * The chunkSize may not exceed:
182  *		MAXALIGN_DOWN(SIZE_MAX) - MAXALIGN(sizeof(SlabBlock)) - SLAB_CHUNKHDRSZ
183  *
184  */
185 MemoryContext
SlabContextCreate(MemoryContext parent,const char * name,Size blockSize,Size chunkSize)186 SlabContextCreate(MemoryContext parent,
187 				  const char *name,
188 				  Size blockSize,
189 				  Size chunkSize)
190 {
191 	int			chunksPerBlock;
192 	Size		fullChunkSize;
193 	Size		freelistSize;
194 	Size		headerSize;
195 	SlabContext *slab;
196 
197 	StaticAssertStmt(offsetof(SlabChunk, slab) + sizeof(MemoryContext) ==
198 					 MAXALIGN(sizeof(SlabChunk)),
199 					 "padding calculation in SlabChunk is wrong");
200 
201 	/* Make sure the linked list node fits inside a freed chunk */
202 	if (chunkSize < sizeof(int))
203 		chunkSize = sizeof(int);
204 
205 	/* chunk, including SLAB header (both addresses nicely aligned) */
206 	fullChunkSize = MAXALIGN(sizeof(SlabChunk) + MAXALIGN(chunkSize));
207 
208 	/* Make sure the block can store at least one chunk. */
209 	if (blockSize - sizeof(SlabBlock) < fullChunkSize)
210 		elog(ERROR, "block size %zu for slab is too small for %zu chunks",
211 			 blockSize, chunkSize);
212 
213 	/* Compute maximum number of chunks per block */
214 	chunksPerBlock = (blockSize - sizeof(SlabBlock)) / fullChunkSize;
215 
216 	/* The freelist starts with 0, ends with chunksPerBlock. */
217 	freelistSize = sizeof(dlist_head) * (chunksPerBlock + 1);
218 
219 	/* if we can't fit at least one chunk into the block, we're hosed */
220 	Assert(chunksPerBlock > 0);
221 
222 	/* make sure the chunks actually fit on the block	*/
223 	Assert((fullChunkSize * chunksPerBlock) + sizeof(SlabBlock) <= blockSize);
224 
225 	/* Size of the memory context header */
226 	headerSize = offsetof(SlabContext, freelist) + freelistSize;
227 
228 #ifdef MEMORY_CONTEXT_CHECKING
229 	/*
230 	 * With memory checking, we need to allocate extra space for the bitmap
231 	 * of free chunks. The bitmap is an array of bools, so we don't need to
232 	 * worry about alignment.
233 	 */
234 	headerSize += chunksPerBlock * sizeof(bool);
235 #endif
236 
237 	/* Do the type-independent part of context creation */
238 	slab = (SlabContext *)
239 		MemoryContextCreate(T_SlabContext,
240 							headerSize,
241 							&SlabMethods,
242 							parent,
243 							name);
244 
245 	slab->blockSize = blockSize;
246 	slab->chunkSize = chunkSize;
247 	slab->fullChunkSize = fullChunkSize;
248 	slab->chunksPerBlock = chunksPerBlock;
249 	slab->nblocks = 0;
250 	slab->minFreeChunks = 0;
251 
252 #ifdef MEMORY_CONTEXT_CHECKING
253 	/* set the freechunks pointer right after the freelists array */
254 	slab->freechunks
255 		= (bool *) slab + offsetof(SlabContext, freelist) + freelistSize;
256 #endif
257 
258 	return (MemoryContext) slab;
259 }
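
/*
 * A minimal usage sketch (hypothetical caller, not part of this file; the
 * context name and the MyTuple type are illustrative only):
 *
 *		MemoryContext slab;
 *		MyTuple    *tup;
 *
 *		slab = SlabContextCreate(CurrentMemoryContext,
 *								 "my tuples",
 *								 SLAB_DEFAULT_BLOCK_SIZE,
 *								 sizeof(MyTuple));
 *
 *		tup = (MyTuple *) MemoryContextAlloc(slab, sizeof(MyTuple));
 *		...
 *		pfree(tup);					-- returns the chunk to its block
 *		MemoryContextDelete(slab);	-- releases all remaining blocks
 *
 * Note that every allocation request against the context must be exactly
 * sizeof(MyTuple) bytes, or SlabAlloc() will raise an error.
 */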

/*
 * SlabInit
 *		Context-type-specific initialization routine.
 */
static void
SlabInit(MemoryContext context)
{
	int			i;
	SlabContext *slab = castNode(SlabContext, context);

	Assert(slab);

	/* initialize the freelist slots */
	for (i = 0; i < (slab->chunksPerBlock + 1); i++)
		dlist_init(&slab->freelist[i]);
}

/*
 * SlabReset
 *		Frees all memory which is allocated in the given set.
 *
 * The code simply frees all the blocks in the context - we don't keep any
 * keeper blocks or anything like that.
 */
static void
SlabReset(MemoryContext context)
{
	int			i;
	SlabContext *slab = castNode(SlabContext, context);

	Assert(slab);

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	SlabCheck(context);
#endif

	/* walk over freelists and free the blocks */
	for (i = 0; i <= slab->chunksPerBlock; i++)
	{
		dlist_mutable_iter miter;

		dlist_foreach_modify(miter, &slab->freelist[i])
		{
			SlabBlock  *block = dlist_container(SlabBlock, node, miter.cur);

			dlist_delete(miter.cur);

#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(block, slab->blockSize);
#endif
			free(block);
			slab->nblocks--;
		}
	}

	slab->minFreeChunks = 0;

	Assert(slab->nblocks == 0);
}

/*
 * SlabDelete
 *		Frees all memory which is allocated in the given slab, in preparation
 *		for deletion of the slab. We simply call SlabReset().
 */
static void
SlabDelete(MemoryContext context)
{
	/* just reset the context */
	SlabReset(context);
}

/*
 * SlabAlloc
 *		Returns a pointer to allocated memory of the given size, or NULL if
 *		the request could not be completed; memory is added to the slab.
 */
static void *
SlabAlloc(MemoryContext context, Size size)
{
	SlabContext *slab = castNode(SlabContext, context);
	SlabBlock  *block;
	SlabChunk  *chunk;
	int			idx;

	Assert(slab);

	Assert((slab->minFreeChunks >= 0) &&
		   (slab->minFreeChunks < slab->chunksPerBlock));

	/* make sure we only allow requests of the correct size */
	if (size != slab->chunkSize)
		elog(ERROR, "unexpected alloc chunk size %zu (expected %zu)",
			 size, slab->chunkSize);

	/*
	 * If there are no free chunks in any existing block, create a new block
	 * and put it on the last freelist bucket.
	 *
	 * slab->minFreeChunks == 0 means there are no blocks with free chunks,
	 * thanks to how minFreeChunks is updated at the end of SlabAlloc().
	 */
	if (slab->minFreeChunks == 0)
	{
		block = (SlabBlock *) malloc(slab->blockSize);

		if (block == NULL)
			return NULL;

		block->nfree = slab->chunksPerBlock;
		block->firstFreeChunk = 0;

		/*
		 * Put all the chunks on a freelist. Walk the chunks and point each
		 * one to the next one.
		 */
		for (idx = 0; idx < slab->chunksPerBlock; idx++)
		{
			chunk = SlabBlockGetChunk(slab, block, idx);
			*(int32 *) SlabChunkGetPointer(chunk) = (idx + 1);
		}
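
		/*
		 * For example, with chunksPerBlock = 4 the chain now reads
		 * 0 -> 1 -> 2 -> 3 -> 4, where the final value (== chunksPerBlock)
		 * is an out-of-range index serving as the end-of-list marker.
		 */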

		/*
		 * And add it to the last freelist with all chunks empty.
		 *
		 * We know there are no blocks in the freelist, otherwise we wouldn't
		 * need a new block.
		 */
		Assert(dlist_is_empty(&slab->freelist[slab->chunksPerBlock]));

		dlist_push_head(&slab->freelist[slab->chunksPerBlock], &block->node);

		slab->minFreeChunks = slab->chunksPerBlock;
		slab->nblocks += 1;
	}

	/* grab the block from the freelist (even a just-created block is there) */
	block = dlist_head_element(SlabBlock, node,
							   &slab->freelist[slab->minFreeChunks]);

	/* make sure we actually got a valid block, with matching nfree */
	Assert(block != NULL);
	Assert(slab->minFreeChunks == block->nfree);
	Assert(block->nfree > 0);

	/* we know the index of the first free chunk in the block */
	idx = block->firstFreeChunk;

	/* make sure the chunk index is valid */
	Assert((idx >= 0) && (idx < slab->chunksPerBlock));

	/* compute the chunk location from the block start (after the header) */
	chunk = SlabBlockGetChunk(slab, block, idx);

	/*
	 * Update the block nfree count, and also the minFreeChunks as we've
	 * decreased nfree for a block with the minimum number of free chunks
	 * (because that's how we chose the block).
	 */
	block->nfree--;
	slab->minFreeChunks = block->nfree;

	/*
	 * Remove the chunk from the freelist head. The index of the next free
	 * chunk is stored in the chunk itself.
	 */
	VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
	block->firstFreeChunk = *(int32 *) SlabChunkGetPointer(chunk);

	Assert(block->firstFreeChunk >= 0);
	Assert(block->firstFreeChunk <= slab->chunksPerBlock);

	Assert((block->nfree != 0 &&
			block->firstFreeChunk < slab->chunksPerBlock) ||
		   (block->nfree == 0 &&
			block->firstFreeChunk == slab->chunksPerBlock));

	/* move the whole block to the right place in the freelist */
	dlist_delete(&block->node);
	dlist_push_head(&slab->freelist[block->nfree], &block->node);

	/*
	 * And finally update minFreeChunks, i.e. the index of the block with the
	 * lowest number of free chunks. We only need to do that when the block
	 * got full (otherwise we know the current block is the right one). We'll
	 * simply walk the freelist until we find a non-empty entry.
	 */
	if (slab->minFreeChunks == 0)
	{
		for (idx = 1; idx <= slab->chunksPerBlock; idx++)
		{
			if (dlist_is_empty(&slab->freelist[idx]))
				continue;

			/* found a non-empty freelist */
			slab->minFreeChunks = idx;
			break;
		}
	}

	if (slab->minFreeChunks == slab->chunksPerBlock)
		slab->minFreeChunks = 0;

	/* Prepare to initialize the chunk header. */
	VALGRIND_MAKE_MEM_UNDEFINED(chunk, sizeof(SlabChunk));

	chunk->block = (void *) block;
	chunk->slab = slab;

#ifdef MEMORY_CONTEXT_CHECKING
	/* set a sentinel to catch clobber of "unused" space */
	if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
	{
		set_sentinel(SlabChunkGetPointer(chunk), size);
		VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) +
								   sizeof(SlabChunk) + slab->chunkSize,
								   slab->fullChunkSize -
								   (slab->chunkSize + sizeof(SlabChunk)));
	}
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
	/* fill the allocated space with junk */
	randomize_mem((char *) SlabChunkGetPointer(chunk), size);
#endif

	SlabAllocInfo(slab, chunk);
	return SlabChunkGetPointer(chunk);
}

/*
 * SlabFree
 *		Frees allocated memory; memory is removed from the slab.
 */
static void
SlabFree(MemoryContext context, void *pointer)
{
	int			idx;
	SlabContext *slab = castNode(SlabContext, context);
	SlabChunk  *chunk = SlabPointerGetChunk(pointer);
	SlabBlock  *block = chunk->block;

	SlabFreeInfo(slab, chunk);

#ifdef MEMORY_CONTEXT_CHECKING
	/* Test for someone scribbling on unused space in chunk */
	if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
		if (!sentinel_ok(pointer, slab->chunkSize))
			elog(WARNING, "detected write past chunk end in %s %p",
				 slab->header.name, chunk);
#endif

	/* compute index of the chunk with respect to block start */
	idx = SlabChunkIndex(slab, block, chunk);

	/* add chunk to freelist, and update block nfree count */
	*(int32 *) pointer = block->firstFreeChunk;
	block->firstFreeChunk = idx;
	block->nfree++;

	Assert(block->nfree > 0);
	Assert(block->nfree <= slab->chunksPerBlock);

#ifdef CLOBBER_FREED_MEMORY
	/* XXX don't wipe the int32 index, used for block-level freelist */
	wipe_mem((char *) pointer + sizeof(int32),
			 slab->chunkSize - sizeof(int32));
#endif

	/* remove the block from a freelist */
	dlist_delete(&block->node);

	/*
	 * See if we need to update the minFreeChunks field for the slab - we
	 * only need to do that if the block had that number of free chunks
	 * before we freed one. In that case, we check if there still are blocks
	 * in the original freelist and we either keep the current value (if
	 * there still are blocks) or increment it by one (the new block is still
	 * the one with the minimum number of free chunks).
	 *
	 * The one exception is when the block becomes completely free - in that
	 * case we will free it, so we can't use it for minFreeChunks. It however
	 * means there are no more blocks with free chunks.
	 */
	if (slab->minFreeChunks == (block->nfree - 1))
	{
		/* Have we removed the last block from the freelist? */
		if (dlist_is_empty(&slab->freelist[slab->minFreeChunks]))
		{
			/* but if we made the block entirely free, we'll free it */
			if (block->nfree == slab->chunksPerBlock)
				slab->minFreeChunks = 0;
			else
				slab->minFreeChunks++;
		}
	}
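
	/*
	 * For example: if minFreeChunks was 1 and we just freed a chunk in a
	 * block that had nfree == 1, then freelist[1] may now be empty. Unless
	 * the block became entirely free, it now has two free chunks and is the
	 * new minimum, so minFreeChunks advances to 2.
	 */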

	/* If the block is now completely empty, free it. */
	if (block->nfree == slab->chunksPerBlock)
	{
		free(block);
		slab->nblocks--;
	}
	else
		dlist_push_head(&slab->freelist[block->nfree], &block->node);

	Assert(slab->nblocks >= 0);
}

/*
 * SlabRealloc
 *		Change the allocated size of a chunk.
 *
 * As Slab is designed for allocating equally-sized chunks of memory, it
 * can't do an actual chunk size change.  We try to be gentle and allow calls
 * with exactly the same size, as in that case we can simply return the same
 * chunk.  When the size differs, we throw an error.
 *
 * We could also allow requests with size < chunkSize.  That however seems
 * rather pointless - Slab is meant for chunks of constant size, and moreover
 * realloc is usually used to enlarge the chunk.
 */
static void *
SlabRealloc(MemoryContext context, void *pointer, Size size)
{
	SlabContext *slab = castNode(SlabContext, context);

	Assert(slab);

	/* can't do actual realloc with slab, but let's try to be gentle */
	if (size == slab->chunkSize)
		return pointer;

	elog(ERROR, "slab allocator does not support realloc()");
	return NULL;				/* keep compiler quiet */
}

/*
 * SlabGetChunkSpace
 *		Given a currently-allocated chunk, determine the total space
 *		it occupies (including all memory-allocation overhead).
 */
static Size
SlabGetChunkSpace(MemoryContext context, void *pointer)
{
	SlabContext *slab = castNode(SlabContext, context);

	Assert(slab);

	return slab->fullChunkSize;
}

/*
 * SlabIsEmpty
 *		Is a Slab empty of any allocated space?
 */
static bool
SlabIsEmpty(MemoryContext context)
{
	SlabContext *slab = castNode(SlabContext, context);

	Assert(slab);

	return (slab->nblocks == 0);
}

/*
 * SlabStats
 *		Compute stats about memory consumption of a Slab.
 *
 * level: recursion level (0 at top level); used for print indentation.
 * print: true to print stats to stderr.
 * totals: if not NULL, add stats about this Slab into *totals.
 */
static void
SlabStats(MemoryContext context, int level, bool print,
		  MemoryContextCounters *totals)
{
	SlabContext *slab = castNode(SlabContext, context);
	Size		nblocks = 0;
	Size		freechunks = 0;
	Size		totalspace = 0;
	Size		freespace = 0;
	int			i;

	Assert(slab);

	for (i = 0; i <= slab->chunksPerBlock; i++)
	{
		dlist_iter	iter;

		dlist_foreach(iter, &slab->freelist[i])
		{
			SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);

			nblocks++;
			totalspace += slab->blockSize;
			freespace += slab->fullChunkSize * block->nfree;
			freechunks += block->nfree;
		}
	}

	if (print)
	{
		for (i = 0; i < level; i++)
			fprintf(stderr, "  ");
		fprintf(stderr,
				"Slab: %s: %zu total in %zu blocks; %zu free (%zu chunks); %zu used\n",
				slab->header.name, totalspace, nblocks, freespace, freechunks,
				totalspace - freespace);
	}

	if (totals)
	{
		totals->nblocks += nblocks;
		totals->freechunks += freechunks;
		totals->totalspace += totalspace;
		totals->freespace += freespace;
	}
}


#ifdef MEMORY_CONTEXT_CHECKING

/*
 * SlabCheck
 *		Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
static void
SlabCheck(MemoryContext context)
{
	int			i;
	SlabContext *slab = castNode(SlabContext, context);
	char	   *name = slab->header.name;

	Assert(slab);
	Assert(slab->chunksPerBlock > 0);

	/* walk all the freelists */
	for (i = 0; i <= slab->chunksPerBlock; i++)
	{
		int			j,
					nfree;
		dlist_iter	iter;

		/* walk all blocks on this freelist */
		dlist_foreach(iter, &slab->freelist[i])
		{
			int			idx;
			SlabBlock  *block = dlist_container(SlabBlock, node, iter.cur);

			/*
			 * Make sure the number of free chunks (in the block header)
			 * matches the position in the freelist.
			 */
			if (block->nfree != i)
				elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match freelist %d",
					 name, block->nfree, block, i);

			/* reset the bitmap of free chunks for this block */
			memset(slab->freechunks, 0, (slab->chunksPerBlock * sizeof(bool)));
			idx = block->firstFreeChunk;

			/*
			 * Now walk through the chunks, count the free ones and also
			 * perform some additional checks for the used ones. As the chunk
			 * freelist is stored within the chunks themselves, we have to
			 * walk through the chunks and construct our own bitmap.
			 */

			nfree = 0;
			while (idx < slab->chunksPerBlock)
			{
				SlabChunk  *chunk;

				/* count the chunk as free, add it to the bitmap */
				nfree++;
				slab->freechunks[idx] = true;

				/* read index of the next free chunk */
				chunk = SlabBlockGetChunk(slab, block, idx);
				VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(int32));
				idx = *(int32 *) SlabChunkGetPointer(chunk);
			}

			for (j = 0; j < slab->chunksPerBlock; j++)
			{
				/* a bit not set in the bitmap means the chunk is used */
				if (!slab->freechunks[j])
				{
					SlabChunk  *chunk = SlabBlockGetChunk(slab, block, j);

					/* chunks have both block and slab pointers, so check both */
					if (chunk->block != block)
						elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
							 name, block, chunk);

					if (chunk->slab != slab)
						elog(WARNING, "problem in slab %s: bogus slab link in block %p, chunk %p",
							 name, block, chunk);

					/* there might be a sentinel (thanks to alignment) */
					if (slab->chunkSize < (slab->fullChunkSize - sizeof(SlabChunk)))
						if (!sentinel_ok(SlabChunkGetPointer(chunk), slab->chunkSize))
							elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
								 name, block, chunk);
				}
			}

			/*
			 * Make sure we got the expected number of free chunks (as
			 * tracked in the block header).
			 */
			if (nfree != block->nfree)
				elog(WARNING, "problem in slab %s: number of free chunks %d in block %p does not match bitmap %d",
					 name, block->nfree, block, nfree);
		}
	}
}

#endif							/* MEMORY_CONTEXT_CHECKING */