/*-------------------------------------------------------------------------
 *
 * aset.c
 *	  Allocation set definitions.
 *
 * AllocSet is our standard implementation of the abstract MemoryContext
 * type.
 *
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/aset.c
 *
 * NOTE:
 *	This is a new (Feb. 05, 1999) implementation of the allocation set
 *	routines. AllocSet...() does not use OrderedSet...() any more.
 *	Instead it manages allocations in a block pool by itself, combining
 *	many small allocations in a few bigger blocks. AllocSetFree() normally
 *	doesn't really free() memory.  It just adds the freed area to a
 *	list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
 *	at once on AllocSetReset(), which happens when the memory context gets
 *	destroyed.
 *				Jan Wieck
 *
 *	Performance improvement from Tom Lane, 8/99: for extremely large request
 *	sizes, we do want to be able to give the memory back to free() as soon
 *	as it is pfree()'d.  Otherwise we risk tying up a lot of memory in
 *	freelist entries that might never be usable.  This is especially needed
 *	when the caller is repeatedly repalloc()'ing a block bigger and bigger;
 *	the previous instances of the block were guaranteed to be wasted until
 *	AllocSetReset() under the old way.
 *
 *	Further improvement 12/00: as the code stood, request sizes in the
 *	midrange between "small" and "large" were handled very inefficiently,
 *	because any sufficiently large free chunk would be used to satisfy a
 *	request, even if it was much larger than necessary.  This led to more
 *	and more wasted space in allocated chunks over time.  To fix, get rid
 *	of the midrange behavior: we now handle only "small" power-of-2-size
 *	chunks as chunks.  Anything "large" is passed off to malloc().  Change
 *	the number of freelists to change the small/large boundary.
 *
 *-------------------------------------------------------------------------
 */

#include "pool_type.h"
#include "utils/palloc.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "utils/elog.h"
#include <string.h>
#include <stdint.h>

/* Define this to detail debug alloc information */
/* #define HAVE_ALLOCINFO */

/*--------------------
 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
 *
 * Note that all chunks in the freelists have power-of-2 sizes.  This
 * improves recyclability: we may waste some space, but the wasted space
 * should stay pretty constant as requests are made and released.
 *
 * A request too large for the last freelist is handled by allocating a
 * dedicated block from malloc().  The block still has a block header and
 * chunk header, but when the chunk is freed we'll return the whole block
 * to malloc(), not put it on our freelists.
 *
 * CAUTION: ALLOC_MINBITS must be large enough so that
 * 1<<ALLOC_MINBITS is at least MAXALIGN,
 * or we may fail to align the smallest chunks adequately.
 * 8-byte alignment is enough on all currently known machines.
 *
 * With the current parameters, request sizes up to 8K are treated as chunks,
 * larger requests go into dedicated blocks.  Change ALLOCSET_NUM_FREELISTS
 * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
 * memutils.h to agree.  (Note: in contexts with small maxBlockSize, we may
 * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
 *--------------------
 */
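
/*
 * Illustrative sketch (editor's example, assuming the default parameters
 * defined just below): with ALLOC_MINBITS = 3 and ALLOCSET_NUM_FREELISTS
 * = 11, the freelists hold chunks of 8, 16, 32, ..., 8192 bytes, and
 * requests map like this:
 *
 *		palloc(1)    -> freelist 0, 8-byte chunk
 *		palloc(100)  -> freelist 4, 128-byte chunk (28 bytes wasted)
 *		palloc(8192) -> freelist 10, 8K chunk (largest "small" request)
 *		palloc(8193) -> dedicated malloc'd block, never put on a freelist
 */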

#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
/* Size of largest chunk that we use a fixed size for */
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
#define ALLOC_CHUNK_FRACTION	4

/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *--------------------
 */
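
/*
 * Worked example (editor's sketch, assuming the stock
 * ALLOCSET_DEFAULT_SIZES of initBlockSize = 8K, maxBlockSize = 8MB):
 * successive regular blocks are malloc'd at 8K, 16K, 32K, ... up to 8MB,
 * and every block after the cap is reached stays at 8MB.
 */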

#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(struct AllocChunkData)

typedef struct AllocBlockData *AllocBlock;	/* forward reference */
typedef struct AllocChunkData *AllocChunk;

/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;

/*
 * AllocSetContext is our standard implementation of MemoryContext.
 *
 * Note: header.isReset means there is nothing for AllocSetReset to do.
 * This is different from the aset being physically empty (empty blocks list)
 * because we may still have a keeper block.  It's also different from the set
 * being logically empty, because we don't attempt to detect pfree'ing the
 * last active chunk.
 */
typedef struct AllocSetContext
{
	MemoryContextData header;	/* Standard memory-context fields */
	/* Info about storage allocated in this context: */
	AllocBlock	blocks;			/* head of list of blocks in this set */
	AllocChunk	freelist[ALLOCSET_NUM_FREELISTS];	/* free chunk lists */
	/* Allocation parameters for this context: */
	Size		initBlockSize;	/* initial block size */
	Size		maxBlockSize;	/* maximum block size */
	Size		nextBlockSize;	/* next block size to allocate */
	Size		allocChunkLimit;	/* effective chunk size limit */
	AllocBlock	keeper;			/* if not NULL, keep this block over resets */
} AllocSetContext;

typedef AllocSetContext *AllocSet;

/*
 * AllocBlock
 *		An AllocBlock is the unit of memory that is obtained by aset.c
 *		from malloc().  It contains one or more AllocChunks, which are
 *		the units requested by palloc() and freed by pfree().  AllocChunks
 *		cannot be returned to malloc() individually; instead they are put
 *		on freelists by pfree() and re-used by the next palloc() that has
 *		a matching request size.
 *
 *		AllocBlockData is the header data for a block --- the usable space
 *		within the block begins at the next alignment boundary.
 */
typedef struct AllocBlockData
{
	AllocSet	aset;			/* aset that owns this block */
	AllocBlock	prev;			/* prev block in aset's blocks list, if any */
	AllocBlock	next;			/* next block in aset's blocks list, if any */
	char	   *freeptr;		/* start of free space in this block */
	char	   *endptr;			/* end of space in this block */
}			AllocBlockData;

/*
 * AllocChunk
 *		The prefix of each piece of memory in an AllocBlock
 */
typedef struct AllocChunkData
{
	/* size is always the size of the usable space in the chunk */
	Size		size;
#ifdef MEMORY_CONTEXT_CHECKING
	/* when debugging memory usage, also store actual requested size */
	/* this is zero in a free chunk */
	Size		requested_size;
#if MAXIMUM_ALIGNOF > 4 && SIZEOF_VOID_P == 4
	Size		padding;
#endif

#endif							/* MEMORY_CONTEXT_CHECKING */

	/* aset is the owning aset if allocated, or the freelist link if free */
	void	   *aset;

	/* there must not be any padding to reach a MAXALIGN boundary here! */
}			AllocChunkData;

/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) PointerIsValid(set)

#define AllocPointerGetChunk(ptr)	\
					((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk)	\
					((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))
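
/*
 * Editor's note: the chunk header immediately precedes the user-visible
 * pointer, so the two macros above are exact inverses:
 *
 *		AllocChunk	chk = ...;
 *		void	   *ptr = AllocChunkGetPointer(chk);
 *		Assert(AllocPointerGetChunk(ptr) == chk);
 *
 * This back-pointer arithmetic is how AllocSetFree() recovers the chunk
 * header (and through it the owning context) from a bare pointer.
 */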

/*
 * These functions implement the MemoryContext API for AllocSet contexts.
 */
static void *AllocSetAlloc(MemoryContext context, Size size);
static void AllocSetFree(MemoryContext context, void *pointer);
static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
static void AllocSetInit(MemoryContext context);
static void AllocSetReset(MemoryContext context);
static void AllocSetDelete(MemoryContext context);
static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
static bool AllocSetIsEmpty(MemoryContext context);
static void AllocSetStats(MemoryContext context, int level, bool print,
			  MemoryContextCounters *totals);

#ifdef MEMORY_CONTEXT_CHECKING
static void AllocSetCheck(MemoryContext context);
#endif

/*
 * This is the virtual function table for AllocSet contexts.
 */
static MemoryContextMethods AllocSetMethods = {
	AllocSetAlloc,
	AllocSetFree,
	AllocSetRealloc,
	AllocSetInit,
	AllocSetReset,
	AllocSetDelete,
	AllocSetGetChunkSpace,
	AllocSetIsEmpty,
	AllocSetStats
#ifdef MEMORY_CONTEXT_CHECKING
	,AllocSetCheck
#endif
};

/*
 * Table for AllocSetFreeIndex
 */
#define LT16(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n

static const unsigned char LogTable256[256] =
{
	0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
	LT16(5), LT16(6), LT16(6), LT16(7), LT16(7), LT16(7), LT16(7),
	LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8)
};
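
/*
 * Editor's note: LogTable256[x] is the number of bits needed to represent
 * x, i.e. floor(log2(x)) + 1 for x > 0 (and 0 for x == 0).  For example:
 *
 *		LogTable256[1]   == 1	(binary 1)
 *		LogTable256[12]  == 4	(binary 1100)
 *		LogTable256[255] == 8	(binary 11111111)
 */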

/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
#define AllocFreeInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocFree: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#define AllocAllocInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocAlloc: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#else
#define AllocFreeInfo(_cxt, _chunk)
#define AllocAllocInfo(_cxt, _chunk)
#endif

/* ----------
 * AllocSetFreeIndex -
 *
 *		Compute which free-chunk list of the alloc set an allocation of
 *		the given size belongs to.  Caller must have verified that
 *		size <= ALLOC_CHUNK_LIMIT.
 * ----------
 */
static inline int
AllocSetFreeIndex(Size size)
{
	int			idx;
	unsigned int t,
				tsize;

	if (size > (1 << ALLOC_MINBITS))
	{
		tsize = (size - 1) >> ALLOC_MINBITS;

		/*
		 * At this point we need to obtain log2(tsize)+1, i.e., the number of
		 * bits needed to represent tsize.  We used to do this with a
		 * shift-and-count loop, but this function is enough of a hotspot to
		 * justify micro-optimization effort.  The best approach seems to be
		 * to use a lookup table.  Note that this code assumes that
		 * ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes of
		 * the tsize value.
		 */
		t = tsize >> 8;
		idx = t ? LogTable256[t] + 8 : LogTable256[tsize];

		Assert(idx < ALLOCSET_NUM_FREELISTS);
	}
	else
		idx = 0;

	return idx;
}
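
/*
 * Worked example (editor's sketch): for size = 100 with ALLOC_MINBITS = 3,
 * tsize = (100 - 1) >> 3 = 12.  The high byte (12 >> 8) is zero, so
 * idx = LogTable256[12] = 4: the request is served from freelist 4, whose
 * chunks are 1 << (4 + 3) = 128 bytes.
 */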


/*
 * Public routines
 */


/*
 * AllocSetContextCreate
 *		Create a new AllocSet context.
 *
 * parent: parent context, or NULL if top-level context
 * name: name of context (for debugging only, need not be unique)
 * minContextSize: minimum context size
 * initBlockSize: initial allocation block size
 * maxBlockSize: maximum allocation block size
 *
 * Notes: the name string will be copied into context-lifespan storage.
 * Most callers should abstract the context size parameters using a macro
 * such as ALLOCSET_DEFAULT_SIZES.
 */
MemoryContext
AllocSetContextCreate(MemoryContext parent,
					  const char *name,
					  Size minContextSize,
					  Size initBlockSize,
					  Size maxBlockSize)
{
	AllocSet	set;

	StaticAssertStmt(offsetof(AllocChunkData, aset) + sizeof(MemoryContext) ==
					 MAXALIGN(sizeof(AllocChunkData)),
					 "padding calculation in AllocChunkData is wrong");

	/*
	 * First, validate allocation parameters.  (If we're going to throw an
	 * error, we should do so before the context is created, not after.)  We
	 * somewhat arbitrarily enforce a minimum 1K block size.
	 */
	if (initBlockSize != MAXALIGN(initBlockSize) ||
		initBlockSize < 1024)
		elog(ERROR, "invalid initBlockSize for memory context: %zu",
			 initBlockSize);
	if (maxBlockSize != MAXALIGN(maxBlockSize) ||
		maxBlockSize < initBlockSize ||
		!AllocHugeSizeIsValid(maxBlockSize))	/* must be safe to double */
		elog(ERROR, "invalid maxBlockSize for memory context: %zu",
			 maxBlockSize);
	if (minContextSize != 0 &&
		(minContextSize != MAXALIGN(minContextSize) ||
		 minContextSize <= ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
		elog(ERROR, "invalid minContextSize for memory context: %zu",
			 minContextSize);

	/* Do the type-independent part of context creation */
	set = (AllocSet) MemoryContextCreate(T_AllocSetContext,
										 sizeof(AllocSetContext),
										 &AllocSetMethods,
										 parent,
										 name);

	/* Save allocation parameters */
	set->initBlockSize = initBlockSize;
	set->maxBlockSize = maxBlockSize;
	set->nextBlockSize = initBlockSize;

	/*
	 * Compute the allocation chunk size limit for this context.  It can't be
	 * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
	 * If maxBlockSize is small then requests exceeding the maxBlockSize, or
	 * even a significant fraction of it, should be treated as large chunks
	 * too.  For the typical case of maxBlockSize a power of 2, the chunk size
	 * limit will be at most 1/8th maxBlockSize, so that given a stream of
	 * requests that are all the maximum chunk size we will waste at most
	 * 1/8th of the allocated space.
	 *
	 * We have to have allocChunkLimit a power of two, because the requested
	 * and actually-allocated sizes of any chunk must be on the same side of
	 * the limit, else we get confused about whether the chunk is "big".
	 *
	 * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
	 */
	StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
					 "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");

	set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
	while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
		   (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
		set->allocChunkLimit >>= 1;

	/*
	 * Grab always-allocated space, if requested
	 */
	if (minContextSize > 0)
	{
		Size		blksize = minContextSize;
		AllocBlock	block;

		block = (AllocBlock) malloc(blksize);
		if (block == NULL)
		{
			MemoryContextStats(TopMemoryContext);
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of memory"),
					 errdetail("Failed while creating memory context \"%s\".",
							   name)));
		}
		block->aset = set;
		block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
		block->endptr = ((char *) block) + blksize;
		block->prev = NULL;
		block->next = set->blocks;
		if (block->next)
			block->next->prev = block;
		set->blocks = block;
		/* Mark block as not to be released at reset time */
		set->keeper = block;

		/* Mark unallocated space NOACCESS; leave the block header alone. */
		VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
								   blksize - ALLOC_BLOCKHDRSZ);
	}

	return (MemoryContext) set;
}
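
/*
 * Usage sketch (editor's example; assumes the standard memutils API this
 * file is built against):
 *
 *		MemoryContext cxt;
 *		MemoryContext old;
 *
 *		cxt = AllocSetContextCreate(TopMemoryContext,
 *									"ExampleContext",
 *									ALLOCSET_DEFAULT_SIZES);
 *		old = MemoryContextSwitchTo(cxt);
 *		...palloc/repalloc/pfree happen in cxt...
 *		MemoryContextSwitchTo(old);
 *		MemoryContextDelete(cxt);	releases everything in cxt at once
 *
 * ALLOCSET_DEFAULT_SIZES expands to the three size parameters
 * (0, 8K, 8MB in stock PostgreSQL headers).
 */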

/*
 * AllocSetInit
 *		Context-type-specific initialization routine.
 *
 * This is called by MemoryContextCreate() after setting up the
 * generic MemoryContext fields and before linking the new context
 * into the context tree.  We must do whatever is needed to make the
 * new context minimally valid for deletion.  We must *not* risk
 * failure --- thus, for example, allocating more memory is not cool.
 * (AllocSetContextCreate can allocate memory when it gets control
 * back, however.)
 */
static void
AllocSetInit(MemoryContext context)
{
	/*
	 * Since MemoryContextCreate already zeroed the context node, we don't
	 * have to do anything here: it's already OK.
	 */
}

/*
 * AllocSetReset
 *		Frees all memory which is allocated in the given set.
 *
 * Actually, this routine has some discretion about what to do.
 * It should mark all allocated chunks freed, but it need not necessarily
 * give back all the resources the set owns.  Our actual implementation is
 * that we hang onto any "keeper" block specified for the set.  In this way,
 * we don't thrash malloc() when a context is repeatedly reset after small
 * allocations, which is typical behavior for per-tuple contexts.
 */
static void
AllocSetReset(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;

	AssertArg(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	AllocSetCheck(context);
#endif

	/* Clear chunk freelists */
	MemSetAligned(set->freelist, 0, sizeof(set->freelist));

	block = set->blocks;

	/* New blocks list is either empty or just the keeper block */
	set->blocks = set->keeper;

	while (block != NULL)
	{
		AllocBlock	next = block->next;

		if (block == set->keeper)
		{
			/* Reset the block, but don't return it to malloc */
			char	   *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;

#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(datastart, block->freeptr - datastart);
#else
			/* wipe_mem() would have done this */
			VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
#endif
			block->freeptr = datastart;
			block->prev = NULL;
			block->next = NULL;
		}
		else
		{
			/* Normal case, release the block */
#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(block, block->freeptr - ((char *) block));
#endif
			free(block);
		}
		block = next;
	}

	/* Reset block size allocation sequence, too */
	set->nextBlockSize = set->initBlockSize;
}

/*
 * AllocSetDelete
 *		Frees all memory which is allocated in the given set,
 *		in preparation for deletion of the set.
 *
 * Unlike AllocSetReset, this *must* free all resources of the set.
 * But note we are not responsible for deleting the context node itself.
 */
static void
AllocSetDelete(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block = set->blocks;

	AssertArg(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	AllocSetCheck(context);
#endif

	/* Make it look empty, just in case... */
	MemSetAligned(set->freelist, 0, sizeof(set->freelist));
	set->blocks = NULL;
	set->keeper = NULL;

	while (block != NULL)
	{
		AllocBlock	next = block->next;

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(block, block->freeptr - ((char *) block));
#endif
		free(block);
		block = next;
	}
}

/*
 * AllocSetAlloc
 *		Returns pointer to allocated memory of given size or NULL if
 *		request could not be completed; memory is added to the set.
 *
 * No request may exceed:
 *		MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
 * All callers use a much-lower limit.
 */
static void *
AllocSetAlloc(MemoryContext context, Size size)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;
	AllocChunk	chunk;
	int			fidx;
	Size		chunk_size;
	Size		blksize;

	AssertArg(AllocSetIsValid(set));

	/*
	 * If requested size exceeds maximum for chunks, allocate an entire block
	 * for this request.
	 */
	if (size > set->allocChunkLimit)
	{
		chunk_size = MAXALIGN(size);
		blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		block = (AllocBlock) malloc(blksize);
		if (block == NULL)
			return NULL;
		block->aset = set;
		block->freeptr = block->endptr = ((char *) block) + blksize;

		chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
		chunk->aset = set;
		chunk->size = chunk_size;
#ifdef MEMORY_CONTEXT_CHECKING
		/* Valgrind: Will be made NOACCESS below. */
		chunk->requested_size = size;
		/* set mark to catch clobber of "unused" space */
		if (size < chunk_size)
			set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* fill the allocated space with junk */
		randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

		/*
		 * Stick the new block underneath the active allocation block, if any,
		 * so that we don't lose the use of the space remaining therein.
		 */
		if (set->blocks != NULL)
		{
			block->prev = set->blocks;
			block->next = set->blocks->next;
			if (block->next)
				block->next->prev = block;
			set->blocks->next = block;
		}
		else
		{
			block->prev = NULL;
			block->next = NULL;
			set->blocks = block;
		}

		AllocAllocInfo(set, chunk);

		/*
		 * Chunk's metadata fields remain DEFINED.  The requested allocation
		 * itself can be NOACCESS or UNDEFINED; our caller will soon make it
		 * UNDEFINED.  Make extra space at the end of the chunk, if any,
		 * NOACCESS.
		 */
		VALGRIND_MAKE_MEM_NOACCESS((char *) chunk + ALLOC_CHUNKHDRSZ,
								   chunk_size - ALLOC_CHUNKHDRSZ);

		return AllocChunkGetPointer(chunk);
	}

	/*
	 * Request is small enough to be treated as a chunk.  Look in the
	 * corresponding free list to see if there is a free chunk we could reuse.
	 * If one is found, remove it from the free list, make it again a member
	 * of the alloc set and return its data address.
	 */
	fidx = AllocSetFreeIndex(size);
	chunk = set->freelist[fidx];
	if (chunk != NULL)
	{
		Assert(chunk->size >= size);

		set->freelist[fidx] = (AllocChunk) chunk->aset;

		chunk->aset = (void *) set;

#ifdef MEMORY_CONTEXT_CHECKING
		/* Valgrind: Free list requested_size should be DEFINED. */
		chunk->requested_size = size;
		VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
								   sizeof(chunk->requested_size));
		/* set mark to catch clobber of "unused" space */
		if (size < chunk->size)
			set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* fill the allocated space with junk */
		randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

		AllocAllocInfo(set, chunk);
		return AllocChunkGetPointer(chunk);
	}

	/*
	 * Choose the actual chunk size to allocate.
	 */
	chunk_size = (1 << ALLOC_MINBITS) << fidx;
	Assert(chunk_size >= size);

	/*
	 * If there is enough room in the active allocation block, we will put the
	 * chunk into that block.  Else must start a new one.
	 */
	if ((block = set->blocks) != NULL)
	{
		Size		availspace = block->endptr - block->freeptr;

		if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
		{
			/*
			 * The existing active (top) block does not have enough room for
			 * the requested allocation, but it might still have a useful
			 * amount of space in it.  Once we push it down in the block list,
			 * we'll never try to allocate more space from it. So, before we
			 * do that, carve up its free space into chunks that we can put on
			 * the set's freelists.
			 *
			 * Because we can only get here when there's less than
			 * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
			 * more than ALLOCSET_NUM_FREELISTS-1 times.
			 */
			while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
			{
				Size		availchunk = availspace - ALLOC_CHUNKHDRSZ;
				int			a_fidx = AllocSetFreeIndex(availchunk);

				/*
				 * In most cases, we'll get back the index of the next larger
				 * freelist than the one we need to put this chunk on.  The
				 * exception is when availchunk is exactly a power of 2.
				 */
				if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
				{
					a_fidx--;
					Assert(a_fidx >= 0);
					availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
				}

				chunk = (AllocChunk) (block->freeptr);

				/* Prepare to initialize the chunk header. */
				VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);

				block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
				availspace -= (availchunk + ALLOC_CHUNKHDRSZ);

				chunk->size = availchunk;
#ifdef MEMORY_CONTEXT_CHECKING
				chunk->requested_size = 0;	/* mark it free */
#endif
				chunk->aset = (void *) set->freelist[a_fidx];
				set->freelist[a_fidx] = chunk;
			}

			/* Mark that we need to create a new block */
			block = NULL;
		}
	}

	/*
	 * Time to create a new regular (multi-chunk) block?
	 */
	if (block == NULL)
	{
		Size		required_size;

		/*
		 * The first such block has size initBlockSize, and we double the
		 * space in each succeeding block, but not more than maxBlockSize.
		 */
		blksize = set->nextBlockSize;
		set->nextBlockSize <<= 1;
		if (set->nextBlockSize > set->maxBlockSize)
			set->nextBlockSize = set->maxBlockSize;

		/*
		 * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
		 * space... but try to keep it a power of 2.
		 */
		required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		while (blksize < required_size)
			blksize <<= 1;

		/* Try to allocate it */
		block = (AllocBlock) malloc(blksize);

		/*
		 * We could be asking for pretty big blocks here, so cope if malloc
		 * fails.  But give up if there's less than a meg or so available...
		 */
		while (block == NULL && blksize > 1024 * 1024)
		{
			blksize >>= 1;
			if (blksize < required_size)
				break;
			block = (AllocBlock) malloc(blksize);
		}

		if (block == NULL)
			return NULL;

		block->aset = set;
		block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
		block->endptr = ((char *) block) + blksize;

		/*
		 * If this is the first block of the set, make it the "keeper" block.
		 * Formerly, a keeper block could only be created during context
		 * creation, but allowing it to happen here lets us have fast reset
		 * cycling even for contexts created with minContextSize = 0; that way
		 * we don't have to force space to be allocated in contexts that might
		 * never need any space.  Don't mark an oversize block as a keeper,
		 * however.
		 */
		if (set->keeper == NULL && blksize == set->initBlockSize)
			set->keeper = block;

		/* Mark unallocated space NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
								   blksize - ALLOC_BLOCKHDRSZ);

		block->prev = NULL;
		block->next = set->blocks;
		if (block->next)
			block->next->prev = block;
		set->blocks = block;
	}

	/*
	 * OK, do the allocation
	 */
	chunk = (AllocChunk) (block->freeptr);

	/* Prepare to initialize the chunk header. */
	VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);

	block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
	Assert(block->freeptr <= block->endptr);

	chunk->aset = (void *) set;
	chunk->size = chunk_size;
#ifdef MEMORY_CONTEXT_CHECKING
	chunk->requested_size = size;
	VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
							   sizeof(chunk->requested_size));
	/* set mark to catch clobber of "unused" space */
	if (size < chunk->size)
		set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
	/* fill the allocated space with junk */
	randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

	AllocAllocInfo(set, chunk);
	return AllocChunkGetPointer(chunk);
}

/*
 * AllocSetFree
 *		Frees allocated memory; memory is removed from the set.
 */
static void
AllocSetFree(MemoryContext context, void *pointer)
{
	AllocSet	set = (AllocSet) context;
	AllocChunk	chunk = AllocPointerGetChunk(pointer);

	AllocFreeInfo(set, chunk);

#ifdef MEMORY_CONTEXT_CHECKING
	VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
							  sizeof(chunk->requested_size));
	/* Test for someone scribbling on unused space in chunk */
	if (chunk->requested_size < chunk->size)
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

	if (chunk->size > set->allocChunkLimit)
	{
		/*
		 * Big chunks are certain to have been allocated as single-chunk
		 * blocks.  Just unlink that block and return it to malloc().
		 */
		AllocBlock	block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);

		/*
		 * Try to verify that we have a sane block pointer: it should
		 * reference the correct aset, and freeptr and endptr should point
		 * just past the chunk.
		 */
		if (block->aset != set ||
			block->freeptr != block->endptr ||
			block->freeptr != ((char *) block) +
			(chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
			elog(ERROR, "could not find block containing chunk %p", chunk);

		/* OK, remove block from aset's list and free it */
		if (block->prev)
			block->prev->next = block->next;
		else
			set->blocks = block->next;
		if (block->next)
			block->next->prev = block->prev;
#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(block, block->freeptr - ((char *) block));
#endif
		free(block);
	}
	else
	{
		/* Normal case, put the chunk into appropriate freelist */
		int			fidx = AllocSetFreeIndex(chunk->size);

		chunk->aset = (void *) set->freelist[fidx];

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(pointer, chunk->size);
#endif

#ifdef MEMORY_CONTEXT_CHECKING
		/* Reset requested_size to 0 in chunks that are on freelist */
		chunk->requested_size = 0;
#endif
		set->freelist[fidx] = chunk;
	}
}

/*
 * AllocSetRealloc
 *		Returns new pointer to allocated memory of given size or NULL if
 *		request could not be completed; this memory is added to the set.
 *		Memory associated with given pointer is copied into the new memory,
 *		and the old memory is freed.
 *
 * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size.  This
 * makes our Valgrind client requests less-precise, hazarding false negatives.
 * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
 * request size.)
 */
static void *
AllocSetRealloc(MemoryContext context, void *pointer, Size size)
{
	AllocSet	set = (AllocSet) context;
	AllocChunk	chunk = AllocPointerGetChunk(pointer);
	Size		oldsize = chunk->size;

#ifdef MEMORY_CONTEXT_CHECKING
	VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
							  sizeof(chunk->requested_size));
	/* Test for someone scribbling on unused space in chunk */
	if (chunk->requested_size < oldsize)
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

	/*
	 * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
	 * allocated area already is >= the new size.  (In particular, we always
	 * fall out here if the requested size is a decrease.)
	 */
	if (oldsize >= size)
	{
#ifdef MEMORY_CONTEXT_CHECKING
		Size		oldrequest = chunk->requested_size;

#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		if (size > oldrequest)
			randomize_mem((char *) pointer + oldrequest,
						  size - oldrequest);
#endif

		chunk->requested_size = size;
		VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
								   sizeof(chunk->requested_size));

		/*
		 * If this is an increase, mark any newly-available part UNDEFINED.
		 * Otherwise, mark the obsolete part NOACCESS.
		 */
		if (size > oldrequest)
			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
										size - oldrequest);
		else
			VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
									   oldsize - size);

		/* set mark to catch clobber of "unused" space */
		if (size < oldsize)
			set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't have the information to determine whether we're growing
		 * the old request or shrinking it, so we conservatively mark the
		 * entire new allocation DEFINED.
		 */
		VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
		VALGRIND_MAKE_MEM_DEFINED(pointer, size);
#endif

		return pointer;
	}

	if (oldsize > set->allocChunkLimit)
	{
		/*
		 * The chunk must have been allocated as a single-chunk block.  Use
		 * realloc() to make the containing block bigger with minimum space
		 * wastage.
		 */
		AllocBlock	block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
		Size		chksize;
		Size		blksize;

		/*
		 * Try to verify that we have a sane block pointer: it should
		 * reference the correct aset, and freeptr and endptr should point
		 * just past the chunk.
		 */
		if (block->aset != set ||
			block->freeptr != block->endptr ||
			block->freeptr != ((char *) block) +
			(chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
			elog(ERROR, "could not find block containing chunk %p", chunk);

		/* Do the realloc */
		chksize = MAXALIGN(size);
		blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		block = (AllocBlock) realloc(block, blksize);
		if (block == NULL)
			return NULL;
		block->freeptr = block->endptr = ((char *) block) + blksize;

		/* Update pointers since block has likely been moved */
		chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
		pointer = AllocChunkGetPointer(chunk);
		if (block->prev)
			block->prev->next = block;
		else
			set->blocks = block;
		if (block->next)
			block->next->prev = block;
		chunk->size = chksize;

#ifdef MEMORY_CONTEXT_CHECKING
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		randomize_mem((char *) pointer + chunk->requested_size,
					  size - chunk->requested_size);
#endif

		/*
		 * realloc() (or randomize_mem()) will have left the newly-allocated
		 * part UNDEFINED, but we may need to adjust trailing bytes from the
		 * old allocation.
		 */
		VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
									oldsize - chunk->requested_size);

		chunk->requested_size = size;
		VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
								   sizeof(chunk->requested_size));

		/* set mark to catch clobber of "unused" space */
		if (size < chunk->size)
			set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't know how much of the old chunk size was the actual
		 * allocation; it could have been as small as one byte.  We have to be
		 * conservative and just mark the entire old portion DEFINED.
		 */
		VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

		/* Make any trailing alignment padding NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);

		return pointer;
	}
	else
	{
		/*
		 * Small-chunk case.  We just do this by brute force, ie, allocate a
		 * new chunk and copy the data.  Since we know the existing data isn't
		 * huge, this won't involve any great memcpy expense, so it's not
		 * worth being smarter.  (At one time we tried to avoid memcpy when it
		 * was possible to enlarge the chunk in-place, but that turns out to
		 * misbehave unpleasantly for repeated cycles of
		 * palloc/repalloc/pfree: the eventually freed chunks go into the
		 * wrong freelist for the next initial palloc request, and so we leak
		 * memory indefinitely.  See pgsql-hackers archives for 2007-08-11.)
		 */
		AllocPointer newPointer;

		/* allocate new chunk */
		newPointer = AllocSetAlloc((MemoryContext) set, size);

		/* leave immediately if request was not completed */
		if (newPointer == NULL)
			return NULL;

		/*
		 * AllocSetAlloc() just made the region NOACCESS.  Change it to
		 * UNDEFINED for the moment; memcpy() will then transfer definedness
		 * from the old allocation to the new.  If we know the old allocation,
		 * copy just that much.  Otherwise, make the entire old chunk defined
		 * to avoid errors as we copy the currently-NOACCESS trailing bytes.
		 */
		VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
#ifdef MEMORY_CONTEXT_CHECKING
		oldsize = chunk->requested_size;
#else
		VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

		/* transfer existing data (certain to fit) */
		memcpy(newPointer, pointer, oldsize);

		/* free old chunk */
		AllocSetFree((MemoryContext) set, pointer);

		return newPointer;
	}
}
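
/*
 * Worked example (editor's sketch): repalloc'ing a 128-byte chunk up to
 * 200 bytes takes the small-chunk path above: a new 256-byte chunk is
 * obtained from AllocSetAlloc(), the old contents are copied over, and
 * the old 128-byte chunk goes onto freelist 4 for reuse.
 */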

/*
 * AllocSetGetChunkSpace
 *		Given a currently-allocated chunk, determine the total space
 *		it occupies (including all memory-allocation overhead).
 */
static Size
AllocSetGetChunkSpace(MemoryContext context, void *pointer)
{
	AllocChunk	chunk = AllocPointerGetChunk(pointer);

	return chunk->size + ALLOC_CHUNKHDRSZ;
}
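
/*
 * Example (editor's sketch): for p = palloc(100), this reports
 * 128 + ALLOC_CHUNKHDRSZ bytes, since the request was rounded up to the
 * 128-byte freelist size and the chunk header counts as overhead too.
 */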

/*
 * AllocSetIsEmpty
 *		Is an allocset empty of any allocated space?
 */
static bool
AllocSetIsEmpty(MemoryContext context)
{
	/*
	 * For now, we say "empty" only if the context is new or just reset. We
	 * could examine the freelists to determine if all space has been freed,
	 * but it's not really worth the trouble for present uses of this
	 * functionality.
	 */
	if (context->isReset)
		return true;
	return false;
}

/*
 * AllocSetStats
 *		Compute stats about memory consumption of an allocset.
 *
 * level: recursion level (0 at top level); used for print indentation.
 * print: true to print stats to stderr.
 * totals: if not NULL, add stats about this allocset into *totals.
 */
static void
AllocSetStats(MemoryContext context, int level, bool print,
			  MemoryContextCounters *totals)
{
	AllocSet	set = (AllocSet) context;
	Size		nblocks = 0;
	Size		freechunks = 0;
	Size		totalspace = 0;
	Size		freespace = 0;
	AllocBlock	block;
	int			fidx;

	for (block = set->blocks; block != NULL; block = block->next)
	{
		nblocks++;
		totalspace += block->endptr - ((char *) block);
		freespace += block->endptr - block->freeptr;
	}
	for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
	{
		AllocChunk	chunk;

		for (chunk = set->freelist[fidx]; chunk != NULL;
			 chunk = (AllocChunk) chunk->aset)
		{
			freechunks++;
			freespace += chunk->size + ALLOC_CHUNKHDRSZ;
		}
	}

	if (print)
	{
		int			i;

		for (i = 0; i < level; i++)
			fprintf(stderr, "  ");
		fprintf(stderr,
				"%s: %zu total in %zd blocks; %zu free (%zd chunks); %zu used\n",
				set->header.name, totalspace, nblocks, freespace, freechunks,
				totalspace - freespace);
	}

	if (totals)
	{
		totals->nblocks += nblocks;
		totals->freechunks += freechunks;
		totals->totalspace += totalspace;
		totals->freespace += freespace;
	}
}


#ifdef MEMORY_CONTEXT_CHECKING

/*
 * AllocSetCheck
 *		Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
static void
AllocSetCheck(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	char	   *name = set->header.name;
	AllocBlock	prevblock;
	AllocBlock	block;

	for (prevblock = NULL, block = set->blocks;
		 block != NULL;
		 prevblock = block, block = block->next)
	{
		char	   *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
		long		blk_used = block->freeptr - bpoz;
		long		blk_data = 0;
		long		nchunks = 0;

		/*
		 * Empty block --- only the keeper block may legitimately be empty
		 */
		if (!blk_used)
		{
			if (set->keeper != block)
				elog(WARNING, "problem in alloc set %s: empty block %p",
					 name, block);
		}

		/*
		 * Check block header fields
		 */
		if (block->aset != set ||
			block->prev != prevblock ||
			block->freeptr < bpoz ||
			block->freeptr > block->endptr)
			elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
				 name, block);

		/*
		 * Chunk walker
		 */
		while (bpoz < block->freeptr)
		{
			AllocChunk	chunk = (AllocChunk) bpoz;
			Size		chsize,
						dsize;

			chsize = chunk->size;	/* aligned chunk size */
			VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
									  sizeof(chunk->requested_size));
			dsize = chunk->requested_size;	/* real data */
			if (dsize > 0)		/* not on a free list */
				VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
										   sizeof(chunk->requested_size));

			/*
			 * Check chunk size
			 */
			if (dsize > chsize)
				elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
					 name, chunk, block);
			if (chsize < (1 << ALLOC_MINBITS))
				elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
					 name, chsize, chunk, block);

			/* single-chunk block? */
			if (chsize > set->allocChunkLimit &&
				chsize + ALLOC_CHUNKHDRSZ != blk_used)
				elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
					 name, chunk, block);

			/*
			 * If chunk is allocated, check for correct aset pointer. (If it's
			 * free, the aset is the freelist pointer, which we can't check as
			 * easily...)
			 */
			if (dsize > 0 && chunk->aset != (void *) set)
				elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
					 name, block, chunk);

			/*
			 * Check for overwrite of "unallocated" space in chunk
			 */
			if (dsize > 0 && dsize < chsize &&
				!sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
				elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
					 name, block, chunk);

			blk_data += chsize;
			nchunks++;

			bpoz += ALLOC_CHUNKHDRSZ + chsize;
		}

		if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
			elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
				 name, block);
	}
}

#endif							/* MEMORY_CONTEXT_CHECKING */