/*-------------------------------------------------------------------------
 *
 * aset.c
 *	  Allocation set definitions.
 *
 * AllocSet is our standard implementation of the abstract MemoryContext
 * type.
 *
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/mmgr/aset.c
 *
 * NOTE:
 *	This is a new (Feb. 05, 1999) implementation of the allocation set
 *	routines. AllocSet...() does not use OrderedSet...() any more.
 *	Instead it manages allocations in a block pool by itself, combining
 *	many small allocations in a few bigger blocks. AllocSetFree() normally
 *	doesn't really free() memory.  It just adds the freed area to some
 *	list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
 *	at once on AllocSetReset(), which happens when the memory context gets
 *	destroyed.
 *				Jan Wieck
 *
 *	Performance improvement from Tom Lane, 8/99: for extremely large request
 *	sizes, we do want to be able to give the memory back to free() as soon
 *	as it is pfree()'d.  Otherwise we risk tying up a lot of memory in
 *	freelist entries that might never be usable.  This is especially needed
 *	when the caller is repeatedly repalloc()'ing a block bigger and bigger;
 *	under the old way, the previous instances of the block were guaranteed
 *	to be wasted until AllocSetReset().
 *
 *	Further improvement 12/00: as the code stood, request sizes in the
 *	midrange between "small" and "large" were handled very inefficiently,
 *	because any sufficiently large free chunk would be used to satisfy a
 *	request, even if it was much larger than necessary.  This led to more
 *	and more wasted space in allocated chunks over time.  To fix, get rid
 *	of the midrange behavior: we now handle only "small" power-of-2-size
 *	chunks as chunks.  Anything "large" is passed off to malloc().  Change
 *	the number of freelists to change the small/large boundary.
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "utils/memdebug.h"
#include "utils/memutils.h"

/* Define this to print detailed debug information about allocations */
/* #define HAVE_ALLOCINFO */

/*--------------------
 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
 *
 * Note that all chunks in the freelists have power-of-2 sizes.  This
 * improves recyclability: we may waste some space, but the wasted space
 * should stay pretty constant as requests are made and released.
 *
 * A request too large for the last freelist is handled by allocating a
 * dedicated block from malloc().  The block still has a block header and
 * chunk header, but when the chunk is freed we'll return the whole block
 * to malloc(), not put it on our freelists.
 *
 * CAUTION: ALLOC_MINBITS must be large enough so that
 * 1<<ALLOC_MINBITS is at least MAXALIGN,
 * or we may fail to align the smallest chunks adequately.
 * 8-byte alignment is enough on all currently known machines.
 *
 * With the current parameters, request sizes up to 8K are treated as chunks,
 * larger requests go into dedicated blocks.  Change ALLOCSET_NUM_FREELISTS
 * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
 * memutils.h to agree.  (Note: in contexts with small maxBlockSize, we may
 * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
 *--------------------
 */

#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
#define ALLOC_CHUNK_FRACTION	4
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
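
/*
 * With these settings the size classes work out as follows (an illustrative
 * sketch; sizes are in bytes):
 *
 *		request size		freelist index		chunk size
 *		   1 -    8				 0					   8
 *		   9 -   16				 1					  16
 *		  17 -   32				 2					  32
 *		 ...					...					 ...
 *		4097 - 8192				10					8192
 *
 * Anything above ALLOC_CHUNK_LIMIT (8192) gets its own dedicated block.
 */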

/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *--------------------
 */
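
/*
 * For example, with ALLOCSET_DEFAULT_SIZES (an 8K initial and 8MB maximum
 * block size in current sources), successive regular blocks are allocated
 * at 8K, 16K, 32K, ..., 8MB, 8MB, ...
 */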

#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(struct AllocChunkData)

typedef struct AllocBlockData *AllocBlock;	/* forward reference */
typedef struct AllocChunkData *AllocChunk;

/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;

/*
 * AllocSetContext is our standard implementation of MemoryContext.
 *
 * Note: header.isReset means there is nothing for AllocSetReset to do.
 * This is different from the aset being physically empty (empty blocks list)
 * because we may still have a keeper block.  It's also different from the set
 * being logically empty, because we don't attempt to detect pfree'ing the
 * last active chunk.
 */
typedef struct AllocSetContext
{
	MemoryContextData header;	/* Standard memory-context fields */
	/* Info about storage allocated in this context: */
	AllocBlock	blocks;			/* head of list of blocks in this set */
	AllocChunk	freelist[ALLOCSET_NUM_FREELISTS];	/* free chunk lists */
	/* Allocation parameters for this context: */
	Size		initBlockSize;	/* initial block size */
	Size		maxBlockSize;	/* maximum block size */
	Size		nextBlockSize;	/* next block size to allocate */
	Size		allocChunkLimit;	/* effective chunk size limit */
	AllocBlock	keeper;			/* if not NULL, keep this block over resets */
} AllocSetContext;

typedef AllocSetContext *AllocSet;

/*
 * AllocBlock
 *		An AllocBlock is the unit of memory that is obtained by aset.c
 *		from malloc().  It contains one or more AllocChunks, which are
 *		the units requested by palloc() and freed by pfree().  AllocChunks
 *		cannot be returned to malloc() individually; instead they are put
 *		on freelists by pfree() and re-used by the next palloc() that has
 *		a matching request size.
 *
 *		AllocBlockData is the header data for a block --- the usable space
 *		within the block begins at the next alignment boundary.
 */
typedef struct AllocBlockData
{
	AllocSet	aset;			/* aset that owns this block */
	AllocBlock	prev;			/* prev block in aset's blocks list, if any */
	AllocBlock	next;			/* next block in aset's blocks list, if any */
	char	   *freeptr;		/* start of free space in this block */
	char	   *endptr;			/* end of space in this block */
}			AllocBlockData;

/*
 * AllocChunk
 *		The prefix of each piece of memory in an AllocBlock
 */
typedef struct AllocChunkData
{
	/* size is always the size of the usable space in the chunk */
	Size		size;
#ifdef MEMORY_CONTEXT_CHECKING
	/* when debugging memory usage, also store actual requested size */
	/* this is zero in a free chunk */
	Size		requested_size;
#if MAXIMUM_ALIGNOF > 4 && SIZEOF_VOID_P == 4
	Size		padding;
#endif

#endif							/* MEMORY_CONTEXT_CHECKING */

	/* aset is the owning aset if allocated, or the freelist link if free */
	void	   *aset;

	/* there must not be any padding to reach a MAXALIGN boundary here! */
}			AllocChunkData;

/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) PointerIsValid(set)

#define AllocPointerGetChunk(ptr)	\
					((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk)	\
					((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))
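
/*
 * In memory, a chunk therefore looks like this (a sketch assuming no
 * MEMORY_CONTEXT_CHECKING and 64-bit pointers, so ALLOC_CHUNKHDRSZ is 16):
 *
 *		+------+------+--------------------------------+
 *		| size | aset | user data (the palloc'd area)  |
 *		+------+------+--------------------------------+
 *		^ AllocChunk  ^ AllocPointer
 *
 * AllocPointerGetChunk() and AllocChunkGetPointer() simply step backward
 * and forward over the header.
 */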

/*
 * These functions implement the MemoryContext API for AllocSet contexts.
 */
static void *AllocSetAlloc(MemoryContext context, Size size);
static void AllocSetFree(MemoryContext context, void *pointer);
static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
static void AllocSetInit(MemoryContext context);
static void AllocSetReset(MemoryContext context);
static void AllocSetDelete(MemoryContext context);
static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
static bool AllocSetIsEmpty(MemoryContext context);
static void AllocSetStats(MemoryContext context, int level, bool print,
			  MemoryContextCounters *totals);

#ifdef MEMORY_CONTEXT_CHECKING
static void AllocSetCheck(MemoryContext context);
#endif

/*
 * This is the virtual function table for AllocSet contexts.
 */
static MemoryContextMethods AllocSetMethods = {
	AllocSetAlloc,
	AllocSetFree,
	AllocSetRealloc,
	AllocSetInit,
	AllocSetReset,
	AllocSetDelete,
	AllocSetGetChunkSpace,
	AllocSetIsEmpty,
	AllocSetStats
#ifdef MEMORY_CONTEXT_CHECKING
	,AllocSetCheck
#endif
};

/*
 * Table for AllocSetFreeIndex
 */
#define LT16(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n

static const unsigned char LogTable256[256] =
{
	0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
	LT16(5), LT16(6), LT16(6), LT16(7), LT16(7), LT16(7), LT16(7),
	LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8)
};

/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
#define AllocFreeInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocFree: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#define AllocAllocInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocAlloc: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#else
#define AllocFreeInfo(_cxt, _chunk)
#define AllocAllocInfo(_cxt, _chunk)
#endif
261 
262 /* ----------
263  * AllocSetFreeIndex -
264  *
265  *		Depending on the size of an allocation compute which freechunk
266  *		list of the alloc set it belongs to.  Caller must have verified
267  *		that size <= ALLOC_CHUNK_LIMIT.
268  * ----------
269  */
270 static inline int
AllocSetFreeIndex(Size size)271 AllocSetFreeIndex(Size size)
272 {
273 	int			idx;
274 	unsigned int t,
275 				tsize;
276 
277 	if (size > (1 << ALLOC_MINBITS))
278 	{
279 		tsize = (size - 1) >> ALLOC_MINBITS;
280 
281 		/*
282 		 * At this point we need to obtain log2(tsize)+1, ie, the number of
283 		 * not-all-zero bits at the right.  We used to do this with a
284 		 * shift-and-count loop, but this function is enough of a hotspot to
285 		 * justify micro-optimization effort.  The best approach seems to be
286 		 * to use a lookup table.  Note that this code assumes that
287 		 * ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes of
288 		 * the tsize value.
289 		 */
290 		t = tsize >> 8;
291 		idx = t ? LogTable256[t] + 8 : LogTable256[tsize];
292 
293 		Assert(idx < ALLOCSET_NUM_FREELISTS);
294 	}
295 	else
296 		idx = 0;
297 
298 	return idx;
299 }
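
/*
 * Worked example (an illustrative sketch): for size = 300,
 * tsize = (300 - 1) >> 3 = 37, t = 37 >> 8 = 0, and LogTable256[37] = 6,
 * so the chunk goes on freelist 6, which holds 1 << (6 + 3) = 512-byte
 * chunks --- the smallest power-of-2 chunk size that can hold 300 bytes.
 */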


/*
 * Public routines
 */


/*
 * AllocSetContextCreate
 *		Create a new AllocSet context.
 *
 * parent: parent context, or NULL if top-level context
 * name: name of context (for debugging only, need not be unique)
 * minContextSize: minimum context size
 * initBlockSize: initial allocation block size
 * maxBlockSize: maximum allocation block size
 *
 * Notes: the name string will be copied into context-lifespan storage.
 * Most callers should abstract the context size parameters using a macro
 * such as ALLOCSET_DEFAULT_SIZES.
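 *
 * A typical call looks like this (a sketch; the context name is arbitrary):
 *
 *		cxt = AllocSetContextCreate(CurrentMemoryContext,
 *									"example context",
 *									ALLOCSET_DEFAULT_SIZES);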
 */
MemoryContext
AllocSetContextCreate(MemoryContext parent,
					  const char *name,
					  Size minContextSize,
					  Size initBlockSize,
					  Size maxBlockSize)
{
	AllocSet	set;

	StaticAssertStmt(offsetof(AllocChunkData, aset) + sizeof(MemoryContext) ==
					 MAXALIGN(sizeof(AllocChunkData)),
					 "padding calculation in AllocChunkData is wrong");

	/*
	 * First, validate allocation parameters.  (If we're going to throw an
	 * error, we should do so before the context is created, not after.)  We
	 * somewhat arbitrarily enforce a minimum 1K block size.
	 */
	if (initBlockSize != MAXALIGN(initBlockSize) ||
		initBlockSize < 1024)
		elog(ERROR, "invalid initBlockSize for memory context: %zu",
			 initBlockSize);
	if (maxBlockSize != MAXALIGN(maxBlockSize) ||
		maxBlockSize < initBlockSize ||
		!AllocHugeSizeIsValid(maxBlockSize))	/* must be safe to double */
		elog(ERROR, "invalid maxBlockSize for memory context: %zu",
			 maxBlockSize);
	if (minContextSize != 0 &&
		(minContextSize != MAXALIGN(minContextSize) ||
		 minContextSize <= ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
		elog(ERROR, "invalid minContextSize for memory context: %zu",
			 minContextSize);

	/* Do the type-independent part of context creation */
	set = (AllocSet) MemoryContextCreate(T_AllocSetContext,
										 sizeof(AllocSetContext),
										 &AllocSetMethods,
										 parent,
										 name);

	/* Save allocation parameters */
	set->initBlockSize = initBlockSize;
	set->maxBlockSize = maxBlockSize;
	set->nextBlockSize = initBlockSize;

	/*
	 * Compute the allocation chunk size limit for this context.  It can't be
	 * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
	 * If maxBlockSize is small then requests exceeding the maxBlockSize, or
	 * even a significant fraction of it, should be treated as large chunks
	 * too.  For the typical case of maxBlockSize a power of 2, the chunk size
	 * limit will be at most 1/8th maxBlockSize, so that given a stream of
	 * requests that are all the maximum chunk size we will waste at most
	 * 1/8th of the allocated space.
	 *
	 * We have to have allocChunkLimit a power of two, because the requested
	 * and actually-allocated sizes of any chunk must be on the same side of
	 * the limit, else we get confused about whether the chunk is "big".
	 *
	 * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
	 */
	StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
					 "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");

	set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
	while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
		   (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
		set->allocChunkLimit >>= 1;
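
	/*
	 * For example (a sketch assuming 64-bit pointers and no
	 * MEMORY_CONTEXT_CHECKING, giving a 40-byte block header and a 16-byte
	 * chunk header): with maxBlockSize = 8192, the loop requires
	 * allocChunkLimit + 16 <= (8192 - 40) / 4 = 2038, so the limit drops
	 * from 8192 to 1024.
	 */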

	/*
	 * Grab always-allocated space, if requested
	 */
	if (minContextSize > 0)
	{
		Size		blksize = minContextSize;
		AllocBlock	block;

		block = (AllocBlock) malloc(blksize);
		if (block == NULL)
		{
			MemoryContextStats(TopMemoryContext);
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of memory"),
					 errdetail("Failed while creating memory context \"%s\".",
							   name)));
		}
		block->aset = set;
		block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
		block->endptr = ((char *) block) + blksize;
		block->prev = NULL;
		block->next = set->blocks;
		if (block->next)
			block->next->prev = block;
		set->blocks = block;
		/* Mark block as not to be released at reset time */
		set->keeper = block;

		/* Mark unallocated space NOACCESS; leave the block header alone. */
		VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
								   blksize - ALLOC_BLOCKHDRSZ);
	}

	return (MemoryContext) set;
}

/*
 * AllocSetInit
 *		Context-type-specific initialization routine.
 *
 * This is called by MemoryContextCreate() after setting up the
 * generic MemoryContext fields and before linking the new context
 * into the context tree.  We must do whatever is needed to make the
 * new context minimally valid for deletion.  We must *not* risk
 * failure --- thus, for example, allocating more memory is not cool.
 * (AllocSetContextCreate can allocate memory when it gets control
 * back, however.)
 */
static void
AllocSetInit(MemoryContext context)
{
	/*
	 * Since MemoryContextCreate already zeroed the context node, we don't
	 * have to do anything here: it's already OK.
	 */
}

/*
 * AllocSetReset
 *		Frees all memory which is allocated in the given set.
 *
 * Actually, this routine has some discretion about what to do.
 * It should mark all allocated chunks freed, but it need not necessarily
 * give back all the resources the set owns.  Our actual implementation is
 * that we hang onto any "keeper" block specified for the set.  In this way,
 * we don't thrash malloc() when a context is repeatedly reset after small
 * allocations, which is typical behavior for per-tuple contexts.
 */
static void
AllocSetReset(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;

	AssertArg(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	AllocSetCheck(context);
#endif

	/* Clear chunk freelists */
	MemSetAligned(set->freelist, 0, sizeof(set->freelist));

	block = set->blocks;

	/* New blocks list is either empty or just the keeper block */
	set->blocks = set->keeper;

	while (block != NULL)
	{
		AllocBlock	next = block->next;

		if (block == set->keeper)
		{
			/* Reset the block, but don't return it to malloc */
			char	   *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;

#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(datastart, block->freeptr - datastart);
#else
			/* wipe_mem() would have done this */
			VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
#endif
			block->freeptr = datastart;
			block->prev = NULL;
			block->next = NULL;
		}
		else
		{
			/* Normal case, release the block */
#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(block, block->freeptr - ((char *) block));
#endif
			free(block);
		}
		block = next;
	}

	/* Reset block size allocation sequence, too */
	set->nextBlockSize = set->initBlockSize;
}

/*
 * AllocSetDelete
 *		Frees all memory which is allocated in the given set,
 *		in preparation for deletion of the set.
 *
 * Unlike AllocSetReset, this *must* free all resources of the set.
 * But note we are not responsible for deleting the context node itself.
 */
static void
AllocSetDelete(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block = set->blocks;

	AssertArg(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	AllocSetCheck(context);
#endif

	/* Make it look empty, just in case... */
	MemSetAligned(set->freelist, 0, sizeof(set->freelist));
	set->blocks = NULL;
	set->keeper = NULL;

	while (block != NULL)
	{
		AllocBlock	next = block->next;

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(block, block->freeptr - ((char *) block));
#endif
		free(block);
		block = next;
	}
}

/*
 * AllocSetAlloc
 *		Returns pointer to allocated memory of given size or NULL if
 *		request could not be completed; memory is added to the set.
 *
 * No request may exceed:
 *		MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
 * All callers use a much-lower limit.
 */
static void *
AllocSetAlloc(MemoryContext context, Size size)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;
	AllocChunk	chunk;
	int			fidx;
	Size		chunk_size;
	Size		blksize;

	AssertArg(AllocSetIsValid(set));

	/*
	 * If requested size exceeds maximum for chunks, allocate an entire block
	 * for this request.
	 */
	if (size > set->allocChunkLimit)
	{
		chunk_size = MAXALIGN(size);
		blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		block = (AllocBlock) malloc(blksize);
		if (block == NULL)
			return NULL;
		block->aset = set;
		block->freeptr = block->endptr = ((char *) block) + blksize;

		chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
		chunk->aset = set;
		chunk->size = chunk_size;
#ifdef MEMORY_CONTEXT_CHECKING
		/* Valgrind: Will be made NOACCESS below. */
		chunk->requested_size = size;
		/* set mark to catch clobber of "unused" space */
		if (size < chunk_size)
			set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* fill the allocated space with junk */
		randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

		/*
		 * Stick the new block underneath the active allocation block, if any,
		 * so that we don't lose the use of the space remaining therein.
		 */
		if (set->blocks != NULL)
		{
			block->prev = set->blocks;
			block->next = set->blocks->next;
			if (block->next)
				block->next->prev = block;
			set->blocks->next = block;
		}
		else
		{
			block->prev = NULL;
			block->next = NULL;
			set->blocks = block;
		}

		AllocAllocInfo(set, chunk);

		/*
		 * Chunk's metadata fields remain DEFINED.  The requested allocation
		 * itself can be NOACCESS or UNDEFINED; our caller will soon make it
		 * UNDEFINED.  Make extra space at the end of the chunk, if any,
		 * NOACCESS.
		 */
		VALGRIND_MAKE_MEM_NOACCESS((char *) chunk + ALLOC_CHUNKHDRSZ,
								   chunk_size - ALLOC_CHUNKHDRSZ);

		return AllocChunkGetPointer(chunk);
	}

	/*
	 * Request is small enough to be treated as a chunk.  Look in the
	 * corresponding free list to see if there is a free chunk we could reuse.
	 * If one is found, remove it from the free list, make it again a member
	 * of the alloc set and return its data address.
	 */
	fidx = AllocSetFreeIndex(size);
	chunk = set->freelist[fidx];
	if (chunk != NULL)
	{
		Assert(chunk->size >= size);

		set->freelist[fidx] = (AllocChunk) chunk->aset;

		chunk->aset = (void *) set;

#ifdef MEMORY_CONTEXT_CHECKING
		/* Valgrind: Free list requested_size should be DEFINED. */
		chunk->requested_size = size;
		VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
								   sizeof(chunk->requested_size));
		/* set mark to catch clobber of "unused" space */
		if (size < chunk->size)
			set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* fill the allocated space with junk */
		randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

		AllocAllocInfo(set, chunk);
		return AllocChunkGetPointer(chunk);
	}

	/*
	 * Choose the actual chunk size to allocate.
	 */
	chunk_size = (1 << ALLOC_MINBITS) << fidx;
	Assert(chunk_size >= size);

	/*
	 * If there is enough room in the active allocation block, we will put the
	 * chunk into that block.  Else must start a new one.
	 */
	if ((block = set->blocks) != NULL)
	{
		Size		availspace = block->endptr - block->freeptr;

		if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
		{
			/*
			 * The existing active (top) block does not have enough room for
			 * the requested allocation, but it might still have a useful
			 * amount of space in it.  Once we push it down in the block list,
			 * we'll never try to allocate more space from it. So, before we
			 * do that, carve up its free space into chunks that we can put on
			 * the set's freelists.
			 *
			 * Because we can only get here when there's less than
			 * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
			 * more than ALLOCSET_NUM_FREELISTS-1 times.
			 */
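			/*
			 * For instance (an illustrative sketch, assuming a 16-byte chunk
			 * header): with availspace = 100 we carve one 64-byte chunk (80
			 * bytes with its header) and then stop, since the remaining 20
			 * bytes cannot hold even a minimal 8-byte chunk plus its header.
			 */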
			while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
			{
				Size		availchunk = availspace - ALLOC_CHUNKHDRSZ;
				int			a_fidx = AllocSetFreeIndex(availchunk);

				/*
				 * In most cases, we'll get back the index of the next larger
				 * freelist than the one we need to put this chunk on.  The
				 * exception is when availchunk is exactly a power of 2.
				 */
				if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
				{
					a_fidx--;
					Assert(a_fidx >= 0);
					availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
				}

				chunk = (AllocChunk) (block->freeptr);

				/* Prepare to initialize the chunk header. */
				VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);

				block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
				availspace -= (availchunk + ALLOC_CHUNKHDRSZ);

				chunk->size = availchunk;
#ifdef MEMORY_CONTEXT_CHECKING
				chunk->requested_size = 0;	/* mark it free */
#endif
				chunk->aset = (void *) set->freelist[a_fidx];
				set->freelist[a_fidx] = chunk;
			}

			/* Mark that we need to create a new block */
			block = NULL;
		}
	}

	/*
	 * Time to create a new regular (multi-chunk) block?
	 */
	if (block == NULL)
	{
		Size		required_size;

		/*
		 * The first such block has size initBlockSize, and we double the
		 * space in each succeeding block, but not more than maxBlockSize.
		 */
		blksize = set->nextBlockSize;
		set->nextBlockSize <<= 1;
		if (set->nextBlockSize > set->maxBlockSize)
			set->nextBlockSize = set->maxBlockSize;

		/*
		 * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
		 * space... but try to keep it a power of 2.
		 */
		required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		while (blksize < required_size)
			blksize <<= 1;
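
		/*
		 * E.g. (a sketch with a 40-byte block header and 16-byte chunk
		 * header): if blksize is 1024 but the request needs an 8192-byte
		 * chunk, required_size is 8248 and blksize doubles up to 16384.
		 */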

		/* Try to allocate it */
		block = (AllocBlock) malloc(blksize);

		/*
		 * We could be asking for pretty big blocks here, so cope if malloc
		 * fails.  But give up if there's less than a meg or so available...
		 */
		while (block == NULL && blksize > 1024 * 1024)
		{
			blksize >>= 1;
			if (blksize < required_size)
				break;
			block = (AllocBlock) malloc(blksize);
		}

		if (block == NULL)
			return NULL;

		block->aset = set;
		block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
		block->endptr = ((char *) block) + blksize;

		/*
		 * If this is the first block of the set, make it the "keeper" block.
		 * Formerly, a keeper block could only be created during context
		 * creation, but allowing it to happen here lets us have fast reset
		 * cycling even for contexts created with minContextSize = 0; that way
		 * we don't have to force space to be allocated in contexts that might
		 * never need any space.  Don't mark an oversize block as a keeper,
		 * however.
		 */
		if (set->keeper == NULL && blksize == set->initBlockSize)
			set->keeper = block;

		/* Mark unallocated space NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
								   blksize - ALLOC_BLOCKHDRSZ);

		block->prev = NULL;
		block->next = set->blocks;
		if (block->next)
			block->next->prev = block;
		set->blocks = block;
	}

	/*
	 * OK, do the allocation
	 */
	chunk = (AllocChunk) (block->freeptr);

	/* Prepare to initialize the chunk header. */
	VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);

	block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
	Assert(block->freeptr <= block->endptr);

	chunk->aset = (void *) set;
	chunk->size = chunk_size;
#ifdef MEMORY_CONTEXT_CHECKING
	chunk->requested_size = size;
	VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
							   sizeof(chunk->requested_size));
	/* set mark to catch clobber of "unused" space */
	if (size < chunk->size)
		set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
	/* fill the allocated space with junk */
	randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

	AllocAllocInfo(set, chunk);
	return AllocChunkGetPointer(chunk);
}

/*
 * AllocSetFree
 *		Frees allocated memory; memory is removed from the set.
 */
static void
AllocSetFree(MemoryContext context, void *pointer)
{
	AllocSet	set = (AllocSet) context;
	AllocChunk	chunk = AllocPointerGetChunk(pointer);

	AllocFreeInfo(set, chunk);

#ifdef MEMORY_CONTEXT_CHECKING
	VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
							  sizeof(chunk->requested_size));
	/* Test for someone scribbling on unused space in chunk */
	if (chunk->requested_size < chunk->size)
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

	if (chunk->size > set->allocChunkLimit)
	{
		/*
		 * Big chunks are certain to have been allocated as single-chunk
		 * blocks.  Just unlink that block and return it to malloc().
		 */
		AllocBlock	block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);

		/*
		 * Try to verify that we have a sane block pointer: it should
		 * reference the correct aset, and freeptr and endptr should point
		 * just past the chunk.
		 */
		if (block->aset != set ||
			block->freeptr != block->endptr ||
			block->freeptr != ((char *) block) +
			(chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
			elog(ERROR, "could not find block containing chunk %p", chunk);

		/* OK, remove block from aset's list and free it */
		if (block->prev)
			block->prev->next = block->next;
		else
			set->blocks = block->next;
		if (block->next)
			block->next->prev = block->prev;
#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(block, block->freeptr - ((char *) block));
#endif
		free(block);
	}
	else
	{
		/* Normal case, put the chunk into appropriate freelist */
		int			fidx = AllocSetFreeIndex(chunk->size);

		chunk->aset = (void *) set->freelist[fidx];

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(pointer, chunk->size);
#endif

#ifdef MEMORY_CONTEXT_CHECKING
		/* Reset requested_size to 0 in chunks that are on freelist */
		chunk->requested_size = 0;
#endif
		set->freelist[fidx] = chunk;
	}
}

/*
 * AllocSetRealloc
 *		Returns new pointer to allocated memory of given size or NULL if
 *		request could not be completed; this memory is added to the set.
 *		Memory associated with given pointer is copied into the new memory,
 *		and the old memory is freed.
 *
 * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size.  This
 * makes our Valgrind client requests less-precise, hazarding false negatives.
 * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
 * request size.)
 */
static void *
AllocSetRealloc(MemoryContext context, void *pointer, Size size)
{
	AllocSet	set = (AllocSet) context;
	AllocChunk	chunk = AllocPointerGetChunk(pointer);
	Size		oldsize = chunk->size;

#ifdef MEMORY_CONTEXT_CHECKING
	VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
							  sizeof(chunk->requested_size));
	/* Test for someone scribbling on unused space in chunk */
	if (chunk->requested_size < oldsize)
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

	if (oldsize > set->allocChunkLimit)
	{
		/*
		 * The chunk must have been allocated as a single-chunk block.  Use
		 * realloc() to make the containing block bigger, or smaller, with
		 * minimum space wastage.
		 */
		AllocBlock	block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
		Size		chksize;
		Size		blksize;

		/*
		 * Try to verify that we have a sane block pointer: it should
		 * reference the correct aset, and freeptr and endptr should point
		 * just past the chunk.
		 */
		if (block->aset != set ||
			block->freeptr != block->endptr ||
			block->freeptr != ((char *) block) +
			(oldsize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
			elog(ERROR, "could not find block containing chunk %p", chunk);

		/*
		 * Even if the new request is less than set->allocChunkLimit, we stick
		 * with the single-chunk block approach.  Therefore we need
		 * chunk->size to be bigger than set->allocChunkLimit, so we don't get
		 * confused about the chunk's status in future calls.
		 */
		chksize = Max(size, set->allocChunkLimit + 1);
		chksize = MAXALIGN(chksize);

		/* Do the realloc */
		blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		block = (AllocBlock) realloc(block, blksize);
		if (block == NULL)
			return NULL;
		block->freeptr = block->endptr = ((char *) block) + blksize;

		/* Update pointers since block has likely been moved */
		chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
		pointer = AllocChunkGetPointer(chunk);
		if (block->prev)
			block->prev->next = block;
		else
			set->blocks = block;
		if (block->next)
			block->next->prev = block;
		chunk->size = chksize;

#ifdef MEMORY_CONTEXT_CHECKING
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		if (size > chunk->requested_size)
			randomize_mem((char *) pointer + chunk->requested_size,
						  size - chunk->requested_size);
#endif

		/*
		 * realloc() (or randomize_mem()) will have left any newly-allocated
		 * part UNDEFINED, but we may need to adjust trailing bytes from the
		 * old allocation.
		 */
#ifdef USE_VALGRIND
		if (oldsize > chunk->requested_size)
			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
										oldsize - chunk->requested_size);
#endif

		chunk->requested_size = size;
		VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
								   sizeof(chunk->requested_size));

		/* set mark to catch clobber of "unused" space */
		if (size < chunk->size)
			set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't know how much of the old chunk size was the actual
		 * allocation; it could have been as small as one byte.  We have to be
		 * conservative and just mark the entire old portion DEFINED.
		 */
		VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

		/* Make any trailing alignment padding NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);

		return pointer;
	}

	/*
	 * Chunk sizes are aligned to power of 2 in AllocSetAlloc().  Maybe the
	 * allocated area already is >= the new size.  (In particular, we will
	 * fall out here if the requested size is a decrease.)
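	 *
	 * For example, growing a 100-byte request (stored in a 128-byte chunk)
	 * to 120 bytes lands here and needs no new allocation.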
	 */
	else if (oldsize >= size)
	{
#ifdef MEMORY_CONTEXT_CHECKING
		Size		oldrequest = chunk->requested_size;

#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		if (size > oldrequest)
			randomize_mem((char *) pointer + oldrequest,
						  size - oldrequest);
#endif

		chunk->requested_size = size;
		VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
								   sizeof(chunk->requested_size));

		/*
		 * If this is an increase, mark any newly-available part UNDEFINED.
		 * Otherwise, mark the obsolete part NOACCESS.
		 */
		if (size > oldrequest)
			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
										size - oldrequest);
		else
			VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
									   oldsize - size);

		/* set mark to catch clobber of "unused" space */
		if (size < oldsize)
			set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't have the information to determine whether we're growing
		 * the old request or shrinking it, so we conservatively mark the
		 * entire new allocation DEFINED.
		 */
		VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
		VALGRIND_MAKE_MEM_DEFINED(pointer, size);
#endif

		return pointer;
	}
	else
	{
		/*
		 * Enlarge-a-small-chunk case.  We just do this by brute force, ie,
		 * allocate a new chunk and copy the data.  Since we know the existing
		 * data isn't huge, this won't involve any great memcpy expense, so
		 * it's not worth being smarter.  (At one time we tried to avoid
		 * memcpy when it was possible to enlarge the chunk in-place, but that
		 * turns out to misbehave unpleasantly for repeated cycles of
		 * palloc/repalloc/pfree: the eventually freed chunks go into the
		 * wrong freelist for the next initial palloc request, and so we leak
		 * memory indefinitely.  See pgsql-hackers archives for 2007-08-11.)
		 */
		AllocPointer newPointer;

		/* allocate new chunk */
		newPointer = AllocSetAlloc((MemoryContext) set, size);

		/* leave immediately if request was not completed */
		if (newPointer == NULL)
			return NULL;

		/*
		 * AllocSetAlloc() just made the region NOACCESS.  Change it to
		 * UNDEFINED for the moment; memcpy() will then transfer definedness
		 * from the old allocation to the new.  If we know the old allocation,
		 * copy just that much.  Otherwise, make the entire old chunk defined
		 * to avoid errors as we copy the currently-NOACCESS trailing bytes.
		 */
		VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
#ifdef MEMORY_CONTEXT_CHECKING
		oldsize = chunk->requested_size;
#else
		VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

		/* transfer existing data (certain to fit) */
		memcpy(newPointer, pointer, oldsize);

		/* free old chunk */
		AllocSetFree((MemoryContext) set, pointer);

		return newPointer;
	}
}

/*
 * AllocSetGetChunkSpace
 *		Given a currently-allocated chunk, determine the total space
 *		it occupies (including all memory-allocation overhead).
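 *
 * For example (a sketch assuming no MEMORY_CONTEXT_CHECKING on a 64-bit
 * build, so the chunk header is 16 bytes), a 100-byte palloc() occupies a
 * 128-byte chunk and this returns 128 + 16 = 144.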
 */
static Size
AllocSetGetChunkSpace(MemoryContext context, void *pointer)
{
	AllocChunk	chunk = AllocPointerGetChunk(pointer);

	return chunk->size + ALLOC_CHUNKHDRSZ;
}

/*
 * AllocSetIsEmpty
 *		Is an allocset empty of any allocated space?
 */
static bool
AllocSetIsEmpty(MemoryContext context)
{
	/*
	 * For now, we say "empty" only if the context is new or just reset. We
	 * could examine the freelists to determine if all space has been freed,
	 * but it's not really worth the trouble for present uses of this
	 * functionality.
	 */
	if (context->isReset)
		return true;
	return false;
}

/*
 * AllocSetStats
 *		Compute stats about memory consumption of an allocset.
 *
 * level: recursion level (0 at top level); used for print indentation.
 * print: true to print stats to stderr.
 * totals: if not NULL, add stats about this allocset into *totals.
 */
static void
AllocSetStats(MemoryContext context, int level, bool print,
			  MemoryContextCounters *totals)
{
	AllocSet	set = (AllocSet) context;
	Size		nblocks = 0;
	Size		freechunks = 0;
	Size		totalspace = 0;
	Size		freespace = 0;
	AllocBlock	block;
	int			fidx;

	for (block = set->blocks; block != NULL; block = block->next)
	{
		nblocks++;
		totalspace += block->endptr - ((char *) block);
		freespace += block->endptr - block->freeptr;
	}
	for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
	{
		AllocChunk	chunk;

		for (chunk = set->freelist[fidx]; chunk != NULL;
			 chunk = (AllocChunk) chunk->aset)
		{
			freechunks++;
			freespace += chunk->size + ALLOC_CHUNKHDRSZ;
		}
	}

	if (print)
	{
		int			i;

		for (i = 0; i < level; i++)
			fprintf(stderr, "  ");
		fprintf(stderr,
				"%s: %zu total in %zd blocks; %zu free (%zd chunks); %zu used\n",
				set->header.name, totalspace, nblocks, freespace, freechunks,
				totalspace - freespace);
	}

	if (totals)
	{
		totals->nblocks += nblocks;
		totals->freechunks += freechunks;
		totals->totalspace += totalspace;
		totals->freespace += freespace;
	}
}


#ifdef MEMORY_CONTEXT_CHECKING

/*
 * AllocSetCheck
 *		Walk through chunks and check consistency of memory.
 *
 * NOTE: report errors as WARNING, *not* ERROR or FATAL.  Otherwise you'll
 * find yourself in an infinite loop when trouble occurs, because this
 * routine will be entered again when elog cleanup tries to release memory!
 */
static void
AllocSetCheck(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	char	   *name = set->header.name;
	AllocBlock	prevblock;
	AllocBlock	block;

	for (prevblock = NULL, block = set->blocks;
		 block != NULL;
		 prevblock = block, block = block->next)
	{
		char	   *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
		long		blk_used = block->freeptr - bpoz;
		long		blk_data = 0;
		long		nchunks = 0;

		/*
		 * An empty block can only be the keeper block.
		 */
		if (!blk_used)
		{
			if (set->keeper != block)
				elog(WARNING, "problem in alloc set %s: empty block %p",
					 name, block);
		}

		/*
		 * Check block header fields
		 */
		if (block->aset != set ||
			block->prev != prevblock ||
			block->freeptr < bpoz ||
			block->freeptr > block->endptr)
			elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
				 name, block);

		/*
		 * Chunk walker
		 */
		while (bpoz < block->freeptr)
		{
			AllocChunk	chunk = (AllocChunk) bpoz;
			Size		chsize,
						dsize;

			chsize = chunk->size;	/* aligned chunk size */
			VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
									  sizeof(chunk->requested_size));
			dsize = chunk->requested_size;	/* real data */
			if (dsize > 0)		/* not on a free list */
				VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
										   sizeof(chunk->requested_size));

			/*
			 * Check chunk size
			 */
			if (dsize > chsize)
				elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
					 name, chunk, block);
			if (chsize < (1 << ALLOC_MINBITS))
				elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
					 name, chsize, chunk, block);

			/* single-chunk block? */
			if (chsize > set->allocChunkLimit &&
				chsize + ALLOC_CHUNKHDRSZ != blk_used)
				elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
					 name, chunk, block);

			/*
			 * If chunk is allocated, check for correct aset pointer. (If it's
			 * free, the aset is the freelist pointer, which we can't check as
			 * easily...)
			 */
			if (dsize > 0 && chunk->aset != (void *) set)
				elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
					 name, block, chunk);

			/*
			 * Check for overwrite of "unallocated" space in chunk
			 */
			if (dsize > 0 && dsize < chsize &&
				!sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
				elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
					 name, block, chunk);

			blk_data += chsize;
			nchunks++;

			bpoz += ALLOC_CHUNKHDRSZ + chsize;
		}

		if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
			elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
				 name, block);
	}
}

#endif							/* MEMORY_CONTEXT_CHECKING */