1 /*-------------------------------------------------------------------------
2 *
3 * generation.c
4 * Generational allocator definitions.
5 *
6 * Generation is a custom MemoryContext implementation designed for cases of
7 * chunks with similar lifespan.
8 *
9 * Portions Copyright (c) 2017-2021, PostgreSQL Global Development Group
10 *
11 * IDENTIFICATION
12 * src/backend/utils/mmgr/generation.c
13 *
14 *
15 * This memory context is based on the assumption that the chunks are freed
16 * roughly in the same order as they were allocated (FIFO), or in groups with
17 * similar lifespan (generations - hence the name of the context). This is
18 * typical for various queue-like use cases, i.e. when tuples are constructed,
19 * processed and then thrown away.
20 *
21 * The memory context uses a very simple approach to free space management.
22 * Instead of a complex global freelist, each block tracks a number
23 * of allocated and freed chunks. Freed chunks are not reused, and once all
24 * chunks in a block are freed, the whole block is thrown away. When the
25 * chunks allocated in the same block have similar lifespan, this works
26 * very well and is very cheap.
27 *
28 * The current implementation only uses a fixed block size - maybe it should
29 * adapt a min/max block size range, and grow the blocks automatically.
30 * It already uses dedicated blocks for oversized chunks.
31 *
32 * XXX It might be possible to improve this by keeping a small freelist for
33 * only a small number of recent blocks, but it's not clear it's worth the
34 * additional complexity.
35 *
36 *-------------------------------------------------------------------------
37 */
38
39 #include "postgres.h"
40
41 #include "lib/ilist.h"
42 #include "utils/memdebug.h"
43 #include "utils/memutils.h"
44
45
/*
 * Header sizes.  The block header is maxaligned so that the chunk area
 * following it starts on an alignment boundary; the chunk header itself is
 * required (and statically asserted in GenerationContextCreate) to already
 * be maxaligned.
 */
#define Generation_BLOCKHDRSZ	MAXALIGN(sizeof(GenerationBlock))
#define Generation_CHUNKHDRSZ	sizeof(GenerationChunk)

typedef struct GenerationBlock GenerationBlock; /* forward reference */
typedef struct GenerationChunk GenerationChunk;

/* opaque pointer type handed back to palloc() callers */
typedef void *GenerationPointer;
53
54 /*
55 * GenerationContext is a simple memory context not reusing allocated chunks,
56 * and freeing blocks once all chunks are freed.
57 */
58 typedef struct GenerationContext
59 {
60 MemoryContextData header; /* Standard memory-context fields */
61
62 /* Generational context parameters */
63 Size blockSize; /* standard block size */
64
65 GenerationBlock *block; /* current (most recently allocated) block */
66 dlist_head blocks; /* list of blocks */
67 } GenerationContext;
68
69 /*
70 * GenerationBlock
71 * GenerationBlock is the unit of memory that is obtained by generation.c
72 * from malloc(). It contains one or more GenerationChunks, which are
73 * the units requested by palloc() and freed by pfree(). GenerationChunks
74 * cannot be returned to malloc() individually, instead pfree()
75 * updates the free counter of the block and when all chunks in a block
76 * are free the whole block is returned to malloc().
77 *
78 * GenerationBlock is the header data for a block --- the usable space
79 * within the block begins at the next alignment boundary.
80 */
81 struct GenerationBlock
82 {
83 dlist_node node; /* doubly-linked list of blocks */
84 Size blksize; /* allocated size of this block */
85 int nchunks; /* number of chunks in the block */
86 int nfree; /* number of free chunks */
87 char *freeptr; /* start of free space in this block */
88 char *endptr; /* end of space in this block */
89 };
90
91 /*
92 * GenerationChunk
93 * The prefix of each piece of memory in a GenerationBlock
94 *
95 * Note: to meet the memory context APIs, the payload area of the chunk must
96 * be maxaligned, and the "context" link must be immediately adjacent to the
97 * payload area (cf. GetMemoryChunkContext). We simplify matters for this
98 * module by requiring sizeof(GenerationChunk) to be maxaligned, and then
99 * we can ensure things work by adding any required alignment padding before
100 * the pointer fields. There is a static assertion below that the alignment
101 * is done correctly.
102 */
103 struct GenerationChunk
104 {
105 /* size is always the size of the usable space in the chunk */
106 Size size;
107 #ifdef MEMORY_CONTEXT_CHECKING
108 /* when debugging memory usage, also store actual requested size */
109 /* this is zero in a free chunk */
110 Size requested_size;
111
112 #define GENERATIONCHUNK_RAWSIZE (SIZEOF_SIZE_T * 2 + SIZEOF_VOID_P * 2)
113 #else
114 #define GENERATIONCHUNK_RAWSIZE (SIZEOF_SIZE_T + SIZEOF_VOID_P * 2)
115 #endif /* MEMORY_CONTEXT_CHECKING */
116
117 /* ensure proper alignment by adding padding if needed */
118 #if (GENERATIONCHUNK_RAWSIZE % MAXIMUM_ALIGNOF) != 0
119 char padding[MAXIMUM_ALIGNOF - GENERATIONCHUNK_RAWSIZE % MAXIMUM_ALIGNOF];
120 #endif
121
122 GenerationBlock *block; /* block owning this chunk */
123 GenerationContext *context; /* owning context, or NULL if freed chunk */
124 /* there must not be any padding to reach a MAXALIGN boundary here! */
125 };
126
127 /*
128 * Only the "context" field should be accessed outside this module.
129 * We keep the rest of an allocated chunk's header marked NOACCESS when using
130 * valgrind. But note that freed chunk headers are kept accessible, for
131 * simplicity.
132 */
133 #define GENERATIONCHUNK_PRIVATE_LEN offsetof(GenerationChunk, context)
134
135 /*
136 * GenerationIsValid
137 * True iff set is valid allocation set.
138 */
139 #define GenerationIsValid(set) PointerIsValid(set)
140
141 #define GenerationPointerGetChunk(ptr) \
142 ((GenerationChunk *)(((char *)(ptr)) - Generation_CHUNKHDRSZ))
143 #define GenerationChunkGetPointer(chk) \
144 ((GenerationPointer *)(((char *)(chk)) + Generation_CHUNKHDRSZ))
145
146 /*
147 * These functions implement the MemoryContext API for Generation contexts.
148 */
149 static void *GenerationAlloc(MemoryContext context, Size size);
150 static void GenerationFree(MemoryContext context, void *pointer);
151 static void *GenerationRealloc(MemoryContext context, void *pointer, Size size);
152 static void GenerationReset(MemoryContext context);
153 static void GenerationDelete(MemoryContext context);
154 static Size GenerationGetChunkSpace(MemoryContext context, void *pointer);
155 static bool GenerationIsEmpty(MemoryContext context);
156 static void GenerationStats(MemoryContext context,
157 MemoryStatsPrintFunc printfunc, void *passthru,
158 MemoryContextCounters *totals,
159 bool print_to_stderr);
160
161 #ifdef MEMORY_CONTEXT_CHECKING
162 static void GenerationCheck(MemoryContext context);
163 #endif
164
165 /*
166 * This is the virtual function table for Generation contexts.
167 */
168 static const MemoryContextMethods GenerationMethods = {
169 GenerationAlloc,
170 GenerationFree,
171 GenerationRealloc,
172 GenerationReset,
173 GenerationDelete,
174 GenerationGetChunkSpace,
175 GenerationIsEmpty,
176 GenerationStats
177 #ifdef MEMORY_CONTEXT_CHECKING
178 ,GenerationCheck
179 #endif
180 };
181
182
183 /*
184 * Public routines
185 */
186
187
188 /*
189 * GenerationContextCreate
190 * Create a new Generation context.
191 *
192 * parent: parent context, or NULL if top-level context
193 * name: name of context (must be statically allocated)
194 * blockSize: generation block size
195 */
196 MemoryContext
GenerationContextCreate(MemoryContext parent,const char * name,Size blockSize)197 GenerationContextCreate(MemoryContext parent,
198 const char *name,
199 Size blockSize)
200 {
201 GenerationContext *set;
202
203 /* Assert we padded GenerationChunk properly */
204 StaticAssertStmt(Generation_CHUNKHDRSZ == MAXALIGN(Generation_CHUNKHDRSZ),
205 "sizeof(GenerationChunk) is not maxaligned");
206 StaticAssertStmt(offsetof(GenerationChunk, context) + sizeof(MemoryContext) ==
207 Generation_CHUNKHDRSZ,
208 "padding calculation in GenerationChunk is wrong");
209
210 /*
211 * First, validate allocation parameters. (If we're going to throw an
212 * error, we should do so before the context is created, not after.) We
213 * somewhat arbitrarily enforce a minimum 1K block size, mostly because
214 * that's what AllocSet does.
215 */
216 if (blockSize != MAXALIGN(blockSize) ||
217 blockSize < 1024 ||
218 !AllocHugeSizeIsValid(blockSize))
219 elog(ERROR, "invalid blockSize for memory context: %zu",
220 blockSize);
221
222 /*
223 * Allocate the context header. Unlike aset.c, we never try to combine
224 * this with the first regular block, since that would prevent us from
225 * freeing the first generation of allocations.
226 */
227
228 set = (GenerationContext *) malloc(MAXALIGN(sizeof(GenerationContext)));
229 if (set == NULL)
230 {
231 MemoryContextStats(TopMemoryContext);
232 ereport(ERROR,
233 (errcode(ERRCODE_OUT_OF_MEMORY),
234 errmsg("out of memory"),
235 errdetail("Failed while creating memory context \"%s\".",
236 name)));
237 }
238
239 /*
240 * Avoid writing code that can fail between here and MemoryContextCreate;
241 * we'd leak the header if we ereport in this stretch.
242 */
243
244 /* Fill in GenerationContext-specific header fields */
245 set->blockSize = blockSize;
246 set->block = NULL;
247 dlist_init(&set->blocks);
248
249 /* Finally, do the type-independent part of context creation */
250 MemoryContextCreate((MemoryContext) set,
251 T_GenerationContext,
252 &GenerationMethods,
253 parent,
254 name);
255
256 return (MemoryContext) set;
257 }
258
259 /*
260 * GenerationReset
261 * Frees all memory which is allocated in the given set.
262 *
263 * The code simply frees all the blocks in the context - we don't keep any
264 * keeper blocks or anything like that.
265 */
266 static void
GenerationReset(MemoryContext context)267 GenerationReset(MemoryContext context)
268 {
269 GenerationContext *set = (GenerationContext *) context;
270 dlist_mutable_iter miter;
271
272 AssertArg(GenerationIsValid(set));
273
274 #ifdef MEMORY_CONTEXT_CHECKING
275 /* Check for corruption and leaks before freeing */
276 GenerationCheck(context);
277 #endif
278
279 dlist_foreach_modify(miter, &set->blocks)
280 {
281 GenerationBlock *block = dlist_container(GenerationBlock, node, miter.cur);
282
283 dlist_delete(miter.cur);
284
285 context->mem_allocated -= block->blksize;
286
287 #ifdef CLOBBER_FREED_MEMORY
288 wipe_mem(block, block->blksize);
289 #endif
290
291 free(block);
292 }
293
294 set->block = NULL;
295
296 Assert(dlist_is_empty(&set->blocks));
297 }
298
299 /*
300 * GenerationDelete
301 * Free all memory which is allocated in the given context.
302 */
303 static void
GenerationDelete(MemoryContext context)304 GenerationDelete(MemoryContext context)
305 {
306 /* Reset to release all the GenerationBlocks */
307 GenerationReset(context);
308 /* And free the context header */
309 free(context);
310 }
311
312 /*
313 * GenerationAlloc
314 * Returns pointer to allocated memory of given size or NULL if
315 * request could not be completed; memory is added to the set.
316 *
317 * No request may exceed:
318 * MAXALIGN_DOWN(SIZE_MAX) - Generation_BLOCKHDRSZ - Generation_CHUNKHDRSZ
319 * All callers use a much-lower limit.
320 *
321 * Note: when using valgrind, it doesn't matter how the returned allocation
322 * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
323 * return space that is marked NOACCESS - GenerationRealloc has to beware!
324 */
325 static void *
GenerationAlloc(MemoryContext context,Size size)326 GenerationAlloc(MemoryContext context, Size size)
327 {
328 GenerationContext *set = (GenerationContext *) context;
329 GenerationBlock *block;
330 GenerationChunk *chunk;
331 Size chunk_size = MAXALIGN(size);
332
333 /* is it an over-sized chunk? if yes, allocate special block */
334 if (chunk_size > set->blockSize / 8)
335 {
336 Size blksize = chunk_size + Generation_BLOCKHDRSZ + Generation_CHUNKHDRSZ;
337
338 block = (GenerationBlock *) malloc(blksize);
339 if (block == NULL)
340 return NULL;
341
342 context->mem_allocated += blksize;
343
344 /* block with a single (used) chunk */
345 block->blksize = blksize;
346 block->nchunks = 1;
347 block->nfree = 0;
348
349 /* the block is completely full */
350 block->freeptr = block->endptr = ((char *) block) + blksize;
351
352 chunk = (GenerationChunk *) (((char *) block) + Generation_BLOCKHDRSZ);
353 chunk->block = block;
354 chunk->context = set;
355 chunk->size = chunk_size;
356
357 #ifdef MEMORY_CONTEXT_CHECKING
358 chunk->requested_size = size;
359 /* set mark to catch clobber of "unused" space */
360 if (size < chunk_size)
361 set_sentinel(GenerationChunkGetPointer(chunk), size);
362 #endif
363 #ifdef RANDOMIZE_ALLOCATED_MEMORY
364 /* fill the allocated space with junk */
365 randomize_mem((char *) GenerationChunkGetPointer(chunk), size);
366 #endif
367
368 /* add the block to the list of allocated blocks */
369 dlist_push_head(&set->blocks, &block->node);
370
371 /* Ensure any padding bytes are marked NOACCESS. */
372 VALGRIND_MAKE_MEM_NOACCESS((char *) GenerationChunkGetPointer(chunk) + size,
373 chunk_size - size);
374
375 /* Disallow external access to private part of chunk header. */
376 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
377
378 return GenerationChunkGetPointer(chunk);
379 }
380
381 /*
382 * Not an over-sized chunk. Is there enough space in the current block? If
383 * not, allocate a new "regular" block.
384 */
385 block = set->block;
386
387 if ((block == NULL) ||
388 (block->endptr - block->freeptr) < Generation_CHUNKHDRSZ + chunk_size)
389 {
390 Size blksize = set->blockSize;
391
392 block = (GenerationBlock *) malloc(blksize);
393
394 if (block == NULL)
395 return NULL;
396
397 context->mem_allocated += blksize;
398
399 block->blksize = blksize;
400 block->nchunks = 0;
401 block->nfree = 0;
402
403 block->freeptr = ((char *) block) + Generation_BLOCKHDRSZ;
404 block->endptr = ((char *) block) + blksize;
405
406 /* Mark unallocated space NOACCESS. */
407 VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
408 blksize - Generation_BLOCKHDRSZ);
409
410 /* add it to the doubly-linked list of blocks */
411 dlist_push_head(&set->blocks, &block->node);
412
413 /* and also use it as the current allocation block */
414 set->block = block;
415 }
416
417 /* we're supposed to have a block with enough free space now */
418 Assert(block != NULL);
419 Assert((block->endptr - block->freeptr) >= Generation_CHUNKHDRSZ + chunk_size);
420
421 chunk = (GenerationChunk *) block->freeptr;
422
423 /* Prepare to initialize the chunk header. */
424 VALGRIND_MAKE_MEM_UNDEFINED(chunk, Generation_CHUNKHDRSZ);
425
426 block->nchunks += 1;
427 block->freeptr += (Generation_CHUNKHDRSZ + chunk_size);
428
429 Assert(block->freeptr <= block->endptr);
430
431 chunk->block = block;
432 chunk->context = set;
433 chunk->size = chunk_size;
434
435 #ifdef MEMORY_CONTEXT_CHECKING
436 chunk->requested_size = size;
437 /* set mark to catch clobber of "unused" space */
438 if (size < chunk->size)
439 set_sentinel(GenerationChunkGetPointer(chunk), size);
440 #endif
441 #ifdef RANDOMIZE_ALLOCATED_MEMORY
442 /* fill the allocated space with junk */
443 randomize_mem((char *) GenerationChunkGetPointer(chunk), size);
444 #endif
445
446 /* Ensure any padding bytes are marked NOACCESS. */
447 VALGRIND_MAKE_MEM_NOACCESS((char *) GenerationChunkGetPointer(chunk) + size,
448 chunk_size - size);
449
450 /* Disallow external access to private part of chunk header. */
451 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
452
453 return GenerationChunkGetPointer(chunk);
454 }
455
456 /*
457 * GenerationFree
458 * Update number of chunks in the block, and if all chunks in the block
459 * are now free then discard the block.
460 */
461 static void
GenerationFree(MemoryContext context,void * pointer)462 GenerationFree(MemoryContext context, void *pointer)
463 {
464 GenerationContext *set = (GenerationContext *) context;
465 GenerationChunk *chunk = GenerationPointerGetChunk(pointer);
466 GenerationBlock *block;
467
468 /* Allow access to private part of chunk header. */
469 VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);
470
471 block = chunk->block;
472
473 #ifdef MEMORY_CONTEXT_CHECKING
474 /* Test for someone scribbling on unused space in chunk */
475 if (chunk->requested_size < chunk->size)
476 if (!sentinel_ok(pointer, chunk->requested_size))
477 elog(WARNING, "detected write past chunk end in %s %p",
478 ((MemoryContext) set)->name, chunk);
479 #endif
480
481 #ifdef CLOBBER_FREED_MEMORY
482 wipe_mem(pointer, chunk->size);
483 #endif
484
485 /* Reset context to NULL in freed chunks */
486 chunk->context = NULL;
487
488 #ifdef MEMORY_CONTEXT_CHECKING
489 /* Reset requested_size to 0 in freed chunks */
490 chunk->requested_size = 0;
491 #endif
492
493 block->nfree += 1;
494
495 Assert(block->nchunks > 0);
496 Assert(block->nfree <= block->nchunks);
497
498 /* If there are still allocated chunks in the block, we're done. */
499 if (block->nfree < block->nchunks)
500 return;
501
502 /*
503 * The block is empty, so let's get rid of it. First remove it from the
504 * list of blocks, then return it to malloc().
505 */
506 dlist_delete(&block->node);
507
508 /* Also make sure the block is not marked as the current block. */
509 if (set->block == block)
510 set->block = NULL;
511
512 context->mem_allocated -= block->blksize;
513 free(block);
514 }
515
516 /*
517 * GenerationRealloc
518 * When handling repalloc, we simply allocate a new chunk, copy the data
519 * and discard the old one. The only exception is when the new size fits
520 * into the old chunk - in that case we just update chunk header.
521 */
522 static void *
GenerationRealloc(MemoryContext context,void * pointer,Size size)523 GenerationRealloc(MemoryContext context, void *pointer, Size size)
524 {
525 GenerationContext *set = (GenerationContext *) context;
526 GenerationChunk *chunk = GenerationPointerGetChunk(pointer);
527 GenerationPointer newPointer;
528 Size oldsize;
529
530 /* Allow access to private part of chunk header. */
531 VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);
532
533 oldsize = chunk->size;
534
535 #ifdef MEMORY_CONTEXT_CHECKING
536 /* Test for someone scribbling on unused space in chunk */
537 if (chunk->requested_size < oldsize)
538 if (!sentinel_ok(pointer, chunk->requested_size))
539 elog(WARNING, "detected write past chunk end in %s %p",
540 ((MemoryContext) set)->name, chunk);
541 #endif
542
543 /*
544 * Maybe the allocated area already is >= the new size. (In particular,
545 * we always fall out here if the requested size is a decrease.)
546 *
547 * This memory context does not use power-of-2 chunk sizing and instead
548 * carves the chunks to be as small as possible, so most repalloc() calls
549 * will end up in the palloc/memcpy/pfree branch.
550 *
551 * XXX Perhaps we should annotate this condition with unlikely()?
552 */
553 if (oldsize >= size)
554 {
555 #ifdef MEMORY_CONTEXT_CHECKING
556 Size oldrequest = chunk->requested_size;
557
558 #ifdef RANDOMIZE_ALLOCATED_MEMORY
559 /* We can only fill the extra space if we know the prior request */
560 if (size > oldrequest)
561 randomize_mem((char *) pointer + oldrequest,
562 size - oldrequest);
563 #endif
564
565 chunk->requested_size = size;
566
567 /*
568 * If this is an increase, mark any newly-available part UNDEFINED.
569 * Otherwise, mark the obsolete part NOACCESS.
570 */
571 if (size > oldrequest)
572 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
573 size - oldrequest);
574 else
575 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
576 oldsize - size);
577
578 /* set mark to catch clobber of "unused" space */
579 if (size < oldsize)
580 set_sentinel(pointer, size);
581 #else /* !MEMORY_CONTEXT_CHECKING */
582
583 /*
584 * We don't have the information to determine whether we're growing
585 * the old request or shrinking it, so we conservatively mark the
586 * entire new allocation DEFINED.
587 */
588 VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
589 VALGRIND_MAKE_MEM_DEFINED(pointer, size);
590 #endif
591
592 /* Disallow external access to private part of chunk header. */
593 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
594
595 return pointer;
596 }
597
598 /* allocate new chunk */
599 newPointer = GenerationAlloc((MemoryContext) set, size);
600
601 /* leave immediately if request was not completed */
602 if (newPointer == NULL)
603 {
604 /* Disallow external access to private part of chunk header. */
605 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
606 return NULL;
607 }
608
609 /*
610 * GenerationAlloc() may have returned a region that is still NOACCESS.
611 * Change it to UNDEFINED for the moment; memcpy() will then transfer
612 * definedness from the old allocation to the new. If we know the old
613 * allocation, copy just that much. Otherwise, make the entire old chunk
614 * defined to avoid errors as we copy the currently-NOACCESS trailing
615 * bytes.
616 */
617 VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
618 #ifdef MEMORY_CONTEXT_CHECKING
619 oldsize = chunk->requested_size;
620 #else
621 VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
622 #endif
623
624 /* transfer existing data (certain to fit) */
625 memcpy(newPointer, pointer, oldsize);
626
627 /* free old chunk */
628 GenerationFree((MemoryContext) set, pointer);
629
630 return newPointer;
631 }
632
633 /*
634 * GenerationGetChunkSpace
635 * Given a currently-allocated chunk, determine the total space
636 * it occupies (including all memory-allocation overhead).
637 */
638 static Size
GenerationGetChunkSpace(MemoryContext context,void * pointer)639 GenerationGetChunkSpace(MemoryContext context, void *pointer)
640 {
641 GenerationChunk *chunk = GenerationPointerGetChunk(pointer);
642 Size result;
643
644 VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);
645 result = chunk->size + Generation_CHUNKHDRSZ;
646 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
647 return result;
648 }
649
650 /*
651 * GenerationIsEmpty
652 * Is a GenerationContext empty of any allocated space?
653 */
654 static bool
GenerationIsEmpty(MemoryContext context)655 GenerationIsEmpty(MemoryContext context)
656 {
657 GenerationContext *set = (GenerationContext *) context;
658
659 return dlist_is_empty(&set->blocks);
660 }
661
662 /*
663 * GenerationStats
664 * Compute stats about memory consumption of a Generation context.
665 *
666 * printfunc: if not NULL, pass a human-readable stats string to this.
667 * passthru: pass this pointer through to printfunc.
668 * totals: if not NULL, add stats about this context into *totals.
669 * print_to_stderr: print stats to stderr if true, elog otherwise.
670 *
671 * XXX freespace only accounts for empty space at the end of the block, not
672 * space of freed chunks (which is unknown).
673 */
674 static void
GenerationStats(MemoryContext context,MemoryStatsPrintFunc printfunc,void * passthru,MemoryContextCounters * totals,bool print_to_stderr)675 GenerationStats(MemoryContext context,
676 MemoryStatsPrintFunc printfunc, void *passthru,
677 MemoryContextCounters *totals, bool print_to_stderr)
678 {
679 GenerationContext *set = (GenerationContext *) context;
680 Size nblocks = 0;
681 Size nchunks = 0;
682 Size nfreechunks = 0;
683 Size totalspace;
684 Size freespace = 0;
685 dlist_iter iter;
686
687 /* Include context header in totalspace */
688 totalspace = MAXALIGN(sizeof(GenerationContext));
689
690 dlist_foreach(iter, &set->blocks)
691 {
692 GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
693
694 nblocks++;
695 nchunks += block->nchunks;
696 nfreechunks += block->nfree;
697 totalspace += block->blksize;
698 freespace += (block->endptr - block->freeptr);
699 }
700
701 if (printfunc)
702 {
703 char stats_string[200];
704
705 snprintf(stats_string, sizeof(stats_string),
706 "%zu total in %zd blocks (%zd chunks); %zu free (%zd chunks); %zu used",
707 totalspace, nblocks, nchunks, freespace,
708 nfreechunks, totalspace - freespace);
709 printfunc(context, passthru, stats_string, print_to_stderr);
710 }
711
712 if (totals)
713 {
714 totals->nblocks += nblocks;
715 totals->freechunks += nfreechunks;
716 totals->totalspace += totalspace;
717 totals->freespace += freespace;
718 }
719 }
720
721
722 #ifdef MEMORY_CONTEXT_CHECKING
723
724 /*
725 * GenerationCheck
726 * Walk through chunks and check consistency of memory.
727 *
728 * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
729 * find yourself in an infinite loop when trouble occurs, because this
730 * routine will be entered again when elog cleanup tries to release memory!
731 */
732 static void
GenerationCheck(MemoryContext context)733 GenerationCheck(MemoryContext context)
734 {
735 GenerationContext *gen = (GenerationContext *) context;
736 const char *name = context->name;
737 dlist_iter iter;
738 Size total_allocated = 0;
739
740 /* walk all blocks in this context */
741 dlist_foreach(iter, &gen->blocks)
742 {
743 GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
744 int nfree,
745 nchunks;
746 char *ptr;
747
748 total_allocated += block->blksize;
749
750 /*
751 * nfree > nchunks is surely wrong, and we don't expect to see
752 * equality either, because such a block should have gotten freed.
753 */
754 if (block->nfree >= block->nchunks)
755 elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p exceeds %d allocated",
756 name, block->nfree, block, block->nchunks);
757
758 /* Now walk through the chunks and count them. */
759 nfree = 0;
760 nchunks = 0;
761 ptr = ((char *) block) + Generation_BLOCKHDRSZ;
762
763 while (ptr < block->freeptr)
764 {
765 GenerationChunk *chunk = (GenerationChunk *) ptr;
766
767 /* Allow access to private part of chunk header. */
768 VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);
769
770 /* move to the next chunk */
771 ptr += (chunk->size + Generation_CHUNKHDRSZ);
772
773 nchunks += 1;
774
775 /* chunks have both block and context pointers, so check both */
776 if (chunk->block != block)
777 elog(WARNING, "problem in Generation %s: bogus block link in block %p, chunk %p",
778 name, block, chunk);
779
780 /*
781 * Check for valid context pointer. Note this is an incomplete
782 * test, since palloc(0) produces an allocated chunk with
783 * requested_size == 0.
784 */
785 if ((chunk->requested_size > 0 && chunk->context != gen) ||
786 (chunk->context != gen && chunk->context != NULL))
787 elog(WARNING, "problem in Generation %s: bogus context link in block %p, chunk %p",
788 name, block, chunk);
789
790 /* now make sure the chunk size is correct */
791 if (chunk->size < chunk->requested_size ||
792 chunk->size != MAXALIGN(chunk->size))
793 elog(WARNING, "problem in Generation %s: bogus chunk size in block %p, chunk %p",
794 name, block, chunk);
795
796 /* is chunk allocated? */
797 if (chunk->context != NULL)
798 {
799 /* check sentinel, but only in allocated blocks */
800 if (chunk->requested_size < chunk->size &&
801 !sentinel_ok(chunk, Generation_CHUNKHDRSZ + chunk->requested_size))
802 elog(WARNING, "problem in Generation %s: detected write past chunk end in block %p, chunk %p",
803 name, block, chunk);
804 }
805 else
806 nfree += 1;
807
808 /*
809 * If chunk is allocated, disallow external access to private part
810 * of chunk header.
811 */
812 if (chunk->context != NULL)
813 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
814 }
815
816 /*
817 * Make sure we got the expected number of allocated and free chunks
818 * (as tracked in the block header).
819 */
820 if (nchunks != block->nchunks)
821 elog(WARNING, "problem in Generation %s: number of allocated chunks %d in block %p does not match header %d",
822 name, nchunks, block, block->nchunks);
823
824 if (nfree != block->nfree)
825 elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p does not match header %d",
826 name, nfree, block, block->nfree);
827 }
828
829 Assert(total_allocated == context->mem_allocated);
830 }
831
832 #endif /* MEMORY_CONTEXT_CHECKING */
833