1 /*-------------------------------------------------------------------------
2 *
3 * generation.c
4 * Generational allocator definitions.
5 *
6 * Generation is a custom MemoryContext implementation designed for cases of
7 * chunks with similar lifespan.
8 *
9 * Portions Copyright (c) 2017-2020, PostgreSQL Global Development Group
10 *
11 * IDENTIFICATION
12 * src/backend/utils/mmgr/generation.c
13 *
14 *
15 * This memory context is based on the assumption that the chunks are freed
16 * roughly in the same order as they were allocated (FIFO), or in groups with
17 * similar lifespan (generations - hence the name of the context). This is
18 * typical for various queue-like use cases, e.g. when tuples are constructed,
19 * processed and then thrown away.
20 *
21 * The memory context uses a very simple approach to free space management.
22 * Instead of a complex global freelist, each block tracks a number
23 * of allocated and freed chunks. Freed chunks are not reused, and once all
24 * chunks in a block are freed, the whole block is thrown away. When the
25 * chunks allocated in the same block have similar lifespan, this works
26 * very well and is very cheap.
27 *
28 * The current implementation only uses a fixed block size - maybe it should
29 * adapt a min/max block size range, and grow the blocks automatically.
30 * It already uses dedicated blocks for oversized chunks.
31 *
32 * XXX It might be possible to improve this by keeping a small freelist for
33 * only a small number of recent blocks, but it's not clear it's worth the
34 * additional complexity.
35 *
36 *-------------------------------------------------------------------------
37 */
38
39 #include "postgres.h"
40
41 #include "lib/ilist.h"
42 #include "utils/memdebug.h"
43 #include "utils/memutils.h"
44
45
46 #define Generation_BLOCKHDRSZ MAXALIGN(sizeof(GenerationBlock))
47 #define Generation_CHUNKHDRSZ sizeof(GenerationChunk)
48
49 typedef struct GenerationBlock GenerationBlock; /* forward reference */
50 typedef struct GenerationChunk GenerationChunk;
51
52 typedef void *GenerationPointer;
53
54 /*
55 * GenerationContext is a simple memory context not reusing allocated chunks,
56 * and freeing blocks once all chunks are freed.
57 */
58 typedef struct GenerationContext
59 {
60 MemoryContextData header; /* Standard memory-context fields */
61
62 /* Generational context parameters */
63 Size blockSize; /* standard block size */
64
65 GenerationBlock *block; /* current (most recently allocated) block */
66 dlist_head blocks; /* list of blocks */
67 } GenerationContext;
68
69 /*
70 * GenerationBlock
71 * GenerationBlock is the unit of memory that is obtained by generation.c
72 * from malloc(). It contains one or more GenerationChunks, which are
73 * the units requested by palloc() and freed by pfree(). GenerationChunks
74 * cannot be returned to malloc() individually, instead pfree()
75 * updates the free counter of the block and when all chunks in a block
76 * are free the whole block is returned to malloc().
77 *
78 * GenerationBlock is the header data for a block --- the usable space
79 * within the block begins at the next alignment boundary.
80 */
81 struct GenerationBlock
82 {
83 dlist_node node; /* doubly-linked list of blocks */
84 Size blksize; /* allocated size of this block */
85 int nchunks; /* number of chunks in the block */
86 int nfree; /* number of free chunks */
87 char *freeptr; /* start of free space in this block */
88 char *endptr; /* end of space in this block */
89 };
90
91 /*
92 * GenerationChunk
93 * The prefix of each piece of memory in a GenerationBlock
94 *
95 * Note: to meet the memory context APIs, the payload area of the chunk must
96 * be maxaligned, and the "context" link must be immediately adjacent to the
97 * payload area (cf. GetMemoryChunkContext). We simplify matters for this
98 * module by requiring sizeof(GenerationChunk) to be maxaligned, and then
99 * we can ensure things work by adding any required alignment padding before
100 * the pointer fields. There is a static assertion below that the alignment
101 * is done correctly.
102 */
103 struct GenerationChunk
104 {
105 /* size is always the size of the usable space in the chunk */
106 Size size;
107 #ifdef MEMORY_CONTEXT_CHECKING
108 /* when debugging memory usage, also store actual requested size */
109 /* this is zero in a free chunk */
110 Size requested_size;
111
112 #define GENERATIONCHUNK_RAWSIZE (SIZEOF_SIZE_T * 2 + SIZEOF_VOID_P * 2)
113 #else
114 #define GENERATIONCHUNK_RAWSIZE (SIZEOF_SIZE_T + SIZEOF_VOID_P * 2)
115 #endif /* MEMORY_CONTEXT_CHECKING */
116
117 /* ensure proper alignment by adding padding if needed */
118 #if (GENERATIONCHUNK_RAWSIZE % MAXIMUM_ALIGNOF) != 0
119 char padding[MAXIMUM_ALIGNOF - GENERATIONCHUNK_RAWSIZE % MAXIMUM_ALIGNOF];
120 #endif
121
122 GenerationBlock *block; /* block owning this chunk */
123 GenerationContext *context; /* owning context, or NULL if freed chunk */
124 /* there must not be any padding to reach a MAXALIGN boundary here! */
125 };
126
127 /*
128 * Only the "context" field should be accessed outside this module.
129 * We keep the rest of an allocated chunk's header marked NOACCESS when using
130 * valgrind. But note that freed chunk headers are kept accessible, for
131 * simplicity.
132 */
133 #define GENERATIONCHUNK_PRIVATE_LEN offsetof(GenerationChunk, context)
134
135 /*
136 * GenerationIsValid
137 * True iff set is valid allocation set.
138 */
139 #define GenerationIsValid(set) PointerIsValid(set)
140
141 #define GenerationPointerGetChunk(ptr) \
142 ((GenerationChunk *)(((char *)(ptr)) - Generation_CHUNKHDRSZ))
143 #define GenerationChunkGetPointer(chk) \
144 ((GenerationPointer *)(((char *)(chk)) + Generation_CHUNKHDRSZ))
145
146 /*
147 * These functions implement the MemoryContext API for Generation contexts.
148 */
149 static void *GenerationAlloc(MemoryContext context, Size size);
150 static void GenerationFree(MemoryContext context, void *pointer);
151 static void *GenerationRealloc(MemoryContext context, void *pointer, Size size);
152 static void GenerationReset(MemoryContext context);
153 static void GenerationDelete(MemoryContext context);
154 static Size GenerationGetChunkSpace(MemoryContext context, void *pointer);
155 static bool GenerationIsEmpty(MemoryContext context);
156 static void GenerationStats(MemoryContext context,
157 MemoryStatsPrintFunc printfunc, void *passthru,
158 MemoryContextCounters *totals);
159
160 #ifdef MEMORY_CONTEXT_CHECKING
161 static void GenerationCheck(MemoryContext context);
162 #endif
163
164 /*
165 * This is the virtual function table for Generation contexts.
166 */
167 static const MemoryContextMethods GenerationMethods = {
168 GenerationAlloc,
169 GenerationFree,
170 GenerationRealloc,
171 GenerationReset,
172 GenerationDelete,
173 GenerationGetChunkSpace,
174 GenerationIsEmpty,
175 GenerationStats
176 #ifdef MEMORY_CONTEXT_CHECKING
177 ,GenerationCheck
178 #endif
179 };
180
181
182 /*
183 * Public routines
184 */
185
186
187 /*
188 * GenerationContextCreate
189 * Create a new Generation context.
190 *
191 * parent: parent context, or NULL if top-level context
192 * name: name of context (must be statically allocated)
193 * blockSize: generation block size
194 */
195 MemoryContext
GenerationContextCreate(MemoryContext parent,const char * name,Size blockSize)196 GenerationContextCreate(MemoryContext parent,
197 const char *name,
198 Size blockSize)
199 {
200 GenerationContext *set;
201
202 /* Assert we padded GenerationChunk properly */
203 StaticAssertStmt(Generation_CHUNKHDRSZ == MAXALIGN(Generation_CHUNKHDRSZ),
204 "sizeof(GenerationChunk) is not maxaligned");
205 StaticAssertStmt(offsetof(GenerationChunk, context) + sizeof(MemoryContext) ==
206 Generation_CHUNKHDRSZ,
207 "padding calculation in GenerationChunk is wrong");
208
209 /*
210 * First, validate allocation parameters. (If we're going to throw an
211 * error, we should do so before the context is created, not after.) We
212 * somewhat arbitrarily enforce a minimum 1K block size, mostly because
213 * that's what AllocSet does.
214 */
215 if (blockSize != MAXALIGN(blockSize) ||
216 blockSize < 1024 ||
217 !AllocHugeSizeIsValid(blockSize))
218 elog(ERROR, "invalid blockSize for memory context: %zu",
219 blockSize);
220
221 /*
222 * Allocate the context header. Unlike aset.c, we never try to combine
223 * this with the first regular block, since that would prevent us from
224 * freeing the first generation of allocations.
225 */
226
227 set = (GenerationContext *) malloc(MAXALIGN(sizeof(GenerationContext)));
228 if (set == NULL)
229 {
230 MemoryContextStats(TopMemoryContext);
231 ereport(ERROR,
232 (errcode(ERRCODE_OUT_OF_MEMORY),
233 errmsg("out of memory"),
234 errdetail("Failed while creating memory context \"%s\".",
235 name)));
236 }
237
238 /*
239 * Avoid writing code that can fail between here and MemoryContextCreate;
240 * we'd leak the header if we ereport in this stretch.
241 */
242
243 /* Fill in GenerationContext-specific header fields */
244 set->blockSize = blockSize;
245 set->block = NULL;
246 dlist_init(&set->blocks);
247
248 /* Finally, do the type-independent part of context creation */
249 MemoryContextCreate((MemoryContext) set,
250 T_GenerationContext,
251 &GenerationMethods,
252 parent,
253 name);
254
255 return (MemoryContext) set;
256 }
257
258 /*
259 * GenerationReset
260 * Frees all memory which is allocated in the given set.
261 *
262 * The code simply frees all the blocks in the context - we don't keep any
263 * keeper blocks or anything like that.
264 */
265 static void
GenerationReset(MemoryContext context)266 GenerationReset(MemoryContext context)
267 {
268 GenerationContext *set = (GenerationContext *) context;
269 dlist_mutable_iter miter;
270
271 AssertArg(GenerationIsValid(set));
272
273 #ifdef MEMORY_CONTEXT_CHECKING
274 /* Check for corruption and leaks before freeing */
275 GenerationCheck(context);
276 #endif
277
278 dlist_foreach_modify(miter, &set->blocks)
279 {
280 GenerationBlock *block = dlist_container(GenerationBlock, node, miter.cur);
281
282 dlist_delete(miter.cur);
283
284 context->mem_allocated -= block->blksize;
285
286 #ifdef CLOBBER_FREED_MEMORY
287 wipe_mem(block, block->blksize);
288 #endif
289
290 free(block);
291 }
292
293 set->block = NULL;
294
295 Assert(dlist_is_empty(&set->blocks));
296 }
297
298 /*
299 * GenerationDelete
300 * Free all memory which is allocated in the given context.
301 */
302 static void
GenerationDelete(MemoryContext context)303 GenerationDelete(MemoryContext context)
304 {
305 /* Reset to release all the GenerationBlocks */
306 GenerationReset(context);
307 /* And free the context header */
308 free(context);
309 }
310
311 /*
312 * GenerationAlloc
313 * Returns pointer to allocated memory of given size or NULL if
314 * request could not be completed; memory is added to the set.
315 *
316 * No request may exceed:
317 * MAXALIGN_DOWN(SIZE_MAX) - Generation_BLOCKHDRSZ - Generation_CHUNKHDRSZ
318 * All callers use a much-lower limit.
319 *
320 * Note: when using valgrind, it doesn't matter how the returned allocation
321 * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
322 * return space that is marked NOACCESS - GenerationRealloc has to beware!
323 */
324 static void *
GenerationAlloc(MemoryContext context,Size size)325 GenerationAlloc(MemoryContext context, Size size)
326 {
327 GenerationContext *set = (GenerationContext *) context;
328 GenerationBlock *block;
329 GenerationChunk *chunk;
330 Size chunk_size = MAXALIGN(size);
331
332 /* is it an over-sized chunk? if yes, allocate special block */
333 if (chunk_size > set->blockSize / 8)
334 {
335 Size blksize = chunk_size + Generation_BLOCKHDRSZ + Generation_CHUNKHDRSZ;
336
337 block = (GenerationBlock *) malloc(blksize);
338 if (block == NULL)
339 return NULL;
340
341 context->mem_allocated += blksize;
342
343 /* block with a single (used) chunk */
344 block->blksize = blksize;
345 block->nchunks = 1;
346 block->nfree = 0;
347
348 /* the block is completely full */
349 block->freeptr = block->endptr = ((char *) block) + blksize;
350
351 chunk = (GenerationChunk *) (((char *) block) + Generation_BLOCKHDRSZ);
352 chunk->block = block;
353 chunk->context = set;
354 chunk->size = chunk_size;
355
356 #ifdef MEMORY_CONTEXT_CHECKING
357 chunk->requested_size = size;
358 /* set mark to catch clobber of "unused" space */
359 if (size < chunk_size)
360 set_sentinel(GenerationChunkGetPointer(chunk), size);
361 #endif
362 #ifdef RANDOMIZE_ALLOCATED_MEMORY
363 /* fill the allocated space with junk */
364 randomize_mem((char *) GenerationChunkGetPointer(chunk), size);
365 #endif
366
367 /* add the block to the list of allocated blocks */
368 dlist_push_head(&set->blocks, &block->node);
369
370 /* Ensure any padding bytes are marked NOACCESS. */
371 VALGRIND_MAKE_MEM_NOACCESS((char *) GenerationChunkGetPointer(chunk) + size,
372 chunk_size - size);
373
374 /* Disallow external access to private part of chunk header. */
375 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
376
377 return GenerationChunkGetPointer(chunk);
378 }
379
380 /*
381 * Not an over-sized chunk. Is there enough space in the current block? If
382 * not, allocate a new "regular" block.
383 */
384 block = set->block;
385
386 if ((block == NULL) ||
387 (block->endptr - block->freeptr) < Generation_CHUNKHDRSZ + chunk_size)
388 {
389 Size blksize = set->blockSize;
390
391 block = (GenerationBlock *) malloc(blksize);
392
393 if (block == NULL)
394 return NULL;
395
396 context->mem_allocated += blksize;
397
398 block->blksize = blksize;
399 block->nchunks = 0;
400 block->nfree = 0;
401
402 block->freeptr = ((char *) block) + Generation_BLOCKHDRSZ;
403 block->endptr = ((char *) block) + blksize;
404
405 /* Mark unallocated space NOACCESS. */
406 VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
407 blksize - Generation_BLOCKHDRSZ);
408
409 /* add it to the doubly-linked list of blocks */
410 dlist_push_head(&set->blocks, &block->node);
411
412 /* and also use it as the current allocation block */
413 set->block = block;
414 }
415
416 /* we're supposed to have a block with enough free space now */
417 Assert(block != NULL);
418 Assert((block->endptr - block->freeptr) >= Generation_CHUNKHDRSZ + chunk_size);
419
420 chunk = (GenerationChunk *) block->freeptr;
421
422 /* Prepare to initialize the chunk header. */
423 VALGRIND_MAKE_MEM_UNDEFINED(chunk, Generation_CHUNKHDRSZ);
424
425 block->nchunks += 1;
426 block->freeptr += (Generation_CHUNKHDRSZ + chunk_size);
427
428 Assert(block->freeptr <= block->endptr);
429
430 chunk->block = block;
431 chunk->context = set;
432 chunk->size = chunk_size;
433
434 #ifdef MEMORY_CONTEXT_CHECKING
435 chunk->requested_size = size;
436 /* set mark to catch clobber of "unused" space */
437 if (size < chunk->size)
438 set_sentinel(GenerationChunkGetPointer(chunk), size);
439 #endif
440 #ifdef RANDOMIZE_ALLOCATED_MEMORY
441 /* fill the allocated space with junk */
442 randomize_mem((char *) GenerationChunkGetPointer(chunk), size);
443 #endif
444
445 /* Ensure any padding bytes are marked NOACCESS. */
446 VALGRIND_MAKE_MEM_NOACCESS((char *) GenerationChunkGetPointer(chunk) + size,
447 chunk_size - size);
448
449 /* Disallow external access to private part of chunk header. */
450 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
451
452 return GenerationChunkGetPointer(chunk);
453 }
454
455 /*
456 * GenerationFree
457 * Update number of chunks in the block, and if all chunks in the block
458 * are now free then discard the block.
459 */
460 static void
GenerationFree(MemoryContext context,void * pointer)461 GenerationFree(MemoryContext context, void *pointer)
462 {
463 GenerationContext *set = (GenerationContext *) context;
464 GenerationChunk *chunk = GenerationPointerGetChunk(pointer);
465 GenerationBlock *block;
466
467 /* Allow access to private part of chunk header. */
468 VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);
469
470 block = chunk->block;
471
472 #ifdef MEMORY_CONTEXT_CHECKING
473 /* Test for someone scribbling on unused space in chunk */
474 if (chunk->requested_size < chunk->size)
475 if (!sentinel_ok(pointer, chunk->requested_size))
476 elog(WARNING, "detected write past chunk end in %s %p",
477 ((MemoryContext) set)->name, chunk);
478 #endif
479
480 #ifdef CLOBBER_FREED_MEMORY
481 wipe_mem(pointer, chunk->size);
482 #endif
483
484 /* Reset context to NULL in freed chunks */
485 chunk->context = NULL;
486
487 #ifdef MEMORY_CONTEXT_CHECKING
488 /* Reset requested_size to 0 in freed chunks */
489 chunk->requested_size = 0;
490 #endif
491
492 block->nfree += 1;
493
494 Assert(block->nchunks > 0);
495 Assert(block->nfree <= block->nchunks);
496
497 /* If there are still allocated chunks in the block, we're done. */
498 if (block->nfree < block->nchunks)
499 return;
500
501 /*
502 * The block is empty, so let's get rid of it. First remove it from the
503 * list of blocks, then return it to malloc().
504 */
505 dlist_delete(&block->node);
506
507 /* Also make sure the block is not marked as the current block. */
508 if (set->block == block)
509 set->block = NULL;
510
511 context->mem_allocated -= block->blksize;
512 free(block);
513 }
514
515 /*
516 * GenerationRealloc
517 * When handling repalloc, we simply allocate a new chunk, copy the data
518 * and discard the old one. The only exception is when the new size fits
519 * into the old chunk - in that case we just update chunk header.
520 */
521 static void *
GenerationRealloc(MemoryContext context,void * pointer,Size size)522 GenerationRealloc(MemoryContext context, void *pointer, Size size)
523 {
524 GenerationContext *set = (GenerationContext *) context;
525 GenerationChunk *chunk = GenerationPointerGetChunk(pointer);
526 GenerationPointer newPointer;
527 Size oldsize;
528
529 /* Allow access to private part of chunk header. */
530 VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);
531
532 oldsize = chunk->size;
533
534 #ifdef MEMORY_CONTEXT_CHECKING
535 /* Test for someone scribbling on unused space in chunk */
536 if (chunk->requested_size < oldsize)
537 if (!sentinel_ok(pointer, chunk->requested_size))
538 elog(WARNING, "detected write past chunk end in %s %p",
539 ((MemoryContext) set)->name, chunk);
540 #endif
541
542 /*
543 * Maybe the allocated area already is >= the new size. (In particular,
544 * we always fall out here if the requested size is a decrease.)
545 *
546 * This memory context does not use power-of-2 chunk sizing and instead
547 * carves the chunks to be as small as possible, so most repalloc() calls
548 * will end up in the palloc/memcpy/pfree branch.
549 *
550 * XXX Perhaps we should annotate this condition with unlikely()?
551 */
552 if (oldsize >= size)
553 {
554 #ifdef MEMORY_CONTEXT_CHECKING
555 Size oldrequest = chunk->requested_size;
556
557 #ifdef RANDOMIZE_ALLOCATED_MEMORY
558 /* We can only fill the extra space if we know the prior request */
559 if (size > oldrequest)
560 randomize_mem((char *) pointer + oldrequest,
561 size - oldrequest);
562 #endif
563
564 chunk->requested_size = size;
565
566 /*
567 * If this is an increase, mark any newly-available part UNDEFINED.
568 * Otherwise, mark the obsolete part NOACCESS.
569 */
570 if (size > oldrequest)
571 VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
572 size - oldrequest);
573 else
574 VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
575 oldsize - size);
576
577 /* set mark to catch clobber of "unused" space */
578 if (size < oldsize)
579 set_sentinel(pointer, size);
580 #else /* !MEMORY_CONTEXT_CHECKING */
581
582 /*
583 * We don't have the information to determine whether we're growing
584 * the old request or shrinking it, so we conservatively mark the
585 * entire new allocation DEFINED.
586 */
587 VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
588 VALGRIND_MAKE_MEM_DEFINED(pointer, size);
589 #endif
590
591 /* Disallow external access to private part of chunk header. */
592 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
593
594 return pointer;
595 }
596
597 /* allocate new chunk */
598 newPointer = GenerationAlloc((MemoryContext) set, size);
599
600 /* leave immediately if request was not completed */
601 if (newPointer == NULL)
602 {
603 /* Disallow external access to private part of chunk header. */
604 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
605 return NULL;
606 }
607
608 /*
609 * GenerationAlloc() may have returned a region that is still NOACCESS.
610 * Change it to UNDEFINED for the moment; memcpy() will then transfer
611 * definedness from the old allocation to the new. If we know the old
612 * allocation, copy just that much. Otherwise, make the entire old chunk
613 * defined to avoid errors as we copy the currently-NOACCESS trailing
614 * bytes.
615 */
616 VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
617 #ifdef MEMORY_CONTEXT_CHECKING
618 oldsize = chunk->requested_size;
619 #else
620 VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
621 #endif
622
623 /* transfer existing data (certain to fit) */
624 memcpy(newPointer, pointer, oldsize);
625
626 /* free old chunk */
627 GenerationFree((MemoryContext) set, pointer);
628
629 return newPointer;
630 }
631
632 /*
633 * GenerationGetChunkSpace
634 * Given a currently-allocated chunk, determine the total space
635 * it occupies (including all memory-allocation overhead).
636 */
637 static Size
GenerationGetChunkSpace(MemoryContext context,void * pointer)638 GenerationGetChunkSpace(MemoryContext context, void *pointer)
639 {
640 GenerationChunk *chunk = GenerationPointerGetChunk(pointer);
641 Size result;
642
643 VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);
644 result = chunk->size + Generation_CHUNKHDRSZ;
645 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
646 return result;
647 }
648
649 /*
650 * GenerationIsEmpty
651 * Is a GenerationContext empty of any allocated space?
652 */
653 static bool
GenerationIsEmpty(MemoryContext context)654 GenerationIsEmpty(MemoryContext context)
655 {
656 GenerationContext *set = (GenerationContext *) context;
657
658 return dlist_is_empty(&set->blocks);
659 }
660
661 /*
662 * GenerationStats
663 * Compute stats about memory consumption of a Generation context.
664 *
665 * printfunc: if not NULL, pass a human-readable stats string to this.
666 * passthru: pass this pointer through to printfunc.
667 * totals: if not NULL, add stats about this context into *totals.
668 *
669 * XXX freespace only accounts for empty space at the end of the block, not
670 * space of freed chunks (which is unknown).
671 */
672 static void
GenerationStats(MemoryContext context,MemoryStatsPrintFunc printfunc,void * passthru,MemoryContextCounters * totals)673 GenerationStats(MemoryContext context,
674 MemoryStatsPrintFunc printfunc, void *passthru,
675 MemoryContextCounters *totals)
676 {
677 GenerationContext *set = (GenerationContext *) context;
678 Size nblocks = 0;
679 Size nchunks = 0;
680 Size nfreechunks = 0;
681 Size totalspace;
682 Size freespace = 0;
683 dlist_iter iter;
684
685 /* Include context header in totalspace */
686 totalspace = MAXALIGN(sizeof(GenerationContext));
687
688 dlist_foreach(iter, &set->blocks)
689 {
690 GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
691
692 nblocks++;
693 nchunks += block->nchunks;
694 nfreechunks += block->nfree;
695 totalspace += block->blksize;
696 freespace += (block->endptr - block->freeptr);
697 }
698
699 if (printfunc)
700 {
701 char stats_string[200];
702
703 snprintf(stats_string, sizeof(stats_string),
704 "%zu total in %zd blocks (%zd chunks); %zu free (%zd chunks); %zu used",
705 totalspace, nblocks, nchunks, freespace,
706 nfreechunks, totalspace - freespace);
707 printfunc(context, passthru, stats_string);
708 }
709
710 if (totals)
711 {
712 totals->nblocks += nblocks;
713 totals->freechunks += nfreechunks;
714 totals->totalspace += totalspace;
715 totals->freespace += freespace;
716 }
717 }
718
719
720 #ifdef MEMORY_CONTEXT_CHECKING
721
722 /*
723 * GenerationCheck
724 * Walk through chunks and check consistency of memory.
725 *
726 * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
727 * find yourself in an infinite loop when trouble occurs, because this
728 * routine will be entered again when elog cleanup tries to release memory!
729 */
730 static void
GenerationCheck(MemoryContext context)731 GenerationCheck(MemoryContext context)
732 {
733 GenerationContext *gen = (GenerationContext *) context;
734 const char *name = context->name;
735 dlist_iter iter;
736 Size total_allocated = 0;
737
738 /* walk all blocks in this context */
739 dlist_foreach(iter, &gen->blocks)
740 {
741 GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur);
742 int nfree,
743 nchunks;
744 char *ptr;
745
746 total_allocated += block->blksize;
747
748 /*
749 * nfree > nchunks is surely wrong, and we don't expect to see
750 * equality either, because such a block should have gotten freed.
751 */
752 if (block->nfree >= block->nchunks)
753 elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p exceeds %d allocated",
754 name, block->nfree, block, block->nchunks);
755
756 /* Now walk through the chunks and count them. */
757 nfree = 0;
758 nchunks = 0;
759 ptr = ((char *) block) + Generation_BLOCKHDRSZ;
760
761 while (ptr < block->freeptr)
762 {
763 GenerationChunk *chunk = (GenerationChunk *) ptr;
764
765 /* Allow access to private part of chunk header. */
766 VALGRIND_MAKE_MEM_DEFINED(chunk, GENERATIONCHUNK_PRIVATE_LEN);
767
768 /* move to the next chunk */
769 ptr += (chunk->size + Generation_CHUNKHDRSZ);
770
771 nchunks += 1;
772
773 /* chunks have both block and context pointers, so check both */
774 if (chunk->block != block)
775 elog(WARNING, "problem in Generation %s: bogus block link in block %p, chunk %p",
776 name, block, chunk);
777
778 /*
779 * Check for valid context pointer. Note this is an incomplete
780 * test, since palloc(0) produces an allocated chunk with
781 * requested_size == 0.
782 */
783 if ((chunk->requested_size > 0 && chunk->context != gen) ||
784 (chunk->context != gen && chunk->context != NULL))
785 elog(WARNING, "problem in Generation %s: bogus context link in block %p, chunk %p",
786 name, block, chunk);
787
788 /* now make sure the chunk size is correct */
789 if (chunk->size < chunk->requested_size ||
790 chunk->size != MAXALIGN(chunk->size))
791 elog(WARNING, "problem in Generation %s: bogus chunk size in block %p, chunk %p",
792 name, block, chunk);
793
794 /* is chunk allocated? */
795 if (chunk->context != NULL)
796 {
797 /* check sentinel, but only in allocated blocks */
798 if (chunk->requested_size < chunk->size &&
799 !sentinel_ok(chunk, Generation_CHUNKHDRSZ + chunk->requested_size))
800 elog(WARNING, "problem in Generation %s: detected write past chunk end in block %p, chunk %p",
801 name, block, chunk);
802 }
803 else
804 nfree += 1;
805
806 /*
807 * If chunk is allocated, disallow external access to private part
808 * of chunk header.
809 */
810 if (chunk->context != NULL)
811 VALGRIND_MAKE_MEM_NOACCESS(chunk, GENERATIONCHUNK_PRIVATE_LEN);
812 }
813
814 /*
815 * Make sure we got the expected number of allocated and free chunks
816 * (as tracked in the block header).
817 */
818 if (nchunks != block->nchunks)
819 elog(WARNING, "problem in Generation %s: number of allocated chunks %d in block %p does not match header %d",
820 name, nchunks, block, block->nchunks);
821
822 if (nfree != block->nfree)
823 elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p does not match header %d",
824 name, nfree, block, block->nfree);
825 }
826
827 Assert(total_allocated == context->mem_allocated);
828 }
829
830 #endif /* MEMORY_CONTEXT_CHECKING */
831