1 /*-------------------------------------------------------------------------
2 *
3 * verify_heapam.c
4 * Functions to check postgresql heap relations for corruption
5 *
6 * Copyright (c) 2016-2021, PostgreSQL Global Development Group
7 *
8 * contrib/amcheck/verify_heapam.c
9 *-------------------------------------------------------------------------
10 */
11 #include "postgres.h"
12
13 #include "access/detoast.h"
14 #include "access/genam.h"
15 #include "access/heapam.h"
16 #include "access/heaptoast.h"
17 #include "access/multixact.h"
18 #include "access/toast_internals.h"
19 #include "access/visibilitymap.h"
20 #include "catalog/pg_am.h"
21 #include "funcapi.h"
22 #include "miscadmin.h"
23 #include "storage/bufmgr.h"
24 #include "storage/procarray.h"
25 #include "utils/builtins.h"
26 #include "utils/fmgroids.h"
27
28 PG_FUNCTION_INFO_V1(verify_heapam);
29
30 /* The number of columns in tuples returned by verify_heapam */
31 #define HEAPCHECK_RELATION_COLS 4
32
33 /*
34 * Despite the name, we use this for reporting problems with both XIDs and
35 * MXIDs.
36 */
typedef enum XidBoundsViolation
{
	XID_INVALID,				/* xid is InvalidTransactionId */
	XID_IN_FUTURE,				/* xid >= next xid/mxid to be assigned */
	XID_PRECEDES_CLUSTERMIN,	/* xid older than cluster-wide oldest xid */
	XID_PRECEDES_RELMIN,		/* xid older than relfrozenxid/relminmxid */
	XID_BOUNDS_OK				/* xid is within all valid bounds */
} XidBoundsViolation;
45
/* Commit status of a transaction ID, as determined against clog/procarray */
typedef enum XidCommitStatus
{
	XID_COMMITTED,				/* transaction committed */
	XID_IS_CURRENT_XID,			/* xid is our own current transaction */
	XID_IN_PROGRESS,			/* transaction still running */
	XID_ABORTED					/* transaction aborted or crashed */
} XidCommitStatus;
53
/* Which heap pages the caller asked us to skip (the "skip" SQL argument) */
typedef enum SkipPages
{
	SKIP_PAGES_ALL_FROZEN,		/* skip pages marked all-frozen in the VM */
	SKIP_PAGES_ALL_VISIBLE,		/* skip pages marked all-visible in the VM */
	SKIP_PAGES_NONE				/* check every page in the requested range */
} SkipPages;
60
61 /*
62 * Struct holding information about a toasted attribute sufficient to both
63 * check the toasted attribute and, if found to be corrupt, to report where it
64 * was encountered in the main table.
65 */
/*
 * Struct holding information about a toasted attribute sufficient to both
 * check the toasted attribute and, if found to be corrupt, to report where it
 * was encountered in the main table.
 */
typedef struct ToastedAttribute
{
	struct varatt_external toast_pointer;	/* copy of the toast pointer */
	BlockNumber blkno;			/* block in main table */
	OffsetNumber offnum;		/* offset in main table */
	AttrNumber	attnum;			/* attribute in main table */
} ToastedAttribute;
73
74 /*
75 * Struct holding the running context information during
76 * a lifetime of a verify_heapam execution.
77 */
/*
 * Struct holding the running context information during
 * a lifetime of a verify_heapam execution.
 */
typedef struct HeapCheckContext
{
	/*
	 * Cached copies of values from ShmemVariableCache and computed values
	 * from them.  Caching avoids repeated lock acquisitions; see
	 * update_cached_xid_range().
	 */
	FullTransactionId next_fxid;	/* ShmemVariableCache->nextXid */
	TransactionId next_xid;		/* 32-bit version of next_fxid */
	TransactionId oldest_xid;	/* ShmemVariableCache->oldestXid */
	FullTransactionId oldest_fxid;	/* 64-bit version of oldest_xid, computed
									 * relative to next_fxid */
	TransactionId safe_xmin;	/* this XID and newer ones can't become
								 * all-visible while we're running */

	/*
	 * Cached copy of value from MultiXactState
	 */
	MultiXactId next_mxact;		/* MultiXactState->nextMXact */
	MultiXactId oldest_mxact;	/* MultiXactState->oldestMultiXactId */

	/*
	 * Cached copies of the most recently checked xid and its status, to
	 * avoid repeated clog lookups for the same xid.
	 */
	TransactionId cached_xid;
	XidCommitStatus cached_status;

	/* Values concerning the heap relation being checked */
	Relation	rel;
	TransactionId relfrozenxid;
	FullTransactionId relfrozenfxid;
	TransactionId relminmxid;
	Relation	toast_rel;		/* NULL if no toast relation or !check_toast */
	Relation   *toast_indexes;
	Relation	valid_toast_index;
	int			num_toast_indexes;

	/* Values for iterating over pages in the relation */
	BlockNumber blkno;
	BufferAccessStrategy bstrategy;
	Buffer		buffer;
	Page		page;

	/* Values for iterating over tuples within a page */
	OffsetNumber offnum;
	ItemId		itemid;
	uint16		lp_len;
	uint16		lp_off;
	HeapTupleHeader tuphdr;
	int			natts;

	/* Values for iterating over attributes within the tuple */
	uint32		offset;			/* offset in tuple data */
	AttrNumber	attnum;			/* -1 when not examining any attribute */

	/* True if tuple's xmax makes it eligible for pruning */
	bool		tuple_could_be_pruned;

	/*
	 * List of ToastedAttribute structs for toasted attributes which are not
	 * eligible for pruning and should be checked
	 */
	List	   *toasted_attributes;

	/* Whether verify_heapam has yet encountered any corrupt tuples */
	bool		is_corrupt;

	/* The descriptor and tuplestore for verify_heapam's result tuples */
	TupleDesc	tupdesc;
	Tuplestorestate *tupstore;
} HeapCheckContext;
148
/* Internal implementation */
static void sanity_check_relation(Relation rel);
static void check_tuple(HeapCheckContext *ctx);
static void check_toast_tuple(HeapTuple toasttup, HeapCheckContext *ctx,
							  ToastedAttribute *ta, int32 *expected_chunk_seq,
							  uint32 extsize);

static bool check_tuple_attribute(HeapCheckContext *ctx);
static void check_toasted_attribute(HeapCheckContext *ctx,
									ToastedAttribute *ta);

static bool check_tuple_header(HeapCheckContext *ctx);
static bool check_tuple_visibility(HeapCheckContext *ctx);

/* Corruption reporting and result-set construction */
static void report_corruption(HeapCheckContext *ctx, char *msg);
static void report_toast_corruption(HeapCheckContext *ctx,
									ToastedAttribute *ta, char *msg);
static TupleDesc verify_heapam_tupdesc(void);

/* XID/MXID range caching and validity checks */
static FullTransactionId FullTransactionIdFromXidAndCtx(TransactionId xid,
														const HeapCheckContext *ctx);
static void update_cached_xid_range(HeapCheckContext *ctx);
static void update_cached_mxid_range(HeapCheckContext *ctx);
static XidBoundsViolation check_mxid_in_range(MultiXactId mxid,
											  HeapCheckContext *ctx);
static XidBoundsViolation check_mxid_valid_in_rel(MultiXactId mxid,
												  HeapCheckContext *ctx);
static XidBoundsViolation get_xid_status(TransactionId xid,
										 HeapCheckContext *ctx,
										 XidCommitStatus *status);
178
179 /*
180 * Scan and report corruption in heap pages, optionally reconciling toasted
181 * attributes with entries in the associated toast table. Intended to be
182 * called from SQL with the following parameters:
183 *
184 * relation:
185 * The Oid of the heap relation to be checked.
186 *
187 * on_error_stop:
188 * Whether to stop at the end of the first page for which errors are
189 * detected. Note that multiple rows may be returned.
190 *
191 * check_toast:
192 * Whether to check each toasted attribute against the toast table to
193 * verify that it can be found there.
194 *
195 * skip:
196 * What kinds of pages in the heap relation should be skipped. Valid
197 * options are "all-visible", "all-frozen", and "none".
198 *
199 * Returns to the SQL caller a set of tuples, each containing the location
200 * and a description of a corruption found in the heap.
201 *
202 * This code goes to some trouble to avoid crashing the server even if the
203 * table pages are badly corrupted, but it's probably not perfect. If
204 * check_toast is true, we'll use regular index lookups to try to fetch TOAST
205 * tuples, which can certainly cause crashes if the right kind of corruption
206 * exists in the toast table or index. No matter what parameters you pass,
207 * we can't protect against crashes that might occur trying to look up the
208 * commit status of transaction IDs (though we avoid trying to do such lookups
209 * for transaction IDs that can't legally appear in the table).
210 */
Datum
verify_heapam(PG_FUNCTION_ARGS)
{
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	MemoryContext old_context;
	bool		random_access;
	HeapCheckContext ctx;
	Buffer		vmbuffer = InvalidBuffer;
	Oid			relid;
	bool		on_error_stop;
	bool		check_toast;
	SkipPages	skip_option = SKIP_PAGES_NONE;
	BlockNumber first_block;
	BlockNumber last_block;
	BlockNumber nblocks;
	const char *skip;

	/* Check to see if caller supports us returning a tuplestore */
	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (!(rsinfo->allowedModes & SFRM_Materialize))
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("materialize mode required, but it is not allowed in this context")));

	/* Check supplied arguments */
	if (PG_ARGISNULL(0))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("relation cannot be null")));
	relid = PG_GETARG_OID(0);

	if (PG_ARGISNULL(1))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("on_error_stop cannot be null")));
	on_error_stop = PG_GETARG_BOOL(1);

	if (PG_ARGISNULL(2))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("check_toast cannot be null")));
	check_toast = PG_GETARG_BOOL(2);

	if (PG_ARGISNULL(3))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("skip cannot be null")));
	skip = text_to_cstring(PG_GETARG_TEXT_PP(3));
	if (pg_strcasecmp(skip, "all-visible") == 0)
		skip_option = SKIP_PAGES_ALL_VISIBLE;
	else if (pg_strcasecmp(skip, "all-frozen") == 0)
		skip_option = SKIP_PAGES_ALL_FROZEN;
	else if (pg_strcasecmp(skip, "none") == 0)
		skip_option = SKIP_PAGES_NONE;
	else
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("invalid skip option"),
				 errhint("Valid skip options are \"all-visible\", \"all-frozen\", and \"none\".")));

	memset(&ctx, 0, sizeof(HeapCheckContext));
	ctx.cached_xid = InvalidTransactionId;
	ctx.toasted_attributes = NIL;

	/*
	 * Any xmin newer than the xmin of our snapshot can't become all-visible
	 * while we're running.
	 */
	ctx.safe_xmin = GetTransactionSnapshot()->xmin;

	/*
	 * If we report corruption when not examining some individual attribute,
	 * we need attnum to be reported as NULL.  Set that up before any
	 * corruption reporting might happen.
	 */
	ctx.attnum = -1;

	/* The tupdesc and tuplestore must be created in ecxt_per_query_memory */
	old_context = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory);
	random_access = (rsinfo->allowedModes & SFRM_Materialize_Random) != 0;
	ctx.tupdesc = verify_heapam_tupdesc();
	ctx.tupstore = tuplestore_begin_heap(random_access, false, work_mem);
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setResult = ctx.tupstore;
	rsinfo->setDesc = ctx.tupdesc;
	MemoryContextSwitchTo(old_context);

	/* Open relation, check relkind and access method */
	ctx.rel = relation_open(relid, AccessShareLock);
	sanity_check_relation(ctx.rel);

	/*
	 * Early exit for unlogged relations during recovery.  These will have no
	 * relation fork, so there won't be anything to check.  We behave as if
	 * the relation is empty.
	 */
	if (ctx.rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
		RecoveryInProgress())
	{
		ereport(DEBUG1,
				(errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
				 errmsg("cannot verify unlogged relation \"%s\" during recovery, skipping",
						RelationGetRelationName(ctx.rel))));
		relation_close(ctx.rel, AccessShareLock);
		PG_RETURN_NULL();
	}

	/* Early exit if the relation is empty */
	nblocks = RelationGetNumberOfBlocks(ctx.rel);
	if (!nblocks)
	{
		relation_close(ctx.rel, AccessShareLock);
		PG_RETURN_NULL();
	}

	/* Bulk-read strategy avoids blowing out shared buffers on large tables */
	ctx.bstrategy = GetAccessStrategy(BAS_BULKREAD);
	ctx.buffer = InvalidBuffer;
	ctx.page = NULL;

	/* Validate block numbers, or handle nulls. */
	if (PG_ARGISNULL(4))
		first_block = 0;
	else
	{
		int64		fb = PG_GETARG_INT64(4);

		if (fb < 0 || fb >= nblocks)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("starting block number must be between 0 and %u",
							nblocks - 1)));
		first_block = (BlockNumber) fb;
	}
	if (PG_ARGISNULL(5))
		last_block = nblocks - 1;
	else
	{
		int64		lb = PG_GETARG_INT64(5);

		if (lb < 0 || lb >= nblocks)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("ending block number must be between 0 and %u",
							nblocks - 1)));
		last_block = (BlockNumber) lb;
	}

	/* Optionally open the toast relation, if any. */
	if (ctx.rel->rd_rel->reltoastrelid && check_toast)
	{
		int			offset;

		/* Main relation has associated toast relation */
		ctx.toast_rel = table_open(ctx.rel->rd_rel->reltoastrelid,
								   AccessShareLock);
		offset = toast_open_indexes(ctx.toast_rel,
									AccessShareLock,
									&(ctx.toast_indexes),
									&(ctx.num_toast_indexes));
		ctx.valid_toast_index = ctx.toast_indexes[offset];
	}
	else
	{
		/*
		 * Main relation has no associated toast relation, or we're
		 * intentionally skipping it.
		 */
		ctx.toast_rel = NULL;
		ctx.toast_indexes = NULL;
		ctx.num_toast_indexes = 0;
	}

	update_cached_xid_range(&ctx);
	update_cached_mxid_range(&ctx);
	ctx.relfrozenxid = ctx.rel->rd_rel->relfrozenxid;
	ctx.relfrozenfxid = FullTransactionIdFromXidAndCtx(ctx.relfrozenxid, &ctx);
	ctx.relminmxid = ctx.rel->rd_rel->relminmxid;

	/* relfrozenxid, when valid, is a tighter lower bound than oldestXid */
	if (TransactionIdIsNormal(ctx.relfrozenxid))
		ctx.oldest_xid = ctx.relfrozenxid;

	for (ctx.blkno = first_block; ctx.blkno <= last_block; ctx.blkno++)
	{
		OffsetNumber maxoff;

		CHECK_FOR_INTERRUPTS();

		/* Optionally skip over all-frozen or all-visible blocks */
		if (skip_option != SKIP_PAGES_NONE)
		{
			int32		mapbits;

			mapbits = (int32) visibilitymap_get_status(ctx.rel, ctx.blkno,
													   &vmbuffer);
			if (skip_option == SKIP_PAGES_ALL_FROZEN)
			{
				if ((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0)
					continue;
			}

			if (skip_option == SKIP_PAGES_ALL_VISIBLE)
			{
				if ((mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0)
					continue;
			}
		}

		/* Read and lock the next page. */
		ctx.buffer = ReadBufferExtended(ctx.rel, MAIN_FORKNUM, ctx.blkno,
										RBM_NORMAL, ctx.bstrategy);
		LockBuffer(ctx.buffer, BUFFER_LOCK_SHARE);
		ctx.page = BufferGetPage(ctx.buffer);

		/* Perform tuple checks */
		maxoff = PageGetMaxOffsetNumber(ctx.page);
		for (ctx.offnum = FirstOffsetNumber; ctx.offnum <= maxoff;
			 ctx.offnum = OffsetNumberNext(ctx.offnum))
		{
			ctx.itemid = PageGetItemId(ctx.page, ctx.offnum);

			/* Skip over unused/dead line pointers */
			if (!ItemIdIsUsed(ctx.itemid) || ItemIdIsDead(ctx.itemid))
				continue;

			/*
			 * If this line pointer has been redirected, check that it
			 * redirects to a valid offset within the line pointer array
			 */
			if (ItemIdIsRedirected(ctx.itemid))
			{
				OffsetNumber rdoffnum = ItemIdGetRedirect(ctx.itemid);
				ItemId		rditem;

				if (rdoffnum < FirstOffsetNumber)
				{
					report_corruption(&ctx,
									  psprintf("line pointer redirection to item at offset %u precedes minimum offset %u",
											   (unsigned) rdoffnum,
											   (unsigned) FirstOffsetNumber));
					continue;
				}
				if (rdoffnum > maxoff)
				{
					report_corruption(&ctx,
									  psprintf("line pointer redirection to item at offset %u exceeds maximum offset %u",
											   (unsigned) rdoffnum,
											   (unsigned) maxoff));
					continue;
				}
				rditem = PageGetItemId(ctx.page, rdoffnum);
				if (!ItemIdIsUsed(rditem))
					report_corruption(&ctx,
									  psprintf("line pointer redirection to unused item at offset %u",
											   (unsigned) rdoffnum));
				/* redirect line pointers carry no tuple of their own */
				continue;
			}

			/* Sanity-check the line pointer's offset and length values */
			ctx.lp_len = ItemIdGetLength(ctx.itemid);
			ctx.lp_off = ItemIdGetOffset(ctx.itemid);

			if (ctx.lp_off != MAXALIGN(ctx.lp_off))
			{
				report_corruption(&ctx,
								  psprintf("line pointer to page offset %u is not maximally aligned",
										   ctx.lp_off));
				continue;
			}
			if (ctx.lp_len < MAXALIGN(SizeofHeapTupleHeader))
			{
				report_corruption(&ctx,
								  psprintf("line pointer length %u is less than the minimum tuple header size %u",
										   ctx.lp_len,
										   (unsigned) MAXALIGN(SizeofHeapTupleHeader)));
				continue;
			}
			if (ctx.lp_off + ctx.lp_len > BLCKSZ)
			{
				report_corruption(&ctx,
								  psprintf("line pointer to page offset %u with length %u ends beyond maximum page offset %u",
										   ctx.lp_off,
										   ctx.lp_len,
										   (unsigned) BLCKSZ));
				continue;
			}

			/* It should be safe to examine the tuple's header, at least */
			ctx.tuphdr = (HeapTupleHeader) PageGetItem(ctx.page, ctx.itemid);
			ctx.natts = HeapTupleHeaderGetNatts(ctx.tuphdr);

			/* Ok, ready to check this next tuple */
			check_tuple(&ctx);
		}

		/* clean up */
		UnlockReleaseBuffer(ctx.buffer);

		/*
		 * Check any toast pointers from the page whose lock we just released
		 */
		if (ctx.toasted_attributes != NIL)
		{
			ListCell   *cell;

			foreach(cell, ctx.toasted_attributes)
				check_toasted_attribute(&ctx, lfirst(cell));
			list_free_deep(ctx.toasted_attributes);
			ctx.toasted_attributes = NIL;
		}

		if (on_error_stop && ctx.is_corrupt)
			break;
	}

	if (vmbuffer != InvalidBuffer)
		ReleaseBuffer(vmbuffer);

	/* Close the associated toast table and indexes, if any. */
	if (ctx.toast_indexes)
		toast_close_indexes(ctx.toast_indexes, ctx.num_toast_indexes,
							AccessShareLock);
	if (ctx.toast_rel)
		table_close(ctx.toast_rel, AccessShareLock);

	/* Close the main relation */
	relation_close(ctx.rel, AccessShareLock);

	PG_RETURN_NULL();
}
543
544 /*
545 * Check that a relation's relkind and access method are both supported.
546 */
547 static void
sanity_check_relation(Relation rel)548 sanity_check_relation(Relation rel)
549 {
550 if (rel->rd_rel->relkind != RELKIND_RELATION &&
551 rel->rd_rel->relkind != RELKIND_MATVIEW &&
552 rel->rd_rel->relkind != RELKIND_TOASTVALUE)
553 ereport(ERROR,
554 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
555 errmsg("\"%s\" is not a table, materialized view, or TOAST table",
556 RelationGetRelationName(rel))));
557 if (rel->rd_rel->relam != HEAP_TABLE_AM_OID)
558 ereport(ERROR,
559 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
560 errmsg("only heap AM is supported")));
561 }
562
563 /*
564 * Shared internal implementation for report_corruption and
565 * report_toast_corruption.
566 */
567 static void
report_corruption_internal(Tuplestorestate * tupstore,TupleDesc tupdesc,BlockNumber blkno,OffsetNumber offnum,AttrNumber attnum,char * msg)568 report_corruption_internal(Tuplestorestate *tupstore, TupleDesc tupdesc,
569 BlockNumber blkno, OffsetNumber offnum,
570 AttrNumber attnum, char *msg)
571 {
572 Datum values[HEAPCHECK_RELATION_COLS];
573 bool nulls[HEAPCHECK_RELATION_COLS];
574 HeapTuple tuple;
575
576 MemSet(values, 0, sizeof(values));
577 MemSet(nulls, 0, sizeof(nulls));
578 values[0] = Int64GetDatum(blkno);
579 values[1] = Int32GetDatum(offnum);
580 values[2] = Int32GetDatum(attnum);
581 nulls[2] = (attnum < 0);
582 values[3] = CStringGetTextDatum(msg);
583
584 /*
585 * In principle, there is nothing to prevent a scan over a large, highly
586 * corrupted table from using work_mem worth of memory building up the
587 * tuplestore. That's ok, but if we also leak the msg argument memory
588 * until the end of the query, we could exceed work_mem by more than a
589 * trivial amount. Therefore, free the msg argument each time we are
590 * called rather than waiting for our current memory context to be freed.
591 */
592 pfree(msg);
593
594 tuple = heap_form_tuple(tupdesc, values, nulls);
595 tuplestore_puttuple(tupstore, tuple);
596 }
597
598 /*
599 * Record a single corruption found in the main table. The values in ctx should
600 * indicate the location of the corruption, and the msg argument should contain
601 * a human-readable description of the corruption.
602 *
603 * The msg argument is pfree'd by this function.
604 */
605 static void
report_corruption(HeapCheckContext * ctx,char * msg)606 report_corruption(HeapCheckContext *ctx, char *msg)
607 {
608 report_corruption_internal(ctx->tupstore, ctx->tupdesc, ctx->blkno,
609 ctx->offnum, ctx->attnum, msg);
610 ctx->is_corrupt = true;
611 }
612
613 /*
614 * Record corruption found in the toast table. The values in ta should
615 * indicate the location in the main table where the toast pointer was
616 * encountered, and the msg argument should contain a human-readable
617 * description of the toast table corruption.
618 *
619 * As above, the msg argument is pfree'd by this function.
620 */
621 static void
report_toast_corruption(HeapCheckContext * ctx,ToastedAttribute * ta,char * msg)622 report_toast_corruption(HeapCheckContext *ctx, ToastedAttribute *ta,
623 char *msg)
624 {
625 report_corruption_internal(ctx->tupstore, ctx->tupdesc, ta->blkno,
626 ta->offnum, ta->attnum, msg);
627 ctx->is_corrupt = true;
628 }
629
630 /*
631 * Construct the TupleDesc used to report messages about corruptions found
632 * while scanning the heap.
633 */
634 static TupleDesc
verify_heapam_tupdesc(void)635 verify_heapam_tupdesc(void)
636 {
637 TupleDesc tupdesc;
638 AttrNumber a = 0;
639
640 tupdesc = CreateTemplateTupleDesc(HEAPCHECK_RELATION_COLS);
641 TupleDescInitEntry(tupdesc, ++a, "blkno", INT8OID, -1, 0);
642 TupleDescInitEntry(tupdesc, ++a, "offnum", INT4OID, -1, 0);
643 TupleDescInitEntry(tupdesc, ++a, "attnum", INT4OID, -1, 0);
644 TupleDescInitEntry(tupdesc, ++a, "msg", TEXTOID, -1, 0);
645 Assert(a == HEAPCHECK_RELATION_COLS);
646
647 return BlessTupleDesc(tupdesc);
648 }
649
650 /*
651 * Check for tuple header corruption.
652 *
653 * Some kinds of corruption make it unsafe to check the tuple attributes, for
654 * example when the line pointer refers to a range of bytes outside the page.
655 * In such cases, we return false (not checkable) after recording appropriate
656 * corruption messages.
657 *
658 * Some other kinds of tuple header corruption confuse the question of where
659 * the tuple attributes begin, or how long the nulls bitmap is, etc., making it
660 * unreasonable to attempt to check attributes, even if all candidate answers
661 * to those questions would not result in reading past the end of the line
662 * pointer or page. In such cases, like above, we record corruption messages
663 * about the header and then return false.
664 *
665 * Other kinds of tuple header corruption do not bear on the question of
666 * whether the tuple attributes can be checked, so we record corruption
667 * messages for them but we do not return false merely because we detected
668 * them.
669 *
670 * Returns whether the tuple is sufficiently sensible to undergo visibility and
671 * attribute checks.
672 */
static bool
check_tuple_header(HeapCheckContext *ctx)
{
	HeapTupleHeader tuphdr = ctx->tuphdr;
	uint16		infomask = tuphdr->t_infomask;
	bool		result = true;	/* set false when attributes are uncheckable */
	unsigned	expected_hoff;

	/* t_hoff beyond the item length means the data area lies off the end */
	if (ctx->tuphdr->t_hoff > ctx->lp_len)
	{
		report_corruption(ctx,
						  psprintf("data begins at offset %u beyond the tuple length %u",
								   ctx->tuphdr->t_hoff, ctx->lp_len));
		result = false;
	}

	/* XMAX_COMMITTED and XMAX_IS_MULTI are mutually exclusive hint bits */
	if ((ctx->tuphdr->t_infomask & HEAP_XMAX_COMMITTED) &&
		(ctx->tuphdr->t_infomask & HEAP_XMAX_IS_MULTI))
	{
		report_corruption(ctx,
						  pstrdup("multixact should not be marked committed"));

		/*
		 * This condition is clearly wrong, but it's not enough to justify
		 * skipping further checks, because we don't rely on this to determine
		 * whether the tuple is visible or to interpret other relevant header
		 * fields.
		 */
	}

	/*
	 * Compute where the tuple data must begin: header, plus a nulls bitmap
	 * when HEAP_HASNULL is set, rounded up to MAXALIGN.  A mismatch means we
	 * cannot trust the attribute layout.
	 */
	if (infomask & HEAP_HASNULL)
		expected_hoff = MAXALIGN(SizeofHeapTupleHeader + BITMAPLEN(ctx->natts));
	else
		expected_hoff = MAXALIGN(SizeofHeapTupleHeader);
	if (ctx->tuphdr->t_hoff != expected_hoff)
	{
		/* four message variants keep singular/plural grammar correct */
		if ((infomask & HEAP_HASNULL) && ctx->natts == 1)
			report_corruption(ctx,
							  psprintf("tuple data should begin at byte %u, but actually begins at byte %u (1 attribute, has nulls)",
									   expected_hoff, ctx->tuphdr->t_hoff));
		else if ((infomask & HEAP_HASNULL))
			report_corruption(ctx,
							  psprintf("tuple data should begin at byte %u, but actually begins at byte %u (%u attributes, has nulls)",
									   expected_hoff, ctx->tuphdr->t_hoff, ctx->natts));
		else if (ctx->natts == 1)
			report_corruption(ctx,
							  psprintf("tuple data should begin at byte %u, but actually begins at byte %u (1 attribute, no nulls)",
									   expected_hoff, ctx->tuphdr->t_hoff));
		else
			report_corruption(ctx,
							  psprintf("tuple data should begin at byte %u, but actually begins at byte %u (%u attributes, no nulls)",
									   expected_hoff, ctx->tuphdr->t_hoff, ctx->natts));
		result = false;
	}

	return result;
}
730
731 /*
732 * Checks tuple visibility so we know which further checks are safe to
733 * perform.
734 *
735 * If a tuple could have been inserted by a transaction that also added a
736 * column to the table, but which ultimately did not commit, or which has not
737 * yet committed, then the table's current TupleDesc might differ from the one
738 * used to construct this tuple, so we must not check it.
739 *
740 * As a special case, if our own transaction inserted the tuple, even if we
741 * added a column to the table, our TupleDesc should match. We could check the
742 * tuple, but choose not to do so.
743 *
744 * If a tuple has been updated or deleted, we can still read the old tuple for
745 * corruption checking purposes, as long as we are careful about concurrent
746 * vacuums. The main table tuple itself cannot be vacuumed away because we
747 * hold a buffer lock on the page, but if the deleting transaction is older
748 * than our transaction snapshot's xmin, then vacuum could remove the toast at
749 * any time, so we must not try to follow TOAST pointers.
750 *
751 * If xmin or xmax values are older than can be checked against clog, or appear
752 * to be in the future (possibly due to wrap-around), then we cannot make a
753 * determination about the visibility of the tuple, so we skip further checks.
754 *
755 * Returns true if the tuple itself should be checked, false otherwise. Sets
756 * ctx->tuple_could_be_pruned if the tuple -- and thus also any associated
757 * TOAST tuples -- are eligible for pruning.
758 */
759 static bool
check_tuple_visibility(HeapCheckContext * ctx)760 check_tuple_visibility(HeapCheckContext *ctx)
761 {
762 TransactionId xmin;
763 TransactionId xvac;
764 TransactionId xmax;
765 XidCommitStatus xmin_status;
766 XidCommitStatus xvac_status;
767 XidCommitStatus xmax_status;
768 HeapTupleHeader tuphdr = ctx->tuphdr;
769
770 ctx->tuple_could_be_pruned = true; /* have not yet proven otherwise */
771
772 /* If xmin is normal, it should be within valid range */
773 xmin = HeapTupleHeaderGetXmin(tuphdr);
774 switch (get_xid_status(xmin, ctx, &xmin_status))
775 {
776 case XID_INVALID:
777 case XID_BOUNDS_OK:
778 break;
779 case XID_IN_FUTURE:
780 report_corruption(ctx,
781 psprintf("xmin %u equals or exceeds next valid transaction ID %u:%u",
782 xmin,
783 EpochFromFullTransactionId(ctx->next_fxid),
784 XidFromFullTransactionId(ctx->next_fxid)));
785 return false;
786 case XID_PRECEDES_CLUSTERMIN:
787 report_corruption(ctx,
788 psprintf("xmin %u precedes oldest valid transaction ID %u:%u",
789 xmin,
790 EpochFromFullTransactionId(ctx->oldest_fxid),
791 XidFromFullTransactionId(ctx->oldest_fxid)));
792 return false;
793 case XID_PRECEDES_RELMIN:
794 report_corruption(ctx,
795 psprintf("xmin %u precedes relation freeze threshold %u:%u",
796 xmin,
797 EpochFromFullTransactionId(ctx->relfrozenfxid),
798 XidFromFullTransactionId(ctx->relfrozenfxid)));
799 return false;
800 }
801
802 /*
803 * Has inserting transaction committed?
804 */
805 if (!HeapTupleHeaderXminCommitted(tuphdr))
806 {
807 if (HeapTupleHeaderXminInvalid(tuphdr))
808 return false; /* inserter aborted, don't check */
809 /* Used by pre-9.0 binary upgrades */
810 else if (tuphdr->t_infomask & HEAP_MOVED_OFF)
811 {
812 xvac = HeapTupleHeaderGetXvac(tuphdr);
813
814 switch (get_xid_status(xvac, ctx, &xvac_status))
815 {
816 case XID_INVALID:
817 report_corruption(ctx,
818 pstrdup("old-style VACUUM FULL transaction ID for moved off tuple is invalid"));
819 return false;
820 case XID_IN_FUTURE:
821 report_corruption(ctx,
822 psprintf("old-style VACUUM FULL transaction ID %u for moved off tuple equals or exceeds next valid transaction ID %u:%u",
823 xvac,
824 EpochFromFullTransactionId(ctx->next_fxid),
825 XidFromFullTransactionId(ctx->next_fxid)));
826 return false;
827 case XID_PRECEDES_RELMIN:
828 report_corruption(ctx,
829 psprintf("old-style VACUUM FULL transaction ID %u for moved off tuple precedes relation freeze threshold %u:%u",
830 xvac,
831 EpochFromFullTransactionId(ctx->relfrozenfxid),
832 XidFromFullTransactionId(ctx->relfrozenfxid)));
833 return false;
834 case XID_PRECEDES_CLUSTERMIN:
835 report_corruption(ctx,
836 psprintf("old-style VACUUM FULL transaction ID %u for moved off tuple precedes oldest valid transaction ID %u:%u",
837 xvac,
838 EpochFromFullTransactionId(ctx->oldest_fxid),
839 XidFromFullTransactionId(ctx->oldest_fxid)));
840 return false;
841 case XID_BOUNDS_OK:
842 break;
843 }
844
845 switch (xvac_status)
846 {
847 case XID_IS_CURRENT_XID:
848 report_corruption(ctx,
849 psprintf("old-style VACUUM FULL transaction ID %u for moved off tuple matches our current transaction ID",
850 xvac));
851 return false;
852 case XID_IN_PROGRESS:
853 report_corruption(ctx,
854 psprintf("old-style VACUUM FULL transaction ID %u for moved off tuple appears to be in progress",
855 xvac));
856 return false;
857
858 case XID_COMMITTED:
859
860 /*
861 * The tuple is dead, because the xvac transaction moved
862 * it off and committed. It's checkable, but also
863 * prunable.
864 */
865 return true;
866
867 case XID_ABORTED:
868
869 /*
870 * The original xmin must have committed, because the xvac
871 * transaction tried to move it later. Since xvac is
872 * aborted, whether it's still alive now depends on the
873 * status of xmax.
874 */
875 break;
876 }
877 }
878 /* Used by pre-9.0 binary upgrades */
879 else if (tuphdr->t_infomask & HEAP_MOVED_IN)
880 {
881 xvac = HeapTupleHeaderGetXvac(tuphdr);
882
883 switch (get_xid_status(xvac, ctx, &xvac_status))
884 {
885 case XID_INVALID:
886 report_corruption(ctx,
887 pstrdup("old-style VACUUM FULL transaction ID for moved in tuple is invalid"));
888 return false;
889 case XID_IN_FUTURE:
890 report_corruption(ctx,
891 psprintf("old-style VACUUM FULL transaction ID %u for moved in tuple equals or exceeds next valid transaction ID %u:%u",
892 xvac,
893 EpochFromFullTransactionId(ctx->next_fxid),
894 XidFromFullTransactionId(ctx->next_fxid)));
895 return false;
896 case XID_PRECEDES_RELMIN:
897 report_corruption(ctx,
898 psprintf("old-style VACUUM FULL transaction ID %u for moved in tuple precedes relation freeze threshold %u:%u",
899 xvac,
900 EpochFromFullTransactionId(ctx->relfrozenfxid),
901 XidFromFullTransactionId(ctx->relfrozenfxid)));
902 return false;
903 case XID_PRECEDES_CLUSTERMIN:
904 report_corruption(ctx,
905 psprintf("old-style VACUUM FULL transaction ID %u for moved in tuple precedes oldest valid transaction ID %u:%u",
906 xvac,
907 EpochFromFullTransactionId(ctx->oldest_fxid),
908 XidFromFullTransactionId(ctx->oldest_fxid)));
909 return false;
910 case XID_BOUNDS_OK:
911 break;
912 }
913
914 switch (xvac_status)
915 {
916 case XID_IS_CURRENT_XID:
917 report_corruption(ctx,
918 psprintf("old-style VACUUM FULL transaction ID %u for moved in tuple matches our current transaction ID",
919 xvac));
920 return false;
921 case XID_IN_PROGRESS:
922 report_corruption(ctx,
923 psprintf("old-style VACUUM FULL transaction ID %u for moved in tuple appears to be in progress",
924 xvac));
925 return false;
926
927 case XID_COMMITTED:
928
929 /*
930 * The original xmin must have committed, because the xvac
931 * transaction moved it later. Whether it's still alive
932 * now depends on the status of xmax.
933 */
934 break;
935
936 case XID_ABORTED:
937
938 /*
939 * The tuple is dead, because the xvac transaction moved
940 * it off and committed. It's checkable, but also
941 * prunable.
942 */
943 return true;
944 }
945 }
946 else if (xmin_status != XID_COMMITTED)
947 {
948 /*
949 * Inserting transaction is not in progress, and not committed, so
950 * it might have changed the TupleDesc in ways we don't know
951 * about. Thus, don't try to check the tuple structure.
952 *
953 * If xmin_status happens to be XID_IS_CURRENT_XID, then in theory
954 * any such DDL changes ought to be visible to us, so perhaps we
955 * could check anyway in that case. But, for now, let's be
956 * conservative and treat this like any other uncommitted insert.
957 */
958 return false;
959 }
960 }
961
962 /*
963 * Okay, the inserter committed, so it was good at some point. Now what
964 * about the deleting transaction?
965 */
966
967 if (tuphdr->t_infomask & HEAP_XMAX_IS_MULTI)
968 {
969 /*
970 * xmax is a multixact, so sanity-check the MXID. Note that we do this
971 * prior to checking for HEAP_XMAX_INVALID or
972 * HEAP_XMAX_IS_LOCKED_ONLY. This might therefore complain about
973 * things that wouldn't actually be a problem during a normal scan,
974 * but eventually we're going to have to freeze, and that process will
975 * ignore hint bits.
976 *
977 * Even if the MXID is out of range, we still know that the original
978 * insert committed, so we can check the tuple itself. However, we
979 * can't rule out the possibility that this tuple is dead, so don't
980 * clear ctx->tuple_could_be_pruned. Possibly we should go ahead and
981 * clear that flag anyway if HEAP_XMAX_INVALID is set or if
982 * HEAP_XMAX_IS_LOCKED_ONLY is true, but for now we err on the side of
983 * avoiding possibly-bogus complaints about missing TOAST entries.
984 */
985 xmax = HeapTupleHeaderGetRawXmax(tuphdr);
986 switch (check_mxid_valid_in_rel(xmax, ctx))
987 {
988 case XID_INVALID:
989 report_corruption(ctx,
990 pstrdup("multitransaction ID is invalid"));
991 return true;
992 case XID_PRECEDES_RELMIN:
993 report_corruption(ctx,
994 psprintf("multitransaction ID %u precedes relation minimum multitransaction ID threshold %u",
995 xmax, ctx->relminmxid));
996 return true;
997 case XID_PRECEDES_CLUSTERMIN:
998 report_corruption(ctx,
999 psprintf("multitransaction ID %u precedes oldest valid multitransaction ID threshold %u",
1000 xmax, ctx->oldest_mxact));
1001 return true;
1002 case XID_IN_FUTURE:
1003 report_corruption(ctx,
1004 psprintf("multitransaction ID %u equals or exceeds next valid multitransaction ID %u",
1005 xmax,
1006 ctx->next_mxact));
1007 return true;
1008 case XID_BOUNDS_OK:
1009 break;
1010 }
1011 }
1012
1013 if (tuphdr->t_infomask & HEAP_XMAX_INVALID)
1014 {
1015 /*
1016 * This tuple is live. A concurrently running transaction could
1017 * delete it before we get around to checking the toast, but any such
1018 * running transaction is surely not less than our safe_xmin, so the
1019 * toast cannot be vacuumed out from under us.
1020 */
1021 ctx->tuple_could_be_pruned = false;
1022 return true;
1023 }
1024
1025 if (HEAP_XMAX_IS_LOCKED_ONLY(tuphdr->t_infomask))
1026 {
1027 /*
1028 * "Deleting" xact really only locked it, so the tuple is live in any
1029 * case. As above, a concurrently running transaction could delete
1030 * it, but it cannot be vacuumed out from under us.
1031 */
1032 ctx->tuple_could_be_pruned = false;
1033 return true;
1034 }
1035
1036 if (tuphdr->t_infomask & HEAP_XMAX_IS_MULTI)
1037 {
1038 /*
1039 * We already checked above that this multixact is within limits for
1040 * this table. Now check the update xid from this multixact.
1041 */
1042 xmax = HeapTupleGetUpdateXid(tuphdr);
1043 switch (get_xid_status(xmax, ctx, &xmax_status))
1044 {
1045 case XID_INVALID:
1046 /* not LOCKED_ONLY, so it has to have an xmax */
1047 report_corruption(ctx,
1048 pstrdup("update xid is invalid"));
1049 return true;
1050 case XID_IN_FUTURE:
1051 report_corruption(ctx,
1052 psprintf("update xid %u equals or exceeds next valid transaction ID %u:%u",
1053 xmax,
1054 EpochFromFullTransactionId(ctx->next_fxid),
1055 XidFromFullTransactionId(ctx->next_fxid)));
1056 return true;
1057 case XID_PRECEDES_RELMIN:
1058 report_corruption(ctx,
1059 psprintf("update xid %u precedes relation freeze threshold %u:%u",
1060 xmax,
1061 EpochFromFullTransactionId(ctx->relfrozenfxid),
1062 XidFromFullTransactionId(ctx->relfrozenfxid)));
1063 return true;
1064 case XID_PRECEDES_CLUSTERMIN:
1065 report_corruption(ctx,
1066 psprintf("update xid %u precedes oldest valid transaction ID %u:%u",
1067 xmax,
1068 EpochFromFullTransactionId(ctx->oldest_fxid),
1069 XidFromFullTransactionId(ctx->oldest_fxid)));
1070 return true;
1071 case XID_BOUNDS_OK:
1072 break;
1073 }
1074
1075 switch (xmax_status)
1076 {
1077 case XID_IS_CURRENT_XID:
1078 case XID_IN_PROGRESS:
1079
1080 /*
1081 * The delete is in progress, so it cannot be visible to our
1082 * snapshot.
1083 */
1084 ctx->tuple_could_be_pruned = false;
1085 break;
1086 case XID_COMMITTED:
1087
1088 /*
1089 * The delete committed. Whether the toast can be vacuumed
1090 * away depends on how old the deleting transaction is.
1091 */
1092 ctx->tuple_could_be_pruned = TransactionIdPrecedes(xmax,
1093 ctx->safe_xmin);
1094 break;
1095 case XID_ABORTED:
1096
1097 /*
1098 * The delete aborted or crashed. The tuple is still live.
1099 */
1100 ctx->tuple_could_be_pruned = false;
1101 break;
1102 }
1103
1104 /* Tuple itself is checkable even if it's dead. */
1105 return true;
1106 }
1107
1108 /* xmax is an XID, not a MXID. Sanity check it. */
1109 xmax = HeapTupleHeaderGetRawXmax(tuphdr);
1110 switch (get_xid_status(xmax, ctx, &xmax_status))
1111 {
1112 case XID_IN_FUTURE:
1113 report_corruption(ctx,
1114 psprintf("xmax %u equals or exceeds next valid transaction ID %u:%u",
1115 xmax,
1116 EpochFromFullTransactionId(ctx->next_fxid),
1117 XidFromFullTransactionId(ctx->next_fxid)));
1118 return false; /* corrupt */
1119 case XID_PRECEDES_RELMIN:
1120 report_corruption(ctx,
1121 psprintf("xmax %u precedes relation freeze threshold %u:%u",
1122 xmax,
1123 EpochFromFullTransactionId(ctx->relfrozenfxid),
1124 XidFromFullTransactionId(ctx->relfrozenfxid)));
1125 return false; /* corrupt */
1126 case XID_PRECEDES_CLUSTERMIN:
1127 report_corruption(ctx,
1128 psprintf("xmax %u precedes oldest valid transaction ID %u:%u",
1129 xmax,
1130 EpochFromFullTransactionId(ctx->oldest_fxid),
1131 XidFromFullTransactionId(ctx->oldest_fxid)));
1132 return false; /* corrupt */
1133 case XID_BOUNDS_OK:
1134 case XID_INVALID:
1135 break;
1136 }
1137
1138 /*
1139 * Whether the toast can be vacuumed away depends on how old the deleting
1140 * transaction is.
1141 */
1142 switch (xmax_status)
1143 {
1144 case XID_IS_CURRENT_XID:
1145 case XID_IN_PROGRESS:
1146
1147 /*
1148 * The delete is in progress, so it cannot be visible to our
1149 * snapshot.
1150 */
1151 ctx->tuple_could_be_pruned = false;
1152 break;
1153
1154 case XID_COMMITTED:
1155
1156 /*
1157 * The delete committed. Whether the toast can be vacuumed away
1158 * depends on how old the deleting transaction is.
1159 */
1160 ctx->tuple_could_be_pruned = TransactionIdPrecedes(xmax,
1161 ctx->safe_xmin);
1162 break;
1163
1164 case XID_ABORTED:
1165
1166 /*
1167 * The delete aborted or crashed. The tuple is still live.
1168 */
1169 ctx->tuple_could_be_pruned = false;
1170 break;
1171 }
1172
1173 /* Tuple itself is checkable even if it's dead. */
1174 return true;
1175 }
1176
1177
/*
 * Check the current toast tuple against the state tracked in ctx, recording
 * any corruption found in ctx->tupstore.
 *
 * This is not equivalent to running verify_heapam on the toast table itself,
 * and is not hardened against corruption of the toast table.  Rather, when
 * validating a toasted attribute in the main table, the sequence of toast
 * tuples that store the toasted value are retrieved and checked in order, with
 * each toast tuple being checked against where we are in the sequence, as well
 * as each toast tuple having its varlena structure sanity checked.
 *
 * On entry, *expected_chunk_seq should be the chunk_seq value that we expect
 * to find in toasttup.  On exit, it will be updated to the value the next call
 * to this function should expect to see.  extsize is the total external size
 * of the toasted value, used to derive the expected number and sizes of the
 * chunks.
 */
static void
check_toast_tuple(HeapTuple toasttup, HeapCheckContext *ctx,
				  ToastedAttribute *ta, int32 *expected_chunk_seq,
				  uint32 extsize)
{
	int32		chunk_seq;
	int32		last_chunk_seq = (extsize - 1) / TOAST_MAX_CHUNK_SIZE;
	Pointer		chunk;
	bool		isnull;
	int32		chunksize;
	int32		expected_size;

	/* Sanity-check the sequence number (column 2 of the toast table). */
	chunk_seq = DatumGetInt32(fastgetattr(toasttup, 2,
										  ctx->toast_rel->rd_att, &isnull));
	if (isnull)
	{
		report_toast_corruption(ctx, ta,
								psprintf("toast value %u has toast chunk with null sequence number",
										 ta->toast_pointer.va_valueid));
		/* Without a sequence number we cannot place this chunk at all. */
		return;
	}
	if (chunk_seq != *expected_chunk_seq)
	{
		/* Either the TOAST index is corrupt, or we don't have all chunks. */
		report_toast_corruption(ctx, ta,
								psprintf("toast value %u index scan returned chunk %d when expecting chunk %d",
										 ta->toast_pointer.va_valueid,
										 chunk_seq, *expected_chunk_seq));
	}
	/* Resynchronize on the chunk we actually saw, not the one we expected. */
	*expected_chunk_seq = chunk_seq + 1;

	/* Sanity-check the chunk data (column 3 of the toast table). */
	chunk = DatumGetPointer(fastgetattr(toasttup, 3,
										ctx->toast_rel->rd_att, &isnull));
	if (isnull)
	{
		report_toast_corruption(ctx, ta,
								psprintf("toast value %u chunk %d has null data",
										 ta->toast_pointer.va_valueid,
										 chunk_seq));
		return;
	}

	/*
	 * Derive the payload size from the chunk's varlena header.  Only plain
	 * 4-byte-header and short-header varlenas are legal forms for a toast
	 * chunk; anything else indicates corruption.
	 */
	if (!VARATT_IS_EXTENDED(chunk))
		chunksize = VARSIZE(chunk) - VARHDRSZ;
	else if (VARATT_IS_SHORT(chunk))
	{
		/*
		 * could happen due to heap_form_tuple doing its thing
		 */
		chunksize = VARSIZE_SHORT(chunk) - VARHDRSZ_SHORT;
	}
	else
	{
		/* should never happen */
		uint32		header = ((varattrib_4b *) chunk)->va_4byte.va_header;

		report_toast_corruption(ctx, ta,
								psprintf("toast value %u chunk %d has invalid varlena header %0x",
										 ta->toast_pointer.va_valueid,
										 chunk_seq, header));
		return;
	}

	/*
	 * Some checks on the data we've found
	 */
	if (chunk_seq > last_chunk_seq)
	{
		report_toast_corruption(ctx, ta,
								psprintf("toast value %u chunk %d follows last expected chunk %d",
										 ta->toast_pointer.va_valueid,
										 chunk_seq, last_chunk_seq));
		return;
	}

	/*
	 * Every chunk except the last must be exactly TOAST_MAX_CHUNK_SIZE
	 * bytes; the last chunk holds whatever remains of extsize.
	 */
	expected_size = chunk_seq < last_chunk_seq ? TOAST_MAX_CHUNK_SIZE
		: extsize - (last_chunk_seq * TOAST_MAX_CHUNK_SIZE);

	if (chunksize != expected_size)
		report_toast_corruption(ctx, ta,
								psprintf("toast value %u chunk %d has size %u, but expected size %u",
										 ta->toast_pointer.va_valueid,
										 chunk_seq, chunksize, expected_size));
}
1278
/*
 * Check the current attribute as tracked in ctx, recording any corruption
 * found in ctx->tupstore.
 *
 * This function follows the logic performed by heap_deform_tuple(), and in the
 * case of a toasted value, optionally stores the toast pointer so later it can
 * be checked following the logic of detoast_external_attr(), checking for any
 * conditions that would result in either of those functions Asserting or
 * crashing the backend.  The checks performed by Asserts present in those two
 * functions are also performed here and in check_toasted_attribute.  In cases
 * where those two functions are a bit cavalier in their assumptions about data
 * being correct, we perform additional checks not present in either of those
 * two functions.  Where some condition is checked in both of those functions,
 * we perform it here twice, as we parallel the logical flow of those two
 * functions.  The presence of duplicate checks seems a reasonable price to pay
 * for keeping this code tightly coupled with the code it protects.
 *
 * On entry, ctx->offset is the byte offset of this attribute within the
 * tuple's data area; on success it is advanced past the attribute.
 *
 * Returns true if the tuple attribute is sane enough for processing to
 * continue on to the next attribute, false otherwise.
 */
static bool
check_tuple_attribute(HeapCheckContext *ctx)
{
	Datum		attdatum;
	struct varlena *attr;
	char	   *tp;				/* pointer to the tuple data */
	uint16		infomask;
	Form_pg_attribute thisatt;
	struct varatt_external toast_pointer;

	infomask = ctx->tuphdr->t_infomask;
	thisatt = TupleDescAttr(RelationGetDescr(ctx->rel), ctx->attnum);

	tp = (char *) ctx->tuphdr + ctx->tuphdr->t_hoff;

	/* The attribute must not start beyond the end of the tuple. */
	if (ctx->tuphdr->t_hoff + ctx->offset > ctx->lp_len)
	{
		report_corruption(ctx,
						  psprintf("attribute with length %u starts at offset %u beyond total tuple length %u",
								   thisatt->attlen,
								   ctx->tuphdr->t_hoff + ctx->offset,
								   ctx->lp_len));
		return false;
	}

	/* Skip null values */
	if (infomask & HEAP_HASNULL && att_isnull(ctx->attnum, ctx->tuphdr->t_bits))
		return true;

	/* Skip non-varlena values, but update offset first */
	if (thisatt->attlen != -1)
	{
		/* Align, advance past the datum, and bounds-check the result. */
		ctx->offset = att_align_nominal(ctx->offset, thisatt->attalign);
		ctx->offset = att_addlength_pointer(ctx->offset, thisatt->attlen,
											tp + ctx->offset);
		if (ctx->tuphdr->t_hoff + ctx->offset > ctx->lp_len)
		{
			report_corruption(ctx,
							  psprintf("attribute with length %u ends at offset %u beyond total tuple length %u",
									   thisatt->attlen,
									   ctx->tuphdr->t_hoff + ctx->offset,
									   ctx->lp_len));
			return false;
		}
		return true;
	}

	/* Ok, we're looking at a varlena attribute. */
	ctx->offset = att_align_pointer(ctx->offset, thisatt->attalign, -1,
									tp + ctx->offset);

	/* Get the (possibly corrupt) varlena datum */
	attdatum = fetchatt(thisatt, tp + ctx->offset);

	/*
	 * We have the datum, but we cannot decode it carelessly, as it may still
	 * be corrupt.
	 */

	/*
	 * Check that VARTAG_SIZE won't hit a TrapMacro on a corrupt va_tag before
	 * risking a call into att_addlength_pointer
	 */
	if (VARATT_IS_EXTERNAL(tp + ctx->offset))
	{
		uint8		va_tag = VARTAG_EXTERNAL(tp + ctx->offset);

		/* Only on-disk toast pointers are expected in a heap page. */
		if (va_tag != VARTAG_ONDISK)
		{
			report_corruption(ctx,
							  psprintf("toasted attribute has unexpected TOAST tag %u",
									   va_tag));
			/* We can't know where the next attribute begins */
			return false;
		}
	}

	/* Ok, should be safe now */
	ctx->offset = att_addlength_pointer(ctx->offset, thisatt->attlen,
										tp + ctx->offset);

	if (ctx->tuphdr->t_hoff + ctx->offset > ctx->lp_len)
	{
		report_corruption(ctx,
						  psprintf("attribute with length %u ends at offset %u beyond total tuple length %u",
								   thisatt->attlen,
								   ctx->tuphdr->t_hoff + ctx->offset,
								   ctx->lp_len));

		return false;
	}

	/*
	 * heap_deform_tuple would be done with this attribute at this point,
	 * having stored it in values[], and would continue to the next attribute.
	 * We go further, because we need to check if the toast datum is corrupt.
	 */

	attr = (struct varlena *) DatumGetPointer(attdatum);

	/*
	 * Now we follow the logic of detoast_external_attr(), with the same
	 * caveats about being paranoid about corruption.
	 */

	/* Skip values that are not external */
	if (!VARATT_IS_EXTERNAL(attr))
		return true;

	/* It is external, and we're looking at a page on disk */

	/*
	 * Must copy attr into toast_pointer for alignment considerations
	 */
	VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr);

	/* The tuple header better claim to contain toasted values */
	if (!(infomask & HEAP_HASEXTERNAL))
	{
		report_corruption(ctx,
						  psprintf("toast value %u is external but tuple header flag HEAP_HASEXTERNAL not set",
								   toast_pointer.va_valueid));
		return true;
	}

	/* The relation better have a toast table */
	if (!ctx->rel->rd_rel->reltoastrelid)
	{
		report_corruption(ctx,
						  psprintf("toast value %u is external but relation has no toast relation",
								   toast_pointer.va_valueid));
		return true;
	}

	/* If we were told to skip toast checking, then we're done. */
	if (ctx->toast_rel == NULL)
		return true;

	/*
	 * If this tuple is eligible to be pruned, we cannot check the toast.
	 * Otherwise, we record a copy of the toast pointer so we can check it
	 * after releasing the main table buffer lock.
	 */
	if (!ctx->tuple_could_be_pruned)
	{
		ToastedAttribute *ta;

		ta = (ToastedAttribute *) palloc0(sizeof(ToastedAttribute));

		VARATT_EXTERNAL_GET_POINTER(ta->toast_pointer, attr);
		ta->blkno = ctx->blkno;
		ta->offnum = ctx->offnum;
		ta->attnum = ctx->attnum;
		ctx->toasted_attributes = lappend(ctx->toasted_attributes, ta);
	}

	return true;
}
1457
1458 /*
1459 * For each attribute collected in ctx->toasted_attributes, look up the value
1460 * in the toast table and perform checks on it. This function should only be
1461 * called on toast pointers which cannot be vacuumed away during our
1462 * processing.
1463 */
1464 static void
check_toasted_attribute(HeapCheckContext * ctx,ToastedAttribute * ta)1465 check_toasted_attribute(HeapCheckContext *ctx, ToastedAttribute *ta)
1466 {
1467 SnapshotData SnapshotToast;
1468 ScanKeyData toastkey;
1469 SysScanDesc toastscan;
1470 bool found_toasttup;
1471 HeapTuple toasttup;
1472 uint32 extsize;
1473 int32 expected_chunk_seq = 0;
1474 int32 last_chunk_seq;
1475
1476 extsize = VARATT_EXTERNAL_GET_EXTSIZE(ta->toast_pointer);
1477 last_chunk_seq = (extsize - 1) / TOAST_MAX_CHUNK_SIZE;
1478
1479 /*
1480 * Setup a scan key to find chunks in toast table with matching va_valueid
1481 */
1482 ScanKeyInit(&toastkey,
1483 (AttrNumber) 1,
1484 BTEqualStrategyNumber, F_OIDEQ,
1485 ObjectIdGetDatum(ta->toast_pointer.va_valueid));
1486
1487 /*
1488 * Check if any chunks for this toasted object exist in the toast table,
1489 * accessible via the index.
1490 */
1491 init_toast_snapshot(&SnapshotToast);
1492 toastscan = systable_beginscan_ordered(ctx->toast_rel,
1493 ctx->valid_toast_index,
1494 &SnapshotToast, 1,
1495 &toastkey);
1496 found_toasttup = false;
1497 while ((toasttup =
1498 systable_getnext_ordered(toastscan,
1499 ForwardScanDirection)) != NULL)
1500 {
1501 found_toasttup = true;
1502 check_toast_tuple(toasttup, ctx, ta, &expected_chunk_seq, extsize);
1503 }
1504 systable_endscan_ordered(toastscan);
1505
1506 if (!found_toasttup)
1507 report_toast_corruption(ctx, ta,
1508 psprintf("toast value %u not found in toast table",
1509 ta->toast_pointer.va_valueid));
1510 else if (expected_chunk_seq <= last_chunk_seq)
1511 report_toast_corruption(ctx, ta,
1512 psprintf("toast value %u was expected to end at chunk %d, but ended while expecting chunk %d",
1513 ta->toast_pointer.va_valueid,
1514 last_chunk_seq, expected_chunk_seq));
1515 }
1516
1517 /*
1518 * Check the current tuple as tracked in ctx, recording any corruption found in
1519 * ctx->tupstore.
1520 */
1521 static void
check_tuple(HeapCheckContext * ctx)1522 check_tuple(HeapCheckContext *ctx)
1523 {
1524 /*
1525 * Check various forms of tuple header corruption, and if the header is
1526 * too corrupt, do not continue with other checks.
1527 */
1528 if (!check_tuple_header(ctx))
1529 return;
1530
1531 /*
1532 * Check tuple visibility. If the inserting transaction aborted, we
1533 * cannot assume our relation description matches the tuple structure, and
1534 * therefore cannot check it.
1535 */
1536 if (!check_tuple_visibility(ctx))
1537 return;
1538
1539 /*
1540 * The tuple is visible, so it must be compatible with the current version
1541 * of the relation descriptor. It might have fewer columns than are
1542 * present in the relation descriptor, but it cannot have more.
1543 */
1544 if (RelationGetDescr(ctx->rel)->natts < ctx->natts)
1545 {
1546 report_corruption(ctx,
1547 psprintf("number of attributes %u exceeds maximum expected for table %u",
1548 ctx->natts,
1549 RelationGetDescr(ctx->rel)->natts));
1550 return;
1551 }
1552
1553 /*
1554 * Check each attribute unless we hit corruption that confuses what to do
1555 * next, at which point we abort further attribute checks for this tuple.
1556 * Note that we don't abort for all types of corruption, only for those
1557 * types where we don't know how to continue. We also don't abort the
1558 * checking of toasted attributes collected from the tuple prior to
1559 * aborting. Those will still be checked later along with other toasted
1560 * attributes collected from the page.
1561 */
1562 ctx->offset = 0;
1563 for (ctx->attnum = 0; ctx->attnum < ctx->natts; ctx->attnum++)
1564 if (!check_tuple_attribute(ctx))
1565 break; /* cannot continue */
1566
1567 /* revert attnum to -1 until we again examine individual attributes */
1568 ctx->attnum = -1;
1569 }
1570
1571 /*
1572 * Convert a TransactionId into a FullTransactionId using our cached values of
1573 * the valid transaction ID range. It is the caller's responsibility to have
1574 * already updated the cached values, if necessary.
1575 */
1576 static FullTransactionId
FullTransactionIdFromXidAndCtx(TransactionId xid,const HeapCheckContext * ctx)1577 FullTransactionIdFromXidAndCtx(TransactionId xid, const HeapCheckContext *ctx)
1578 {
1579 uint32 epoch;
1580
1581 if (!TransactionIdIsNormal(xid))
1582 return FullTransactionIdFromEpochAndXid(0, xid);
1583 epoch = EpochFromFullTransactionId(ctx->next_fxid);
1584 if (xid > ctx->next_xid)
1585 epoch--;
1586 return FullTransactionIdFromEpochAndXid(epoch, xid);
1587 }
1588
1589 /*
1590 * Update our cached range of valid transaction IDs.
1591 */
1592 static void
update_cached_xid_range(HeapCheckContext * ctx)1593 update_cached_xid_range(HeapCheckContext *ctx)
1594 {
1595 /* Make cached copies */
1596 LWLockAcquire(XidGenLock, LW_SHARED);
1597 ctx->next_fxid = ShmemVariableCache->nextXid;
1598 ctx->oldest_xid = ShmemVariableCache->oldestXid;
1599 LWLockRelease(XidGenLock);
1600
1601 /* And compute alternate versions of the same */
1602 ctx->oldest_fxid = FullTransactionIdFromXidAndCtx(ctx->oldest_xid, ctx);
1603 ctx->next_xid = XidFromFullTransactionId(ctx->next_fxid);
1604 }
1605
1606 /*
1607 * Update our cached range of valid multitransaction IDs.
1608 */
1609 static void
update_cached_mxid_range(HeapCheckContext * ctx)1610 update_cached_mxid_range(HeapCheckContext *ctx)
1611 {
1612 ReadMultiXactIdRange(&ctx->oldest_mxact, &ctx->next_mxact);
1613 }
1614
1615 /*
1616 * Return whether the given FullTransactionId is within our cached valid
1617 * transaction ID range.
1618 */
1619 static inline bool
fxid_in_cached_range(FullTransactionId fxid,const HeapCheckContext * ctx)1620 fxid_in_cached_range(FullTransactionId fxid, const HeapCheckContext *ctx)
1621 {
1622 return (FullTransactionIdPrecedesOrEquals(ctx->oldest_fxid, fxid) &&
1623 FullTransactionIdPrecedes(fxid, ctx->next_fxid));
1624 }
1625
1626 /*
1627 * Checks whether a multitransaction ID is in the cached valid range, returning
1628 * the nature of the range violation, if any.
1629 */
1630 static XidBoundsViolation
check_mxid_in_range(MultiXactId mxid,HeapCheckContext * ctx)1631 check_mxid_in_range(MultiXactId mxid, HeapCheckContext *ctx)
1632 {
1633 if (!TransactionIdIsValid(mxid))
1634 return XID_INVALID;
1635 if (MultiXactIdPrecedes(mxid, ctx->relminmxid))
1636 return XID_PRECEDES_RELMIN;
1637 if (MultiXactIdPrecedes(mxid, ctx->oldest_mxact))
1638 return XID_PRECEDES_CLUSTERMIN;
1639 if (MultiXactIdPrecedesOrEquals(ctx->next_mxact, mxid))
1640 return XID_IN_FUTURE;
1641 return XID_BOUNDS_OK;
1642 }
1643
1644 /*
1645 * Checks whether the given mxid is valid to appear in the heap being checked,
1646 * returning the nature of the range violation, if any.
1647 *
1648 * This function attempts to return quickly by caching the known valid mxid
1649 * range in ctx. Callers should already have performed the initial setup of
1650 * the cache prior to the first call to this function.
1651 */
1652 static XidBoundsViolation
check_mxid_valid_in_rel(MultiXactId mxid,HeapCheckContext * ctx)1653 check_mxid_valid_in_rel(MultiXactId mxid, HeapCheckContext *ctx)
1654 {
1655 XidBoundsViolation result;
1656
1657 result = check_mxid_in_range(mxid, ctx);
1658 if (result == XID_BOUNDS_OK)
1659 return XID_BOUNDS_OK;
1660
1661 /* The range may have advanced. Recheck. */
1662 update_cached_mxid_range(ctx);
1663 return check_mxid_in_range(mxid, ctx);
1664 }
1665
/*
 * Checks whether the given transaction ID is (or was recently) valid to appear
 * in the heap being checked, or whether it is too old or too new to appear in
 * the relation, returning information about the nature of the bounds violation.
 *
 * We cache the range of valid transaction IDs.  If xid is in that range, we
 * conclude that it is valid, even though concurrent changes to the table might
 * invalidate it under certain corrupt conditions.  (For example, if the table
 * contains corrupt all-frozen bits, a concurrent vacuum might skip the page(s)
 * containing the xid and then truncate clog and advance the relfrozenxid
 * beyond xid.)  Reporting the xid as valid under such conditions seems
 * acceptable, since if we had checked it earlier in our scan it would have
 * truly been valid at that time.
 *
 * If the status argument is not NULL, and if and only if the transaction ID
 * appears to be valid in this relation, the status argument will be set with
 * the commit status of the transaction ID.
 */
static XidBoundsViolation
get_xid_status(TransactionId xid, HeapCheckContext *ctx,
			   XidCommitStatus *status)
{
	FullTransactionId fxid;
	FullTransactionId clog_horizon;

	/* Quick check for special xids */
	if (!TransactionIdIsValid(xid))
		return XID_INVALID;
	else if (xid == BootstrapTransactionId || xid == FrozenTransactionId)
	{
		/* Both special xids count as committed and within bounds. */
		if (status != NULL)
			*status = XID_COMMITTED;
		return XID_BOUNDS_OK;
	}

	/* Check if the xid is within bounds */
	fxid = FullTransactionIdFromXidAndCtx(xid, ctx);
	if (!fxid_in_cached_range(fxid, ctx))
	{
		/*
		 * We may have been checking against stale values.  Update the cached
		 * range to be sure, and since we relied on the cached range when we
		 * performed the full xid conversion, reconvert.
		 */
		update_cached_xid_range(ctx);
		fxid = FullTransactionIdFromXidAndCtx(xid, ctx);
	}

	/* Classify against the (now fresh) bounds, most severe first. */
	if (FullTransactionIdPrecedesOrEquals(ctx->next_fxid, fxid))
		return XID_IN_FUTURE;
	if (FullTransactionIdPrecedes(fxid, ctx->oldest_fxid))
		return XID_PRECEDES_CLUSTERMIN;
	if (FullTransactionIdPrecedes(fxid, ctx->relfrozenfxid))
		return XID_PRECEDES_RELMIN;

	/* Early return if the caller does not request clog checking */
	if (status == NULL)
		return XID_BOUNDS_OK;

	/* Early return if we just checked this xid in a prior call */
	if (xid == ctx->cached_xid)
	{
		*status = ctx->cached_status;
		return XID_BOUNDS_OK;
	}

	/*
	 * Default to XID_COMMITTED; this is what we report for xids older than
	 * the clog horizon computed below, which are not looked up at all.
	 */
	*status = XID_COMMITTED;

	/*
	 * Take XactTruncationLock so the horizon we read here remains consistent
	 * with the status lookups that follow.
	 */
	LWLockAcquire(XactTruncationLock, LW_SHARED);
	clog_horizon =
		FullTransactionIdFromXidAndCtx(ShmemVariableCache->oldestClogXid,
									   ctx);
	if (FullTransactionIdPrecedesOrEquals(clog_horizon, fxid))
	{
		/* The xid is at or after the horizon; resolve its exact status. */
		if (TransactionIdIsCurrentTransactionId(xid))
			*status = XID_IS_CURRENT_XID;
		else if (TransactionIdIsInProgress(xid))
			*status = XID_IN_PROGRESS;
		else if (TransactionIdDidCommit(xid))
			*status = XID_COMMITTED;
		else
			*status = XID_ABORTED;
	}
	LWLockRelease(XactTruncationLock);

	/* Remember the verdict so a repeat query for this xid is free. */
	ctx->cached_xid = xid;
	ctx->cached_status = *status;
	return XID_BOUNDS_OK;
}
1753