/*-------------------------------------------------------------------------
 *
 * vacuumlazy.c
 *	  Concurrent ("lazy") vacuuming.
 *
 *
 * The major space usage for LAZY VACUUM is storage for the array of dead tuple
 * TIDs.  We want to ensure we can vacuum even the very largest relations with
 * finite memory space usage.  To do that, we set upper bounds on the number of
 * tuples we will keep track of at once.
 *
 * We are willing to use at most maintenance_work_mem (or perhaps
 * autovacuum_work_mem) memory space to keep track of dead tuples.  We
 * initially allocate an array of TIDs of that size, with an upper limit that
 * depends on table size (this limit ensures we don't allocate a huge area
 * uselessly for vacuuming small tables).  If the array threatens to overflow,
 * we suspend the heap scan phase and perform a pass of index cleanup and page
 * compaction, then resume the heap scan with an empty TID array.
 *
 * If we're processing a table with no indexes, we can just vacuum each page
 * as we go; there's no need to save up multiple tuples to minimize the number
 * of index scans performed.  So we don't use maintenance_work_mem memory for
 * the TID array, just enough to hold as many heap tuples as fit on one page.
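 *
 * (To give a rough sense of scale: each dead-tuple TID is a 6-byte
 * ItemPointerData, so maintenance_work_mem = 64MB leaves room for about
 * 11 million TIDs before a round of index vacuuming is forced, subject to
 * the table-size clamp described above.)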
 *
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/commands/vacuumlazy.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <math.h>

#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "access/xlog.h"
#include "catalog/catalog.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/progress.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"
#include "utils/tqual.h"


/*
 * Space/time tradeoff parameters: do these need to be user-tunable?
 *
 * To consider truncating the relation, we want there to be at least
 * REL_TRUNCATE_MINIMUM or (relsize / REL_TRUNCATE_FRACTION) (whichever
 * is less) potentially-freeable pages.
 */
#define REL_TRUNCATE_MINIMUM	1000
#define REL_TRUNCATE_FRACTION	16
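/*
 * (Worked example: a 4,000-page relation needs only 4000/16 = 250
 * potentially-freeable pages to qualify, while any relation larger than
 * 16,000 pages uses the flat 1,000-page minimum.)
 */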

/*
 * Timing parameters for truncate locking heuristics.
 *
 * These were not exposed as user tunable GUC values because it didn't seem
 * that the potential for improvement was great enough to merit the cost of
 * supporting them.
 */
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL		20	/* ms */
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL		50	/* ms */
#define VACUUM_TRUNCATE_LOCK_TIMEOUT			5000	/* ms */

/*
 * Guesstimation of number of dead tuples per page.  This is used to
 * provide an upper limit to memory allocated when vacuuming small
 * tables.
 */
#define LAZY_ALLOC_TUPLES		MaxHeapTuplesPerPage
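/*
 * (With the default 8KB block size and 8-byte MAXALIGN, MaxHeapTuplesPerPage
 * works out to 291, so small tables are charged at most 291 TID slots per
 * heap page.)
 */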

/*
 * Before we consider skipping a page that's marked as clean in
 * visibility map, we must've seen at least this many clean pages.
 */
#define SKIP_PAGES_THRESHOLD	((BlockNumber) 32)

/*
 * Size of the prefetch window for lazy vacuum backwards truncation scan.
 * Needs to be a power of 2.
 */
#define PREFETCH_SIZE			((BlockNumber) 32)
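/*
 * (Presumably the power-of-2 requirement is so the backward scan in
 * count_nondeletable_pages() can align its prefetch window with a simple
 * bitmask rather than a division.)
 */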

typedef struct LVRelStats
{
	/* hasindex = true means two-pass strategy; false means one-pass */
	bool		hasindex;
	/* Overall statistics about rel */
	BlockNumber old_rel_pages;	/* previous value of pg_class.relpages */
	BlockNumber rel_pages;		/* total number of pages */
	BlockNumber scanned_pages;	/* number of pages we examined */
	BlockNumber pinskipped_pages;	/* # of pages we skipped due to a pin */
	BlockNumber frozenskipped_pages;	/* # of frozen pages we skipped */
	BlockNumber tupcount_pages; /* pages whose tuples we counted */
	double		scanned_tuples; /* counts only tuples on tupcount_pages */
	double		old_rel_tuples; /* previous value of pg_class.reltuples */
	double		new_rel_tuples; /* new estimated total # of tuples */
	double		new_dead_tuples;	/* new estimated total # of dead tuples */
	BlockNumber pages_removed;
	double		tuples_deleted;
	BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
	/* List of TIDs of tuples we intend to delete */
	/* NB: this list is ordered by TID address */
	int			num_dead_tuples;	/* current # of entries */
	int			max_dead_tuples;	/* # slots allocated in array */
	ItemPointer dead_tuples;	/* array of ItemPointerData */
	int			num_index_scans;
	TransactionId latestRemovedXid;
	bool		lock_waiter_detected;
} LVRelStats;


/* A few variables that don't seem worth passing around as parameters */
static int	elevel = -1;

static TransactionId OldestXmin;
static TransactionId FreezeLimit;
static MultiXactId MultiXactCutoff;

static BufferAccessStrategy vac_strategy;


/* non-export function prototypes */
static void lazy_scan_heap(Relation onerel, int options,
			   LVRelStats *vacrelstats, Relation *Irel, int nindexes,
			   bool aggressive);
static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
static bool lazy_check_needs_freeze(Buffer buf, bool *hastup);
static void lazy_vacuum_index(Relation indrel,
				  IndexBulkDeleteResult **stats,
				  LVRelStats *vacrelstats);
static void lazy_cleanup_index(Relation indrel,
				   IndexBulkDeleteResult *stats,
				   LVRelStats *vacrelstats);
static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
				 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
static bool should_attempt_truncation(LVRelStats *vacrelstats);
static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
static BlockNumber count_nondeletable_pages(Relation onerel,
						 LVRelStats *vacrelstats);
static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
					   ItemPointer itemptr);
static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
static int	vac_cmp_itemptr(const void *left, const void *right);
static bool heap_page_is_all_visible(Relation rel, Buffer buf,
						 TransactionId *visibility_cutoff_xid, bool *all_frozen);


/*
 *	lazy_vacuum_rel() -- perform LAZY VACUUM for one heap relation
 *
 *		This routine vacuums a single heap, cleans out its indexes, and
 *		updates its relpages and reltuples statistics.
 *
 *		At entry, we have already established a transaction and opened
 *		and locked the relation.
 */
void
lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
				BufferAccessStrategy bstrategy)
{
	LVRelStats *vacrelstats;
	Relation   *Irel;
	int			nindexes;
	PGRUsage	ru0;
	TimestampTz starttime = 0;
	long		secs;
	int			usecs;
	double		read_rate,
				write_rate;
	bool		aggressive;		/* should we scan all unfrozen pages? */
	bool		scanned_all_unfrozen;	/* actually scanned all such pages? */
	TransactionId xidFullScanLimit;
	MultiXactId mxactFullScanLimit;
	BlockNumber new_rel_pages;
	double		new_rel_tuples;
	BlockNumber new_rel_allvisible;
	double		new_live_tuples;
	TransactionId new_frozen_xid;
	MultiXactId new_min_multi;

	Assert(params != NULL);

	/* measure elapsed time iff autovacuum logging requires it */
	if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
	{
		pg_rusage_init(&ru0);
		starttime = GetCurrentTimestamp();
	}

	if (options & VACOPT_VERBOSE)
		elevel = INFO;
	else
		elevel = DEBUG2;

	pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
								  RelationGetRelid(onerel));

	vac_strategy = bstrategy;

	vacuum_set_xid_limits(onerel,
						  params->freeze_min_age,
						  params->freeze_table_age,
						  params->multixact_freeze_min_age,
						  params->multixact_freeze_table_age,
						  &OldestXmin, &FreezeLimit, &xidFullScanLimit,
						  &MultiXactCutoff, &mxactFullScanLimit);

	/*
	 * We request an aggressive scan if the table's frozen Xid is now older
	 * than or equal to the requested Xid full-table scan limit; or if the
	 * table's minimum MultiXactId is older than or equal to the requested
	 * mxid full-table scan limit; or if DISABLE_PAGE_SKIPPING was specified.
	 */
	aggressive = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
											   xidFullScanLimit);
	aggressive |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
											  mxactFullScanLimit);
	if (options & VACOPT_DISABLE_PAGE_SKIPPING)
		aggressive = true;

	vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));

	vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
	vacrelstats->old_rel_tuples = onerel->rd_rel->reltuples;
	vacrelstats->num_index_scans = 0;
	vacrelstats->pages_removed = 0;
	vacrelstats->lock_waiter_detected = false;

	/* Open all indexes of the relation */
	vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
	vacrelstats->hasindex = (nindexes > 0);

	/* Do the vacuuming */
	lazy_scan_heap(onerel, options, vacrelstats, Irel, nindexes, aggressive);

	/* Done with indexes */
	vac_close_indexes(nindexes, Irel, NoLock);

	/*
	 * Compute whether we actually scanned all the unfrozen pages. If we did,
	 * we can adjust relfrozenxid and relminmxid.
	 *
	 * NB: We need to check this before truncating the relation, because that
	 * will change ->rel_pages.
	 */
	if ((vacrelstats->scanned_pages + vacrelstats->frozenskipped_pages)
		< vacrelstats->rel_pages)
	{
		Assert(!aggressive);
		scanned_all_unfrozen = false;
	}
	else
		scanned_all_unfrozen = true;

	/*
	 * Optionally truncate the relation.
	 */
	if (should_attempt_truncation(vacrelstats))
		lazy_truncate_heap(onerel, vacrelstats);

	/* Report that we are now doing final cleanup */
	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
								 PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);

	/* Vacuum the Free Space Map */
	FreeSpaceMapVacuum(onerel);

	/*
	 * Update statistics in pg_class.
	 *
	 * A corner case here is that if we scanned no pages at all because every
	 * page is all-visible, we should not update relpages/reltuples, because
	 * we have no new information to contribute.  In particular this keeps us
	 * from replacing relpages=reltuples=0 (which means "unknown tuple
	 * density") with nonzero relpages and reltuples=0 (which means "zero
	 * tuple density") unless there's some actual evidence for the latter.
	 *
	 * It's important that we use tupcount_pages and not scanned_pages for the
	 * check described above; scanned_pages counts pages where we could not
	 * get cleanup lock, and which were processed only for frozenxid purposes.
	 *
	 * We do update relallvisible even in the corner case, since if the table
	 * is all-visible we'd definitely like to know that.  But clamp the value
	 * to be not more than what we're setting relpages to.
	 *
	 * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
	 * since then we don't know for certain that all tuples have a newer xmin.
	 */
	new_rel_pages = vacrelstats->rel_pages;
	new_rel_tuples = vacrelstats->new_rel_tuples;
	if (vacrelstats->tupcount_pages == 0 && new_rel_pages > 0)
	{
		new_rel_pages = vacrelstats->old_rel_pages;
		new_rel_tuples = vacrelstats->old_rel_tuples;
	}

	visibilitymap_count(onerel, &new_rel_allvisible, NULL);
	if (new_rel_allvisible > new_rel_pages)
		new_rel_allvisible = new_rel_pages;

	new_frozen_xid = scanned_all_unfrozen ? FreezeLimit : InvalidTransactionId;
	new_min_multi = scanned_all_unfrozen ? MultiXactCutoff : InvalidMultiXactId;

	vac_update_relstats(onerel,
						new_rel_pages,
						new_rel_tuples,
						new_rel_allvisible,
						vacrelstats->hasindex,
						new_frozen_xid,
						new_min_multi,
						false);

	/* report results to the stats collector, too */
	new_live_tuples = new_rel_tuples - vacrelstats->new_dead_tuples;
	if (new_live_tuples < 0)
		new_live_tuples = 0;	/* just in case */

	pgstat_report_vacuum(RelationGetRelid(onerel),
						 onerel->rd_rel->relisshared,
						 new_live_tuples,
						 vacrelstats->new_dead_tuples);
	pgstat_progress_end_command();

	/* and log the action if appropriate */
	if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
	{
		TimestampTz endtime = GetCurrentTimestamp();

		if (params->log_min_duration == 0 ||
			TimestampDifferenceExceeds(starttime, endtime,
									   params->log_min_duration))
		{
			StringInfoData buf;

			TimestampDifference(starttime, endtime, &secs, &usecs);

			read_rate = 0;
			write_rate = 0;
			if ((secs > 0) || (usecs > 0))
			{
				read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
					(secs + usecs / 1000000.0);
				write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
					(secs + usecs / 1000000.0);
			}

			/*
			 * This is pretty messy, but we split it up so that we can skip
			 * emitting individual parts of the message when not applicable.
			 */
			initStringInfo(&buf);
			appendStringInfo(&buf, _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n"),
							 get_database_name(MyDatabaseId),
							 get_namespace_name(RelationGetNamespace(onerel)),
							 RelationGetRelationName(onerel),
							 vacrelstats->num_index_scans);
			appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
							 vacrelstats->pages_removed,
							 vacrelstats->rel_pages,
							 vacrelstats->pinskipped_pages,
							 vacrelstats->frozenskipped_pages);
			appendStringInfo(&buf,
							 _("tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, oldest xmin: %u\n"),
							 vacrelstats->tuples_deleted,
							 vacrelstats->new_rel_tuples,
							 vacrelstats->new_dead_tuples,
							 OldestXmin);
			appendStringInfo(&buf,
							 _("buffer usage: %d hits, %d misses, %d dirtied\n"),
							 VacuumPageHit,
							 VacuumPageMiss,
							 VacuumPageDirty);
			appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
							 read_rate, write_rate);
			appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));

			ereport(LOG,
					(errmsg_internal("%s", buf.data)));
			pfree(buf.data);
		}
	}
}

/*
 * For Hot Standby we need to know the highest transaction id that will
 * be removed by any change. VACUUM proceeds in a number of passes so
 * we need to consider how each pass operates. The first phase runs
 * heap_page_prune(), which can issue XLOG_HEAP2_CLEAN records as it
 * progresses - these will have a latestRemovedXid on each record.
 * In some cases this removes all of the tuples to be removed, though
 * often we have dead tuples with index pointers so we must remember them
 * for removal in phase 3. Index records for those rows are removed
 * in phase 2 and index blocks do not have MVCC information attached.
 * So before we can allow removal of any index tuples we need to issue
 * a WAL record containing the latestRemovedXid of rows that will be
 * removed in phase three. This allows recovery queries to block at the
 * correct place, i.e. before phase two, rather than during phase three
 * which would be after the rows have become inaccessible.
 */
static void
vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
{
	/*
	 * Skip this for relations for which no WAL is to be written, or if we're
	 * not trying to support archive recovery.
	 */
	if (!RelationNeedsWAL(rel) || !XLogIsNeeded())
		return;

	/*
	 * No need to write the record at all unless it contains a valid value
	 */
	if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
		(void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
}

/*
 *	lazy_scan_heap() -- scan an open heap relation
 *
 *		This routine prunes each page in the heap, which will among other
 *		things truncate dead tuples to dead line pointers, defragment the
 *		page, and set commit status bits (see heap_page_prune).  It also builds
 *		lists of dead tuples and pages with free space, calculates statistics
 *		on the number of live tuples in the heap, and marks pages as
 *		all-visible if appropriate.  When done, or when we run low on space for
 *		dead-tuple TIDs, invoke vacuuming of indexes and call lazy_vacuum_heap
 *		to reclaim dead line pointers.
 *
 *		If there are no indexes then we can reclaim line pointers on the fly;
 *		dead line pointers need only be retained until all index pointers that
 *		reference them have been killed.
 */
static void
lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
			   Relation *Irel, int nindexes, bool aggressive)
{
	BlockNumber nblocks,
				blkno;
	HeapTupleData tuple;
	char	   *relname;
	TransactionId relfrozenxid = onerel->rd_rel->relfrozenxid;
	TransactionId relminmxid = onerel->rd_rel->relminmxid;
	BlockNumber empty_pages,
				vacuumed_pages;
	double		num_tuples,
				tups_vacuumed,
				nkeep,
				nunused;
	IndexBulkDeleteResult **indstats;
	int			i;
	PGRUsage	ru0;
	Buffer		vmbuffer = InvalidBuffer;
	BlockNumber next_unskippable_block;
	bool		skipping_blocks;
	xl_heap_freeze_tuple *frozen;
	StringInfoData buf;
	const int	initprog_index[] = {
		PROGRESS_VACUUM_PHASE,
		PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
		PROGRESS_VACUUM_MAX_DEAD_TUPLES
	};
	int64		initprog_val[3];

	pg_rusage_init(&ru0);

	relname = RelationGetRelationName(onerel);
	ereport(elevel,
			(errmsg("vacuuming \"%s.%s\"",
					get_namespace_name(RelationGetNamespace(onerel)),
					relname)));

	empty_pages = vacuumed_pages = 0;
	num_tuples = tups_vacuumed = nkeep = nunused = 0;

	indstats = (IndexBulkDeleteResult **)
		palloc0(nindexes * sizeof(IndexBulkDeleteResult *));

	nblocks = RelationGetNumberOfBlocks(onerel);
	vacrelstats->rel_pages = nblocks;
	vacrelstats->scanned_pages = 0;
	vacrelstats->tupcount_pages = 0;
	vacrelstats->nonempty_pages = 0;
	vacrelstats->latestRemovedXid = InvalidTransactionId;

	lazy_space_alloc(vacrelstats, nblocks);
	frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);

	/* Report that we're scanning the heap, advertising total # of blocks */
	initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
	initprog_val[1] = nblocks;
	initprog_val[2] = vacrelstats->max_dead_tuples;
	pgstat_progress_update_multi_param(3, initprog_index, initprog_val);

	/*
	 * Except when aggressive is set, we want to skip pages that are
	 * all-visible according to the visibility map, but only when we can skip
	 * at least SKIP_PAGES_THRESHOLD consecutive pages.  Since we're reading
	 * sequentially, the OS should be doing readahead for us, so there's no
	 * gain in skipping a page now and then; that's likely to disable
	 * readahead and so be counterproductive. Also, skipping even a single
	 * page means that we can't update relfrozenxid, so we only want to do it
	 * if we can skip a goodly number of pages.
	 *
	 * When aggressive is set, we can't skip pages just because they are
	 * all-visible, but we can still skip pages that are all-frozen, since
	 * such pages do not need freezing and do not affect the value that we can
	 * safely set for relfrozenxid or relminmxid.
	 *
	 * Before entering the main loop, establish the invariant that
	 * next_unskippable_block is the next block number >= blkno that we can't
	 * skip based on the visibility map, either all-visible for a regular scan
	 * or all-frozen for an aggressive scan.  We set it to nblocks if there's
	 * no such block.  We also set up the skipping_blocks flag correctly at
	 * this stage.
	 *
	 * Note: The value returned by visibilitymap_get_status could be slightly
	 * out-of-date, since we make this test before reading the corresponding
	 * heap page or locking the buffer.  This is OK.  If we mistakenly think
	 * that the page is all-visible or all-frozen when in fact the flag's just
	 * been cleared, we might fail to vacuum the page.  It's easy to see that
	 * skipping a page when aggressive is not set is not a very big deal; we
	 * might leave some dead tuples lying around, but the next vacuum will
	 * find them.  But even when aggressive *is* set, it's still OK if we miss
	 * a page whose all-frozen marking has just been cleared.  Any new XIDs
	 * just added to that page are necessarily newer than the GlobalXmin we
	 * computed, so they'll have no effect on the value to which we can safely
	 * set relfrozenxid.  A similar argument applies for MXIDs and relminmxid.
	 *
	 * We will scan the table's last page, at least to the extent of
	 * determining whether it has tuples or not, even if it should be skipped
	 * according to the above rules; except when we've already determined that
	 * it's not worth trying to truncate the table.  This avoids having
	 * lazy_truncate_heap() take access-exclusive lock on the table to attempt
	 * a truncation that just fails immediately because there are tuples in
	 * the last page.  This is worth avoiding mainly because such a lock must
	 * be replayed on any hot standby, where it can be disruptive.
	 */
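	/*
	 * (Illustrative consequence of the threshold: a run of fewer than
	 * SKIP_PAGES_THRESHOLD = 32 consecutive all-visible pages is always read
	 * anyway; only runs of at least that length can be skipped, which keeps
	 * OS readahead effective.)
	 */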
	next_unskippable_block = 0;
	if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
	{
		while (next_unskippable_block < nblocks)
		{
			uint8		vmstatus;

			vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
												&vmbuffer);
			if (aggressive)
			{
				if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
					break;
			}
			else
			{
				if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
					break;
			}
			vacuum_delay_point();
			next_unskippable_block++;
		}
	}

	if (next_unskippable_block >= SKIP_PAGES_THRESHOLD)
		skipping_blocks = true;
	else
		skipping_blocks = false;

	for (blkno = 0; blkno < nblocks; blkno++)
	{
		Buffer		buf;
		Page		page;
		OffsetNumber offnum,
					maxoff;
		bool		tupgone,
					hastup;
		int			prev_dead_count;
		int			nfrozen;
		Size		freespace;
		bool		all_visible_according_to_vm = false;
		bool		all_visible;
		bool		all_frozen = true;	/* provided all_visible is also true */
		bool		has_dead_tuples;
		TransactionId visibility_cutoff_xid = InvalidTransactionId;

		/* see note above about forcing scanning of last page */
#define FORCE_CHECK_PAGE() \
		(blkno == nblocks - 1 && should_attempt_truncation(vacrelstats))

		pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);

		if (blkno == next_unskippable_block)
		{
			/* Time to advance next_unskippable_block */
			next_unskippable_block++;
			if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
			{
				while (next_unskippable_block < nblocks)
				{
					uint8		vmskipflags;

					vmskipflags = visibilitymap_get_status(onerel,
														   next_unskippable_block,
														   &vmbuffer);
					if (aggressive)
					{
						if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
							break;
					}
					else
					{
						if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
							break;
					}
					vacuum_delay_point();
					next_unskippable_block++;
				}
			}

			/*
			 * We know we can't skip the current block.  But set up
			 * skipping_blocks to do the right thing at the following blocks.
			 */
			if (next_unskippable_block - blkno > SKIP_PAGES_THRESHOLD)
				skipping_blocks = true;
			else
				skipping_blocks = false;

			/*
			 * Normally, the fact that we can't skip this block must mean that
			 * it's not all-visible.  But in an aggressive vacuum we know only
			 * that it's not all-frozen, so it might still be all-visible.
			 */
			if (aggressive && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
				all_visible_according_to_vm = true;
		}
		else
		{
			/*
			 * The current block is potentially skippable; if we've seen a
			 * long enough run of skippable blocks to justify skipping it, and
			 * we're not forced to check it, then go ahead and skip.
			 * Otherwise, the page must be at least all-visible if not
			 * all-frozen, so we can set all_visible_according_to_vm = true.
			 */
			if (skipping_blocks && !FORCE_CHECK_PAGE())
			{
				/*
				 * Tricky, tricky.  If this is in aggressive vacuum, the page
				 * must have been all-frozen at the time we checked whether it
				 * was skippable, but it might not be any more.  We must be
				 * careful to count it as a skipped all-frozen page in that
				 * case, or else we'll think we can't update relfrozenxid and
				 * relminmxid.  If it's not an aggressive vacuum, we don't
				 * know whether it was all-frozen, so we have to recheck; but
				 * in this case an approximate answer is OK.
				 */
				if (aggressive || VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
					vacrelstats->frozenskipped_pages++;
				continue;
			}
			all_visible_according_to_vm = true;
		}

		vacuum_delay_point();

		/*
		 * If we are close to overrunning the available space for dead-tuple
		 * TIDs, pause and do a cycle of vacuuming before we tackle this page.
		 */
		if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
			vacrelstats->num_dead_tuples > 0)
		{
			const int	hvp_index[] = {
				PROGRESS_VACUUM_PHASE,
				PROGRESS_VACUUM_NUM_INDEX_VACUUMS
			};
			int64		hvp_val[2];

			/*
			 * Before beginning index vacuuming, we release any pin we may
			 * hold on the visibility map page.  This isn't necessary for
			 * correctness, but we do it anyway to avoid holding the pin
			 * across a lengthy, unrelated operation.
			 */
			if (BufferIsValid(vmbuffer))
			{
				ReleaseBuffer(vmbuffer);
				vmbuffer = InvalidBuffer;
			}

			/* Log cleanup info before we touch indexes */
			vacuum_log_cleanup_info(onerel, vacrelstats);

			/* Report that we are now vacuuming indexes */
			pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
										 PROGRESS_VACUUM_PHASE_VACUUM_INDEX);

			/* Remove index entries */
			for (i = 0; i < nindexes; i++)
				lazy_vacuum_index(Irel[i],
								  &indstats[i],
								  vacrelstats);

			/*
			 * Report that we are now vacuuming the heap.  We also increase
			 * the number of index scans here; note that by using
			 * pgstat_progress_update_multi_param we can update both
			 * parameters atomically.
			 */
			hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
			hvp_val[1] = vacrelstats->num_index_scans + 1;
			pgstat_progress_update_multi_param(2, hvp_index, hvp_val);

			/* Remove tuples from heap */
			lazy_vacuum_heap(onerel, vacrelstats);

			/*
			 * Forget the now-vacuumed tuples, and press on, but be careful
			 * not to reset latestRemovedXid since we want that value to be
			 * valid.
			 */
			vacrelstats->num_dead_tuples = 0;
			vacrelstats->num_index_scans++;

			/* Report that we are once again scanning the heap */
			pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
										 PROGRESS_VACUUM_PHASE_SCAN_HEAP);
		}

		/*
		 * Pin the visibility map page in case we need to mark the page
		 * all-visible.  In most cases this will be very cheap, because we'll
		 * already have the correct page pinned anyway.  However, it's
		 * possible that (a) next_unskippable_block is covered by a different
		 * VM page than the current block or (b) we released our pin and did a
		 * cycle of index vacuuming.
		 */
		visibilitymap_pin(onerel, blkno, &vmbuffer);

		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
								 RBM_NORMAL, vac_strategy);

		/* We need buffer cleanup lock so that we can prune HOT chains. */
		if (!ConditionalLockBufferForCleanup(buf))
		{
			/*
			 * If we're not performing an aggressive scan to guard against XID
			 * wraparound, and we don't want to forcibly check the page, then
			 * it's OK to skip vacuuming pages we get a lock conflict on. They
			 * will be dealt with in some future vacuum.
			 */
			if (!aggressive && !FORCE_CHECK_PAGE())
			{
				ReleaseBuffer(buf);
				vacrelstats->pinskipped_pages++;
				continue;
			}

			/*
			 * Read the page with share lock to see if any xids on it need to
			 * be frozen.  If not we just skip the page, after updating our
			 * scan statistics.  If there are some, we wait for cleanup lock.
			 *
			 * We could defer the lock request further by remembering the page
			 * and coming back to it later, or we could even register
			 * ourselves for multiple buffers and then service whichever one
			 * is received first.  For now, this seems good enough.
			 *
			 * If we get here with aggressive false, then we're just forcibly
			 * checking the page, and so we don't want to insist on getting
			 * the lock; we only need to know if the page contains tuples, so
			 * that we can update nonempty_pages correctly.  It's convenient
			 * to use lazy_check_needs_freeze() for both situations, though.
			 */
			LockBuffer(buf, BUFFER_LOCK_SHARE);
			if (!lazy_check_needs_freeze(buf, &hastup))
			{
				UnlockReleaseBuffer(buf);
				vacrelstats->scanned_pages++;
				vacrelstats->pinskipped_pages++;
				if (hastup)
					vacrelstats->nonempty_pages = blkno + 1;
				continue;
			}
			if (!aggressive)
			{
				/*
				 * Here, we must not advance scanned_pages; that would amount
				 * to claiming that the page contains no freezable tuples.
				 */
				UnlockReleaseBuffer(buf);
				vacrelstats->pinskipped_pages++;
				if (hastup)
					vacrelstats->nonempty_pages = blkno + 1;
				continue;
			}
			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
			LockBufferForCleanup(buf);
			/* drop through to normal processing */
		}

		vacrelstats->scanned_pages++;
		vacrelstats->tupcount_pages++;

		page = BufferGetPage(buf);

		if (PageIsNew(page))
		{
			/*
			 * An all-zeroes page could be left over if a backend extends the
			 * relation but crashes before initializing the page. Reclaim such
			 * pages for use.
			 *
			 * We have to be careful here because we could be looking at a
			 * page that someone has just added to the relation and not yet
			 * been able to initialize (see RelationGetBufferForTuple).  To
			 * protect against that, release the buffer lock, grab the
			 * relation extension lock momentarily, and re-lock the buffer. If
			 * the page is still uninitialized by then, it must be left over
			 * from a crashed backend, and we can initialize it.
			 *
			 * We don't really need the relation lock when this is a new or
			 * temp relation, but it's probably not worth the code space to
			 * check that, since this surely isn't a critical path.
			 *
			 * Note: the comparable code in vacuum.c need not worry because
			 * it's got exclusive lock on the whole relation.
			 */
			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
			LockRelationForExtension(onerel, ExclusiveLock);
			UnlockRelationForExtension(onerel, ExclusiveLock);
			LockBufferForCleanup(buf);
			if (PageIsNew(page))
			{
				ereport(WARNING,
						(errmsg("relation \"%s\" page %u is uninitialized --- fixing",
								relname, blkno)));
				PageInit(page, BufferGetPageSize(buf), 0);
				empty_pages++;
			}
			freespace = PageGetHeapFreeSpace(page);
			MarkBufferDirty(buf);
			UnlockReleaseBuffer(buf);

			RecordPageWithFreeSpace(onerel, blkno, freespace);
			continue;
		}

		if (PageIsEmpty(page))
		{
			empty_pages++;
			freespace = PageGetHeapFreeSpace(page);

			/* empty pages are always all-visible and all-frozen */
			if (!PageIsAllVisible(page))
			{
				START_CRIT_SECTION();

				/* mark buffer dirty before writing a WAL record */
				MarkBufferDirty(buf);

				/*
				 * It's possible that another backend has extended the heap,
				 * initialized the page, and then failed to WAL-log the page
				 * due to an ERROR.  Since heap extension is not WAL-logged,
				 * recovery might try to replay our record setting the page
				 * all-visible and find that the page isn't initialized, which
				 * will cause a PANIC.  To prevent that, check whether the
				 * page has been previously WAL-logged, and if not, do that
				 * now.
				 */
				if (RelationNeedsWAL(onerel) &&
					PageGetLSN(page) == InvalidXLogRecPtr)
					log_newpage_buffer(buf, true);

				PageSetAllVisible(page);
				visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
								  vmbuffer, InvalidTransactionId,
								  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
				END_CRIT_SECTION();
			}

			UnlockReleaseBuffer(buf);
			RecordPageWithFreeSpace(onerel, blkno, freespace);
			continue;
		}

		/*
		 * Prune all HOT-update chains in this page.
		 *
		 * We count tuples removed by the pruning step as removed by VACUUM.
		 */
		tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
										 &vacrelstats->latestRemovedXid);

		/*
		 * Now scan the page to collect vacuumable items and check for tuples
		 * requiring freezing.
		 */
		all_visible = true;
		has_dead_tuples = false;
		nfrozen = 0;
		hastup = false;
		prev_dead_count = vacrelstats->num_dead_tuples;
		maxoff = PageGetMaxOffsetNumber(page);

		/*
		 * Note: If you change anything in the loop below, also look at
		 * heap_page_is_all_visible to see if that needs to be changed.
		 */
		for (offnum = FirstOffsetNumber;
			 offnum <= maxoff;
			 offnum = OffsetNumberNext(offnum))
		{
			ItemId		itemid;

			itemid = PageGetItemId(page, offnum);

			/* Unused items require no processing, but we count 'em */
			if (!ItemIdIsUsed(itemid))
			{
				nunused += 1;
				continue;
			}

			/* Redirect items mustn't be touched */
			if (ItemIdIsRedirected(itemid))
			{
				hastup = true;	/* this page won't be truncatable */
				continue;
			}

			ItemPointerSet(&(tuple.t_self), blkno, offnum);

			/*
			 * DEAD item pointers are to be vacuumed normally; but we don't
			 * count them in tups_vacuumed, else we'd be double-counting (at
			 * least in the common case where heap_page_prune() just freed up
			 * a non-HOT tuple).
			 */
			if (ItemIdIsDead(itemid))
			{
				lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
				all_visible = false;
				continue;
			}

			Assert(ItemIdIsNormal(itemid));

			tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
			tuple.t_len = ItemIdGetLength(itemid);
			tuple.t_tableOid = RelationGetRelid(onerel);

			tupgone = false;

			switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
			{
				case HEAPTUPLE_DEAD:

					/*
					 * Ordinarily, DEAD tuples would have been removed by
					 * heap_page_prune(), but it's possible that the tuple
					 * state changed since heap_page_prune() looked.  In
					 * particular an INSERT_IN_PROGRESS tuple could have
					 * changed to DEAD if the inserter aborted.  So this
					 * cannot be considered an error condition.
					 *
					 * If the tuple is HOT-updated then it must only be
					 * removed by a prune operation; so we keep it just as if
					 * it were RECENTLY_DEAD.  Also, if it's a heap-only
					 * tuple, we choose to keep it, because it'll be a lot
					 * cheaper to get rid of it in the next pruning pass than
					 * to treat it like an indexed tuple.
					 *
					 * If this were to happen for a tuple that actually needed
					 * to be deleted, we'd be in trouble, because it'd
					 * possibly leave a tuple below the relation's xmin
					 * horizon alive.  heap_prepare_freeze_tuple() is prepared
					 * to detect that case and abort the transaction,
					 * preventing corruption.
					 */
					if (HeapTupleIsHotUpdated(&tuple) ||
						HeapTupleIsHeapOnly(&tuple))
						nkeep += 1;
					else
						tupgone = true; /* we can delete the tuple */
					all_visible = false;
					break;
				case HEAPTUPLE_LIVE:
					/* Tuple is good --- but let's do some validity checks */
					if (onerel->rd_rel->relhasoids &&
						!OidIsValid(HeapTupleGetOid(&tuple)))
						elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
							 relname, blkno, offnum);

					/*
					 * Is the tuple definitely visible to all transactions?
					 *
					 * NB: Like with per-tuple hint bits, we can't set the
					 * PD_ALL_VISIBLE flag if the inserter committed
					 * asynchronously. See SetHintBits for more info. Check
					 * that the tuple is hinted xmin-committed because of
					 * that.
					 */
					if (all_visible)
					{
						TransactionId xmin;

						if (!HeapTupleHeaderXminCommitted(tuple.t_data))
						{
							all_visible = false;
							break;
						}

						/*
						 * The inserter definitely committed.  But is it old
						 * enough that everyone sees it as committed?
						 */
						xmin = HeapTupleHeaderGetXmin(tuple.t_data);
						if (!TransactionIdPrecedes(xmin, OldestXmin))
						{
							all_visible = false;
							break;
						}

						/* Track newest xmin on page. */
						if (TransactionIdFollows(xmin, visibility_cutoff_xid))
							visibility_cutoff_xid = xmin;
					}
					break;
				case HEAPTUPLE_RECENTLY_DEAD:

					/*
					 * If tuple is recently deleted then we must not remove it
					 * from relation.
					 */
					nkeep += 1;
					all_visible = false;
					break;
				case HEAPTUPLE_INSERT_IN_PROGRESS:
					/* This is an expected case during concurrent vacuum */
					all_visible = false;
					break;
				case HEAPTUPLE_DELETE_IN_PROGRESS:
					/* This is an expected case during concurrent vacuum */
					all_visible = false;
					break;
				default:
					elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
					break;
			}

			if (tupgone)
			{
				lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
				HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
													   &vacrelstats->latestRemovedXid);
				tups_vacuumed += 1;
				has_dead_tuples = true;
			}
			else
			{
				bool		tuple_totally_frozen;

				num_tuples += 1;
				hastup = true;

				/*
				 * Each non-removable tuple must be checked to see if it needs
				 * freezing.  Note we already have exclusive buffer lock.
				 */
				if (heap_prepare_freeze_tuple(tuple.t_data,
											  relfrozenxid, relminmxid,
											  FreezeLimit, MultiXactCutoff,
											  &frozen[nfrozen],
											  &tuple_totally_frozen))
					frozen[nfrozen++].offset = offnum;

				if (!tuple_totally_frozen)
					all_frozen = false;
			}
		}						/* scan along page */

		/*
		 * If we froze any tuples, mark the buffer dirty, and write a WAL
		 * record recording the changes.  We must log the changes to be
		 * crash-safe against future truncation of CLOG.
		 */
		if (nfrozen > 0)
		{
			START_CRIT_SECTION();

			MarkBufferDirty(buf);

			/* execute collected freezes */
			for (i = 0; i < nfrozen; i++)
			{
				ItemId		itemid;
				HeapTupleHeader htup;

				itemid = PageGetItemId(page, frozen[i].offset);
				htup = (HeapTupleHeader) PageGetItem(page, itemid);

				heap_execute_freeze_tuple(htup, &frozen[i]);
			}

			/* Now WAL-log freezing if necessary */
			if (RelationNeedsWAL(onerel))
			{
				XLogRecPtr	recptr;

				recptr = log_heap_freeze(onerel, buf, FreezeLimit,
										 frozen, nfrozen);
				PageSetLSN(page, recptr);
			}

			END_CRIT_SECTION();
		}

		/*
		 * If there are no indexes then we can vacuum the page right now
		 * instead of doing a second scan.
		 */
		if (nindexes == 0 &&
			vacrelstats->num_dead_tuples > 0)
		{
			/* Remove tuples from heap */
			lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
			has_dead_tuples = false;

			/*
			 * Forget the now-vacuumed tuples, and press on, but be careful
			 * not to reset latestRemovedXid since we want that value to be
			 * valid.
			 */
			vacrelstats->num_dead_tuples = 0;
			vacuumed_pages++;
		}

		freespace = PageGetHeapFreeSpace(page);

		/* mark page all-visible, if appropriate */
		if (all_visible && !all_visible_according_to_vm)
		{
			uint8		flags = VISIBILITYMAP_ALL_VISIBLE;

			if (all_frozen)
				flags |= VISIBILITYMAP_ALL_FROZEN;

			/*
			 * It should never be the case that the visibility map page is set
			 * while the page-level bit is clear, but the reverse is allowed
			 * (if checksums are not enabled).  Regardless, set both bits
			 * so that we get back in sync.
			 *
			 * NB: If the heap page is all-visible but the VM bit is not set,
			 * we don't need to dirty the heap page.  However, if checksums
			 * are enabled, we do need to make sure that the heap page is
			 * dirtied before passing it to visibilitymap_set(), because it
			 * may be logged.  Given that this situation should only happen in
			 * rare cases after a crash, it is not worth optimizing.
			 */
			PageSetAllVisible(page);
			MarkBufferDirty(buf);
			visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
							  vmbuffer, visibility_cutoff_xid, flags);
		}

		/*
		 * As of PostgreSQL 9.2, the visibility map bit should never be set if
		 * the page-level bit is clear.  However, it's possible that the bit
		 * got cleared after we checked it and before we took the buffer
		 * content lock, so we must recheck before jumping to the conclusion
		 * that something bad has happened.
		 */
		else if (all_visible_according_to_vm && !PageIsAllVisible(page)
				 && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
		{
			elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
				 relname, blkno);
			visibilitymap_clear(onerel, blkno, vmbuffer,
								VISIBILITYMAP_VALID_BITS);
		}

		/*
		 * It's possible for the value returned by GetOldestXmin() to move
		 * backwards, so it's not wrong for us to see tuples that appear to
		 * not be visible to everyone yet, while PD_ALL_VISIBLE is already
		 * set. The real safe xmin value never moves backwards, but
		 * GetOldestXmin() is conservative and sometimes returns a value
		 * that's unnecessarily small, so if we see that contradiction it just
		 * means that the tuples that we think are not visible to everyone yet
		 * actually are, and the PD_ALL_VISIBLE flag is correct.
		 *
		 * There should never be dead tuples on a page with PD_ALL_VISIBLE
		 * set, however.
		 */
		else if (PageIsAllVisible(page) && has_dead_tuples)
		{
			elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
				 relname, blkno);
			PageClearAllVisible(page);
			MarkBufferDirty(buf);
			visibilitymap_clear(onerel, blkno, vmbuffer,
								VISIBILITYMAP_VALID_BITS);
		}

		/*
		 * If the all-visible page turns out to be all-frozen but not marked
		 * as such, mark it now.  Note that all_frozen is only valid
		 * if all_visible is true, so we must check both.
		 */
		else if (all_visible_according_to_vm && all_visible && all_frozen &&
				 !VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
		{
			/*
			 * We can pass InvalidTransactionId as the cutoff XID here,
			 * because setting the all-frozen bit doesn't cause recovery
			 * conflicts.
			 */
			visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
							  vmbuffer, InvalidTransactionId,
							  VISIBILITYMAP_ALL_FROZEN);
		}

		UnlockReleaseBuffer(buf);

		/* Remember the location of the last page with nonremovable tuples */
		if (hastup)
			vacrelstats->nonempty_pages = blkno + 1;

		/*
		 * If we remembered any tuples for deletion, then the page will be
		 * visited again by lazy_vacuum_heap, which will compute and record
		 * its post-compaction free space.  If not, then we're done with this
		 * page, so remember its free space as-is.  (This path will always be
		 * taken if there are no indexes.)
		 */
		if (vacrelstats->num_dead_tuples == prev_dead_count)
			RecordPageWithFreeSpace(onerel, blkno, freespace);
	}

	/* report that everything is scanned and vacuumed */
	pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);

	pfree(frozen);

	/* save stats for use later */
	vacrelstats->scanned_tuples = num_tuples;
	vacrelstats->tuples_deleted = tups_vacuumed;
	vacrelstats->new_dead_tuples = nkeep;

	/* now we can compute the new value for pg_class.reltuples */
	vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
														 nblocks,
														 vacrelstats->tupcount_pages,
														 num_tuples);

	/*
	 * Release any remaining pin on visibility map page.
	 */
	if (BufferIsValid(vmbuffer))
	{
		ReleaseBuffer(vmbuffer);
		vmbuffer = InvalidBuffer;
	}

	/* If any tuples need to be deleted, perform final vacuum cycle */
	/* XXX put a threshold on min number of tuples here? */
	if (vacrelstats->num_dead_tuples > 0)
	{
		const int	hvp_index[] = {
			PROGRESS_VACUUM_PHASE,
			PROGRESS_VACUUM_NUM_INDEX_VACUUMS
		};
		int64		hvp_val[2];

		/* Log cleanup info before we touch indexes */
		vacuum_log_cleanup_info(onerel, vacrelstats);

		/* Report that we are now vacuuming indexes */
		pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
									 PROGRESS_VACUUM_PHASE_VACUUM_INDEX);

		/* Remove index entries */
		for (i = 0; i < nindexes; i++)
			lazy_vacuum_index(Irel[i],
							  &indstats[i],
							  vacrelstats);

		/* Report that we are now vacuuming the heap */
		hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
		hvp_val[1] = vacrelstats->num_index_scans + 1;
		pgstat_progress_update_multi_param(2, hvp_index, hvp_val);

		/* Remove tuples from heap */
		pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
									 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
		lazy_vacuum_heap(onerel, vacrelstats);
		vacrelstats->num_index_scans++;
	}

	/* report all blocks vacuumed; and that we're cleaning up */
	pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
								 PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);

	/* Do post-vacuum cleanup and statistics update for each index */
	for (i = 0; i < nindexes; i++)
		lazy_cleanup_index(Irel[i], indstats[i], vacrelstats);

	/* If no indexes, make log report that lazy_vacuum_heap would've made */
	if (vacuumed_pages)
		ereport(elevel,
				(errmsg("\"%s\": removed %.0f row versions in %u pages",
						RelationGetRelationName(onerel),
						tups_vacuumed, vacuumed_pages)));

	/*
	 * This is pretty messy, but we split it up so that we can skip emitting
	 * individual parts of the message when not applicable.
	 */
	initStringInfo(&buf);
	appendStringInfo(&buf,
					 _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
					 nkeep, OldestXmin);
	appendStringInfo(&buf, _("There were %.0f unused item pointers.\n"),
					 nunused);
	appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
									"Skipped %u pages due to buffer pins, ",
									vacrelstats->pinskipped_pages),
					 vacrelstats->pinskipped_pages);
	appendStringInfo(&buf, ngettext("%u frozen page.\n",
									"%u frozen pages.\n",
									vacrelstats->frozenskipped_pages),
					 vacrelstats->frozenskipped_pages);
	appendStringInfo(&buf, ngettext("%u page is entirely empty.\n",
									"%u pages are entirely empty.\n",
									empty_pages),
					 empty_pages);
	appendStringInfo(&buf, _("%s."), pg_rusage_show(&ru0));

	ereport(elevel,
			(errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
					RelationGetRelationName(onerel),
					tups_vacuumed, num_tuples,
					vacrelstats->scanned_pages, nblocks),
			 errdetail_internal("%s", buf.data)));
	pfree(buf.data);
}


/*
 *	lazy_vacuum_heap() -- second pass over the heap
 *
 *		This routine marks dead tuples as unused and compacts out free
 *		space on their pages.  Pages not having dead tuples recorded from
 *		lazy_scan_heap are not visited at all.
 *
 * Note: the reason for doing this as a second pass is we cannot remove
 * the tuples until we've removed their index entries, and we want to
 * process index entry removal in batches as large as possible.
 */
static void
lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
{
	int			tupindex;
	int			npages;
	PGRUsage	ru0;
	Buffer		vmbuffer = InvalidBuffer;

	pg_rusage_init(&ru0);
	npages = 0;

	tupindex = 0;
	while (tupindex < vacrelstats->num_dead_tuples)
	{
		BlockNumber tblk;
		Buffer		buf;
		Page		page;
		Size		freespace;

		vacuum_delay_point();

		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
								 vac_strategy);
		if (!ConditionalLockBufferForCleanup(buf))
		{
			ReleaseBuffer(buf);
			++tupindex;
			continue;
		}
		tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
									&vmbuffer);

		/* Now that we've compacted the page, record its available space */
		page = BufferGetPage(buf);
		freespace = PageGetHeapFreeSpace(page);

		UnlockReleaseBuffer(buf);
		RecordPageWithFreeSpace(onerel, tblk, freespace);
		npages++;
	}

	if (BufferIsValid(vmbuffer))
	{
		ReleaseBuffer(vmbuffer);
		vmbuffer = InvalidBuffer;
	}

	ereport(elevel,
			(errmsg("\"%s\": removed %d row versions in %d pages",
					RelationGetRelationName(onerel),
					tupindex, npages),
			 errdetail_internal("%s", pg_rusage_show(&ru0))));
}

/*
 *	lazy_vacuum_page() -- free dead tuples on a page
 *					 and repair its fragmentation.
 *
 * Caller must hold pin and buffer cleanup lock on the buffer.
 *
 * tupindex is the index in vacrelstats->dead_tuples of the first dead
 * tuple for this page.  We assume the rest follow sequentially.
 * The return value is the first tupindex after the tuples of this page.
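 *
 * (For example, if dead_tuples holds TIDs (5,1), (5,3), (6,2) and we are
 * called for block 5 with tupindex 0, offsets 1 and 3 are marked unused
 * and 2 is returned.)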
 */
static int
lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
				 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
{
	Page		page = BufferGetPage(buffer);
	OffsetNumber unused[MaxOffsetNumber];
	int			uncnt = 0;
	TransactionId visibility_cutoff_xid;
	bool		all_frozen;

	pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);

	START_CRIT_SECTION();

	for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
	{
		BlockNumber tblk;
		OffsetNumber toff;
		ItemId		itemid;

		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
		if (tblk != blkno)
			break;				/* past end of tuples for this block */
		toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
		itemid = PageGetItemId(page, toff);
		ItemIdSetUnused(itemid);
		unused[uncnt++] = toff;
	}

	PageRepairFragmentation(page);

	/*
	 * Mark buffer dirty before we write WAL.
	 */
	MarkBufferDirty(buffer);

	/* XLOG stuff */
	if (RelationNeedsWAL(onerel))
	{
		XLogRecPtr	recptr;

		recptr = log_heap_clean(onerel, buffer,
								NULL, 0, NULL, 0,
								unused, uncnt,
								vacrelstats->latestRemovedXid);
		PageSetLSN(page, recptr);
	}

	/*
	 * End critical section, so we safely can do visibility tests (which
	 * possibly need to perform IO and allocate memory!). If we crash now the
	 * page (including the corresponding vm bit) might not be marked all
	 * visible, but that's fine. A later vacuum will fix that.
	 */
	END_CRIT_SECTION();

	/*
	 * Now that we have removed the dead tuples from the page, once again
	 * check if the page has become all-visible.  The page is already marked
	 * dirty, exclusively locked, and, if needed, a full page image has been
	 * emitted in the log_heap_clean() above.
	 */
	if (heap_page_is_all_visible(onerel, buffer, &visibility_cutoff_xid,
								 &all_frozen))
		PageSetAllVisible(page);

	/*
	 * All the changes to the heap page have been done. If the all-visible
	 * flag is now set, also set the VM all-visible bit (and, if possible, the
	 * all-frozen bit) unless this has already been done previously.
	 */
	if (PageIsAllVisible(page))
	{
		uint8		vm_status = visibilitymap_get_status(onerel, blkno, vmbuffer);
		uint8		flags = 0;

		/* Work out which VM bits still need to be set, if any */
1530 if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
1531 flags |= VISIBILITYMAP_ALL_VISIBLE;
1532 if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0 && all_frozen)
1533 flags |= VISIBILITYMAP_ALL_FROZEN;
1534
1535 Assert(BufferIsValid(*vmbuffer));
1536 if (flags != 0)
1537 visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr,
1538 *vmbuffer, visibility_cutoff_xid, flags);
1539 }
1540
1541 return tupindex;
1542 }
1543
1544 /*
1545 * lazy_check_needs_freeze() -- scan page to see if any tuples
1546 * need to be cleaned to avoid wraparound
1547 *
1548 * Returns true if the page needs to be vacuumed using cleanup lock.
1549 * Also returns a flag indicating whether page contains any tuples at all.
1550 */
1551 static bool
lazy_check_needs_freeze(Buffer buf,bool * hastup)1552 lazy_check_needs_freeze(Buffer buf, bool *hastup)
1553 {
1554 Page page = BufferGetPage(buf);
1555 OffsetNumber offnum,
1556 maxoff;
1557 HeapTupleHeader tupleheader;
1558
1559 *hastup = false;
1560
1561 /* If we hit an uninitialized page, we want to force vacuuming it. */
1562 if (PageIsNew(page))
1563 return true;
1564
1565 /* Quick out for ordinary empty page. */
1566 if (PageIsEmpty(page))
1567 return false;
1568
1569 maxoff = PageGetMaxOffsetNumber(page);
1570 for (offnum = FirstOffsetNumber;
1571 offnum <= maxoff;
1572 offnum = OffsetNumberNext(offnum))
1573 {
1574 ItemId itemid;
1575
1576 itemid = PageGetItemId(page, offnum);
1577
1578 /* this should match hastup test in count_nondeletable_pages() */
1579 if (ItemIdIsUsed(itemid))
1580 *hastup = true;
1581
1582 /* dead and redirect items never need freezing */
1583 if (!ItemIdIsNormal(itemid))
1584 continue;
1585
1586 tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
1587
1588 if (heap_tuple_needs_freeze(tupleheader, FreezeLimit,
1589 MultiXactCutoff, buf))
1590 return true;
1591 } /* scan along page */
1592
1593 return false;
1594 }
1595
1596
1597 /*
1598 * lazy_vacuum_index() -- vacuum one index relation.
1599 *
1600 * Delete all the index entries pointing to tuples listed in
1601 * vacrelstats->dead_tuples, and update running statistics.
1602 */
1603 static void
lazy_vacuum_index(Relation indrel,IndexBulkDeleteResult ** stats,LVRelStats * vacrelstats)1604 lazy_vacuum_index(Relation indrel,
1605 IndexBulkDeleteResult **stats,
1606 LVRelStats *vacrelstats)
1607 {
1608 IndexVacuumInfo ivinfo;
1609 PGRUsage ru0;
1610
1611 pg_rusage_init(&ru0);
1612
1613 ivinfo.index = indrel;
1614 ivinfo.analyze_only = false;
1615 ivinfo.estimated_count = true;
1616 ivinfo.message_level = elevel;
1617 ivinfo.num_heap_tuples = vacrelstats->old_rel_tuples;
1618 ivinfo.strategy = vac_strategy;
1619
1620 /* Do bulk deletion */
1621 *stats = index_bulk_delete(&ivinfo, *stats,
1622 lazy_tid_reaped, (void *) vacrelstats);
1623
1624 ereport(elevel,
1625 (errmsg("scanned index \"%s\" to remove %d row versions",
1626 RelationGetRelationName(indrel),
1627 vacrelstats->num_dead_tuples),
1628 errdetail_internal("%s", pg_rusage_show(&ru0))));
1629 }

/*
 *	lazy_cleanup_index() -- do post-vacuum cleanup for one index relation.
 */
static void
lazy_cleanup_index(Relation indrel,
				   IndexBulkDeleteResult *stats,
				   LVRelStats *vacrelstats)
{
	IndexVacuumInfo ivinfo;
	PGRUsage	ru0;

	pg_rusage_init(&ru0);

	ivinfo.index = indrel;
	ivinfo.analyze_only = false;
	ivinfo.estimated_count = (vacrelstats->tupcount_pages < vacrelstats->rel_pages);
	ivinfo.message_level = elevel;
	ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples;
	ivinfo.strategy = vac_strategy;

	stats = index_vacuum_cleanup(&ivinfo, stats);

	if (!stats)
		return;

	/*
	 * Now update statistics in pg_class, but only if the index says the count
	 * is accurate.
	 */
	if (!stats->estimated_count)
		vac_update_relstats(indrel,
							stats->num_pages,
							stats->num_index_tuples,
							0,
							false,
							InvalidTransactionId,
							InvalidMultiXactId,
							false);

	ereport(elevel,
			(errmsg("index \"%s\" now contains %.0f row versions in %u pages",
					RelationGetRelationName(indrel),
					stats->num_index_tuples,
					stats->num_pages),
			 errdetail("%.0f index row versions were removed.\n"
					   "%u index pages have been deleted, %u are currently reusable.\n"
					   "%s.",
					   stats->tuples_removed,
					   stats->pages_deleted, stats->pages_free,
					   pg_rusage_show(&ru0))));

	pfree(stats);
}

/*
 * should_attempt_truncation - should we attempt to truncate the heap?
 *
 * Don't even think about it unless we have a shot at releasing a goodly
 * number of pages. Otherwise, the time taken isn't worth it.
 *
 * Also don't attempt it if we are doing early pruning/vacuuming, because a
 * scan which cannot find a truncated heap page cannot determine that the
 * snapshot is too old to read that page. We might be able to get away with
 * truncating all except one of the pages, setting its LSN to (at least) the
 * maximum of the truncated range if we also treated an index leaf tuple
 * pointing to a missing heap page as something to trigger the "snapshot too
 * old" error, but that seems fragile and seems like it deserves its own patch
 * if we consider it.
 *
 * This is split out so that we can test whether truncation is going to be
 * called for before we actually do it. If you change the logic here, be
 * careful to depend only on fields that lazy_scan_heap updates on-the-fly.
 */
static bool
should_attempt_truncation(LVRelStats *vacrelstats)
{
	BlockNumber possibly_freeable;

	possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
	if (possibly_freeable > 0 &&
		(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
		 possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
		old_snapshot_threshold < 0)
		return true;
	else
		return false;
}
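
/*
 * A worked example of the thresholds above: for a 8192-page table,
 * REL_TRUNCATE_MINIMUM is 1000 and rel_pages / REL_TRUNCATE_FRACTION is
 * 8192 / 16 = 512, so the lesser bound applies and truncation is attempted
 * only if at least 512 pages at the end of the relation look freeable
 * (and we are not doing early pruning, i.e. old_snapshot_threshold < 0).
 */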

/*
 * lazy_truncate_heap - try to truncate off any empty pages at the end
 */
static void
lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
{
	BlockNumber old_rel_pages = vacrelstats->rel_pages;
	BlockNumber new_rel_pages;
	int			lock_retry;

	/* Report that we are now truncating */
	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
								 PROGRESS_VACUUM_PHASE_TRUNCATE);

	/*
	 * Loop until no more truncating can be done.
	 */
	do
	{
		PGRUsage	ru0;

		pg_rusage_init(&ru0);

		/*
		 * We need full exclusive lock on the relation in order to do
		 * truncation. If we can't get it, give up rather than waiting --- we
		 * don't want to block other backends, and we don't want to deadlock
		 * (which is quite possible considering we already hold a lower-grade
		 * lock).
		 */
		vacrelstats->lock_waiter_detected = false;
		lock_retry = 0;
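		/*
		 * With the settings defined near the top of this file, the retry
		 * loop below gives up after VACUUM_TRUNCATE_LOCK_TIMEOUT /
		 * VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL = 5000 / 50 = 100 failed
		 * attempts, i.e. after waiting roughly five seconds in total.
		 */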
		while (true)
		{
			if (ConditionalLockRelation(onerel, AccessExclusiveLock))
				break;

			/*
			 * Check for interrupts while trying to (re-)acquire the exclusive
			 * lock.
			 */
			CHECK_FOR_INTERRUPTS();

			if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
								VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
			{
				/*
				 * We failed to establish the lock in the specified number of
				 * retries. This means we give up truncating.
				 */
				vacrelstats->lock_waiter_detected = true;
				ereport(elevel,
						(errmsg("\"%s\": stopping truncate due to conflicting lock request",
								RelationGetRelationName(onerel))));
				return;
			}

			pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL * 1000L);
		}

		/*
		 * Now that we have exclusive lock, look to see if the rel has grown
		 * whilst we were vacuuming with non-exclusive lock. If so, give up;
		 * the newly added pages presumably contain non-deletable tuples.
		 */
		new_rel_pages = RelationGetNumberOfBlocks(onerel);
		if (new_rel_pages != old_rel_pages)
		{
			/*
			 * Note: we intentionally don't update vacrelstats->rel_pages with
			 * the new rel size here. If we did, it would amount to assuming
			 * that the new pages are empty, which is unlikely. Leaving the
			 * numbers alone amounts to assuming that the new pages have the
			 * same tuple density as existing ones, which is less unlikely.
			 */
			UnlockRelation(onerel, AccessExclusiveLock);
			return;
		}

		/*
		 * Scan backwards from the end to verify that the end pages actually
		 * contain no tuples. This is *necessary*, not optional, because
		 * other backends could have added tuples to these pages whilst we
		 * were vacuuming.
		 */
		new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);

		if (new_rel_pages >= old_rel_pages)
		{
			/* can't do anything after all */
			UnlockRelation(onerel, AccessExclusiveLock);
			return;
		}

		/*
		 * Okay to truncate.
		 */
		RelationTruncate(onerel, new_rel_pages);

		/*
		 * We can release the exclusive lock as soon as we have truncated.
		 * Other backends can't safely access the relation until they have
		 * processed the smgr invalidation that smgrtruncate sent out ... but
		 * that should happen as part of standard invalidation processing once
		 * they acquire lock on the relation.
		 */
		UnlockRelation(onerel, AccessExclusiveLock);

		/*
		 * Update statistics. Here, it *is* correct to adjust rel_pages
		 * without also touching reltuples, since the tuple count wasn't
		 * changed by the truncation.
		 */
		vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
		vacrelstats->rel_pages = new_rel_pages;

		ereport(elevel,
				(errmsg("\"%s\": truncated %u to %u pages",
						RelationGetRelationName(onerel),
						old_rel_pages, new_rel_pages),
				 errdetail_internal("%s",
									pg_rusage_show(&ru0))));
		old_rel_pages = new_rel_pages;
	} while (new_rel_pages > vacrelstats->nonempty_pages &&
			 vacrelstats->lock_waiter_detected);
}

/*
 * Rescan end pages to verify that they are (still) empty of tuples.
 *
 * Returns number of nondeletable pages (last nonempty page + 1).
 */
static BlockNumber
count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
{
	BlockNumber blkno;
	BlockNumber prefetchedUntil;
	instr_time	starttime;

	/* Initialize the starttime if we check for conflicting lock requests */
	INSTR_TIME_SET_CURRENT(starttime);

	/*
	 * Start checking blocks at what we believe relation end to be and move
	 * backwards. (Strange coding of loop control is needed because blkno is
	 * unsigned.) To make the scan faster, we prefetch a few blocks at a time
	 * in forward direction, so that OS-level readahead can kick in.
	 */
	blkno = vacrelstats->rel_pages;
	StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
					 "prefetch size must be power of 2");
	prefetchedUntil = InvalidBlockNumber;
	while (blkno > vacrelstats->nonempty_pages)
	{
		Buffer		buf;
		Page		page;
		OffsetNumber offnum,
					maxoff;
		bool		hastup;

		/*
		 * Check if another process requests a lock on our relation. We are
		 * holding an AccessExclusiveLock here, so they will be waiting. We
		 * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
		 * only check if that interval has elapsed once every 32 blocks to
		 * keep the number of system calls and actual shared lock table
		 * lookups to a minimum.
		 */
		if ((blkno % 32) == 0)
		{
			instr_time	currenttime;
			instr_time	elapsed;

			INSTR_TIME_SET_CURRENT(currenttime);
			elapsed = currenttime;
			INSTR_TIME_SUBTRACT(elapsed, starttime);
			if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
				>= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
			{
				if (LockHasWaitersRelation(onerel, AccessExclusiveLock))
				{
					ereport(elevel,
							(errmsg("\"%s\": suspending truncate due to conflicting lock request",
									RelationGetRelationName(onerel))));

					vacrelstats->lock_waiter_detected = true;
					return blkno;
				}
				starttime = currenttime;
			}
		}

		/*
		 * We don't insert a vacuum delay point here, because we have an
		 * exclusive lock on the table which we want to hold for as short a
		 * time as possible. We still need to check for interrupts however.
		 */
		CHECK_FOR_INTERRUPTS();

		blkno--;

		/* If we haven't prefetched this lot yet, do so now. */
		if (prefetchedUntil > blkno)
		{
			BlockNumber prefetchStart;
			BlockNumber pblkno;

			prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
			for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
			{
				PrefetchBuffer(onerel, MAIN_FORKNUM, pblkno);
				CHECK_FOR_INTERRUPTS();
			}
			prefetchedUntil = prefetchStart;
		}
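		/*
		 * The masking above rounds blkno down to a PREFETCH_SIZE boundary.
		 * For example, assuming PREFETCH_SIZE is 32 (it is defined earlier
		 * in this file; the static assertion above requires it to be a
		 * power of two), blkno = 75 gives prefetchStart = 64, so blocks
		 * 64..75 are prefetched as one aligned chunk.
		 */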

		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
								 RBM_NORMAL, vac_strategy);

		/* In this phase we only need shared access to the buffer */
		LockBuffer(buf, BUFFER_LOCK_SHARE);

		page = BufferGetPage(buf);

		if (PageIsNew(page) || PageIsEmpty(page))
		{
			/* PageIsNew probably shouldn't happen... */
			UnlockReleaseBuffer(buf);
			continue;
		}

		hastup = false;
		maxoff = PageGetMaxOffsetNumber(page);
		for (offnum = FirstOffsetNumber;
			 offnum <= maxoff;
			 offnum = OffsetNumberNext(offnum))
		{
			ItemId		itemid;

			itemid = PageGetItemId(page, offnum);

			/*
			 * Note: any non-unused item should be taken as a reason to keep
			 * this page. We formerly thought that DEAD tuples could be
			 * thrown away, but that's not so, because we'd not have cleaned
			 * out their index entries.
			 */
			if (ItemIdIsUsed(itemid))
			{
				hastup = true;
				break;			/* can stop scanning */
			}
		}						/* scan along page */

		UnlockReleaseBuffer(buf);

		/* Done scanning if we found a tuple here */
		if (hastup)
			return blkno + 1;
	}

	/*
	 * If we fall out of the loop, all the previously-thought-to-be-empty
	 * pages still are; we need not bother to look at the last known-nonempty
	 * page.
	 */
	return vacrelstats->nonempty_pages;
}

/*
 * lazy_space_alloc - space allocation decisions for lazy vacuum
 *
 * See the comments at the head of this file for rationale.
 */
static void
lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
{
	long		maxtuples;
	int			vac_work_mem = IsAutoVacuumWorkerProcess() &&
		autovacuum_work_mem != -1 ?
		autovacuum_work_mem : maintenance_work_mem;

	if (vacrelstats->hasindex)
	{
		maxtuples = (vac_work_mem * 1024L) / sizeof(ItemPointerData);
		maxtuples = Min(maxtuples, INT_MAX);
		maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));

		/* curious coding here to ensure the multiplication can't overflow */
		if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
			maxtuples = relblocks * LAZY_ALLOC_TUPLES;

		/* stay sane if small maintenance_work_mem */
		maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
	}
	else
	{
		maxtuples = MaxHeapTuplesPerPage;
	}

	vacrelstats->num_dead_tuples = 0;
	vacrelstats->max_dead_tuples = (int) maxtuples;
	vacrelstats->dead_tuples = (ItemPointer)
		palloc(maxtuples * sizeof(ItemPointerData));
}
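
/*
 * A worked example of the sizing above (a sketch, assuming the common
 * 6-byte ItemPointerData): with maintenance_work_mem set to 64MB,
 * vac_work_mem is 65536 kB, so maxtuples starts at 65536 * 1024 / 6,
 * about 11.2 million TIDs.  The relblocks clamp then ensures a small
 * table never allocates more than LAZY_ALLOC_TUPLES entries per heap
 * page it actually has.
 */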

/*
 * lazy_record_dead_tuple - remember one deletable tuple
 */
static void
lazy_record_dead_tuple(LVRelStats *vacrelstats,
					   ItemPointer itemptr)
{
	/*
	 * The array shouldn't overflow under normal behavior, but perhaps it
	 * could if we are given a really small maintenance_work_mem. In that
	 * case, just forget the last few tuples (we'll get 'em next time).
	 */
	if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples)
	{
		vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
		vacrelstats->num_dead_tuples++;
		pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
									 vacrelstats->num_dead_tuples);
	}
}

/*
 *	lazy_tid_reaped() -- is a particular tid deletable?
 *
 *		This has the right signature to be an IndexBulkDeleteCallback.
 *
 *		Assumes dead_tuples array is in sorted order.
 */
static bool
lazy_tid_reaped(ItemPointer itemptr, void *state)
{
	LVRelStats *vacrelstats = (LVRelStats *) state;
	ItemPointer res;

	res = (ItemPointer) bsearch((void *) itemptr,
								(void *) vacrelstats->dead_tuples,
								vacrelstats->num_dead_tuples,
								sizeof(ItemPointerData),
								vac_cmp_itemptr);

	return (res != NULL);
}
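
/*
 * The sorted-order assumption above holds "for free": lazy_scan_heap
 * records dead TIDs in physical heap order, i.e. in increasing
 * (block, offset) order, which is exactly the order vac_cmp_itemptr
 * (below) implements, so no explicit qsort() is needed before bsearch().
 */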

/*
 * Comparator routines for use with qsort() and bsearch().
 */
static int
vac_cmp_itemptr(const void *left, const void *right)
{
	BlockNumber lblk,
				rblk;
	OffsetNumber loff,
				roff;

	lblk = ItemPointerGetBlockNumber((ItemPointer) left);
	rblk = ItemPointerGetBlockNumber((ItemPointer) right);

	if (lblk < rblk)
		return -1;
	if (lblk > rblk)
		return 1;

	loff = ItemPointerGetOffsetNumber((ItemPointer) left);
	roff = ItemPointerGetOffsetNumber((ItemPointer) right);

	if (loff < roff)
		return -1;
	if (loff > roff)
		return 1;

	return 0;
}
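
/*
 * For example, under this comparator (5,3) < (5,10) < (6,1): the block
 * number is compared first, and the offset number breaks ties only within
 * a block.
 */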

/*
 * Check if every tuple in the given page is visible to all current and future
 * transactions. Also return the visibility_cutoff_xid, which is the highest
 * xmin amongst the visible tuples. Set *all_frozen to true if every tuple
 * on this page is frozen.
 */
static bool
heap_page_is_all_visible(Relation rel, Buffer buf,
						 TransactionId *visibility_cutoff_xid,
						 bool *all_frozen)
{
	Page		page = BufferGetPage(buf);
	BlockNumber blockno = BufferGetBlockNumber(buf);
	OffsetNumber offnum,
				maxoff;
	bool		all_visible = true;

	*visibility_cutoff_xid = InvalidTransactionId;
	*all_frozen = true;

	/*
	 * This is a stripped down version of the line pointer scan in
	 * lazy_scan_heap(). So if you change anything here, also check that code.
	 */
	maxoff = PageGetMaxOffsetNumber(page);
	for (offnum = FirstOffsetNumber;
		 offnum <= maxoff && all_visible;
		 offnum = OffsetNumberNext(offnum))
	{
		ItemId		itemid;
		HeapTupleData tuple;

		itemid = PageGetItemId(page, offnum);

		/* Unused or redirect line pointers are of no interest */
		if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
			continue;

		ItemPointerSet(&(tuple.t_self), blockno, offnum);

		/*
		 * Dead line pointers can have index pointers pointing to them. So
		 * they can't be treated as visible.
		 */
		if (ItemIdIsDead(itemid))
		{
			all_visible = false;
			*all_frozen = false;
			break;
		}

		Assert(ItemIdIsNormal(itemid));

		tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
		tuple.t_len = ItemIdGetLength(itemid);
		tuple.t_tableOid = RelationGetRelid(rel);

		switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
		{
			case HEAPTUPLE_LIVE:
				{
					TransactionId xmin;

					/* Check comments in lazy_scan_heap. */
					if (!HeapTupleHeaderXminCommitted(tuple.t_data))
					{
						all_visible = false;
						*all_frozen = false;
						break;
					}

					/*
					 * The inserter definitely committed. But is it old enough
					 * that everyone sees it as committed?
					 */
					xmin = HeapTupleHeaderGetXmin(tuple.t_data);
					if (!TransactionIdPrecedes(xmin, OldestXmin))
					{
						all_visible = false;
						*all_frozen = false;
						break;
					}

					/* Track newest xmin on page. */
					if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
						*visibility_cutoff_xid = xmin;

					/* Check whether this tuple is already frozen or not */
					if (all_visible && *all_frozen &&
						heap_tuple_needs_eventual_freeze(tuple.t_data))
						*all_frozen = false;
				}
				break;

			case HEAPTUPLE_DEAD:
			case HEAPTUPLE_RECENTLY_DEAD:
			case HEAPTUPLE_INSERT_IN_PROGRESS:
			case HEAPTUPLE_DELETE_IN_PROGRESS:
				{
					all_visible = false;
					*all_frozen = false;
					break;
				}
			default:
				elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
				break;
		}
	}							/* scan along page */

	return all_visible;
}