/*-------------------------------------------------------------------------
 *
 * clog.c
 *		PostgreSQL transaction-commit-log manager
 *
 * This module replaces the old "pg_log" access code, which treated pg_log
 * essentially like a relation, in that it went through the regular buffer
 * manager.  The problem with that was that there wasn't any good way to
 * recycle storage space for transactions so old that they'll never be
 * looked up again.  Now we use specialized access code so that the commit
 * log can be broken into relatively small, independent segments.
 *
 * XLOG interactions: this module generates an XLOG record whenever a new
 * CLOG page is initialized to zeroes.  Other writes of CLOG come from
 * recording of transaction commit or abort in xact.c, which generates its
 * own XLOG records for these events and will re-perform the status update
 * on redo; so we need make no additional XLOG entry here.  For synchronous
 * transaction commits, the XLOG is guaranteed flushed through the XLOG commit
 * record before we are called to log a commit, so the WAL rule "write xlog
 * before data" is satisfied automatically.  However, for async commits we
 * must track the latest LSN affecting each CLOG page, so that we can flush
 * XLOG that far and satisfy the WAL rule.  We don't have to worry about this
 * for aborts (whether sync or async), since the post-crash assumption would
 * be that such transactions failed anyway.
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/backend/access/transam/clog.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/clog.h"
#include "access/slru.h"
#include "access/transam.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pg_trace.h"

/*
 * Defines for CLOG page sizes.  A page is the same BLCKSZ as is used
 * everywhere else in Postgres.
 *
 * Note: because TransactionIds are 32 bits and wrap around at 0xFFFFFFFF,
 * CLOG page numbering also wraps around at 0xFFFFFFFF/CLOG_XACTS_PER_PAGE,
 * and CLOG segment numbering at
 * 0xFFFFFFFF/CLOG_XACTS_PER_PAGE/SLRU_PAGES_PER_SEGMENT.  We need take no
 * explicit notice of that fact in this module, except when comparing segment
 * and page numbers in TruncateCLOG (see CLOGPagePrecedes).
 */

/* We need two bits per xact, so four xacts fit in a byte */
#define CLOG_BITS_PER_XACT	2
#define CLOG_XACTS_PER_BYTE 4
#define CLOG_XACTS_PER_PAGE (BLCKSZ * CLOG_XACTS_PER_BYTE)
#define CLOG_XACT_BITMASK	((1 << CLOG_BITS_PER_XACT) - 1)

#define TransactionIdToPage(xid)	((xid) / (TransactionId) CLOG_XACTS_PER_PAGE)
#define TransactionIdToPgIndex(xid) ((xid) % (TransactionId) CLOG_XACTS_PER_PAGE)
#define TransactionIdToByte(xid)	(TransactionIdToPgIndex(xid) / CLOG_XACTS_PER_BYTE)
#define TransactionIdToBIndex(xid)	((xid) % (TransactionId) CLOG_XACTS_PER_BYTE)

/* We store the latest async LSN for each group of transactions */
#define CLOG_XACTS_PER_LSN_GROUP	32	/* keep this a power of 2 */
#define CLOG_LSNS_PER_PAGE	(CLOG_XACTS_PER_PAGE / CLOG_XACTS_PER_LSN_GROUP)

#define GetLSNIndex(slotno, xid)	((slotno) * CLOG_LSNS_PER_PAGE + \
	((xid) % (TransactionId) CLOG_XACTS_PER_PAGE) / CLOG_XACTS_PER_LSN_GROUP)
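
/*
 * For illustration (assuming the default BLCKSZ of 8192):
 * CLOG_XACTS_PER_PAGE = 8192 * 4 = 32768 and CLOG_LSNS_PER_PAGE =
 * 32768 / 32 = 1024.  So for xid = 100000, TransactionIdToPage() = 3,
 * TransactionIdToPgIndex() = 1696, TransactionIdToByte() = 424, and
 * TransactionIdToBIndex() = 0; the xid's LSN group within its page is
 * 1696 / 32 = 53.
 */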


/*
 * Link to shared-memory data structures for CLOG control
 */
static SlruCtlData ClogCtlData;

#define ClogCtl (&ClogCtlData)


static int	ZeroCLOGPage(int pageno, bool writeXlog);
static bool CLOGPagePrecedes(int page1, int page2);
static void WriteZeroPageXlogRec(int pageno);
static void WriteTruncateXlogRec(int pageno, TransactionId oldestXact,
					 Oid oldestXidDb);
static void TransactionIdSetPageStatus(TransactionId xid, int nsubxids,
						   TransactionId *subxids, XidStatus status,
						   XLogRecPtr lsn, int pageno);
static void TransactionIdSetStatusBit(TransactionId xid, XidStatus status,
						  XLogRecPtr lsn, int slotno);
static void set_status_by_pages(int nsubxids, TransactionId *subxids,
					XidStatus status, XLogRecPtr lsn);

/*
 * TransactionIdSetTreeStatus
 *
 * Record the final state of transaction entries in the commit log for
 * a transaction and its subtransaction tree. Take care to ensure this is
 * efficient, and as atomic as possible.
 *
 * xid is a single xid to set status for. This will typically be
 * the top level transactionid for a top level commit or abort. It can
 * also be a subtransaction when we record transaction aborts.
 *
 * subxids is an array of xids of length nsubxids, representing subtransactions
 * in the tree of xid. In various cases nsubxids may be zero.
 *
 * lsn must be the WAL location of the commit record when recording an async
 * commit.  For a synchronous commit it can be InvalidXLogRecPtr, since the
 * caller guarantees the commit record is already flushed in that case.  It
 * should be InvalidXLogRecPtr for abort cases, too.
 *
 * In the commit case, atomicity is limited by whether all the subxids are in
 * the same CLOG page as xid.  If they all are, then the lock will be grabbed
 * only once, and the status will be set to committed directly.  Otherwise
 * we must
 *	 1. set sub-committed all subxids that are not on the same page as the
 *		main xid
 *	 2. atomically set committed the main xid and the subxids on the same page
 *	 3. go over the first bunch again and set them committed
 * Note that as far as concurrent checkers are concerned, main transaction
 * commit as a whole is still atomic.
 *
 * Example:
 *		TransactionId t commits and has subxids t1, t2, t3, t4
 *		t is on page p1, t1 is also on p1, t2 and t3 are on p2, t4 is on p3
 *		1. update pages 2-3:
 *					page2: set t2,t3 as sub-committed
 *					page3: set t4 as sub-committed
 *		2. update page1:
 *					set t1 as sub-committed,
 *					then set t as committed,
 *					then set t1 as committed
 *		3. update pages 2-3:
 *					page2: set t2,t3 as committed
 *					page3: set t4 as committed
 *
 * NB: this is a low-level routine and is NOT the preferred entry point
 * for most uses; functions in transam.c are the intended callers.
 *
 * XXX Think about issuing FADVISE_WILLNEED on pages that we will need,
 * but aren't yet in cache, as well as hinting pages not to fall out of
 * cache yet.
 */
void
TransactionIdSetTreeStatus(TransactionId xid, int nsubxids,
						   TransactionId *subxids, XidStatus status, XLogRecPtr lsn)
{
	int			pageno = TransactionIdToPage(xid);	/* get page of parent */
	int			i;

	Assert(status == TRANSACTION_STATUS_COMMITTED ||
		   status == TRANSACTION_STATUS_ABORTED);

	/*
	 * See how many subxids, if any, are on the same page as the parent.
	 */
	for (i = 0; i < nsubxids; i++)
	{
		if (TransactionIdToPage(subxids[i]) != pageno)
			break;
	}

	/*
	 * Do all items fit on a single page?
	 */
	if (i == nsubxids)
	{
		/*
		 * Set the parent and all subtransactions in a single call
		 */
		TransactionIdSetPageStatus(xid, nsubxids, subxids, status, lsn,
								   pageno);
	}
	else
	{
		int			nsubxids_on_first_page = i;

		/*
		 * If this is a commit then we care about doing this correctly (i.e.
		 * using the subcommitted intermediate status).  By here, we know
		 * we're updating more than one page of clog, so we must mark entries
		 * that are *not* on the first page so that they show as subcommitted
		 * before we then return to update the status to fully committed.
		 *
		 * To avoid touching the first page twice, skip marking subcommitted
		 * for the subxids on that first page.
		 */
		if (status == TRANSACTION_STATUS_COMMITTED)
			set_status_by_pages(nsubxids - nsubxids_on_first_page,
								subxids + nsubxids_on_first_page,
								TRANSACTION_STATUS_SUB_COMMITTED, lsn);

		/*
		 * Now set the parent and subtransactions on same page as the parent,
		 * if any
		 */
		pageno = TransactionIdToPage(xid);
		TransactionIdSetPageStatus(xid, nsubxids_on_first_page, subxids, status,
								   lsn, pageno);

		/*
		 * Now work through the rest of the subxids one clog page at a time,
		 * starting from the second page onwards, like we did above.
		 */
		set_status_by_pages(nsubxids - nsubxids_on_first_page,
							subxids + nsubxids_on_first_page,
							status, lsn);
	}
}

/*
 * Helper for TransactionIdSetTreeStatus: set the status for a bunch of
 * transactions, chunking in the separate CLOG pages involved. We never
 * pass the whole transaction tree to this function, only subtransactions
 * that are on different pages to the top level transaction id.
 */
static void
set_status_by_pages(int nsubxids, TransactionId *subxids,
					XidStatus status, XLogRecPtr lsn)
{
	int			pageno = TransactionIdToPage(subxids[0]);
	int			offset = 0;
	int			i = 0;

	Assert(nsubxids > 0);		/* else the pageno fetch above is unsafe */

	while (i < nsubxids)
	{
		int			num_on_page = 0;
		int			nextpageno;

		do
		{
			nextpageno = TransactionIdToPage(subxids[i]);
			if (nextpageno != pageno)
				break;
			num_on_page++;
			i++;
		} while (i < nsubxids);

		TransactionIdSetPageStatus(InvalidTransactionId,
								   num_on_page, subxids + offset,
								   status, lsn, pageno);
		offset = i;
		pageno = nextpageno;
	}
}

/*
 * Record the final state of transaction entries in the commit log for
 * all entries on a single page.  Atomic only on this page.
 *
 * Otherwise API is same as TransactionIdSetTreeStatus()
 */
static void
TransactionIdSetPageStatus(TransactionId xid, int nsubxids,
						   TransactionId *subxids, XidStatus status,
						   XLogRecPtr lsn, int pageno)
{
	int			slotno;
	int			i;

	Assert(status == TRANSACTION_STATUS_COMMITTED ||
		   status == TRANSACTION_STATUS_ABORTED ||
		   (status == TRANSACTION_STATUS_SUB_COMMITTED && !TransactionIdIsValid(xid)));

	LWLockAcquire(CLogControlLock, LW_EXCLUSIVE);

	/*
	 * If we're doing an async commit (ie, lsn is valid), then we must wait
	 * for any active write on the page slot to complete.  Otherwise our
	 * update could reach disk in that write, which will not do since we
	 * mustn't let it reach disk until we've done the appropriate WAL flush.
	 * But when lsn is invalid, it's OK to scribble on a page while it is
	 * write-busy, since we don't care if the update reaches disk sooner than
	 * we think.
	 */
	slotno = SimpleLruReadPage(ClogCtl, pageno, XLogRecPtrIsInvalid(lsn), xid);

	/*
	 * Set the main transaction id, if any.
	 *
	 * If we update more than one xid on this page while it is being written
	 * out, we might find that some of the bits go to disk and others don't.
	 * If we are updating commits on the page with the top-level xid that
	 * could break atomicity, so we subcommit the subxids first before we mark
	 * the top-level commit.
	 */
	if (TransactionIdIsValid(xid))
	{
		/* Subtransactions first, if needed ... */
		if (status == TRANSACTION_STATUS_COMMITTED)
		{
			for (i = 0; i < nsubxids; i++)
			{
				Assert(ClogCtl->shared->page_number[slotno] == TransactionIdToPage(subxids[i]));
				TransactionIdSetStatusBit(subxids[i],
										  TRANSACTION_STATUS_SUB_COMMITTED,
										  lsn, slotno);
			}
		}

		/* ... then the main transaction */
		TransactionIdSetStatusBit(xid, status, lsn, slotno);
	}

	/* Set the subtransactions */
	for (i = 0; i < nsubxids; i++)
	{
		Assert(ClogCtl->shared->page_number[slotno] == TransactionIdToPage(subxids[i]));
		TransactionIdSetStatusBit(subxids[i], status, lsn, slotno);
	}

	ClogCtl->shared->page_dirty[slotno] = true;

	LWLockRelease(CLogControlLock);
}

/*
 * Sets the commit status of a single transaction.
 *
 * Must be called with CLogControlLock held
 */
static void
TransactionIdSetStatusBit(TransactionId xid, XidStatus status, XLogRecPtr lsn, int slotno)
{
	int			byteno = TransactionIdToByte(xid);
	int			bshift = TransactionIdToBIndex(xid) * CLOG_BITS_PER_XACT;
	char	   *byteptr;
	char		byteval;
	char		curval;

	byteptr = ClogCtl->shared->page_buffer[slotno] + byteno;
	curval = (*byteptr >> bshift) & CLOG_XACT_BITMASK;

	/*
	 * When replaying transactions during recovery we still need to perform
	 * the two phases of subcommit and then commit. However, some transactions
	 * are already correctly marked, so we just treat those as a no-op which
	 * allows us to keep the following Assert as restrictive as possible.
	 */
	if (InRecovery && status == TRANSACTION_STATUS_SUB_COMMITTED &&
		curval == TRANSACTION_STATUS_COMMITTED)
		return;

	/*
	 * Current state change should be from 0 or subcommitted to target state
	 * or we should already be there when replaying changes during recovery.
	 */
	Assert(curval == 0 ||
		   (curval == TRANSACTION_STATUS_SUB_COMMITTED &&
			status != TRANSACTION_STATUS_IN_PROGRESS) ||
		   curval == status);

	/* note this assumes exclusive access to the clog page */
	byteval = *byteptr;
	byteval &= ~(((1 << CLOG_BITS_PER_XACT) - 1) << bshift);
	byteval |= (status << bshift);
	*byteptr = byteval;
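
	/*
	 * For illustration (assuming the default BLCKSZ of 8192, so
	 * CLOG_XACTS_PER_PAGE = 32768): for xid = 100003 we get byteno =
	 * (100003 % 32768) / 4 = 424 and bshift = (100003 % 4) * 2 = 6, so the
	 * update above clears bits 6-7 of that byte and stores the new two-bit
	 * status there, leaving the three other xacts sharing the byte
	 * untouched.
	 */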

	/*
	 * Update the group LSN if the transaction completion LSN is higher.
	 *
	 * Note: lsn will be invalid when supplied during InRecovery processing,
	 * so we don't need to do anything special to avoid LSN updates during
	 * recovery. After recovery completes the next clog change will set the
	 * LSN correctly.
	 */
	if (!XLogRecPtrIsInvalid(lsn))
	{
		int			lsnindex = GetLSNIndex(slotno, xid);

		if (ClogCtl->shared->group_lsn[lsnindex] < lsn)
			ClogCtl->shared->group_lsn[lsnindex] = lsn;
	}
}

/*
 * Interrogate the state of a transaction in the commit log.
 *
 * Aside from the actual commit status, this function returns (into *lsn)
 * an LSN that is late enough to be able to guarantee that if we flush up to
 * that LSN then we will have flushed the transaction's commit record to disk.
 * The result is not necessarily the exact LSN of the transaction's commit
 * record!  For example, for long-past transactions (those whose clog pages
 * already migrated to disk), we'll return InvalidXLogRecPtr.  Also, because
 * we group transactions on the same clog page to conserve storage, we might
 * return the LSN of a later transaction that falls into the same group.
 *
 * NB: this is a low-level routine and is NOT the preferred entry point
 * for most uses; TransactionLogFetch() in transam.c is the intended caller.
 */
XidStatus
TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn)
{
	int			pageno = TransactionIdToPage(xid);
	int			byteno = TransactionIdToByte(xid);
	int			bshift = TransactionIdToBIndex(xid) * CLOG_BITS_PER_XACT;
	int			slotno;
	int			lsnindex;
	char	   *byteptr;
	XidStatus	status;

	/* lock is acquired by SimpleLruReadPage_ReadOnly */

	slotno = SimpleLruReadPage_ReadOnly(ClogCtl, pageno, xid);
	byteptr = ClogCtl->shared->page_buffer[slotno] + byteno;

	status = (*byteptr >> bshift) & CLOG_XACT_BITMASK;

	lsnindex = GetLSNIndex(slotno, xid);
	*lsn = ClogCtl->shared->group_lsn[lsnindex];

	LWLockRelease(CLogControlLock);

	return status;
}

/*
 * Number of shared CLOG buffers.
 *
 * On larger multi-processor systems, it is possible to have many CLOG page
 * requests in flight at one time, which can lead to disk access for a CLOG
 * page if the required page is not found in memory.  Testing revealed that we
 * get the best performance with 128 CLOG buffers; beyond that, more buffers
 * do not improve performance.
 *
 * Unconditionally keeping the number of CLOG buffers at 128 did not seem like
 * a good idea, because it would increase the minimum amount of shared memory
 * required to start, which could be a problem for people running very small
 * configurations.  The following formula seems to represent a reasonable
 * compromise: people with very low values for shared_buffers will get fewer
 * CLOG buffers as well, and everyone else will get 128.
 */
Size
CLOGShmemBuffers(void)
{
	return Min(128, Max(4, NBuffers / 512));
}
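
/*
 * For illustration: with the default BLCKSZ of 8192, shared_buffers = 128MB
 * gives NBuffers = 16384 and hence 16384 / 512 = 32 CLOG buffers; once
 * shared_buffers reaches 512MB (NBuffers = 65536) the result is capped at
 * 128; and the Max() clamp keeps at least 4 buffers for very small
 * configurations.
 */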

/*
 * Initialization of shared memory for CLOG
 */
Size
CLOGShmemSize(void)
{
	return SimpleLruShmemSize(CLOGShmemBuffers(), CLOG_LSNS_PER_PAGE);
}

void
CLOGShmemInit(void)
{
	ClogCtl->PagePrecedes = CLOGPagePrecedes;
	SimpleLruInit(ClogCtl, "clog", CLOGShmemBuffers(), CLOG_LSNS_PER_PAGE,
				  CLogControlLock, "pg_xact", LWTRANCHE_CLOG_BUFFERS);
	SlruPagePrecedesUnitTests(ClogCtl, CLOG_XACTS_PER_PAGE);
}

/*
 * This func must be called ONCE on system install.  It creates
 * the initial CLOG segment.  (The CLOG directory is assumed to
 * have been created by initdb, and CLOGShmemInit must have been
 * called already.)
 */
void
BootStrapCLOG(void)
{
	int			slotno;

	LWLockAcquire(CLogControlLock, LW_EXCLUSIVE);

	/* Create and zero the first page of the commit log */
	slotno = ZeroCLOGPage(0, false);

	/* Make sure it's written out */
	SimpleLruWritePage(ClogCtl, slotno);
	Assert(!ClogCtl->shared->page_dirty[slotno]);

	LWLockRelease(CLogControlLock);
}

/*
 * Initialize (or reinitialize) a page of CLOG to zeroes.
 * If writeXlog is TRUE, also emit an XLOG record saying we did this.
 *
 * The page is not actually written, just set up in shared memory.
 * The slot number of the new page is returned.
 *
 * Control lock must be held at entry, and will be held at exit.
 */
static int
ZeroCLOGPage(int pageno, bool writeXlog)
{
	int			slotno;

	slotno = SimpleLruZeroPage(ClogCtl, pageno);

	if (writeXlog)
		WriteZeroPageXlogRec(pageno);

	return slotno;
}

/*
 * This must be called ONCE during postmaster or standalone-backend startup,
 * after StartupXLOG has initialized ShmemVariableCache->nextXid.
 */
void
StartupCLOG(void)
{
	TransactionId xid = ShmemVariableCache->nextXid;
	int			pageno = TransactionIdToPage(xid);

	LWLockAcquire(CLogControlLock, LW_EXCLUSIVE);

	/*
	 * Initialize our idea of the latest page number.
	 */
	ClogCtl->shared->latest_page_number = pageno;

	LWLockRelease(CLogControlLock);
}

/*
 * This must be called ONCE at the end of startup/recovery.
 */
void
TrimCLOG(void)
{
	TransactionId xid = ShmemVariableCache->nextXid;
	int			pageno = TransactionIdToPage(xid);

	LWLockAcquire(CLogControlLock, LW_EXCLUSIVE);

	/*
	 * Re-initialize our idea of the latest page number.
	 */
	ClogCtl->shared->latest_page_number = pageno;

	/*
	 * Zero out the remainder of the current clog page.  Under normal
	 * circumstances it should be zeroes already, but it seems at least
	 * theoretically possible that XLOG replay will have settled on a nextXID
	 * value that is less than the last XID actually used and marked by the
	 * previous database lifecycle (since subtransaction commit writes clog
	 * but makes no WAL entry).  Let's just be safe. (We need not worry about
	 * pages beyond the current one, since those will be zeroed when first
	 * used.  For the same reason, there is no need to do anything when
	 * nextXid is exactly at a page boundary; and it's likely that the
	 * "current" page doesn't exist yet in that case.)
	 */
	if (TransactionIdToPgIndex(xid) != 0)
	{
		int			byteno = TransactionIdToByte(xid);
		int			bshift = TransactionIdToBIndex(xid) * CLOG_BITS_PER_XACT;
		int			slotno;
		char	   *byteptr;

		slotno = SimpleLruReadPage(ClogCtl, pageno, false, xid);
		byteptr = ClogCtl->shared->page_buffer[slotno] + byteno;

		/* Zero so-far-unused positions in the current byte */
		*byteptr &= (1 << bshift) - 1;
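
		/*
		 * For illustration: if nextXid's position within its byte is 2, then
		 * bshift = 4 and the mask (1 << 4) - 1 = 0x0F keeps the two
		 * already-used status fields in the low bits while zeroing the two
		 * not-yet-used fields above them.
		 */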
		/* Zero the rest of the page */
		MemSet(byteptr + 1, 0, BLCKSZ - byteno - 1);

		ClogCtl->shared->page_dirty[slotno] = true;
	}

	LWLockRelease(CLogControlLock);
}

/*
 * This must be called ONCE during postmaster or standalone-backend shutdown
 */
void
ShutdownCLOG(void)
{
	/* Flush dirty CLOG pages to disk */
	TRACE_POSTGRESQL_CLOG_CHECKPOINT_START(false);
	SimpleLruFlush(ClogCtl, false);

	/*
	 * fsync pg_xact to ensure that any files flushed previously are durably
	 * on disk.
	 */
	fsync_fname("pg_xact", true);

	TRACE_POSTGRESQL_CLOG_CHECKPOINT_DONE(false);
}

/*
 * Perform a checkpoint --- either during shutdown, or on-the-fly
 */
void
CheckPointCLOG(void)
{
	/* Flush dirty CLOG pages to disk */
	TRACE_POSTGRESQL_CLOG_CHECKPOINT_START(true);
	SimpleLruFlush(ClogCtl, true);
	TRACE_POSTGRESQL_CLOG_CHECKPOINT_DONE(true);
}


/*
 * Make sure that CLOG has room for a newly-allocated XID.
 *
 * NB: this is called while holding XidGenLock.  We want it to be very fast
 * most of the time; even when it's not so fast, no actual I/O need happen
 * unless we're forced to write out a dirty clog or xlog page to make room
 * in shared memory.
 */
void
ExtendCLOG(TransactionId newestXact)
{
	int			pageno;

	/*
	 * No work except at first XID of a page.  But beware: just after
	 * wraparound, the first XID of page zero is FirstNormalTransactionId.
	 */
	if (TransactionIdToPgIndex(newestXact) != 0 &&
		!TransactionIdEquals(newestXact, FirstNormalTransactionId))
		return;
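
	/*
	 * For illustration (assuming the default BLCKSZ of 8192, so 32768 xids
	 * per page): newestXact = 98304 has page index 0, so we fall through and
	 * zero page 3 below; after XID wraparound the first assigned XID is
	 * FirstNormalTransactionId (3), whose page index is nonzero, but the
	 * second test still lets us through so that page zero gets zeroed.
	 */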

	pageno = TransactionIdToPage(newestXact);

	LWLockAcquire(CLogControlLock, LW_EXCLUSIVE);

	/* Zero the page and make an XLOG entry about it */
	ZeroCLOGPage(pageno, true);

	LWLockRelease(CLogControlLock);
}


/*
 * Remove all CLOG segments before the one holding the passed transaction ID
 *
 * Before removing any CLOG data, we must flush XLOG to disk, to ensure
 * that any recently-emitted HEAP_FREEZE records have reached disk; otherwise
 * a crash and restart might leave us with some unfrozen tuples referencing
 * removed CLOG data.  We choose to emit a special TRUNCATE XLOG record too.
 * Replaying the deletion from XLOG is not critical, since the files could
 * just as well be removed later, but doing so prevents a long-running hot
 * standby server from acquiring an unreasonably bloated CLOG directory.
 *
 * Since CLOG segments hold a large number of transactions, the opportunity to
 * actually remove a segment is fairly rare, and so it seems best not to do
 * the XLOG flush unless we have confirmed that there is a removable segment.
 */
void
TruncateCLOG(TransactionId oldestXact, Oid oldestxid_datoid)
{
	int			cutoffPage;

	/*
	 * The cutoff point is the start of the segment containing oldestXact. We
	 * pass the *page* containing oldestXact to SimpleLruTruncate.
	 */
	cutoffPage = TransactionIdToPage(oldestXact);

	/* Check to see if there are any files that could be removed */
	if (!SlruScanDirectory(ClogCtl, SlruScanDirCbReportPresence, &cutoffPage))
		return;					/* nothing to remove */

	/*
	 * Advance oldestClogXid before truncating clog, so concurrent xact status
	 * lookups can ensure they don't attempt to access truncated-away clog.
	 *
	 * It's only necessary to do this if we will actually truncate away clog
	 * pages.
	 */
	AdvanceOldestClogXid(oldestXact);

	/*
	 * Write XLOG record and flush XLOG to disk. We record the oldest xid
	 * we're keeping information about here so we can ensure that it's always
	 * ahead of clog truncation in case we crash, and so a standby finds out
	 * the new valid xid before the next checkpoint.
	 */
	WriteTruncateXlogRec(cutoffPage, oldestXact, oldestxid_datoid);

	/* Now we can remove the old CLOG segment(s) */
	SimpleLruTruncate(ClogCtl, cutoffPage);
}


/*
 * Decide whether a CLOG page number is "older" for truncation purposes.
 *
 * We need to use comparison of TransactionIds here in order to do the right
 * thing with wraparound XID arithmetic.  However, TransactionIdPrecedes()
 * would get weird about permanent xact IDs.  So, offset both such that xid1,
 * xid2, and xid2 + CLOG_XACTS_PER_PAGE - 1 are all normal XIDs; this offset
 * is relevant to page 0 and to the page preceding page 0.
 *
 * The page containing oldestXact-2^31 is the important edge case.  The
 * portion of that page equaling or following oldestXact-2^31 is expendable,
 * but the portion preceding oldestXact-2^31 is not.  When oldestXact-2^31 is
 * the first XID of a page and segment, the entire page and segment is
 * expendable, and we could truncate the segment.  Recognizing that case would
 * require making oldestXact, not just the page containing oldestXact,
 * available to this callback.  The benefit would be rare and small, so we
 * don't optimize that edge case.
 */
static bool
CLOGPagePrecedes(int page1, int page2)
{
	TransactionId xid1;
	TransactionId xid2;

	xid1 = ((TransactionId) page1) * CLOG_XACTS_PER_PAGE;
	xid1 += FirstNormalTransactionId + 1;
	xid2 = ((TransactionId) page2) * CLOG_XACTS_PER_PAGE;
	xid2 += FirstNormalTransactionId + 1;

	return (TransactionIdPrecedes(xid1, xid2) &&
			TransactionIdPrecedes(xid1, xid2 + CLOG_XACTS_PER_PAGE - 1));
}
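
/*
 * For illustration (assuming the default BLCKSZ of 8192, so 32768 xids per
 * page): the last page before xid wraparound is 131071, giving xid1 =
 * 131071 * 32768 + 4 = 4294934532, while page 0 gives xid2 = 4.  Under
 * modulo-2^32 xid arithmetic, xid1 precedes both xid2 and xid2 + 32767, so
 * CLOGPagePrecedes(131071, 0) correctly reports the pre-wraparound page as
 * older.
 */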


/*
 * Write a ZEROPAGE xlog record
 */
static void
WriteZeroPageXlogRec(int pageno)
{
	XLogBeginInsert();
	XLogRegisterData((char *) (&pageno), sizeof(int));
	(void) XLogInsert(RM_CLOG_ID, CLOG_ZEROPAGE);
}

/*
 * Write a TRUNCATE xlog record
 *
 * We must flush the xlog record to disk before returning --- see notes
 * in TruncateCLOG().
 */
static void
WriteTruncateXlogRec(int pageno, TransactionId oldestXact, Oid oldestXactDb)
{
	XLogRecPtr	recptr;
	xl_clog_truncate xlrec;

	xlrec.pageno = pageno;
	xlrec.oldestXact = oldestXact;
	xlrec.oldestXactDb = oldestXactDb;

	XLogBeginInsert();
	XLogRegisterData((char *) (&xlrec), sizeof(xl_clog_truncate));
	recptr = XLogInsert(RM_CLOG_ID, CLOG_TRUNCATE);
	XLogFlush(recptr);
}

/*
 * CLOG resource manager's routines
 */
void
clog_redo(XLogReaderState *record)
{
	uint8		info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;

	/* Backup blocks are not used in clog records */
	Assert(!XLogRecHasAnyBlockRefs(record));

	if (info == CLOG_ZEROPAGE)
	{
		int			pageno;
		int			slotno;

		memcpy(&pageno, XLogRecGetData(record), sizeof(int));

		LWLockAcquire(CLogControlLock, LW_EXCLUSIVE);

		slotno = ZeroCLOGPage(pageno, false);
		SimpleLruWritePage(ClogCtl, slotno);
		Assert(!ClogCtl->shared->page_dirty[slotno]);

		LWLockRelease(CLogControlLock);
	}
	else if (info == CLOG_TRUNCATE)
	{
		xl_clog_truncate xlrec;

		memcpy(&xlrec, XLogRecGetData(record), sizeof(xl_clog_truncate));

		/*
		 * During XLOG replay, latest_page_number isn't set up yet; insert a
		 * suitable value to bypass the sanity test in SimpleLruTruncate.
		 */
		ClogCtl->shared->latest_page_number = xlrec.pageno;

		AdvanceOldestClogXid(xlrec.oldestXact);

		SimpleLruTruncate(ClogCtl, xlrec.pageno);
	}
	else
		elog(PANIC, "clog_redo: unknown op code %u", info);
}