/*-------------------------------------------------------------------------
 *
 * reorderbuffer.c
 *	  PostgreSQL logical replay/reorder buffer management
 *
 *
 * Copyright (c) 2012-2017, PostgreSQL Global Development Group
 *
 *
 * IDENTIFICATION
 *	  src/backend/replication/reorderbuffer.c
 *
 * NOTES
 *	  This module gets handed individual pieces of transactions in the order
 *	  they are written to the WAL and is responsible for reassembling them
 *	  into toplevel transaction sized pieces. When a transaction is completely
 *	  reassembled - signalled by reading the transaction commit record - it
 *	  will then call the output plugin (c.f. ReorderBufferCommit()) with the
 *	  individual changes. The output plugins rely on snapshots built by
 *	  snapbuild.c which hands them to us.
 *
 *	  Transactions and subtransactions/savepoints in postgres are not
 *	  immediately linked to each other from outside the performing
 *	  backend. Only at commit/abort (or special xact_assignment records) are
 *	  they linked together. Which means that we will have to splice together a
 *	  toplevel transaction from its subtransactions. To do that efficiently we
 *	  build a binary heap indexed by the smallest current lsn of the individual
 *	  subtransactions' changestreams. As the individual streams are inherently
 *	  ordered by LSN - since that is where we build them from - the transaction
 *	  can easily be reassembled by always using the subtransaction with the
 *	  smallest current LSN from the heap.
 *
 *	  In order to cope with large transactions - which can be several times as
 *	  big as the available memory - this module supports spooling the contents
 *	  of large transactions to disk. When the transaction is replayed the
 *	  contents of individual (sub-)transactions will be read from disk in
 *	  chunks.
 *
 *	  This module also has to deal with reassembling toast records from the
 *	  individual chunks stored in WAL. When a new (or initial) version of a
 *	  tuple is stored in WAL it will always be preceded by the toast chunks
 *	  emitted for the columns stored out of line. Within a single toplevel
 *	  transaction there will be no other data carrying records between a row's
 *	  toast chunks and the row data itself. See ReorderBufferToast* for
 *	  details.
 * -------------------------------------------------------------------------
 */
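/*
 * Rough call flow, for orientation (an illustrative sketch only; the real
 * callers live in decode.c and logical.c):
 *
 *		ReorderBufferQueueChange(rb, xid, lsn, change);	  per decoded WAL record
 *		ReorderBufferCommit(rb, xid, commit_lsn, ...);	  on the commit record;
 *														  replays the assembled
 *														  transaction through the
 *														  output plugin callbacks
 */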
#include "postgres.h"

#include <unistd.h>
#include <sys/stat.h>

#include "access/rewriteheap.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/xact.h"
#include "access/xlog_internal.h"
#include "catalog/catalog.h"
#include "lib/binaryheap.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "replication/logical.h"
#include "replication/reorderbuffer.h"
#include "replication/slot.h"
#include "replication/snapbuild.h"	/* just for SnapBuildSnapDecRefcount */
#include "storage/bufmgr.h"
#include "storage/fd.h"
#include "storage/sinval.h"
#include "utils/builtins.h"
#include "utils/combocid.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/relfilenodemap.h"
#include "utils/tqual.h"


/* entry for a hash table we use to map from xid to our transaction state */
typedef struct ReorderBufferTXNByIdEnt
{
	TransactionId xid;
	ReorderBufferTXN *txn;
} ReorderBufferTXNByIdEnt;

/* data structures for (relfilenode, ctid) => (cmin, cmax) mapping */
typedef struct ReorderBufferTupleCidKey
{
	RelFileNode relnode;
	ItemPointerData tid;
} ReorderBufferTupleCidKey;

typedef struct ReorderBufferTupleCidEnt
{
	ReorderBufferTupleCidKey key;
	CommandId	cmin;
	CommandId	cmax;
	CommandId	combocid;		/* just for debugging */
} ReorderBufferTupleCidEnt;

/* k-way in-order change iteration support structures */
typedef struct ReorderBufferIterTXNEntry
{
	XLogRecPtr	lsn;
	ReorderBufferChange *change;
	ReorderBufferTXN *txn;
	File		fd;
	XLogSegNo	segno;
} ReorderBufferIterTXNEntry;

typedef struct ReorderBufferIterTXNState
{
	binaryheap *heap;
	Size		nr_txns;
	dlist_head	old_change;
	ReorderBufferIterTXNEntry entries[FLEXIBLE_ARRAY_MEMBER];
} ReorderBufferIterTXNState;

/* toast datastructures */
typedef struct ReorderBufferToastEnt
{
	Oid			chunk_id;		/* toast_table.chunk_id */
	int32		last_chunk_seq; /* toast_table.chunk_seq of the last chunk we
								 * have seen */
	Size		num_chunks;		/* number of chunks we've already seen */
	Size		size;			/* combined size of chunks seen */
	dlist_head	chunks;			/* linked list of chunks */
	struct varlena *reconstructed;	/* reconstructed varlena now pointed to in
									 * main tup */
} ReorderBufferToastEnt;

/* Disk serialization support datastructures */
typedef struct ReorderBufferDiskChange
{
	Size		size;
	ReorderBufferChange change;
	/* data follows */
} ReorderBufferDiskChange;

/*
 * Maximum number of changes kept in memory, per transaction. After that,
 * changes are spooled to disk.
 *
 * The current value should be sufficient to decode the entire transaction
 * without hitting disk in OLTP workloads, while starting to spool to disk in
 * other workloads reasonably fast.
 *
 * At some point in the future it probably makes sense to have a more elaborate
 * resource management here, but it's not entirely clear what that would look
 * like.
 */
static const Size max_changes_in_memory = 4096;
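
/*
 * For example (behavior sketch): in a transaction inserting a million rows,
 * ReorderBufferCheckSerializeTXN() spools the accumulated in-memory changes
 * to disk each time their count reaches max_changes_in_memory, so at most
 * 4096 changes of that transaction are held in memory at once; they are
 * read back in chunks when the transaction is replayed.
 */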

/*
 * We use a very simple form of a slab allocator for frequently allocated
 * objects, simply keeping a fixed number in a linked list when unused,
 * instead of pfree()ing them. Without that, in many workloads aset.c becomes
 * a major bottleneck, especially when spilling to disk while decoding batch
 * workloads.
 */
static const Size max_cached_tuplebufs = 4096 * 2;	/* ~64MB */

/* ---------------------------------------
 * primary reorderbuffer support routines
 * ---------------------------------------
 */
static ReorderBufferTXN *ReorderBufferGetTXN(ReorderBuffer *rb);
static void ReorderBufferReturnTXN(ReorderBuffer *rb, ReorderBufferTXN *txn);
static ReorderBufferTXN *ReorderBufferTXNByXid(ReorderBuffer *rb,
					  TransactionId xid, bool create, bool *is_new,
					  XLogRecPtr lsn, bool create_as_top);
static void ReorderBufferTransferSnapToParent(ReorderBufferTXN *txn,
								  ReorderBufferTXN *subtxn);

static void AssertTXNLsnOrder(ReorderBuffer *rb);

/* ---------------------------------------
 * support functions for lsn-order iterating over the ->changes of a
 * transaction and its subtransactions
 *
 * used for iteration over the k-way heap merge of a transaction and its
 * subtransactions
 * ---------------------------------------
 */
static void ReorderBufferIterTXNInit(ReorderBuffer *rb, ReorderBufferTXN *txn,
									 ReorderBufferIterTXNState *volatile *iter_state);
static ReorderBufferChange *ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state);
static void ReorderBufferIterTXNFinish(ReorderBuffer *rb,
						   ReorderBufferIterTXNState *state);
static void ReorderBufferExecuteInvalidations(ReorderBuffer *rb, ReorderBufferTXN *txn);

/*
 * ---------------------------------------
 * Disk serialization support functions
 * ---------------------------------------
 */
static void ReorderBufferCheckSerializeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn);
static void ReorderBufferSerializeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn);
static void ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
							 int fd, ReorderBufferChange *change);
static Size ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn,
							File *fd, XLogSegNo *segno);
static void ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
						   char *change);
static void ReorderBufferRestoreCleanup(ReorderBuffer *rb, ReorderBufferTXN *txn);
static void ReorderBufferCleanupSerializedTXNs(const char *slotname);
static void ReorderBufferSerializedPath(char *path, ReplicationSlot *slot,
							TransactionId xid, XLogSegNo segno);

static void ReorderBufferFreeSnap(ReorderBuffer *rb, Snapshot snap);
static Snapshot ReorderBufferCopySnap(ReorderBuffer *rb, Snapshot orig_snap,
					  ReorderBufferTXN *txn, CommandId cid);

/* ---------------------------------------
 * toast reassembly support
 * ---------------------------------------
 */
static void ReorderBufferToastInitHash(ReorderBuffer *rb, ReorderBufferTXN *txn);
static void ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn);
static void ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn,
						  Relation relation, ReorderBufferChange *change);
static void ReorderBufferToastAppendChunk(ReorderBuffer *rb, ReorderBufferTXN *txn,
							  Relation relation, ReorderBufferChange *change);


/*
 * Allocate a new ReorderBuffer and clean out any old serialized state from
 * prior ReorderBuffer instances for the same slot.
 */
ReorderBuffer *
ReorderBufferAllocate(void)
{
	ReorderBuffer *buffer;
	HASHCTL		hash_ctl;
	MemoryContext new_ctx;

	Assert(MyReplicationSlot != NULL);

	/* allocate memory in own context, to have better accountability */
	new_ctx = AllocSetContextCreate(CurrentMemoryContext,
									"ReorderBuffer",
									ALLOCSET_DEFAULT_SIZES);

	buffer =
		(ReorderBuffer *) MemoryContextAlloc(new_ctx, sizeof(ReorderBuffer));

	memset(&hash_ctl, 0, sizeof(hash_ctl));

	buffer->context = new_ctx;

	buffer->change_context = SlabContextCreate(new_ctx,
											   "Change",
											   SLAB_DEFAULT_BLOCK_SIZE,
											   sizeof(ReorderBufferChange));

	buffer->txn_context = SlabContextCreate(new_ctx,
											"TXN",
											SLAB_DEFAULT_BLOCK_SIZE,
											sizeof(ReorderBufferTXN));

	hash_ctl.keysize = sizeof(TransactionId);
	hash_ctl.entrysize = sizeof(ReorderBufferTXNByIdEnt);
	hash_ctl.hcxt = buffer->context;

	buffer->by_txn = hash_create("ReorderBufferByXid", 1000, &hash_ctl,
								 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

	buffer->by_txn_last_xid = InvalidTransactionId;
	buffer->by_txn_last_txn = NULL;

	buffer->nr_cached_tuplebufs = 0;

	buffer->outbuf = NULL;
	buffer->outbufsize = 0;

	buffer->current_restart_decoding_lsn = InvalidXLogRecPtr;

	dlist_init(&buffer->toplevel_by_lsn);
	dlist_init(&buffer->txns_by_base_snapshot_lsn);
	slist_init(&buffer->cached_tuplebufs);

	/*
	 * Ensure there's no stale data from prior uses of this slot, in case some
	 * prior exit avoided calling ReorderBufferFree. Failure to do this can
	 * produce duplicated txns, and it's very cheap if there's nothing there.
	 */
	ReorderBufferCleanupSerializedTXNs(NameStr(MyReplicationSlot->data.name));

	return buffer;
}

/*
 * Free a ReorderBuffer
 */
void
ReorderBufferFree(ReorderBuffer *rb)
{
	MemoryContext context = rb->context;

	/*
	 * We free separately allocated data by entirely scrapping the
	 * reorderbuffer's memory context.
	 */
	MemoryContextDelete(context);

	/* Free disk space used by unconsumed reorder buffers */
	ReorderBufferCleanupSerializedTXNs(NameStr(MyReplicationSlot->data.name));
}

/*
 * Get an unused, possibly preallocated, ReorderBufferTXN.
 */
static ReorderBufferTXN *
ReorderBufferGetTXN(ReorderBuffer *rb)
{
	ReorderBufferTXN *txn;

	txn = (ReorderBufferTXN *)
		MemoryContextAlloc(rb->txn_context, sizeof(ReorderBufferTXN));

	memset(txn, 0, sizeof(ReorderBufferTXN));

	dlist_init(&txn->changes);
	dlist_init(&txn->tuplecids);
	dlist_init(&txn->subtxns);

	return txn;
}

/*
 * Free a ReorderBufferTXN.
 */
static void
ReorderBufferReturnTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
{
	/* clean the lookup cache if we were cached (quite likely) */
	if (rb->by_txn_last_xid == txn->xid)
	{
		rb->by_txn_last_xid = InvalidTransactionId;
		rb->by_txn_last_txn = NULL;
	}

	/* free data that's contained */

	if (txn->tuplecid_hash != NULL)
	{
		hash_destroy(txn->tuplecid_hash);
		txn->tuplecid_hash = NULL;
	}

	if (txn->invalidations)
	{
		pfree(txn->invalidations);
		txn->invalidations = NULL;
	}

	/* Reset the toast hash */
	ReorderBufferToastReset(rb, txn);

	pfree(txn);
}

/*
 * Get a fresh ReorderBufferChange.
 */
ReorderBufferChange *
ReorderBufferGetChange(ReorderBuffer *rb)
{
	ReorderBufferChange *change;

	change = (ReorderBufferChange *)
		MemoryContextAlloc(rb->change_context, sizeof(ReorderBufferChange));

	memset(change, 0, sizeof(ReorderBufferChange));
	return change;
}

/*
 * Free a ReorderBufferChange.
 */
void
ReorderBufferReturnChange(ReorderBuffer *rb, ReorderBufferChange *change)
{
	/* free contained data */
	switch (change->action)
	{
		case REORDER_BUFFER_CHANGE_INSERT:
		case REORDER_BUFFER_CHANGE_UPDATE:
		case REORDER_BUFFER_CHANGE_DELETE:
		case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
			if (change->data.tp.newtuple)
			{
				ReorderBufferReturnTupleBuf(rb, change->data.tp.newtuple);
				change->data.tp.newtuple = NULL;
			}

			if (change->data.tp.oldtuple)
			{
				ReorderBufferReturnTupleBuf(rb, change->data.tp.oldtuple);
				change->data.tp.oldtuple = NULL;
			}
			break;
		case REORDER_BUFFER_CHANGE_MESSAGE:
			if (change->data.msg.prefix != NULL)
				pfree(change->data.msg.prefix);
			change->data.msg.prefix = NULL;
			if (change->data.msg.message != NULL)
				pfree(change->data.msg.message);
			change->data.msg.message = NULL;
			break;
		case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT:
			if (change->data.snapshot)
			{
				ReorderBufferFreeSnap(rb, change->data.snapshot);
				change->data.snapshot = NULL;
			}
			break;
			/* no data in addition to the struct itself */
		case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
		case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_ABORT:
		case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID:
		case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID:
			break;
	}

	pfree(change);
}

/*
 * Get a fresh ReorderBufferTupleBuf fitting at least a tuple of size
 * tuple_len (excluding header overhead).
 */
ReorderBufferTupleBuf *
ReorderBufferGetTupleBuf(ReorderBuffer *rb, Size tuple_len)
{
	ReorderBufferTupleBuf *tuple;
	Size		alloc_len;

	alloc_len = tuple_len + SizeofHeapTupleHeader;

	/*
	 * Most tuples are below MaxHeapTupleSize, so we use a slab allocator for
	 * those. Thus always allocate at least MaxHeapTupleSize. Note that tuples
	 * generated for oldtuples can be bigger, as they don't have out-of-line
	 * toast columns.
	 */
	if (alloc_len < MaxHeapTupleSize)
		alloc_len = MaxHeapTupleSize;


	/* if small enough, check the slab cache */
	if (alloc_len <= MaxHeapTupleSize && rb->nr_cached_tuplebufs)
	{
		rb->nr_cached_tuplebufs--;
		tuple = slist_container(ReorderBufferTupleBuf, node,
								slist_pop_head_node(&rb->cached_tuplebufs));
		Assert(tuple->alloc_tuple_size == MaxHeapTupleSize);
#ifdef USE_ASSERT_CHECKING
		memset(&tuple->tuple, 0xa9, sizeof(HeapTupleData));
		VALGRIND_MAKE_MEM_UNDEFINED(&tuple->tuple, sizeof(HeapTupleData));
#endif
		tuple->tuple.t_data = ReorderBufferTupleBufData(tuple);
#ifdef USE_ASSERT_CHECKING
		memset(tuple->tuple.t_data, 0xa8, tuple->alloc_tuple_size);
		VALGRIND_MAKE_MEM_UNDEFINED(tuple->tuple.t_data, tuple->alloc_tuple_size);
#endif
	}
	else
	{
		tuple = (ReorderBufferTupleBuf *)
			MemoryContextAlloc(rb->context,
							   sizeof(ReorderBufferTupleBuf) +
							   MAXIMUM_ALIGNOF + alloc_len);
		tuple->alloc_tuple_size = alloc_len;
		tuple->tuple.t_data = ReorderBufferTupleBufData(tuple);
	}

	return tuple;
}

/*
 * Free a ReorderBufferTupleBuf.
 */
void
ReorderBufferReturnTupleBuf(ReorderBuffer *rb, ReorderBufferTupleBuf *tuple)
{
	/* check whether to put into the slab cache; oversized tuples never are */
	if (tuple->alloc_tuple_size == MaxHeapTupleSize &&
		rb->nr_cached_tuplebufs < max_cached_tuplebufs)
	{
		rb->nr_cached_tuplebufs++;
		slist_push_head(&rb->cached_tuplebufs, &tuple->node);
		VALGRIND_MAKE_MEM_UNDEFINED(tuple->tuple.t_data, tuple->alloc_tuple_size);
		VALGRIND_MAKE_MEM_UNDEFINED(tuple, sizeof(ReorderBufferTupleBuf));
		VALGRIND_MAKE_MEM_DEFINED(&tuple->node, sizeof(tuple->node));
		VALGRIND_MAKE_MEM_DEFINED(&tuple->alloc_tuple_size, sizeof(tuple->alloc_tuple_size));
	}
	else
	{
		pfree(tuple);
	}
}

/*
 * Return the ReorderBufferTXN from the given buffer, specified by Xid.
 * If create is true, and a transaction doesn't already exist, create it
 * (with the given LSN, and as top transaction if that's specified);
 * when this happens, is_new is set to true.
 */
static ReorderBufferTXN *
ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create,
					  bool *is_new, XLogRecPtr lsn, bool create_as_top)
{
	ReorderBufferTXN *txn;
	ReorderBufferTXNByIdEnt *ent;
	bool		found;

	Assert(TransactionIdIsValid(xid));

	/*
	 * Check the one-entry lookup cache first
	 */
	if (TransactionIdIsValid(rb->by_txn_last_xid) &&
		rb->by_txn_last_xid == xid)
	{
		txn = rb->by_txn_last_txn;

		if (txn != NULL)
		{
			/* found it, and it's valid */
			if (is_new)
				*is_new = false;
			return txn;
		}

		/*
		 * cached as non-existent, and asked not to create? Then nothing else
		 * to do.
		 */
		if (!create)
			return NULL;
		/* otherwise fall through to create it */
	}

	/*
	 * The cache wasn't hit, or it yielded "does-not-exist" and we want to
	 * create an entry.
	 */

	/* search the lookup table */
	ent = (ReorderBufferTXNByIdEnt *)
		hash_search(rb->by_txn,
					(void *) &xid,
					create ? HASH_ENTER : HASH_FIND,
					&found);
	if (found)
		txn = ent->txn;
	else if (create)
	{
		/* initialize the new entry, if creation was requested */
		Assert(ent != NULL);
		Assert(lsn != InvalidXLogRecPtr);

		ent->txn = ReorderBufferGetTXN(rb);
		ent->txn->xid = xid;
		txn = ent->txn;
		txn->first_lsn = lsn;
		txn->restart_decoding_lsn = rb->current_restart_decoding_lsn;

		if (create_as_top)
		{
			dlist_push_tail(&rb->toplevel_by_lsn, &txn->node);
			AssertTXNLsnOrder(rb);
		}
	}
	else
		txn = NULL;				/* not found and not asked to create */

	/* update cache */
	rb->by_txn_last_xid = xid;
	rb->by_txn_last_txn = txn;

	if (is_new)
		*is_new = !found;

	Assert(!create || txn != NULL);
	return txn;
}

/*
 * Queue a change into a transaction so it can be replayed upon commit.
 */
void
ReorderBufferQueueChange(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn,
						 ReorderBufferChange *change)
{
	ReorderBufferTXN *txn;

	txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);

	change->lsn = lsn;
	Assert(InvalidXLogRecPtr != lsn);
	dlist_push_tail(&txn->changes, &change->node);
	txn->nentries++;
	txn->nentries_mem++;

	ReorderBufferCheckSerializeTXN(rb, txn);
}
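
/*
 * Typical use, roughly as in decode.c's DecodeInsert() (an illustrative
 * sketch, not a verbatim copy):
 *
 *		change = ReorderBufferGetChange(ctx->reorder);
 *		change->action = REORDER_BUFFER_CHANGE_INSERT;
 *		memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
 *		... fill in change->data.tp.newtuple ...
 *		ReorderBufferQueueChange(ctx->reorder, XLogRecGetXid(r),
 *								 buf->origptr, change);
 */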

/*
 * Queue a message into a transaction so it can be processed upon commit.
 */
void
ReorderBufferQueueMessage(ReorderBuffer *rb, TransactionId xid,
						  Snapshot snapshot, XLogRecPtr lsn,
						  bool transactional, const char *prefix,
						  Size message_size, const char *message)
{
	if (transactional)
	{
		MemoryContext oldcontext;
		ReorderBufferChange *change;

		Assert(xid != InvalidTransactionId);

		oldcontext = MemoryContextSwitchTo(rb->context);

		change = ReorderBufferGetChange(rb);
		change->action = REORDER_BUFFER_CHANGE_MESSAGE;
		change->data.msg.prefix = pstrdup(prefix);
		change->data.msg.message_size = message_size;
		change->data.msg.message = palloc(message_size);
		memcpy(change->data.msg.message, message, message_size);

		ReorderBufferQueueChange(rb, xid, lsn, change);

		MemoryContextSwitchTo(oldcontext);
	}
	else
	{
		ReorderBufferTXN *txn = NULL;
		volatile Snapshot snapshot_now = snapshot;

		if (xid != InvalidTransactionId)
			txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);

		/* setup snapshot to allow catalog access */
		SetupHistoricSnapshot(snapshot_now, NULL);
		PG_TRY();
		{
			rb->message(rb, txn, lsn, false, prefix, message_size, message);

			TeardownHistoricSnapshot(false);
		}
		PG_CATCH();
		{
			TeardownHistoricSnapshot(true);
			PG_RE_THROW();
		}
		PG_END_TRY();
	}
}
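
/*
 * For example, SELECT pg_logical_emit_message(true, 'prefix', 'payload')
 * ends up here with transactional = true and is queued like any other
 * change, whereas transactional = false hands the message to the output
 * plugin immediately, under the snapshot the caller passed in.
 */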

/*
 * AssertTXNLsnOrder
 *		Verify LSN ordering of transaction lists in the reorderbuffer
 *
 * Other LSN-related invariants are checked too.
 *
 * No-op if assertions are not in use.
 */
static void
AssertTXNLsnOrder(ReorderBuffer *rb)
{
#ifdef USE_ASSERT_CHECKING
	dlist_iter	iter;
	XLogRecPtr	prev_first_lsn = InvalidXLogRecPtr;
	XLogRecPtr	prev_base_snap_lsn = InvalidXLogRecPtr;

	dlist_foreach(iter, &rb->toplevel_by_lsn)
	{
		ReorderBufferTXN *cur_txn = dlist_container(ReorderBufferTXN, node,
													iter.cur);

		/* start LSN must be set */
		Assert(cur_txn->first_lsn != InvalidXLogRecPtr);

		/* If there is an end LSN, it must be higher than start LSN */
		if (cur_txn->end_lsn != InvalidXLogRecPtr)
			Assert(cur_txn->first_lsn <= cur_txn->end_lsn);

		/* Current initial LSN must be strictly higher than previous */
		if (prev_first_lsn != InvalidXLogRecPtr)
			Assert(prev_first_lsn < cur_txn->first_lsn);

		/* known-as-subtxn txns must not be listed */
		Assert(!cur_txn->is_known_as_subxact);

		prev_first_lsn = cur_txn->first_lsn;
	}

	dlist_foreach(iter, &rb->txns_by_base_snapshot_lsn)
	{
		ReorderBufferTXN *cur_txn = dlist_container(ReorderBufferTXN,
													base_snapshot_node,
													iter.cur);

		/* base snapshot (and its LSN) must be set */
		Assert(cur_txn->base_snapshot != NULL);
		Assert(cur_txn->base_snapshot_lsn != InvalidXLogRecPtr);

		/* current LSN must be strictly higher than previous */
		if (prev_base_snap_lsn != InvalidXLogRecPtr)
			Assert(prev_base_snap_lsn < cur_txn->base_snapshot_lsn);

		/* known-as-subtxn txns must not be listed */
		Assert(!cur_txn->is_known_as_subxact);

		prev_base_snap_lsn = cur_txn->base_snapshot_lsn;
	}
#endif
}

/*
 * ReorderBufferGetOldestTXN
 *		Return oldest transaction in reorderbuffer
 */
ReorderBufferTXN *
ReorderBufferGetOldestTXN(ReorderBuffer *rb)
{
	ReorderBufferTXN *txn;

	AssertTXNLsnOrder(rb);

	if (dlist_is_empty(&rb->toplevel_by_lsn))
		return NULL;

	txn = dlist_head_element(ReorderBufferTXN, node, &rb->toplevel_by_lsn);

	Assert(!txn->is_known_as_subxact);
	Assert(txn->first_lsn != InvalidXLogRecPtr);
	return txn;
}

/*
 * ReorderBufferGetOldestXmin
 *		Return oldest Xmin in reorderbuffer
 *
 * Returns oldest possibly running Xid from the point of view of snapshots
 * used in the transactions kept by reorderbuffer, or InvalidTransactionId if
 * there are none.
 *
 * Since snapshots are assigned monotonically, this equals the Xmin of the
 * base snapshot with minimal base_snapshot_lsn.
 */
TransactionId
ReorderBufferGetOldestXmin(ReorderBuffer *rb)
{
	ReorderBufferTXN *txn;

	AssertTXNLsnOrder(rb);

	if (dlist_is_empty(&rb->txns_by_base_snapshot_lsn))
		return InvalidTransactionId;

	txn = dlist_head_element(ReorderBufferTXN, base_snapshot_node,
							 &rb->txns_by_base_snapshot_lsn);
	return txn->base_snapshot->xmin;
}

void
ReorderBufferSetRestartPoint(ReorderBuffer *rb, XLogRecPtr ptr)
{
	rb->current_restart_decoding_lsn = ptr;
}

/*
 * ReorderBufferAssignChild
 *
 * Make note that we know that subxid is a subtransaction of xid, seen as of
 * the given lsn.
 */
void
ReorderBufferAssignChild(ReorderBuffer *rb, TransactionId xid,
						 TransactionId subxid, XLogRecPtr lsn)
{
	ReorderBufferTXN *txn;
	ReorderBufferTXN *subtxn;
	bool		new_top;
	bool		new_sub;

	txn = ReorderBufferTXNByXid(rb, xid, true, &new_top, lsn, true);
	subtxn = ReorderBufferTXNByXid(rb, subxid, true, &new_sub, lsn, false);

	if (!new_sub)
	{
		if (subtxn->is_known_as_subxact)
		{
			/* already associated, nothing to do */
			return;
		}
		else
		{
			/*
			 * We already saw this transaction, but initially added it to the
			 * list of top-level txns.  Now that we know it's not top-level,
			 * remove it from there.
			 */
			dlist_delete(&subtxn->node);
		}
	}

	subtxn->is_known_as_subxact = true;
	subtxn->toplevel_xid = xid;
	Assert(subtxn->nsubtxns == 0);

	/* add to subtransaction list */
	dlist_push_tail(&txn->subtxns, &subtxn->node);
	txn->nsubtxns++;

	/* Possibly transfer the subtxn's snapshot to its top-level txn. */
	ReorderBufferTransferSnapToParent(txn, subtxn);

	/* Verify LSN-ordering invariant */
	AssertTXNLsnOrder(rb);
}

/*
 * ReorderBufferTransferSnapToParent
 *		Transfer base snapshot from subtxn to top-level txn, if needed
 *
 * This is done if the top-level txn doesn't have a base snapshot, or if the
 * subtxn's base snapshot has an earlier LSN than the top-level txn's base
 * snapshot's LSN.  This can happen if there are no changes in the toplevel
 * txn but there are some in the subtxn, or if the first change in the subtxn
 * has an earlier LSN than the first change in the top-level txn and we
 * learned about their kinship only now.
 *
 * The subtransaction's snapshot is cleared regardless of the transfer
 * happening, since it's not needed anymore in either case.
 *
 * We do this as soon as we become aware of their kinship, to avoid queueing
 * extra snapshots to txns known-as-subtxns -- only top-level txns will
 * receive further snapshots.
 */
static void
ReorderBufferTransferSnapToParent(ReorderBufferTXN *txn,
								  ReorderBufferTXN *subtxn)
{
	Assert(subtxn->toplevel_xid == txn->xid);

	if (subtxn->base_snapshot != NULL)
	{
		if (txn->base_snapshot == NULL ||
			subtxn->base_snapshot_lsn < txn->base_snapshot_lsn)
		{
			/*
			 * If the toplevel transaction already has a base snapshot but
			 * it's newer than the subxact's, purge it.
			 */
			if (txn->base_snapshot != NULL)
			{
				SnapBuildSnapDecRefcount(txn->base_snapshot);
				dlist_delete(&txn->base_snapshot_node);
			}

			/*
			 * The snapshot is now the top transaction's; transfer it, and
			 * adjust the list position of the top transaction in the list by
			 * moving it to where the subtransaction is.
			 */
			txn->base_snapshot = subtxn->base_snapshot;
			txn->base_snapshot_lsn = subtxn->base_snapshot_lsn;
			dlist_insert_before(&subtxn->base_snapshot_node,
								&txn->base_snapshot_node);

			/*
			 * The subtransaction doesn't have a snapshot anymore (so it
			 * mustn't be in the list.)
			 */
			subtxn->base_snapshot = NULL;
			subtxn->base_snapshot_lsn = InvalidXLogRecPtr;
			dlist_delete(&subtxn->base_snapshot_node);
		}
		else
		{
			/* Base snap of toplevel is fine, so subxact's is not needed */
			SnapBuildSnapDecRefcount(subtxn->base_snapshot);
			dlist_delete(&subtxn->base_snapshot_node);
			subtxn->base_snapshot = NULL;
			subtxn->base_snapshot_lsn = InvalidXLogRecPtr;
		}
	}
}

/*
 * Associate a subtransaction with its toplevel transaction at commit
 * time. There may be no further changes added after this.
 */
void
ReorderBufferCommitChild(ReorderBuffer *rb, TransactionId xid,
						 TransactionId subxid, XLogRecPtr commit_lsn,
						 XLogRecPtr end_lsn)
{
	ReorderBufferTXN *subtxn;

	subtxn = ReorderBufferTXNByXid(rb, subxid, false, NULL,
								   InvalidXLogRecPtr, false);

	/*
	 * No need to do anything if that subtxn didn't contain any changes
	 */
	if (!subtxn)
		return;

	subtxn->final_lsn = commit_lsn;
	subtxn->end_lsn = end_lsn;

	/*
	 * Assign this subxact as a child of the toplevel xact (no-op if already
	 * done.)
	 */
	ReorderBufferAssignChild(rb, xid, subxid, InvalidXLogRecPtr);
}


/*
 * Support for efficiently iterating over a transaction's and its
 * subtransactions' changes.
 *
 * We do this by performing a k-way merge between transactions/subtransactions.
 * For that we model the current heads of the different transactions as a
 * binary heap, so we easily know which (sub-)transaction has the change with
 * the smallest lsn next.
 *
 * We assume the changes in individual transactions are already sorted by LSN.
 */
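
/*
 * For example (illustrative): given a toplevel transaction T and subxacts
 * S1 and S2 whose change streams start at
 *
 *		T:	LSN 10, 40		S1: LSN 20, 50		S2: LSN 30
 *
 * the heap repeatedly yields the stream whose head has the smallest LSN,
 * so the changes are replayed at LSNs 10, 20, 30, 40, 50, in order.
 */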

/*
 * Binary heap comparison function.
 *
 * Note the inverted return values: lib/binaryheap.h implements a max-heap,
 * so by reporting the entry with the smaller LSN as "greater" we turn it
 * into a min-heap ordered by LSN.
 */
static int
ReorderBufferIterCompare(Datum a, Datum b, void *arg)
{
	ReorderBufferIterTXNState *state = (ReorderBufferIterTXNState *) arg;
	XLogRecPtr	pos_a = state->entries[DatumGetInt32(a)].lsn;
	XLogRecPtr	pos_b = state->entries[DatumGetInt32(b)].lsn;

	if (pos_a < pos_b)
		return 1;
	else if (pos_a == pos_b)
		return 0;
	return -1;
}

/*
 * Allocate & initialize an iterator which iterates in lsn order over a
 * transaction and all its subtransactions.
 *
 * Note: The iterator state is returned through the iter_state parameter
 * rather than the function's return value.  This is because the state gets
 * cleaned up in a PG_CATCH block in the caller, so we want to make sure the
 * caller gets back the state even if this function throws an exception.
 */
static void
ReorderBufferIterTXNInit(ReorderBuffer *rb, ReorderBufferTXN *txn,
						 ReorderBufferIterTXNState *volatile *iter_state)
{
	Size		nr_txns = 0;
	ReorderBufferIterTXNState *state;
	dlist_iter	cur_txn_i;
	int32		off;

	*iter_state = NULL;

	/*
	 * Calculate the size of our heap: one element for every transaction that
	 * contains changes.  (Besides the transactions already in the reorder
	 * buffer, we count the one we were directly passed.)
	 */
	if (txn->nentries > 0)
		nr_txns++;

	dlist_foreach(cur_txn_i, &txn->subtxns)
	{
		ReorderBufferTXN *cur_txn;

		cur_txn = dlist_container(ReorderBufferTXN, node, cur_txn_i.cur);

		if (cur_txn->nentries > 0)
			nr_txns++;
	}

	/*
	 * TODO: Consider adding a fastpath for the rather common nr_txns=1 case;
	 * no need to allocate/build a heap then.
	 */

	/* allocate iteration state */
	state = (ReorderBufferIterTXNState *)
		MemoryContextAllocZero(rb->context,
							   sizeof(ReorderBufferIterTXNState) +
							   sizeof(ReorderBufferIterTXNEntry) * nr_txns);

	state->nr_txns = nr_txns;
	dlist_init(&state->old_change);

	for (off = 0; off < state->nr_txns; off++)
	{
		state->entries[off].fd = -1;
		state->entries[off].segno = 0;
	}

	/* allocate heap */
	state->heap = binaryheap_allocate(state->nr_txns,
									  ReorderBufferIterCompare,
									  state);

	/* Now that the state fields are initialized, it is safe to return it. */
	*iter_state = state;

	/*
	 * Now insert items into the binary heap, in an unordered fashion.  (We
	 * will run a heap assembly step at the end; this is more efficient.)
	 */

	off = 0;

	/* add toplevel transaction if it contains changes */
	if (txn->nentries > 0)
	{
		ReorderBufferChange *cur_change;

		if (txn->serialized)
		{
			/* serialize remaining changes */
			ReorderBufferSerializeTXN(rb, txn);
			ReorderBufferRestoreChanges(rb, txn, &state->entries[off].fd,
										&state->entries[off].segno);
		}

		cur_change = dlist_head_element(ReorderBufferChange, node,
										&txn->changes);

		state->entries[off].lsn = cur_change->lsn;
		state->entries[off].change = cur_change;
		state->entries[off].txn = txn;

		binaryheap_add_unordered(state->heap, Int32GetDatum(off++));
	}

	/* add subtransactions if they contain changes */
	dlist_foreach(cur_txn_i, &txn->subtxns)
	{
		ReorderBufferTXN *cur_txn;

		cur_txn = dlist_container(ReorderBufferTXN, node, cur_txn_i.cur);

		if (cur_txn->nentries > 0)
		{
			ReorderBufferChange *cur_change;

			if (cur_txn->serialized)
			{
				/* serialize remaining changes */
				ReorderBufferSerializeTXN(rb, cur_txn);
				ReorderBufferRestoreChanges(rb, cur_txn,
											&state->entries[off].fd,
											&state->entries[off].segno);
			}
			cur_change = dlist_head_element(ReorderBufferChange, node,
											&cur_txn->changes);

			state->entries[off].lsn = cur_change->lsn;
			state->entries[off].change = cur_change;
			state->entries[off].txn = cur_txn;

			binaryheap_add_unordered(state->heap, Int32GetDatum(off++));
		}
	}

	/* assemble a valid binary heap */
	binaryheap_build(state->heap);
}

/*
 * Return the next change when iterating over a transaction and its
 * subtransactions.
 *
 * Returns NULL when no further changes exist.
 */
static ReorderBufferChange *
ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state)
{
	ReorderBufferChange *change;
	ReorderBufferIterTXNEntry *entry;
	int32		off;

	/* nothing there anymore */
	if (state->heap->bh_size == 0)
		return NULL;

	off = DatumGetInt32(binaryheap_first(state->heap));
	entry = &state->entries[off];

	/* free memory we might have "leaked" in the previous *Next call */
	if (!dlist_is_empty(&state->old_change))
	{
		change = dlist_container(ReorderBufferChange, node,
								 dlist_pop_head_node(&state->old_change));
		ReorderBufferReturnChange(rb, change);
		Assert(dlist_is_empty(&state->old_change));
	}

	change = entry->change;

	/*
	 * update heap with information about which transaction has the next
	 * relevant change in LSN order
	 */

	/* there are in-memory changes */
	if (dlist_has_next(&entry->txn->changes, &entry->change->node))
	{
		dlist_node *next = dlist_next_node(&entry->txn->changes, &change->node);
		ReorderBufferChange *next_change =
		dlist_container(ReorderBufferChange, node, next);

		/* txn stays the same */
		state->entries[off].lsn = next_change->lsn;
		state->entries[off].change = next_change;

		binaryheap_replace_first(state->heap, Int32GetDatum(off));
		return change;
	}

	/* try to load changes from disk */
	if (entry->txn->nentries != entry->txn->nentries_mem)
	{
		/*
		 * Ugly: restoring changes will reuse *Change records, thus delete the
		 * current one from the per-txn list and only free it in the next
		 * call.
		 */
		dlist_delete(&change->node);
		dlist_push_tail(&state->old_change, &change->node);

		if (ReorderBufferRestoreChanges(rb, entry->txn, &entry->fd,
										&state->entries[off].segno))
		{
			/* successfully restored changes from disk */
			ReorderBufferChange *next_change =
			dlist_head_element(ReorderBufferChange, node,
							   &entry->txn->changes);

			elog(DEBUG2, "restored %u/%u changes from disk",
				 (uint32) entry->txn->nentries_mem,
				 (uint32) entry->txn->nentries);

			Assert(entry->txn->nentries_mem);
			/* txn stays the same */
			state->entries[off].lsn = next_change->lsn;
			state->entries[off].change = next_change;
			binaryheap_replace_first(state->heap, Int32GetDatum(off));

			return change;
		}
	}

	/* ok, no changes there anymore, remove */
	binaryheap_remove_first(state->heap);

	return change;
}

/*
 * Deallocate the iterator
 */
static void
ReorderBufferIterTXNFinish(ReorderBuffer *rb,
						   ReorderBufferIterTXNState *state)
{
	int32		off;

	for (off = 0; off < state->nr_txns; off++)
	{
		if (state->entries[off].fd != -1)
			FileClose(state->entries[off].fd);
	}

	/* free memory we might have "leaked" in the last *Next call */
	if (!dlist_is_empty(&state->old_change))
	{
		ReorderBufferChange *change;

		change = dlist_container(ReorderBufferChange, node,
								 dlist_pop_head_node(&state->old_change));
		ReorderBufferReturnChange(rb, change);
		Assert(dlist_is_empty(&state->old_change));
	}

	binaryheap_free(state->heap);
	pfree(state);
}
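
/*
 * The three functions above are used together like this (see
 * ReorderBufferCommit() below for the real, error-handled version):
 *
 *		ReorderBufferIterTXNInit(rb, txn, &iterstate);
 *		while ((change = ReorderBufferIterTXNNext(rb, iterstate)) != NULL)
 *			... replay the change ...
 *		ReorderBufferIterTXNFinish(rb, iterstate);
 */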

/*
 * Cleanup the contents of a transaction, usually after the transaction
 * committed or aborted.
 */
static void
ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
{
	bool		found;
	dlist_mutable_iter iter;

	/* cleanup subtransactions & their changes */
	dlist_foreach_modify(iter, &txn->subtxns)
	{
		ReorderBufferTXN *subtxn;

		subtxn = dlist_container(ReorderBufferTXN, node, iter.cur);

		/*
		 * Subtransactions are always associated with the toplevel TXN, even
		 * if they originally happened inside another subtxn, so we won't
		 * ever recurse more than one level deep here.
		 */
		Assert(subtxn->is_known_as_subxact);
		Assert(subtxn->nsubtxns == 0);

		ReorderBufferCleanupTXN(rb, subtxn);
	}

	/* cleanup changes in the toplevel txn */
	dlist_foreach_modify(iter, &txn->changes)
	{
		ReorderBufferChange *change;

		change = dlist_container(ReorderBufferChange, node, iter.cur);

		ReorderBufferReturnChange(rb, change);
	}

	/*
	 * Cleanup the tuplecids we stored for decoding catalog snapshot access.
	 * They are always stored in the toplevel transaction.
	 */
	dlist_foreach_modify(iter, &txn->tuplecids)
	{
		ReorderBufferChange *change;

		change = dlist_container(ReorderBufferChange, node, iter.cur);
		Assert(change->action == REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID);
		ReorderBufferReturnChange(rb, change);
	}

	/*
	 * Cleanup the base snapshot, if set.
	 */
	if (txn->base_snapshot != NULL)
	{
		SnapBuildSnapDecRefcount(txn->base_snapshot);
		dlist_delete(&txn->base_snapshot_node);
	}

	/*
	 * Remove TXN from its containing list.
	 *
	 * Note: if txn->is_known_as_subxact, we are deleting the TXN from its
	 * parent's list of known subxacts; this leaves the parent's nsubxacts
	 * count too high, but we don't care.  Otherwise, we are deleting the TXN
	 * from the LSN-ordered list of toplevel TXNs.
	 */
	dlist_delete(&txn->node);

	/* now remove reference from buffer */
	hash_search(rb->by_txn,
				(void *) &txn->xid,
				HASH_REMOVE,
				&found);
	Assert(found);

	/* remove entries spilled to disk */
	if (txn->serialized)
		ReorderBufferRestoreCleanup(rb, txn);

	/* deallocate */
	ReorderBufferReturnTXN(rb, txn);
}

/*
 * Build a hash with a (relfilenode, ctid) -> (cmin, cmax) mapping for use by
 * tqual.c's HeapTupleSatisfiesHistoricMVCC.
 */
static void
ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
{
	dlist_iter	iter;
	HASHCTL		hash_ctl;

	if (!txn->has_catalog_changes || dlist_is_empty(&txn->tuplecids))
		return;

	memset(&hash_ctl, 0, sizeof(hash_ctl));

	hash_ctl.keysize = sizeof(ReorderBufferTupleCidKey);
	hash_ctl.entrysize = sizeof(ReorderBufferTupleCidEnt);
	hash_ctl.hcxt = rb->context;

	/*
	 * create the hash with the exact number of to-be-stored tuplecids from
	 * the start
	 */
	txn->tuplecid_hash =
		hash_create("ReorderBufferTupleCid", txn->ntuplecids, &hash_ctl,
					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

	dlist_foreach(iter, &txn->tuplecids)
	{
		ReorderBufferTupleCidKey key;
		ReorderBufferTupleCidEnt *ent;
		bool		found;
		ReorderBufferChange *change;

		change = dlist_container(ReorderBufferChange, node, iter.cur);

		Assert(change->action == REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID);

		/* be careful about padding */
		memset(&key, 0, sizeof(ReorderBufferTupleCidKey));

		key.relnode = change->data.tuplecid.node;

		ItemPointerCopy(&change->data.tuplecid.tid,
						&key.tid);

		ent = (ReorderBufferTupleCidEnt *)
			hash_search(txn->tuplecid_hash,
						(void *) &key,
						HASH_ENTER,
						&found);
		if (!found)
		{
			ent->cmin = change->data.tuplecid.cmin;
			ent->cmax = change->data.tuplecid.cmax;
			ent->combocid = change->data.tuplecid.combocid;
		}
		else
		{
			/*
			 * Maybe we already saw this tuple before in this transaction,
			 * but if so it must have the same cmin.
			 */
			Assert(ent->cmin == change->data.tuplecid.cmin);

			/*
			 * cmax may be initially invalid, but once set it can only grow,
			 * and never become invalid again.
			 */
			Assert((ent->cmax == InvalidCommandId) ||
				   ((change->data.tuplecid.cmax != InvalidCommandId) &&
					(change->data.tuplecid.cmax > ent->cmax)));
			ent->cmax = change->data.tuplecid.cmax;
		}
	}
}
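
/*
 * This hash is what allows HeapTupleSatisfiesHistoricMVCC (via
 * ResolveCminCmaxDuringDecoding, defined later in this file) to look up a
 * catalog tuple's effective cmin/cmax by (relfilenode, ctid), since combo
 * CIDs cannot be resolved from outside the backend that generated them.
 */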
1365 
1366 /*
1367  * Copy a provided snapshot so we can modify it privately. This is needed so
1368  * that catalog modifying transactions can look into intermediate catalog
1369  * states.
1370  */
1371 static Snapshot
ReorderBufferCopySnap(ReorderBuffer * rb,Snapshot orig_snap,ReorderBufferTXN * txn,CommandId cid)1372 ReorderBufferCopySnap(ReorderBuffer *rb, Snapshot orig_snap,
1373 					  ReorderBufferTXN *txn, CommandId cid)
1374 {
1375 	Snapshot	snap;
1376 	dlist_iter	iter;
1377 	int			i = 0;
1378 	Size		size;
1379 
1380 	size = sizeof(SnapshotData) +
1381 		sizeof(TransactionId) * orig_snap->xcnt +
1382 		sizeof(TransactionId) * (txn->nsubtxns + 1);
1383 
1384 	snap = MemoryContextAllocZero(rb->context, size);
1385 	memcpy(snap, orig_snap, sizeof(SnapshotData));
1386 
1387 	snap->copied = true;
1388 	snap->active_count = 1;		/* mark as active so nobody frees it */
1389 	snap->regd_count = 0;
1390 	snap->xip = (TransactionId *) (snap + 1);
1391 
1392 	memcpy(snap->xip, orig_snap->xip, sizeof(TransactionId) * snap->xcnt);
1393 
1394 	/*
1395 	 * snap->subxip contains all txids that belong to our transaction which we
1396 	 * need to check via cmin/cmax. That's why we store the toplevel
1397 	 * transaction in there as well.
1398 	 */
1399 	snap->subxip = snap->xip + snap->xcnt;
1400 	snap->subxip[i++] = txn->xid;
1401 
1402 	/*
1403 	 * nsubxcnt isn't decreased when subtransactions abort, so count manually.
1404 	 * Since it's an upper boundary it is safe to use it for the allocation
1405 	 * above.
1406 	 */
1407 	snap->subxcnt = 1;
1408 
1409 	dlist_foreach(iter, &txn->subtxns)
1410 	{
1411 		ReorderBufferTXN *sub_txn;
1412 
1413 		sub_txn = dlist_container(ReorderBufferTXN, node, iter.cur);
1414 		snap->subxip[i++] = sub_txn->xid;
1415 		snap->subxcnt++;
1416 	}
1417 
1418 	/* sort so we can bsearch() later */
1419 	qsort(snap->subxip, snap->subxcnt, sizeof(TransactionId), xidComparator);
1420 
1421 	/* store the specified current CommandId */
1422 	snap->curcid = cid;
1423 
1424 	return snap;
1425 }
1426 
1427 /*
1428  * Free a previously ReorderBufferCopySnap'ed snapshot
1429  */
1430 static void
ReorderBufferFreeSnap(ReorderBuffer * rb,Snapshot snap)1431 ReorderBufferFreeSnap(ReorderBuffer *rb, Snapshot snap)
1432 {
1433 	if (snap->copied)
1434 		pfree(snap);
1435 	else
1436 		SnapBuildSnapDecRefcount(snap);
1437 }
1438 
1439 /*
1440  * Perform the replay of a transaction and its non-aborted subtransactions.
1441  *
1442  * Subtransactions previously have to be processed by
1443  * ReorderBufferCommitChild(), even if previously assigned to the toplevel
1444  * transaction with ReorderBufferAssignChild.
1445  *
1446  * We currently can only decode a transaction's contents when its commit
1447  * record is read because that's the only place where we know about cache
1448  * invalidations. Thus, once a toplevel commit is read, we iterate over the top
1449  * and subtransactions (using a k-way merge) and replay the changes in lsn
1450  * order.
1451  */
1452 void
ReorderBufferCommit(ReorderBuffer * rb,TransactionId xid,XLogRecPtr commit_lsn,XLogRecPtr end_lsn,TimestampTz commit_time,RepOriginId origin_id,XLogRecPtr origin_lsn)1453 ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
1454 					XLogRecPtr commit_lsn, XLogRecPtr end_lsn,
1455 					TimestampTz commit_time,
1456 					RepOriginId origin_id, XLogRecPtr origin_lsn)
1457 {
1458 	ReorderBufferTXN *txn;
1459 	volatile Snapshot snapshot_now;
1460 	volatile CommandId command_id = FirstCommandId;
1461 	bool		using_subtxn;
1462 	ReorderBufferIterTXNState *volatile iterstate = NULL;
1463 
1464 	txn = ReorderBufferTXNByXid(rb, xid, false, NULL, InvalidXLogRecPtr,
1465 								false);
1466 
1467 	/* unknown transaction, nothing to replay */
1468 	if (txn == NULL)
1469 		return;
1470 
1471 	txn->final_lsn = commit_lsn;
1472 	txn->end_lsn = end_lsn;
1473 	txn->commit_time = commit_time;
1474 	txn->origin_id = origin_id;
1475 	txn->origin_lsn = origin_lsn;
1476 
1477 	/*
1478 	 * If this transaction has no snapshot, it didn't make any changes to the
1479 	 * database, so there's nothing to decode.  Note that
1480 	 * ReorderBufferCommitChild will have transferred any snapshots from
1481 	 * subtransactions if there were any.
1482 	 */
1483 	if (txn->base_snapshot == NULL)
1484 	{
1485 		Assert(txn->ninvalidations == 0);
1486 		ReorderBufferCleanupTXN(rb, txn);
1487 		return;
1488 	}
1489 
1490 	snapshot_now = txn->base_snapshot;
1491 
1492 	/* build data to be able to lookup the CommandIds of catalog tuples */
1493 	ReorderBufferBuildTupleCidHash(rb, txn);
1494 
1495 	/* setup the initial snapshot */
1496 	SetupHistoricSnapshot(snapshot_now, txn->tuplecid_hash);
1497 
1498 	/*
1499 	 * Decoding needs access to syscaches et al., which in turn use
1500 	 * heavyweight locks and such. Thus we need to have enough state around to
1501 	 * keep track of those.  The easiest way is to simply use a transaction
1502 	 * internally.  That also allows us to easily enforce that nothing writes
1503 	 * to the database by checking for xid assignments.
1504 	 *
1505 	 * When we're called via the SQL SRF there's already a transaction
1506 	 * started, so start an explicit subtransaction there.
1507 	 */
1508 	using_subtxn = IsTransactionOrTransactionBlock();
1509 
1510 	PG_TRY();
1511 	{
1512 		ReorderBufferChange *change;
1513 		ReorderBufferChange *specinsert = NULL;
1514 
1515 		if (using_subtxn)
1516 			BeginInternalSubTransaction("replay");
1517 		else
1518 			StartTransactionCommand();
1519 
1520 		rb->begin(rb, txn);
1521 
1522 		ReorderBufferIterTXNInit(rb, txn, &iterstate);
1523 		while ((change = ReorderBufferIterTXNNext(rb, iterstate)) != NULL)
1524 		{
1525 			Relation	relation = NULL;
1526 			Oid			reloid;
1527 
1528 			switch (change->action)
1529 			{
1530 				case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
1531 
1532 					/*
1533 					 * Confirmation for speculative insertion arrived. Simply
1534 					 * use as a normal record. It'll be cleaned up at the end
1535 					 * of INSERT processing.
1536 					 */
1537 					if (specinsert == NULL)
1538 						elog(ERROR, "invalid ordering of speculative insertion changes");
1539 					Assert(specinsert->data.tp.oldtuple == NULL);
1540 					change = specinsert;
1541 					change->action = REORDER_BUFFER_CHANGE_INSERT;
1542 
1543 					/* intentionally fall through */
1544 				case REORDER_BUFFER_CHANGE_INSERT:
1545 				case REORDER_BUFFER_CHANGE_UPDATE:
1546 				case REORDER_BUFFER_CHANGE_DELETE:
1547 					Assert(snapshot_now);
1548 
1549 					reloid = RelidByRelfilenode(change->data.tp.relnode.spcNode,
1550 												change->data.tp.relnode.relNode);
1551 
1552 					/*
1553 					 * Mapped catalog tuple without data, emitted while
1554 					 * catalog table was in the process of being rewritten. We
1555 					 * can fail to look up the relfilenode, because the the
1556 					 * relmapper has no "historic" view, in contrast to normal
1557 					 * the normal catalog during decoding. Thus repeated
1558 					 * rewrites can cause a lookup failure. That's OK because
1559 					 * we do not decode catalog changes anyway. Normally such
1560 					 * tuples would be skipped over below, but we can't
1561 					 * identify whether the table should be logically logged
1562 					 * without mapping the relfilenode to the oid.
1563 					 */
1564 					if (reloid == InvalidOid &&
1565 						change->data.tp.newtuple == NULL &&
1566 						change->data.tp.oldtuple == NULL)
1567 						goto change_done;
1568 					else if (reloid == InvalidOid)
1569 						elog(ERROR, "could not map filenode \"%s\" to relation OID",
1570 							 relpathperm(change->data.tp.relnode,
1571 										 MAIN_FORKNUM));
1572 
1573 					relation = RelationIdGetRelation(reloid);
1574 
1575 					if (!RelationIsValid(relation))
1576 						elog(ERROR, "could not open relation with OID %u (for filenode \"%s\")",
1577 							 reloid,
1578 							 relpathperm(change->data.tp.relnode,
1579 										 MAIN_FORKNUM));
1580 
1581 					if (!RelationIsLogicallyLogged(relation))
1582 						goto change_done;
1583 
1584 					/*
1585 					 * For now ignore sequence changes entirely. Most of the
1586 					 * time they don't log changes using records we
1587 					 * understand, so it doesn't make sense to handle the few
1588 					 * cases we do.
1589 					 */
1590 					if (relation->rd_rel->relkind == RELKIND_SEQUENCE)
1591 						goto change_done;
1592 
1593 					/* user-triggered change */
1594 					if (!IsToastRelation(relation))
1595 					{
1596 						ReorderBufferToastReplace(rb, txn, relation, change);
1597 						rb->apply_change(rb, txn, relation, change);
1598 
1599 						/*
1600 						 * Only clear reassembled toast chunks if we're sure
1601 						 * they're not required anymore. The creator of the
1602 						 * tuple tells us.
1603 						 */
1604 						if (change->data.tp.clear_toast_afterwards)
1605 							ReorderBufferToastReset(rb, txn);
1606 					}
1607 					/* we're not interested in toast deletions */
1608 					else if (change->action == REORDER_BUFFER_CHANGE_INSERT)
1609 					{
1610 						/*
1611 						 * Need to reassemble the full toasted Datum in
1612 						 * memory, to ensure the chunks don't get reused till
1613 						 * we're done remove it from the list of this
1614 						 * transaction's changes. Otherwise it will get
1615 						 * freed/reused while restoring spooled data from
1616 						 * disk.
1617 						 */
1618 						Assert(change->data.tp.newtuple != NULL);
1619 
1620 						dlist_delete(&change->node);
1621 						ReorderBufferToastAppendChunk(rb, txn, relation,
1622 													  change);
1623 					}
1624 
1625 			change_done:
1626 
1627 					/*
1628 					 * If speculative insertion was confirmed, the record isn't
1629 					 * needed anymore.
1630 					 */
1631 					if (specinsert != NULL)
1632 					{
1633 						ReorderBufferReturnChange(rb, specinsert);
1634 						specinsert = NULL;
1635 					}
1636 
1637 					if (relation != NULL)
1638 					{
1639 						RelationClose(relation);
1640 						relation = NULL;
1641 					}
1642 					break;
1643 
1644 				case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
1645 
1646 					/*
1647 					 * Speculative insertions are dealt with by delaying the
1648 					 * processing of the insert until the confirmation record
1649 					 * arrives. For that we simply unlink the record from the
1650 					 * chain, so it does not get freed/reused while restoring
1651 					 * spooled data from disk.
1652 					 *
1653 					 * This is safe in the face of concurrent catalog changes
1654 					 * because the relevant relation can't be changed between
1655 					 * speculative insertion and confirmation due to
1656 					 * CheckTableNotInUse() and locking.
1657 					 */
1658 
1659 					/* clear out a pending (and thus failed) speculation */
1660 					if (specinsert != NULL)
1661 					{
1662 						ReorderBufferReturnChange(rb, specinsert);
1663 						specinsert = NULL;
1664 					}
1665 
1666 					/* and memorize the pending insertion */
1667 					dlist_delete(&change->node);
1668 					specinsert = change;
1669 					break;
1670 
1671 				case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_ABORT:
1672 
1673 					/*
1674 					 * An abort record for the speculative insertion arrived,
1675 					 * so clean up the specinsert tuple and the toast hash.
1676 					 *
1677 					 * Note that we get the spec abort change for each toast
1678 					 * entry but we need to perform the cleanup only the first
1679 					 * time we get it for the main table.
1680 					 */
1681 					if (specinsert != NULL)
1682 					{
1683 						/*
1684 						 * We must clean the toast hash before processing a
1685 						 * completely new tuple to avoid confusion about the
1686 						 * previous tuple's toast chunks.
1687 						 */
1688 						Assert(change->data.tp.clear_toast_afterwards);
1689 						ReorderBufferToastReset(rb, txn);
1690 
1691 						/* We don't need this record anymore. */
1692 						ReorderBufferReturnChange(rb, specinsert);
1693 						specinsert = NULL;
1694 					}
1695 					break;
1696 
1697 				case REORDER_BUFFER_CHANGE_MESSAGE:
1698 					rb->message(rb, txn, change->lsn, true,
1699 								change->data.msg.prefix,
1700 								change->data.msg.message_size,
1701 								change->data.msg.message);
1702 					break;
1703 
1704 				case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT:
1705 					/* get rid of the old */
1706 					TeardownHistoricSnapshot(false);
1707 
1708 					if (snapshot_now->copied)
1709 					{
1710 						ReorderBufferFreeSnap(rb, snapshot_now);
1711 						snapshot_now =
1712 							ReorderBufferCopySnap(rb, change->data.snapshot,
1713 												  txn, command_id);
1714 					}
1715 
1716 					/*
1717 					 * Restored from disk, need to be careful not to double
1718 					 * free. We could introduce refcounting for that, but for
1719 					 * now this seems infrequent enough not to care.
1720 					 */
1721 					else if (change->data.snapshot->copied)
1722 					{
1723 						snapshot_now =
1724 							ReorderBufferCopySnap(rb, change->data.snapshot,
1725 												  txn, command_id);
1726 					}
1727 					else
1728 					{
1729 						snapshot_now = change->data.snapshot;
1730 					}
1731 
1732 
1733 					/* and continue with the new one */
1734 					SetupHistoricSnapshot(snapshot_now, txn->tuplecid_hash);
1735 					break;
1736 
1737 				case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID:
1738 					Assert(change->data.command_id != InvalidCommandId);
1739 
1740 					if (command_id < change->data.command_id)
1741 					{
1742 						command_id = change->data.command_id;
1743 
1744 						if (!snapshot_now->copied)
1745 						{
1746 							/* we don't use the global one anymore */
1747 							snapshot_now = ReorderBufferCopySnap(rb, snapshot_now,
1748 																 txn, command_id);
1749 						}
1750 
1751 						snapshot_now->curcid = command_id;
1752 
1753 						TeardownHistoricSnapshot(false);
1754 						SetupHistoricSnapshot(snapshot_now, txn->tuplecid_hash);
1755 
1756 						/*
1757 						 * Every time the CommandId is incremented, we could
1758 						 * see new catalog contents, so execute all
1759 						 * invalidations.
1760 						 */
1761 						ReorderBufferExecuteInvalidations(rb, txn);
1762 					}
1763 
1764 					break;
1765 
1766 				case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID:
1767 					elog(ERROR, "tuplecid value in changequeue");
1768 					break;
1769 			}
1770 		}
1771 
1772 		/* speculative insertion record must be freed by now */
1773 		Assert(!specinsert);
1774 
1775 		/* clean up the iterator */
1776 		ReorderBufferIterTXNFinish(rb, iterstate);
1777 		iterstate = NULL;
1778 
1779 		/* call commit callback */
1780 		rb->commit(rb, txn, commit_lsn);
1781 
1782 		/* this is just a sanity check against bad output plugin behaviour */
1783 		if (GetCurrentTransactionIdIfAny() != InvalidTransactionId)
1784 			elog(ERROR, "output plugin used XID %u",
1785 				 GetCurrentTransactionId());
1786 
1787 		/* cleanup */
1788 		TeardownHistoricSnapshot(false);
1789 
1790 		/*
1791 		 * Aborting the current (sub-)transaction as a whole has the right
1792 		 * semantics. We want all locks acquired in here to be released, not
1793 		 * reassigned to the parent, and we do not want any database access
1794 		 * to have persistent effects.
1795 		 */
1796 		AbortCurrentTransaction();
1797 
1798 		/* make sure there's no cache pollution */
1799 		ReorderBufferExecuteInvalidations(rb, txn);
1800 
1801 		if (using_subtxn)
1802 			RollbackAndReleaseCurrentSubTransaction();
1803 
1804 		if (snapshot_now->copied)
1805 			ReorderBufferFreeSnap(rb, snapshot_now);
1806 
1807 		/* remove potential on-disk data, and deallocate */
1808 		ReorderBufferCleanupTXN(rb, txn);
1809 	}
1810 	PG_CATCH();
1811 	{
1812 		/* TODO: Encapsulate cleanup from the PG_TRY and PG_CATCH blocks */
1813 		if (iterstate)
1814 			ReorderBufferIterTXNFinish(rb, iterstate);
1815 
1816 		TeardownHistoricSnapshot(true);
1817 
1818 		/*
1819 		 * Force cache invalidation to happen outside of a valid transaction
1820 		 * to prevent catalog access as we just caught an error.
1821 		 */
1822 		AbortCurrentTransaction();
1823 
1824 		/* make sure there's no cache pollution */
1825 		ReorderBufferExecuteInvalidations(rb, txn);
1826 
1827 		if (using_subtxn)
1828 			RollbackAndReleaseCurrentSubTransaction();
1829 
1830 		if (snapshot_now->copied)
1831 			ReorderBufferFreeSnap(rb, snapshot_now);
1832 
1833 		/* remove potential on-disk data, and deallocate */
1834 		ReorderBufferCleanupTXN(rb, txn);
1835 
1836 		PG_RE_THROW();
1837 	}
1838 	PG_END_TRY();
1839 }
1840 
1841 /*
1842  * Abort a transaction that possibly has previous changes. Needs to be called
1843  * first for subtransactions and then for the toplevel xid.
1844  *
1845  * NB: Transactions handled here have to have actively aborted (i.e. have
1846  * produced an abort record). Implicitly aborted transactions are handled via
1847  * ReorderBufferAbortOld(); transactions we're just not interested in, but
1848  * which have committed are handled in ReorderBufferForget().
1849  *
1850  * This function purges this transaction and its contents from memory and
1851  * disk.
1852  */
1853 void
1854 ReorderBufferAbort(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
1855 {
1856 	ReorderBufferTXN *txn;
1857 
1858 	txn = ReorderBufferTXNByXid(rb, xid, false, NULL, InvalidXLogRecPtr,
1859 								false);
1860 
1861 	/* unknown, nothing to remove */
1862 	if (txn == NULL)
1863 		return;
1864 
1865 	/* cosmetic... */
1866 	txn->final_lsn = lsn;
1867 
1868 	/* remove potential on-disk data, and deallocate */
1869 	ReorderBufferCleanupTXN(rb, txn);
1870 }
1871 
1872 /*
1873  * Abort all transactions that aren't actually running anymore because the
1874  * server restarted.
1875  *
1876  * NB: These really have to be transactions that have aborted due to a server
1877  * crash/immediate restart, as we don't deal with invalidations here.
1878  */
1879 void
1880 ReorderBufferAbortOld(ReorderBuffer *rb, TransactionId oldestRunningXid)
1881 {
1882 	dlist_mutable_iter it;
1883 
1884 	/*
1885 	 * Iterate through all (potential) toplevel TXNs and abort all that are
1886 	 * older than anything that can possibly still be running. Once we've
1887 	 * found the first one that is alive we stop; there might be some that
1888 	 * acquired an xid earlier but started writing later, but that's unlikely
1889 	 * and they will be cleaned up in a later call to this function.
1890 	 */
1891 	dlist_foreach_modify(it, &rb->toplevel_by_lsn)
1892 	{
1893 		ReorderBufferTXN *txn;
1894 
1895 		txn = dlist_container(ReorderBufferTXN, node, it.cur);
1896 
1897 		if (TransactionIdPrecedes(txn->xid, oldestRunningXid))
1898 		{
1899 			elog(DEBUG2, "aborting old transaction %u", txn->xid);
1900 
1901 			/* remove potential on-disk data, and deallocate this tx */
1902 			ReorderBufferCleanupTXN(rb, txn);
1903 		}
1904 		else
1905 			return;
1906 	}
1907 }
1908 
1909 /*
1910  * Forget the contents of a transaction if we aren't interested in its
1911  * contents. Needs to be called first for subtransactions and then for the
1912  * toplevel xid.
1913  *
1914  * This is significantly different from ReorderBufferAbort() because
1915  * transactions that have committed need to be treated differently from aborted
1916  * ones since they may have modified the catalog.
1917  *
1918  * Note that this is only allowed to be called at the moment a transaction
1919  * commit has just been read, not earlier; otherwise later records referring
1920  * to this xid might re-create the transaction incompletely.
1921  */
1922 void
1923 ReorderBufferForget(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
1924 {
1925 	ReorderBufferTXN *txn;
1926 
1927 	txn = ReorderBufferTXNByXid(rb, xid, false, NULL, InvalidXLogRecPtr,
1928 								false);
1929 
1930 	/* unknown, nothing to forget */
1931 	if (txn == NULL)
1932 		return;
1933 
1934 	/* cosmetic... */
1935 	txn->final_lsn = lsn;
1936 
1937 	/*
1938 	 * Process cache invalidation messages if there are any. Even if we're not
1939 	 * interested in the transaction's contents, it could have manipulated the
1940 	 * catalog and we need to update the caches according to that.
1941 	 */
1942 	if (txn->base_snapshot != NULL && txn->ninvalidations > 0)
1943 		ReorderBufferImmediateInvalidation(rb, txn->ninvalidations,
1944 										   txn->invalidations);
1945 	else
1946 		Assert(txn->ninvalidations == 0);
1947 
1948 	/* remove potential on-disk data, and deallocate */
1949 	ReorderBufferCleanupTXN(rb, txn);
1950 }
1951 
1952 /*
1953  * Execute invalidations happening outside the context of a decoded
1954  * transaction. That currently happens either for xid-less commits
1955  * (c.f. RecordTransactionCommit()) or for invalidations in uninteresting
1956  * transactions (via ReorderBufferForget()).
1957  */
1958 void
1959 ReorderBufferImmediateInvalidation(ReorderBuffer *rb, uint32 ninvalidations,
1960 								   SharedInvalidationMessage *invalidations)
1961 {
1962 	bool		use_subtxn = IsTransactionOrTransactionBlock();
1963 	int			i;
1964 
1965 	if (use_subtxn)
1966 		BeginInternalSubTransaction("replay");
1967 
1968 	/*
1969 	 * Force invalidations to happen outside of a valid transaction - that way
1970 	 * entries will just be marked as invalid without accessing the catalog.
1971 	 * That's advantageous because we don't need to set up the full state
1972 	 * necessary for catalog access.
1973 	 */
1974 	if (use_subtxn)
1975 		AbortCurrentTransaction();
1976 
1977 	for (i = 0; i < ninvalidations; i++)
1978 		LocalExecuteInvalidationMessage(&invalidations[i]);
1979 
1980 	if (use_subtxn)
1981 		RollbackAndReleaseCurrentSubTransaction();
1982 }
1983 
1984 /*
1985  * Tell reorderbuffer about an xid seen in the WAL stream. Has to be called at
1986  * least once for every xid in XLogRecord->xl_xid (xids contained in other
1987  * places in records may, but do not have to, be passed through here).
1988  *
1989  * Reorderbuffer keeps some datastructures about transactions in LSN order,
1990  * for efficiency. To do that it has to know about when transactions are seen
1991  * first in the WAL. As many types of records are not actually interesting for
1992  * logical decoding, they do not necessarily pass through here.
1993  */
1994 void
1995 ReorderBufferProcessXid(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
1996 {
1997 	/* many records won't have an xid assigned, centralize check here */
1998 	if (xid != InvalidTransactionId)
1999 		ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
2000 }
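
#ifdef NOT_USED
/*
 * Illustrative sketch only (not compiled in): a decoding loop is expected to
 * route every record's xid through ReorderBufferProcessXid() before doing any
 * record-specific handling, roughly as in this hypothetical function; c.f.
 * decode.c for the real caller.
 */
static void
ExampleProcessRecord(ReorderBuffer *rb, XLogReaderState *record)
{
	/* make sure a TXN entry exists for the xid, keyed by first-seen LSN */
	ReorderBufferProcessXid(rb, XLogRecGetXid(record), record->ReadRecPtr);

	/* record-type specific decoding would follow here */
}
#endif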
2001 
2002 /*
2003  * Add a new snapshot to this transaction that may only be used after lsn 'lsn'
2004  * because the previous snapshot doesn't describe the catalog correctly for
2005  * following rows.
2006  */
2007 void
2008 ReorderBufferAddSnapshot(ReorderBuffer *rb, TransactionId xid,
2009 						 XLogRecPtr lsn, Snapshot snap)
2010 {
2011 	ReorderBufferChange *change = ReorderBufferGetChange(rb);
2012 
2013 	change->data.snapshot = snap;
2014 	change->action = REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT;
2015 
2016 	ReorderBufferQueueChange(rb, xid, lsn, change);
2017 }
2018 
2019 /*
2020  * Set up the transaction's base snapshot.
2021  *
2022  * If we know that xid is a subtransaction, set the base snapshot on the
2023  * top-level transaction instead.
2024  */
2025 void
2026 ReorderBufferSetBaseSnapshot(ReorderBuffer *rb, TransactionId xid,
2027 							 XLogRecPtr lsn, Snapshot snap)
2028 {
2029 	ReorderBufferTXN *txn;
2030 	bool		is_new;
2031 
2032 	AssertArg(snap != NULL);
2033 
2034 	/*
2035 	 * Fetch the transaction to operate on.  If we know it's a subtransaction,
2036 	 * operate on its top-level transaction instead.
2037 	 */
2038 	txn = ReorderBufferTXNByXid(rb, xid, true, &is_new, lsn, true);
2039 	if (txn->is_known_as_subxact)
2040 		txn = ReorderBufferTXNByXid(rb, txn->toplevel_xid, false,
2041 									NULL, InvalidXLogRecPtr, false);
2042 	Assert(txn->base_snapshot == NULL);
2043 
2044 	txn->base_snapshot = snap;
2045 	txn->base_snapshot_lsn = lsn;
2046 	dlist_push_tail(&rb->txns_by_base_snapshot_lsn, &txn->base_snapshot_node);
2047 
2048 	AssertTXNLsnOrder(rb);
2049 }
2050 
2051 /*
2052  * Access the catalog with this CommandId at this point in the changestream.
2053  *
2054  * May only be called for command ids > 1
2055  */
2056 void
2057 ReorderBufferAddNewCommandId(ReorderBuffer *rb, TransactionId xid,
2058 							 XLogRecPtr lsn, CommandId cid)
2059 {
2060 	ReorderBufferChange *change = ReorderBufferGetChange(rb);
2061 
2062 	change->data.command_id = cid;
2063 	change->action = REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID;
2064 
2065 	ReorderBufferQueueChange(rb, xid, lsn, change);
2066 }
2067 
2068 
2069 /*
2070  * Add a new (relfilenode, tid) -> (cmin, cmax) mapping.
2071  */
2072 void
2073 ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid,
2074 							 XLogRecPtr lsn, RelFileNode node,
2075 							 ItemPointerData tid, CommandId cmin,
2076 							 CommandId cmax, CommandId combocid)
2077 {
2078 	ReorderBufferChange *change = ReorderBufferGetChange(rb);
2079 	ReorderBufferTXN *txn;
2080 
2081 	txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
2082 
2083 	change->data.tuplecid.node = node;
2084 	change->data.tuplecid.tid = tid;
2085 	change->data.tuplecid.cmin = cmin;
2086 	change->data.tuplecid.cmax = cmax;
2087 	change->data.tuplecid.combocid = combocid;
2088 	change->lsn = lsn;
2089 	change->action = REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID;
2090 
2091 	dlist_push_tail(&txn->tuplecids, &change->node);
2092 	txn->ntuplecids++;
2093 }
2094 
2095 /*
2096  * Set up the invalidations of the toplevel transaction.
2097  *
2098  * This needs to be done before ReorderBufferCommit is called!
2099  */
2100 void
2101 ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid,
2102 							  XLogRecPtr lsn, Size nmsgs,
2103 							  SharedInvalidationMessage *msgs)
2104 {
2105 	ReorderBufferTXN *txn;
2106 
2107 	txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
2108 
2109 	if (txn->ninvalidations != 0)
2110 		elog(ERROR, "only ever add one set of invalidations");
2111 
2112 	Assert(nmsgs > 0);
2113 
2114 	txn->ninvalidations = nmsgs;
2115 	txn->invalidations = (SharedInvalidationMessage *)
2116 		MemoryContextAlloc(rb->context,
2117 						   sizeof(SharedInvalidationMessage) * nmsgs);
2118 	memcpy(txn->invalidations, msgs,
2119 		   sizeof(SharedInvalidationMessage) * nmsgs);
2120 }
2121 
2122 /*
2123  * Apply all invalidations we know. Possibly we only need parts at this point
2124  * in the changestream but we don't know which those are.
2125  */
2126 static void
2127 ReorderBufferExecuteInvalidations(ReorderBuffer *rb, ReorderBufferTXN *txn)
2128 {
2129 	int			i;
2130 
2131 	for (i = 0; i < txn->ninvalidations; i++)
2132 		LocalExecuteInvalidationMessage(&txn->invalidations[i]);
2133 }
2134 
2135 /*
2136  * Mark a transaction as containing catalog changes
2137  */
2138 void
2139 ReorderBufferXidSetCatalogChanges(ReorderBuffer *rb, TransactionId xid,
2140 								  XLogRecPtr lsn)
2141 {
2142 	ReorderBufferTXN *txn;
2143 
2144 	txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
2145 
2146 	txn->has_catalog_changes = true;
2147 }
2148 
2149 /*
2150  * Query whether a transaction is already *known* to contain catalog
2151  * changes. This can be wrong until directly before the commit!
2152  */
2153 bool
2154 ReorderBufferXidHasCatalogChanges(ReorderBuffer *rb, TransactionId xid)
2155 {
2156 	ReorderBufferTXN *txn;
2157 
2158 	txn = ReorderBufferTXNByXid(rb, xid, false, NULL, InvalidXLogRecPtr,
2159 								false);
2160 	if (txn == NULL)
2161 		return false;
2162 
2163 	return txn->has_catalog_changes;
2164 }
2165 
2166 /*
2167  * ReorderBufferXidHasBaseSnapshot
2168  *		Have we already set the base snapshot for the given txn/subtxn?
2169  */
2170 bool
2171 ReorderBufferXidHasBaseSnapshot(ReorderBuffer *rb, TransactionId xid)
2172 {
2173 	ReorderBufferTXN *txn;
2174 
2175 	txn = ReorderBufferTXNByXid(rb, xid, false,
2176 								NULL, InvalidXLogRecPtr, false);
2177 
2178 	/* transaction isn't known yet, ergo no snapshot */
2179 	if (txn == NULL)
2180 		return false;
2181 
2182 	/* a known subtxn? operate on top-level txn instead */
2183 	if (txn->is_known_as_subxact)
2184 		txn = ReorderBufferTXNByXid(rb, txn->toplevel_xid, false,
2185 									NULL, InvalidXLogRecPtr, false);
2186 
2187 	return txn->base_snapshot != NULL;
2188 }
2189 
2190 
2191 /*
2192  * ---------------------------------------
2193  * Disk serialization support
2194  * ---------------------------------------
2195  */
2196 
2197 /*
2198  * Ensure the IO buffer is >= sz.
2199  */
2200 static void
2201 ReorderBufferSerializeReserve(ReorderBuffer *rb, Size sz)
2202 {
2203 	if (!rb->outbufsize)
2204 	{
2205 		rb->outbuf = MemoryContextAlloc(rb->context, sz);
2206 		rb->outbufsize = sz;
2207 	}
2208 	else if (rb->outbufsize < sz)
2209 	{
2210 		rb->outbuf = repalloc(rb->outbuf, sz);
2211 		rb->outbufsize = sz;
2212 	}
2213 }
2214 
2215 /*
2216  * Check whether the transaction txn should spill its data to disk.
2217  */
2218 static void
2219 ReorderBufferCheckSerializeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
2220 {
2221 	/*
2222 	 * TODO: improve accounting so we cheaply can take subtransactions into
2223 	 * account here.
2224 	 */
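	/*
	 * Note that this is run every time a change is queued (c.f.
	 * ReorderBufferQueueChange()), so a transaction starts spilling as soon
	 * as it accumulates max_changes_in_memory changes in memory.
	 */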
2225 	if (txn->nentries_mem >= max_changes_in_memory)
2226 	{
2227 		ReorderBufferSerializeTXN(rb, txn);
2228 		Assert(txn->nentries_mem == 0);
2229 	}
2230 }
2231 
2232 /*
2233  * Spill data of a large transaction (and its subtransactions) to disk.
2234  */
2235 static void
2236 ReorderBufferSerializeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
2237 {
2238 	dlist_iter	subtxn_i;
2239 	dlist_mutable_iter change_i;
2240 	int			fd = -1;
2241 	XLogSegNo	curOpenSegNo = 0;
2242 	Size		spilled = 0;
2243 
2244 	elog(DEBUG2, "spill %u changes in XID %u to disk",
2245 		 (uint32) txn->nentries_mem, txn->xid);
2246 
2247 	/* do the same to all child TXs */
2248 	dlist_foreach(subtxn_i, &txn->subtxns)
2249 	{
2250 		ReorderBufferTXN *subtxn;
2251 
2252 		subtxn = dlist_container(ReorderBufferTXN, node, subtxn_i.cur);
2253 		ReorderBufferSerializeTXN(rb, subtxn);
2254 	}
2255 
2256 	/* serialize changestream */
2257 	dlist_foreach_modify(change_i, &txn->changes)
2258 	{
2259 		ReorderBufferChange *change;
2260 
2261 		change = dlist_container(ReorderBufferChange, node, change_i.cur);
2262 
2263 		/*
2264 		 * Store the change in the segment in which it belongs by start LSN,
2265 		 * never splitting an individual change over multiple segments.
2266 		 */
2267 		if (fd == -1 || !XLByteInSeg(change->lsn, curOpenSegNo))
2268 		{
2269 			char		path[MAXPGPATH];
2270 
2271 			if (fd != -1)
2272 				CloseTransientFile(fd);
2273 
2274 			XLByteToSeg(change->lsn, curOpenSegNo);
2275 
2276 			/*
2277 			 * No need to care about TLIs here, only used during a single run,
2278 			 * so each LSN only maps to a specific WAL record.
2279 			 */
2280 			ReorderBufferSerializedPath(path, MyReplicationSlot, txn->xid,
2281 										curOpenSegNo);
2282 
2283 			/* open segment, create it if necessary */
2284 			fd = OpenTransientFile(path,
2285 								   O_CREAT | O_WRONLY | O_APPEND | PG_BINARY,
2286 								   S_IRUSR | S_IWUSR);
2287 
2288 			if (fd < 0)
2289 				ereport(ERROR,
2290 						(errcode_for_file_access(),
2291 						 errmsg("could not open file \"%s\": %m", path)));
2292 		}
2293 
2294 		ReorderBufferSerializeChange(rb, txn, fd, change);
2295 		dlist_delete(&change->node);
2296 		ReorderBufferReturnChange(rb, change);
2297 
2298 		spilled++;
2299 	}
2300 
2301 	Assert(spilled == txn->nentries_mem);
2302 	Assert(dlist_is_empty(&txn->changes));
2303 	txn->nentries_mem = 0;
2304 	txn->serialized = true;
2305 
2306 	if (fd != -1)
2307 		CloseTransientFile(fd);
2308 }
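
/*
 * Note that, because changes are binned by the WAL segment containing their
 * start LSN, a large transaction ends up with one spill file per (xid,
 * segment) pair: e.g. with the default 16MB WAL segment size, a transaction
 * whose changes span LSNs 0/12000000 through 0/13FFFFFF would produce two
 * spill files (hypothetical numbers, assuming changes fall in both segments).
 */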
2309 
2310 /*
2311  * Serialize individual change to disk.
2312  */
2313 static void
2314 ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
2315 							 int fd, ReorderBufferChange *change)
2316 {
2317 	ReorderBufferDiskChange *ondisk;
2318 	Size		sz = sizeof(ReorderBufferDiskChange);
2319 
2320 	ReorderBufferSerializeReserve(rb, sz);
2321 
2322 	ondisk = (ReorderBufferDiskChange *) rb->outbuf;
2323 	memcpy(&ondisk->change, change, sizeof(ReorderBufferChange));
2324 
2325 	switch (change->action)
2326 	{
2327 			/* fall through these, they're all similar enough */
2328 		case REORDER_BUFFER_CHANGE_INSERT:
2329 		case REORDER_BUFFER_CHANGE_UPDATE:
2330 		case REORDER_BUFFER_CHANGE_DELETE:
2331 		case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
2332 			{
2333 				char	   *data;
2334 				ReorderBufferTupleBuf *oldtup,
2335 						   *newtup;
2336 				Size		oldlen = 0;
2337 				Size		newlen = 0;
2338 
2339 				oldtup = change->data.tp.oldtuple;
2340 				newtup = change->data.tp.newtuple;
2341 
2342 				if (oldtup)
2343 				{
2344 					sz += sizeof(HeapTupleData);
2345 					oldlen = oldtup->tuple.t_len;
2346 					sz += oldlen;
2347 				}
2348 
2349 				if (newtup)
2350 				{
2351 					sz += sizeof(HeapTupleData);
2352 					newlen = newtup->tuple.t_len;
2353 					sz += newlen;
2354 				}
2355 
2356 				/* make sure we have enough space */
2357 				ReorderBufferSerializeReserve(rb, sz);
2358 
2359 				data = ((char *) rb->outbuf) + sizeof(ReorderBufferDiskChange);
2360 				/* might have been reallocated above */
2361 				ondisk = (ReorderBufferDiskChange *) rb->outbuf;
2362 
2363 				if (oldlen)
2364 				{
2365 					memcpy(data, &oldtup->tuple, sizeof(HeapTupleData));
2366 					data += sizeof(HeapTupleData);
2367 
2368 					memcpy(data, oldtup->tuple.t_data, oldlen);
2369 					data += oldlen;
2370 				}
2371 
2372 				if (newlen)
2373 				{
2374 					memcpy(data, &newtup->tuple, sizeof(HeapTupleData));
2375 					data += sizeof(HeapTupleData);
2376 
2377 					memcpy(data, newtup->tuple.t_data, newlen);
2378 					data += newlen;
2379 				}
2380 				break;
2381 			}
2382 		case REORDER_BUFFER_CHANGE_MESSAGE:
2383 			{
2384 				char	   *data;
2385 				Size		prefix_size = strlen(change->data.msg.prefix) + 1;
2386 
2387 				sz += prefix_size + change->data.msg.message_size +
2388 					sizeof(Size) + sizeof(Size);
2389 				ReorderBufferSerializeReserve(rb, sz);
2390 
2391 				data = ((char *) rb->outbuf) + sizeof(ReorderBufferDiskChange);
2392 
2393 				/* might have been reallocated above */
2394 				ondisk = (ReorderBufferDiskChange *) rb->outbuf;
2395 
2396 				/* write the prefix including the size */
2397 				memcpy(data, &prefix_size, sizeof(Size));
2398 				data += sizeof(Size);
2399 				memcpy(data, change->data.msg.prefix,
2400 					   prefix_size);
2401 				data += prefix_size;
2402 
2403 				/* write the message including the size */
2404 				memcpy(data, &change->data.msg.message_size, sizeof(Size));
2405 				data += sizeof(Size);
2406 				memcpy(data, change->data.msg.message,
2407 					   change->data.msg.message_size);
2408 				data += change->data.msg.message_size;
2409 
2410 				break;
2411 			}
2412 		case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT:
2413 			{
2414 				Snapshot	snap;
2415 				char	   *data;
2416 
2417 				snap = change->data.snapshot;
2418 
2419 				sz += sizeof(SnapshotData) +
2420 					sizeof(TransactionId) * snap->xcnt +
2421 					sizeof(TransactionId) * snap->subxcnt;
2422 
2423 				/* make sure we have enough space */
2424 				ReorderBufferSerializeReserve(rb, sz);
2425 				data = ((char *) rb->outbuf) + sizeof(ReorderBufferDiskChange);
2426 				/* might have been reallocated above */
2427 				ondisk = (ReorderBufferDiskChange *) rb->outbuf;
2428 
2429 				memcpy(data, snap, sizeof(SnapshotData));
2430 				data += sizeof(SnapshotData);
2431 
2432 				if (snap->xcnt)
2433 				{
2434 					memcpy(data, snap->xip,
2435 						   sizeof(TransactionId) * snap->xcnt);
2436 					data += sizeof(TransactionId) * snap->xcnt;
2437 				}
2438 
2439 				if (snap->subxcnt)
2440 				{
2441 					memcpy(data, snap->subxip,
2442 						   sizeof(TransactionId) * snap->subxcnt);
2443 					data += sizeof(TransactionId) * snap->subxcnt;
2444 				}
2445 				break;
2446 			}
2447 		case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
2448 		case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_ABORT:
2449 		case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID:
2450 		case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID:
2451 			/* ReorderBufferChange contains everything important */
2452 			break;
2453 	}
2454 
2455 	ondisk->size = sz;
2456 
2457 	errno = 0;
2458 	pgstat_report_wait_start(WAIT_EVENT_REORDER_BUFFER_WRITE);
2459 	if (write(fd, rb->outbuf, ondisk->size) != ondisk->size)
2460 	{
2461 		int			save_errno = errno;
2462 
2463 		CloseTransientFile(fd);
2464 
2465 		/* if write didn't set errno, assume problem is no disk space */
2466 		errno = save_errno ? save_errno : ENOSPC;
2467 		ereport(ERROR,
2468 				(errcode_for_file_access(),
2469 				 errmsg("could not write to data file for XID %u: %m",
2470 						txn->xid)));
2471 	}
2472 	pgstat_report_wait_end();
2473 
2474 	/*
2475 	 * Keep the transaction's final_lsn up to date with each change we send to
2476 	 * disk, so that ReorderBufferRestoreCleanup works correctly.  (We used to
2477 	 * only do this on commit and abort records, but that doesn't work if a
2478 	 * system crash leaves a transaction without its abort record).
2479 	 *
2480 	 * Make sure not to move it backwards.
2481 	 */
2482 	if (txn->final_lsn < change->lsn)
2483 		txn->final_lsn = change->lsn;
2484 
2485 	Assert(ondisk->change.action == change->action);
2486 }
2487 
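/*
 * For reference, a sketch of the on-disk layout of one serialized
 * INSERT/UPDATE/DELETE change as produced above (sizes not to scale):
 *
 *	+------------------------------+
 *	| ReorderBufferDiskChange      |  fixed part; embeds the change and
 *	|                              |  records the total ->size of the entry
 *	+------------------------------+
 *	| HeapTupleData of oldtuple    |  only present if the change carries
 *	| oldtuple data (t_len bytes)  |  an old tuple
 *	+------------------------------+
 *	| HeapTupleData of newtuple    |  only present if the change carries
 *	| newtuple data (t_len bytes)  |  a new tuple
 *	+------------------------------+
 */
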
2488 /*
2489  * Restore a number of changes spilled to disk back into memory.
2490  */
2491 static Size
2492 ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn,
2493 							File *fd, XLogSegNo *segno)
2494 {
2495 	Size		restored = 0;
2496 	XLogSegNo	last_segno;
2497 	dlist_mutable_iter cleanup_iter;
2498 
2499 	Assert(txn->first_lsn != InvalidXLogRecPtr);
2500 	Assert(txn->final_lsn != InvalidXLogRecPtr);
2501 
2502 	/* free current entries, so we have memory for more */
2503 	dlist_foreach_modify(cleanup_iter, &txn->changes)
2504 	{
2505 		ReorderBufferChange *cleanup =
2506 		dlist_container(ReorderBufferChange, node, cleanup_iter.cur);
2507 
2508 		dlist_delete(&cleanup->node);
2509 		ReorderBufferReturnChange(rb, cleanup);
2510 	}
2511 	txn->nentries_mem = 0;
2512 	Assert(dlist_is_empty(&txn->changes));
2513 
2514 	XLByteToSeg(txn->final_lsn, last_segno);
2515 
2516 	while (restored < max_changes_in_memory && *segno <= last_segno)
2517 	{
2518 		int			readBytes;
2519 		ReorderBufferDiskChange *ondisk;
2520 
2521 		if (*fd == -1)
2522 		{
2523 			char		path[MAXPGPATH];
2524 
2525 			/* first time in */
2526 			if (*segno == 0)
2527 				XLByteToSeg(txn->first_lsn, *segno);
2528 
2529 			Assert(*segno != 0 || dlist_is_empty(&txn->changes));
2530 
2531 			/*
2532 			 * No need to care about TLIs here, only used during a single run,
2533 			 * so each LSN only maps to a specific WAL record.
2534 			 */
2535 			ReorderBufferSerializedPath(path, MyReplicationSlot, txn->xid,
2536 										*segno);
2537 
2538 			*fd = PathNameOpenFile(path, O_RDONLY | PG_BINARY, 0);
2539 			if (*fd < 0 && errno == ENOENT)
2540 			{
2541 				*fd = -1;
2542 				(*segno)++;
2543 				continue;
2544 			}
2545 			else if (*fd < 0)
2546 				ereport(ERROR,
2547 						(errcode_for_file_access(),
2548 						 errmsg("could not open file \"%s\": %m",
2549 								path)));
2550 
2551 		}
2552 
2553 		/*
2554 		 * Read the statically sized part of a change which has information
2555 		 * about the total size. If we couldn't read a record, we're at the
2556 		 * end of this file.
2557 		 */
2558 		ReorderBufferSerializeReserve(rb, sizeof(ReorderBufferDiskChange));
2559 		readBytes = FileRead(*fd, rb->outbuf, sizeof(ReorderBufferDiskChange),
2560 							 WAIT_EVENT_REORDER_BUFFER_READ);
2561 
2562 		/* eof */
2563 		if (readBytes == 0)
2564 		{
2565 			FileClose(*fd);
2566 			*fd = -1;
2567 			(*segno)++;
2568 			continue;
2569 		}
2570 		else if (readBytes < 0)
2571 			ereport(ERROR,
2572 					(errcode_for_file_access(),
2573 					 errmsg("could not read from reorderbuffer spill file: %m")));
2574 		else if (readBytes != sizeof(ReorderBufferDiskChange))
2575 			ereport(ERROR,
2576 					(errcode_for_file_access(),
2577 					 errmsg("could not read from reorderbuffer spill file: read %d instead of %u bytes",
2578 							readBytes,
2579 							(uint32) sizeof(ReorderBufferDiskChange))));
2580 
2581 		ondisk = (ReorderBufferDiskChange *) rb->outbuf;
2582 
2583 		ReorderBufferSerializeReserve(rb,
2584 									  sizeof(ReorderBufferDiskChange) + ondisk->size);
2585 		ondisk = (ReorderBufferDiskChange *) rb->outbuf;
2586 
2587 		readBytes = FileRead(*fd,
2588 							 rb->outbuf + sizeof(ReorderBufferDiskChange),
2589 							 ondisk->size - sizeof(ReorderBufferDiskChange),
2590 							 WAIT_EVENT_REORDER_BUFFER_READ);
2591 
2592 		if (readBytes < 0)
2593 			ereport(ERROR,
2594 					(errcode_for_file_access(),
2595 					 errmsg("could not read from reorderbuffer spill file: %m")));
2596 		else if (readBytes != ondisk->size - sizeof(ReorderBufferDiskChange))
2597 			ereport(ERROR,
2598 					(errcode_for_file_access(),
2599 					 errmsg("could not read from reorderbuffer spill file: read %d instead of %u bytes",
2600 							readBytes,
2601 							(uint32) (ondisk->size - sizeof(ReorderBufferDiskChange)))));
2602 
2603 		/*
2604 		 * ok, read a full change from disk, now restore it into proper
2605 		 * in-memory format
2606 		 */
2607 		ReorderBufferRestoreChange(rb, txn, rb->outbuf);
2608 		restored++;
2609 	}
2610 
2611 	return restored;
2612 }
2613 
2614 /*
2615  * Convert change from its on-disk format to in-memory format and queue it onto
2616  * the TXN's ->changes list.
2617  *
2618  * Note: although "data" is declared char*, at entry it points to a
2619  * maxalign'd buffer, making it safe in most of this function to assume
2620  * that the pointed-to data is suitably aligned for direct access.
2621  */
2622 static void
2623 ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
2624 						   char *data)
2625 {
2626 	ReorderBufferDiskChange *ondisk;
2627 	ReorderBufferChange *change;
2628 
2629 	ondisk = (ReorderBufferDiskChange *) data;
2630 
2631 	change = ReorderBufferGetChange(rb);
2632 
2633 	/* copy static part */
2634 	memcpy(change, &ondisk->change, sizeof(ReorderBufferChange));
2635 
2636 	data += sizeof(ReorderBufferDiskChange);
2637 
2638 	/* restore individual stuff */
2639 	switch (change->action)
2640 	{
2641 			/* fall through these, they're all similar enough */
2642 		case REORDER_BUFFER_CHANGE_INSERT:
2643 		case REORDER_BUFFER_CHANGE_UPDATE:
2644 		case REORDER_BUFFER_CHANGE_DELETE:
2645 		case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
2646 			if (change->data.tp.oldtuple)
2647 			{
2648 				uint32		tuplelen = ((HeapTuple) data)->t_len;
2649 
2650 				change->data.tp.oldtuple =
2651 					ReorderBufferGetTupleBuf(rb, tuplelen - SizeofHeapTupleHeader);
2652 
2653 				/* restore ->tuple */
2654 				memcpy(&change->data.tp.oldtuple->tuple, data,
2655 					   sizeof(HeapTupleData));
2656 				data += sizeof(HeapTupleData);
2657 
2658 				/* reset t_data pointer into the new tuplebuf */
2659 				change->data.tp.oldtuple->tuple.t_data =
2660 					ReorderBufferTupleBufData(change->data.tp.oldtuple);
2661 
2662 				/* restore tuple data itself */
2663 				memcpy(change->data.tp.oldtuple->tuple.t_data, data, tuplelen);
2664 				data += tuplelen;
2665 			}
2666 
2667 			if (change->data.tp.newtuple)
2668 			{
2669 				/* here, data might not be suitably aligned! */
2670 				uint32		tuplelen;
2671 
2672 				memcpy(&tuplelen, data + offsetof(HeapTupleData, t_len),
2673 					   sizeof(uint32));
2674 
2675 				change->data.tp.newtuple =
2676 					ReorderBufferGetTupleBuf(rb, tuplelen - SizeofHeapTupleHeader);
2677 
2678 				/* restore ->tuple */
2679 				memcpy(&change->data.tp.newtuple->tuple, data,
2680 					   sizeof(HeapTupleData));
2681 				data += sizeof(HeapTupleData);
2682 
2683 				/* reset t_data pointer into the new tuplebuf */
2684 				change->data.tp.newtuple->tuple.t_data =
2685 					ReorderBufferTupleBufData(change->data.tp.newtuple);
2686 
2687 				/* restore tuple data itself */
2688 				memcpy(change->data.tp.newtuple->tuple.t_data, data, tuplelen);
2689 				data += tuplelen;
2690 			}
2691 
2692 			break;
2693 		case REORDER_BUFFER_CHANGE_MESSAGE:
2694 			{
2695 				Size		prefix_size;
2696 
2697 				/* read prefix */
2698 				memcpy(&prefix_size, data, sizeof(Size));
2699 				data += sizeof(Size);
2700 				change->data.msg.prefix = MemoryContextAlloc(rb->context,
2701 															 prefix_size);
2702 				memcpy(change->data.msg.prefix, data, prefix_size);
2703 				Assert(change->data.msg.prefix[prefix_size - 1] == '\0');
2704 				data += prefix_size;
2705 
2706 				/* read the message */
2707 				memcpy(&change->data.msg.message_size, data, sizeof(Size));
2708 				data += sizeof(Size);
2709 				change->data.msg.message = MemoryContextAlloc(rb->context,
2710 															  change->data.msg.message_size);
2711 				memcpy(change->data.msg.message, data,
2712 					   change->data.msg.message_size);
2713 				data += change->data.msg.message_size;
2714 
2715 				break;
2716 			}
2717 		case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT:
2718 			{
2719 				Snapshot	oldsnap;
2720 				Snapshot	newsnap;
2721 				Size		size;
2722 
2723 				oldsnap = (Snapshot) data;
2724 
2725 				size = sizeof(SnapshotData) +
2726 					sizeof(TransactionId) * oldsnap->xcnt +
2727 					sizeof(TransactionId) * (oldsnap->subxcnt + 0);
2728 
2729 				change->data.snapshot = MemoryContextAllocZero(rb->context, size);
2730 
2731 				newsnap = change->data.snapshot;
2732 
2733 				memcpy(newsnap, data, size);
2734 				newsnap->xip = (TransactionId *)
2735 					(((char *) newsnap) + sizeof(SnapshotData));
2736 				newsnap->subxip = newsnap->xip + newsnap->xcnt;
2737 				newsnap->copied = true;
2738 				break;
2739 			}
2740 			/* the base struct contains all the data, easy peasy */
2741 		case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
2742 		case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_ABORT:
2743 		case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID:
2744 		case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID:
2745 			break;
2746 	}
2747 
2748 	dlist_push_tail(&txn->changes, &change->node);
2749 	txn->nentries_mem++;
2750 }
2751 
2752 /*
2753  * Remove all on-disk data stored for the passed-in transaction.
2754  */
2755 static void
2756 ReorderBufferRestoreCleanup(ReorderBuffer *rb, ReorderBufferTXN *txn)
2757 {
2758 	XLogSegNo	first;
2759 	XLogSegNo	cur;
2760 	XLogSegNo	last;
2761 
2762 	Assert(txn->first_lsn != InvalidXLogRecPtr);
2763 	Assert(txn->final_lsn != InvalidXLogRecPtr);
2764 
2765 	XLByteToSeg(txn->first_lsn, first);
2766 	XLByteToSeg(txn->final_lsn, last);
2767 
2768 	/* iterate over all possible filenames, and delete them */
2769 	for (cur = first; cur <= last; cur++)
2770 	{
2771 		char		path[MAXPGPATH];
2772 
2773 		ReorderBufferSerializedPath(path, MyReplicationSlot, txn->xid, cur);
2774 		if (unlink(path) != 0 && errno != ENOENT)
2775 			ereport(ERROR,
2776 					(errcode_for_file_access(),
2777 					 errmsg("could not remove file \"%s\": %m", path)));
2778 	}
2779 }
2780 
2781 /*
2782  * Remove any leftover serialized reorder buffers from a slot directory after a
2783  * prior crash or decoding session exit.
2784  */
2785 static void
2786 ReorderBufferCleanupSerializedTXNs(const char *slotname)
2787 {
2788 	DIR		   *spill_dir;
2789 	struct dirent *spill_de;
2790 	struct stat statbuf;
2791 	char		path[MAXPGPATH * 2 + 12];
2792 
2793 	sprintf(path, "pg_replslot/%s", slotname);
2794 
2795 	/* we're only handling directories here, skip if it's not ours */
2796 	if (lstat(path, &statbuf) == 0 && !S_ISDIR(statbuf.st_mode))
2797 		return;
2798 
2799 	spill_dir = AllocateDir(path);
2800 	while ((spill_de = ReadDirExtended(spill_dir, path, INFO)) != NULL)
2801 	{
2802 		/* only look at names that can be ours */
2803 		if (strncmp(spill_de->d_name, "xid", 3) == 0)
2804 		{
2805 			snprintf(path, sizeof(path),
2806 					 "pg_replslot/%s/%s", slotname,
2807 					 spill_de->d_name);
2808 
2809 			if (unlink(path) != 0)
2810 				ereport(ERROR,
2811 						(errcode_for_file_access(),
2812 						 errmsg("could not remove file \"%s\" during removal of pg_replslot/%s/*.xid: %m",
2813 								path, slotname)));
2814 		}
2815 	}
2816 	FreeDir(spill_dir);
2817 }
2818 
2819 /*
2820  * Given a replication slot, transaction ID and segment number, fill in the
2821  * path of the corresponding spill file into 'path', a caller-owned buffer of size
2822  * at least MAXPGPATH.
2823  */
2824 static void
2825 ReorderBufferSerializedPath(char *path, ReplicationSlot *slot, TransactionId xid,
2826 							XLogSegNo segno)
2827 {
2828 	XLogRecPtr	recptr;
2829 
2830 	XLogSegNoOffsetToRecPtr(segno, 0, recptr);
2831 
2832 	snprintf(path, MAXPGPATH, "pg_replslot/%s/xid-%u-lsn-%X-%X.snap",
2833 			NameStr(slot->data.name),
2834 			xid,
2835 			(uint32) (recptr >> 32), (uint32) recptr);
2836 }
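
/*
 * As an example (slot name and numbers made up): for a slot named "myslot",
 * xid 1234 and the segment starting at LSN 0/12000000, this yields
 * "pg_replslot/myslot/xid-1234-lsn-0-12000000.snap".
 */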
2837 
2838 /*
2839  * Delete all data spilled to disk after we've restarted/crashed. It will be
2840  * recreated when the respective slots are reused.
2841  */
2842 void
2843 StartupReorderBuffer(void)
2844 {
2845 	DIR		   *logical_dir;
2846 	struct dirent *logical_de;
2847 
2848 	logical_dir = AllocateDir("pg_replslot");
2849 	while ((logical_de = ReadDir(logical_dir, "pg_replslot")) != NULL)
2850 	{
2851 		if (strcmp(logical_de->d_name, ".") == 0 ||
2852 			strcmp(logical_de->d_name, "..") == 0)
2853 			continue;
2854 
2855 		/* if it cannot be a slot, skip the directory */
2856 		if (!ReplicationSlotValidateName(logical_de->d_name, DEBUG2))
2857 			continue;
2858 
2859 		/*
2860 		 * ok, has to be a surviving logical slot, iterate and delete
2861 		 * everything starting with xid-*
2862 		 */
2863 		ReorderBufferCleanupSerializedTXNs(logical_de->d_name);
2864 	}
2865 	FreeDir(logical_dir);
2866 }
2867 
2868 /* ---------------------------------------
2869  * toast reassembly support
2870  * ---------------------------------------
2871  */
2872 
2873 /*
2874  * Initialize per tuple toast reconstruction support.
2875  */
2876 static void
2877 ReorderBufferToastInitHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
2878 {
2879 	HASHCTL		hash_ctl;
2880 
2881 	Assert(txn->toast_hash == NULL);
2882 
2883 	memset(&hash_ctl, 0, sizeof(hash_ctl));
2884 	hash_ctl.keysize = sizeof(Oid);
2885 	hash_ctl.entrysize = sizeof(ReorderBufferToastEnt);
2886 	hash_ctl.hcxt = rb->context;
2887 	txn->toast_hash = hash_create("ReorderBufferToastHash", 5, &hash_ctl,
2888 								  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
2889 }
2890 
2891 /*
2892  * Per toast-chunk handling for toast reconstruction
2893  *
2894  * Appends a toast chunk so we can reconstruct it when the tuple "owning" the
2895  * toasted Datum comes along.
2896  */
2897 static void
2898 ReorderBufferToastAppendChunk(ReorderBuffer *rb, ReorderBufferTXN *txn,
2899 							  Relation relation, ReorderBufferChange *change)
2900 {
2901 	ReorderBufferToastEnt *ent;
2902 	ReorderBufferTupleBuf *newtup;
2903 	bool		found;
2904 	int32		chunksize;
2905 	bool		isnull;
2906 	Pointer		chunk;
2907 	TupleDesc	desc = RelationGetDescr(relation);
2908 	Oid			chunk_id;
2909 	int32		chunk_seq;
2910 
2911 	if (txn->toast_hash == NULL)
2912 		ReorderBufferToastInitHash(rb, txn);
2913 
2914 	Assert(IsToastRelation(relation));
2915 
2916 	newtup = change->data.tp.newtuple;
2917 	chunk_id = DatumGetObjectId(fastgetattr(&newtup->tuple, 1, desc, &isnull));
2918 	Assert(!isnull);
2919 	chunk_seq = DatumGetInt32(fastgetattr(&newtup->tuple, 2, desc, &isnull));
2920 	Assert(!isnull);
2921 
2922 	ent = (ReorderBufferToastEnt *)
2923 		hash_search(txn->toast_hash,
2924 					(void *) &chunk_id,
2925 					HASH_ENTER,
2926 					&found);
2927 
2928 	if (!found)
2929 	{
2930 		Assert(ent->chunk_id == chunk_id);
2931 		ent->num_chunks = 0;
2932 		ent->last_chunk_seq = 0;
2933 		ent->size = 0;
2934 		ent->reconstructed = NULL;
2935 		dlist_init(&ent->chunks);
2936 
2937 		if (chunk_seq != 0)
2938 			elog(ERROR, "got sequence entry %d for toast chunk %u instead of seq 0",
2939 				 chunk_seq, chunk_id);
2940 	}
2941 	else if (found && chunk_seq != ent->last_chunk_seq + 1)
2942 		elog(ERROR, "got sequence entry %d for toast chunk %u instead of seq %d",
2943 			 chunk_seq, chunk_id, ent->last_chunk_seq + 1);
2944 
2945 	chunk = DatumGetPointer(fastgetattr(&newtup->tuple, 3, desc, &isnull));
2946 	Assert(!isnull);
2947 
2948 	/* calculate size so we can allocate the right size at once later */
2949 	if (!VARATT_IS_EXTENDED(chunk))
2950 		chunksize = VARSIZE(chunk) - VARHDRSZ;
2951 	else if (VARATT_IS_SHORT(chunk))
2952 		/* could happen due to heap_form_tuple doing its thing */
2953 		chunksize = VARSIZE_SHORT(chunk) - VARHDRSZ_SHORT;
2954 	else
2955 		elog(ERROR, "unexpected type of toast chunk");
2956 
2957 	ent->size += chunksize;
2958 	ent->last_chunk_seq = chunk_seq;
2959 	ent->num_chunks++;
2960 	dlist_push_tail(&ent->chunks, &change->node);
2961 }
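
/*
 * (For reference: as used above, a toast chunk tuple has three attributes --
 * chunk_id (attribute 1), chunk_seq (attribute 2) and chunk_data (attribute
 * 3) -- and the chunks of a single datum must arrive with consecutive
 * chunk_seq values starting at 0.)
 */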
2962 
2963 /*
2964  * Rejigger change->newtuple to point to in-memory toast tuples instead of
2965  * on-disk toast tuples that may no longer exist (think DROP TABLE or VACUUM).
2966  *
2967  * We cannot replace unchanged toast tuples though, so those will still point
2968  * to on-disk toast data.
2969  */
2970 static void
2971 ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn,
2972 						  Relation relation, ReorderBufferChange *change)
2973 {
2974 	TupleDesc	desc;
2975 	int			natt;
2976 	Datum	   *attrs;
2977 	bool	   *isnull;
2978 	bool	   *free;
2979 	HeapTuple	tmphtup;
2980 	Relation	toast_rel;
2981 	TupleDesc	toast_desc;
2982 	MemoryContext oldcontext;
2983 	ReorderBufferTupleBuf *newtup;
2984 
2985 	/* no toast tuples changed */
2986 	if (txn->toast_hash == NULL)
2987 		return;
2988 
2989 	oldcontext = MemoryContextSwitchTo(rb->context);
2990 
2991 	/* we should only have toast tuples in an INSERT or UPDATE */
2992 	Assert(change->data.tp.newtuple);
2993 
2994 	desc = RelationGetDescr(relation);
2995 
2996 	toast_rel = RelationIdGetRelation(relation->rd_rel->reltoastrelid);
2997 	if (!RelationIsValid(toast_rel))
2998 		elog(ERROR, "could not open toast relation with OID %u (base relation \"%s\")",
2999 			 relation->rd_rel->reltoastrelid, RelationGetRelationName(relation));
3000 
3001 	toast_desc = RelationGetDescr(toast_rel);
3002 
3003 	/* should we allocate from stack instead? */
3004 	attrs = palloc0(sizeof(Datum) * desc->natts);
3005 	isnull = palloc0(sizeof(bool) * desc->natts);
3006 	free = palloc0(sizeof(bool) * desc->natts);
3007 
3008 	newtup = change->data.tp.newtuple;
3009 
3010 	heap_deform_tuple(&newtup->tuple, desc, attrs, isnull);
3011 
3012 	for (natt = 0; natt < desc->natts; natt++)
3013 	{
3014 		Form_pg_attribute attr = desc->attrs[natt];
3015 		ReorderBufferToastEnt *ent;
3016 		struct varlena *varlena;
3017 
3018 		/* va_rawsize is the size of the original datum -- including header */
3019 		struct varatt_external toast_pointer;
3020 		struct varatt_indirect redirect_pointer;
3021 		struct varlena *new_datum = NULL;
3022 		struct varlena *reconstructed;
3023 		dlist_iter	it;
3024 		Size		data_done = 0;
3025 
3026 		/* system columns aren't toasted */
3027 		if (attr->attnum < 0)
3028 			continue;
3029 
3030 		if (attr->attisdropped)
3031 			continue;
3032 
3033 		/* not a varlena datatype */
3034 		if (attr->attlen != -1)
3035 			continue;
3036 
3037 		/* no data */
3038 		if (isnull[natt])
3039 			continue;
3040 
3041 		/* ok, we know we have a toast datum */
3042 		varlena = (struct varlena *) DatumGetPointer(attrs[natt]);
3043 
3044 		/* no need to do anything if the tuple isn't external */
3045 		if (!VARATT_IS_EXTERNAL(varlena))
3046 			continue;
3047 
3048 		VARATT_EXTERNAL_GET_POINTER(toast_pointer, varlena);
3049 
3050 		/*
3051 		 * Check whether the toast tuple changed, replace if so.
3052 		 */
3053 		ent = (ReorderBufferToastEnt *)
3054 			hash_search(txn->toast_hash,
3055 						(void *) &toast_pointer.va_valueid,
3056 						HASH_FIND,
3057 						NULL);
3058 		if (ent == NULL)
3059 			continue;
3060 
3061 		new_datum =
3062 			(struct varlena *) palloc0(INDIRECT_POINTER_SIZE);
3063 
3064 		free[natt] = true;
3065 
3066 		reconstructed = palloc0(toast_pointer.va_rawsize);
3067 
3068 		ent->reconstructed = reconstructed;
3069 
3070 		/* stitch toast tuple back together from its parts */
3071 		dlist_foreach(it, &ent->chunks)
3072 		{
3073 			bool		isnull;
3074 			ReorderBufferChange *cchange;
3075 			ReorderBufferTupleBuf *ctup;
3076 			Pointer		chunk;
3077 
3078 			cchange = dlist_container(ReorderBufferChange, node, it.cur);
3079 			ctup = cchange->data.tp.newtuple;
3080 			chunk = DatumGetPointer(
3081 									fastgetattr(&ctup->tuple, 3, toast_desc, &isnull));
3082 
3083 			Assert(!isnull);
3084 			Assert(!VARATT_IS_EXTERNAL(chunk));
3085 			Assert(!VARATT_IS_SHORT(chunk));
3086 
3087 			memcpy(VARDATA(reconstructed) + data_done,
3088 				   VARDATA(chunk),
3089 				   VARSIZE(chunk) - VARHDRSZ);
3090 			data_done += VARSIZE(chunk) - VARHDRSZ;
3091 		}
3092 		Assert(data_done == toast_pointer.va_extsize);
3093 
3094 		/* make sure it's marked as compressed or not */
3095 		if (VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer))
3096 			SET_VARSIZE_COMPRESSED(reconstructed, data_done + VARHDRSZ);
3097 		else
3098 			SET_VARSIZE(reconstructed, data_done + VARHDRSZ);
3099 
3100 		memset(&redirect_pointer, 0, sizeof(redirect_pointer));
3101 		redirect_pointer.pointer = reconstructed;
3102 
3103 		SET_VARTAG_EXTERNAL(new_datum, VARTAG_INDIRECT);
3104 		memcpy(VARDATA_EXTERNAL(new_datum), &redirect_pointer,
3105 			   sizeof(redirect_pointer));
3106 
3107 		attrs[natt] = PointerGetDatum(new_datum);
3108 	}
3109 
3110 	/*
3111 	 * Build tuple in separate memory & copy tuple back into the tuplebuf
3112 	 * passed to the output plugin. We can't directly heap_fill_tuple() into
3113 	 * the tuplebuf because attrs[] will point back into the current content.
3114 	 */
3115 	tmphtup = heap_form_tuple(desc, attrs, isnull);
3116 	Assert(newtup->tuple.t_len <= MaxHeapTupleSize);
3117 	Assert(ReorderBufferTupleBufData(newtup) == newtup->tuple.t_data);
3118 
3119 	memcpy(newtup->tuple.t_data, tmphtup->t_data, tmphtup->t_len);
3120 	newtup->tuple.t_len = tmphtup->t_len;
3121 
3122 	/*
3123 	 * Free resources we won't need any further; more persistent stuff will
3124 	 * be freed in ReorderBufferToastReset().
3125 	 */
3126 	RelationClose(toast_rel);
3127 	pfree(tmphtup);
3128 	for (natt = 0; natt < desc->natts; natt++)
3129 	{
3130 		if (free[natt])
3131 			pfree(DatumGetPointer(attrs[natt]));
3132 	}
3133 	pfree(attrs);
3134 	pfree(free);
3135 	pfree(isnull);
3136 
3137 	MemoryContextSwitchTo(oldcontext);
3138 }
3139 
3140 /*
3141  * Free all resources allocated for toast reconstruction.
3142  */
3143 static void
3144 ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn)
3145 {
3146 	HASH_SEQ_STATUS hstat;
3147 	ReorderBufferToastEnt *ent;
3148 
3149 	if (txn->toast_hash == NULL)
3150 		return;
3151 
3152 	/* sequentially walk over the hash and free everything */
3153 	hash_seq_init(&hstat, txn->toast_hash);
3154 	while ((ent = (ReorderBufferToastEnt *) hash_seq_search(&hstat)) != NULL)
3155 	{
3156 		dlist_mutable_iter it;
3157 
3158 		if (ent->reconstructed != NULL)
3159 			pfree(ent->reconstructed);
3160 
3161 		dlist_foreach_modify(it, &ent->chunks)
3162 		{
3163 			ReorderBufferChange *change =
3164 			dlist_container(ReorderBufferChange, node, it.cur);
3165 
3166 			dlist_delete(&change->node);
3167 			ReorderBufferReturnChange(rb, change);
3168 		}
3169 	}
3170 
3171 	hash_destroy(txn->toast_hash);
3172 	txn->toast_hash = NULL;
3173 }
3174 
3175 
3176 /* ---------------------------------------
3177  * Visibility support for logical decoding
3178  *
3179  *
3180  * Lookup actual cmin/cmax values when using a decoding snapshot. We can't
3181  * always rely on stored cmin/cmax values because of two scenarios:
3182  *
3183  * * A tuple got changed multiple times during a single transaction and thus
3184  *	 has got a combocid. Combocids are only valid for the duration of a
3185  *	 single transaction.
3186  * * A tuple with a cmin but no cmax (and thus no combocid) got
3187  *	 deleted/updated in a different transaction than the one which created
3188  *	 it, which is the one we are looking at right now. As only one of cmin,
3189  *	 cmax or combocid is actually stored in the heap, we don't have access
3190  *	 to the value we need anymore.
3191  *
3192  * To resolve those problems we have a per-transaction hash of (cmin,
3193  * cmax) tuples keyed by (relfilenode, ctid) which contains the actual
3194  * (cmin, cmax) values. That also takes care of combocids by simply
3195  * not caring about them at all. As we have the real cmin/cmax values
3196  * combocids aren't interesting.
3197  *
3198  * As we only care about catalog tuples here the overhead of this
3199  * hashtable should be acceptable.
3200  *
3201  * Heap rewrites complicate this a bit, check rewriteheap.c for
3202  * details.
3203  * -------------------------------------------------------------------------
3204  */
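
/*
 * A worked example of the first problem above (values made up): within one
 * transaction,
 *
 *	cid 3: INSERT a catalog row		-> tuple's cmin = 3
 *	cid 5: DELETE that same row		-> tuple's cmax = 5
 *
 * only a single combocid representing the pair (3, 5) fits into the tuple
 * header, and the combocid -> (cmin, cmax) mapping exists only in the
 * backend that performed the changes. The hash described above sidesteps
 * that by storing the real pair (cmin = 3, cmax = 5) directly.
 */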
3205 
3206 /* struct for qsort()ing mapping files by lsn somewhat efficiently */
3207 typedef struct RewriteMappingFile
3208 {
3209 	XLogRecPtr	lsn;
3210 	char		fname[MAXPGPATH];
3211 } RewriteMappingFile;
3212 
3213 #ifdef NOT_USED
3214 static void
3215 DisplayMapping(HTAB *tuplecid_data)
3216 {
3217 	HASH_SEQ_STATUS hstat;
3218 	ReorderBufferTupleCidEnt *ent;
3219 
3220 	hash_seq_init(&hstat, tuplecid_data);
3221 	while ((ent = (ReorderBufferTupleCidEnt *) hash_seq_search(&hstat)) != NULL)
3222 	{
3223 		elog(DEBUG3, "mapping: node: %u/%u/%u tid: %u/%u cmin: %u, cmax: %u",
3224 			 ent->key.relnode.dbNode,
3225 			 ent->key.relnode.spcNode,
3226 			 ent->key.relnode.relNode,
3227 			 ItemPointerGetBlockNumber(&ent->key.tid),
3228 			 ItemPointerGetOffsetNumber(&ent->key.tid),
3229 			 ent->cmin,
3230 			 ent->cmax
3231 			);
3232 	}
3233 }
3234 #endif
3235 
3236 /*
3237  * Apply a single mapping file to tuplecid_data.
3238  *
3239  * The mapping file has to have been verified to be a) committed b) for our
3240  * transaction c) applied in LSN order.
3241  */
3242 static void
3243 ApplyLogicalMappingFile(HTAB *tuplecid_data, Oid relid, const char *fname)
3244 {
3245 	char		path[MAXPGPATH];
3246 	int			fd;
3247 	int			readBytes;
3248 	LogicalRewriteMappingData map;
3249 
3250 	sprintf(path, "pg_logical/mappings/%s", fname);
3251 	fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0);
3252 	if (fd < 0)
3253 		ereport(ERROR,
3254 				(errcode_for_file_access(),
3255 				 errmsg("could not open file \"%s\": %m", path)));
3256 
3257 	while (true)
3258 	{
3259 		ReorderBufferTupleCidKey key;
3260 		ReorderBufferTupleCidEnt *ent;
3261 		ReorderBufferTupleCidEnt *new_ent;
3262 		bool		found;
3263 
3264 		/* be careful about padding */
3265 		memset(&key, 0, sizeof(ReorderBufferTupleCidKey));
3266 
3267 		/* read all mappings till the end of the file */
3268 		pgstat_report_wait_start(WAIT_EVENT_REORDER_LOGICAL_MAPPING_READ);
3269 		readBytes = read(fd, &map, sizeof(LogicalRewriteMappingData));
3270 		pgstat_report_wait_end();
3271 
3272 		if (readBytes < 0)
3273 			ereport(ERROR,
3274 					(errcode_for_file_access(),
3275 					 errmsg("could not read file \"%s\": %m",
3276 							path)));
3277 		else if (readBytes == 0)	/* EOF */
3278 			break;
3279 		else if (readBytes != sizeof(LogicalRewriteMappingData))
3280 			ereport(ERROR,
3281 					(errcode_for_file_access(),
3282 					 errmsg("could not read from file \"%s\": read %d instead of %d bytes",
3283 							path, readBytes,
3284 							(int32) sizeof(LogicalRewriteMappingData))));
3285 
3286 		key.relnode = map.old_node;
3287 		ItemPointerCopy(&map.old_tid,
3288 						&key.tid);
3289 
3290 
3291 		ent = (ReorderBufferTupleCidEnt *)
3292 			hash_search(tuplecid_data,
3293 						(void *) &key,
3294 						HASH_FIND,
3295 						NULL);
3296 
3297 		/* no existing mapping, no need to update */
3298 		if (!ent)
3299 			continue;
3300 
3301 		key.relnode = map.new_node;
3302 		ItemPointerCopy(&map.new_tid,
3303 						&key.tid);
3304 
3305 		new_ent = (ReorderBufferTupleCidEnt *)
3306 			hash_search(tuplecid_data,
3307 						(void *) &key,
3308 						HASH_ENTER,
3309 						&found);
3310 
3311 		if (found)
3312 		{
3313 			/*
3314 			 * Make sure the existing mapping makes sense. We sometimes update
3315 			 * old records that did not yet have a cmax (e.g. pg_class' own
3316 			 * entry while rewriting it) during rewrites, so allow that.
3317 			 */
3318 			Assert(ent->cmin == InvalidCommandId || ent->cmin == new_ent->cmin);
3319 			Assert(ent->cmax == InvalidCommandId || ent->cmax == new_ent->cmax);
3320 		}
3321 		else
3322 		{
3323 			/* update mapping */
3324 			new_ent->cmin = ent->cmin;
3325 			new_ent->cmax = ent->cmax;
3326 			new_ent->combocid = ent->combocid;
3327 		}
3328 	}
3329 
3330 	CloseTransientFile(fd);
3331 }
3332 
3333 
3334 /*
3335  * Check whether the TransactionId 'xid' is in the pre-sorted array 'xip'.
3336  */
3337 static bool
3338 TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num)
3339 {
3340 	return bsearch(&xid, xip, num,
3341 				   sizeof(TransactionId), xidComparator) != NULL;
3342 }
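
/*
 * For example (made-up values): given snapshot->subxip = {601, 605, 700}
 * with subxcnt = 3, TransactionIdInArray(605, snapshot->subxip,
 * snapshot->subxcnt) returns true, while probing for 606 returns false.
 */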

/*
 * qsort() comparator for sorting RewriteMappingFiles in LSN order.
 *
 * Note that qsort() hands us pointers to the array elements, which here are
 * themselves RewriteMappingFile pointers, hence the double indirection.
 */
static int
file_sort_by_lsn(const void *a_p, const void *b_p)
{
	RewriteMappingFile *a = *(RewriteMappingFile **) a_p;
	RewriteMappingFile *b = *(RewriteMappingFile **) b_p;

	if (a->lsn < b->lsn)
		return -1;
	else if (a->lsn > b->lsn)
		return 1;
	return 0;
}

/*
 * Apply any existing logical remapping files that are targeted at our
 * transaction for 'relid'.
 */
static void
UpdateLogicalMappings(HTAB *tuplecid_data, Oid relid, Snapshot snapshot)
{
	DIR		   *mapping_dir;
	struct dirent *mapping_de;
	List	   *files = NIL;
	ListCell   *file;
	RewriteMappingFile **files_a;
	size_t		off;
	Oid			dboid = IsSharedRelation(relid) ? InvalidOid : MyDatabaseId;

	mapping_dir = AllocateDir("pg_logical/mappings");
	while ((mapping_de = ReadDir(mapping_dir, "pg_logical/mappings")) != NULL)
	{
		Oid			f_dboid;
		Oid			f_relid;
		TransactionId f_mapped_xid;
		TransactionId f_create_xid;
		XLogRecPtr	f_lsn;
		uint32		f_hi,
					f_lo;
		RewriteMappingFile *f;

		if (strcmp(mapping_de->d_name, ".") == 0 ||
			strcmp(mapping_de->d_name, "..") == 0)
			continue;

		/* Ignore files that aren't ours */
		if (strncmp(mapping_de->d_name, "map-", 4) != 0)
			continue;

		if (sscanf(mapping_de->d_name, LOGICAL_REWRITE_FORMAT,
				   &f_dboid, &f_relid, &f_hi, &f_lo,
				   &f_mapped_xid, &f_create_xid) != 6)
			elog(ERROR, "could not parse filename \"%s\"", mapping_de->d_name);

		f_lsn = ((uint64) f_hi) << 32 | f_lo;

		/* mapping for another database */
		if (f_dboid != dboid)
			continue;

		/* mapping for another relation */
		if (f_relid != relid)
			continue;

		/* did the creating transaction abort? */
		if (!TransactionIdDidCommit(f_create_xid))
			continue;

		/* not for our transaction */
		if (!TransactionIdInArray(f_mapped_xid, snapshot->subxip, snapshot->subxcnt))
			continue;

		/* ok, relevant, queue for apply */
		f = palloc(sizeof(RewriteMappingFile));
		f->lsn = f_lsn;
		strcpy(f->fname, mapping_de->d_name);
		files = lappend(files, f);
	}
	FreeDir(mapping_dir);

	/* build array we can easily sort */
	files_a = palloc(list_length(files) * sizeof(RewriteMappingFile *));
	off = 0;
	foreach(file, files)
	{
		files_a[off++] = lfirst(file);
	}

	/* sort files so we apply them in LSN order */
	qsort(files_a, list_length(files), sizeof(RewriteMappingFile *),
		  file_sort_by_lsn);

	for (off = 0; off < list_length(files); off++)
	{
		RewriteMappingFile *f = files_a[off];

		elog(DEBUG1, "applying mapping: \"%s\" in %u", f->fname,
			 snapshot->subxip[0]);
		ApplyLogicalMappingFile(tuplecid_data, relid, f->fname);
		pfree(f);
	}
}
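
/*
 * Illustrative example of the file naming scheme parsed above, assuming the
 * usual definition of LOGICAL_REWRITE_FORMAT ("map-%x-%x-%X_%X-%x-%x") and
 * made-up values: a mapping written in database 16384 for relation 1259 at
 * LSN 0/12345678, remapping xid 601 on behalf of rewriting xid 602, is named
 *
 *		map-4000-4eb-0_12345678-259-25a
 *
 * i.e. all six fields are encoded in hexadecimal.
 */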

/*
 * Look up cmin/cmax of a tuple, during logical decoding where we can't rely
 * on combocids.
 */
bool
ResolveCminCmaxDuringDecoding(HTAB *tuplecid_data,
							  Snapshot snapshot,
							  HeapTuple htup, Buffer buffer,
							  CommandId *cmin, CommandId *cmax)
{
	ReorderBufferTupleCidKey key;
	ReorderBufferTupleCidEnt *ent;
	ForkNumber	forkno;
	BlockNumber blockno;
	bool		updated_mapping = false;

	/* be careful about padding */
	memset(&key, 0, sizeof(key));

	Assert(!BufferIsLocal(buffer));

	/*
	 * get relfilenode from the buffer; there's no convenient way to access it
	 * other than that
	 */
	BufferGetTag(buffer, &key.relnode, &forkno, &blockno);

	/* tuples can only be in the main fork */
	Assert(forkno == MAIN_FORKNUM);
	Assert(blockno == ItemPointerGetBlockNumber(&htup->t_self));

	ItemPointerCopy(&htup->t_self,
					&key.tid);

restart:
	ent = (ReorderBufferTupleCidEnt *)
		hash_search(tuplecid_data,
					(void *) &key,
					HASH_FIND,
					NULL);

	/*
	 * failed to find a mapping, check whether the table was rewritten and
	 * apply the mapping if so, but only do that once - there can be no new
	 * mappings while we are in here since we have to hold a lock on the
	 * relation.
	 */
	if (ent == NULL && !updated_mapping)
	{
		UpdateLogicalMappings(tuplecid_data, htup->t_tableOid, snapshot);
		/* now check again, but don't re-apply the mappings */
		updated_mapping = true;
		goto restart;
	}
	else if (ent == NULL)
		return false;

	if (cmin)
		*cmin = ent->cmin;
	if (cmax)
		*cmax = ent->cmax;
	return true;
}
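
#ifdef NOT_USED
/*
 * Minimal usage sketch (not part of the original file): how a historic
 * visibility check might consult ResolveCminCmaxDuringDecoding().  The real
 * caller lives in tqual.c; the helper name and the simplified logic below
 * are hypothetical.
 */
static bool
TupleVisibleToDecodingSnapshot(HTAB *tuplecid_data, Snapshot snapshot,
							   HeapTuple htup, Buffer buffer)
{
	CommandId	cmin;

	/* no logged cmin/cmax: tuple wasn't touched by the decoded transaction */
	if (!ResolveCminCmaxDuringDecoding(tuplecid_data, snapshot,
									   htup, buffer, &cmin, NULL))
		return false;

	/* visible if inserted by a command preceding the snapshot's curcid */
	return cmin < snapshot->curcid;
}
#endif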