/*-------------------------------------------------------------------------
 *
 * heapam_xlog.h
 *	  POSTGRES heap access XLOG definitions.
 *
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/heapam_xlog.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef HEAPAM_XLOG_H
#define HEAPAM_XLOG_H

#include "access/htup.h"
#include "access/xlogreader.h"
#include "lib/stringinfo.h"
#include "storage/buf.h"
#include "storage/bufpage.h"
#include "storage/relfilenode.h"
#include "utils/relcache.h"


/*
 * WAL record definitions for heapam.c's WAL operations
 *
 * XLOG allows us to store some information in the high 4 bits of the log
 * record's xl_info field.  We use 3 bits for the opcode and one for the
 * init bit.
 */
#define XLOG_HEAP_INSERT		0x00
#define XLOG_HEAP_DELETE		0x10
#define XLOG_HEAP_UPDATE		0x20
#define XLOG_HEAP_TRUNCATE		0x30
#define XLOG_HEAP_HOT_UPDATE	0x40
#define XLOG_HEAP_CONFIRM		0x50
#define XLOG_HEAP_LOCK			0x60
#define XLOG_HEAP_INPLACE		0x70

#define XLOG_HEAP_OPMASK		0x70
/*
 * When we insert the first item on a new page in INSERT, UPDATE, HOT_UPDATE,
 * or MULTI_INSERT, we can (and do) restore the entire page in redo.
 */
#define XLOG_HEAP_INIT_PAGE		0x80
/*
 * We ran out of opcodes, so heapam.c now has a second RmgrId.  These opcodes
 * are associated with RM_HEAP2_ID, but are not logically different from
 * the ones above associated with RM_HEAP_ID.  XLOG_HEAP_OPMASK applies to
 * these, too.
 */
#define XLOG_HEAP2_REWRITE		0x00
#define XLOG_HEAP2_CLEAN		0x10
#define XLOG_HEAP2_FREEZE_PAGE	0x20
#define XLOG_HEAP2_CLEANUP_INFO 0x30
#define XLOG_HEAP2_VISIBLE		0x40
#define XLOG_HEAP2_MULTI_INSERT 0x50
#define XLOG_HEAP2_LOCK_UPDATED 0x60
#define XLOG_HEAP2_NEW_CID		0x70
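
/*
 * Dispatch sketch (an illustration only, along the lines of heap_redo() in
 * heapam.c): the opcode is recovered by masking xl_info with
 * XLOG_HEAP_OPMASK, while the XLOG_HEAP_INIT_PAGE bit is tested separately
 * to decide whether the target page must be reinitialized before replay.
 * XLogRecGetInfo() and XLR_INFO_MASK are the standard xlogreader/xlogrecord
 * facilities; the heap_xlog_* callees are heapam.c internals.
 *
 *	uint8		info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
 *
 *	switch (info & XLOG_HEAP_OPMASK)
 *	{
 *		case XLOG_HEAP_INSERT:
 *			heap_xlog_insert(record);
 *			break;
 *		case XLOG_HEAP_DELETE:
 *			heap_xlog_delete(record);
 *			break;
 *		default:
 *			elog(PANIC, "heap_redo: unknown op code %u", info);
 *	}
 */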

/*
 * xl_heap_insert/xl_heap_multi_insert flag values, 8 bits are available.
 */
/* PD_ALL_VISIBLE was cleared */
#define XLH_INSERT_ALL_VISIBLE_CLEARED			(1<<0)
#define XLH_INSERT_LAST_IN_MULTI				(1<<1)
#define XLH_INSERT_IS_SPECULATIVE				(1<<2)
#define XLH_INSERT_CONTAINS_NEW_TUPLE			(1<<3)

/*
 * xl_heap_update flag values, 8 bits are available.
 */
/* PD_ALL_VISIBLE was cleared */
#define XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED		(1<<0)
/* PD_ALL_VISIBLE was cleared in the 2nd page */
#define XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED		(1<<1)
#define XLH_UPDATE_CONTAINS_OLD_TUPLE			(1<<2)
#define XLH_UPDATE_CONTAINS_OLD_KEY				(1<<3)
#define XLH_UPDATE_CONTAINS_NEW_TUPLE			(1<<4)
#define XLH_UPDATE_PREFIX_FROM_OLD				(1<<5)
#define XLH_UPDATE_SUFFIX_FROM_OLD				(1<<6)

/* convenience macro for checking whether any form of old tuple was logged */
#define XLH_UPDATE_CONTAINS_OLD						\
	(XLH_UPDATE_CONTAINS_OLD_TUPLE | XLH_UPDATE_CONTAINS_OLD_KEY)

/*
 * xl_heap_delete flag values, 8 bits are available.
 */
/* PD_ALL_VISIBLE was cleared */
#define XLH_DELETE_ALL_VISIBLE_CLEARED			(1<<0)
#define XLH_DELETE_CONTAINS_OLD_TUPLE			(1<<1)
#define XLH_DELETE_CONTAINS_OLD_KEY				(1<<2)
#define XLH_DELETE_IS_SUPER						(1<<3)
#define XLH_DELETE_IS_PARTITION_MOVE			(1<<4)

/* convenience macro for checking whether any form of old tuple was logged */
#define XLH_DELETE_CONTAINS_OLD						\
	(XLH_DELETE_CONTAINS_OLD_TUPLE | XLH_DELETE_CONTAINS_OLD_KEY)

/* This is what we need to know about delete */
typedef struct xl_heap_delete
{
	TransactionId xmax;			/* xmax of the deleted tuple */
	OffsetNumber offnum;		/* deleted tuple's offset */
	uint8		infobits_set;	/* infomask bits */
	uint8		flags;
} xl_heap_delete;

#define SizeOfHeapDelete	(offsetof(xl_heap_delete, flags) + sizeof(uint8))
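
/*
 * For illustration only: a minimal sketch of how a delete record might be
 * assembled, loosely following heap_delete() in heapam.c.  The new_xmax,
 * infobits, tuple_tid, buffer and page variables are placeholders; the
 * XLogBeginInsert/XLogRegisterData/XLogRegisterBuffer/XLogInsert calls are
 * the standard xloginsert.h API.
 *
 *	xl_heap_delete xlrec;
 *	XLogRecPtr	recptr;
 *
 *	xlrec.xmax = new_xmax;
 *	xlrec.offnum = ItemPointerGetOffsetNumber(&tuple_tid);
 *	xlrec.infobits_set = infobits;
 *	xlrec.flags = all_visible_cleared ? XLH_DELETE_ALL_VISIBLE_CLEARED : 0;
 *
 *	XLogBeginInsert();
 *	XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
 *	XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
 *	recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
 *	PageSetLSN(page, recptr);
 */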

/*
 * xl_heap_truncate flag values, 8 bits are available.
 */
#define XLH_TRUNCATE_CASCADE					(1<<0)
#define XLH_TRUNCATE_RESTART_SEQS				(1<<1)

/*
 * For truncate we list all truncated relids in an array, followed by all
 * sequence relids that need to be restarted, if any.
 * All rels are always within the same database, so we just list dbid once.
 */
typedef struct xl_heap_truncate
{
	Oid			dbId;
	uint32		nrelids;
	uint8		flags;
	Oid			relids[FLEXIBLE_ARRAY_MEMBER];
} xl_heap_truncate;

#define SizeOfHeapTruncate	(offsetof(xl_heap_truncate, relids))
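
/*
 * Emission sketch (an assumption, modeled on how TRUNCATE is logged in core):
 * the fixed header and the relid array are registered as two chunks of main
 * data, so the main data totals SizeOfHeapTruncate + nrelids * sizeof(Oid)
 * bytes.  relids and nrelids here are caller-supplied placeholders.
 *
 *	xl_heap_truncate xlrec;
 *
 *	xlrec.dbId = MyDatabaseId;
 *	xlrec.nrelids = nrelids;
 *	xlrec.flags = 0;
 *
 *	XLogBeginInsert();
 *	XLogRegisterData((char *) &xlrec, SizeOfHeapTruncate);
 *	XLogRegisterData((char *) relids, nrelids * sizeof(Oid));
 *	(void) XLogInsert(RM_HEAP_ID, XLOG_HEAP_TRUNCATE);
 */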

/*
 * We don't store the whole fixed part (HeapTupleHeaderData) of an inserted
 * or updated tuple in WAL; we save a few bytes by omitting fields that can
 * be reconstructed from information available elsewhere in the WAL record,
 * or that simply don't need to be reconstructed at all.  These are the
 * fields we must store.
 */
typedef struct xl_heap_header
{
	uint16		t_infomask2;
	uint16		t_infomask;
	uint8		t_hoff;
} xl_heap_header;

#define SizeOfHeapHeader	(offsetof(xl_heap_header, t_hoff) + sizeof(uint8))

/* This is what we need to know about insert */
typedef struct xl_heap_insert
{
	OffsetNumber offnum;		/* inserted tuple's offset */
	uint8		flags;

	/* xl_heap_header & TUPLE DATA in backup block 0 */
} xl_heap_insert;

#define SizeOfHeapInsert	(offsetof(xl_heap_insert, flags) + sizeof(uint8))
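
/*
 * A minimal sketch (an assumption, loosely following heap_insert() in
 * heapam.c) of how the pieces fit together: xl_heap_insert is the record's
 * main data, while the xl_heap_header and the tuple body are registered as
 * data attached to backup block 0.  SizeofHeapTupleHeader comes from
 * access/htup_details.h; heaptup and buffer are placeholders.
 *
 *	xl_heap_insert xlrec;
 *	xl_heap_header xlhdr;
 *
 *	xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
 *	xlrec.flags = 0;
 *	xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
 *	xlhdr.t_infomask = heaptup->t_data->t_infomask;
 *	xlhdr.t_hoff = heaptup->t_data->t_hoff;
 *
 *	XLogBeginInsert();
 *	XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
 *	XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
 *	XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
 *	XLogRegisterBufData(0, (char *) heaptup->t_data + SizeofHeapTupleHeader,
 *						heaptup->t_len - SizeofHeapTupleHeader);
 *	(void) XLogInsert(RM_HEAP_ID, XLOG_HEAP_INSERT);
 */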

/*
 * This is what we need to know about a multi-insert.
 *
 * The main data of the record consists of this xl_heap_multi_insert header.
 * 'offsets' array is omitted if the whole page is reinitialized
 * (XLOG_HEAP_INIT_PAGE).
 *
 * In block 0's data portion, there is an xl_multi_insert_tuple struct,
 * followed by the tuple data for each tuple. There is padding to align
 * each xl_multi_insert_tuple struct.
 */
typedef struct xl_heap_multi_insert
{
	uint8		flags;
	uint16		ntuples;
	OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER];
} xl_heap_multi_insert;

#define SizeOfHeapMultiInsert	offsetof(xl_heap_multi_insert, offsets)

typedef struct xl_multi_insert_tuple
{
	uint16		datalen;		/* size of tuple data that follows */
	uint16		t_infomask2;
	uint16		t_infomask;
	uint8		t_hoff;
	/* TUPLE DATA FOLLOWS AT END OF STRUCT */
} xl_multi_insert_tuple;

#define SizeOfMultiInsertTuple	(offsetof(xl_multi_insert_tuple, t_hoff) + sizeof(uint8))
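
/*
 * Decoding sketch (an assumption, based on the padding note above): walk
 * block 0's data, aligning each per-tuple header before reading it; each
 * header is followed by xlhdr->datalen bytes of tuple data.  SHORTALIGN is
 * the alignment assumed here, uint16 being the widest member of
 * xl_multi_insert_tuple; record and xlrec are placeholders.
 *
 *	Size		len;
 *	char	   *tupdata = XLogRecGetBlockData(record, 0, &len);
 *	char	   *endptr = tupdata + len;
 *	int			i;
 *
 *	for (i = 0; i < xlrec->ntuples; i++)
 *	{
 *		xl_multi_insert_tuple *xlhdr;
 *
 *		xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(tupdata);
 *		tupdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;
 *		tupdata += xlhdr->datalen;
 *	}
 *	Assert(tupdata == endptr);
 */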

/*
 * This is what we need to know about update|hot_update
 *
 * Backup blk 0: new page
 *
 * If XLH_UPDATE_PREFIX_FROM_OLD or XLH_UPDATE_SUFFIX_FROM_OLD flags are set,
 * the prefix and/or suffix come first, as one or two uint16s.
 *
 * After that, xl_heap_header and new tuple data follow.  The new tuple
 * data doesn't include the prefix and suffix, which are copied from the
 * old tuple on replay.
 *
 * If XLH_UPDATE_CONTAINS_NEW_TUPLE flag is given, the tuple data is
 * included even if a full-page image was taken.
 *
 * Backup blk 1: old page, if different. (no data, just a reference to the blk)
 */
typedef struct xl_heap_update
{
	TransactionId old_xmax;		/* xmax of the old tuple */
	OffsetNumber old_offnum;	/* old tuple's offset */
	uint8		old_infobits_set;	/* infomask bits to set on old tuple */
	uint8		flags;
	TransactionId new_xmax;		/* xmax of the new tuple */
	OffsetNumber new_offnum;	/* new tuple's offset */

	/*
	 * If XLH_UPDATE_CONTAINS_OLD_TUPLE or XLH_UPDATE_CONTAINS_OLD_KEY flags
	 * are set, xl_heap_header and tuple data for the old tuple follow.
	 */
} xl_heap_update;

#define SizeOfHeapUpdate	(offsetof(xl_heap_update, new_offnum) + sizeof(OffsetNumber))
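
/*
 * Decoding sketch for block 0's data (an assumption, following the layout
 * described above): optional prefix/suffix lengths come first, then the
 * xl_heap_header, then the (possibly truncated) new tuple data; whatever
 * remains up to the end of the block data is the new tuple body, minus the
 * prefix and suffix bytes copied from the old tuple.  recdata is a
 * placeholder pointing at the block 0 data.
 *
 *	uint16		prefixlen = 0,
 *				suffixlen = 0;
 *	xl_heap_header xlhdr;
 *
 *	if (xlrec->flags & XLH_UPDATE_PREFIX_FROM_OLD)
 *	{
 *		memcpy(&prefixlen, recdata, sizeof(uint16));
 *		recdata += sizeof(uint16);
 *	}
 *	if (xlrec->flags & XLH_UPDATE_SUFFIX_FROM_OLD)
 *	{
 *		memcpy(&suffixlen, recdata, sizeof(uint16));
 *		recdata += sizeof(uint16);
 *	}
 *	memcpy(&xlhdr, recdata, SizeOfHeapHeader);
 *	recdata += SizeOfHeapHeader;
 */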

/*
 * This is what we need to know about vacuum page cleanup/redirect
 *
 * The array of OffsetNumbers following the fixed part of the record contains:
 *	* for each redirected item: the item offset, then the offset redirected to
 *	* for each now-dead item: the item offset
 *	* for each now-unused item: the item offset
 * The total number of OffsetNumbers is therefore 2*nredirected+ndead+nunused.
 * Note that nunused is not explicitly stored, but may be found by reference
 * to the total record length.
 */
typedef struct xl_heap_clean
{
	TransactionId latestRemovedXid;
	uint16		nredirected;
	uint16		ndead;
	/* OFFSET NUMBERS are in the block reference 0 */
} xl_heap_clean;

#define SizeOfHeapClean (offsetof(xl_heap_clean, ndead) + sizeof(uint16))
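
/*
 * Since nunused is implicit, redo recovers it from the length of block 0's
 * data; a sketch (record and xlrec are placeholders):
 *
 *	OffsetNumber *redirected,
 *			   *nowdead,
 *			   *nowunused,
 *			   *end;
 *	Size		datalen;
 *	int			nunused;
 *
 *	redirected = (OffsetNumber *) XLogRecGetBlockData(record, 0, &datalen);
 *	end = (OffsetNumber *) ((char *) redirected + datalen);
 *	nowdead = redirected + (xlrec->nredirected * 2);
 *	nowunused = nowdead + xlrec->ndead;
 *	nunused = (int) (end - nowunused);
 */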

/*
 * Cleanup_info is required in some cases during a lazy VACUUM.
 * It is used for reporting the results of
 * HeapTupleHeaderAdvanceLatestRemovedXid(); see vacuumlazy.c for a full
 * explanation.
 */
typedef struct xl_heap_cleanup_info
{
	RelFileNode node;
	TransactionId latestRemovedXid;
} xl_heap_cleanup_info;

#define SizeOfHeapCleanupInfo (sizeof(xl_heap_cleanup_info))

/* flags for infobits_set */
#define XLHL_XMAX_IS_MULTI		0x01
#define XLHL_XMAX_LOCK_ONLY		0x02
#define XLHL_XMAX_EXCL_LOCK		0x04
#define XLHL_XMAX_KEYSHR_LOCK	0x08
#define XLHL_KEYS_UPDATED		0x10

/* flag bits for xl_heap_lock / xl_heap_lock_updated's flag field */
#define XLH_LOCK_ALL_FROZEN_CLEARED		0x01

/* This is what we need to know about lock */
typedef struct xl_heap_lock
{
	TransactionId locking_xid;	/* might be a MultiXactId, not an xid */
	OffsetNumber offnum;		/* locked tuple's offset on page */
	int8		infobits_set;	/* infomask and infomask2 bits to set */
	uint8		flags;			/* XLH_LOCK_* flag bits */
} xl_heap_lock;

#define SizeOfHeapLock	(offsetof(xl_heap_lock, flags) + sizeof(int8))

/* This is what we need to know about locking an updated version of a row */
typedef struct xl_heap_lock_updated
{
	TransactionId xmax;
	OffsetNumber offnum;
	uint8		infobits_set;
	uint8		flags;
} xl_heap_lock_updated;

#define SizeOfHeapLockUpdated	(offsetof(xl_heap_lock_updated, flags) + sizeof(uint8))

/* This is what we need to know about confirmation of speculative insertion */
typedef struct xl_heap_confirm
{
	OffsetNumber offnum;		/* confirmed tuple's offset on page */
} xl_heap_confirm;

#define SizeOfHeapConfirm	(offsetof(xl_heap_confirm, offnum) + sizeof(OffsetNumber))

/* This is what we need to know about in-place update */
typedef struct xl_heap_inplace
{
	OffsetNumber offnum;		/* updated tuple's offset on page */
	/* TUPLE DATA FOLLOWS AT END OF STRUCT */
} xl_heap_inplace;

#define SizeOfHeapInplace	(offsetof(xl_heap_inplace, offnum) + sizeof(OffsetNumber))

/*
 * This struct represents a 'freeze plan', which is what we need to know about
 * a single tuple being frozen during vacuum.
 */
/* 0x01 was XLH_FREEZE_XMIN */
#define		XLH_FREEZE_XVAC		0x02
#define		XLH_INVALID_XVAC	0x04

typedef struct xl_heap_freeze_tuple
{
	TransactionId xmax;
	OffsetNumber offset;
	uint16		t_infomask2;
	uint16		t_infomask;
	uint8		frzflags;
} xl_heap_freeze_tuple;

/*
 * This is what we need to know about a block being frozen during vacuum
 *
 * Backup block 0's data contains an array of xl_heap_freeze_tuple structs,
 * one for each tuple.
 */
typedef struct xl_heap_freeze_page
{
	TransactionId cutoff_xid;
	uint16		ntuples;
} xl_heap_freeze_page;

#define SizeOfHeapFreezePage (offsetof(xl_heap_freeze_page, ntuples) + sizeof(uint16))
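
/*
 * Redo sketch (an assumption, along the lines of the freeze-page replay in
 * heapam.c): block 0's data is treated as an array of ntuples freeze plans,
 * each applied with heap_execute_freeze_tuple(), which is declared near the
 * end of this header.  record, xlrec and page are placeholders.
 *
 *	xl_heap_freeze_tuple *tuples;
 *	int			i;
 *
 *	tuples = (xl_heap_freeze_tuple *) XLogRecGetBlockData(record, 0, NULL);
 *	for (i = 0; i < xlrec->ntuples; i++)
 *	{
 *		xl_heap_freeze_tuple *xlrec_tp = &tuples[i];
 *		ItemId		lp = PageGetItemId(page, xlrec_tp->offset);
 *		HeapTupleHeader tuple = (HeapTupleHeader) PageGetItem(page, lp);
 *
 *		heap_execute_freeze_tuple(tuple, xlrec_tp);
 *	}
 */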

/*
 * This is what we need to know about setting a visibility map bit
 *
 * Backup blk 0: visibility map buffer
 * Backup blk 1: heap buffer
 */
typedef struct xl_heap_visible
{
	TransactionId cutoff_xid;
	uint8		flags;
} xl_heap_visible;

#define SizeOfHeapVisible (offsetof(xl_heap_visible, flags) + sizeof(uint8))
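
/*
 * Such a record is normally emitted through log_heap_visible(), declared
 * near the end of this header.  A hedged usage sketch, with rel, heapBuf,
 * vmBuf, cutoff_xid and flags standing in for values supplied by the caller:
 *
 *	XLogRecPtr	recptr;
 *
 *	recptr = log_heap_visible(rel->rd_node, heapBuf, vmBuf,
 *							  cutoff_xid, flags);
 */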

typedef struct xl_heap_new_cid
{
	/*
	 * store toplevel xid so we don't have to merge cids from different
	 * transactions
	 */
	TransactionId top_xid;
	CommandId	cmin;
	CommandId	cmax;
	CommandId	combocid;		/* just for debugging */

	/*
	 * Store the relfilenode/ctid pair to facilitate lookups.
	 */
	RelFileNode target_node;
	ItemPointerData target_tid;
} xl_heap_new_cid;

#define SizeOfHeapNewCid (offsetof(xl_heap_new_cid, target_tid) + sizeof(ItemPointerData))

/* logical rewrite xlog record header */
typedef struct xl_heap_rewrite_mapping
{
	TransactionId mapped_xid;	/* xid that might need to see the row */
	Oid			mapped_db;		/* DbOid or InvalidOid for shared rels */
	Oid			mapped_rel;		/* Oid of the mapped relation */
	off_t		offset;			/* how far we have written so far */
	uint32		num_mappings;	/* number of in-memory mappings */
	XLogRecPtr	start_lsn;		/* insert LSN at the start of the rewrite */
} xl_heap_rewrite_mapping;

extern void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
												   TransactionId *latestRemovedXid);

extern void heap_redo(XLogReaderState *record);
extern void heap_desc(StringInfo buf, XLogReaderState *record);
extern const char *heap_identify(uint8 info);
extern void heap_mask(char *pagedata, BlockNumber blkno);
extern void heap2_redo(XLogReaderState *record);
extern void heap2_desc(StringInfo buf, XLogReaderState *record);
extern const char *heap2_identify(uint8 info);
extern void heap_xlog_logical_rewrite(XLogReaderState *r);

extern XLogRecPtr log_heap_cleanup_info(RelFileNode rnode,
										TransactionId latestRemovedXid);
extern XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
								 OffsetNumber *redirected, int nredirected,
								 OffsetNumber *nowdead, int ndead,
								 OffsetNumber *nowunused, int nunused,
								 TransactionId latestRemovedXid);
extern XLogRecPtr log_heap_freeze(Relation reln, Buffer buffer,
								  TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples,
								  int ntuples);
extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
									  TransactionId relfrozenxid,
									  TransactionId relminmxid,
									  TransactionId cutoff_xid,
									  TransactionId cutoff_multi,
									  xl_heap_freeze_tuple *frz,
									  bool *totally_frozen);
extern void heap_execute_freeze_tuple(HeapTupleHeader tuple,
									  xl_heap_freeze_tuple *xlrec_tp);
extern XLogRecPtr log_heap_visible(RelFileNode rnode, Buffer heap_buffer,
								   Buffer vm_buffer, TransactionId cutoff_xid, uint8 flags);
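
/*
 * Usage sketch for the freeze API above (an assumption, modeled on lazy
 * vacuum): heap_prepare_freeze_tuple() fills one xl_heap_freeze_tuple per
 * tuple that needs changes; after applying each plan to the page with
 * heap_execute_freeze_tuple(), log_heap_freeze() WAL-logs the accumulated
 * plans.  The relation, buffer, cutoff and array variables are placeholders.
 *
 *	if (heap_prepare_freeze_tuple(tuple->t_data, relfrozenxid, relminmxid,
 *								  cutoff_xid, cutoff_multi,
 *								  &frozen[nfrozen], &totally_frozen))
 *		frozen[nfrozen++].offset = offnum;
 *
 *	...
 *
 *	if (nfrozen > 0 && RelationNeedsWAL(rel))
 *	{
 *		XLogRecPtr	recptr = log_heap_freeze(rel, buf, cutoff_xid,
 *											 frozen, nfrozen);
 *
 *		PageSetLSN(BufferGetPage(buf), recptr);
 *	}
 */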

#endif							/* HEAPAM_XLOG_H */