1 /*-------------------------------------------------------------------------
2  *
3  * htup_details.h
4  *	  POSTGRES heap tuple header definitions.
5  *
6  *
7  * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * src/include/access/htup_details.h
11  *
12  *-------------------------------------------------------------------------
13  */
14 #ifndef HTUP_DETAILS_H
15 #define HTUP_DETAILS_H
16 
17 #include "access/htup.h"
18 #include "access/tupdesc.h"
19 #include "access/tupmacs.h"
20 #include "access/transam.h"
21 #include "storage/bufpage.h"
22 
23 /*
24  * MaxTupleAttributeNumber limits the number of (user) columns in a tuple.
25  * The key limit on this value is that the size of the fixed overhead for
26  * a tuple, plus the size of the null-values bitmap (at 1 bit per column),
27  * plus MAXALIGN alignment, must fit into t_hoff which is uint8.  On most
28  * machines the upper limit without making t_hoff wider would be a little
29  * over 1700.  We use round numbers here and for MaxHeapAttributeNumber
30  * so that alterations in HeapTupleHeaderData layout won't change the
31  * supported max number of columns.
32  */
33 #define MaxTupleAttributeNumber 1664	/* 8 * 208 */
34 
35 /*
36  * MaxHeapAttributeNumber limits the number of (user) columns in a table.
37  * This should be somewhat less than MaxTupleAttributeNumber.  It must be
38  * at least one less, else we will fail to do UPDATEs on a maximal-width
39  * table (because UPDATE has to form working tuples that include CTID).
40  * In practice we want some additional daylight so that we can gracefully
41  * support operations that add hidden "resjunk" columns, for example
42  * SELECT * FROM wide_table ORDER BY foo, bar, baz.
43  * In any case, depending on column data types you will likely be running
44  * into the disk-block-based limit on overall tuple size if you have more
45  * than a thousand or so columns.  TOAST won't help.
46  */
47 #define MaxHeapAttributeNumber	1600	/* 8 * 200 */
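
/*
 * As a worked example (assuming the usual 8-byte MAXALIGN): a tuple with
 * MaxTupleAttributeNumber columns needs the 23-byte fixed header plus a
 * 1664/8 = 208-byte null bitmap, plus 4 bytes if an OID field is present,
 * for 235 bytes in all; MAXALIGN'd, that is 240, which still fits in the
 * uint8 t_hoff.
 */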
48 
49 /*
50  * Heap tuple header.  To avoid wasting space, the fields should be
51  * laid out in such a way as to avoid structure padding.
52  *
53  * Datums of composite types (row types) share the same general structure
54  * as on-disk tuples, so that the same routines can be used to build and
55  * examine them.  However the requirements are slightly different: a Datum
 * does not need any transaction visibility information, but it does need
57  * a length word and some embedded type information.  We can achieve this
58  * by overlaying the xmin/cmin/xmax/cmax/xvac fields of a heap tuple
59  * with the fields needed in the Datum case.  Typically, all tuples built
60  * in-memory will be initialized with the Datum fields; but when a tuple is
61  * about to be inserted in a table, the transaction fields will be filled,
62  * overwriting the datum fields.
63  *
64  * The overall structure of a heap tuple looks like:
65  *			fixed fields (HeapTupleHeaderData struct)
66  *			nulls bitmap (if HEAP_HASNULL is set in t_infomask)
67  *			alignment padding (as needed to make user data MAXALIGN'd)
68  *			object ID (if HEAP_HASOID is set in t_infomask)
69  *			user data fields
70  *
71  * We store five "virtual" fields Xmin, Cmin, Xmax, Cmax, and Xvac in three
72  * physical fields.  Xmin and Xmax are always really stored, but Cmin, Cmax
73  * and Xvac share a field.  This works because we know that Cmin and Cmax
74  * are only interesting for the lifetime of the inserting and deleting
75  * transaction respectively.  If a tuple is inserted and deleted in the same
76  * transaction, we store a "combo" command id that can be mapped to the real
77  * cmin and cmax, but only by use of local state within the originating
78  * backend.  See combocid.c for more details.  Meanwhile, Xvac is only set by
79  * old-style VACUUM FULL, which does not have any command sub-structure and so
80  * does not need either Cmin or Cmax.  (This requires that old-style VACUUM
81  * FULL never try to move a tuple whose Cmin or Cmax is still interesting,
82  * ie, an insert-in-progress or delete-in-progress tuple.)
83  *
84  * A word about t_ctid: whenever a new tuple is stored on disk, its t_ctid
85  * is initialized with its own TID (location).  If the tuple is ever updated,
86  * its t_ctid is changed to point to the replacement version of the tuple.  Or
87  * if the tuple is moved from one partition to another, due to an update of
88  * the partition key, t_ctid is set to a special value to indicate that
 * (see ItemPointerSetMovedPartitions).  Thus, a tuple is the latest version
 * of its row iff XMAX is invalid or t_ctid points to itself (in which case,
 * if XMAX is valid, the tuple is either locked or deleted).  One can follow
 * the chain of t_ctid links to find the newest version of the row, unless it
 * was moved to a different partition.  Beware however that VACUUM might erase
 * the pointed-to (newer) tuple before erasing the pointing (older) tuple.
 * Hence, when following a t_ctid link, it is necessary to check to see if the
 * referenced slot is empty or contains an unrelated tuple.
98  * Check that the referenced tuple has XMIN equal to the referencing tuple's
99  * XMAX to verify that it is actually the descendant version and not an
100  * unrelated tuple stored into a slot recently freed by VACUUM.  If either
101  * check fails, one may assume that there is no live descendant version.
102  *
103  * t_ctid is sometimes used to store a speculative insertion token, instead
104  * of a real TID.  A speculative token is set on a tuple that's being
105  * inserted, until the inserter is sure that it wants to go ahead with the
106  * insertion.  Hence a token should only be seen on a tuple with an XMAX
107  * that's still in-progress, or invalid/aborted.  The token is replaced with
108  * the tuple's real TID when the insertion is confirmed.  One should never
109  * see a speculative insertion token while following a chain of t_ctid links,
110  * because they are not used on updates, only insertions.
111  *
112  * Following the fixed header fields, the nulls bitmap is stored (beginning
113  * at t_bits).  The bitmap is *not* stored if t_infomask shows that there
114  * are no nulls in the tuple.  If an OID field is present (as indicated by
115  * t_infomask), then it is stored just before the user data, which begins at
116  * the offset shown by t_hoff.  Note that t_hoff must be a multiple of
117  * MAXALIGN.
118  */
119 
120 typedef struct HeapTupleFields
121 {
122 	TransactionId t_xmin;		/* inserting xact ID */
123 	TransactionId t_xmax;		/* deleting or locking xact ID */
124 
125 	union
126 	{
127 		CommandId	t_cid;		/* inserting or deleting command ID, or both */
128 		TransactionId t_xvac;	/* old-style VACUUM FULL xact ID */
129 	}			t_field3;
130 } HeapTupleFields;
131 
132 typedef struct DatumTupleFields
133 {
134 	int32		datum_len_;		/* varlena header (do not touch directly!) */
135 
136 	int32		datum_typmod;	/* -1, or identifier of a record type */
137 
138 	Oid			datum_typeid;	/* composite type OID, or RECORDOID */
139 
140 	/*
141 	 * datum_typeid cannot be a domain over composite, only plain composite,
142 	 * even if the datum is meant as a value of a domain-over-composite type.
143 	 * This is in line with the general principle that CoerceToDomain does not
144 	 * change the physical representation of the base type value.
145 	 *
146 	 * Note: field ordering is chosen with thought that Oid might someday
147 	 * widen to 64 bits.
148 	 */
149 } DatumTupleFields;
150 
151 struct HeapTupleHeaderData
152 {
153 	union
154 	{
155 		HeapTupleFields t_heap;
156 		DatumTupleFields t_datum;
157 	}			t_choice;
158 
159 	ItemPointerData t_ctid;		/* current TID of this or newer tuple (or a
160 								 * speculative insertion token) */
161 
162 	/* Fields below here must match MinimalTupleData! */
163 
164 #define FIELDNO_HEAPTUPLEHEADERDATA_INFOMASK2 2
165 	uint16		t_infomask2;	/* number of attributes + various flags */
166 
167 #define FIELDNO_HEAPTUPLEHEADERDATA_INFOMASK 3
168 	uint16		t_infomask;		/* various flag bits, see below */
169 
170 #define FIELDNO_HEAPTUPLEHEADERDATA_HOFF 4
171 	uint8		t_hoff;			/* sizeof header incl. bitmap, padding */
172 
173 	/* ^ - 23 bytes - ^ */
174 
175 #define FIELDNO_HEAPTUPLEHEADERDATA_BITS 5
176 	bits8		t_bits[FLEXIBLE_ARRAY_MEMBER];	/* bitmap of NULLs */
177 
178 	/* MORE DATA FOLLOWS AT END OF STRUCT */
179 };
180 
181 /* typedef appears in htup.h */
182 
183 #define SizeofHeapTupleHeader offsetof(HeapTupleHeaderData, t_bits)
184 
185 /*
186  * information stored in t_infomask:
187  */
188 #define HEAP_HASNULL			0x0001	/* has null attribute(s) */
189 #define HEAP_HASVARWIDTH		0x0002	/* has variable-width attribute(s) */
190 #define HEAP_HASEXTERNAL		0x0004	/* has external stored attribute(s) */
191 #define HEAP_HASOID				0x0008	/* has an object-id field */
192 #define HEAP_XMAX_KEYSHR_LOCK	0x0010	/* xmax is a key-shared locker */
193 #define HEAP_COMBOCID			0x0020	/* t_cid is a combo cid */
194 #define HEAP_XMAX_EXCL_LOCK		0x0040	/* xmax is exclusive locker */
195 #define HEAP_XMAX_LOCK_ONLY		0x0080	/* xmax, if valid, is only a locker */
196 
197  /* xmax is a shared locker */
198 #define HEAP_XMAX_SHR_LOCK	(HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_KEYSHR_LOCK)
199 
200 #define HEAP_LOCK_MASK	(HEAP_XMAX_SHR_LOCK | HEAP_XMAX_EXCL_LOCK | \
201 						 HEAP_XMAX_KEYSHR_LOCK)
202 #define HEAP_XMIN_COMMITTED		0x0100	/* t_xmin committed */
203 #define HEAP_XMIN_INVALID		0x0200	/* t_xmin invalid/aborted */
204 #define HEAP_XMIN_FROZEN		(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID)
205 #define HEAP_XMAX_COMMITTED		0x0400	/* t_xmax committed */
206 #define HEAP_XMAX_INVALID		0x0800	/* t_xmax invalid/aborted */
207 #define HEAP_XMAX_IS_MULTI		0x1000	/* t_xmax is a MultiXactId */
208 #define HEAP_UPDATED			0x2000	/* this is UPDATEd version of row */
209 #define HEAP_MOVED_OFF			0x4000	/* moved to another place by pre-9.0
210 										 * VACUUM FULL; kept for binary
211 										 * upgrade support */
212 #define HEAP_MOVED_IN			0x8000	/* moved from another place by pre-9.0
213 										 * VACUUM FULL; kept for binary
214 										 * upgrade support */
215 #define HEAP_MOVED (HEAP_MOVED_OFF | HEAP_MOVED_IN)
216 
217 #define HEAP_XACT_MASK			0xFFF0	/* visibility-related bits */
218 
219 /*
220  * A tuple is only locked (i.e. not updated by its Xmax) if the
221  * HEAP_XMAX_LOCK_ONLY bit is set; or, for pg_upgrade's sake, if the Xmax is
222  * not a multi and the EXCL_LOCK bit is set.
223  *
224  * See also HeapTupleHeaderIsOnlyLocked, which also checks for a possible
225  * aborted updater transaction.
226  *
227  * Beware of multiple evaluations of the argument.
228  */
229 #define HEAP_XMAX_IS_LOCKED_ONLY(infomask) \
230 	(((infomask) & HEAP_XMAX_LOCK_ONLY) || \
231 	 (((infomask) & (HEAP_XMAX_IS_MULTI | HEAP_LOCK_MASK)) == HEAP_XMAX_EXCL_LOCK))
232 
233 /*
234  * A tuple that has HEAP_XMAX_IS_MULTI and HEAP_XMAX_LOCK_ONLY but neither of
235  * XMAX_EXCL_LOCK and XMAX_KEYSHR_LOCK must come from a tuple that was
236  * share-locked in 9.2 or earlier and then pg_upgrade'd.
237  *
238  * In 9.2 and prior, HEAP_XMAX_IS_MULTI was only set when there were multiple
239  * FOR SHARE lockers of that tuple.  That set HEAP_XMAX_LOCK_ONLY (with a
240  * different name back then) but neither of HEAP_XMAX_EXCL_LOCK and
241  * HEAP_XMAX_KEYSHR_LOCK.  That combination is no longer possible in 9.3 and
242  * up, so if we see that combination we know for certain that the tuple was
243  * locked in an earlier release; since all such lockers are gone (they cannot
244  * survive through pg_upgrade), such tuples can safely be considered not
245  * locked.
246  *
247  * We must not resolve such multixacts locally, because the result would be
248  * bogus, regardless of where they stand with respect to the current valid
249  * multixact range.
250  */
251 #define HEAP_LOCKED_UPGRADED(infomask) \
252 ( \
253 	 ((infomask) & HEAP_XMAX_IS_MULTI) != 0 && \
254 	 ((infomask) & HEAP_XMAX_LOCK_ONLY) != 0 && \
255 	 (((infomask) & (HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_KEYSHR_LOCK)) == 0) \
256 )
257 
258 /*
259  * Use these to test whether a particular lock is applied to a tuple
260  */
261 #define HEAP_XMAX_IS_SHR_LOCKED(infomask) \
262 	(((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_SHR_LOCK)
263 #define HEAP_XMAX_IS_EXCL_LOCKED(infomask) \
264 	(((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_EXCL_LOCK)
265 #define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) \
266 	(((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_KEYSHR_LOCK)
267 
268 /* turn these all off when Xmax is to change */
269 #define HEAP_XMAX_BITS (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID | \
270 						HEAP_XMAX_IS_MULTI | HEAP_LOCK_MASK | HEAP_XMAX_LOCK_ONLY)
271 
272 /*
273  * information stored in t_infomask2:
274  */
275 #define HEAP_NATTS_MASK			0x07FF	/* 11 bits for number of attributes */
276 /* bits 0x1800 are available */
277 #define HEAP_KEYS_UPDATED		0x2000	/* tuple was updated and key cols
278 										 * modified, or tuple deleted */
279 #define HEAP_HOT_UPDATED		0x4000	/* tuple was HOT-updated */
280 #define HEAP_ONLY_TUPLE			0x8000	/* this is heap-only tuple */
281 
282 #define HEAP2_XACT_MASK			0xE000	/* visibility-related bits */
283 
284 /*
285  * HEAP_TUPLE_HAS_MATCH is a temporary flag used during hash joins.  It is
286  * only used in tuples that are in the hash table, and those don't need
287  * any visibility information, so we can overlay it on a visibility flag
288  * instead of using up a dedicated bit.
289  */
290 #define HEAP_TUPLE_HAS_MATCH	HEAP_ONLY_TUPLE /* tuple has a join match */
291 
292 /*
293  * HeapTupleHeader accessor macros
294  *
295  * Note: beware of multiple evaluations of "tup" argument.  But the Set
296  * macros evaluate their other argument only once.
297  */
298 
299 /*
300  * HeapTupleHeaderGetRawXmin returns the "raw" xmin field, which is the xid
301  * originally used to insert the tuple.  However, the tuple might actually
302  * be frozen (via HeapTupleHeaderSetXminFrozen) in which case the tuple's xmin
303  * is visible to every snapshot.  Prior to PostgreSQL 9.4, we actually changed
304  * the xmin to FrozenTransactionId, and that value may still be encountered
305  * on disk.
306  */
307 #define HeapTupleHeaderGetRawXmin(tup) \
308 ( \
309 	(tup)->t_choice.t_heap.t_xmin \
310 )
311 
312 #define HeapTupleHeaderGetXmin(tup) \
313 ( \
314 	HeapTupleHeaderXminFrozen(tup) ? \
315 		FrozenTransactionId : HeapTupleHeaderGetRawXmin(tup) \
316 )
317 
318 #define HeapTupleHeaderSetXmin(tup, xid) \
319 ( \
320 	(tup)->t_choice.t_heap.t_xmin = (xid) \
321 )
322 
323 #define HeapTupleHeaderXminCommitted(tup) \
324 ( \
325 	((tup)->t_infomask & HEAP_XMIN_COMMITTED) != 0 \
326 )
327 
328 #define HeapTupleHeaderXminInvalid(tup) \
329 ( \
330 	((tup)->t_infomask & (HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID)) == \
331 		HEAP_XMIN_INVALID \
332 )
333 
334 #define HeapTupleHeaderXminFrozen(tup) \
335 ( \
336 	((tup)->t_infomask & (HEAP_XMIN_FROZEN)) == HEAP_XMIN_FROZEN \
337 )
338 
339 #define HeapTupleHeaderSetXminCommitted(tup) \
340 ( \
341 	AssertMacro(!HeapTupleHeaderXminInvalid(tup)), \
342 	((tup)->t_infomask |= HEAP_XMIN_COMMITTED) \
343 )
344 
345 #define HeapTupleHeaderSetXminInvalid(tup) \
346 ( \
347 	AssertMacro(!HeapTupleHeaderXminCommitted(tup)), \
348 	((tup)->t_infomask |= HEAP_XMIN_INVALID) \
349 )
350 
351 #define HeapTupleHeaderSetXminFrozen(tup) \
352 ( \
353 	AssertMacro(!HeapTupleHeaderXminInvalid(tup)), \
354 	((tup)->t_infomask |= HEAP_XMIN_FROZEN) \
355 )
356 
357 /*
358  * HeapTupleHeaderGetRawXmax gets you the raw Xmax field.  To find out the Xid
359  * that updated a tuple, you might need to resolve the MultiXactId if certain
360  * bits are set.  HeapTupleHeaderGetUpdateXid checks those bits and takes care
361  * to resolve the MultiXactId if necessary.  This might involve multixact I/O,
362  * so it should only be used if absolutely necessary.
363  */
364 #define HeapTupleHeaderGetUpdateXid(tup) \
365 ( \
366 	(!((tup)->t_infomask & HEAP_XMAX_INVALID) && \
367 	 ((tup)->t_infomask & HEAP_XMAX_IS_MULTI) && \
368 	 !((tup)->t_infomask & HEAP_XMAX_LOCK_ONLY)) ? \
369 		HeapTupleGetUpdateXid(tup) \
370 	: \
371 		HeapTupleHeaderGetRawXmax(tup) \
372 )
373 
374 #define HeapTupleHeaderGetRawXmax(tup) \
375 ( \
376 	(tup)->t_choice.t_heap.t_xmax \
377 )
378 
379 #define HeapTupleHeaderSetXmax(tup, xid) \
380 ( \
381 	(tup)->t_choice.t_heap.t_xmax = (xid) \
382 )
383 
384 /*
385  * HeapTupleHeaderGetRawCommandId will give you what's in the header whether
386  * it is useful or not.  Most code should use HeapTupleHeaderGetCmin or
387  * HeapTupleHeaderGetCmax instead, but note that those Assert that you can
388  * get a legitimate result, ie you are in the originating transaction!
389  */
390 #define HeapTupleHeaderGetRawCommandId(tup) \
391 ( \
392 	(tup)->t_choice.t_heap.t_field3.t_cid \
393 )
394 
395 /* SetCmin is reasonably simple since we never need a combo CID */
396 #define HeapTupleHeaderSetCmin(tup, cid) \
397 do { \
398 	Assert(!((tup)->t_infomask & HEAP_MOVED)); \
399 	(tup)->t_choice.t_heap.t_field3.t_cid = (cid); \
400 	(tup)->t_infomask &= ~HEAP_COMBOCID; \
401 } while (0)
402 
403 /* SetCmax must be used after HeapTupleHeaderAdjustCmax; see combocid.c */
404 #define HeapTupleHeaderSetCmax(tup, cid, iscombo) \
405 do { \
406 	Assert(!((tup)->t_infomask & HEAP_MOVED)); \
407 	(tup)->t_choice.t_heap.t_field3.t_cid = (cid); \
408 	if (iscombo) \
409 		(tup)->t_infomask |= HEAP_COMBOCID; \
410 	else \
411 		(tup)->t_infomask &= ~HEAP_COMBOCID; \
412 } while (0)
413 
414 #define HeapTupleHeaderGetXvac(tup) \
415 ( \
416 	((tup)->t_infomask & HEAP_MOVED) ? \
417 		(tup)->t_choice.t_heap.t_field3.t_xvac \
418 	: \
419 		InvalidTransactionId \
420 )
421 
422 #define HeapTupleHeaderSetXvac(tup, xid) \
423 do { \
424 	Assert((tup)->t_infomask & HEAP_MOVED); \
425 	(tup)->t_choice.t_heap.t_field3.t_xvac = (xid); \
426 } while (0)
427 
428 #define HeapTupleHeaderIsSpeculative(tup) \
429 ( \
430 	(ItemPointerGetOffsetNumberNoCheck(&(tup)->t_ctid) == SpecTokenOffsetNumber) \
431 )
432 
433 #define HeapTupleHeaderGetSpeculativeToken(tup) \
434 ( \
435 	AssertMacro(HeapTupleHeaderIsSpeculative(tup)), \
436 	ItemPointerGetBlockNumber(&(tup)->t_ctid) \
437 )
438 
439 #define HeapTupleHeaderSetSpeculativeToken(tup, token)	\
440 ( \
441 	ItemPointerSet(&(tup)->t_ctid, token, SpecTokenOffsetNumber) \
442 )
443 
444 #define HeapTupleHeaderIndicatesMovedPartitions(tup) \
445 	(ItemPointerGetOffsetNumber(&(tup)->t_ctid) == MovedPartitionsOffsetNumber && \
446 	 ItemPointerGetBlockNumberNoCheck(&(tup)->t_ctid) == MovedPartitionsBlockNumber)
447 
448 #define HeapTupleHeaderSetMovedPartitions(tup) \
449 	ItemPointerSet(&(tup)->t_ctid, MovedPartitionsBlockNumber, MovedPartitionsOffsetNumber)
450 
451 #define HeapTupleHeaderGetDatumLength(tup) \
452 	VARSIZE(tup)
453 
454 #define HeapTupleHeaderSetDatumLength(tup, len) \
455 	SET_VARSIZE(tup, len)
456 
457 #define HeapTupleHeaderGetTypeId(tup) \
458 ( \
459 	(tup)->t_choice.t_datum.datum_typeid \
460 )
461 
462 #define HeapTupleHeaderSetTypeId(tup, typeid) \
463 ( \
464 	(tup)->t_choice.t_datum.datum_typeid = (typeid) \
465 )
466 
467 #define HeapTupleHeaderGetTypMod(tup) \
468 ( \
469 	(tup)->t_choice.t_datum.datum_typmod \
470 )
471 
472 #define HeapTupleHeaderSetTypMod(tup, typmod) \
473 ( \
474 	(tup)->t_choice.t_datum.datum_typmod = (typmod) \
475 )
476 
477 #define HeapTupleHeaderGetOid(tup) \
478 ( \
479 	((tup)->t_infomask & HEAP_HASOID) ? \
480 		*((Oid *) ((char *)(tup) + (tup)->t_hoff - sizeof(Oid))) \
481 	: \
482 		InvalidOid \
483 )
484 
485 #define HeapTupleHeaderSetOid(tup, oid) \
486 do { \
487 	Assert((tup)->t_infomask & HEAP_HASOID); \
488 	*((Oid *) ((char *)(tup) + (tup)->t_hoff - sizeof(Oid))) = (oid); \
489 } while (0)
490 
491 /*
492  * Note that we stop considering a tuple HOT-updated as soon as it is known
493  * aborted or the would-be updating transaction is known aborted.  For best
494  * efficiency, check tuple visibility before using this macro, so that the
495  * INVALID bits will be as up to date as possible.
496  */
497 #define HeapTupleHeaderIsHotUpdated(tup) \
498 ( \
499 	((tup)->t_infomask2 & HEAP_HOT_UPDATED) != 0 && \
500 	((tup)->t_infomask & HEAP_XMAX_INVALID) == 0 && \
501 	!HeapTupleHeaderXminInvalid(tup) \
502 )
503 
504 #define HeapTupleHeaderSetHotUpdated(tup) \
505 ( \
506 	(tup)->t_infomask2 |= HEAP_HOT_UPDATED \
507 )
508 
509 #define HeapTupleHeaderClearHotUpdated(tup) \
510 ( \
511 	(tup)->t_infomask2 &= ~HEAP_HOT_UPDATED \
512 )
513 
514 #define HeapTupleHeaderIsHeapOnly(tup) \
515 ( \
516   ((tup)->t_infomask2 & HEAP_ONLY_TUPLE) != 0 \
517 )
518 
519 #define HeapTupleHeaderSetHeapOnly(tup) \
520 ( \
521   (tup)->t_infomask2 |= HEAP_ONLY_TUPLE \
522 )
523 
524 #define HeapTupleHeaderClearHeapOnly(tup) \
525 ( \
526   (tup)->t_infomask2 &= ~HEAP_ONLY_TUPLE \
527 )
528 
529 #define HeapTupleHeaderHasMatch(tup) \
530 ( \
531   ((tup)->t_infomask2 & HEAP_TUPLE_HAS_MATCH) != 0 \
532 )
533 
534 #define HeapTupleHeaderSetMatch(tup) \
535 ( \
536   (tup)->t_infomask2 |= HEAP_TUPLE_HAS_MATCH \
537 )
538 
539 #define HeapTupleHeaderClearMatch(tup) \
540 ( \
541   (tup)->t_infomask2 &= ~HEAP_TUPLE_HAS_MATCH \
542 )
543 
544 #define HeapTupleHeaderGetNatts(tup) \
545 	((tup)->t_infomask2 & HEAP_NATTS_MASK)
546 
547 #define HeapTupleHeaderSetNatts(tup, natts) \
548 ( \
549 	(tup)->t_infomask2 = ((tup)->t_infomask2 & ~HEAP_NATTS_MASK) | (natts) \
550 )
551 
552 #define HeapTupleHeaderHasExternal(tup) \
553 		(((tup)->t_infomask & HEAP_HASEXTERNAL) != 0)
554 
555 
556 /*
557  * BITMAPLEN(NATTS) -
558  *		Computes size of null bitmap given number of data columns.
559  */
560 #define BITMAPLEN(NATTS)	(((int)(NATTS) + 7) / 8)
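
/*
 * For example, a tuple with MaxHeapAttributeNumber columns needs
 * BITMAPLEN(1600) = (1600 + 7) / 8 = 200 bytes of bitmap space, while a
 * 9-column tuple containing any NULLs needs BITMAPLEN(9) = 2 bytes.
 */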
561 
562 /*
563  * MaxHeapTupleSize is the maximum allowed size of a heap tuple, including
564  * header and MAXALIGN alignment padding.  Basically it's BLCKSZ minus the
565  * other stuff that has to be on a disk page.  Since heap pages use no
566  * "special space", there's no deduction for that.
567  *
568  * NOTE: we allow for the ItemId that must point to the tuple, ensuring that
569  * an otherwise-empty page can indeed hold a tuple of this size.  Because
570  * ItemIds and tuples have different alignment requirements, don't assume that
571  * you can, say, fit 2 tuples of size MaxHeapTupleSize/2 on the same page.
572  */
573 #define MaxHeapTupleSize  (BLCKSZ - MAXALIGN(SizeOfPageHeaderData + sizeof(ItemIdData)))
574 #define MinHeapTupleSize  MAXALIGN(SizeofHeapTupleHeader)
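
/*
 * For example, with the default BLCKSZ of 8192, a 24-byte page header, a
 * 4-byte ItemIdData and 8-byte MAXALIGN, MaxHeapTupleSize works out to
 * 8192 - MAXALIGN(24 + 4) = 8192 - 32 = 8160 bytes.
 */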
575 
576 /*
577  * MaxHeapTuplesPerPage is an upper bound on the number of tuples that can
578  * fit on one heap page.  (Note that indexes could have more, because they
579  * use a smaller tuple header.)  We arrive at the divisor because each tuple
580  * must be maxaligned, and it must have an associated item pointer.
581  *
582  * Note: with HOT, there could theoretically be more line pointers (not actual
583  * tuples) than this on a heap page.  However we constrain the number of line
584  * pointers to this anyway, to avoid excessive line-pointer bloat and not
585  * require increases in the size of work arrays.
586  */
587 #define MaxHeapTuplesPerPage	\
588 	((int) ((BLCKSZ - SizeOfPageHeaderData) / \
589 			(MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))))
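
/*
 * With the same defaults as above (BLCKSZ 8192, 24-byte page header, 23-byte
 * tuple header MAXALIGN'd to 24, 4-byte line pointer), this works out to
 * (8192 - 24) / (24 + 4) = 291 tuples per page.
 */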
590 
591 /*
592  * MaxAttrSize is a somewhat arbitrary upper limit on the declared size of
593  * data fields of char(n) and similar types.  It need not have anything
594  * directly to do with the *actual* upper limit of varlena values, which
595  * is currently 1Gb (see TOAST structures in postgres.h).  I've set it
596  * at 10Mb which seems like a reasonable number --- tgl 8/6/00.
597  */
598 #define MaxAttrSize		(10 * 1024 * 1024)
599 
600 
601 /*
602  * MinimalTuple is an alternative representation that is used for transient
603  * tuples inside the executor, in places where transaction status information
604  * is not required, the tuple rowtype is known, and shaving off a few bytes
605  * is worthwhile because we need to store many tuples.  The representation
606  * is chosen so that tuple access routines can work with either full or
607  * minimal tuples via a HeapTupleData pointer structure.  The access routines
608  * see no difference, except that they must not access the transaction status
609  * or t_ctid fields because those aren't there.
610  *
611  * For the most part, MinimalTuples should be accessed via TupleTableSlot
612  * routines.  These routines will prevent access to the "system columns"
613  * and thereby prevent accidental use of the nonexistent fields.
614  *
615  * MinimalTupleData contains a length word, some padding, and fields matching
 * HeapTupleHeaderData beginning with t_infomask2.  The padding is chosen so
 * that offsetof(t_infomask2) is the same modulo MAXIMUM_ALIGNOF in both
 * structs.  This makes data alignment rules equivalent in both cases.
619  *
620  * When a minimal tuple is accessed via a HeapTupleData pointer, t_data is
621  * set to point MINIMAL_TUPLE_OFFSET bytes before the actual start of the
622  * minimal tuple --- that is, where a full tuple matching the minimal tuple's
623  * data would start.  This trick is what makes the structs seem equivalent.
624  *
625  * Note that t_hoff is computed the same as in a full tuple, hence it includes
626  * the MINIMAL_TUPLE_OFFSET distance.  t_len does not include that, however.
627  *
628  * MINIMAL_TUPLE_DATA_OFFSET is the offset to the first useful (non-pad) data
629  * other than the length word.  tuplesort.c and tuplestore.c use this to avoid
630  * writing the padding to disk.
631  */
632 #define MINIMAL_TUPLE_OFFSET \
633 	((offsetof(HeapTupleHeaderData, t_infomask2) - sizeof(uint32)) / MAXIMUM_ALIGNOF * MAXIMUM_ALIGNOF)
634 #define MINIMAL_TUPLE_PADDING \
635 	((offsetof(HeapTupleHeaderData, t_infomask2) - sizeof(uint32)) % MAXIMUM_ALIGNOF)
636 #define MINIMAL_TUPLE_DATA_OFFSET \
637 	offsetof(MinimalTupleData, t_infomask2)
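
/*
 * As a worked example on a typical platform (MAXIMUM_ALIGNOF == 8, giving
 * offsetof(HeapTupleHeaderData, t_infomask2) == 18): MINIMAL_TUPLE_OFFSET is
 * (18 - 4) / 8 * 8 = 8, MINIMAL_TUPLE_PADDING is (18 - 4) % 8 = 6, and
 * MINIMAL_TUPLE_DATA_OFFSET is 4 + 6 = 10, which equals 18 - 8 and is thus
 * congruent to 18 modulo MAXIMUM_ALIGNOF, as the comment above requires.
 */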
638 
639 struct MinimalTupleData
640 {
641 	uint32		t_len;			/* actual length of minimal tuple */
642 
643 	char		mt_padding[MINIMAL_TUPLE_PADDING];
644 
645 	/* Fields below here must match HeapTupleHeaderData! */
646 
647 	uint16		t_infomask2;	/* number of attributes + various flags */
648 
649 	uint16		t_infomask;		/* various flag bits, see below */
650 
651 	uint8		t_hoff;			/* sizeof header incl. bitmap, padding */
652 
653 	/* ^ - 23 bytes - ^ */
654 
655 	bits8		t_bits[FLEXIBLE_ARRAY_MEMBER];	/* bitmap of NULLs */
656 
657 	/* MORE DATA FOLLOWS AT END OF STRUCT */
658 };
659 
660 /* typedef appears in htup.h */
661 
662 #define SizeofMinimalTupleHeader offsetof(MinimalTupleData, t_bits)
663 
664 
665 /*
666  * GETSTRUCT - given a HeapTuple pointer, return address of the user data
667  */
668 #define GETSTRUCT(TUP) ((char *) ((TUP)->t_data) + (TUP)->t_data->t_hoff)
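
/*
 * Typical usage (illustrative only): GETSTRUCT is normally cast to the
 * "Form_" struct of the relation's rowtype, e.g. for a pg_class tuple
 * (Form_pg_class is declared in catalog/pg_class.h, not included here):
 *
 *		Form_pg_class relform = (Form_pg_class) GETSTRUCT(reltuple);
 *
 *		elog(DEBUG1, "relkind of %s is %c",
 *			 NameStr(relform->relname), relform->relkind);
 */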
669 
670 /*
671  * Accessor macros to be used with HeapTuple pointers.
672  */
673 
674 #define HeapTupleHasNulls(tuple) \
675 		(((tuple)->t_data->t_infomask & HEAP_HASNULL) != 0)
676 
677 #define HeapTupleNoNulls(tuple) \
678 		(!((tuple)->t_data->t_infomask & HEAP_HASNULL))
679 
680 #define HeapTupleHasVarWidth(tuple) \
681 		(((tuple)->t_data->t_infomask & HEAP_HASVARWIDTH) != 0)
682 
683 #define HeapTupleAllFixed(tuple) \
684 		(!((tuple)->t_data->t_infomask & HEAP_HASVARWIDTH))
685 
686 #define HeapTupleHasExternal(tuple) \
687 		(((tuple)->t_data->t_infomask & HEAP_HASEXTERNAL) != 0)
688 
689 #define HeapTupleIsHotUpdated(tuple) \
690 		HeapTupleHeaderIsHotUpdated((tuple)->t_data)
691 
692 #define HeapTupleSetHotUpdated(tuple) \
693 		HeapTupleHeaderSetHotUpdated((tuple)->t_data)
694 
695 #define HeapTupleClearHotUpdated(tuple) \
696 		HeapTupleHeaderClearHotUpdated((tuple)->t_data)
697 
698 #define HeapTupleIsHeapOnly(tuple) \
699 		HeapTupleHeaderIsHeapOnly((tuple)->t_data)
700 
701 #define HeapTupleSetHeapOnly(tuple) \
702 		HeapTupleHeaderSetHeapOnly((tuple)->t_data)
703 
704 #define HeapTupleClearHeapOnly(tuple) \
705 		HeapTupleHeaderClearHeapOnly((tuple)->t_data)
706 
707 #define HeapTupleGetOid(tuple) \
708 		HeapTupleHeaderGetOid((tuple)->t_data)
709 
710 #define HeapTupleSetOid(tuple, oid) \
711 		HeapTupleHeaderSetOid((tuple)->t_data, (oid))
712 
713 
714 /* ----------------
715  *		fastgetattr
716  *
717  *		Fetch a user attribute's value as a Datum (might be either a
718  *		value, or a pointer into the data area of the tuple).
719  *
720  *		This must not be used when a system attribute might be requested.
721  *		Furthermore, the passed attnum MUST be valid.  Use heap_getattr()
722  *		instead, if in doubt.
723  *
724  *		This gets called many times, so we macro the cacheable and NULL
725  *		lookups, and call nocachegetattr() for the rest.
726  * ----------------
727  */
728 
729 #if !defined(DISABLE_COMPLEX_MACRO)
730 
731 #define fastgetattr(tup, attnum, tupleDesc, isnull)					\
732 (																	\
733 	AssertMacro((attnum) > 0),										\
734 	(*(isnull) = false),											\
735 	HeapTupleNoNulls(tup) ?											\
736 	(																\
737 		TupleDescAttr((tupleDesc), (attnum)-1)->attcacheoff >= 0 ?	\
738 		(															\
739 			fetchatt(TupleDescAttr((tupleDesc), (attnum)-1),		\
740 				(char *) (tup)->t_data + (tup)->t_data->t_hoff +	\
741 				TupleDescAttr((tupleDesc), (attnum)-1)->attcacheoff)\
742 		)															\
743 		:															\
744 			nocachegetattr((tup), (attnum), (tupleDesc))			\
745 	)																\
746 	:																\
747 	(																\
748 		att_isnull((attnum)-1, (tup)->t_data->t_bits) ?				\
749 		(															\
750 			(*(isnull) = true),										\
751 			(Datum)NULL												\
752 		)															\
753 		:															\
754 		(															\
755 			nocachegetattr((tup), (attnum), (tupleDesc))			\
756 		)															\
757 	)																\
758 )
759 #else							/* defined(DISABLE_COMPLEX_MACRO) */
760 
761 extern Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
762 			bool *isnull);
763 #endif							/* defined(DISABLE_COMPLEX_MACRO) */
764 
765 
766 /* ----------------
767  *		heap_getattr
768  *
769  *		Extract an attribute of a heap tuple and return it as a Datum.
770  *		This works for either system or user attributes.  The given attnum
771  *		is properly range-checked.
772  *
773  *		If the field in question has a NULL value, we return a zero Datum
774  *		and set *isnull == true.  Otherwise, we set *isnull == false.
775  *
776  *		<tup> is the pointer to the heap tuple.  <attnum> is the attribute
777  *		number of the column (field) caller wants.  <tupleDesc> is a
778  *		pointer to the structure describing the row and all its fields.
779  * ----------------
780  */
781 #define heap_getattr(tup, attnum, tupleDesc, isnull) \
782 	( \
783 		((attnum) > 0) ? \
784 		( \
785 			((attnum) > (int) HeapTupleHeaderGetNatts((tup)->t_data)) ? \
786 				getmissingattr((tupleDesc), (attnum), (isnull)) \
787 			: \
788 				fastgetattr((tup), (attnum), (tupleDesc), (isnull)) \
789 		) \
790 		: \
791 			heap_getsysattr((tup), (attnum), (tupleDesc), (isnull)) \
792 	)
793 
794 
795 /* prototypes for functions in common/heaptuple.c */
796 extern Size heap_compute_data_size(TupleDesc tupleDesc,
797 					   Datum *values, bool *isnull);
798 extern void heap_fill_tuple(TupleDesc tupleDesc,
799 				Datum *values, bool *isnull,
800 				char *data, Size data_size,
801 				uint16 *infomask, bits8 *bit);
802 extern bool heap_attisnull(HeapTuple tup, int attnum, TupleDesc tupleDesc);
803 extern Datum nocachegetattr(HeapTuple tup, int attnum,
804 			   TupleDesc att);
805 extern Datum heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
806 				bool *isnull);
807 extern Datum getmissingattr(TupleDesc tupleDesc,
808 			   int attnum, bool *isnull);
809 extern HeapTuple heap_copytuple(HeapTuple tuple);
810 extern void heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest);
811 extern Datum heap_copy_tuple_as_datum(HeapTuple tuple, TupleDesc tupleDesc);
812 extern HeapTuple heap_form_tuple(TupleDesc tupleDescriptor,
813 				Datum *values, bool *isnull);
814 extern HeapTuple heap_modify_tuple(HeapTuple tuple,
815 				  TupleDesc tupleDesc,
816 				  Datum *replValues,
817 				  bool *replIsnull,
818 				  bool *doReplace);
819 extern HeapTuple heap_modify_tuple_by_cols(HeapTuple tuple,
820 						  TupleDesc tupleDesc,
821 						  int nCols,
822 						  int *replCols,
823 						  Datum *replValues,
824 						  bool *replIsnull);
825 extern void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
826 				  Datum *values, bool *isnull);
827 extern void heap_freetuple(HeapTuple htup);
828 extern MinimalTuple heap_form_minimal_tuple(TupleDesc tupleDescriptor,
829 						Datum *values, bool *isnull);
830 extern void heap_free_minimal_tuple(MinimalTuple mtup);
831 extern MinimalTuple heap_copy_minimal_tuple(MinimalTuple mtup);
832 extern HeapTuple heap_tuple_from_minimal_tuple(MinimalTuple mtup);
833 extern MinimalTuple minimal_tuple_from_heap_tuple(HeapTuple htup);
834 extern size_t varsize_any(void *p);
835 extern HeapTuple heap_expand_tuple(HeapTuple sourceTuple, TupleDesc tupleDesc);
836 extern MinimalTuple minimal_expand_tuple(HeapTuple sourceTuple, TupleDesc tupleDesc);
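
/*
 * Usage sketch (illustrative only): building and releasing a tuple with the
 * routines above, given a two-column TupleDesc "tupdesc" obtained elsewhere
 * (constructing the descriptor is outside the scope of this header):
 *
 *		Datum		values[2];
 *		bool		isnull[2];
 *		HeapTuple	tup;
 *
 *		values[0] = Int32GetDatum(42);
 *		isnull[0] = false;
 *		values[1] = (Datum) 0;
 *		isnull[1] = true;			(second column is NULL)
 *		tup = heap_form_tuple(tupdesc, values, isnull);
 *		(... use the tuple; heap_deform_tuple() recovers values/isnull ...)
 *		heap_freetuple(tup);
 */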
837 
838 #endif							/* HTUP_DETAILS_H */
839