1 /*-------------------------------------------------------------------------
2 *
3 * heaptuple.c
4 * This file contains heap tuple accessor and mutator routines, as well
5 * as various tuple utilities.
6 *
7 * Some notes about varlenas and this code:
8 *
9 * Before Postgres 8.3 varlenas always had a 4-byte length header, and
10 * therefore always needed 4-byte alignment (at least). This wasted space
11 * for short varlenas, for example CHAR(1) took 5 bytes and could need up to
12 * 3 additional padding bytes for alignment.
13 *
14 * Now, a short varlena (up to 126 data bytes) is reduced to a 1-byte header
15 * and we don't align it. To hide this from datatype-specific functions that
16 * don't want to deal with it, such a datum is considered "toasted" and will
17 * be expanded back to the normal 4-byte-header format by pg_detoast_datum.
18 * (In performance-critical code paths we can use pg_detoast_datum_packed
19 * and the appropriate access macros to avoid that overhead.) Note that this
20 * conversion is performed directly in heap_form_tuple, without invoking
21 * tuptoaster.c.
22 *
23 * This change will break any code that assumes it needn't detoast values
24 * that have been put into a tuple but never sent to disk. Hopefully there
25 * are few such places.
26 *
27 * Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since
28 * that's the normal requirement for the untoasted format. But we ignore that
29 * for the 1-byte-header format. This means that the actual start position
30 * of a varlena datum may vary depending on which format it has. To determine
31 * what is stored, we have to require that alignment padding bytes be zero.
32 * (Postgres actually has always zeroed them, but now it's required!) Since
33 * the first byte of a 1-byte-header varlena can never be zero, we can examine
34 * the first byte after the previous datum to tell if it's a pad byte or the
35 * start of a 1-byte-header varlena.
36 *
37 * Note that while formerly we could rely on the first varlena column of a
38 * system catalog to be at the offset suggested by the C struct for the
39 * catalog, this is now risky: it's only safe if the preceding field is
40 * word-aligned, so that there will never be any padding.
41 *
42 * We don't pack varlenas whose attstorage is 'p', since the data type
43 * isn't expecting to have to detoast values. This is used in particular
44 * by oidvector and int2vector, which are used in the system catalogs
45 * and we'd like to still refer to them via C struct offsets.
46 *
47 *
48 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
49 * Portions Copyright (c) 1994, Regents of the University of California
50 *
51 *
52 * IDENTIFICATION
53 * src/backend/access/common/heaptuple.c
54 *
55 *-------------------------------------------------------------------------
56 */
57
58 #include "postgres.h"
59
60 #include "access/sysattr.h"
61 #include "access/tuptoaster.h"
62 #include "executor/tuptable.h"
63 #include "utils/expandeddatum.h"
64
65
66 /* Does att's datatype allow packing into the 1-byte-header varlena format? */
67 #define ATT_IS_PACKABLE(att) \
68 ((att)->attlen == -1 && (att)->attstorage != 'p')
69 /* Use this if it's already known varlena */
70 #define VARLENA_ATT_IS_PACKABLE(att) \
71 ((att)->attstorage != 'p')
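
/*
 * Illustrative sketch (not part of the original file): what the notes at the
 * top of this file mean for datatype code.  A function that merely reads a
 * varlena's bytes can accept the possibly-packed form and use the "ANY"
 * macros, which understand both 1-byte and 4-byte headers; code that needs a
 * guaranteed 4-byte-header value detoasts fully instead.
 *
 *		text	   *t = PG_GETARG_TEXT_PP(0);	(may carry a 1-byte header)
 *		char	   *p = VARDATA_ANY(t);
 *		int			n = VARSIZE_ANY_EXHDR(t);
 *
 * versus
 *
 *		text	   *t = PG_GETARG_TEXT_P(0);	(always a 4-byte header)
 *		char	   *p = VARDATA(t);
 *		int			n = VARSIZE(t) - VARHDRSZ;
 */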
72
73
74 /* ----------------------------------------------------------------
75 * misc support routines
76 * ----------------------------------------------------------------
77 */
78
79
80 /*
81 * heap_compute_data_size
82 * Determine size of the data area of a tuple to be constructed
83 */
84 Size
85 heap_compute_data_size(TupleDesc tupleDesc,
86 Datum *values,
87 bool *isnull)
88 {
89 Size data_length = 0;
90 int i;
91 int numberOfAttributes = tupleDesc->natts;
92 Form_pg_attribute *att = tupleDesc->attrs;
93
94 for (i = 0; i < numberOfAttributes; i++)
95 {
96 Datum val;
97 Form_pg_attribute atti;
98
99 if (isnull[i])
100 continue;
101
102 val = values[i];
103 atti = att[i];
104
105 if (ATT_IS_PACKABLE(atti) &&
106 VARATT_CAN_MAKE_SHORT(DatumGetPointer(val)))
107 {
108 /*
109 * we're anticipating converting to a short varlena header, so
110 * adjust length and don't count any alignment
111 */
112 data_length += VARATT_CONVERTED_SHORT_SIZE(DatumGetPointer(val));
113 }
114 else if (atti->attlen == -1 &&
115 VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(val)))
116 {
117 /*
118 * we want to flatten the expanded value so that the constructed
119 * tuple doesn't depend on it
120 */
121 data_length = att_align_nominal(data_length, atti->attalign);
122 data_length += EOH_get_flat_size(DatumGetEOHP(val));
123 }
124 else
125 {
126 data_length = att_align_datum(data_length, atti->attalign,
127 atti->attlen, val);
128 data_length = att_addlength_datum(data_length, atti->attlen,
129 val);
130 }
131 }
132
133 return data_length;
134 }
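
/*
 * Worked example (added for illustration, not from the original file): a
 * text value holding 7 data bytes arrives here with a 4-byte header, so
 * VARSIZE() is 11.  VARATT_CAN_MAKE_SHORT() is then true, and the datum is
 * counted as VARATT_CONVERTED_SHORT_SIZE() = 7 + 1 = 8 bytes with no
 * alignment padding.  If the column's attstorage were 'p', the same value
 * would instead be aligned on a 4-byte boundary (up to 3 pad bytes) and
 * counted at its full 11 bytes.
 */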
135
136 /*
137 * heap_fill_tuple
138 * Load data portion of a tuple from values/isnull arrays
139 *
140 * We also fill the null bitmap (if any) and set the infomask bits
141 * that reflect the tuple's data contents.
142 *
143 * NOTE: it is now REQUIRED that the caller have pre-zeroed the data area.
144 */
145 void
146 heap_fill_tuple(TupleDesc tupleDesc,
147 Datum *values, bool *isnull,
148 char *data, Size data_size,
149 uint16 *infomask, bits8 *bit)
150 {
151 bits8 *bitP;
152 int bitmask;
153 int i;
154 int numberOfAttributes = tupleDesc->natts;
155 Form_pg_attribute *att = tupleDesc->attrs;
156
157 #ifdef USE_ASSERT_CHECKING
158 char *start = data;
159 #endif
160
161 if (bit != NULL)
162 {
163 bitP = &bit[-1];
164 bitmask = HIGHBIT;
165 }
166 else
167 {
168 /* just to keep compiler quiet */
169 bitP = NULL;
170 bitmask = 0;
171 }
172
173 *infomask &= ~(HEAP_HASNULL | HEAP_HASVARWIDTH | HEAP_HASEXTERNAL);
174
175 for (i = 0; i < numberOfAttributes; i++)
176 {
177 Size data_length;
178
179 if (bit != NULL)
180 {
181 if (bitmask != HIGHBIT)
182 bitmask <<= 1;
183 else
184 {
185 bitP += 1;
186 *bitP = 0x0;
187 bitmask = 1;
188 }
189
190 if (isnull[i])
191 {
192 *infomask |= HEAP_HASNULL;
193 continue;
194 }
195
196 *bitP |= bitmask;
197 }
198
199 /*
200 * XXX we use the att_align macros on the pointer value itself, not on
201 * an offset. This is a bit of a hack.
202 */
203
204 if (att[i]->attbyval)
205 {
206 /* pass-by-value */
207 data = (char *) att_align_nominal(data, att[i]->attalign);
208 store_att_byval(data, values[i], att[i]->attlen);
209 data_length = att[i]->attlen;
210 }
211 else if (att[i]->attlen == -1)
212 {
213 /* varlena */
214 Pointer val = DatumGetPointer(values[i]);
215
216 *infomask |= HEAP_HASVARWIDTH;
217 if (VARATT_IS_EXTERNAL(val))
218 {
219 if (VARATT_IS_EXTERNAL_EXPANDED(val))
220 {
221 /*
222 * we want to flatten the expanded value so that the
223 * constructed tuple doesn't depend on it
224 */
225 ExpandedObjectHeader *eoh = DatumGetEOHP(values[i]);
226
227 data = (char *) att_align_nominal(data,
228 att[i]->attalign);
229 data_length = EOH_get_flat_size(eoh);
230 EOH_flatten_into(eoh, data, data_length);
231 }
232 else
233 {
234 *infomask |= HEAP_HASEXTERNAL;
235 /* no alignment, since it's short by definition */
236 data_length = VARSIZE_EXTERNAL(val);
237 memcpy(data, val, data_length);
238 }
239 }
240 else if (VARATT_IS_SHORT(val))
241 {
242 /* no alignment for short varlenas */
243 data_length = VARSIZE_SHORT(val);
244 memcpy(data, val, data_length);
245 }
246 else if (VARLENA_ATT_IS_PACKABLE(att[i]) &&
247 VARATT_CAN_MAKE_SHORT(val))
248 {
249 /* convert to short varlena -- no alignment */
250 data_length = VARATT_CONVERTED_SHORT_SIZE(val);
251 SET_VARSIZE_SHORT(data, data_length);
252 memcpy(data + 1, VARDATA(val), data_length - 1);
253 }
254 else
255 {
256 /* full 4-byte header varlena */
257 data = (char *) att_align_nominal(data,
258 att[i]->attalign);
259 data_length = VARSIZE(val);
260 memcpy(data, val, data_length);
261 }
262 }
263 else if (att[i]->attlen == -2)
264 {
265 /* cstring ... never needs alignment */
266 *infomask |= HEAP_HASVARWIDTH;
267 Assert(att[i]->attalign == 'c');
268 data_length = strlen(DatumGetCString(values[i])) + 1;
269 memcpy(data, DatumGetPointer(values[i]), data_length);
270 }
271 else
272 {
273 /* fixed-length pass-by-reference */
274 data = (char *) att_align_nominal(data, att[i]->attalign);
275 Assert(att[i]->attlen > 0);
276 data_length = att[i]->attlen;
277 memcpy(data, DatumGetPointer(values[i]), data_length);
278 }
279
280 data += data_length;
281 }
282
283 Assert((data - start) == data_size);
284 }
285
286
287 /* ----------------------------------------------------------------
288 * heap tuple interface
289 * ----------------------------------------------------------------
290 */
291
292 /* ----------------
293 * heap_attisnull - returns TRUE iff tuple attribute is not present
294 * ----------------
295 */
296 bool
297 heap_attisnull(HeapTuple tup, int attnum)
298 {
299 if (attnum > (int) HeapTupleHeaderGetNatts(tup->t_data))
300 return true;
301
302 if (attnum > 0)
303 {
304 if (HeapTupleNoNulls(tup))
305 return false;
306 return att_isnull(attnum - 1, tup->t_data->t_bits);
307 }
308
309 switch (attnum)
310 {
311 case TableOidAttributeNumber:
312 case SelfItemPointerAttributeNumber:
313 case ObjectIdAttributeNumber:
314 case MinTransactionIdAttributeNumber:
315 case MinCommandIdAttributeNumber:
316 case MaxTransactionIdAttributeNumber:
317 case MaxCommandIdAttributeNumber:
318 /* these are never null */
319 break;
320
321 default:
322 elog(ERROR, "invalid attnum: %d", attnum);
323 }
324
325 return false;
326 }
327
328 /* ----------------
329 * nocachegetattr
330 *
331 * This only gets called from fastgetattr() macro, in cases where
332 * we can't use a cacheoffset and the value is not null.
333 *
334 * This caches attribute offsets in the attribute descriptor.
335 *
336 * An alternative way to speed things up would be to cache offsets
337 * with the tuple, but that seems more difficult unless you take
338 * the storage hit of actually putting those offsets into the
339 * tuple you send to disk. Yuck.
340 *
341 * This scheme will be slightly slower than that, but should
342 * perform well for queries which hit large #'s of tuples. After
343 * you cache the offsets once, examining all the other tuples using
344 * the same attribute descriptor will go much quicker. -cim 5/4/91
345 *
346 * NOTE: if you need to change this code, see also heap_deform_tuple.
347 * Also see nocache_index_getattr, which is the same code for index
348 * tuples.
349 * ----------------
350 */
351 Datum
352 nocachegetattr(HeapTuple tuple,
353 int attnum,
354 TupleDesc tupleDesc)
355 {
356 HeapTupleHeader tup = tuple->t_data;
357 Form_pg_attribute *att = tupleDesc->attrs;
358 char *tp; /* ptr to data part of tuple */
359 bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
360 bool slow = false; /* do we have to walk attrs? */
361 int off; /* current offset within data */
362
363 /* ----------------
364 * Three cases:
365 *
366 * 1: No nulls and no variable-width attributes.
367 * 2: Has a null or a var-width AFTER att.
368 * 3: Has nulls or var-widths BEFORE att.
369 * ----------------
370 */
371
372 attnum--;
373
374 if (!HeapTupleNoNulls(tuple))
375 {
376 /*
377 * there's a null somewhere in the tuple
378 *
379 * check to see if any preceding bits are null...
380 */
381 int byte = attnum >> 3;
382 int finalbit = attnum & 0x07;
383
384 /* check for nulls "before" final bit of last byte */
385 if ((~bp[byte]) & ((1 << finalbit) - 1))
386 slow = true;
387 else
388 {
389 /* check for nulls in any "earlier" bytes */
390 int i;
391
392 for (i = 0; i < byte; i++)
393 {
394 if (bp[i] != 0xFF)
395 {
396 slow = true;
397 break;
398 }
399 }
400 }
401 }
402
403 tp = (char *) tup + tup->t_hoff;
404
405 if (!slow)
406 {
407 /*
408 * If we get here, there are no nulls up to and including the target
409 * attribute. If we have a cached offset, we can use it.
410 */
411 if (att[attnum]->attcacheoff >= 0)
412 {
413 return fetchatt(att[attnum],
414 tp + att[attnum]->attcacheoff);
415 }
416
417 /*
418 * Otherwise, check for non-fixed-length attrs up to and including
419 * target. If there aren't any, it's safe to cheaply initialize the
420 * cached offsets for these attrs.
421 */
422 if (HeapTupleHasVarWidth(tuple))
423 {
424 int j;
425
426 for (j = 0; j <= attnum; j++)
427 {
428 if (att[j]->attlen <= 0)
429 {
430 slow = true;
431 break;
432 }
433 }
434 }
435 }
436
437 if (!slow)
438 {
439 int natts = tupleDesc->natts;
440 int j = 1;
441
442 /*
443 * If we get here, we have a tuple with no nulls or var-widths up to
444 * and including the target attribute, so we can use the cached offset
445 * ... only we don't have it yet, or we'd not have got here. Since
446 * it's cheap to compute offsets for fixed-width columns, we take the
447 * opportunity to initialize the cached offsets for *all* the leading
448 * fixed-width columns, in hope of avoiding future visits to this
449 * routine.
450 */
451 att[0]->attcacheoff = 0;
452
453 /* we might have set some offsets in the slow path previously */
454 while (j < natts && att[j]->attcacheoff > 0)
455 j++;
456
457 off = att[j - 1]->attcacheoff + att[j - 1]->attlen;
458
459 for (; j < natts; j++)
460 {
461 if (att[j]->attlen <= 0)
462 break;
463
464 off = att_align_nominal(off, att[j]->attalign);
465
466 att[j]->attcacheoff = off;
467
468 off += att[j]->attlen;
469 }
470
471 Assert(j > attnum);
472
473 off = att[attnum]->attcacheoff;
474 }
475 else
476 {
477 bool usecache = true;
478 int i;
479
480 /*
481 * Now we know that we have to walk the tuple CAREFULLY. But we still
482 * might be able to cache some offsets for next time.
483 *
484 * Note - This loop is a little tricky. For each non-null attribute,
485 * we have to first account for alignment padding before the attr,
486 * then advance over the attr based on its length. Nulls have no
487 * storage and no alignment padding either. We can use/set
488 * attcacheoff until we reach either a null or a var-width attribute.
489 */
490 off = 0;
491 for (i = 0;; i++) /* loop exit is at "break" */
492 {
493 if (HeapTupleHasNulls(tuple) && att_isnull(i, bp))
494 {
495 usecache = false;
496 continue; /* this cannot be the target att */
497 }
498
499 /* If we know the next offset, we can skip the rest */
500 if (usecache && att[i]->attcacheoff >= 0)
501 off = att[i]->attcacheoff;
502 else if (att[i]->attlen == -1)
503 {
504 /*
505 * We can only cache the offset for a varlena attribute if the
506 * offset is already suitably aligned, so that there would be
507 * no pad bytes in any case: then the offset will be valid for
508 * either an aligned or unaligned value.
509 */
510 if (usecache &&
511 off == att_align_nominal(off, att[i]->attalign))
512 att[i]->attcacheoff = off;
513 else
514 {
515 off = att_align_pointer(off, att[i]->attalign, -1,
516 tp + off);
517 usecache = false;
518 }
519 }
520 else
521 {
522 /* not varlena, so safe to use att_align_nominal */
523 off = att_align_nominal(off, att[i]->attalign);
524
525 if (usecache)
526 att[i]->attcacheoff = off;
527 }
528
529 if (i == attnum)
530 break;
531
532 off = att_addlength_pointer(off, att[i]->attlen, tp + off);
533
534 if (usecache && att[i]->attlen <= 0)
535 usecache = false;
536 }
537 }
538
539 return fetchatt(att[attnum], tp + off);
540 }
541
542 /* ----------------
543 * heap_getsysattr
544 *
545 * Fetch the value of a system attribute for a tuple.
546 *
547 * This is a support routine for the heap_getattr macro. The macro
548 * has already determined that the attnum refers to a system attribute.
549 * ----------------
550 */
551 Datum
552 heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
553 {
554 Datum result;
555
556 Assert(tup);
557
558 /* Currently, no sys attribute ever reads as NULL. */
559 *isnull = false;
560
561 switch (attnum)
562 {
563 case SelfItemPointerAttributeNumber:
564 /* pass-by-reference datatype */
565 result = PointerGetDatum(&(tup->t_self));
566 break;
567 case ObjectIdAttributeNumber:
568 result = ObjectIdGetDatum(HeapTupleGetOid(tup));
569 break;
570 case MinTransactionIdAttributeNumber:
571 result = TransactionIdGetDatum(HeapTupleHeaderGetRawXmin(tup->t_data));
572 break;
573 case MaxTransactionIdAttributeNumber:
574 result = TransactionIdGetDatum(HeapTupleHeaderGetRawXmax(tup->t_data));
575 break;
576 case MinCommandIdAttributeNumber:
577 case MaxCommandIdAttributeNumber:
578
579 /*
580 * cmin and cmax are now both aliases for the same field, which
581 * can in fact also be a combo command id. XXX perhaps we should
582 * return the "real" cmin or cmax if possible, that is if we are
583 * inside the originating transaction?
584 */
585 result = CommandIdGetDatum(HeapTupleHeaderGetRawCommandId(tup->t_data));
586 break;
587 case TableOidAttributeNumber:
588 result = ObjectIdGetDatum(tup->t_tableOid);
589 break;
590 default:
591 elog(ERROR, "invalid attnum: %d", attnum);
592 result = 0; /* keep compiler quiet */
593 break;
594 }
595 return result;
596 }
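
/*
 * Usage sketch (illustrative, not part of the original file): callers reach
 * this routine through the heap_getattr() macro with a negative attribute
 * number, for example to read xmin:
 *
 *		bool			isnull;
 *		TransactionId	xmin;
 *
 *		xmin = DatumGetTransactionId(heap_getattr(tup,
 *											MinTransactionIdAttributeNumber,
 *											tupleDesc, &isnull));
 */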
597
598 /* ----------------
599 * heap_copytuple
600 *
601 * returns a copy of an entire tuple
602 *
603 * The HeapTuple struct, tuple header, and tuple data are all allocated
604 * as a single palloc() block.
605 * ----------------
606 */
607 HeapTuple
608 heap_copytuple(HeapTuple tuple)
609 {
610 HeapTuple newTuple;
611
612 if (!HeapTupleIsValid(tuple) || tuple->t_data == NULL)
613 return NULL;
614
615 newTuple = (HeapTuple) palloc(HEAPTUPLESIZE + tuple->t_len);
616 newTuple->t_len = tuple->t_len;
617 newTuple->t_self = tuple->t_self;
618 newTuple->t_tableOid = tuple->t_tableOid;
619 newTuple->t_data = (HeapTupleHeader) ((char *) newTuple + HEAPTUPLESIZE);
620 memcpy((char *) newTuple->t_data, (char *) tuple->t_data, tuple->t_len);
621 return newTuple;
622 }
623
624 /* ----------------
625 * heap_copytuple_with_tuple
626 *
627 * copy a tuple into a caller-supplied HeapTuple management struct
628 *
629 * Note that after calling this function, the "dest" HeapTuple will not be
630 * allocated as a single palloc() block (unlike with heap_copytuple()).
631 * ----------------
632 */
633 void
634 heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
635 {
636 if (!HeapTupleIsValid(src) || src->t_data == NULL)
637 {
638 dest->t_data = NULL;
639 return;
640 }
641
642 dest->t_len = src->t_len;
643 dest->t_self = src->t_self;
644 dest->t_tableOid = src->t_tableOid;
645 dest->t_data = (HeapTupleHeader) palloc(src->t_len);
646 memcpy((char *) dest->t_data, (char *) src->t_data, src->t_len);
647 }
648
649 /* ----------------
650 * heap_copy_tuple_as_datum
651 *
652 * copy a tuple as a composite-type Datum
653 * ----------------
654 */
655 Datum
656 heap_copy_tuple_as_datum(HeapTuple tuple, TupleDesc tupleDesc)
657 {
658 HeapTupleHeader td;
659
660 /*
661 * If the tuple contains any external TOAST pointers, we have to inline
662 * those fields to meet the conventions for composite-type Datums.
663 */
664 if (HeapTupleHasExternal(tuple))
665 return toast_flatten_tuple_to_datum(tuple->t_data,
666 tuple->t_len,
667 tupleDesc);
668
669 /*
670 * Fast path for easy case: just make a palloc'd copy and insert the
671 * correct composite-Datum header fields (since those may not be set if
672 * the given tuple came from disk, rather than from heap_form_tuple).
673 */
674 td = (HeapTupleHeader) palloc(tuple->t_len);
675 memcpy((char *) td, (char *) tuple->t_data, tuple->t_len);
676
677 HeapTupleHeaderSetDatumLength(td, tuple->t_len);
678 HeapTupleHeaderSetTypeId(td, tupleDesc->tdtypeid);
679 HeapTupleHeaderSetTypMod(td, tupleDesc->tdtypmod);
680
681 return PointerGetDatum(td);
682 }
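
/*
 * Usage sketch (illustrative): code that already holds a HeapTuple and wants
 * to hand it out as a composite value, e.g. as the result of a C function
 * returning a row type, can write
 *
 *		Datum	result = heap_copy_tuple_as_datum(tuple, tupdesc);
 *
 * which, unlike a bare PointerGetDatum(tuple->t_data), guarantees correct
 * composite-Datum header fields and the absence of external TOAST pointers.
 */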
683
684 /*
685 * heap_form_tuple
686 * construct a tuple from the given values[] and isnull[] arrays,
687 * which are of the length indicated by tupleDescriptor->natts
688 *
689 * The result is allocated in the current memory context.
690 */
691 HeapTuple
692 heap_form_tuple(TupleDesc tupleDescriptor,
693 Datum *values,
694 bool *isnull)
695 {
696 HeapTuple tuple; /* return tuple */
697 HeapTupleHeader td; /* tuple data */
698 Size len,
699 data_len;
700 int hoff;
701 bool hasnull = false;
702 int numberOfAttributes = tupleDescriptor->natts;
703 int i;
704
705 if (numberOfAttributes > MaxTupleAttributeNumber)
706 ereport(ERROR,
707 (errcode(ERRCODE_TOO_MANY_COLUMNS),
708 errmsg("number of columns (%d) exceeds limit (%d)",
709 numberOfAttributes, MaxTupleAttributeNumber)));
710
711 /*
712 * Check for nulls
713 */
714 for (i = 0; i < numberOfAttributes; i++)
715 {
716 if (isnull[i])
717 {
718 hasnull = true;
719 break;
720 }
721 }
722
723 /*
724 * Determine total space needed
725 */
726 len = offsetof(HeapTupleHeaderData, t_bits);
727
728 if (hasnull)
729 len += BITMAPLEN(numberOfAttributes);
730
731 if (tupleDescriptor->tdhasoid)
732 len += sizeof(Oid);
733
734 hoff = len = MAXALIGN(len); /* align user data safely */
735
736 data_len = heap_compute_data_size(tupleDescriptor, values, isnull);
737
738 len += data_len;
739
740 /*
741 * Allocate and zero the space needed. Note that the tuple body and
742 * HeapTupleData management structure are allocated in one chunk.
743 */
744 tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
745 tuple->t_data = td = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
746
747 /*
748 * And fill in the information. Note we fill the Datum fields even though
749 * this tuple may never become a Datum. This lets HeapTupleHeaderGetDatum
750 * identify the tuple type if needed.
751 */
752 tuple->t_len = len;
753 ItemPointerSetInvalid(&(tuple->t_self));
754 tuple->t_tableOid = InvalidOid;
755
756 HeapTupleHeaderSetDatumLength(td, len);
757 HeapTupleHeaderSetTypeId(td, tupleDescriptor->tdtypeid);
758 HeapTupleHeaderSetTypMod(td, tupleDescriptor->tdtypmod);
759 /* We also make sure that t_ctid is invalid unless explicitly set */
760 ItemPointerSetInvalid(&(td->t_ctid));
761
762 HeapTupleHeaderSetNatts(td, numberOfAttributes);
763 td->t_hoff = hoff;
764
765 if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
766 td->t_infomask = HEAP_HASOID;
767
768 heap_fill_tuple(tupleDescriptor,
769 values,
770 isnull,
771 (char *) td + hoff,
772 data_len,
773 &td->t_infomask,
774 (hasnull ? td->t_bits : NULL));
775
776 return tuple;
777 }
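
/*
 * Usage sketch (illustrative; assumes "tupdesc" describes an int4 column
 * followed by a nullable text column):
 *
 *		Datum		values[2];
 *		bool		isnull[2];
 *		HeapTuple	tup;
 *
 *		values[0] = Int32GetDatum(42);
 *		isnull[0] = false;
 *		values[1] = (Datum) 0;
 *		isnull[1] = true;
 *		tup = heap_form_tuple(tupdesc, values, isnull);
 *
 * Pass-by-reference values are copied into the palloc'd result (ordinary
 * varlenas possibly in packed form), so the input arrays need not outlive
 * the returned tuple.
 */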
778
779 /*
780 * heap_modify_tuple
781 * form a new tuple from an old tuple and a set of replacement values.
782 *
783 * The replValues, replIsnull, and doReplace arrays must be of the length
784 * indicated by tupleDesc->natts. The new tuple is constructed using the data
785 * from replValues/replIsnull at columns where doReplace is true, and using
786 * the data from the old tuple at columns where doReplace is false.
787 *
788 * The result is allocated in the current memory context.
789 */
790 HeapTuple
791 heap_modify_tuple(HeapTuple tuple,
792 TupleDesc tupleDesc,
793 Datum *replValues,
794 bool *replIsnull,
795 bool *doReplace)
796 {
797 int numberOfAttributes = tupleDesc->natts;
798 int attoff;
799 Datum *values;
800 bool *isnull;
801 HeapTuple newTuple;
802
803 /*
804 * allocate and fill values and isnull arrays from either the tuple or the
805 * repl information, as appropriate.
806 *
807 * NOTE: it's debatable whether to use heap_deform_tuple() here or just
808 * heap_getattr() only the non-replaced columns. The latter could win if
809 * there are many replaced columns and few non-replaced ones. However,
810 * heap_deform_tuple costs only O(N) while the heap_getattr way would cost
811 * O(N^2) if there are many non-replaced columns, so it seems better to
812 * err on the side of linear cost.
813 */
814 values = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
815 isnull = (bool *) palloc(numberOfAttributes * sizeof(bool));
816
817 heap_deform_tuple(tuple, tupleDesc, values, isnull);
818
819 for (attoff = 0; attoff < numberOfAttributes; attoff++)
820 {
821 if (doReplace[attoff])
822 {
823 values[attoff] = replValues[attoff];
824 isnull[attoff] = replIsnull[attoff];
825 }
826 }
827
828 /*
829 * create a new tuple from the values and isnull arrays
830 */
831 newTuple = heap_form_tuple(tupleDesc, values, isnull);
832
833 pfree(values);
834 pfree(isnull);
835
836 /*
837 * copy the identification info of the old tuple: t_ctid, t_self, and OID
838 * (if any)
839 */
840 newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
841 newTuple->t_self = tuple->t_self;
842 newTuple->t_tableOid = tuple->t_tableOid;
843 if (tupleDesc->tdhasoid)
844 HeapTupleSetOid(newTuple, HeapTupleGetOid(tuple));
845
846 return newTuple;
847 }
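
/*
 * Usage sketch (illustrative; "oldtup", "tupdesc", and "newval" are assumed
 * to exist): replace only the second of three columns and keep the rest.
 *
 *		Datum		repl_val[3];
 *		bool		repl_null[3];
 *		bool		repl_repl[3];
 *		HeapTuple	newtup;
 *
 *		memset(repl_repl, false, sizeof(repl_repl));
 *		repl_val[1] = newval;
 *		repl_null[1] = false;
 *		repl_repl[1] = true;
 *		newtup = heap_modify_tuple(oldtup, tupdesc,
 *								   repl_val, repl_null, repl_repl);
 */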
848
849 /*
850 * heap_deform_tuple
851 * Given a tuple, extract data into values/isnull arrays; this is
852 * the inverse of heap_form_tuple.
853 *
854 * Storage for the values/isnull arrays is provided by the caller;
855 * it should be sized according to tupleDesc->natts not
856 * HeapTupleHeaderGetNatts(tuple->t_data).
857 *
858 * Note that for pass-by-reference datatypes, the pointer placed
859 * in the Datum will point into the given tuple.
860 *
861 * When all or most of a tuple's fields need to be extracted,
862 * this routine will be significantly quicker than a loop around
863 * heap_getattr; the loop will become O(N^2) as soon as any
864 * noncacheable attribute offsets are involved.
865 */
866 void
867 heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
868 Datum *values, bool *isnull)
869 {
870 HeapTupleHeader tup = tuple->t_data;
871 bool hasnulls = HeapTupleHasNulls(tuple);
872 Form_pg_attribute *att = tupleDesc->attrs;
873 int tdesc_natts = tupleDesc->natts;
874 int natts; /* number of atts to extract */
875 int attnum;
876 char *tp; /* ptr to tuple data */
877 long off; /* offset in tuple data */
878 bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
879 bool slow = false; /* can we use/set attcacheoff? */
880
881 natts = HeapTupleHeaderGetNatts(tup);
882
883 /*
884 * In inheritance situations, it is possible that the given tuple actually
885 * has more fields than the caller is expecting. Don't run off the end of
886 * the caller's arrays.
887 */
888 natts = Min(natts, tdesc_natts);
889
890 tp = (char *) tup + tup->t_hoff;
891
892 off = 0;
893
894 for (attnum = 0; attnum < natts; attnum++)
895 {
896 Form_pg_attribute thisatt = att[attnum];
897
898 if (hasnulls && att_isnull(attnum, bp))
899 {
900 values[attnum] = (Datum) 0;
901 isnull[attnum] = true;
902 slow = true; /* can't use attcacheoff anymore */
903 continue;
904 }
905
906 isnull[attnum] = false;
907
908 if (!slow && thisatt->attcacheoff >= 0)
909 off = thisatt->attcacheoff;
910 else if (thisatt->attlen == -1)
911 {
912 /*
913 * We can only cache the offset for a varlena attribute if the
914 * offset is already suitably aligned, so that there would be no
915 * pad bytes in any case: then the offset will be valid for either
916 * an aligned or unaligned value.
917 */
918 if (!slow &&
919 off == att_align_nominal(off, thisatt->attalign))
920 thisatt->attcacheoff = off;
921 else
922 {
923 off = att_align_pointer(off, thisatt->attalign, -1,
924 tp + off);
925 slow = true;
926 }
927 }
928 else
929 {
930 /* not varlena, so safe to use att_align_nominal */
931 off = att_align_nominal(off, thisatt->attalign);
932
933 if (!slow)
934 thisatt->attcacheoff = off;
935 }
936
937 values[attnum] = fetchatt(thisatt, tp + off);
938
939 off = att_addlength_pointer(off, thisatt->attlen, tp + off);
940
941 if (thisatt->attlen <= 0)
942 slow = true; /* can't use attcacheoff anymore */
943 }
944
945 /*
946 * If tuple doesn't have all the atts indicated by tupleDesc, read the
947 * rest as null
948 */
949 for (; attnum < tdesc_natts; attnum++)
950 {
951 values[attnum] = (Datum) 0;
952 isnull[attnum] = true;
953 }
954 }
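
/*
 * Usage sketch (illustrative): extracting a whole row at once, which beats a
 * loop of heap_getattr() calls whenever most columns are needed.
 *
 *		Datum	   *values = (Datum *) palloc(tupdesc->natts * sizeof(Datum));
 *		bool	   *isnull = (bool *) palloc(tupdesc->natts * sizeof(bool));
 *
 *		heap_deform_tuple(tuple, tupdesc, values, isnull);
 *
 * Keep in mind that pass-by-reference Datums in values[] point into the
 * original tuple and are only valid as long as that tuple is.
 */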
955
956 /*
957 * slot_deform_tuple
958 * Given a TupleTableSlot, extract data from the slot's physical tuple
959 * into its Datum/isnull arrays. Data is extracted up through the
960 * natts'th column (caller must ensure this is a legal column number).
961 *
962 * This is essentially an incremental version of heap_deform_tuple:
963 * on each call we extract attributes up to the one needed, without
964 * re-computing information about previously extracted attributes.
965 * slot->tts_nvalid is the number of attributes already extracted.
966 */
967 static void
968 slot_deform_tuple(TupleTableSlot *slot, int natts)
969 {
970 HeapTuple tuple = slot->tts_tuple;
971 TupleDesc tupleDesc = slot->tts_tupleDescriptor;
972 Datum *values = slot->tts_values;
973 bool *isnull = slot->tts_isnull;
974 HeapTupleHeader tup = tuple->t_data;
975 bool hasnulls = HeapTupleHasNulls(tuple);
976 Form_pg_attribute *att = tupleDesc->attrs;
977 int attnum;
978 char *tp; /* ptr to tuple data */
979 long off; /* offset in tuple data */
980 bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
981 bool slow; /* can we use/set attcacheoff? */
982
983 /*
984 * Check whether this is the first call for this tuple, and initialize or
985 * restore loop state.
986 */
987 attnum = slot->tts_nvalid;
988 if (attnum == 0)
989 {
990 /* Start from the first attribute */
991 off = 0;
992 slow = false;
993 }
994 else
995 {
996 /* Restore state from previous execution */
997 off = slot->tts_off;
998 slow = slot->tts_slow;
999 }
1000
1001 tp = (char *) tup + tup->t_hoff;
1002
1003 for (; attnum < natts; attnum++)
1004 {
1005 Form_pg_attribute thisatt = att[attnum];
1006
1007 if (hasnulls && att_isnull(attnum, bp))
1008 {
1009 values[attnum] = (Datum) 0;
1010 isnull[attnum] = true;
1011 slow = true; /* can't use attcacheoff anymore */
1012 continue;
1013 }
1014
1015 isnull[attnum] = false;
1016
1017 if (!slow && thisatt->attcacheoff >= 0)
1018 off = thisatt->attcacheoff;
1019 else if (thisatt->attlen == -1)
1020 {
1021 /*
1022 * We can only cache the offset for a varlena attribute if the
1023 * offset is already suitably aligned, so that there would be no
1024 * pad bytes in any case: then the offset will be valid for either
1025 * an aligned or unaligned value.
1026 */
1027 if (!slow &&
1028 off == att_align_nominal(off, thisatt->attalign))
1029 thisatt->attcacheoff = off;
1030 else
1031 {
1032 off = att_align_pointer(off, thisatt->attalign, -1,
1033 tp + off);
1034 slow = true;
1035 }
1036 }
1037 else
1038 {
1039 /* not varlena, so safe to use att_align_nominal */
1040 off = att_align_nominal(off, thisatt->attalign);
1041
1042 if (!slow)
1043 thisatt->attcacheoff = off;
1044 }
1045
1046 values[attnum] = fetchatt(thisatt, tp + off);
1047
1048 off = att_addlength_pointer(off, thisatt->attlen, tp + off);
1049
1050 if (thisatt->attlen <= 0)
1051 slow = true; /* can't use attcacheoff anymore */
1052 }
1053
1054 /*
1055 * Save state for next execution
1056 */
1057 slot->tts_nvalid = attnum;
1058 slot->tts_off = off;
1059 slot->tts_slow = slow;
1060 }
1061
1062 /*
1063 * slot_getattr
1064 * This function fetches an attribute of the slot's current tuple.
1065 * It is functionally equivalent to heap_getattr, but fetches of
1066 * multiple attributes of the same tuple will be optimized better,
1067 * because we avoid O(N^2) behavior from multiple calls of
1068 * nocachegetattr(), even when attcacheoff isn't usable.
1069 *
1070 * A difference from raw heap_getattr is that attnums beyond the
1071 * slot's tupdesc's last attribute will be considered NULL even
1072 * when the physical tuple is longer than the tupdesc.
1073 */
1074 Datum
1075 slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull)
1076 {
1077 HeapTuple tuple = slot->tts_tuple;
1078 TupleDesc tupleDesc = slot->tts_tupleDescriptor;
1079 HeapTupleHeader tup;
1080
1081 /*
1082 * system attributes are handled by heap_getsysattr
1083 */
1084 if (attnum <= 0)
1085 {
1086 if (tuple == NULL) /* internal error */
1087 elog(ERROR, "cannot extract system attribute from virtual tuple");
1088 if (tuple == &(slot->tts_minhdr)) /* internal error */
1089 elog(ERROR, "cannot extract system attribute from minimal tuple");
1090 return heap_getsysattr(tuple, attnum, tupleDesc, isnull);
1091 }
1092
1093 /*
1094 * fast path if desired attribute already cached
1095 */
1096 if (attnum <= slot->tts_nvalid)
1097 {
1098 *isnull = slot->tts_isnull[attnum - 1];
1099 return slot->tts_values[attnum - 1];
1100 }
1101
1102 /*
1103 * return NULL if attnum is out of range according to the tupdesc
1104 */
1105 if (attnum > tupleDesc->natts)
1106 {
1107 *isnull = true;
1108 return (Datum) 0;
1109 }
1110
1111 /*
1112 * otherwise we had better have a physical tuple (tts_nvalid should equal
1113 * natts in all virtual-tuple cases)
1114 */
1115 if (tuple == NULL) /* internal error */
1116 elog(ERROR, "cannot extract attribute from empty tuple slot");
1117
1118 /*
1119 * return NULL if attnum is out of range according to the tuple
1120 *
1121 * (We have to check this separately because of various inheritance and
1122 * table-alteration scenarios: the tuple could be either longer or shorter
1123 * than the tupdesc.)
1124 */
1125 tup = tuple->t_data;
1126 if (attnum > HeapTupleHeaderGetNatts(tup))
1127 {
1128 *isnull = true;
1129 return (Datum) 0;
1130 }
1131
1132 /*
1133 * check if target attribute is null: no point in groveling through tuple
1134 */
1135 if (HeapTupleHasNulls(tuple) && att_isnull(attnum - 1, tup->t_bits))
1136 {
1137 *isnull = true;
1138 return (Datum) 0;
1139 }
1140
1141 /*
1142 * If the attribute's column has been dropped, we force a NULL result.
1143 * This case should not happen in normal use, but it could happen if we
1144 * are executing a plan cached before the column was dropped.
1145 */
1146 if (tupleDesc->attrs[attnum - 1]->attisdropped)
1147 {
1148 *isnull = true;
1149 return (Datum) 0;
1150 }
1151
1152 /*
1153 * Extract the attribute, along with any preceding attributes.
1154 */
1155 slot_deform_tuple(slot, attnum);
1156
1157 /*
1158 * The result is acquired from tts_values array.
1159 */
1160 *isnull = slot->tts_isnull[attnum - 1];
1161 return slot->tts_values[attnum - 1];
1162 }
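
/*
 * Usage sketch (illustrative): an executor node fetching the third column of
 * the slot's current tuple.
 *
 *		bool	isnull;
 *		Datum	d = slot_getattr(slot, 3, &isnull);
 *
 * Because slot_deform_tuple remembers how far it has already walked, a series
 * of such calls over one tuple stays linear in the number of columns.
 */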
1163
1164 /*
1165 * slot_getallattrs
1166 * This function forces all the entries of the slot's Datum/isnull
1167 * arrays to be valid. The caller may then extract data directly
1168 * from those arrays instead of using slot_getattr.
1169 */
1170 void
1171 slot_getallattrs(TupleTableSlot *slot)
1172 {
1173 int tdesc_natts = slot->tts_tupleDescriptor->natts;
1174 int attnum;
1175 HeapTuple tuple;
1176
1177 /* Quick out if we have 'em all already */
1178 if (slot->tts_nvalid == tdesc_natts)
1179 return;
1180
1181 /*
1182 * otherwise we had better have a physical tuple (tts_nvalid should equal
1183 * natts in all virtual-tuple cases)
1184 */
1185 tuple = slot->tts_tuple;
1186 if (tuple == NULL) /* internal error */
1187 elog(ERROR, "cannot extract attribute from empty tuple slot");
1188
1189 /*
1190 * load up any slots available from physical tuple
1191 */
1192 attnum = HeapTupleHeaderGetNatts(tuple->t_data);
1193 attnum = Min(attnum, tdesc_natts);
1194
1195 slot_deform_tuple(slot, attnum);
1196
1197 /*
1198 * If tuple doesn't have all the atts indicated by tupleDesc, read the
1199 * rest as null
1200 */
1201 for (; attnum < tdesc_natts; attnum++)
1202 {
1203 slot->tts_values[attnum] = (Datum) 0;
1204 slot->tts_isnull[attnum] = true;
1205 }
1206 slot->tts_nvalid = tdesc_natts;
1207 }
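
/*
 * Usage sketch (illustrative): after slot_getallattrs() the arrays can be read
 * directly, the usual pattern when an entire row is consumed.  ("use_value"
 * stands for whatever the caller does with each non-null column.)
 *
 *		slot_getallattrs(slot);
 *		for (i = 0; i < slot->tts_tupleDescriptor->natts; i++)
 *		{
 *			if (!slot->tts_isnull[i])
 *				use_value(slot->tts_values[i]);
 *		}
 */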
1208
1209 /*
1210 * slot_getsomeattrs
1211 * This function forces the entries of the slot's Datum/isnull
1212 * arrays to be valid at least up through the attnum'th entry.
1213 */
1214 void
1215 slot_getsomeattrs(TupleTableSlot *slot, int attnum)
1216 {
1217 HeapTuple tuple;
1218 int attno;
1219
1220 /* Quick out if we have 'em all already */
1221 if (slot->tts_nvalid >= attnum)
1222 return;
1223
1224 /* Check for caller error */
1225 if (attnum <= 0 || attnum > slot->tts_tupleDescriptor->natts)
1226 elog(ERROR, "invalid attribute number %d", attnum);
1227
1228 /*
1229 * otherwise we had better have a physical tuple (tts_nvalid should equal
1230 * natts in all virtual-tuple cases)
1231 */
1232 tuple = slot->tts_tuple;
1233 if (tuple == NULL) /* internal error */
1234 elog(ERROR, "cannot extract attribute from empty tuple slot");
1235
1236 /*
1237 * load up any slots available from physical tuple
1238 */
1239 attno = HeapTupleHeaderGetNatts(tuple->t_data);
1240 attno = Min(attno, attnum);
1241
1242 slot_deform_tuple(slot, attno);
1243
1244 /*
1245 * If tuple doesn't have all the atts indicated by tupleDesc, read the
1246 * rest as null
1247 */
1248 for (; attno < attnum; attno++)
1249 {
1250 slot->tts_values[attno] = (Datum) 0;
1251 slot->tts_isnull[attno] = true;
1252 }
1253 slot->tts_nvalid = attnum;
1254 }
1255
1256 /*
1257 * slot_attisnull
1258 * Detect whether an attribute of the slot is null, without
1259 * actually fetching it.
1260 */
1261 bool
1262 slot_attisnull(TupleTableSlot *slot, int attnum)
1263 {
1264 HeapTuple tuple = slot->tts_tuple;
1265 TupleDesc tupleDesc = slot->tts_tupleDescriptor;
1266
1267 /*
1268 * system attributes are handled by heap_attisnull
1269 */
1270 if (attnum <= 0)
1271 {
1272 if (tuple == NULL) /* internal error */
1273 elog(ERROR, "cannot extract system attribute from virtual tuple");
1274 if (tuple == &(slot->tts_minhdr)) /* internal error */
1275 elog(ERROR, "cannot extract system attribute from minimal tuple");
1276 return heap_attisnull(tuple, attnum);
1277 }
1278
1279 /*
1280 * fast path if desired attribute already cached
1281 */
1282 if (attnum <= slot->tts_nvalid)
1283 return slot->tts_isnull[attnum - 1];
1284
1285 /*
1286 * return NULL if attnum is out of range according to the tupdesc
1287 */
1288 if (attnum > tupleDesc->natts)
1289 return true;
1290
1291 /*
1292 * otherwise we had better have a physical tuple (tts_nvalid should equal
1293 * natts in all virtual-tuple cases)
1294 */
1295 if (tuple == NULL) /* internal error */
1296 elog(ERROR, "cannot extract attribute from empty tuple slot");
1297
1298 /* and let the tuple tell it */
1299 return heap_attisnull(tuple, attnum);
1300 }
1301
1302 /*
1303 * slot_getsysattr
1304 * This function fetches a system attribute of the slot's current tuple.
1305 * Unlike slot_getattr, if the slot does not contain system attributes,
1306 * this will return false (with a NULL attribute value) instead of
1307 * throwing an error.
1308 */
1309 bool
1310 slot_getsysattr(TupleTableSlot *slot, int attnum,
1311 Datum *value, bool *isnull)
1312 {
1313 HeapTuple tuple = slot->tts_tuple;
1314
1315 Assert(attnum < 0); /* else caller error */
1316 if (tuple == NULL ||
1317 tuple == &(slot->tts_minhdr))
1318 {
1319 /* No physical tuple, or minimal tuple, so fail */
1320 *value = (Datum) 0;
1321 *isnull = true;
1322 return false;
1323 }
1324 *value = heap_getsysattr(tuple, attnum, slot->tts_tupleDescriptor, isnull);
1325 return true;
1326 }
1327
1328 /*
1329 * heap_freetuple
1330 */
1331 void
1332 heap_freetuple(HeapTuple htup)
1333 {
1334 pfree(htup);
1335 }
1336
1337
1338 /*
1339 * heap_form_minimal_tuple
1340 * construct a MinimalTuple from the given values[] and isnull[] arrays,
1341 * which are of the length indicated by tupleDescriptor->natts
1342 *
1343 * This is exactly like heap_form_tuple() except that the result is a
1344 * "minimal" tuple lacking a HeapTupleData header as well as room for system
1345 * columns.
1346 *
1347 * The result is allocated in the current memory context.
1348 */
1349 MinimalTuple
1350 heap_form_minimal_tuple(TupleDesc tupleDescriptor,
1351 Datum *values,
1352 bool *isnull)
1353 {
1354 MinimalTuple tuple; /* return tuple */
1355 Size len,
1356 data_len;
1357 int hoff;
1358 bool hasnull = false;
1359 int numberOfAttributes = tupleDescriptor->natts;
1360 int i;
1361
1362 if (numberOfAttributes > MaxTupleAttributeNumber)
1363 ereport(ERROR,
1364 (errcode(ERRCODE_TOO_MANY_COLUMNS),
1365 errmsg("number of columns (%d) exceeds limit (%d)",
1366 numberOfAttributes, MaxTupleAttributeNumber)));
1367
1368 /*
1369 * Check for nulls
1370 */
1371 for (i = 0; i < numberOfAttributes; i++)
1372 {
1373 if (isnull[i])
1374 {
1375 hasnull = true;
1376 break;
1377 }
1378 }
1379
1380 /*
1381 * Determine total space needed
1382 */
1383 len = SizeofMinimalTupleHeader;
1384
1385 if (hasnull)
1386 len += BITMAPLEN(numberOfAttributes);
1387
1388 if (tupleDescriptor->tdhasoid)
1389 len += sizeof(Oid);
1390
1391 hoff = len = MAXALIGN(len); /* align user data safely */
1392
1393 data_len = heap_compute_data_size(tupleDescriptor, values, isnull);
1394
1395 len += data_len;
1396
1397 /*
1398 * Allocate and zero the space needed.
1399 */
1400 tuple = (MinimalTuple) palloc0(len);
1401
1402 /*
1403 * And fill in the information.
1404 */
1405 tuple->t_len = len;
1406 HeapTupleHeaderSetNatts(tuple, numberOfAttributes);
1407 tuple->t_hoff = hoff + MINIMAL_TUPLE_OFFSET;
1408
1409 if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
1410 tuple->t_infomask = HEAP_HASOID;
1411
1412 heap_fill_tuple(tupleDescriptor,
1413 values,
1414 isnull,
1415 (char *) tuple + hoff,
1416 data_len,
1417 &tuple->t_infomask,
1418 (hasnull ? tuple->t_bits : NULL));
1419
1420 return tuple;
1421 }
1422
1423 /*
1424 * heap_free_minimal_tuple
1425 */
1426 void
1427 heap_free_minimal_tuple(MinimalTuple mtup)
1428 {
1429 pfree(mtup);
1430 }
1431
1432 /*
1433 * heap_copy_minimal_tuple
1434 * copy a MinimalTuple
1435 *
1436 * The result is allocated in the current memory context.
1437 */
1438 MinimalTuple
1439 heap_copy_minimal_tuple(MinimalTuple mtup)
1440 {
1441 MinimalTuple result;
1442
1443 result = (MinimalTuple) palloc(mtup->t_len);
1444 memcpy(result, mtup, mtup->t_len);
1445 return result;
1446 }
1447
1448 /*
1449 * heap_tuple_from_minimal_tuple
1450 * create a HeapTuple by copying from a MinimalTuple;
1451 * system columns are filled with zeroes
1452 *
1453 * The result is allocated in the current memory context.
1454 * The HeapTuple struct, tuple header, and tuple data are all allocated
1455 * as a single palloc() block.
1456 */
1457 HeapTuple
1458 heap_tuple_from_minimal_tuple(MinimalTuple mtup)
1459 {
1460 HeapTuple result;
1461 uint32 len = mtup->t_len + MINIMAL_TUPLE_OFFSET;
1462
1463 result = (HeapTuple) palloc(HEAPTUPLESIZE + len);
1464 result->t_len = len;
1465 ItemPointerSetInvalid(&(result->t_self));
1466 result->t_tableOid = InvalidOid;
1467 result->t_data = (HeapTupleHeader) ((char *) result + HEAPTUPLESIZE);
1468 memcpy((char *) result->t_data + MINIMAL_TUPLE_OFFSET, mtup, mtup->t_len);
1469 memset(result->t_data, 0, offsetof(HeapTupleHeaderData, t_infomask2));
1470 return result;
1471 }
1472
1473 /*
1474 * minimal_tuple_from_heap_tuple
1475 * create a MinimalTuple by copying from a HeapTuple
1476 *
1477 * The result is allocated in the current memory context.
1478 */
1479 MinimalTuple
1480 minimal_tuple_from_heap_tuple(HeapTuple htup)
1481 {
1482 MinimalTuple result;
1483 uint32 len;
1484
1485 Assert(htup->t_len > MINIMAL_TUPLE_OFFSET);
1486 len = htup->t_len - MINIMAL_TUPLE_OFFSET;
1487 result = (MinimalTuple) palloc(len);
1488 memcpy(result, (char *) htup->t_data + MINIMAL_TUPLE_OFFSET, len);
1489 result->t_len = len;
1490 return result;
1491 }
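
/*
 * Usage sketch (illustrative): modules such as tuplestore and tuplesort use
 * the minimal-tuple form to avoid storing per-row system columns.  A round
 * trip looks like
 *
 *		MinimalTuple mtup = minimal_tuple_from_heap_tuple(htup);
 *		...
 *		HeapTuple	htup2 = heap_tuple_from_minimal_tuple(mtup);
 *
 * htup2 carries the same user data as htup, but its system columns come back
 * zeroed/invalid, since a MinimalTuple does not store them.
 */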
1492