/*-------------------------------------------------------------------------
 *
 * heaptuple.c
 *	  This file contains heap tuple accessor and mutator routines, as well
 *	  as various tuple utilities.
 *
 * Some notes about varlenas and this code:
 *
 * Before Postgres 8.3 varlenas always had a 4-byte length header, and
 * therefore always needed 4-byte alignment (at least).  This wasted space
 * for short varlenas, for example CHAR(1) took 5 bytes and could need up to
 * 3 additional padding bytes for alignment.
 *
 * Now, a short varlena (up to 126 data bytes) is reduced to a 1-byte header
 * and we don't align it.  To hide this from datatype-specific functions that
 * don't want to deal with it, such a datum is considered "toasted" and will
 * be expanded back to the normal 4-byte-header format by pg_detoast_datum.
 * (In performance-critical code paths we can use pg_detoast_datum_packed
 * and the appropriate access macros to avoid that overhead.)  Note that this
 * conversion is performed directly in heap_form_tuple, without invoking
 * tuptoaster.c.
 *
 * This change will break any code that assumes it needn't detoast values
 * that have been put into a tuple but never sent to disk.  Hopefully there
 * are few such places.
 *
 * Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since
 * that's the normal requirement for the untoasted format.  But we ignore that
 * for the 1-byte-header format.  This means that the actual start position
 * of a varlena datum may vary depending on which format it has.  To determine
 * what is stored, we have to require that alignment padding bytes be zero.
 * (Postgres actually has always zeroed them, but now it's required!)  Since
 * the first byte of a 1-byte-header varlena can never be zero, we can examine
 * the first byte after the previous datum to tell if it's a pad byte or the
 * start of a 1-byte-header varlena.
 *
 * Note that while formerly we could rely on the first varlena column of a
 * system catalog to be at the offset suggested by the C struct for the
 * catalog, this is now risky: it's only safe if the preceding field is
 * word-aligned, so that there will never be any padding.
 *
 * We don't pack varlenas whose attstorage is 'p', since the data type
 * isn't expecting to have to detoast values.  This is used in particular
 * by oidvector and int2vector, which are used in the system catalogs
 * and we'd like to still refer to them via C struct offsets.
 *
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/access/common/heaptuple.c
 *
 *-------------------------------------------------------------------------
 */
57
58 #include "postgres.h"
59
60 #include "access/sysattr.h"
61 #include "access/tuptoaster.h"
62 #include "executor/tuptable.h"
63 #include "utils/expandeddatum.h"
64
65
/* Does att's datatype allow packing into the 1-byte-header varlena format? */
#define ATT_IS_PACKABLE(att) \
	((att)->attlen == -1 && (att)->attstorage != 'p')
/* Use this if it's already known varlena */
#define VARLENA_ATT_IS_PACKABLE(att) \
	((att)->attstorage != 'p')
72
73
/* ----------------------------------------------------------------
 *						misc support routines
 * ----------------------------------------------------------------
 */
78
79
80 /*
81 * heap_compute_data_size
82 * Determine size of the data area of a tuple to be constructed
83 */
84 Size
heap_compute_data_size(TupleDesc tupleDesc,Datum * values,bool * isnull)85 heap_compute_data_size(TupleDesc tupleDesc,
86 Datum *values,
87 bool *isnull)
88 {
89 Size data_length = 0;
90 int i;
91 int numberOfAttributes = tupleDesc->natts;
92 Form_pg_attribute *att = tupleDesc->attrs;
93
94 for (i = 0; i < numberOfAttributes; i++)
95 {
96 Datum val;
97 Form_pg_attribute atti;
98
99 if (isnull[i])
100 continue;
101
102 val = values[i];
103 atti = att[i];
104
105 if (ATT_IS_PACKABLE(atti) &&
106 VARATT_CAN_MAKE_SHORT(DatumGetPointer(val)))
107 {
108 /*
109 * we're anticipating converting to a short varlena header, so
110 * adjust length and don't count any alignment
111 */
112 data_length += VARATT_CONVERTED_SHORT_SIZE(DatumGetPointer(val));
113 }
114 else if (atti->attlen == -1 &&
115 VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(val)))
116 {
117 /*
118 * we want to flatten the expanded value so that the constructed
119 * tuple doesn't depend on it
120 */
121 data_length = att_align_nominal(data_length, atti->attalign);
122 data_length += EOH_get_flat_size(DatumGetEOHP(val));
123 }
124 else
125 {
126 data_length = att_align_datum(data_length, atti->attalign,
127 atti->attlen, val);
128 data_length = att_addlength_datum(data_length, atti->attlen,
129 val);
130 }
131 }
132
133 return data_length;
134 }
135
136 /*
137 * heap_fill_tuple
138 * Load data portion of a tuple from values/isnull arrays
139 *
140 * We also fill the null bitmap (if any) and set the infomask bits
141 * that reflect the tuple's data contents.
142 *
143 * NOTE: it is now REQUIRED that the caller have pre-zeroed the data area.
144 */
145 void
heap_fill_tuple(TupleDesc tupleDesc,Datum * values,bool * isnull,char * data,Size data_size,uint16 * infomask,bits8 * bit)146 heap_fill_tuple(TupleDesc tupleDesc,
147 Datum *values, bool *isnull,
148 char *data, Size data_size,
149 uint16 *infomask, bits8 *bit)
150 {
151 bits8 *bitP;
152 int bitmask;
153 int i;
154 int numberOfAttributes = tupleDesc->natts;
155 Form_pg_attribute *att = tupleDesc->attrs;
156
157 #ifdef USE_ASSERT_CHECKING
158 char *start = data;
159 #endif
160
161 if (bit != NULL)
162 {
163 bitP = &bit[-1];
164 bitmask = HIGHBIT;
165 }
166 else
167 {
168 /* just to keep compiler quiet */
169 bitP = NULL;
170 bitmask = 0;
171 }
172
173 *infomask &= ~(HEAP_HASNULL | HEAP_HASVARWIDTH | HEAP_HASEXTERNAL);
174
175 for (i = 0; i < numberOfAttributes; i++)
176 {
177 Size data_length;
178
179 if (bit != NULL)
180 {
181 if (bitmask != HIGHBIT)
182 bitmask <<= 1;
183 else
184 {
185 bitP += 1;
186 *bitP = 0x0;
187 bitmask = 1;
188 }
189
190 if (isnull[i])
191 {
192 *infomask |= HEAP_HASNULL;
193 continue;
194 }
195
196 *bitP |= bitmask;
197 }
198
199 /*
200 * XXX we use the att_align macros on the pointer value itself, not on
201 * an offset. This is a bit of a hack.
202 */
203
204 if (att[i]->attbyval)
205 {
206 /* pass-by-value */
207 data = (char *) att_align_nominal(data, att[i]->attalign);
208 store_att_byval(data, values[i], att[i]->attlen);
209 data_length = att[i]->attlen;
210 }
211 else if (att[i]->attlen == -1)
212 {
213 /* varlena */
214 Pointer val = DatumGetPointer(values[i]);
215
216 *infomask |= HEAP_HASVARWIDTH;
217 if (VARATT_IS_EXTERNAL(val))
218 {
219 if (VARATT_IS_EXTERNAL_EXPANDED(val))
220 {
221 /*
222 * we want to flatten the expanded value so that the
223 * constructed tuple doesn't depend on it
224 */
225 ExpandedObjectHeader *eoh = DatumGetEOHP(values[i]);
226
227 data = (char *) att_align_nominal(data,
228 att[i]->attalign);
229 data_length = EOH_get_flat_size(eoh);
230 EOH_flatten_into(eoh, data, data_length);
231 }
232 else
233 {
234 *infomask |= HEAP_HASEXTERNAL;
235 /* no alignment, since it's short by definition */
236 data_length = VARSIZE_EXTERNAL(val);
237 memcpy(data, val, data_length);
238 }
239 }
240 else if (VARATT_IS_SHORT(val))
241 {
242 /* no alignment for short varlenas */
243 data_length = VARSIZE_SHORT(val);
244 memcpy(data, val, data_length);
245 }
246 else if (VARLENA_ATT_IS_PACKABLE(att[i]) &&
247 VARATT_CAN_MAKE_SHORT(val))
248 {
249 /* convert to short varlena -- no alignment */
250 data_length = VARATT_CONVERTED_SHORT_SIZE(val);
251 SET_VARSIZE_SHORT(data, data_length);
252 memcpy(data + 1, VARDATA(val), data_length - 1);
253 }
254 else
255 {
256 /* full 4-byte header varlena */
257 data = (char *) att_align_nominal(data,
258 att[i]->attalign);
259 data_length = VARSIZE(val);
260 memcpy(data, val, data_length);
261 }
262 }
263 else if (att[i]->attlen == -2)
264 {
265 /* cstring ... never needs alignment */
266 *infomask |= HEAP_HASVARWIDTH;
267 Assert(att[i]->attalign == 'c');
268 data_length = strlen(DatumGetCString(values[i])) + 1;
269 memcpy(data, DatumGetPointer(values[i]), data_length);
270 }
271 else
272 {
273 /* fixed-length pass-by-reference */
274 data = (char *) att_align_nominal(data, att[i]->attalign);
275 Assert(att[i]->attlen > 0);
276 data_length = att[i]->attlen;
277 memcpy(data, DatumGetPointer(values[i]), data_length);
278 }
279
280 data += data_length;
281 }
282
283 Assert((data - start) == data_size);
284 }
285
286
/* ----------------------------------------------------------------
 *						heap tuple interface
 * ----------------------------------------------------------------
 */
291
292 /* ----------------
293 * heap_attisnull - returns TRUE iff tuple attribute is not present
294 * ----------------
295 */
296 bool
heap_attisnull(HeapTuple tup,int attnum)297 heap_attisnull(HeapTuple tup, int attnum)
298 {
299 if (attnum > (int) HeapTupleHeaderGetNatts(tup->t_data))
300 return true;
301
302 if (attnum > 0)
303 {
304 if (HeapTupleNoNulls(tup))
305 return false;
306 return att_isnull(attnum - 1, tup->t_data->t_bits);
307 }
308
309 switch (attnum)
310 {
311 case TableOidAttributeNumber:
312 case SelfItemPointerAttributeNumber:
313 case ObjectIdAttributeNumber:
314 case MinTransactionIdAttributeNumber:
315 case MinCommandIdAttributeNumber:
316 case MaxTransactionIdAttributeNumber:
317 case MaxCommandIdAttributeNumber:
318 /* these are never null */
319 break;
320
321 default:
322 elog(ERROR, "invalid attnum: %d", attnum);
323 }
324
325 return false;
326 }
327
328 /* ----------------
329 * nocachegetattr
330 *
331 * This only gets called from fastgetattr() macro, in cases where
332 * we can't use a cacheoffset and the value is not null.
333 *
334 * This caches attribute offsets in the attribute descriptor.
335 *
336 * An alternative way to speed things up would be to cache offsets
337 * with the tuple, but that seems more difficult unless you take
338 * the storage hit of actually putting those offsets into the
339 * tuple you send to disk. Yuck.
340 *
341 * This scheme will be slightly slower than that, but should
342 * perform well for queries which hit large #'s of tuples. After
343 * you cache the offsets once, examining all the other tuples using
344 * the same attribute descriptor will go much quicker. -cim 5/4/91
345 *
346 * NOTE: if you need to change this code, see also heap_deform_tuple.
347 * Also see nocache_index_getattr, which is the same code for index
348 * tuples.
349 * ----------------
350 */
351 Datum
nocachegetattr(HeapTuple tuple,int attnum,TupleDesc tupleDesc)352 nocachegetattr(HeapTuple tuple,
353 int attnum,
354 TupleDesc tupleDesc)
355 {
356 HeapTupleHeader tup = tuple->t_data;
357 Form_pg_attribute *att = tupleDesc->attrs;
358 char *tp; /* ptr to data part of tuple */
359 bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
360 bool slow = false; /* do we have to walk attrs? */
361 int off; /* current offset within data */
362
363 /* ----------------
364 * Three cases:
365 *
366 * 1: No nulls and no variable-width attributes.
367 * 2: Has a null or a var-width AFTER att.
368 * 3: Has nulls or var-widths BEFORE att.
369 * ----------------
370 */
371
372 attnum--;
373
374 if (!HeapTupleNoNulls(tuple))
375 {
376 /*
377 * there's a null somewhere in the tuple
378 *
379 * check to see if any preceding bits are null...
380 */
381 int byte = attnum >> 3;
382 int finalbit = attnum & 0x07;
383
384 /* check for nulls "before" final bit of last byte */
385 if ((~bp[byte]) & ((1 << finalbit) - 1))
386 slow = true;
387 else
388 {
389 /* check for nulls in any "earlier" bytes */
390 int i;
391
392 for (i = 0; i < byte; i++)
393 {
394 if (bp[i] != 0xFF)
395 {
396 slow = true;
397 break;
398 }
399 }
400 }
401 }
402
403 tp = (char *) tup + tup->t_hoff;
404
405 if (!slow)
406 {
407 /*
408 * If we get here, there are no nulls up to and including the target
409 * attribute. If we have a cached offset, we can use it.
410 */
411 if (att[attnum]->attcacheoff >= 0)
412 {
413 return fetchatt(att[attnum],
414 tp + att[attnum]->attcacheoff);
415 }
416
417 /*
418 * Otherwise, check for non-fixed-length attrs up to and including
419 * target. If there aren't any, it's safe to cheaply initialize the
420 * cached offsets for these attrs.
421 */
422 if (HeapTupleHasVarWidth(tuple))
423 {
424 int j;
425
426 for (j = 0; j <= attnum; j++)
427 {
428 if (att[j]->attlen <= 0)
429 {
430 slow = true;
431 break;
432 }
433 }
434 }
435 }
436
437 if (!slow)
438 {
439 int natts = tupleDesc->natts;
440 int j = 1;
441
442 /*
443 * If we get here, we have a tuple with no nulls or var-widths up to
444 * and including the target attribute, so we can use the cached offset
445 * ... only we don't have it yet, or we'd not have got here. Since
446 * it's cheap to compute offsets for fixed-width columns, we take the
447 * opportunity to initialize the cached offsets for *all* the leading
448 * fixed-width columns, in hope of avoiding future visits to this
449 * routine.
450 */
451 att[0]->attcacheoff = 0;
452
453 /* we might have set some offsets in the slow path previously */
454 while (j < natts && att[j]->attcacheoff > 0)
455 j++;
456
457 off = att[j - 1]->attcacheoff + att[j - 1]->attlen;
458
459 for (; j < natts; j++)
460 {
461 if (att[j]->attlen <= 0)
462 break;
463
464 off = att_align_nominal(off, att[j]->attalign);
465
466 att[j]->attcacheoff = off;
467
468 off += att[j]->attlen;
469 }
470
471 Assert(j > attnum);
472
473 off = att[attnum]->attcacheoff;
474 }
475 else
476 {
477 bool usecache = true;
478 int i;
479
480 /*
481 * Now we know that we have to walk the tuple CAREFULLY. But we still
482 * might be able to cache some offsets for next time.
483 *
484 * Note - This loop is a little tricky. For each non-null attribute,
485 * we have to first account for alignment padding before the attr,
486 * then advance over the attr based on its length. Nulls have no
487 * storage and no alignment padding either. We can use/set
488 * attcacheoff until we reach either a null or a var-width attribute.
489 */
490 off = 0;
491 for (i = 0;; i++) /* loop exit is at "break" */
492 {
493 if (HeapTupleHasNulls(tuple) && att_isnull(i, bp))
494 {
495 usecache = false;
496 continue; /* this cannot be the target att */
497 }
498
499 /* If we know the next offset, we can skip the rest */
500 if (usecache && att[i]->attcacheoff >= 0)
501 off = att[i]->attcacheoff;
502 else if (att[i]->attlen == -1)
503 {
504 /*
505 * We can only cache the offset for a varlena attribute if the
506 * offset is already suitably aligned, so that there would be
507 * no pad bytes in any case: then the offset will be valid for
508 * either an aligned or unaligned value.
509 */
510 if (usecache &&
511 off == att_align_nominal(off, att[i]->attalign))
512 att[i]->attcacheoff = off;
513 else
514 {
515 off = att_align_pointer(off, att[i]->attalign, -1,
516 tp + off);
517 usecache = false;
518 }
519 }
520 else
521 {
522 /* not varlena, so safe to use att_align_nominal */
523 off = att_align_nominal(off, att[i]->attalign);
524
525 if (usecache)
526 att[i]->attcacheoff = off;
527 }
528
529 if (i == attnum)
530 break;
531
532 off = att_addlength_pointer(off, att[i]->attlen, tp + off);
533
534 if (usecache && att[i]->attlen <= 0)
535 usecache = false;
536 }
537 }
538
539 return fetchatt(att[attnum], tp + off);
540 }
541
542 /* ----------------
543 * heap_getsysattr
544 *
545 * Fetch the value of a system attribute for a tuple.
546 *
547 * This is a support routine for the heap_getattr macro. The macro
548 * has already determined that the attnum refers to a system attribute.
549 * ----------------
550 */
551 Datum
heap_getsysattr(HeapTuple tup,int attnum,TupleDesc tupleDesc,bool * isnull)552 heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
553 {
554 Datum result;
555
556 Assert(tup);
557
558 /* Currently, no sys attribute ever reads as NULL. */
559 *isnull = false;
560
561 switch (attnum)
562 {
563 case SelfItemPointerAttributeNumber:
564 /* pass-by-reference datatype */
565 result = PointerGetDatum(&(tup->t_self));
566 break;
567 case ObjectIdAttributeNumber:
568 result = ObjectIdGetDatum(HeapTupleGetOid(tup));
569 break;
570 case MinTransactionIdAttributeNumber:
571 result = TransactionIdGetDatum(HeapTupleHeaderGetRawXmin(tup->t_data));
572 break;
573 case MaxTransactionIdAttributeNumber:
574 result = TransactionIdGetDatum(HeapTupleHeaderGetRawXmax(tup->t_data));
575 break;
576 case MinCommandIdAttributeNumber:
577 case MaxCommandIdAttributeNumber:
578
579 /*
580 * cmin and cmax are now both aliases for the same field, which
581 * can in fact also be a combo command id. XXX perhaps we should
582 * return the "real" cmin or cmax if possible, that is if we are
583 * inside the originating transaction?
584 */
585 result = CommandIdGetDatum(HeapTupleHeaderGetRawCommandId(tup->t_data));
586 break;
587 case TableOidAttributeNumber:
588 result = ObjectIdGetDatum(tup->t_tableOid);
589 break;
590 default:
591 elog(ERROR, "invalid attnum: %d", attnum);
592 result = 0; /* keep compiler quiet */
593 break;
594 }
595 return result;
596 }
597
598 /* ----------------
599 * heap_copytuple
600 *
601 * returns a copy of an entire tuple
602 *
603 * The HeapTuple struct, tuple header, and tuple data are all allocated
604 * as a single palloc() block.
605 * ----------------
606 */
607 HeapTuple
heap_copytuple(HeapTuple tuple)608 heap_copytuple(HeapTuple tuple)
609 {
610 HeapTuple newTuple;
611
612 if (!HeapTupleIsValid(tuple) || tuple->t_data == NULL)
613 return NULL;
614
615 newTuple = (HeapTuple) palloc(HEAPTUPLESIZE + tuple->t_len);
616 newTuple->t_len = tuple->t_len;
617 newTuple->t_self = tuple->t_self;
618 newTuple->t_tableOid = tuple->t_tableOid;
619 newTuple->t_data = (HeapTupleHeader) ((char *) newTuple + HEAPTUPLESIZE);
620 memcpy((char *) newTuple->t_data, (char *) tuple->t_data, tuple->t_len);
621 return newTuple;
622 }
623
624 /* ----------------
625 * heap_copytuple_with_tuple
626 *
627 * copy a tuple into a caller-supplied HeapTuple management struct
628 *
629 * Note that after calling this function, the "dest" HeapTuple will not be
630 * allocated as a single palloc() block (unlike with heap_copytuple()).
631 * ----------------
632 */
633 void
heap_copytuple_with_tuple(HeapTuple src,HeapTuple dest)634 heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
635 {
636 if (!HeapTupleIsValid(src) || src->t_data == NULL)
637 {
638 dest->t_data = NULL;
639 return;
640 }
641
642 dest->t_len = src->t_len;
643 dest->t_self = src->t_self;
644 dest->t_tableOid = src->t_tableOid;
645 dest->t_data = (HeapTupleHeader) palloc(src->t_len);
646 memcpy((char *) dest->t_data, (char *) src->t_data, src->t_len);
647 }
648
649 /* ----------------
650 * heap_copy_tuple_as_datum
651 *
652 * copy a tuple as a composite-type Datum
653 * ----------------
654 */
655 Datum
heap_copy_tuple_as_datum(HeapTuple tuple,TupleDesc tupleDesc)656 heap_copy_tuple_as_datum(HeapTuple tuple, TupleDesc tupleDesc)
657 {
658 HeapTupleHeader td;
659
660 /*
661 * If the tuple contains any external TOAST pointers, we have to inline
662 * those fields to meet the conventions for composite-type Datums.
663 */
664 if (HeapTupleHasExternal(tuple))
665 return toast_flatten_tuple_to_datum(tuple->t_data,
666 tuple->t_len,
667 tupleDesc);
668
669 /*
670 * Fast path for easy case: just make a palloc'd copy and insert the
671 * correct composite-Datum header fields (since those may not be set if
672 * the given tuple came from disk, rather than from heap_form_tuple).
673 */
674 td = (HeapTupleHeader) palloc(tuple->t_len);
675 memcpy((char *) td, (char *) tuple->t_data, tuple->t_len);
676
677 HeapTupleHeaderSetDatumLength(td, tuple->t_len);
678 HeapTupleHeaderSetTypeId(td, tupleDesc->tdtypeid);
679 HeapTupleHeaderSetTypMod(td, tupleDesc->tdtypmod);
680
681 return PointerGetDatum(td);
682 }
683
684 /*
685 * heap_form_tuple
686 * construct a tuple from the given values[] and isnull[] arrays,
687 * which are of the length indicated by tupleDescriptor->natts
688 *
689 * The result is allocated in the current memory context.
690 */
691 HeapTuple
heap_form_tuple(TupleDesc tupleDescriptor,Datum * values,bool * isnull)692 heap_form_tuple(TupleDesc tupleDescriptor,
693 Datum *values,
694 bool *isnull)
695 {
696 HeapTuple tuple; /* return tuple */
697 HeapTupleHeader td; /* tuple data */
698 Size len,
699 data_len;
700 int hoff;
701 bool hasnull = false;
702 int numberOfAttributes = tupleDescriptor->natts;
703 int i;
704
705 if (numberOfAttributes > MaxTupleAttributeNumber)
706 ereport(ERROR,
707 (errcode(ERRCODE_TOO_MANY_COLUMNS),
708 errmsg("number of columns (%d) exceeds limit (%d)",
709 numberOfAttributes, MaxTupleAttributeNumber)));
710
711 /*
712 * Check for nulls
713 */
714 for (i = 0; i < numberOfAttributes; i++)
715 {
716 if (isnull[i])
717 {
718 hasnull = true;
719 break;
720 }
721 }
722
723 /*
724 * Determine total space needed
725 */
726 len = offsetof(HeapTupleHeaderData, t_bits);
727
728 if (hasnull)
729 len += BITMAPLEN(numberOfAttributes);
730
731 if (tupleDescriptor->tdhasoid)
732 len += sizeof(Oid);
733
734 hoff = len = MAXALIGN(len); /* align user data safely */
735
736 data_len = heap_compute_data_size(tupleDescriptor, values, isnull);
737
738 len += data_len;
739
740 /*
741 * Allocate and zero the space needed. Note that the tuple body and
742 * HeapTupleData management structure are allocated in one chunk.
743 */
744 tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
745 tuple->t_data = td = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
746
747 /*
748 * And fill in the information. Note we fill the Datum fields even though
749 * this tuple may never become a Datum. This lets HeapTupleHeaderGetDatum
750 * identify the tuple type if needed.
751 */
752 tuple->t_len = len;
753 ItemPointerSetInvalid(&(tuple->t_self));
754 tuple->t_tableOid = InvalidOid;
755
756 HeapTupleHeaderSetDatumLength(td, len);
757 HeapTupleHeaderSetTypeId(td, tupleDescriptor->tdtypeid);
758 HeapTupleHeaderSetTypMod(td, tupleDescriptor->tdtypmod);
759 /* We also make sure that t_ctid is invalid unless explicitly set */
760 ItemPointerSetInvalid(&(td->t_ctid));
761
762 HeapTupleHeaderSetNatts(td, numberOfAttributes);
763 td->t_hoff = hoff;
764
765 if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
766 td->t_infomask = HEAP_HASOID;
767
768 heap_fill_tuple(tupleDescriptor,
769 values,
770 isnull,
771 (char *) td + hoff,
772 data_len,
773 &td->t_infomask,
774 (hasnull ? td->t_bits : NULL));
775
776 return tuple;
777 }
778
779 /*
780 * heap_modify_tuple
781 * form a new tuple from an old tuple and a set of replacement values.
782 *
783 * The replValues, replIsnull, and doReplace arrays must be of the length
784 * indicated by tupleDesc->natts. The new tuple is constructed using the data
785 * from replValues/replIsnull at columns where doReplace is true, and using
786 * the data from the old tuple at columns where doReplace is false.
787 *
788 * The result is allocated in the current memory context.
789 */
790 HeapTuple
heap_modify_tuple(HeapTuple tuple,TupleDesc tupleDesc,Datum * replValues,bool * replIsnull,bool * doReplace)791 heap_modify_tuple(HeapTuple tuple,
792 TupleDesc tupleDesc,
793 Datum *replValues,
794 bool *replIsnull,
795 bool *doReplace)
796 {
797 int numberOfAttributes = tupleDesc->natts;
798 int attoff;
799 Datum *values;
800 bool *isnull;
801 HeapTuple newTuple;
802
803 /*
804 * allocate and fill values and isnull arrays from either the tuple or the
805 * repl information, as appropriate.
806 *
807 * NOTE: it's debatable whether to use heap_deform_tuple() here or just
808 * heap_getattr() only the non-replaced columns. The latter could win if
809 * there are many replaced columns and few non-replaced ones. However,
810 * heap_deform_tuple costs only O(N) while the heap_getattr way would cost
811 * O(N^2) if there are many non-replaced columns, so it seems better to
812 * err on the side of linear cost.
813 */
814 values = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
815 isnull = (bool *) palloc(numberOfAttributes * sizeof(bool));
816
817 heap_deform_tuple(tuple, tupleDesc, values, isnull);
818
819 for (attoff = 0; attoff < numberOfAttributes; attoff++)
820 {
821 if (doReplace[attoff])
822 {
823 values[attoff] = replValues[attoff];
824 isnull[attoff] = replIsnull[attoff];
825 }
826 }
827
828 /*
829 * create a new tuple from the values and isnull arrays
830 */
831 newTuple = heap_form_tuple(tupleDesc, values, isnull);
832
833 pfree(values);
834 pfree(isnull);
835
836 /*
837 * copy the identification info of the old tuple: t_ctid, t_self, and OID
838 * (if any)
839 */
840 newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
841 newTuple->t_self = tuple->t_self;
842 newTuple->t_tableOid = tuple->t_tableOid;
843 if (tupleDesc->tdhasoid)
844 HeapTupleSetOid(newTuple, HeapTupleGetOid(tuple));
845
846 return newTuple;
847 }
848
849 /*
850 * heap_modify_tuple_by_cols
851 * form a new tuple from an old tuple and a set of replacement values.
852 *
853 * This is like heap_modify_tuple, except that instead of specifying which
854 * column(s) to replace by a boolean map, an array of target column numbers
855 * is used. This is often more convenient when a fixed number of columns
856 * are to be replaced. The replCols, replValues, and replIsnull arrays must
857 * be of length nCols. Target column numbers are indexed from 1.
858 *
859 * The result is allocated in the current memory context.
860 */
861 HeapTuple
heap_modify_tuple_by_cols(HeapTuple tuple,TupleDesc tupleDesc,int nCols,int * replCols,Datum * replValues,bool * replIsnull)862 heap_modify_tuple_by_cols(HeapTuple tuple,
863 TupleDesc tupleDesc,
864 int nCols,
865 int *replCols,
866 Datum *replValues,
867 bool *replIsnull)
868 {
869 int numberOfAttributes = tupleDesc->natts;
870 Datum *values;
871 bool *isnull;
872 HeapTuple newTuple;
873 int i;
874
875 /*
876 * allocate and fill values and isnull arrays from the tuple, then replace
877 * selected columns from the input arrays.
878 */
879 values = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
880 isnull = (bool *) palloc(numberOfAttributes * sizeof(bool));
881
882 heap_deform_tuple(tuple, tupleDesc, values, isnull);
883
884 for (i = 0; i < nCols; i++)
885 {
886 int attnum = replCols[i];
887
888 if (attnum <= 0 || attnum > numberOfAttributes)
889 elog(ERROR, "invalid column number %d", attnum);
890 values[attnum - 1] = replValues[i];
891 isnull[attnum - 1] = replIsnull[i];
892 }
893
894 /*
895 * create a new tuple from the values and isnull arrays
896 */
897 newTuple = heap_form_tuple(tupleDesc, values, isnull);
898
899 pfree(values);
900 pfree(isnull);
901
902 /*
903 * copy the identification info of the old tuple: t_ctid, t_self, and OID
904 * (if any)
905 */
906 newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
907 newTuple->t_self = tuple->t_self;
908 newTuple->t_tableOid = tuple->t_tableOid;
909 if (tupleDesc->tdhasoid)
910 HeapTupleSetOid(newTuple, HeapTupleGetOid(tuple));
911
912 return newTuple;
913 }
914
915 /*
916 * heap_deform_tuple
917 * Given a tuple, extract data into values/isnull arrays; this is
918 * the inverse of heap_form_tuple.
919 *
920 * Storage for the values/isnull arrays is provided by the caller;
921 * it should be sized according to tupleDesc->natts not
922 * HeapTupleHeaderGetNatts(tuple->t_data).
923 *
924 * Note that for pass-by-reference datatypes, the pointer placed
925 * in the Datum will point into the given tuple.
926 *
927 * When all or most of a tuple's fields need to be extracted,
928 * this routine will be significantly quicker than a loop around
929 * heap_getattr; the loop will become O(N^2) as soon as any
930 * noncacheable attribute offsets are involved.
931 */
932 void
heap_deform_tuple(HeapTuple tuple,TupleDesc tupleDesc,Datum * values,bool * isnull)933 heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
934 Datum *values, bool *isnull)
935 {
936 HeapTupleHeader tup = tuple->t_data;
937 bool hasnulls = HeapTupleHasNulls(tuple);
938 Form_pg_attribute *att = tupleDesc->attrs;
939 int tdesc_natts = tupleDesc->natts;
940 int natts; /* number of atts to extract */
941 int attnum;
942 char *tp; /* ptr to tuple data */
943 long off; /* offset in tuple data */
944 bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
945 bool slow = false; /* can we use/set attcacheoff? */
946
947 natts = HeapTupleHeaderGetNatts(tup);
948
949 /*
950 * In inheritance situations, it is possible that the given tuple actually
951 * has more fields than the caller is expecting. Don't run off the end of
952 * the caller's arrays.
953 */
954 natts = Min(natts, tdesc_natts);
955
956 tp = (char *) tup + tup->t_hoff;
957
958 off = 0;
959
960 for (attnum = 0; attnum < natts; attnum++)
961 {
962 Form_pg_attribute thisatt = att[attnum];
963
964 if (hasnulls && att_isnull(attnum, bp))
965 {
966 values[attnum] = (Datum) 0;
967 isnull[attnum] = true;
968 slow = true; /* can't use attcacheoff anymore */
969 continue;
970 }
971
972 isnull[attnum] = false;
973
974 if (!slow && thisatt->attcacheoff >= 0)
975 off = thisatt->attcacheoff;
976 else if (thisatt->attlen == -1)
977 {
978 /*
979 * We can only cache the offset for a varlena attribute if the
980 * offset is already suitably aligned, so that there would be no
981 * pad bytes in any case: then the offset will be valid for either
982 * an aligned or unaligned value.
983 */
984 if (!slow &&
985 off == att_align_nominal(off, thisatt->attalign))
986 thisatt->attcacheoff = off;
987 else
988 {
989 off = att_align_pointer(off, thisatt->attalign, -1,
990 tp + off);
991 slow = true;
992 }
993 }
994 else
995 {
996 /* not varlena, so safe to use att_align_nominal */
997 off = att_align_nominal(off, thisatt->attalign);
998
999 if (!slow)
1000 thisatt->attcacheoff = off;
1001 }
1002
1003 values[attnum] = fetchatt(thisatt, tp + off);
1004
1005 off = att_addlength_pointer(off, thisatt->attlen, tp + off);
1006
1007 if (thisatt->attlen <= 0)
1008 slow = true; /* can't use attcacheoff anymore */
1009 }
1010
1011 /*
1012 * If tuple doesn't have all the atts indicated by tupleDesc, read the
1013 * rest as null
1014 */
1015 for (; attnum < tdesc_natts; attnum++)
1016 {
1017 values[attnum] = (Datum) 0;
1018 isnull[attnum] = true;
1019 }
1020 }
1021
1022 /*
1023 * slot_deform_tuple
1024 * Given a TupleTableSlot, extract data from the slot's physical tuple
1025 * into its Datum/isnull arrays. Data is extracted up through the
1026 * natts'th column (caller must ensure this is a legal column number).
1027 *
1028 * This is essentially an incremental version of heap_deform_tuple:
1029 * on each call we extract attributes up to the one needed, without
1030 * re-computing information about previously extracted attributes.
1031 * slot->tts_nvalid is the number of attributes already extracted.
1032 */
1033 static void
slot_deform_tuple(TupleTableSlot * slot,int natts)1034 slot_deform_tuple(TupleTableSlot *slot, int natts)
1035 {
1036 HeapTuple tuple = slot->tts_tuple;
1037 TupleDesc tupleDesc = slot->tts_tupleDescriptor;
1038 Datum *values = slot->tts_values;
1039 bool *isnull = slot->tts_isnull;
1040 HeapTupleHeader tup = tuple->t_data;
1041 bool hasnulls = HeapTupleHasNulls(tuple);
1042 Form_pg_attribute *att = tupleDesc->attrs;
1043 int attnum;
1044 char *tp; /* ptr to tuple data */
1045 long off; /* offset in tuple data */
1046 bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
1047 bool slow; /* can we use/set attcacheoff? */
1048
1049 /*
1050 * Check whether the first call for this tuple, and initialize or restore
1051 * loop state.
1052 */
1053 attnum = slot->tts_nvalid;
1054 if (attnum == 0)
1055 {
1056 /* Start from the first attribute */
1057 off = 0;
1058 slow = false;
1059 }
1060 else
1061 {
1062 /* Restore state from previous execution */
1063 off = slot->tts_off;
1064 slow = slot->tts_slow;
1065 }
1066
1067 tp = (char *) tup + tup->t_hoff;
1068
1069 for (; attnum < natts; attnum++)
1070 {
1071 Form_pg_attribute thisatt = att[attnum];
1072
1073 if (hasnulls && att_isnull(attnum, bp))
1074 {
1075 values[attnum] = (Datum) 0;
1076 isnull[attnum] = true;
1077 slow = true; /* can't use attcacheoff anymore */
1078 continue;
1079 }
1080
1081 isnull[attnum] = false;
1082
1083 if (!slow && thisatt->attcacheoff >= 0)
1084 off = thisatt->attcacheoff;
1085 else if (thisatt->attlen == -1)
1086 {
1087 /*
1088 * We can only cache the offset for a varlena attribute if the
1089 * offset is already suitably aligned, so that there would be no
1090 * pad bytes in any case: then the offset will be valid for either
1091 * an aligned or unaligned value.
1092 */
1093 if (!slow &&
1094 off == att_align_nominal(off, thisatt->attalign))
1095 thisatt->attcacheoff = off;
1096 else
1097 {
1098 off = att_align_pointer(off, thisatt->attalign, -1,
1099 tp + off);
1100 slow = true;
1101 }
1102 }
1103 else
1104 {
1105 /* not varlena, so safe to use att_align_nominal */
1106 off = att_align_nominal(off, thisatt->attalign);
1107
1108 if (!slow)
1109 thisatt->attcacheoff = off;
1110 }
1111
1112 values[attnum] = fetchatt(thisatt, tp + off);
1113
1114 off = att_addlength_pointer(off, thisatt->attlen, tp + off);
1115
1116 if (thisatt->attlen <= 0)
1117 slow = true; /* can't use attcacheoff anymore */
1118 }
1119
1120 /*
1121 * Save state for next execution
1122 */
1123 slot->tts_nvalid = attnum;
1124 slot->tts_off = off;
1125 slot->tts_slow = slow;
1126 }
1127
1128 /*
1129 * slot_getattr
1130 * This function fetches an attribute of the slot's current tuple.
1131 * It is functionally equivalent to heap_getattr, but fetches of
1132 * multiple attributes of the same tuple will be optimized better,
1133 * because we avoid O(N^2) behavior from multiple calls of
1134 * nocachegetattr(), even when attcacheoff isn't usable.
1135 *
1136 * A difference from raw heap_getattr is that attnums beyond the
1137 * slot's tupdesc's last attribute will be considered NULL even
1138 * when the physical tuple is longer than the tupdesc.
1139 */
1140 Datum
slot_getattr(TupleTableSlot * slot,int attnum,bool * isnull)1141 slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull)
1142 {
1143 HeapTuple tuple = slot->tts_tuple;
1144 TupleDesc tupleDesc = slot->tts_tupleDescriptor;
1145 HeapTupleHeader tup;
1146
1147 /*
1148 * system attributes are handled by heap_getsysattr
1149 */
1150 if (attnum <= 0)
1151 {
1152 if (tuple == NULL) /* internal error */
1153 elog(ERROR, "cannot extract system attribute from virtual tuple");
1154 if (tuple == &(slot->tts_minhdr)) /* internal error */
1155 elog(ERROR, "cannot extract system attribute from minimal tuple");
1156 return heap_getsysattr(tuple, attnum, tupleDesc, isnull);
1157 }
1158
1159 /*
1160 * fast path if desired attribute already cached
1161 */
1162 if (attnum <= slot->tts_nvalid)
1163 {
1164 *isnull = slot->tts_isnull[attnum - 1];
1165 return slot->tts_values[attnum - 1];
1166 }
1167
1168 /*
1169 * return NULL if attnum is out of range according to the tupdesc
1170 */
1171 if (attnum > tupleDesc->natts)
1172 {
1173 *isnull = true;
1174 return (Datum) 0;
1175 }
1176
1177 /*
1178 * otherwise we had better have a physical tuple (tts_nvalid should equal
1179 * natts in all virtual-tuple cases)
1180 */
1181 if (tuple == NULL) /* internal error */
1182 elog(ERROR, "cannot extract attribute from empty tuple slot");
1183
1184 /*
1185 * return NULL if attnum is out of range according to the tuple
1186 *
1187 * (We have to check this separately because of various inheritance and
1188 * table-alteration scenarios: the tuple could be either longer or shorter
1189 * than the tupdesc.)
1190 */
1191 tup = tuple->t_data;
1192 if (attnum > HeapTupleHeaderGetNatts(tup))
1193 {
1194 *isnull = true;
1195 return (Datum) 0;
1196 }
1197
1198 /*
1199 * check if target attribute is null: no point in groveling through tuple
1200 */
1201 if (HeapTupleHasNulls(tuple) && att_isnull(attnum - 1, tup->t_bits))
1202 {
1203 *isnull = true;
1204 return (Datum) 0;
1205 }
1206
1207 /*
1208 * If the attribute's column has been dropped, we force a NULL result.
1209 * This case should not happen in normal use, but it could happen if we
1210 * are executing a plan cached before the column was dropped.
1211 */
1212 if (tupleDesc->attrs[attnum - 1]->attisdropped)
1213 {
1214 *isnull = true;
1215 return (Datum) 0;
1216 }
1217
1218 /*
1219 * Extract the attribute, along with any preceding attributes.
1220 */
1221 slot_deform_tuple(slot, attnum);
1222
1223 /*
1224 * The result is acquired from tts_values array.
1225 */
1226 *isnull = slot->tts_isnull[attnum - 1];
1227 return slot->tts_values[attnum - 1];
1228 }
1229
1230 /*
1231 * slot_getallattrs
1232 * This function forces all the entries of the slot's Datum/isnull
1233 * arrays to be valid. The caller may then extract data directly
1234 * from those arrays instead of using slot_getattr.
1235 */
1236 void
slot_getallattrs(TupleTableSlot * slot)1237 slot_getallattrs(TupleTableSlot *slot)
1238 {
1239 int tdesc_natts = slot->tts_tupleDescriptor->natts;
1240 int attnum;
1241 HeapTuple tuple;
1242
1243 /* Quick out if we have 'em all already */
1244 if (slot->tts_nvalid == tdesc_natts)
1245 return;
1246
1247 /*
1248 * otherwise we had better have a physical tuple (tts_nvalid should equal
1249 * natts in all virtual-tuple cases)
1250 */
1251 tuple = slot->tts_tuple;
1252 if (tuple == NULL) /* internal error */
1253 elog(ERROR, "cannot extract attribute from empty tuple slot");
1254
1255 /*
1256 * load up any slots available from physical tuple
1257 */
1258 attnum = HeapTupleHeaderGetNatts(tuple->t_data);
1259 attnum = Min(attnum, tdesc_natts);
1260
1261 slot_deform_tuple(slot, attnum);
1262
1263 /*
1264 * If tuple doesn't have all the atts indicated by tupleDesc, read the
1265 * rest as null
1266 */
1267 for (; attnum < tdesc_natts; attnum++)
1268 {
1269 slot->tts_values[attnum] = (Datum) 0;
1270 slot->tts_isnull[attnum] = true;
1271 }
1272 slot->tts_nvalid = tdesc_natts;
1273 }
1274
1275 /*
1276 * slot_getsomeattrs
1277 * This function forces the entries of the slot's Datum/isnull
1278 * arrays to be valid at least up through the attnum'th entry.
1279 */
1280 void
slot_getsomeattrs(TupleTableSlot * slot,int attnum)1281 slot_getsomeattrs(TupleTableSlot *slot, int attnum)
1282 {
1283 HeapTuple tuple;
1284 int attno;
1285
1286 /* Quick out if we have 'em all already */
1287 if (slot->tts_nvalid >= attnum)
1288 return;
1289
1290 /* Check for caller error */
1291 if (attnum <= 0 || attnum > slot->tts_tupleDescriptor->natts)
1292 elog(ERROR, "invalid attribute number %d", attnum);
1293
1294 /*
1295 * otherwise we had better have a physical tuple (tts_nvalid should equal
1296 * natts in all virtual-tuple cases)
1297 */
1298 tuple = slot->tts_tuple;
1299 if (tuple == NULL) /* internal error */
1300 elog(ERROR, "cannot extract attribute from empty tuple slot");
1301
1302 /*
1303 * load up any slots available from physical tuple
1304 */
1305 attno = HeapTupleHeaderGetNatts(tuple->t_data);
1306 attno = Min(attno, attnum);
1307
1308 slot_deform_tuple(slot, attno);
1309
1310 /*
1311 * If tuple doesn't have all the atts indicated by tupleDesc, read the
1312 * rest as null
1313 */
1314 for (; attno < attnum; attno++)
1315 {
1316 slot->tts_values[attno] = (Datum) 0;
1317 slot->tts_isnull[attno] = true;
1318 }
1319 slot->tts_nvalid = attnum;
1320 }
1321
1322 /*
1323 * slot_attisnull
1324 * Detect whether an attribute of the slot is null, without
1325 * actually fetching it.
1326 */
1327 bool
slot_attisnull(TupleTableSlot * slot,int attnum)1328 slot_attisnull(TupleTableSlot *slot, int attnum)
1329 {
1330 HeapTuple tuple = slot->tts_tuple;
1331 TupleDesc tupleDesc = slot->tts_tupleDescriptor;
1332
1333 /*
1334 * system attributes are handled by heap_attisnull
1335 */
1336 if (attnum <= 0)
1337 {
1338 if (tuple == NULL) /* internal error */
1339 elog(ERROR, "cannot extract system attribute from virtual tuple");
1340 if (tuple == &(slot->tts_minhdr)) /* internal error */
1341 elog(ERROR, "cannot extract system attribute from minimal tuple");
1342 return heap_attisnull(tuple, attnum);
1343 }
1344
1345 /*
1346 * fast path if desired attribute already cached
1347 */
1348 if (attnum <= slot->tts_nvalid)
1349 return slot->tts_isnull[attnum - 1];
1350
1351 /*
1352 * return NULL if attnum is out of range according to the tupdesc
1353 */
1354 if (attnum > tupleDesc->natts)
1355 return true;
1356
1357 /*
1358 * otherwise we had better have a physical tuple (tts_nvalid should equal
1359 * natts in all virtual-tuple cases)
1360 */
1361 if (tuple == NULL) /* internal error */
1362 elog(ERROR, "cannot extract attribute from empty tuple slot");
1363
1364 /* and let the tuple tell it */
1365 return heap_attisnull(tuple, attnum);
1366 }
1367
1368 /*
1369 * slot_getsysattr
1370 * This function fetches a system attribute of the slot's current tuple.
1371 * Unlike slot_getattr, if the slot does not contain system attributes,
1372 * this will return false (with a NULL attribute value) instead of
1373 * throwing an error.
1374 */
1375 bool
slot_getsysattr(TupleTableSlot * slot,int attnum,Datum * value,bool * isnull)1376 slot_getsysattr(TupleTableSlot *slot, int attnum,
1377 Datum *value, bool *isnull)
1378 {
1379 HeapTuple tuple = slot->tts_tuple;
1380
1381 Assert(attnum < 0); /* else caller error */
1382 if (tuple == NULL ||
1383 tuple == &(slot->tts_minhdr))
1384 {
1385 /* No physical tuple, or minimal tuple, so fail */
1386 *value = (Datum) 0;
1387 *isnull = true;
1388 return false;
1389 }
1390 *value = heap_getsysattr(tuple, attnum, slot->tts_tupleDescriptor, isnull);
1391 return true;
1392 }
1393
1394 /*
1395 * heap_freetuple
1396 */
1397 void
heap_freetuple(HeapTuple htup)1398 heap_freetuple(HeapTuple htup)
1399 {
1400 pfree(htup);
1401 }
1402
1403
1404 /*
1405 * heap_form_minimal_tuple
1406 * construct a MinimalTuple from the given values[] and isnull[] arrays,
1407 * which are of the length indicated by tupleDescriptor->natts
1408 *
1409 * This is exactly like heap_form_tuple() except that the result is a
1410 * "minimal" tuple lacking a HeapTupleData header as well as room for system
1411 * columns.
1412 *
1413 * The result is allocated in the current memory context.
1414 */
1415 MinimalTuple
heap_form_minimal_tuple(TupleDesc tupleDescriptor,Datum * values,bool * isnull)1416 heap_form_minimal_tuple(TupleDesc tupleDescriptor,
1417 Datum *values,
1418 bool *isnull)
1419 {
1420 MinimalTuple tuple; /* return tuple */
1421 Size len,
1422 data_len;
1423 int hoff;
1424 bool hasnull = false;
1425 int numberOfAttributes = tupleDescriptor->natts;
1426 int i;
1427
1428 if (numberOfAttributes > MaxTupleAttributeNumber)
1429 ereport(ERROR,
1430 (errcode(ERRCODE_TOO_MANY_COLUMNS),
1431 errmsg("number of columns (%d) exceeds limit (%d)",
1432 numberOfAttributes, MaxTupleAttributeNumber)));
1433
1434 /*
1435 * Check for nulls
1436 */
1437 for (i = 0; i < numberOfAttributes; i++)
1438 {
1439 if (isnull[i])
1440 {
1441 hasnull = true;
1442 break;
1443 }
1444 }
1445
1446 /*
1447 * Determine total space needed
1448 */
1449 len = SizeofMinimalTupleHeader;
1450
1451 if (hasnull)
1452 len += BITMAPLEN(numberOfAttributes);
1453
1454 if (tupleDescriptor->tdhasoid)
1455 len += sizeof(Oid);
1456
1457 hoff = len = MAXALIGN(len); /* align user data safely */
1458
1459 data_len = heap_compute_data_size(tupleDescriptor, values, isnull);
1460
1461 len += data_len;
1462
1463 /*
1464 * Allocate and zero the space needed.
1465 */
1466 tuple = (MinimalTuple) palloc0(len);
1467
1468 /*
1469 * And fill in the information.
1470 */
1471 tuple->t_len = len;
1472 HeapTupleHeaderSetNatts(tuple, numberOfAttributes);
1473 tuple->t_hoff = hoff + MINIMAL_TUPLE_OFFSET;
1474
1475 if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
1476 tuple->t_infomask = HEAP_HASOID;
1477
1478 heap_fill_tuple(tupleDescriptor,
1479 values,
1480 isnull,
1481 (char *) tuple + hoff,
1482 data_len,
1483 &tuple->t_infomask,
1484 (hasnull ? tuple->t_bits : NULL));
1485
1486 return tuple;
1487 }
1488
1489 /*
1490 * heap_free_minimal_tuple
1491 */
1492 void
heap_free_minimal_tuple(MinimalTuple mtup)1493 heap_free_minimal_tuple(MinimalTuple mtup)
1494 {
1495 pfree(mtup);
1496 }
1497
1498 /*
1499 * heap_copy_minimal_tuple
1500 * copy a MinimalTuple
1501 *
1502 * The result is allocated in the current memory context.
1503 */
1504 MinimalTuple
heap_copy_minimal_tuple(MinimalTuple mtup)1505 heap_copy_minimal_tuple(MinimalTuple mtup)
1506 {
1507 MinimalTuple result;
1508
1509 result = (MinimalTuple) palloc(mtup->t_len);
1510 memcpy(result, mtup, mtup->t_len);
1511 return result;
1512 }
1513
1514 /*
1515 * heap_tuple_from_minimal_tuple
1516 * create a HeapTuple by copying from a MinimalTuple;
1517 * system columns are filled with zeroes
1518 *
1519 * The result is allocated in the current memory context.
1520 * The HeapTuple struct, tuple header, and tuple data are all allocated
1521 * as a single palloc() block.
1522 */
1523 HeapTuple
heap_tuple_from_minimal_tuple(MinimalTuple mtup)1524 heap_tuple_from_minimal_tuple(MinimalTuple mtup)
1525 {
1526 HeapTuple result;
1527 uint32 len = mtup->t_len + MINIMAL_TUPLE_OFFSET;
1528
1529 result = (HeapTuple) palloc(HEAPTUPLESIZE + len);
1530 result->t_len = len;
1531 ItemPointerSetInvalid(&(result->t_self));
1532 result->t_tableOid = InvalidOid;
1533 result->t_data = (HeapTupleHeader) ((char *) result + HEAPTUPLESIZE);
1534 memcpy((char *) result->t_data + MINIMAL_TUPLE_OFFSET, mtup, mtup->t_len);
1535 memset(result->t_data, 0, offsetof(HeapTupleHeaderData, t_infomask2));
1536 return result;
1537 }
1538
1539 /*
1540 * minimal_tuple_from_heap_tuple
1541 * create a MinimalTuple by copying from a HeapTuple
1542 *
1543 * The result is allocated in the current memory context.
1544 */
1545 MinimalTuple
minimal_tuple_from_heap_tuple(HeapTuple htup)1546 minimal_tuple_from_heap_tuple(HeapTuple htup)
1547 {
1548 MinimalTuple result;
1549 uint32 len;
1550
1551 Assert(htup->t_len > MINIMAL_TUPLE_OFFSET);
1552 len = htup->t_len - MINIMAL_TUPLE_OFFSET;
1553 result = (MinimalTuple) palloc(len);
1554 memcpy(result, (char *) htup->t_data + MINIMAL_TUPLE_OFFSET, len);
1555 result->t_len = len;
1556 return result;
1557 }
1558