heaptuple.c
1/*-------------------------------------------------------------------------
2 *
3 * heaptuple.c
4 * This file contains heap tuple accessor and mutator routines, as well
5 * as various tuple utilities.
6 *
7 * Some notes about varlenas and this code:
8 *
9 * Before Postgres 8.3 varlenas always had a 4-byte length header, and
10 * therefore always needed 4-byte alignment (at least). This wasted space
11 * for short varlenas, for example CHAR(1) took 5 bytes and could need up to
12 * 3 additional padding bytes for alignment.
13 *
14 * Now, a short varlena (up to 126 data bytes) is reduced to a 1-byte header
15 * and we don't align it. To hide this from datatype-specific functions that
16 * don't want to deal with it, such a datum is considered "toasted" and will
17 * be expanded back to the normal 4-byte-header format by pg_detoast_datum.
18 * (In performance-critical code paths we can use pg_detoast_datum_packed
19 * and the appropriate access macros to avoid that overhead.) Note that this
20 * conversion is performed directly in heap_form_tuple, without invoking
21 * heaptoast.c.
22 *
23 * This change will break any code that assumes it needn't detoast values
24 * that have been put into a tuple but never sent to disk. Hopefully there
25 * are few such places.
26 *
27 * Varlenas still have alignment INT (or DOUBLE) in pg_type/pg_attribute, since
28 * that's the normal requirement for the untoasted format. But we ignore that
29 * for the 1-byte-header format. This means that the actual start position
30 * of a varlena datum may vary depending on which format it has. To determine
31 * what is stored, we have to require that alignment padding bytes be zero.
32 * (Postgres actually has always zeroed them, but now it's required!) Since
33 * the first byte of a 1-byte-header varlena can never be zero, we can examine
34 * the first byte after the previous datum to tell if it's a pad byte or the
35 * start of a 1-byte-header varlena.
36 *
37 * Note that while formerly we could rely on the first varlena column of a
38 * system catalog to be at the offset suggested by the C struct for the
39 * catalog, this is now risky: it's only safe if the preceding field is
40 * word-aligned, so that there will never be any padding.
41 *
42 * We don't pack varlenas whose attstorage is PLAIN, since the data type
43 * isn't expecting to have to detoast values. This is used in particular
44 * by oidvector and int2vector, which are used in the system catalogs
45 * and we'd like to still refer to them via C struct offsets.
46 *
47 *
48 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
49 * Portions Copyright (c) 1994, Regents of the University of California
50 *
51 *
52 * IDENTIFICATION
53 * src/backend/access/common/heaptuple.c
54 *
55 *-------------------------------------------------------------------------
56 */
57
58#include "postgres.h"
59
60#include "access/heaptoast.h"
61#include "access/sysattr.h"
63#include "common/hashfn.h"
64#include "utils/datum.h"
65#include "utils/expandeddatum.h"
66#include "utils/hsearch.h"
67#include "utils/memutils.h"
68
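/*
 * Illustration of the pad-byte rule described in the header comment (a
 * rough sketch, not code used elsewhere in this file): when walking a
 * tuple, the byte immediately after the previous datum tells us what a
 * varlena column stored there looks like:
 *
 *		char *next = tp + off;
 *
 *		if (*next != 0)
 *			// nonzero: the datum starts right here, unaligned -- either a
 *			// 1-byte-header short varlena or a TOAST pointer; read it with
 *			// VARSIZE_ANY()/VARDATA_ANY()
 *		else
 *			// zero: a pad byte; align "off" up to the column's alignment
 *			// and read a normal 4-byte-header varlena there
 *
 * In the routines below this decision is hidden inside att_pointer_alignby().
 */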
69
70/*
71 * Does att's datatype allow packing into the 1-byte-header varlena format?
72 * While functions that use TupleDescAttr() and assign attstorage =
73 * TYPSTORAGE_PLAIN cannot use packed varlena headers, functions that call
74 * TupleDescInitEntry() use typeForm->typstorage (TYPSTORAGE_EXTENDED) and
75 * can use packed varlena headers, e.g.:
76 * CREATE TABLE test(a VARCHAR(10000) STORAGE PLAIN);
77 * INSERT INTO test VALUES (repeat('A',10));
78 * This can be verified with pageinspect.
79 */
80#define ATT_IS_PACKABLE(att) \
81 ((att)->attlen == -1 && (att)->attstorage != TYPSTORAGE_PLAIN)
82/* Use this if it's already known varlena */
83#define VARLENA_ATT_IS_PACKABLE(att) \
84 ((att)->attstorage != TYPSTORAGE_PLAIN)
85
86/* Same as ATT_IS_PACKABLE, but for a CompactAttribute, whose attispackable flag caches attstorage != TYPSTORAGE_PLAIN */
87#define COMPACT_ATTR_IS_PACKABLE(att) \
88 ((att)->attlen == -1 && (att)->attispackable)
89
90/*
91 * Setup for caching pass-by-ref missing attributes in a way that survives
92 * tupleDesc destruction.
93 */
94
95typedef struct
96{
97 int len;
98 Datum value;
99} missing_cache_key;
100
101static HTAB *missing_cache = NULL;
102
103static uint32
104missing_hash(const void *key, Size keysize)
105{
106 const missing_cache_key *entry = (missing_cache_key *) key;
107
108 return hash_bytes((const unsigned char *) entry->value, entry->len);
109}
110
111static int
112missing_match(const void *key1, const void *key2, Size keysize)
113{
114 const missing_cache_key *entry1 = (missing_cache_key *) key1;
115 const missing_cache_key *entry2 = (missing_cache_key *) key2;
116
117 if (entry1->len != entry2->len)
118 return entry1->len > entry2->len ? 1 : -1;
119
120 return memcmp(DatumGetPointer(entry1->value),
121 DatumGetPointer(entry2->value),
122 entry1->len);
123}
124
125static void
126init_missing_cache()
127{
128 HASHCTL hash_ctl;
129
130 hash_ctl.keysize = sizeof(missing_cache_key);
131 hash_ctl.entrysize = sizeof(missing_cache_key);
132 hash_ctl.hcxt = TopMemoryContext;
133 hash_ctl.hash = missing_hash;
134 hash_ctl.match = missing_match;
135 missing_cache =
136 hash_create("Missing Values Cache",
137 32,
138 &hash_ctl,
139 HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
140}
141
142/* ----------------------------------------------------------------
143 * misc support routines
144 * ----------------------------------------------------------------
145 */
146
147/*
148 * Return the missing value of an attribute, or NULL if there isn't one.
149 */
150Datum
151getmissingattr(TupleDesc tupleDesc,
152 int attnum, bool *isnull)
153{
154 CompactAttribute *att;
155
156 Assert(attnum <= tupleDesc->natts);
157 Assert(attnum > 0);
158
159 att = TupleDescCompactAttr(tupleDesc, attnum - 1);
160
161 if (att->atthasmissing)
162 {
163 AttrMissing *attrmiss;
164
165 Assert(tupleDesc->constr);
166 Assert(tupleDesc->constr->missing);
167
168 attrmiss = tupleDesc->constr->missing + (attnum - 1);
169
170 if (attrmiss->am_present)
171 {
172 missing_cache_key key;
173 missing_cache_key *entry;
174 bool found;
175 MemoryContext oldctx;
176
177 *isnull = false;
178
179 /* no need to cache by-value attributes */
180 if (att->attbyval)
181 return attrmiss->am_value;
182
183 /* set up cache if required */
184 if (missing_cache == NULL)
185 init_missing_cache();
186
187 /* check if there's a cache entry */
188 Assert(att->attlen > 0 || att->attlen == -1);
189 if (att->attlen > 0)
190 key.len = att->attlen;
191 else
192 key.len = VARSIZE_ANY(attrmiss->am_value);
193 key.value = attrmiss->am_value;
194
195 entry = hash_search(missing_cache, &key, HASH_ENTER, &found);
196
197 if (!found)
198 {
199 /* cache miss, so we need a non-transient copy of the datum */
200 oldctx = MemoryContextSwitchTo(TopMemoryContext);
201 entry->value =
202 datumCopy(attrmiss->am_value, false, att->attlen);
203 MemoryContextSwitchTo(oldctx);
204 }
205
206 return entry->value;
207 }
208 }
209
210 *isnull = true;
211 return PointerGetDatum(NULL);
212}
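/*
 * A note on the caching above (explanatory): the hash key is the missing
 * value's length plus its bytes (see missing_hash/missing_match), so any
 * number of tuple descriptors whose columns carry byte-identical missing
 * values share a single datumCopy() into TopMemoryContext.  That long-lived
 * copy is what gets returned, which is why the result remains usable even
 * after the TupleDesc that supplied attrmiss has been freed.
 */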
213
214/*
215 * heap_compute_data_size
216 * Determine size of the data area of a tuple to be constructed
217 */
218Size
219heap_compute_data_size(TupleDesc tupleDesc,
220 const Datum *values,
221 const bool *isnull)
222{
223 Size data_length = 0;
224 int i;
225 int numberOfAttributes = tupleDesc->natts;
226
227 for (i = 0; i < numberOfAttributes; i++)
228 {
229 Datum val;
230 CompactAttribute *atti;
231
232 if (isnull[i])
233 continue;
234
235 val = values[i];
236 atti = TupleDescCompactAttr(tupleDesc, i);
237
238 if (COMPACT_ATTR_IS_PACKABLE(atti) &&
239 VARATT_CAN_MAKE_SHORT(DatumGetPointer(val)))
240 {
241 /*
242 * we're anticipating converting to a short varlena header, so
243 * adjust length and don't count any alignment
244 */
245 data_length += VARATT_CONVERTED_SHORT_SIZE(DatumGetPointer(val));
246 }
247 else if (atti->attlen == -1 &&
248 VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(val)))
249 {
250 /*
251 * we want to flatten the expanded value so that the constructed
252 * tuple doesn't depend on it
253 */
254 data_length = att_nominal_alignby(data_length, atti->attalignby);
255 data_length += EOH_get_flat_size(DatumGetEOHP(val));
256 }
257 else
258 {
259 data_length = att_datum_alignby(data_length, atti->attalignby,
260 atti->attlen, val);
261 data_length = att_addlength_datum(data_length, atti->attlen,
262 val);
263 }
264 }
265
266 return data_length;
267}
268
269/*
270 * Per-attribute helper for heap_fill_tuple and other routines building tuples.
271 *
272 * Fill in either a data value or a bit in the null bitmask
273 */
274static inline void
275fill_val(CompactAttribute *att,
276 bits8 **bit,
277 int *bitmask,
278 char **dataP,
279 uint16 *infomask,
280 Datum datum,
281 bool isnull)
282{
283 Size data_length;
284 char *data = *dataP;
285
286 /*
287 * If we're building a null bitmap, set the appropriate bit for the
288 * current column value here.
289 */
290 if (bit != NULL)
291 {
292 if (*bitmask != HIGHBIT)
293 *bitmask <<= 1;
294 else
295 {
296 *bit += 1;
297 **bit = 0x0;
298 *bitmask = 1;
299 }
300
301 if (isnull)
302 {
303 *infomask |= HEAP_HASNULL;
304 return;
305 }
306
307 **bit |= *bitmask;
308 }
309
310 /*
311 * XXX we use the att_nominal_alignby macro on the pointer value itself,
312 * not on an offset. This is a bit of a hack.
313 */
314 if (att->attbyval)
315 {
316 /* pass-by-value */
317 data = (char *) att_nominal_alignby(data, att->attalignby);
318 store_att_byval(data, datum, att->attlen);
319 data_length = att->attlen;
320 }
321 else if (att->attlen == -1)
322 {
323 /* varlena */
324 Pointer val = DatumGetPointer(datum);
325
326 *infomask |= HEAP_HASVARWIDTH;
327 if (VARATT_IS_EXTERNAL(val))
328 {
329 if (VARATT_IS_EXTERNAL_EXPANDED(val))
330 {
331 /*
332 * we want to flatten the expanded value so that the
333 * constructed tuple doesn't depend on it
334 */
335 ExpandedObjectHeader *eoh = DatumGetEOHP(datum);
336
337 data = (char *) att_nominal_alignby(data, att->attalignby);
338 data_length = EOH_get_flat_size(eoh);
339 EOH_flatten_into(eoh, data, data_length);
340 }
341 else
342 {
343 *infomask |= HEAP_HASEXTERNAL;
344 /* no alignment, since it's short by definition */
345 data_length = VARSIZE_EXTERNAL(val);
346 memcpy(data, val, data_length);
347 }
348 }
349 else if (VARATT_IS_SHORT(val))
350 {
351 /* no alignment for short varlenas */
352 data_length = VARSIZE_SHORT(val);
353 memcpy(data, val, data_length);
354 }
355 else if (att->attispackable && VARATT_CAN_MAKE_SHORT(val))
356 {
357 /* convert to short varlena -- no alignment */
358 data_length = VARATT_CONVERTED_SHORT_SIZE(val);
359 SET_VARSIZE_SHORT(data, data_length);
360 memcpy(data + 1, VARDATA(val), data_length - 1);
361 }
362 else
363 {
364 /* full 4-byte header varlena */
365 data = (char *) att_nominal_alignby(data, att->attalignby);
366 data_length = VARSIZE(val);
367 memcpy(data, val, data_length);
368 }
369 }
370 else if (att->attlen == -2)
371 {
372 /* cstring ... never needs alignment */
373 *infomask |= HEAP_HASVARWIDTH;
374 Assert(att->attalignby == sizeof(char));
375 data_length = strlen(DatumGetCString(datum)) + 1;
376 memcpy(data, DatumGetPointer(datum), data_length);
377 }
378 else
379 {
380 /* fixed-length pass-by-reference */
381 data = (char *) att_nominal_alignby(data, att->attalignby);
382 Assert(att->attlen > 0);
383 data_length = att->attlen;
384 memcpy(data, DatumGetPointer(datum), data_length);
385 }
386
387 data += data_length;
388 *dataP = data;
389}
390
391/*
392 * heap_fill_tuple
393 * Load data portion of a tuple from values/isnull arrays
394 *
395 * We also fill the null bitmap (if any) and set the infomask bits
396 * that reflect the tuple's data contents.
397 *
398 * NOTE: it is now REQUIRED that the caller have pre-zeroed the data area.
399 */
400void
401heap_fill_tuple(TupleDesc tupleDesc,
402 const Datum *values, const bool *isnull,
403 char *data, Size data_size,
404 uint16 *infomask, bits8 *bit)
405{
406 bits8 *bitP;
407 int bitmask;
408 int i;
409 int numberOfAttributes = tupleDesc->natts;
410
411#ifdef USE_ASSERT_CHECKING
412 char *start = data;
413#endif
414
415 if (bit != NULL)
416 {
417 bitP = &bit[-1];
418 bitmask = HIGHBIT;
419 }
420 else
421 {
422 /* just to keep compiler quiet */
423 bitP = NULL;
424 bitmask = 0;
425 }
426
427 *infomask &= ~(HEAP_HASNULL | HEAP_HASVARWIDTH | HEAP_HASEXTERNAL);
428
429 for (i = 0; i < numberOfAttributes; i++)
430 {
431 CompactAttribute *attr = TupleDescCompactAttr(tupleDesc, i);
432
433 fill_val(attr,
434 bitP ? &bitP : NULL,
435 &bitmask,
436 &data,
437 infomask,
438 values ? values[i] : PointerGetDatum(NULL),
439 isnull ? isnull[i] : true);
440 }
441
442 Assert((data - start) == data_size);
443}
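/*
 * Why the pre-zeroing requirement above matters (explanatory note):
 * fill_val() skips over alignment padding without writing it, so the
 * caller-supplied data area must already be zero-filled for the "pad bytes
 * are zero" rule from the header comment to hold when the tuple is later
 * deformed.  heap_form_tuple() below satisfies this by allocating the whole
 * tuple with palloc0().
 */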
444
445
446/* ----------------------------------------------------------------
447 * heap tuple interface
448 * ----------------------------------------------------------------
449 */
450
451/* ----------------
452 * heap_attisnull - returns true iff tuple attribute is not present
453 * ----------------
454 */
455bool
456heap_attisnull(HeapTuple tup, int attnum, TupleDesc tupleDesc)
457{
458 /*
459 * We allow a NULL tupledesc for relations not expected to have missing
460 * values, such as catalog relations and indexes.
461 */
462 Assert(!tupleDesc || attnum <= tupleDesc->natts);
463 if (attnum > (int) HeapTupleHeaderGetNatts(tup->t_data))
464 {
465 if (tupleDesc &&
466 TupleDescCompactAttr(tupleDesc, attnum - 1)->atthasmissing)
467 return false;
468 else
469 return true;
470 }
471
472 if (attnum > 0)
473 {
474 if (HeapTupleNoNulls(tup))
475 return false;
476 return att_isnull(attnum - 1, tup->t_data->t_bits);
477 }
478
479 switch (attnum)
480 {
481 case TableOidAttributeNumber:
482 case SelfItemPointerAttributeNumber:
483 case MinTransactionIdAttributeNumber:
484 case MinCommandIdAttributeNumber:
485 case MaxTransactionIdAttributeNumber:
486 case MaxCommandIdAttributeNumber:
487 /* these are never null */
488 break;
489
490 default:
491 elog(ERROR, "invalid attnum: %d", attnum);
492 }
493
494 return false;
495}
496
497/* ----------------
498 * nocachegetattr
499 *
500 * This only gets called from fastgetattr(), in cases where we
501 * can't use a cacheoffset and the value is not null.
502 *
503 * This caches attribute offsets in the attribute descriptor.
504 *
505 * An alternative way to speed things up would be to cache offsets
506 * with the tuple, but that seems more difficult unless you take
507 * the storage hit of actually putting those offsets into the
508 * tuple you send to disk. Yuck.
509 *
510 * This scheme will be slightly slower than that, but should
511 * perform well for queries which hit large #'s of tuples. After
512 * you cache the offsets once, examining all the other tuples using
513 * the same attribute descriptor will go much quicker. -cim 5/4/91
514 *
515 * NOTE: if you need to change this code, see also heap_deform_tuple.
516 * Also see nocache_index_getattr, which is the same code for index
517 * tuples.
518 * ----------------
519 */
520Datum
521nocachegetattr(HeapTuple tup,
522 int attnum,
523 TupleDesc tupleDesc)
524{
525 HeapTupleHeader td = tup->t_data;
526 char *tp; /* ptr to data part of tuple */
527 bits8 *bp = td->t_bits; /* ptr to null bitmap in tuple */
528 bool slow = false; /* do we have to walk attrs? */
529 int off; /* current offset within data */
530
531 /* ----------------
532 * Three cases:
533 *
534 * 1: No nulls and no variable-width attributes.
535 * 2: Has a null or a var-width AFTER att.
536 * 3: Has nulls or var-widths BEFORE att.
537 * ----------------
538 */
539
540 attnum--;
541
542 if (!HeapTupleNoNulls(tup))
543 {
544 /*
545 * there's a null somewhere in the tuple
546 *
547 * check to see if any preceding bits are null...
548 */
549 int byte = attnum >> 3;
550 int finalbit = attnum & 0x07;
551
552 /* check for nulls "before" final bit of last byte */
553 if ((~bp[byte]) & ((1 << finalbit) - 1))
554 slow = true;
555 else
556 {
557 /* check for nulls in any "earlier" bytes */
558 int i;
559
560 for (i = 0; i < byte; i++)
561 {
562 if (bp[i] != 0xFF)
563 {
564 slow = true;
565 break;
566 }
567 }
568 }
569 }
570
571 tp = (char *) td + td->t_hoff;
572
573 if (!slow)
574 {
575 CompactAttribute *att;
576
577 /*
578 * If we get here, there are no nulls up to and including the target
579 * attribute. If we have a cached offset, we can use it.
580 */
581 att = TupleDescCompactAttr(tupleDesc, attnum);
582 if (att->attcacheoff >= 0)
583 return fetchatt(att, tp + att->attcacheoff);
584
585 /*
586 * Otherwise, check for non-fixed-length attrs up to and including
587 * target. If there aren't any, it's safe to cheaply initialize the
588 * cached offsets for these attrs.
589 */
590 if (HeapTupleHasVarWidth(tup))
591 {
592 int j;
593
594 for (j = 0; j <= attnum; j++)
595 {
596 if (TupleDescCompactAttr(tupleDesc, j)->attlen <= 0)
597 {
598 slow = true;
599 break;
600 }
601 }
602 }
603 }
604
605 if (!slow)
606 {
607 int natts = tupleDesc->natts;
608 int j = 1;
609
610 /*
611 * If we get here, we have a tuple with no nulls or var-widths up to
612 * and including the target attribute, so we can use the cached offset
613 * ... only we don't have it yet, or we'd not have got here. Since
614 * it's cheap to compute offsets for fixed-width columns, we take the
615 * opportunity to initialize the cached offsets for *all* the leading
616 * fixed-width columns, in hope of avoiding future visits to this
617 * routine.
618 */
619 TupleDescCompactAttr(tupleDesc, 0)->attcacheoff = 0;
620
621 /* we might have set some offsets in the slow path previously */
622 while (j < natts && TupleDescCompactAttr(tupleDesc, j)->attcacheoff > 0)
623 j++;
624
625 off = TupleDescCompactAttr(tupleDesc, j - 1)->attcacheoff +
626 TupleDescCompactAttr(tupleDesc, j - 1)->attlen;
627
628 for (; j < natts; j++)
629 {
630 CompactAttribute *att = TupleDescCompactAttr(tupleDesc, j);
631
632 if (att->attlen <= 0)
633 break;
634
635 off = att_nominal_alignby(off, att->attalignby);
636
637 att->attcacheoff = off;
638
639 off += att->attlen;
640 }
641
642 Assert(j > attnum);
643
644 off = TupleDescCompactAttr(tupleDesc, attnum)->attcacheoff;
645 }
646 else
647 {
648 bool usecache = true;
649 int i;
650
651 /*
652 * Now we know that we have to walk the tuple CAREFULLY. But we still
653 * might be able to cache some offsets for next time.
654 *
655 * Note - This loop is a little tricky. For each non-null attribute,
656 * we have to first account for alignment padding before the attr,
657 * then advance over the attr based on its length. Nulls have no
658 * storage and no alignment padding either. We can use/set
659 * attcacheoff until we reach either a null or a var-width attribute.
660 */
661 off = 0;
662 for (i = 0;; i++) /* loop exit is at "break" */
663 {
664 CompactAttribute *att = TupleDescCompactAttr(tupleDesc, i);
665
666 if (HeapTupleHasNulls(tup) && att_isnull(i, bp))
667 {
668 usecache = false;
669 continue; /* this cannot be the target att */
670 }
671
672 /* If we know the next offset, we can skip the rest */
673 if (usecache && att->attcacheoff >= 0)
674 off = att->attcacheoff;
675 else if (att->attlen == -1)
676 {
677 /*
678 * We can only cache the offset for a varlena attribute if the
679 * offset is already suitably aligned, so that there would be
680 * no pad bytes in any case: then the offset will be valid for
681 * either an aligned or unaligned value.
682 */
683 if (usecache &&
684 off == att_nominal_alignby(off, att->attalignby))
685 att->attcacheoff = off;
686 else
687 {
688 off = att_pointer_alignby(off, att->attalignby, -1,
689 tp + off);
690 usecache = false;
691 }
692 }
693 else
694 {
695 /* not varlena, so safe to use att_nominal_alignby */
696 off = att_nominal_alignby(off, att->attalignby);
697
698 if (usecache)
699 att->attcacheoff = off;
700 }
701
702 if (i == attnum)
703 break;
704
705 off = att_addlength_pointer(off, att->attlen, tp + off);
706
707 if (usecache && att->attlen <= 0)
708 usecache = false;
709 }
710 }
711
712 return fetchatt(TupleDescCompactAttr(tupleDesc, attnum), tp + off);
713}
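/*
 * Worked example of the attcacheoff rules above (illustrative; the column
 * list is invented): for columns (a int4, b int4, c text, d int4)
 *
 *	- a caches offset 0 and b caches offset 4, both being fixed width.
 *	- c can also cache offset 8, because 8 already satisfies the varlena's
 *	  nominal alignment; the cached offset is then correct whether the
 *	  stored value has a 1-byte or a 4-byte header, since no pad bytes can
 *	  occur there.
 *	- d can never use attcacheoff: the width of c varies from tuple to
 *	  tuple, so attributes after it always take the "slow" walk.
 */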
714
715/* ----------------
716 * heap_getsysattr
717 *
718 * Fetch the value of a system attribute for a tuple.
719 *
720 * This is a support routine for heap_getattr(). The function has already
721 * determined that the attnum refers to a system attribute.
722 * ----------------
723 */
724Datum
725heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
726{
727 Datum result;
728
729 Assert(tup);
730
731 /* Currently, no sys attribute ever reads as NULL. */
732 *isnull = false;
733
734 switch (attnum)
735 {
736 case SelfItemPointerAttributeNumber:
737 /* pass-by-reference datatype */
738 result = PointerGetDatum(&(tup->t_self));
739 break;
740 case MinTransactionIdAttributeNumber:
741 result = TransactionIdGetDatum(HeapTupleHeaderGetRawXmin(tup->t_data));
742 break;
743 case MaxTransactionIdAttributeNumber:
744 result = TransactionIdGetDatum(HeapTupleHeaderGetRawXmax(tup->t_data));
745 break;
746 case MinCommandIdAttributeNumber:
747 case MaxCommandIdAttributeNumber:
748
749 /*
750 * cmin and cmax are now both aliases for the same field, which
751 * can in fact also be a combo command id. XXX perhaps we should
752 * return the "real" cmin or cmax if possible, that is if we are
753 * inside the originating transaction?
754 */
755 result = CommandIdGetDatum(HeapTupleHeaderGetRawCommandId(tup->t_data));
756 break;
757 case TableOidAttributeNumber:
758 result = ObjectIdGetDatum(tup->t_tableOid);
759 break;
760 default:
761 elog(ERROR, "invalid attnum: %d", attnum);
762 result = 0; /* keep compiler quiet */
763 break;
764 }
765 return result;
766}
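/*
 * Usage sketch (illustrative): callers normally arrive here through
 * heap_getattr(), which routes attnum <= 0 to this function, e.g.:
 *
 *		bool	isnull;
 *		Datum	d;
 *
 *		d = heap_getattr(tup, SelfItemPointerAttributeNumber,
 *						 tupdesc, &isnull);
 *		// d points at tup->t_self, so it is only valid while "tup" is
 */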
767
768/* ----------------
769 * heap_copytuple
770 *
771 * returns a copy of an entire tuple
772 *
773 * The HeapTuple struct, tuple header, and tuple data are all allocated
774 * as a single palloc() block.
775 * ----------------
776 */
777HeapTuple
778heap_copytuple(HeapTuple tuple)
779{
780 HeapTuple newTuple;
781
782 if (!HeapTupleIsValid(tuple) || tuple->t_data == NULL)
783 return NULL;
784
785 newTuple = (HeapTuple) palloc(HEAPTUPLESIZE + tuple->t_len);
786 newTuple->t_len = tuple->t_len;
787 newTuple->t_self = tuple->t_self;
788 newTuple->t_tableOid = tuple->t_tableOid;
789 newTuple->t_data = (HeapTupleHeader) ((char *) newTuple + HEAPTUPLESIZE);
790 memcpy(newTuple->t_data, tuple->t_data, tuple->t_len);
791 return newTuple;
792}
793
794/* ----------------
795 * heap_copytuple_with_tuple
796 *
797 * copy a tuple into a caller-supplied HeapTuple management struct
798 *
799 * Note that after calling this function, the "dest" HeapTuple will not be
800 * allocated as a single palloc() block (unlike with heap_copytuple()).
801 * ----------------
802 */
803void
804heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
805{
806 if (!HeapTupleIsValid(src) || src->t_data == NULL)
807 {
808 dest->t_data = NULL;
809 return;
810 }
811
812 dest->t_len = src->t_len;
813 dest->t_self = src->t_self;
814 dest->t_tableOid = src->t_tableOid;
815 dest->t_data = (HeapTupleHeader) palloc(src->t_len);
816 memcpy(dest->t_data, src->t_data, src->t_len);
817}
818
819/*
820 * Expand a tuple which has fewer attributes than required. For each attribute
821 * not present in the sourceTuple, if there is a missing value that will be
822 * used. Otherwise the attribute will be set to NULL.
823 *
824 * The source tuple must have fewer attributes than the required number.
825 *
826 * Only one of targetHeapTuple and targetMinimalTuple may be supplied. The
827 * other argument must be NULL.
828 */
829static void
830expand_tuple(HeapTuple *targetHeapTuple,
831 MinimalTuple *targetMinimalTuple,
832 HeapTuple sourceTuple,
833 TupleDesc tupleDesc)
834{
835 AttrMissing *attrmiss = NULL;
836 int attnum;
837 int firstmissingnum;
838 bool hasNulls = HeapTupleHasNulls(sourceTuple);
839 HeapTupleHeader targetTHeader;
840 HeapTupleHeader sourceTHeader = sourceTuple->t_data;
841 int sourceNatts = HeapTupleHeaderGetNatts(sourceTHeader);
842 int natts = tupleDesc->natts;
843 int sourceNullLen;
844 int targetNullLen;
845 Size sourceDataLen = sourceTuple->t_len - sourceTHeader->t_hoff;
846 Size targetDataLen;
847 Size len;
848 int hoff;
849 bits8 *nullBits = NULL;
850 int bitMask = 0;
851 char *targetData;
852 uint16 *infoMask;
853
854 Assert((targetHeapTuple && !targetMinimalTuple)
855 || (!targetHeapTuple && targetMinimalTuple));
856
857 Assert(sourceNatts < natts);
858
859 sourceNullLen = (hasNulls ? BITMAPLEN(sourceNatts) : 0);
860
861 targetDataLen = sourceDataLen;
862
863 if (tupleDesc->constr &&
864 tupleDesc->constr->missing)
865 {
866 /*
867 * If there are missing values we want to put them into the tuple.
868 * Before that we have to compute the extra length for the values
869 * array and the variable length data.
870 */
871 attrmiss = tupleDesc->constr->missing;
872
873 /*
874 * Find the first item in attrmiss for which we don't have a value in
875 * the source. We can ignore all the missing entries before that.
876 */
877 for (firstmissingnum = sourceNatts;
878 firstmissingnum < natts;
879 firstmissingnum++)
880 {
881 if (attrmiss[firstmissingnum].am_present)
882 break;
883 else
884 hasNulls = true;
885 }
886
887 /*
888 * Now walk the missing attributes. If there is a missing value make
889 * space for it. Otherwise, it's going to be NULL.
890 */
891 for (attnum = firstmissingnum;
892 attnum < natts;
893 attnum++)
894 {
895 if (attrmiss[attnum].am_present)
896 {
897 CompactAttribute *att = TupleDescCompactAttr(tupleDesc, attnum);
898
899 targetDataLen = att_datum_alignby(targetDataLen,
900 att->attalignby,
901 att->attlen,
902 attrmiss[attnum].am_value);
903
904 targetDataLen = att_addlength_pointer(targetDataLen,
905 att->attlen,
906 attrmiss[attnum].am_value);
907 }
908 else
909 {
910 /* no missing value, so it must be null */
911 hasNulls = true;
912 }
913 }
914 } /* end if have missing values */
915 else
916 {
917 /*
918 * If there are no missing values at all then NULLS must be allowed,
919 * since some of the attributes are known to be absent.
920 */
921 hasNulls = true;
922 }
923
924 len = 0;
925
926 if (hasNulls)
927 {
928 targetNullLen = BITMAPLEN(natts);
929 len += targetNullLen;
930 }
931 else
932 targetNullLen = 0;
933
934 /*
935 * Allocate and zero the space needed. Note that the tuple body and
936 * HeapTupleData management structure are allocated in one chunk.
937 */
938 if (targetHeapTuple)
939 {
940 len += offsetof(HeapTupleHeaderData, t_bits);
941 hoff = len = MAXALIGN(len); /* align user data safely */
942 len += targetDataLen;
943
944 *targetHeapTuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
945 (*targetHeapTuple)->t_data
946 = targetTHeader
947 = (HeapTupleHeader) ((char *) *targetHeapTuple + HEAPTUPLESIZE);
948 (*targetHeapTuple)->t_len = len;
949 (*targetHeapTuple)->t_tableOid = sourceTuple->t_tableOid;
950 (*targetHeapTuple)->t_self = sourceTuple->t_self;
951
952 targetTHeader->t_infomask = sourceTHeader->t_infomask;
953 targetTHeader->t_hoff = hoff;
954 HeapTupleHeaderSetNatts(targetTHeader, natts);
955 HeapTupleHeaderSetDatumLength(targetTHeader, len);
956 HeapTupleHeaderSetTypeId(targetTHeader, tupleDesc->tdtypeid);
957 HeapTupleHeaderSetTypMod(targetTHeader, tupleDesc->tdtypmod);
958 /* We also make sure that t_ctid is invalid unless explicitly set */
959 ItemPointerSetInvalid(&(targetTHeader->t_ctid));
960 if (targetNullLen > 0)
961 nullBits = (bits8 *) ((char *) (*targetHeapTuple)->t_data
962 + offsetof(HeapTupleHeaderData, t_bits));
963 targetData = (char *) (*targetHeapTuple)->t_data + hoff;
964 infoMask = &(targetTHeader->t_infomask);
965 }
966 else
967 {
968 len += SizeofMinimalTupleHeader;
969 hoff = len = MAXALIGN(len); /* align user data safely */
970 len += targetDataLen;
971
972 *targetMinimalTuple = (MinimalTuple) palloc0(len);
973 (*targetMinimalTuple)->t_len = len;
974 (*targetMinimalTuple)->t_hoff = hoff + MINIMAL_TUPLE_OFFSET;
975 (*targetMinimalTuple)->t_infomask = sourceTHeader->t_infomask;
976 /* Same macro works for MinimalTuples */
977 HeapTupleHeaderSetNatts(*targetMinimalTuple, natts);
978 if (targetNullLen > 0)
979 nullBits = (bits8 *) ((char *) *targetMinimalTuple
980 + offsetof(MinimalTupleData, t_bits));
981 targetData = (char *) *targetMinimalTuple + hoff;
982 infoMask = &((*targetMinimalTuple)->t_infomask);
983 }
984
985 if (targetNullLen > 0)
986 {
987 if (sourceNullLen > 0)
988 {
989 /* if bitmap pre-existed copy in - all is set */
990 memcpy(nullBits,
991 ((char *) sourceTHeader)
992 + offsetof(HeapTupleHeaderData, t_bits),
993 sourceNullLen);
994 nullBits += sourceNullLen - 1;
995 }
996 else
997 {
998 sourceNullLen = BITMAPLEN(sourceNatts);
999 /* Set NOT NULL for all existing attributes */
1000 memset(nullBits, 0xff, sourceNullLen);
1001
1002 nullBits += sourceNullLen - 1;
1003
1004 if (sourceNatts & 0x07)
1005 {
1006 /* build the mask (inverted!) */
1007 bitMask = 0xff << (sourceNatts & 0x07);
1008 /* Voila */
1009 *nullBits = ~bitMask;
1010 }
1011 }
1012
1013 bitMask = (1 << ((sourceNatts - 1) & 0x07));
1014 } /* End if have null bitmap */
1015
1016 memcpy(targetData,
1017 ((char *) sourceTuple->t_data) + sourceTHeader->t_hoff,
1018 sourceDataLen);
1019
1020 targetData += sourceDataLen;
1021
1022 /* Now fill in the missing values */
1023 for (attnum = sourceNatts; attnum < natts; attnum++)
1024 {
1025 CompactAttribute *attr = TupleDescCompactAttr(tupleDesc, attnum);
1026
1027 if (attrmiss && attrmiss[attnum].am_present)
1028 {
1029 fill_val(attr,
1030 nullBits ? &nullBits : NULL,
1031 &bitMask,
1032 &targetData,
1033 infoMask,
1034 attrmiss[attnum].am_value,
1035 false);
1036 }
1037 else
1038 {
1039 fill_val(attr,
1040 &nullBits,
1041 &bitMask,
1042 &targetData,
1043 infoMask,
1044 (Datum) 0,
1045 true);
1046 }
1047 } /* end loop over missing attributes */
1048}
1049
1050/*
1051 * Fill in the missing values for a minimal HeapTuple
1052 */
1053MinimalTuple
1054minimal_expand_tuple(HeapTuple sourceTuple, TupleDesc tupleDesc)
1055{
1056 MinimalTuple minimalTuple;
1057
1058 expand_tuple(NULL, &minimalTuple, sourceTuple, tupleDesc);
1059 return minimalTuple;
1060}
1061
1062/*
1063 * Fill in the missing values for an ordinary HeapTuple
1064 */
1065HeapTuple
1066heap_expand_tuple(HeapTuple sourceTuple, TupleDesc tupleDesc)
1067{
1068 HeapTuple heapTuple;
1069
1070 expand_tuple(&heapTuple, NULL, sourceTuple, tupleDesc);
1071 return heapTuple;
1072}
1073
1074/* ----------------
1075 * heap_copy_tuple_as_datum
1076 *
1077 * copy a tuple as a composite-type Datum
1078 * ----------------
1079 */
1080Datum
1081heap_copy_tuple_as_datum(HeapTuple tuple, TupleDesc tupleDesc)
1082{
1083 HeapTupleHeader td;
1084
1085 /*
1086 * If the tuple contains any external TOAST pointers, we have to inline
1087 * those fields to meet the conventions for composite-type Datums.
1088 */
1089 if (HeapTupleHasExternal(tuple))
1090 return toast_flatten_tuple_to_datum(tuple->t_data,
1091 tuple->t_len,
1092 tupleDesc);
1093
1094 /*
1095 * Fast path for easy case: just make a palloc'd copy and insert the
1096 * correct composite-Datum header fields (since those may not be set if
1097 * the given tuple came from disk, rather than from heap_form_tuple).
1098 */
1099 td = (HeapTupleHeader) palloc(tuple->t_len);
1100 memcpy(td, tuple->t_data, tuple->t_len);
1101
1102 HeapTupleHeaderSetDatumLength(td, tuple->t_len);
1103 HeapTupleHeaderSetTypeId(td, tupleDesc->tdtypeid);
1104 HeapTupleHeaderSetTypMod(td, tupleDesc->tdtypmod);
1105
1106 return PointerGetDatum(td);
1107}
1108
1109/*
1110 * heap_form_tuple
1111 * construct a tuple from the given values[] and isnull[] arrays,
1112 * which are of the length indicated by tupleDescriptor->natts
1113 *
1114 * The result is allocated in the current memory context.
1115 */
1116HeapTuple
1117heap_form_tuple(TupleDesc tupleDescriptor,
1118 const Datum *values,
1119 const bool *isnull)
1120{
1121 HeapTuple tuple; /* return tuple */
1122 HeapTupleHeader td; /* tuple data */
1123 Size len,
1124 data_len;
1125 int hoff;
1126 bool hasnull = false;
1127 int numberOfAttributes = tupleDescriptor->natts;
1128 int i;
1129
1130 if (numberOfAttributes > MaxTupleAttributeNumber)
1131 ereport(ERROR,
1132 (errcode(ERRCODE_TOO_MANY_COLUMNS),
1133 errmsg("number of columns (%d) exceeds limit (%d)",
1134 numberOfAttributes, MaxTupleAttributeNumber)));
1135
1136 /*
1137 * Check for nulls
1138 */
1139 for (i = 0; i < numberOfAttributes; i++)
1140 {
1141 if (isnull[i])
1142 {
1143 hasnull = true;
1144 break;
1145 }
1146 }
1147
1148 /*
1149 * Determine total space needed
1150 */
1151 len = offsetof(HeapTupleHeaderData, t_bits);
1152
1153 if (hasnull)
1154 len += BITMAPLEN(numberOfAttributes);
1155
1156 hoff = len = MAXALIGN(len); /* align user data safely */
1157
1158 data_len = heap_compute_data_size(tupleDescriptor, values, isnull);
1159
1160 len += data_len;
1161
1162 /*
1163 * Allocate and zero the space needed. Note that the tuple body and
1164 * HeapTupleData management structure are allocated in one chunk.
1165 */
1166 tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
1167 tuple->t_data = td = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
1168
1169 /*
1170 * And fill in the information. Note we fill the Datum fields even though
1171 * this tuple may never become a Datum. This lets HeapTupleHeaderGetDatum
1172 * identify the tuple type if needed.
1173 */
1174 tuple->t_len = len;
1175 ItemPointerSetInvalid(&(tuple->t_self));
1176 tuple->t_tableOid = InvalidOid;
1177
1178 HeapTupleHeaderSetDatumLength(td, len);
1179 HeapTupleHeaderSetTypeId(td, tupleDescriptor->tdtypeid);
1180 HeapTupleHeaderSetTypMod(td, tupleDescriptor->tdtypmod);
1181 /* We also make sure that t_ctid is invalid unless explicitly set */
1182 ItemPointerSetInvalid(&(td->t_ctid));
1183
1184 HeapTupleHeaderSetNatts(td, numberOfAttributes);
1185 td->t_hoff = hoff;
1186
1187 heap_fill_tuple(tupleDescriptor,
1188 values,
1189 isnull,
1190 (char *) td + hoff,
1191 data_len,
1192 &td->t_infomask,
1193 (hasnull ? td->t_bits : NULL));
1194
1195 return tuple;
1196}
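/*
 * Usage sketch (illustrative; the descriptor and values are invented):
 * building a two-column (int4, text) tuple from C code looks like
 *
 *		Datum		values[2];
 *		bool		isnull[2] = {false, false};
 *		HeapTuple	tup;
 *
 *		values[0] = Int32GetDatum(42);
 *		values[1] = CStringGetTextDatum("hello");
 *		tup = heap_form_tuple(tupdesc, values, isnull);
 *		...
 *		heap_freetuple(tup);
 *
 * Packable varlena values (such as the text datum here) may end up stored
 * in the 1-byte-header form, per the notes at the top of this file.
 */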
1197
1198/*
1199 * heap_modify_tuple
1200 * form a new tuple from an old tuple and a set of replacement values.
1201 *
1202 * The replValues, replIsnull, and doReplace arrays must be of the length
1203 * indicated by tupleDesc->natts. The new tuple is constructed using the data
1204 * from replValues/replIsnull at columns where doReplace is true, and using
1205 * the data from the old tuple at columns where doReplace is false.
1206 *
1207 * The result is allocated in the current memory context.
1208 */
1209HeapTuple
1210heap_modify_tuple(HeapTuple tuple,
1211 TupleDesc tupleDesc,
1212 const Datum *replValues,
1213 const bool *replIsnull,
1214 const bool *doReplace)
1215{
1216 int numberOfAttributes = tupleDesc->natts;
1217 int attoff;
1218 Datum *values;
1219 bool *isnull;
1220 HeapTuple newTuple;
1221
1222 /*
1223 * allocate and fill values and isnull arrays from either the tuple or the
1224 * repl information, as appropriate.
1225 *
1226 * NOTE: it's debatable whether to use heap_deform_tuple() here or just
1227 * heap_getattr() only the non-replaced columns. The latter could win if
1228 * there are many replaced columns and few non-replaced ones. However,
1229 * heap_deform_tuple costs only O(N) while the heap_getattr way would cost
1230 * O(N^2) if there are many non-replaced columns, so it seems better to
1231 * err on the side of linear cost.
1232 */
1233 values = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
1234 isnull = (bool *) palloc(numberOfAttributes * sizeof(bool));
1235
1236 heap_deform_tuple(tuple, tupleDesc, values, isnull);
1237
1238 for (attoff = 0; attoff < numberOfAttributes; attoff++)
1239 {
1240 if (doReplace[attoff])
1241 {
1242 values[attoff] = replValues[attoff];
1243 isnull[attoff] = replIsnull[attoff];
1244 }
1245 }
1246
1247 /*
1248 * create a new tuple from the values and isnull arrays
1249 */
1250 newTuple = heap_form_tuple(tupleDesc, values, isnull);
1251
1252 pfree(values);
1253 pfree(isnull);
1254
1255 /*
1256 * copy the identification info of the old tuple: t_ctid, t_self
1257 */
1258 newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
1259 newTuple->t_self = tuple->t_self;
1260 newTuple->t_tableOid = tuple->t_tableOid;
1261
1262 return newTuple;
1263}
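/*
 * Usage sketch (illustrative; Natts_mytable, Anum_mytable_col3 and newval
 * are invented names): replacing a single column while keeping the rest of
 * the tuple:
 *
 *		Datum		repl_val[Natts_mytable];
 *		bool		repl_null[Natts_mytable];
 *		bool		repl_repl[Natts_mytable];
 *
 *		memset(repl_repl, false, sizeof(repl_repl));
 *		repl_repl[Anum_mytable_col3 - 1] = true;
 *		repl_val[Anum_mytable_col3 - 1] = newval;
 *		repl_null[Anum_mytable_col3 - 1] = false;
 *
 *		newtup = heap_modify_tuple(oldtup, tupdesc,
 *								   repl_val, repl_null, repl_repl);
 *
 * heap_modify_tuple_by_cols() below does the same given an array of 1-based
 * target column numbers instead of the boolean replace map.
 */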
1264
1265/*
1266 * heap_modify_tuple_by_cols
1267 * form a new tuple from an old tuple and a set of replacement values.
1268 *
1269 * This is like heap_modify_tuple, except that instead of specifying which
1270 * column(s) to replace by a boolean map, an array of target column numbers
1271 * is used. This is often more convenient when a fixed number of columns
1272 * are to be replaced. The replCols, replValues, and replIsnull arrays must
1273 * be of length nCols. Target column numbers are indexed from 1.
1274 *
1275 * The result is allocated in the current memory context.
1276 */
1277HeapTuple
1278heap_modify_tuple_by_cols(HeapTuple tuple,
1279 TupleDesc tupleDesc,
1280 int nCols,
1281 const int *replCols,
1282 const Datum *replValues,
1283 const bool *replIsnull)
1284{
1285 int numberOfAttributes = tupleDesc->natts;
1286 Datum *values;
1287 bool *isnull;
1288 HeapTuple newTuple;
1289 int i;
1290
1291 /*
1292 * allocate and fill values and isnull arrays from the tuple, then replace
1293 * selected columns from the input arrays.
1294 */
1295 values = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
1296 isnull = (bool *) palloc(numberOfAttributes * sizeof(bool));
1297
1298 heap_deform_tuple(tuple, tupleDesc, values, isnull);
1299
1300 for (i = 0; i < nCols; i++)
1301 {
1302 int attnum = replCols[i];
1303
1304 if (attnum <= 0 || attnum > numberOfAttributes)
1305 elog(ERROR, "invalid column number %d", attnum);
1306 values[attnum - 1] = replValues[i];
1307 isnull[attnum - 1] = replIsnull[i];
1308 }
1309
1310 /*
1311 * create a new tuple from the values and isnull arrays
1312 */
1313 newTuple = heap_form_tuple(tupleDesc, values, isnull);
1314
1315 pfree(values);
1316 pfree(isnull);
1317
1318 /*
1319 * copy the identification info of the old tuple: t_ctid, t_self
1320 */
1321 newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
1322 newTuple->t_self = tuple->t_self;
1323 newTuple->t_tableOid = tuple->t_tableOid;
1324
1325 return newTuple;
1326}
1327
1328/*
1329 * heap_deform_tuple
1330 * Given a tuple, extract data into values/isnull arrays; this is
1331 * the inverse of heap_form_tuple.
1332 *
1333 * Storage for the values/isnull arrays is provided by the caller;
1334 * it should be sized according to tupleDesc->natts not
1335 * HeapTupleHeaderGetNatts(tuple->t_data).
1336 *
1337 * Note that for pass-by-reference datatypes, the pointer placed
1338 * in the Datum will point into the given tuple.
1339 *
1340 * When all or most of a tuple's fields need to be extracted,
1341 * this routine will be significantly quicker than a loop around
1342 * heap_getattr; the loop will become O(N^2) as soon as any
1343 * noncacheable attribute offsets are involved.
1344 */
1345void
1346heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
1347 Datum *values, bool *isnull)
1348{
1349 HeapTupleHeader tup = tuple->t_data;
1350 bool hasnulls = HeapTupleHasNulls(tuple);
1351 int tdesc_natts = tupleDesc->natts;
1352 int natts; /* number of atts to extract */
1353 int attnum;
1354 char *tp; /* ptr to tuple data */
1355 uint32 off; /* offset in tuple data */
1356 bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
1357 bool slow = false; /* can we use/set attcacheoff? */
1358
1359 natts = HeapTupleHeaderGetNatts(tup);
1360
1361 /*
1362 * In inheritance situations, it is possible that the given tuple actually
1363 * has more fields than the caller is expecting. Don't run off the end of
1364 * the caller's arrays.
1365 */
1366 natts = Min(natts, tdesc_natts);
1367
1368 tp = (char *) tup + tup->t_hoff;
1369
1370 off = 0;
1371
1372 for (attnum = 0; attnum < natts; attnum++)
1373 {
1374 CompactAttribute *thisatt = TupleDescCompactAttr(tupleDesc, attnum);
1375
1376 if (hasnulls && att_isnull(attnum, bp))
1377 {
1378 values[attnum] = (Datum) 0;
1379 isnull[attnum] = true;
1380 slow = true; /* can't use attcacheoff anymore */
1381 continue;
1382 }
1383
1384 isnull[attnum] = false;
1385
1386 if (!slow && thisatt->attcacheoff >= 0)
1387 off = thisatt->attcacheoff;
1388 else if (thisatt->attlen == -1)
1389 {
1390 /*
1391 * We can only cache the offset for a varlena attribute if the
1392 * offset is already suitably aligned, so that there would be no
1393 * pad bytes in any case: then the offset will be valid for either
1394 * an aligned or unaligned value.
1395 */
1396 if (!slow &&
1397 off == att_nominal_alignby(off, thisatt->attalignby))
1398 thisatt->attcacheoff = off;
1399 else
1400 {
1401 off = att_pointer_alignby(off, thisatt->attalignby, -1,
1402 tp + off);
1403 slow = true;
1404 }
1405 }
1406 else
1407 {
1408 /* not varlena, so safe to use att_nominal_alignby */
1409 off = att_nominal_alignby(off, thisatt->attalignby);
1410
1411 if (!slow)
1412 thisatt->attcacheoff = off;
1413 }
1414
1415 values[attnum] = fetchatt(thisatt, tp + off);
1416
1417 off = att_addlength_pointer(off, thisatt->attlen, tp + off);
1418
1419 if (thisatt->attlen <= 0)
1420 slow = true; /* can't use attcacheoff anymore */
1421 }
1422
1423 /*
1424 * If tuple doesn't have all the atts indicated by tupleDesc, read the
1425 * rest as nulls or missing values as appropriate.
1426 */
1427 for (; attnum < tdesc_natts; attnum++)
1428 values[attnum] = getmissingattr(tupleDesc, attnum + 1, &isnull[attnum]);
1429}
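/*
 * Usage sketch (illustrative): extracting every column of a fetched tuple,
 * sizing the output arrays by tupdesc->natts as required above:
 *
 *		Datum	   *values = palloc(tupdesc->natts * sizeof(Datum));
 *		bool	   *isnull = palloc(tupdesc->natts * sizeof(bool));
 *
 *		heap_deform_tuple(tup, tupdesc, values, isnull);
 *
 * Pass-by-reference Datums in "values" point into "tup" (or, for missing
 * attributes, into the long-lived missing-values cache), so "tup" must stay
 * allocated for as long as those Datums are used.
 */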
1430
1431/*
1432 * heap_freetuple
1433 */
1434void
1435heap_freetuple(HeapTuple htup)
1436{
1437 pfree(htup);
1438}
1439
1440
1441/*
1442 * heap_form_minimal_tuple
1443 * construct a MinimalTuple from the given values[] and isnull[] arrays,
1444 * which are of the length indicated by tupleDescriptor->natts
1445 *
1446 * This is exactly like heap_form_tuple() except that the result is a
1447 * "minimal" tuple lacking a HeapTupleData header as well as room for system
1448 * columns.
1449 *
1450 * The result is allocated in the current memory context.
1451 */
1452MinimalTuple
1453heap_form_minimal_tuple(TupleDesc tupleDescriptor,
1454 const Datum *values,
1455 const bool *isnull,
1456 Size extra)
1457{
1458 MinimalTuple tuple; /* return tuple */
1459 char *mem;
1460 Size len,
1461 data_len;
1462 int hoff;
1463 bool hasnull = false;
1464 int numberOfAttributes = tupleDescriptor->natts;
1465 int i;
1466
1467 Assert(extra == MAXALIGN(extra));
1468
1469 if (numberOfAttributes > MaxTupleAttributeNumber)
1470 ereport(ERROR,
1471 (errcode(ERRCODE_TOO_MANY_COLUMNS),
1472 errmsg("number of columns (%d) exceeds limit (%d)",
1473 numberOfAttributes, MaxTupleAttributeNumber)));
1474
1475 /*
1476 * Check for nulls
1477 */
1478 for (i = 0; i < numberOfAttributes; i++)
1479 {
1480 if (isnull[i])
1481 {
1482 hasnull = true;
1483 break;
1484 }
1485 }
1486
1487 /*
1488 * Determine total space needed
1489 */
1490 len = SizeofMinimalTupleHeader;
1491
1492 if (hasnull)
1493 len += BITMAPLEN(numberOfAttributes);
1494
1495 hoff = len = MAXALIGN(len); /* align user data safely */
1496
1497 data_len = heap_compute_data_size(tupleDescriptor, values, isnull);
1498
1499 len += data_len;
1500
1501 /*
1502 * Allocate and zero the space needed.
1503 */
1504 mem = palloc0(len + extra);
1505 memset(mem, 0, extra);
1506 tuple = (MinimalTuple) (mem + extra);
1507
1508 /*
1509 * And fill in the information.
1510 */
1511 tuple->t_len = len;
1512 HeapTupleHeaderSetNatts(tuple, numberOfAttributes);
1513 tuple->t_hoff = hoff + MINIMAL_TUPLE_OFFSET;
1514
1515 heap_fill_tuple(tupleDescriptor,
1516 values,
1517 isnull,
1518 (char *) tuple + hoff,
1519 data_len,
1520 &tuple->t_infomask,
1521 (hasnull ? tuple->t_bits : NULL));
1522
1523 return tuple;
1524}
1525
1526/*
1527 * heap_free_minimal_tuple
1528 */
1529void
1530heap_free_minimal_tuple(MinimalTuple mtup)
1531{
1532 pfree(mtup);
1533}
1534
1535/*
1536 * heap_copy_minimal_tuple
1537 * copy a MinimalTuple
1538 *
1539 * The result is allocated in the current memory context.
1540 */
1541MinimalTuple
1542heap_copy_minimal_tuple(MinimalTuple mtup, Size extra)
1543{
1544 MinimalTuple result;
1545 char *mem;
1546
1547 Assert(extra == MAXALIGN(extra));
1548 mem = palloc(mtup->t_len + extra);
1549 memset(mem, 0, extra);
1550 result = (MinimalTuple) (mem + extra);
1551 memcpy(result, mtup, mtup->t_len);
1552 return result;
1553}
1554
1555/*
1556 * heap_tuple_from_minimal_tuple
1557 * create a HeapTuple by copying from a MinimalTuple;
1558 * system columns are filled with zeroes
1559 *
1560 * The result is allocated in the current memory context.
1561 * The HeapTuple struct, tuple header, and tuple data are all allocated
1562 * as a single palloc() block.
1563 */
1564HeapTuple
1565heap_tuple_from_minimal_tuple(MinimalTuple mtup)
1566{
1567 HeapTuple result;
1568 uint32 len = mtup->t_len + MINIMAL_TUPLE_OFFSET;
1569
1570 result = (HeapTuple) palloc(HEAPTUPLESIZE + len);
1571 result->t_len = len;
1572 ItemPointerSetInvalid(&(result->t_self));
1573 result->t_tableOid = InvalidOid;
1574 result->t_data = (HeapTupleHeader) ((char *) result + HEAPTUPLESIZE);
1575 memcpy((char *) result->t_data + MINIMAL_TUPLE_OFFSET, mtup, mtup->t_len);
1576 memset(result->t_data, 0, offsetof(HeapTupleHeaderData, t_infomask2));
1577 return result;
1578}
1579
1580/*
1581 * minimal_tuple_from_heap_tuple
1582 * create a MinimalTuple by copying from a HeapTuple
1583 *
1584 * The result is allocated in the current memory context.
1585 */
1586MinimalTuple
1587minimal_tuple_from_heap_tuple(HeapTuple htup, Size extra)
1588{
1589 MinimalTuple result;
1590 char *mem;
1591 uint32 len;
1592
1593 Assert(extra == MAXALIGN(extra));
1595 len = htup->t_len - MINIMAL_TUPLE_OFFSET;
1596 mem = palloc(len + extra);
1597 memset(mem, 0, extra);
1598 result = (MinimalTuple) (mem + extra);
1599 memcpy(result, (char *) htup->t_data + MINIMAL_TUPLE_OFFSET, len);
1600
1601 result->t_len = len;
1602 return result;
1603}
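/*
 * A note on the two conversions above (explanatory): a MinimalTuple is laid
 * out like the tail of a HeapTupleHeader starting MINIMAL_TUPLE_OFFSET bytes
 * in, so each direction is essentially a memcpy shifted by that constant;
 * heap_tuple_from_minimal_tuple() additionally zeroes the leading header
 * fields, since a MinimalTuple carries no system columns.
 */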
1604
1605/*
1606 * This mainly exists so JIT can inline the definition, but it's also
1607 * sometimes useful in debugging sessions.
1608 */
1609size_t
1610varsize_any(void *p)
1611{
1612 return VARSIZE_ANY(p);
1613}