/*-------------------------------------------------------------------------
 *
 * nbtutils.c
 *	  Utility code for Postgres btree implementation.
 *
 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/access/nbtree/nbtutils.c
 *
 *-------------------------------------------------------------------------
 */
15
16#include "postgres.h"
17
18#include <time.h>
19
20#include "access/nbtree.h"
21#include "access/reloptions.h"
22#include "access/relscan.h"
23#include "commands/progress.h"
24#include "common/int.h"
25#include "lib/qunique.h"
26#include "miscadmin.h"
27#include "storage/lwlock.h"
28#include "storage/subsystems.h"
29#include "utils/datum.h"
30#include "utils/lsyscache.h"
31#include "utils/rel.h"
32
33
static int	_bt_compare_int(const void *va, const void *vb);
static int	_bt_keep_natts(Relation rel, IndexTuple lastleft,
						   IndexTuple firstright, BTScanInsert itup_key);


/*
 * _bt_mkscankey
 *		Build an insertion scan key that contains comparison data from itup
 *		as well as comparator routines appropriate to the key datatypes.
 *
 * The result is intended for use with _bt_compare() and _bt_truncate().
 * Callers that don't need to fill out the insertion scankey arguments
 * (e.g. they use an ad-hoc comparison routine, or only need a scankey
 * for _bt_truncate()) can pass a NULL index tuple.  The scankey will
 * be initialized as if an "all truncated" pivot tuple was passed
 * instead.
 *
 * Note that we may occasionally have to share lock the metapage to
 * determine whether or not the keys in the index are expected to be
 * unique (i.e. if this is a "heapkeyspace" index).  We assume a
 * heapkeyspace index when caller passes a NULL tuple, allowing index
 * build callers to avoid accessing the non-existent metapage.  We
 * also assume that the index is _not_ allequalimage when a NULL tuple
 * is passed; CREATE INDEX callers call _bt_allequalimage() to set the
 * field themselves.
 */
BTScanInsert
_bt_mkscankey(Relation rel, IndexTuple itup)
{
	BTScanInsert key;
	ScanKey		skey;
	TupleDesc	itupdesc;
	int			indnkeyatts;
	int16	   *indoption;
	int			tupnatts;
	int			i;

	itupdesc = RelationGetDescr(rel);
	indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
	indoption = rel->rd_indoption;
	tupnatts = itup ? BTreeTupleGetNAtts(itup, rel) : 0;

	Assert(tupnatts <= IndexRelationGetNumberOfAttributes(rel));
	/*
	 * We'll execute search using scan key constructed on key columns.
	 * Truncated attributes and non-key attributes are omitted from the final
	 * scan key.
	 */
	key = palloc(offsetof(BTScanInsertData, scankeys) +
				 sizeof(ScanKeyData) * indnkeyatts);
	if (itup)
		_bt_metaversion(rel, &key->heapkeyspace, &key->allequalimage);
	else
	{
		/* Utility statement callers can set these fields themselves */
		key->heapkeyspace = true;
		key->allequalimage = false;
	}
	key->anynullkeys = false;	/* initial assumption */
	key->nextkey = false;		/* usual case, required by btinsert */
	key->backward = false;		/* usual case, required by btinsert */
	key->keysz = Min(indnkeyatts, tupnatts);
	key->scantid = key->heapkeyspace && itup ?
		BTreeTupleGetHeapTID(itup) : NULL;
	skey = key->scankeys;
	for (i = 0; i < indnkeyatts; i++)
	{
		FmgrInfo   *procinfo;
		Datum		arg;
		bool		null;
		int			flags;

		/*
		 * We can use the cached (default) support procs since no cross-type
		 * comparison can be needed.
		 */
		procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);

		/*
		 * Key arguments built from truncated attributes (or when caller
		 * provides no tuple) are defensively represented as NULL values.
		 * They should never be used.
		 */
		if (i < tupnatts)
			arg = index_getattr(itup, i + 1, itupdesc, &null);
		else
		{
			arg = (Datum) 0;
			null = true;
		}
		flags = (null ? SK_ISNULL : 0) | (indoption[i] << SK_BT_INDOPTION_SHIFT);
		ScanKeyEntryInitializeWithInfo(&skey[i],
									   flags,
									   (AttrNumber) (i + 1),
									   InvalidStrategy,
									   InvalidOid,
									   rel->rd_indcollation[i],
									   procinfo,
									   arg);
		/* Record if any key attribute is NULL (or truncated) */
		if (null)
			key->anynullkeys = true;
	}

	/*
	 * In NULLS NOT DISTINCT mode, we pretend that there are no null keys, so
	 * that a full uniqueness check is done.
	 */
	if (rel->rd_index->indnullsnotdistinct)
		key->anynullkeys = false;

	return key;
}
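
#ifdef NOT_USED
/*
 * Illustrative sketch, not part of the original file: the typical
 * _bt_doinsert()-style caller pattern.  The insertion scan key built here
 * describes itup's key column values, ready for use with _bt_search() and
 * _bt_compare().  The "example_" name is hypothetical.
 */
static void
example_mkscankey(Relation rel, IndexTuple itup)
{
	BTScanInsert itup_key = _bt_mkscankey(rel, itup);

	Assert(itup_key->keysz <= IndexRelationGetNumberOfKeyAttributes(rel));
	/* ... descend the tree with _bt_search(), then use _bt_compare() ... */
	pfree(itup_key);
}
#endif							/* NOT_USED */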

/*
 * qsort comparison function for int arrays
 */
static int
_bt_compare_int(const void *va, const void *vb)
{
	int			a = *((const int *) va);
	int			b = *((const int *) vb);

	return pg_cmp_s32(a, b);
}
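
#ifdef NOT_USED
/*
 * Illustrative sketch, not part of the original file: _bt_compare_int is
 * built for the qsort() + qunique() combination used by _bt_killitems()
 * below.  qunique() requires sorted input and returns the de-duplicated
 * element count.  The "example_" name is hypothetical.
 */
static int
example_sort_unique(int *items, int nitems)
{
	qsort(items, nitems, sizeof(int), _bt_compare_int);
	return (int) qunique(items, nitems, sizeof(int), _bt_compare_int);
}
#endif							/* NOT_USED */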

/*
 * _bt_killitems - set LP_DEAD state for items an indexscan caller has
 * told us were killed
 *
 * scan->opaque, referenced locally through so, contains information about the
 * current page and killed tuples thereon (generally, this should only be
 * called if so->numKilled > 0).
 *
 * Caller should not have a lock on the so->currPos page, but must hold a
 * buffer pin when !so->dropPin.  When we return, it still won't be locked.
 * It'll continue to hold whatever pins were held before calling here.
 *
 * We match items by heap TID before assuming they are the right ones to set
 * LP_DEAD.  If the scan is one that holds a buffer pin on the target page
 * continuously from initially reading the items until applying this function
 * (if it is a !so->dropPin scan), VACUUM cannot have deleted any items on the
 * page, so the page's TIDs can't have been recycled by now.  There's no risk
 * that we'll confuse a new index tuple that happens to use a recycled TID
 * with a now-removed tuple with the same TID (that used to be on this same
 * page).  We can't rely on that during scans that drop buffer pins eagerly
 * (so->dropPin scans), though, so we must condition setting LP_DEAD bits on
 * the page LSN having not changed since back when _bt_readpage saw the page.
 * We totally give up on setting LP_DEAD bits when the page LSN changed.
 *
 * We give up much less often during !so->dropPin scans, but it still happens.
 * We cope with cases where items have moved right due to insertions.  If an
 * item has moved off the current page due to a split, we'll fail to find it
 * and just give up on it.
 */
void
_bt_killitems(IndexScanDesc scan)
{
	Relation	rel = scan->indexRelation;
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	Page		page;
	BTPageOpaque opaque;
	OffsetNumber minoff;
	OffsetNumber maxoff;
	int			numKilled = so->numKilled;
	bool		killedsomething = false;
	Buffer		buf;

	Assert(numKilled > 0);
	Assert(BTScanPosIsValid(so->currPos));
	Assert(scan->heapRelation != NULL); /* can't be a bitmap index scan */

	/* Always invalidate so->killedItems[] before leaving so->currPos */
	so->numKilled = 0;

	/*
	 * We need to iterate through so->killedItems[] in leaf page order; the
	 * loop below expects this (when marking posting list tuples, at least).
	 * so->killedItems[] is now in whatever order the scan returned items in.
	 * Scrollable cursor scans might have even saved the same item/TID twice.
	 *
	 * Sort and unique-ify so->killedItems[] to deal with all this.
	 */
	if (numKilled > 1)
	{
		qsort(so->killedItems, numKilled, sizeof(int), _bt_compare_int);
		numKilled = qunique(so->killedItems, numKilled, sizeof(int),
							_bt_compare_int);
	}

	if (!so->dropPin)
	{
		/*
		 * We have held the pin on this page since we read the index tuples,
		 * so all we need to do is lock it.  The pin will have prevented
		 * concurrent VACUUMs from recycling any of the TIDs on the page.
		 */
		Assert(BTScanPosIsPinned(so->currPos));
		buf = so->currPos.buf;
		_bt_lockbuf(rel, buf, BT_READ);
	}
	else
	{
		XLogRecPtr	latestlsn;

		Assert(!BTScanPosIsPinned(so->currPos));
		buf = _bt_getbuf(rel, so->currPos.currPage, BT_READ);

		latestlsn = BufferGetLSNAtomic(buf);
		Assert(so->currPos.lsn <= latestlsn);
		if (so->currPos.lsn != latestlsn)
		{
			/* Modified, give up on hinting */
			_bt_relbuf(rel, buf);
			return;
		}

		/* Unmodified, hinting is safe */
	}

	page = BufferGetPage(buf);
	opaque = BTPageGetOpaque(page);
	minoff = P_FIRSTDATAKEY(opaque);
	maxoff = PageGetMaxOffsetNumber(page);

	/* Iterate through so->killedItems[] in leaf page order */
	for (int i = 0; i < numKilled; i++)
	{
		int			itemIndex = so->killedItems[i];
		BTScanPosItem *kitem = &so->currPos.items[itemIndex];
		OffsetNumber offnum = kitem->indexOffset;

		Assert(itemIndex >= so->currPos.firstItem &&
			   itemIndex <= so->currPos.lastItem);
		Assert(i == 0 ||
			   offnum >= so->currPos.items[so->killedItems[i - 1]].indexOffset);

		if (offnum < minoff)
			continue;			/* pure paranoia */
		while (offnum <= maxoff)
		{
			ItemId		iid = PageGetItemId(page, offnum);
			IndexTuple	ituple = (IndexTuple) PageGetItem(page, iid);
			bool		killtuple = false;

			if (BTreeTupleIsPosting(ituple))
			{
				int			pi = i + 1;
				int			nposting = BTreeTupleGetNPosting(ituple);
				int			j;

				/*
				 * Note that the page may have been modified in almost any way
				 * since we first read it (in the !so->dropPin case), so it's
				 * possible that this posting list tuple wasn't a posting list
				 * tuple when we first encountered its heap TIDs.
				 */
				for (j = 0; j < nposting; j++)
				{
					ItemPointer item = BTreeTupleGetPostingN(ituple, j);

					if (!ItemPointerEquals(item, &kitem->heapTid))
						break;	/* out of posting list loop */

					/*
					 * kitem must have matching offnum when heap TIDs match,
					 * though only in the common case where the page can't
					 * have been concurrently modified
					 */
					Assert(kitem->indexOffset == offnum || !so->dropPin);

					/*
					 * Read-ahead to later kitems here.
					 *
					 * We rely on the assumption that not advancing kitem here
					 * will prevent us from considering the posting list tuple
					 * fully dead by not matching its next heap TID in the
					 * next loop iteration.
					 *
					 * If, on the other hand, this is the final heap TID in
					 * the posting list tuple, then the tuple gets killed
					 * regardless (i.e. we handle the case where the last
					 * kitem is also the last heap TID in the last index tuple
					 * correctly -- the posting tuple still gets killed).
					 */
					if (pi < numKilled)
						kitem = &so->currPos.items[so->killedItems[pi++]];
				}

				/*
				 * Don't bother advancing the outermost loop's int iterator to
				 * avoid processing killed items that relate to the same
				 * offnum/posting list tuple.  This micro-optimization hardly
				 * seems worth it.  (Further iterations of the outermost loop
				 * will fail to match on this same posting list's first heap
				 * TID instead, so we'll advance to the next offnum/index
				 * tuple pretty quickly.)
				 */
				if (j == nposting)
					killtuple = true;
			}
			else if (ItemPointerEquals(&ituple->t_tid, &kitem->heapTid))
				killtuple = true;

			/*
			 * Mark index item as dead, if it isn't already.  Since this
			 * happens while holding a buffer lock possibly in shared mode,
			 * it's possible that multiple processes attempt to do this
			 * simultaneously, leading to multiple full-page images being
			 * sent to WAL (if wal_log_hints or data checksums are enabled),
			 * which is undesirable.
			 */
			if (killtuple && !ItemIdIsDead(iid))
			{
				if (!killedsomething)
				{
					/*
					 * Use the hint bit infrastructure to check if we can
					 * update the page while just holding a share lock.  If
					 * we are not allowed, there's no point continuing.
					 */
					if (!BufferBeginSetHintBits(buf))
						goto unlock_page;
				}

				/* found the item/all posting list items */
				ItemIdMarkDead(iid);
				killedsomething = true;
				break;			/* out of inner search loop */
			}
			offnum = OffsetNumberNext(offnum);
		}
	}

	/*
	 * Since this can be redone later if needed, mark as dirty hint.
	 *
	 * Whenever we mark anything LP_DEAD, we also set the page's
	 * BTP_HAS_GARBAGE flag, which is likewise just a hint.  (Note that we
	 * only rely on the page-level flag in !heapkeyspace indexes.)
	 */
	if (killedsomething)
	{
		opaque->btpo_flags |= BTP_HAS_GARBAGE;
		BufferFinishSetHintBits(buf, true, true);
	}

unlock_page:
	if (!so->dropPin)
		_bt_unlockbuf(rel, buf);
	else
		_bt_relbuf(rel, buf);
}
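
#ifdef NOT_USED
/*
 * Illustrative sketch, not part of the original file: how the amgettuple
 * caller side remembers killed items for _bt_killitems(), roughly as
 * btgettuple() does when scan->kill_prior_tuple is set.  The "example_"
 * name is hypothetical.
 */
static void
example_remember_killed(IndexScanDesc scan)
{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;

	if (so->killedItems == NULL)
		so->killedItems = (int *) palloc(MaxTIDsPerBTreePage * sizeof(int));
	if (so->numKilled < MaxTIDsPerBTreePage)
		so->killedItems[so->numKilled++] = so->currPos.itemIndex;
}
#endif							/* NOT_USED */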


/*
 * The following routines manage a shared-memory area in which we track
 * assignment of "vacuum cycle IDs" to currently-active btree vacuuming
 * operations.  There is a single counter which increments each time we
 * start a vacuum to assign it a cycle ID.  Since multiple vacuums could
 * be active concurrently, we have to track the cycle ID for each active
 * vacuum; this requires at most MaxBackends entries (usually far fewer).
 * We assume at most one vacuum can be active for a given index.
 *
 * Access to the shared memory area is controlled by BtreeVacuumLock.
 * In principle we could use a separate lmgr locktag for each index,
 * but a single LWLock is much cheaper, and given the short time that
 * the lock is ever held, the concurrency hit should be minimal.
 */

typedef struct BTOneVacInfo
{
	LockRelId	relid;			/* global identifier of an index */
	BTCycleId	cycleid;		/* cycle ID for its active VACUUM */
} BTOneVacInfo;

typedef struct BTVacInfo
{
	BTCycleId	cycle_ctr;		/* cycle ID most recently assigned */
	int			num_vacuums;	/* number of currently active VACUUMs */
	int			max_vacuums;	/* allocated length of vacuums[] array */
	BTOneVacInfo vacuums[FLEXIBLE_ARRAY_MEMBER];
} BTVacInfo;

static BTVacInfo *btvacinfo;

static void BTreeShmemRequest(void *arg);
static void BTreeShmemInit(void *arg);

const ShmemCallbacks BTreeShmemCallbacks = {
	.request_fn = BTreeShmemRequest,
	.init_fn = BTreeShmemInit,	/* assumed field name */
};

/*
 * _bt_vacuum_cycleid --- get the active vacuum cycle ID for an index,
 *		or zero if there is no active VACUUM
 *
 * Note: for correct interlocking, the caller must already hold pin and
 * exclusive lock on each buffer it will store the cycle ID into.  This
 * ensures that even if a VACUUM starts immediately afterwards, it cannot
 * process those pages until the page split is complete.
 */
BTCycleId
_bt_vacuum_cycleid(Relation rel)
{
	BTCycleId	result = 0;
	int			i;

	/* Share lock is enough since this is a read-only operation */
	LWLockAcquire(BtreeVacuumLock, LW_SHARED);

	for (i = 0; i < btvacinfo->num_vacuums; i++)
	{
		BTOneVacInfo *vac = &btvacinfo->vacuums[i];

		if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
			vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
		{
			result = vac->cycleid;
			break;
		}
	}

	LWLockRelease(BtreeVacuumLock);
	return result;
}

/*
 * _bt_start_vacuum --- assign a cycle ID to a just-starting VACUUM operation
 *
 * Note: the caller must guarantee that it will eventually call
 * _bt_end_vacuum, else we'll permanently leak an array slot.  To ensure
 * that this happens even in elog(FATAL) scenarios, the appropriate coding
 * is not just a PG_TRY, but
 *		PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel))
 */
BTCycleId
_bt_start_vacuum(Relation rel)
{
	BTCycleId	result;
	int			i;
	BTOneVacInfo *vac;

	LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);

	/*
	 * Assign the next cycle ID, being careful to avoid zero as well as the
	 * reserved high values.
	 */
	result = ++(btvacinfo->cycle_ctr);
	if (result == 0 || result > MAX_BT_CYCLE_ID)
		result = btvacinfo->cycle_ctr = 1;

	/* Let's just make sure there's no entry already for this index */
	for (i = 0; i < btvacinfo->num_vacuums; i++)
	{
		vac = &btvacinfo->vacuums[i];
		if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
			vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
		{
			/*
			 * Unlike most places in the backend, we have to explicitly
			 * release our LWLock before throwing an error.  This is because
			 * we expect _bt_end_vacuum() to be called before transaction
			 * abort cleanup can run to release LWLocks.
			 */
			LWLockRelease(BtreeVacuumLock);
			elog(ERROR, "multiple active vacuums for index \"%s\"",
				 RelationGetRelationName(rel));
		}
	}

	/* OK, add an entry */
	if (btvacinfo->num_vacuums >= btvacinfo->max_vacuums)
	{
		LWLockRelease(BtreeVacuumLock);
		elog(ERROR, "out of btvacinfo slots");
	}
	vac = &btvacinfo->vacuums[btvacinfo->num_vacuums];
	vac->relid = rel->rd_lockInfo.lockRelId;
	vac->cycleid = result;
	btvacinfo->num_vacuums++;

	LWLockRelease(BtreeVacuumLock);
	return result;
}
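
#ifdef NOT_USED
/*
 * Illustrative sketch, not part of the original file: the cleanup-safe
 * caller pattern described above, roughly as btbulkdelete() uses it.  The
 * "example_" name is hypothetical.
 */
static void
example_vacuum_index(Relation rel)
{
	BTCycleId	cycleid;

	/* Guarantee _bt_end_vacuum() runs even on elog(FATAL) */
	PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
	{
		cycleid = _bt_start_vacuum(rel);
		/* ... scan the index, stamping cycleid into split right siblings ... */
	}
	PG_END_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
	_bt_end_vacuum(rel);
}
#endif							/* NOT_USED */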

/*
 * _bt_end_vacuum --- mark a btree VACUUM operation as done
 *
 * Note: this is deliberately coded not to complain if no entry is found;
 * this allows the caller to put PG_TRY around the start_vacuum operation.
 */
void
_bt_end_vacuum(Relation rel)
{
	int			i;

	LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);

	/* Find the array entry */
	for (i = 0; i < btvacinfo->num_vacuums; i++)
	{
		BTOneVacInfo *vac = &btvacinfo->vacuums[i];

		if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
			vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
		{
			/* Remove it by shifting down the last entry */
			*vac = btvacinfo->vacuums[btvacinfo->num_vacuums - 1];
			btvacinfo->num_vacuums--;
			break;
		}
	}

	LWLockRelease(BtreeVacuumLock);
}

/*
 * _bt_end_vacuum wrapped as an on_shmem_exit callback function
 */
void
_bt_end_vacuum_callback(int code, Datum arg)
{
	_bt_end_vacuum((Relation) DatumGetPointer(arg));
}

/*
 * BTreeShmemRequest --- register this module's shared memory
 */
static void
BTreeShmemRequest(void *arg)
{
	Size		size;

	size = offsetof(BTVacInfo, vacuums);
	size = add_size(size, mul_size(MaxBackends, sizeof(BTOneVacInfo)));

	ShmemRequestStruct(.name = "BTree Vacuum State",
					   .size = size,
					   .ptr = (void **) &btvacinfo,
		);
}

/*
 * BTreeShmemInit --- initialize this module's shared memory
 */
static void
BTreeShmemInit(void *arg)
{
	/*
	 * It doesn't really matter what the cycle counter starts at, but having
	 * it always start the same doesn't seem good.  Seed with low-order bits
	 * of time() instead.
	 */
	btvacinfo->cycle_ctr = (BTCycleId) time(NULL);

	btvacinfo->num_vacuums = 0;
	btvacinfo->max_vacuums = MaxBackends;
}

bytea *
btoptions(Datum reloptions, bool validate)
{
	static const relopt_parse_elt tab[] = {
		{"fillfactor", RELOPT_TYPE_INT, offsetof(BTOptions, fillfactor)},
		{"vacuum_cleanup_index_scale_factor", RELOPT_TYPE_REAL,
		offsetof(BTOptions, vacuum_cleanup_index_scale_factor)},
		{"deduplicate_items", RELOPT_TYPE_BOOL,
		offsetof(BTOptions, deduplicate_items)}
	};

	return (bytea *) build_reloptions(reloptions, validate,
									  RELOPT_KIND_BTREE,
									  sizeof(BTOptions),
									  tab, lengthof(tab));
}
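
#ifdef NOT_USED
/*
 * Illustrative sketch, not part of the original file: once btoptions() has
 * parsed the reloptions into a cached BTOptions struct, readers go through
 * the accessor macros in nbtree.h.  The "example_" name is hypothetical.
 */
static void
example_read_btoptions(Relation rel)
{
	int			fillfactor = BTGetFillFactor(rel);
	bool		deduplicate = BTGetDeduplicateItems(rel);

	elog(DEBUG1, "fillfactor=%d, deduplicate_items=%d",
		 fillfactor, deduplicate);
}
#endif							/* NOT_USED */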

/*
 * btproperty() -- Check boolean properties of indexes.
 *
 * This is optional, but handling AMPROP_RETURNABLE here saves opening the rel
 * to call btcanreturn.
 */
bool
btproperty(Oid index_oid, int attno,
		   IndexAMProperty prop, const char *propname,
		   bool *res, bool *isnull)
{
	switch (prop)
	{
		case AMPROP_RETURNABLE:
			/* answer only for columns, not AM or whole index */
			if (attno == 0)
				return false;
			/* otherwise, btree can always return data */
			*res = true;
			return true;

		default:
			return false;		/* punt to generic code */
	}
}

/*
 * btbuildphasename() -- Return name of index build phase.
 */
char *
btbuildphasename(int64 phasenum)
{
	switch (phasenum)
	{
		case PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE:
			return "initializing";
		case PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN:
			return "scanning table";
		case PROGRESS_BTREE_PHASE_PERFORMSORT_1:
			return "sorting live tuples";
		case PROGRESS_BTREE_PHASE_PERFORMSORT_2:
			return "sorting dead tuples";
		case PROGRESS_BTREE_PHASE_LEAF_LOAD:
			return "loading tuples in tree";
		default:
			return NULL;
	}
}

/*
 * _bt_truncate() -- create tuple without unneeded suffix attributes.
 *
 * Returns truncated pivot index tuple allocated in caller's memory context,
 * with key attributes copied from caller's firstright argument.  If rel is
 * an INCLUDE index, non-key attributes will definitely be truncated away,
 * since they're not part of the key space.  More aggressive suffix
 * truncation can take place when it's clear that the returned tuple does not
 * need one or more suffix key attributes.  We only need to keep firstright
 * attributes up to and including the first non-lastleft-equal attribute.
 * Caller's insertion scankey is used to compare the tuples; the scankey's
 * argument values are not considered here.
 *
 * Note that returned tuple's t_tid offset will hold the number of attributes
 * present, so the original item pointer offset is not represented.  Caller
 * should only change truncated tuple's downlink.  Note also that truncated
 * key attributes are treated as containing "minus infinity" values by
 * _bt_compare().
 *
 * In the worst case (when a heap TID must be appended to distinguish lastleft
 * from firstright), the size of the returned tuple is the size of firstright
 * plus the size of an additional MAXALIGN()'d item pointer.  This guarantee
 * is important, since callers need to stay under the 1/3 of a page
 * restriction on tuple size.  If this routine is ever taught to truncate
 * within an attribute/datum, it will need to avoid returning an enlarged
 * tuple to caller when truncation + TOAST compression ends up enlarging the
 * final datum.
 */
IndexTuple
_bt_truncate(Relation rel, IndexTuple lastleft, IndexTuple firstright,
			 BTScanInsert itup_key)
{
	TupleDesc	itupdesc = RelationGetDescr(rel);
	int16		nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
	int			keepnatts;
	IndexTuple	pivot;
	IndexTuple	tidpivot;
	ItemPointer pivotheaptid;
	Size		newsize;

	/*
	 * We should only ever truncate non-pivot tuples from leaf pages.  It's
	 * never okay to truncate when splitting an internal page.
	 */
	Assert(!BTreeTupleIsPivot(lastleft) && !BTreeTupleIsPivot(firstright));

	/* Determine how many attributes must be kept in truncated tuple */
	keepnatts = _bt_keep_natts(rel, lastleft, firstright, itup_key);

#ifdef DEBUG_NO_TRUNCATE
	/* Force truncation to be ineffective for testing purposes */
	keepnatts = nkeyatts + 1;
#endif

	pivot = index_truncate_tuple(itupdesc, firstright,
								 Min(keepnatts, nkeyatts));

	if (BTreeTupleIsPosting(pivot))
	{
		/*
		 * index_truncate_tuple() just returns a straight copy of firstright
		 * when it has no attributes to truncate.  When that happens, we may
		 * need to truncate away a posting list here instead.
		 */
		Assert(keepnatts == nkeyatts || keepnatts == nkeyatts + 1);
		Assert(IndexRelationGetNumberOfAttributes(rel) == nkeyatts);
		pivot->t_info &= ~INDEX_SIZE_MASK;
		pivot->t_info |= MAXALIGN(BTreeTupleGetPostingOffset(firstright));
	}

	/*
	 * If there is a distinguishing key attribute within pivot tuple, we're
	 * done
	 */
	if (keepnatts <= nkeyatts)
	{
		BTreeTupleSetNAtts(pivot, keepnatts, false);
		return pivot;
	}

	/*
	 * We have to store a heap TID in the new pivot tuple, since no non-TID
	 * key attribute value in firstright distinguishes the right side of the
	 * split from the left side.  nbtree conceptualizes this case as an
	 * inability to truncate away any key attributes, since heap TID is
	 * treated as just another key attribute (despite lacking a pg_attribute
	 * entry).
	 *
	 * Use enlarged space that holds a copy of pivot.  We need the extra
	 * space to store a heap TID at the end (using the special pivot tuple
	 * representation).  Note that the original pivot already has
	 * firstright's possible posting list/non-key attribute values removed
	 * at this point.
	 */
	newsize = MAXALIGN(IndexTupleSize(pivot)) + MAXALIGN(sizeof(ItemPointerData));
	tidpivot = palloc0(newsize);
	memcpy(tidpivot, pivot, MAXALIGN(IndexTupleSize(pivot)));
	/* Cannot leak memory here */
	pfree(pivot);

	/*
	 * Store all of firstright's key attribute values plus a tiebreaker heap
	 * TID value in enlarged pivot tuple
	 */
	tidpivot->t_info &= ~INDEX_SIZE_MASK;
	tidpivot->t_info |= newsize;
	BTreeTupleSetNAtts(tidpivot, nkeyatts, true);
	pivotheaptid = BTreeTupleGetHeapTID(tidpivot);

	/*
	 * Lehman & Yao use lastleft as the leaf high key in all cases, but don't
	 * consider suffix truncation.  It seems like a good idea to follow that
	 * example in cases where no truncation takes place -- use lastleft's
	 * heap TID.  (This is also the closest value to negative infinity that's
	 * legally usable.)
	 */
	ItemPointerCopy(BTreeTupleGetMaxHeapTID(lastleft), pivotheaptid);

	/*
	 * We're done.  Assert() that heap TID invariants hold before returning.
	 *
	 * Lehman and Yao require that the downlink to the right page, which is
	 * to be inserted into the parent page in the second phase of a page
	 * split, be a strict lower bound on items on the right page, and a
	 * non-strict upper bound for items on the left page.  Assert that heap
	 * TIDs follow these invariants, since a heap TID value is apparently
	 * needed as a tiebreaker.
	 */
#ifndef DEBUG_NO_TRUNCATE
	Assert(ItemPointerCompare(BTreeTupleGetMaxHeapTID(lastleft),
							  BTreeTupleGetHeapTID(firstright)) < 0);
	Assert(ItemPointerCompare(pivotheaptid,
							  BTreeTupleGetHeapTID(lastleft)) >= 0);
	Assert(ItemPointerCompare(pivotheaptid,
							  BTreeTupleGetHeapTID(firstright)) < 0);
#else

	/*
	 * Those invariants aren't guaranteed to hold for lastleft + firstright
	 * heap TID attribute values when they're considered here only because
	 * DEBUG_NO_TRUNCATE is defined (a heap TID is probably not actually
	 * needed as a tiebreaker).  DEBUG_NO_TRUNCATE must therefore use a heap
	 * TID value that always works as a strict lower bound for items to the
	 * right.  In particular, it must avoid using firstright's leading key
	 * attribute values along with lastleft's heap TID value when lastleft's
	 * TID happens to be greater than firstright's TID.
	 */
	ItemPointerCopy(BTreeTupleGetHeapTID(firstright), pivotheaptid);

	/*
	 * Pivot heap TID should never be fully equal to firstright.  Note that
	 * the pivot heap TID will still end up equal to lastleft's heap TID when
	 * that's the only usable value.
	 */
	ItemPointerSetOffsetNumber(pivotheaptid,
							   OffsetNumberPrev(ItemPointerGetOffsetNumber(pivotheaptid)));
	Assert(ItemPointerCompare(pivotheaptid,
							  BTreeTupleGetHeapTID(firstright)) < 0);
#endif

	return tidpivot;
}

/*
 * _bt_keep_natts - how many key attributes to keep when truncating.
 *
 * Caller provides two tuples that enclose a split point.  Caller's insertion
 * scankey is used to compare the tuples; the scankey's argument values are
 * not considered here.
 *
 * This can return a number of attributes that is one greater than the
 * number of key attributes for the index relation.  This indicates that the
 * caller must use a heap TID as a unique-ifier in new pivot tuple.
 */
static int
_bt_keep_natts(Relation rel, IndexTuple lastleft, IndexTuple firstright,
			   BTScanInsert itup_key)
{
	int			nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
	TupleDesc	itupdesc = RelationGetDescr(rel);
	int			keepnatts;
	ScanKey		scankey;

	/*
	 * _bt_compare() treats truncated key attributes as having the value
	 * minus infinity, which would break searches within !heapkeyspace
	 * indexes.  We must still truncate away non-key attribute values,
	 * though.
	 */
	if (!itup_key->heapkeyspace)
		return nkeyatts;

	scankey = itup_key->scankeys;
	keepnatts = 1;
	for (int attnum = 1; attnum <= nkeyatts; attnum++, scankey++)
	{
		Datum		datum1,
					datum2;
		bool		isNull1,
					isNull2;

		datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
		datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);

		if (isNull1 != isNull2)
			break;

		if (!isNull1 &&
			DatumGetInt32(FunctionCall2Coll(&scankey->sk_func,
											scankey->sk_collation,
											datum1,
											datum2)) != 0)
			break;

		keepnatts++;
	}

	/*
	 * Assert that _bt_keep_natts_fast() agrees with us in passing.  This is
	 * expected in an allequalimage index.
	 */
	Assert(!itup_key->allequalimage ||
		   keepnatts == _bt_keep_natts_fast(rel, lastleft, firstright));

	return keepnatts;
}
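
/*
 * Worked example (illustrative, not from the original file): with two key
 * columns, lastleft = ("blueberry", 42) and firstright = ("cherry", 7)
 * already differ on the first column, so _bt_keep_natts() returns 1 and the
 * truncated pivot keeps only ("cherry").  Only when the tuples are equal on
 * every key column does it return nkeyatts + 1, forcing _bt_truncate() to
 * append the heap TID tiebreaker.
 */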

/*
 * _bt_keep_natts_fast - fast bitwise variant of _bt_keep_natts.
 *
 * This is exported so that a candidate split point can have its effect on
 * suffix truncation inexpensively evaluated ahead of time when finding a
 * split location.  A naive bitwise approach to datum comparisons is used to
 * save cycles.
 *
 * The approach taken here usually provides the same answer as _bt_keep_natts
 * will (for the same pair of tuples from a heapkeyspace index), since the
 * majority of btree opclasses can never indicate that two datums are equal
 * unless they're bitwise equal after detoasting.  When an index only has
 * "equal image" columns, the routine is guaranteed to give the same result
 * as _bt_keep_natts would.
 *
 * Callers can rely on the fact that attributes considered equal here are
 * definitely also equal according to _bt_keep_natts, even when the index uses
 * an opclass or collation that is not "allequalimage"/deduplication-safe.
 * This weaker guarantee is good enough for nbtsplitloc.c caller, since false
 * negatives generally only have the effect of making leaf page splits use a
 * more balanced split point.
 */
int
_bt_keep_natts_fast(Relation rel, IndexTuple lastleft, IndexTuple firstright)
{
	TupleDesc	itupdesc = RelationGetDescr(rel);
	int			keysz = IndexRelationGetNumberOfKeyAttributes(rel);
	int			keepnatts;

	keepnatts = 1;
	for (int attnum = 1; attnum <= keysz; attnum++)
	{
		Datum		datum1,
					datum2;
		bool		isNull1,
					isNull2;
		CompactAttribute *att;

		datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
		datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
		att = TupleDescCompactAttr(itupdesc, attnum - 1);

		if (isNull1 != isNull2)
			break;

		if (!isNull1 &&
			!datum_image_eq(datum1, datum2, att->attbyval, att->attlen))
			break;

		keepnatts++;
	}

	return keepnatts;
}

/*
 * _bt_check_natts() -- Verify tuple has expected number of attributes.
 *
 * Returns value indicating if the expected number of attributes were found
 * for a particular offset on page.  This can be used as a general purpose
 * sanity check.
 *
 * Testing a tuple directly with BTreeTupleGetNAtts() should generally be
 * preferred to calling here.  That's usually more convenient, and is always
 * more explicit.  Call here instead when offnum's tuple may be a negative
 * infinity tuple that uses the pre-v11 on-disk representation, or when a low
 * context check is appropriate.  This routine is as strict as possible about
 * what is expected on each version of btree.
 */
bool
_bt_check_natts(Relation rel, bool heapkeyspace, Page page, OffsetNumber offnum)
{
	int16		natts = IndexRelationGetNumberOfAttributes(rel);
	int16		nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
	BTPageOpaque opaque = BTPageGetOpaque(page);
	IndexTuple	itup;
	int			tupnatts;

	/*
	 * We cannot reliably test a deleted or half-dead page, since they have
	 * dummy high keys
	 */
	if (P_IGNORE(opaque))
		return true;

	Assert(offnum >= FirstOffsetNumber &&
		   offnum <= PageGetMaxOffsetNumber(page));

	itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
	tupnatts = BTreeTupleGetNAtts(itup, rel);

	/* !heapkeyspace indexes do not support deduplication */
	if (!heapkeyspace && BTreeTupleIsPosting(itup))
		return false;

	/* Posting list tuples should never have "pivot heap TID" bit set */
	if (BTreeTupleIsPosting(itup) &&
		(ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) &
		 BT_PIVOT_HEAP_TID_ATTR) != 0)
		return false;

	/* INCLUDE indexes do not support deduplication */
	if (natts != nkeyatts && BTreeTupleIsPosting(itup))
		return false;
	if (P_ISLEAF(opaque))
	{
		if (offnum >= P_FIRSTDATAKEY(opaque))
		{
			/*
			 * Non-pivot tuple should never be explicitly marked as a pivot
			 * tuple
			 */
			if (BTreeTupleIsPivot(itup))
				return false;

			/*
			 * Leaf tuples that are not the page high key (non-pivot tuples)
			 * should never be truncated.  (Note that tupnatts must have been
			 * inferred, even with a posting list tuple, because only pivot
			 * tuples store tupnatts directly.)
			 */
			return tupnatts == natts;
		}
		else
		{
			/*
			 * Rightmost page doesn't contain a page high key, so tuple was
			 * checked above as ordinary leaf tuple
			 */
			Assert(!P_RIGHTMOST(opaque));

			/*
			 * !heapkeyspace high key tuple contains only key attributes.
			 * Note that tupnatts will only have been explicitly represented
			 * in !heapkeyspace indexes that happen to have non-key
			 * attributes.
			 */
			if (!heapkeyspace)
				return tupnatts == nkeyatts;

			/* Use generic heapkeyspace pivot tuple handling */
		}
	}
	else						/* !P_ISLEAF(opaque) */
	{
		if (offnum == P_FIRSTDATAKEY(opaque))
		{
			/*
			 * The first tuple on any internal page (possibly the first after
			 * its high key) is its negative infinity tuple.  Negative
			 * infinity tuples are always truncated to zero attributes.  They
			 * are a particular kind of pivot tuple.
			 */
			if (heapkeyspace)
				return tupnatts == 0;

			/*
			 * The number of attributes won't be explicitly represented if
			 * the negative infinity tuple was generated during a page split
			 * that occurred with a version of Postgres before v11.  There
			 * must be a problem when there is an explicit representation
			 * that is non-zero, or when there is no explicit representation
			 * and the tuple is evidently not a pre-pg_upgrade tuple.
			 *
			 * Prior to v11, downlinks always had P_HIKEY as their offset.
			 * Accept that as an alternative indication of a valid
			 * !heapkeyspace negative infinity tuple.
			 */
			return tupnatts == 0 ||
				ItemPointerGetOffsetNumber(&itup->t_tid) == P_HIKEY;
		}
		else
		{
			/*
			 * !heapkeyspace downlink tuple with separator key contains only
			 * key attributes.  Note that tupnatts will only have been
			 * explicitly represented in !heapkeyspace indexes that happen to
			 * have non-key attributes.
			 */
			if (!heapkeyspace)
				return tupnatts == nkeyatts;

			/* Use generic heapkeyspace pivot tuple handling */
		}
	}

	/* Handle heapkeyspace pivot tuples (excluding minus infinity items) */
	Assert(heapkeyspace);

	/*
	 * Explicit representation of the number of attributes is mandatory with
	 * heapkeyspace index pivot tuples, regardless of whether or not there
	 * are non-key attributes.
	 */
	if (!BTreeTupleIsPivot(itup))
		return false;

	/* Pivot tuple should not use posting list representation (redundant) */
	if (BTreeTupleIsPosting(itup))
		return false;

	/*
	 * Heap TID is a tiebreaker key attribute, so it cannot be untruncated
	 * when any other key attribute is truncated
	 */
	if (BTreeTupleGetHeapTID(itup) != NULL && tupnatts != nkeyatts)
		return false;

	/*
	 * Pivot tuple must have at least one untruncated key attribute (minus
	 * infinity pivot tuples are the only exception).  Pivot tuples can never
	 * represent that there is a value present for a key attribute that
	 * exceeds pg_index.indnkeyatts for the index.
	 */
	return tupnatts > 0 && tupnatts <= nkeyatts;
}

/*
 *
 *	_bt_check_third_page() -- check whether tuple fits on a btree page at all.
 *
 * We actually need to be able to fit three items on every page, so restrict
 * any one item to 1/3 the per-page available space.  Note that itemsz should
 * not include the ItemId overhead.
 *
 * It might be useful to apply TOAST methods rather than throw an error here.
 * Using out of line storage would break assumptions made by suffix truncation
 * and by contrib/amcheck, though.
 */
void
_bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace,
					 Page page, IndexTuple newtup)
{
	Size		itemsz;
	BTPageOpaque opaque;

	itemsz = MAXALIGN(IndexTupleSize(newtup));

	/* Double check item size against limit */
	if (itemsz <= BTMaxItemSize)
		return;

	/*
	 * Tuple is probably too large to fit on page, but it's possible that the
	 * index uses version 2 or version 3, or that page is an internal page,
	 * in which case a slightly higher limit applies.
	 */
	if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid)
		return;

	/*
	 * Internal page insertions cannot fail here, because that would mean
	 * that an earlier leaf level insertion that should have failed didn't
	 */
	opaque = BTPageGetOpaque(page);
	if (!P_ISLEAF(opaque))
		elog(ERROR, "cannot insert oversized tuple of size %zu on internal page of index \"%s\"",
			 itemsz, RelationGetRelationName(rel));

	ereport(ERROR,
			(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
			 errmsg("index row size %zu exceeds btree version %u maximum %zu for index \"%s\"",
					itemsz,
					needheaptidspace ? BTREE_VERSION : BTREE_NOVAC_VERSION,
					needheaptidspace ? BTMaxItemSize : BTMaxItemSizeNoHeapTid,
					RelationGetRelationName(rel)),
			 errdetail("Index row references tuple (%u,%u) in relation \"%s\".",
					   ItemPointerGetBlockNumber(&newtup->t_tid),
					   ItemPointerGetOffsetNumber(&newtup->t_tid),
					   RelationGetRelationName(heap)),
			 errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
					 "Consider a function index of an MD5 hash of the value, "
					 "or use full text indexing."),
			 errtableconstraint(rel, RelationGetRelationName(rel))));
}

/*
 * Are all attributes in rel "equality is image equality" attributes?
 *
 * We use each attribute's BTEQUALIMAGE_PROC opclass procedure.  If any
 * opclass either lacks a BTEQUALIMAGE_PROC procedure or returns false, we
 * return false; otherwise we return true.
 *
 * Returned boolean value is stored in index metapage during index builds.
 * Deduplication can only be used when we return true.
 */
bool
_bt_allequalimage(Relation rel, bool debugmessage)
{
	bool		allequalimage = true;

	/* INCLUDE indexes can never support deduplication */
	if (IndexRelationGetNumberOfAttributes(rel) !=
		IndexRelationGetNumberOfKeyAttributes(rel))
		return false;

	for (int i = 0; i < IndexRelationGetNumberOfKeyAttributes(rel); i++)
	{
		Oid			opfamily = rel->rd_opfamily[i];
		Oid			opcintype = rel->rd_opcintype[i];
		Oid			collation = rel->rd_indcollation[i];
		Oid			equalimageproc;

		equalimageproc = get_opfamily_proc(opfamily, opcintype, opcintype,
										   BTEQUALIMAGE_PROC);

		/*
		 * If there is no BTEQUALIMAGE_PROC then deduplication is assumed to
		 * be unsafe.  Otherwise, actually call proc and see what it says.
		 */
		if (!OidIsValid(equalimageproc) ||
			!DatumGetBool(OidFunctionCall1Coll(equalimageproc, collation,
											   ObjectIdGetDatum(opcintype))))
		{
			allequalimage = false;
			break;
		}
	}

	if (debugmessage)
	{
		if (allequalimage)
			elog(DEBUG1, "index \"%s\" can safely use deduplication",
				 RelationGetRelationName(rel));
		else
			elog(DEBUG1, "index \"%s\" cannot use deduplication",
				 RelationGetRelationName(rel));
	}

	return allequalimage;
}
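
#ifdef NOT_USED
/*
 * Illustrative sketch, not part of the original file: the CREATE INDEX
 * caller pattern described atop _bt_mkscankey() -- build an insertion
 * scankey with no tuple, then set allequalimage directly, roughly as
 * _bt_leafbuild() does in nbtsort.c.  The "example_" name is hypothetical.
 */
static BTScanInsert
example_build_inskey(Relation index)
{
	BTScanInsert inskey = _bt_mkscankey(index, NULL);

	inskey->allequalimage = _bt_allequalimage(index, true);
	return inskey;
}
#endif							/* NOT_USED */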
Definition xlogdefs.h:21