nbtpage.c
1/*-------------------------------------------------------------------------
2 *
3 * nbtpage.c
4 * BTree-specific page management code for the Postgres btree access
5 * method.
6 *
7 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
8 * Portions Copyright (c) 1994, Regents of the University of California
9 *
10 *
11 * IDENTIFICATION
12 * src/backend/access/nbtree/nbtpage.c
13 *
14 * NOTES
15 * Postgres btree pages look like ordinary relation pages. The opaque
16 * data at high addresses includes pointers to left and right siblings
17 * and flag data describing page state. The first page in a btree, page
18 * zero, is special -- it stores meta-information describing the tree.
19 * Pages one and higher store the actual tree data.
20 *
21 *-------------------------------------------------------------------------
22 */
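/*
 * Added illustrative note (not part of the original file): the opaque data
 * described above is reached with the BTPageGetOpaque() macro from
 * access/nbtree.h.  A minimal sketch of inspecting any pinned and locked
 * nbtree page:
 *
 *		BTPageOpaque opaque = BTPageGetOpaque(page);
 *
 *		if (P_ISLEAF(opaque) && !P_ISDELETED(opaque))
 *			elog(DEBUG2, "live leaf page, right sibling %u",
 *				 opaque->btpo_next);
 */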
23#include "postgres.h"
24
25#include "access/nbtree.h"
26#include "access/nbtxlog.h"
27#include "access/tableam.h"
28#include "access/transam.h"
29#include "access/xlog.h"
30#include "access/xloginsert.h"
31#include "common/int.h"
32#include "miscadmin.h"
33#include "storage/indexfsm.h"
34#include "storage/predicate.h"
35#include "storage/procarray.h"
37#include "utils/memdebug.h"
38#include "utils/memutils.h"
39#include "utils/snapmgr.h"
40
41static BTMetaPageData *_bt_getmeta(Relation rel, Buffer metabuf);
42static void _bt_delitems_delete(Relation rel, Buffer buf,
43 TransactionId snapshotConflictHorizon,
44 bool isCatalogRel,
45 OffsetNumber *deletable, int ndeletable,
46 BTVacuumPosting *updatable, int nupdatable);
47static char *_bt_delitems_update(BTVacuumPosting *updatable, int nupdatable,
48 OffsetNumber *updatedoffsets,
49 Size *updatedbuflen, bool needswal);
50static bool _bt_mark_page_halfdead(Relation rel, Relation heaprel,
51 Buffer leafbuf, BTStack stack);
52static bool _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf,
53 BlockNumber scanblkno,
54 bool *rightsib_empty,
55 BTVacState *vstate);
56static bool _bt_lock_subtree_parent(Relation rel, Relation heaprel,
57 BlockNumber child, BTStack stack,
58 Buffer *subtreeparent, OffsetNumber *poffset,
59 BlockNumber *topparent,
60 BlockNumber *topparentrightsib);
61static void _bt_pendingfsm_add(BTVacState *vstate, BlockNumber target,
62 FullTransactionId safexid);
63
64/*
65 * _bt_initmetapage() -- Fill a page buffer with a correct metapage image
66 */
67void
68_bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level,
69 bool allequalimage)
70{
71 BTMetaPageData *metad;
72 BTPageOpaque metaopaque;
73
74 _bt_pageinit(page, BLCKSZ);
75
76 metad = BTPageGetMeta(page);
77 metad->btm_magic = BTREE_MAGIC;
78 metad->btm_version = BTREE_VERSION;
79 metad->btm_root = rootbknum;
80 metad->btm_level = level;
81 metad->btm_fastroot = rootbknum;
82 metad->btm_fastlevel = level;
83 metad->btm_last_cleanup_num_delpages = 0;
84 metad->btm_last_cleanup_num_heap_tuples = -1.0;
85 metad->btm_allequalimage = allequalimage;
86
87 metaopaque = BTPageGetOpaque(page);
88 metaopaque->btpo_flags = BTP_META;
89
90 /*
91 * Set pd_lower just past the end of the metadata. This is essential,
92 * because without doing so, metadata will be lost if xlog.c compresses
93 * the page.
94 */
95 ((PageHeader) page)->pd_lower =
96 ((char *) metad + sizeof(BTMetaPageData)) - (char *) page;
97}
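/*
 * Added usage sketch (not part of the original file): callers such as an
 * empty-index build fill a private page image with _bt_initmetapage() and
 * then write it out as block 0, roughly like this ("allequalimage" stands
 * for whatever the caller previously determined):
 *
 *		Page	metapage = (Page) palloc(BLCKSZ);
 *
 *		_bt_initmetapage(metapage, P_NONE, 0, allequalimage);
 *		... write the image as block 0 of the index and WAL-log it ...
 */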
98
99/*
100 * _bt_upgrademetapage() -- Upgrade a meta-page from an old format to version
101 * 3, the last version that can be updated without broadly affecting
102 * on-disk compatibility. (A REINDEX is required to upgrade to v4.)
103 *
104 * This routine does purely in-memory image upgrade. Caller is
105 * responsible for locking, WAL-logging etc.
106 */
107void
108_bt_upgrademetapage(Page page)
109{
110 BTMetaPageData *metad;
111 BTPageOpaque metaopaque PG_USED_FOR_ASSERTS_ONLY;
112
113 metad = BTPageGetMeta(page);
114 metaopaque = BTPageGetOpaque(page);
115
116 /* It must be really a meta page of upgradable version */
117 Assert(metaopaque->btpo_flags & BTP_META);
118 Assert(metad->btm_version < BTREE_NOVAC_VERSION);
119 Assert(metad->btm_version >= BTREE_MIN_VERSION);
120
121 /* Set version number and fill extra fields added into version 3 */
122 metad->btm_version = BTREE_NOVAC_VERSION;
123 metad->btm_last_cleanup_num_delpages = 0;
124 metad->btm_last_cleanup_num_heap_tuples = -1.0;
125 /* Only a REINDEX can set this field */
126 Assert(!metad->btm_allequalimage);
127 metad->btm_allequalimage = false;
128
129 /* Adjust pd_lower (see _bt_initmetapage() for details) */
130 ((PageHeader) page)->pd_lower =
131 ((char *) metad + sizeof(BTMetaPageData)) - (char *) page;
132}
133
134/*
135 * Get metadata from share-locked buffer containing metapage, while performing
136 * standard sanity checks.
137 *
138 * Callers that cache data returned here in local cache should note that an
139 * on-the-fly upgrade using _bt_upgrademetapage() can change the version field
140 * and BTREE_NOVAC_VERSION specific fields without invalidating local cache.
141 */
142static BTMetaPageData *
143_bt_getmeta(Relation rel, Buffer metabuf)
144{
145 Page metapg;
146 BTPageOpaque metaopaque;
147 BTMetaPageData *metad;
148
149 metapg = BufferGetPage(metabuf);
150 metaopaque = BTPageGetOpaque(metapg);
151 metad = BTPageGetMeta(metapg);
152
153 /* sanity-check the metapage */
154 if (!P_ISMETA(metaopaque) ||
155 metad->btm_magic != BTREE_MAGIC)
156 ereport(ERROR,
157 (errcode(ERRCODE_INDEX_CORRUPTED),
158 errmsg("index \"%s\" is not a btree",
159 RelationGetRelationName(rel))));
160
161 if (metad->btm_version < BTREE_MIN_VERSION ||
162 metad->btm_version > BTREE_VERSION)
163 ereport(ERROR,
164 (errcode(ERRCODE_INDEX_CORRUPTED),
165 errmsg("version mismatch in index \"%s\": file version %d, "
166 "current version %d, minimal supported version %d",
167 RelationGetRelationName(rel),
168 metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION)));
169
170 return metad;
171}
172
173/*
174 * _bt_vacuum_needs_cleanup() -- Checks if index needs cleanup
175 *
176 * Called by btvacuumcleanup when btbulkdelete was never called because no
177 * index tuples needed to be deleted.
178 */
179bool
180_bt_vacuum_needs_cleanup(Relation rel)
181{
182 Buffer metabuf;
183 Page metapg;
184 BTMetaPageData *metad;
185 uint32 btm_version;
186 BlockNumber prev_num_delpages;
187
188 /*
189 * Copy details from metapage to local variables quickly.
190 *
191 * Note that we deliberately avoid using cached version of metapage here.
192 */
193 metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
194 metapg = BufferGetPage(metabuf);
195 metad = BTPageGetMeta(metapg);
196 btm_version = metad->btm_version;
197
198 if (btm_version < BTREE_NOVAC_VERSION)
199 {
200 /*
201 * Metapage needs to be dynamically upgraded to store fields that are
202 * only present when btm_version >= BTREE_NOVAC_VERSION
203 */
204 _bt_relbuf(rel, metabuf);
205 return true;
206 }
207
208 prev_num_delpages = metad->btm_last_cleanup_num_delpages;
209 _bt_relbuf(rel, metabuf);
210
211 /*
212 * Trigger cleanup in rare cases where prev_num_delpages exceeds 5% of the
213 * total size of the index. We can reasonably expect (though are not
214 * guaranteed) to be able to recycle this many pages if we decide to do a
215 * btvacuumscan call during the ongoing btvacuumcleanup. For further
216 * details see the nbtree/README section on placing deleted pages in the
217 * FSM.
218 */
219 if (prev_num_delpages > 0 &&
220 prev_num_delpages > RelationGetNumberOfBlocks(rel) / 20)
221 return true;
222
223 return false;
224}
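/*
 * Added worked example (not part of the original file): with an index of
 * RelationGetNumberOfBlocks(rel) == 1000 blocks, the threshold above is
 * 1000 / 20 = 50 pages.  prev_num_delpages == 51 (just over 5%) makes this
 * function return true, while 50 or fewer deleted pages does not.
 */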
225
226/*
227 * _bt_set_cleanup_info() -- Update metapage for btvacuumcleanup.
228 *
229 * Called at the end of btvacuumcleanup, when num_delpages value has been
230 * finalized.
231 */
232void
233_bt_set_cleanup_info(Relation rel, BlockNumber num_delpages)
234{
235 Buffer metabuf;
236 Page metapg;
237 BTMetaPageData *metad;
238
239 /*
240 * On-disk compatibility note: The btm_last_cleanup_num_delpages metapage
241 * field started out as a TransactionId field called btm_oldest_btpo_xact.
242 * Both "versions" are just uint32 fields. It was convenient to repurpose
243 * the field when we began to use 64-bit XIDs in deleted pages.
244 *
245 * It's possible that a pg_upgrade'd database will contain an XID value in
246 * what is now recognized as the metapage's btm_last_cleanup_num_delpages
247 * field. _bt_vacuum_needs_cleanup() may even believe that this value
248 * indicates that there are lots of pages that it needs to recycle, when
249 * in reality there are only one or two. The worst that can happen is
250 * that there will be a call to btvacuumscan a little earlier, which will
251 * set btm_last_cleanup_num_delpages to a sane value when we're called.
252 *
253 * Note also that the metapage's btm_last_cleanup_num_heap_tuples field is
254 * no longer used as of PostgreSQL 14. We set it to -1.0 on rewrite, just
255 * to be consistent.
256 */
257 metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
258 metapg = BufferGetPage(metabuf);
259 metad = BTPageGetMeta(metapg);
260
261 /* Don't miss chance to upgrade index/metapage when BTREE_MIN_VERSION */
262 if (metad->btm_version >= BTREE_NOVAC_VERSION &&
263 metad->btm_last_cleanup_num_delpages == num_delpages)
264 {
265 /* Usually means index continues to have num_delpages of 0 */
266 _bt_relbuf(rel, metabuf);
267 return;
268 }
269
270 /* trade in our read lock for a write lock */
271 _bt_unlockbuf(rel, metabuf);
272 _bt_lockbuf(rel, metabuf, BT_WRITE);
273
275
276 /* upgrade meta-page if needed */
277 if (metad->btm_version < BTREE_NOVAC_VERSION)
278 _bt_upgrademetapage(metapg);
279
280 /* update cleanup-related information */
281 metad->btm_last_cleanup_num_delpages = num_delpages;
282 metad->btm_last_cleanup_num_heap_tuples = -1.0;
283 MarkBufferDirty(metabuf);
284
285 /* write wal record if needed */
286 if (RelationNeedsWAL(rel))
287 {
288 xl_btree_metadata md;
289 XLogRecPtr recptr;
290
291 XLogBeginInsert();
292 XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
293
294 Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
295 md.version = metad->btm_version;
296 md.root = metad->btm_root;
297 md.level = metad->btm_level;
298 md.fastroot = metad->btm_fastroot;
299 md.fastlevel = metad->btm_fastlevel;
300 md.last_cleanup_num_delpages = num_delpages;
301 md.allequalimage = metad->btm_allequalimage;
302
303 XLogRegisterBufData(0, &md, sizeof(xl_btree_metadata));
304
305 recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_META_CLEANUP);
306
307 PageSetLSN(metapg, recptr);
308 }
309
310 END_CRIT_SECTION();
311
312 _bt_relbuf(rel, metabuf);
313}
314
315/*
316 * _bt_getroot() -- Get the root page of the btree.
317 *
318 * Since the root page can move around the btree file, we have to read
319 * its location from the metadata page, and then read the root page
320 * itself. If no root page exists yet, we have to create one.
321 *
322 * The access type parameter (BT_READ or BT_WRITE) controls whether
323 * a new root page will be created or not. If access = BT_READ,
324 * and no root page exists, we just return InvalidBuffer. For
325 * BT_WRITE, we try to create the root page if it doesn't exist.
326 * NOTE that the returned root page will have only a read lock set
327 * on it even if access = BT_WRITE!
328 *
329 * If access = BT_WRITE, heaprel must be set; otherwise caller can just
330 * pass NULL. See _bt_allocbuf for an explanation.
331 *
332 * The returned page is not necessarily the true root --- it could be
333 * a "fast root" (a page that is alone in its level due to deletions).
334 * Also, if the root page is split while we are "in flight" to it,
335 * what we will return is the old root, which is now just the leftmost
336 * page on a probably-not-very-wide level. For most purposes this is
337 * as good as or better than the true root, so we do not bother to
338 * insist on finding the true root. We do, however, guarantee to
339 * return a live (not deleted or half-dead) page.
340 *
341 * On successful return, the root page is pinned and read-locked.
342 * The metadata page is not locked or pinned on exit.
343 */
344Buffer
345_bt_getroot(Relation rel, Relation heaprel, int access)
346{
347 Buffer metabuf;
348 Buffer rootbuf;
349 Page rootpage;
350 BTPageOpaque rootopaque;
351 BlockNumber rootblkno;
352 uint32 rootlevel;
353 BTMetaPageData *metad;
354
355 Assert(access == BT_READ || heaprel != NULL);
356
357 /*
358 * Try to use previously-cached metapage data to find the root. This
359 * normally saves one buffer access per index search, which is a very
360 * helpful savings in bufmgr traffic and hence contention.
361 */
362 if (rel->rd_amcache != NULL)
363 {
364 metad = (BTMetaPageData *) rel->rd_amcache;
365 /* We shouldn't have cached it if any of these fail */
366 Assert(metad->btm_magic == BTREE_MAGIC);
367 Assert(metad->btm_version >= BTREE_MIN_VERSION);
368 Assert(metad->btm_version <= BTREE_VERSION);
369 Assert(!metad->btm_allequalimage ||
370 metad->btm_version > BTREE_NOVAC_VERSION);
371 Assert(metad->btm_root != P_NONE);
372
373 rootblkno = metad->btm_fastroot;
374 Assert(rootblkno != P_NONE);
375 rootlevel = metad->btm_fastlevel;
376
377 rootbuf = _bt_getbuf(rel, rootblkno, BT_READ);
378 rootpage = BufferGetPage(rootbuf);
379 rootopaque = BTPageGetOpaque(rootpage);
380
381 /*
382 * Since the cache might be stale, we check the page more carefully
383 * here than normal. We *must* check that it's not deleted. If it's
384 * not alone on its level, then we reject too --- this may be overly
385 * paranoid but better safe than sorry. Note we don't check P_ISROOT,
386 * because that's not set in a "fast root".
387 */
388 if (!P_IGNORE(rootopaque) &&
389 rootopaque->btpo_level == rootlevel &&
390 P_LEFTMOST(rootopaque) &&
391 P_RIGHTMOST(rootopaque))
392 {
393 /* OK, accept cached page as the root */
394 return rootbuf;
395 }
396 _bt_relbuf(rel, rootbuf);
397 /* Cache is stale, throw it away */
398 if (rel->rd_amcache)
399 pfree(rel->rd_amcache);
400 rel->rd_amcache = NULL;
401 }
402
403 metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
404 metad = _bt_getmeta(rel, metabuf);
405
406 /* if no root page initialized yet, do it */
407 if (metad->btm_root == P_NONE)
408 {
409 Page metapg;
410
411 /* If access = BT_READ, caller doesn't want us to create root yet */
412 if (access == BT_READ)
413 {
414 _bt_relbuf(rel, metabuf);
415 return InvalidBuffer;
416 }
417
418 /* trade in our read lock for a write lock */
419 _bt_unlockbuf(rel, metabuf);
420 _bt_lockbuf(rel, metabuf, BT_WRITE);
421
422 /*
423 * Race condition: if someone else initialized the metadata between
424 * the time we released the read lock and acquired the write lock, we
425 * must avoid doing it again.
426 */
427 if (metad->btm_root != P_NONE)
428 {
429 /*
430 * Metadata initialized by someone else. In order to guarantee no
431 * deadlocks, we have to release the metadata page and start all
432 * over again. (Is that really true? But it's hardly worth trying
433 * to optimize this case.)
434 */
435 _bt_relbuf(rel, metabuf);
436 return _bt_getroot(rel, heaprel, access);
437 }
438
439 /*
440 * Get, initialize, write, and leave a lock of the appropriate type on
441 * the new root page. Since this is the first page in the tree, it's
442 * a leaf as well as the root.
443 */
444 rootbuf = _bt_allocbuf(rel, heaprel);
445 rootblkno = BufferGetBlockNumber(rootbuf);
446 rootpage = BufferGetPage(rootbuf);
447 rootopaque = BTPageGetOpaque(rootpage);
448 rootopaque->btpo_prev = rootopaque->btpo_next = P_NONE;
449 rootopaque->btpo_flags = (BTP_LEAF | BTP_ROOT);
450 rootopaque->btpo_level = 0;
451 rootopaque->btpo_cycleid = 0;
452 /* Get raw page pointer for metapage */
453 metapg = BufferGetPage(metabuf);
454
455 /* NO ELOG(ERROR) till meta is updated */
456 START_CRIT_SECTION();
457
458 /* upgrade metapage if needed */
459 if (metad->btm_version < BTREE_NOVAC_VERSION)
460 _bt_upgrademetapage(metapg);
461
462 metad->btm_root = rootblkno;
463 metad->btm_level = 0;
464 metad->btm_fastroot = rootblkno;
465 metad->btm_fastlevel = 0;
466 metad->btm_last_cleanup_num_delpages = 0;
467 metad->btm_last_cleanup_num_heap_tuples = -1.0;
468
469 MarkBufferDirty(rootbuf);
470 MarkBufferDirty(metabuf);
471
472 /* XLOG stuff */
473 if (RelationNeedsWAL(rel))
474 {
475 xl_btree_newroot xlrec;
476 XLogRecPtr recptr;
477 xl_btree_metadata md;
478
479 XLogBeginInsert();
480 XLogRegisterBuffer(0, rootbuf, REGBUF_WILL_INIT);
481 XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
482
483 Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
484 md.version = metad->btm_version;
485 md.root = rootblkno;
486 md.level = 0;
487 md.fastroot = rootblkno;
488 md.fastlevel = 0;
489 md.last_cleanup_num_delpages = 0;
490 md.allequalimage = metad->btm_allequalimage;
491
492 XLogRegisterBufData(2, &md, sizeof(xl_btree_metadata));
493
494 xlrec.rootblk = rootblkno;
495 xlrec.level = 0;
496
497 XLogRegisterData(&xlrec, SizeOfBtreeNewroot);
498
499 recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT);
500
501 PageSetLSN(rootpage, recptr);
502 PageSetLSN(metapg, recptr);
503 }
504
505 END_CRIT_SECTION();
506
507 /*
508 * swap root write lock for read lock. There is no danger of anyone
509 * else accessing the new root page while it's unlocked, since no one
510 * else knows where it is yet.
511 */
512 _bt_unlockbuf(rel, rootbuf);
513 _bt_lockbuf(rel, rootbuf, BT_READ);
514
515 /* okay, metadata is correct, release lock on it without caching */
516 _bt_relbuf(rel, metabuf);
517 }
518 else
519 {
520 rootblkno = metad->btm_fastroot;
521 Assert(rootblkno != P_NONE);
522 rootlevel = metad->btm_fastlevel;
523
524 /*
525 * Cache the metapage data for next time
526 */
527 rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
528 sizeof(BTMetaPageData));
529 memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));
530
531 /*
532 * We are done with the metapage; arrange to release it via first
533 * _bt_relandgetbuf call
534 */
535 rootbuf = metabuf;
536
537 for (;;)
538 {
539 rootbuf = _bt_relandgetbuf(rel, rootbuf, rootblkno, BT_READ);
540 rootpage = BufferGetPage(rootbuf);
541 rootopaque = BTPageGetOpaque(rootpage);
542
543 if (!P_IGNORE(rootopaque))
544 break;
545
546 /* it's dead, Jim. step right one page */
547 if (P_RIGHTMOST(rootopaque))
548 elog(ERROR, "no live root page found in index \"%s\"",
549 RelationGetRelationName(rel));
550 rootblkno = rootopaque->btpo_next;
551 }
552
553 if (rootopaque->btpo_level != rootlevel)
554 elog(ERROR, "root page %u of index \"%s\" has level %u, expected %u",
555 rootblkno, RelationGetRelationName(rel),
556 rootopaque->btpo_level, rootlevel);
557 }
558
559 /*
560 * By here, we have a pin and read lock on the root page, and no lock set
561 * on the metadata page. Return the root page's buffer.
562 */
563 return rootbuf;
564}
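/*
 * Added caller-pattern sketch (not part of the original file): BT_READ
 * callers never create the root, so they must cope with an entirely empty
 * index.  A minimal sketch:
 *
 *		Buffer	rootbuf = _bt_getroot(rel, NULL, BT_READ);
 *
 *		if (!BufferIsValid(rootbuf))
 *			return;		(the index has no root page yet)
 *		... descend from rootbuf, eventually _bt_relbuf(rel, rootbuf) ...
 */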
565
566/*
567 * _bt_gettrueroot() -- Get the true root page of the btree.
568 *
569 * This is the same as the BT_READ case of _bt_getroot(), except
570 * we follow the true-root link not the fast-root link.
571 *
572 * By the time we acquire lock on the root page, it might have been split and
573 * not be the true root anymore. This is okay for the present uses of this
574 * routine; we only really need to be able to move up at least one tree level
575 * from whatever non-root page we were at. If we ever do need to lock the
576 * one true root page, we could loop here, re-reading the metapage on each
577 * failure. (Note that it wouldn't do to hold the lock on the metapage while
578 * moving to the root --- that'd deadlock against any concurrent root split.)
579 */
580Buffer
581_bt_gettrueroot(Relation rel)
582{
583 Buffer metabuf;
584 Page metapg;
585 BTPageOpaque metaopaque;
586 Buffer rootbuf;
587 Page rootpage;
588 BTPageOpaque rootopaque;
589 BlockNumber rootblkno;
590 uint32 rootlevel;
591 BTMetaPageData *metad;
592
593 /*
594 * We don't try to use cached metapage data here, since (a) this path is
595 * not performance-critical, and (b) if we are here it suggests our cache
596 * is out-of-date anyway. In light of point (b), it's probably safest to
597 * actively flush any cached metapage info.
598 */
599 if (rel->rd_amcache)
600 pfree(rel->rd_amcache);
601 rel->rd_amcache = NULL;
602
603 metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
604 metapg = BufferGetPage(metabuf);
605 metaopaque = BTPageGetOpaque(metapg);
606 metad = BTPageGetMeta(metapg);
607
608 if (!P_ISMETA(metaopaque) ||
609 metad->btm_magic != BTREE_MAGIC)
610 ereport(ERROR,
611 (errcode(ERRCODE_INDEX_CORRUPTED),
612 errmsg("index \"%s\" is not a btree",
613 RelationGetRelationName(rel))));
614
615 if (metad->btm_version < BTREE_MIN_VERSION ||
616 metad->btm_version > BTREE_VERSION)
617 ereport(ERROR,
618 (errcode(ERRCODE_INDEX_CORRUPTED),
619 errmsg("version mismatch in index \"%s\": file version %d, "
620 "current version %d, minimal supported version %d",
621 RelationGetRelationName(rel),
622 metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION)));
623
624 /* if no root page initialized yet, fail */
625 if (metad->btm_root == P_NONE)
626 {
627 _bt_relbuf(rel, metabuf);
628 return InvalidBuffer;
629 }
630
631 rootblkno = metad->btm_root;
632 rootlevel = metad->btm_level;
633
634 /*
635 * We are done with the metapage; arrange to release it via first
636 * _bt_relandgetbuf call
637 */
638 rootbuf = metabuf;
639
640 for (;;)
641 {
642 rootbuf = _bt_relandgetbuf(rel, rootbuf, rootblkno, BT_READ);
643 rootpage = BufferGetPage(rootbuf);
644 rootopaque = BTPageGetOpaque(rootpage);
645
646 if (!P_IGNORE(rootopaque))
647 break;
648
649 /* it's dead, Jim. step right one page */
650 if (P_RIGHTMOST(rootopaque))
651 elog(ERROR, "no live root page found in index \"%s\"",
652 RelationGetRelationName(rel));
653 rootblkno = rootopaque->btpo_next;
654 }
655
656 if (rootopaque->btpo_level != rootlevel)
657 elog(ERROR, "root page %u of index \"%s\" has level %u, expected %u",
658 rootblkno, RelationGetRelationName(rel),
659 rootopaque->btpo_level, rootlevel);
660
661 return rootbuf;
662}
663
664/*
665 * _bt_getrootheight() -- Get the height of the btree search tree.
666 *
667 * We return the level (counting from zero) of the current fast root.
668 * This represents the number of tree levels we'd have to descend through
669 * to start any btree index search.
670 *
671 * This is used by the planner for cost-estimation purposes. Since it's
672 * only an estimate, slightly-stale data is fine, hence we don't worry
673 * about updating previously cached data.
674 */
675int
676_bt_getrootheight(Relation rel)
677{
678 BTMetaPageData *metad;
679
680 if (rel->rd_amcache == NULL)
681 {
682 Buffer metabuf;
683
684 metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
685 metad = _bt_getmeta(rel, metabuf);
686
687 /*
688 * If there's no root page yet, _bt_getroot() doesn't expect a cache
689 * to be made, so just stop here and report the index height is zero.
690 * (XXX perhaps _bt_getroot() should be changed to allow this case.)
691 */
692 if (metad->btm_root == P_NONE)
693 {
694 _bt_relbuf(rel, metabuf);
695 return 0;
696 }
697
698 /*
699 * Cache the metapage data for next time
700 */
701 rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
702 sizeof(BTMetaPageData));
703 memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));
704 _bt_relbuf(rel, metabuf);
705 }
706
707 /* Get cached page */
708 metad = (BTMetaPageData *) rel->rd_amcache;
709 /* We shouldn't have cached it if any of these fail */
710 Assert(metad->btm_magic == BTREE_MAGIC);
711 Assert(metad->btm_version >= BTREE_MIN_VERSION);
712 Assert(metad->btm_version <= BTREE_VERSION);
713 Assert(!metad->btm_allequalimage ||
714 metad->btm_version > BTREE_NOVAC_VERSION);
715 Assert(metad->btm_fastroot != P_NONE);
716
717 return metad->btm_fastlevel;
718}
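/*
 * Added usage sketch (not part of the original file): cost estimation
 * treats the returned level as the number of page reads needed to descend
 * to a leaf; a hypothetical caller might do something like
 *
 *		int		height = _bt_getrootheight(indexrel);
 *
 *		descent_pages = height + 1;		(internal levels plus the leaf)
 *
 * where "indexrel" and "descent_pages" are illustrative names only.
 */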
719
720/*
721 * _bt_metaversion() -- Get version/status info from metapage.
722 *
723 * Sets caller's *heapkeyspace and *allequalimage arguments using data
724 * from the B-Tree metapage (could be locally-cached version). This
725 * information needs to be stashed in insertion scankey, so we provide a
726 * single function that fetches both at once.
727 *
728 * This is used to determine the rules that must be used to descend a
729 * btree. Version 4 indexes treat heap TID as a tiebreaker attribute.
730 * pg_upgrade'd version 3 indexes need extra steps to preserve reasonable
731 * performance when inserting a new BTScanInsert-wise duplicate tuple
732 * among many leaf pages already full of such duplicates.
733 *
734 * Also sets allequalimage field, which indicates whether or not it is
735 * safe to apply deduplication. We rely on the assumption that
736 * btm_allequalimage will be zero'ed on heapkeyspace indexes that were
737 * pg_upgrade'd from Postgres 12.
738 */
739void
740_bt_metaversion(Relation rel, bool *heapkeyspace, bool *allequalimage)
741{
742 BTMetaPageData *metad;
743
744 if (rel->rd_amcache == NULL)
745 {
746 Buffer metabuf;
747
748 metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
749 metad = _bt_getmeta(rel, metabuf);
750
751 /*
752 * If there's no root page yet, _bt_getroot() doesn't expect a cache
753 * to be made, so just stop here. (XXX perhaps _bt_getroot() should
754 * be changed to allow this case.)
755 */
756 if (metad->btm_root == P_NONE)
757 {
758 *heapkeyspace = metad->btm_version > BTREE_NOVAC_VERSION;
759 *allequalimage = metad->btm_allequalimage;
760
761 _bt_relbuf(rel, metabuf);
762 return;
763 }
764
765 /*
766 * Cache the metapage data for next time
767 *
768 * An on-the-fly version upgrade performed by _bt_upgrademetapage()
769 * can change the nbtree version for an index without invalidating any
770 * local cache. This is okay because it can only happen when moving
771 * from version 2 to version 3, both of which are !heapkeyspace
772 * versions.
773 */
774 rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
775 sizeof(BTMetaPageData));
776 memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));
777 _bt_relbuf(rel, metabuf);
778 }
779
780 /* Get cached page */
781 metad = (BTMetaPageData *) rel->rd_amcache;
782 /* We shouldn't have cached it if any of these fail */
783 Assert(metad->btm_magic == BTREE_MAGIC);
784 Assert(metad->btm_version >= BTREE_MIN_VERSION);
785 Assert(metad->btm_version <= BTREE_VERSION);
786 Assert(!metad->btm_allequalimage ||
787 metad->btm_version > BTREE_NOVAC_VERSION);
788 Assert(metad->btm_fastroot != P_NONE);
789
790 *heapkeyspace = metad->btm_version > BTREE_NOVAC_VERSION;
791 *allequalimage = metad->btm_allequalimage;
792}
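/*
 * Added usage sketch (not part of the original file): _bt_mkscankey-style
 * callers stash both flags directly in the insertion scankey, e.g.
 *
 *		_bt_metaversion(rel, &key->heapkeyspace, &key->allequalimage);
 *
 * where "key" is a BTScanInsert being filled in by the caller.
 */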
793
794/*
795 * _bt_checkpage() -- Verify that a freshly-read page looks sane.
796 */
797void
798_bt_checkpage(Relation rel, Buffer buf)
799{
800 Page page = BufferGetPage(buf);
801
802 /*
803 * ReadBuffer verifies that every newly-read page passes
804 * PageHeaderIsValid, which means it either contains a reasonably sane
805 * page header or is all-zero. We have to defend against the all-zero
806 * case, however.
807 */
808 if (PageIsNew(page))
809 ereport(ERROR,
810 (errcode(ERRCODE_INDEX_CORRUPTED),
811 errmsg("index \"%s\" contains unexpected zero page at block %u",
812 RelationGetRelationName(rel),
813 BufferGetBlockNumber(buf)),
814 errhint("Please REINDEX it.")));
815
816 /*
817 * Additionally check that the special area looks sane.
818 */
819 if (PageGetSpecialSize(page) != MAXALIGN(sizeof(BTPageOpaqueData)))
820 ereport(ERROR,
821 (errcode(ERRCODE_INDEX_CORRUPTED),
822 errmsg("index \"%s\" contains corrupted page at block %u",
823 RelationGetRelationName(rel),
824 BufferGetBlockNumber(buf)),
825 errhint("Please REINDEX it.")));
826}
827
828/*
829 * _bt_getbuf() -- Get an existing block in a buffer, for read or write.
830 *
831 * The general rule in nbtree is that it's never okay to access a
832 * page without holding both a buffer pin and a buffer lock on
833 * the page's buffer.
834 *
835 * When this routine returns, the appropriate lock is set on the
836 * requested buffer and its reference count has been incremented
837 * (ie, the buffer is "locked and pinned"). Also, we apply
838 * _bt_checkpage to sanity-check the page, and perform Valgrind
839 * client requests that help Valgrind detect unsafe page accesses.
840 *
841 * Note: raw LockBuffer() calls are disallowed in nbtree; all
842 * buffer lock requests need to go through wrapper functions such
843 * as _bt_lockbuf().
844 */
845Buffer
846_bt_getbuf(Relation rel, BlockNumber blkno, int access)
847{
848 Buffer buf;
849
850 Assert(BlockNumberIsValid(blkno));
851
852 /* Read an existing block of the relation */
853 buf = ReadBuffer(rel, blkno);
854 _bt_lockbuf(rel, buf, access);
855 _bt_checkpage(rel, buf);
856
857 return buf;
858}
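/*
 * Added usage sketch (not part of the original file): the standard access
 * pattern pairs _bt_getbuf() with _bt_relbuf():
 *
 *		Buffer	buf = _bt_getbuf(rel, blkno, BT_READ);
 *		Page	page = BufferGetPage(buf);
 *		BTPageOpaque opaque = BTPageGetOpaque(page);
 *
 *		... examine the page while the pin and lock are held ...
 *
 *		_bt_relbuf(rel, buf);
 */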
859
860/*
861 * _bt_allocbuf() -- Allocate a new block/page.
862 *
863 * Returns a write-locked buffer containing an unallocated nbtree page.
864 *
865 * Callers are required to pass a valid heaprel. We need heaprel so that we
866 * can handle generating a snapshotConflictHorizon that makes reusing a page
867 * from the FSM safe for queries that may be running on standbys.
868 */
869Buffer
870_bt_allocbuf(Relation rel, Relation heaprel)
871{
872 Buffer buf;
873 BlockNumber blkno;
874 Page page;
875
876 Assert(heaprel != NULL);
877
878 /*
879 * First see if the FSM knows of any free pages.
880 *
881 * We can't trust the FSM's report unreservedly; we have to check that the
882 * page is still free. (For example, an already-free page could have been
883 * re-used between the time the last VACUUM scanned it and the time the
884 * VACUUM made its FSM updates.)
885 *
886 * In fact, it's worse than that: we can't even assume that it's safe to
887 * take a lock on the reported page. If somebody else has a lock on it,
888 * or even worse our own caller does, we could deadlock. (The own-caller
889 * scenario is actually not improbable. Consider an index on a serial or
890 * timestamp column. Nearly all splits will be at the rightmost page, so
891 * it's entirely likely that _bt_split will call us while holding a lock
892 * on the page most recently acquired from FSM. A VACUUM running
893 * concurrently with the previous split could well have placed that page
894 * back in FSM.)
895 *
896 * To get around that, we ask for only a conditional lock on the reported
897 * page. If we fail, then someone else is using the page, and we may
898 * reasonably assume it's not free. (If we happen to be wrong, the worst
899 * consequence is the page will be lost to use till the next VACUUM, which
900 * is no big problem.)
901 */
902 for (;;)
903 {
904 blkno = GetFreeIndexPage(rel);
905 if (blkno == InvalidBlockNumber)
906 break;
907 buf = ReadBuffer(rel, blkno);
908 if (_bt_conditionallockbuf(rel, buf))
909 {
910 page = BufferGetPage(buf);
911
912 /*
913 * It's possible to find an all-zeroes page in an index. For
914 * example, a backend might successfully extend the relation one
915 * page and then crash before it is able to make a WAL entry for
916 * adding the page. If we find a zeroed page then reclaim it
917 * immediately.
918 */
919 if (PageIsNew(page))
920 {
921 /* Okay to use page. Initialize and return it. */
922 _bt_pageinit(page, BufferGetPageSize(buf));
923 return buf;
924 }
925
926 if (BTPageIsRecyclable(page, heaprel))
927 {
928 /*
929 * If we are generating WAL for Hot Standby then create a WAL
930 * record that will allow us to conflict with queries running
931 * on standby, in case they have snapshots older than safexid
932 * value
933 */
935 {
936 xl_btree_reuse_page xlrec_reuse;
937
938 /*
939 * Note that we don't register the buffer with the record,
940 * because this operation doesn't modify the page (that
941 * already happened, back when VACUUM deleted the page).
942 * This record only exists to provide a conflict point for
943 * Hot Standby. See record REDO routine comments.
944 */
945 xlrec_reuse.locator = rel->rd_locator;
946 xlrec_reuse.block = blkno;
948 xlrec_reuse.isCatalogRel =
950
953
954 XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE);
955 }
956
957 /* Okay to use page. Re-initialize and return it. */
958 _bt_pageinit(page, BufferGetPageSize(buf));
959 return buf;
960 }
961 elog(DEBUG2, "FSM returned nonrecyclable page");
962 _bt_relbuf(rel, buf);
963 }
964 else
965 {
966 elog(DEBUG2, "FSM returned nonlockable page");
967 /* couldn't get lock, so just drop pin */
969 }
970 }
971
972 /*
973 * Extend the relation by one page. Need to use RBM_ZERO_AND_LOCK or we
974 * risk a race condition against btvacuumscan --- see comments therein.
975 * This forces us to repeat the valgrind request that _bt_lockbuf()
976 * otherwise would make, as we can't use _bt_lockbuf() without introducing
977 * a race.
978 */
980 if (!RelationUsesLocalBuffers(rel))
982
983 /* Initialize the new page before returning it */
984 page = BufferGetPage(buf);
985 Assert(PageIsNew(page));
987
988 return buf;
989}
990
991/*
992 * _bt_relandgetbuf() -- release a locked buffer and get another one.
993 *
994 * This is equivalent to _bt_relbuf followed by _bt_getbuf. Also, if obuf is
995 * InvalidBuffer then it reduces to just _bt_getbuf; allowing this case
996 * simplifies some callers.
997 *
998 * The original motivation for using this was to avoid two entries to the
999 * bufmgr when one would do. However, now it's mainly just a notational
1000 * convenience. The only case where it saves work over _bt_relbuf/_bt_getbuf
1001 * is when the target page is the same one already in the buffer.
1002 */
1003Buffer
1004_bt_relandgetbuf(Relation rel, Buffer obuf, BlockNumber blkno, int access)
1005{
1006 Buffer buf;
1007
1008 Assert(BlockNumberIsValid(blkno));
1009 if (BufferIsValid(obuf))
1010 _bt_unlockbuf(rel, obuf);
1011 buf = ReleaseAndReadBuffer(obuf, rel, blkno);
1012 _bt_lockbuf(rel, buf, access);
1013
1014 _bt_checkpage(rel, buf);
1015 return buf;
1016}
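/*
 * Added usage sketch (not part of the original file): stepping right along
 * a level is the typical use, mirroring the loop in _bt_getroot() above:
 *
 *		for (;;)
 *		{
 *			buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
 *			opaque = BTPageGetOpaque(BufferGetPage(buf));
 *			if (!P_IGNORE(opaque))
 *				break;
 *			blkno = opaque->btpo_next;
 *		}
 */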
1017
1018/*
1019 * _bt_relbuf() -- release a locked buffer.
1020 *
1021 * Lock and pin (refcount) are both dropped.
1022 */
1023void
1024_bt_relbuf(Relation rel, Buffer buf)
1025{
1026 _bt_unlockbuf(rel, buf);
1028}
1029
1030/*
1031 * _bt_lockbuf() -- lock a pinned buffer.
1032 *
1033 * Lock is acquired without acquiring another pin. This is like a raw
1034 * LockBuffer() call, but performs extra steps needed by Valgrind.
1035 *
1036 * Note: Caller may need to call _bt_checkpage() with buf when pin on buf
1037 * wasn't originally acquired in _bt_getbuf() or _bt_relandgetbuf().
1038 */
1039void
1040_bt_lockbuf(Relation rel, Buffer buf, int access)
1041{
1042 /* LockBuffer() asserts that pin is held by this backend */
1044
1045 /*
1046 * It doesn't matter that _bt_unlockbuf() won't get called in the event of
1047 * an nbtree error (e.g. a unique violation error). That won't cause
1048 * Valgrind false positives.
1049 *
1050 * The nbtree client requests are superimposed on top of the bufmgr.c
1051 * buffer pin client requests. In the event of an nbtree error the buffer
1052 * will certainly get marked as defined when the backend once again
1053 * acquires its first pin on the buffer. (Of course, if the backend never
1054 * touches the buffer again then it doesn't matter that it remains
1055 * non-accessible to Valgrind.)
1056 *
1057 * Note: When an IndexTuple C pointer gets computed using an ItemId read
1058 * from a page while a lock was held, the C pointer becomes unsafe to
1059 * dereference forever as soon as the lock is released. Valgrind can only
1060 * detect cases where the pointer gets dereferenced with no _current_
1061 * lock/pin held, though.
1062 */
1063 if (!RelationUsesLocalBuffers(rel))
1065}
1066
1067/*
1068 * _bt_unlockbuf() -- unlock a pinned buffer.
1069 */
1070void
1071_bt_unlockbuf(Relation rel, Buffer buf)
1072{
1073 /*
1074 * Buffer is pinned and locked, which means that it is expected to be
1075 * defined and addressable. Check that proactively.
1076 */
1078
1079 /* LockBuffer() asserts that pin is held by this backend */
1081
1082 if (!RelationUsesLocalBuffers(rel))
1084}
1085
1086/*
1087 * _bt_conditionallockbuf() -- conditionally BT_WRITE lock pinned
1088 * buffer.
1089 *
1090 * Note: Caller may need to call _bt_checkpage() with buf when pin on buf
1091 * wasn't originally acquired in _bt_getbuf() or _bt_relandgetbuf().
1092 */
1093bool
1094_bt_conditionallockbuf(Relation rel, Buffer buf)
1095{
1096 /* ConditionalLockBuffer() asserts that pin is held by this backend */
1098 return false;
1099
1100 if (!RelationUsesLocalBuffers(rel))
1102
1103 return true;
1104}
1105
1106/*
1107 * _bt_upgradelockbufcleanup() -- upgrade lock to a full cleanup lock.
1108 */
1109void
1110_bt_upgradelockbufcleanup(Relation rel, Buffer buf)
1111{
1112 /*
1113 * Buffer is pinned and locked, which means that it is expected to be
1114 * defined and addressable. Check that proactively.
1115 */
1117
1118 /* LockBuffer() asserts that pin is held by this backend */
1121}
1122
1123/*
1124 * _bt_pageinit() -- Initialize a new page.
1125 *
1126 * On return, the page header is initialized; data space is empty;
1127 * special space is zeroed out.
1128 */
1129void
1130_bt_pageinit(Page page, Size size)
1131{
1132 PageInit(page, size, sizeof(BTPageOpaqueData));
1133}
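/*
 * Added note (not part of the original file): PageInit() rounds the
 * requested special-space size up to a MAXALIGN boundary, which is why
 * _bt_checkpage() compares PageGetSpecialSize(page) against
 * MAXALIGN(sizeof(BTPageOpaqueData)) rather than the raw struct size.
 */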
1134
1135/*
1136 * Delete item(s) from a btree leaf page during VACUUM.
1137 *
1138 * This routine assumes that the caller already has a full cleanup lock on
1139 * the buffer. Also, the given deletable and updatable arrays *must* be
1140 * sorted in ascending order.
1141 *
1142 * Routine deals with deleting TIDs when some (but not all) of the heap TIDs
1143 * in an existing posting list item are to be removed. This works by
1144 * updating/overwriting an existing item with caller's new version of the item
1145 * (a version that lacks the TIDs that are to be deleted).
1146 *
1147 * We record VACUUMs and b-tree deletes differently in WAL. Deletes must
1148 * generate their own snapshotConflictHorizon directly from the tableam,
1149 * whereas VACUUMs rely on the initial VACUUM table scan performing
1150 * WAL-logging that takes care of the issue for the table's indexes
1151 * indirectly. Also, we remove the VACUUM cycle ID from pages, which b-tree
1152 * deletes don't do.
1153 */
1154void
1155_bt_delitems_vacuum(Relation rel, Buffer buf,
1156 OffsetNumber *deletable, int ndeletable,
1157 BTVacuumPosting *updatable, int nupdatable)
1158{
1159 Page page = BufferGetPage(buf);
1160 BTPageOpaque opaque;
1161 bool needswal = RelationNeedsWAL(rel);
1162 char *updatedbuf = NULL;
1163 Size updatedbuflen = 0;
1164 OffsetNumber updatedoffsets[MaxIndexTuplesPerPage];
1165
1166 /* Shouldn't be called unless there's something to do */
1167 Assert(ndeletable > 0 || nupdatable > 0);
1168
1169 /* Generate new version of posting lists without deleted TIDs */
1170 if (nupdatable > 0)
1171 updatedbuf = _bt_delitems_update(updatable, nupdatable,
1172 updatedoffsets, &updatedbuflen,
1173 needswal);
1174
1175 /* No ereport(ERROR) until changes are logged */
1177
1178 /*
1179 * Handle posting tuple updates.
1180 *
1181 * Deliberately do this before handling simple deletes. If we did it the
1182 * other way around (i.e. WAL record order -- simple deletes before
1183 * updates) then we'd have to make compensating changes to the 'updatable'
1184 * array of offset numbers.
1185 *
1186 * PageIndexTupleOverwrite() won't unset each item's LP_DEAD bit when it
1187 * happens to already be set. It's important that we not interfere with
1188 * any future simple index tuple deletion operations.
1189 */
1190 for (int i = 0; i < nupdatable; i++)
1191 {
1192 OffsetNumber updatedoffset = updatedoffsets[i];
1193 IndexTuple itup;
1194 Size itemsz;
1195
1196 itup = updatable[i]->itup;
1197 itemsz = MAXALIGN(IndexTupleSize(itup));
1198 if (!PageIndexTupleOverwrite(page, updatedoffset, itup, itemsz))
1199 elog(PANIC, "failed to update partially dead item in block %u of index \"%s\"",
1200 BufferGetBlockNumber(buf), RelationGetRelationName(rel));
1201 }
1202
1203 /* Now handle simple deletes of entire tuples */
1204 if (ndeletable > 0)
1205 PageIndexMultiDelete(page, deletable, ndeletable);
1206
1207 /*
1208 * We can clear the vacuum cycle ID since this page has certainly been
1209 * processed by the current vacuum scan.
1210 */
1211 opaque = BTPageGetOpaque(page);
1212 opaque->btpo_cycleid = 0;
1213
1214 /*
1215 * Clear the BTP_HAS_GARBAGE page flag.
1216 *
1217 * This flag indicates the presence of LP_DEAD items on the page (though
1218 * not reliably). Note that we only rely on it with pg_upgrade'd
1219 * !heapkeyspace indexes. That's why clearing it here won't usually
1220 * interfere with simple index tuple deletion.
1221 */
1222 opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
1223
1224 MarkBufferDirty(buf);
1225
1226 /* XLOG stuff */
1227 if (needswal)
1228 {
1229 XLogRecPtr recptr;
1230 xl_btree_vacuum xlrec_vacuum;
1231
1232 xlrec_vacuum.ndeleted = ndeletable;
1233 xlrec_vacuum.nupdated = nupdatable;
1234
1237 XLogRegisterData(&xlrec_vacuum, SizeOfBtreeVacuum);
1238
1239 if (ndeletable > 0)
1240 XLogRegisterBufData(0, deletable,
1241 ndeletable * sizeof(OffsetNumber));
1242
1243 if (nupdatable > 0)
1244 {
1245 XLogRegisterBufData(0, updatedoffsets,
1246 nupdatable * sizeof(OffsetNumber));
1247 XLogRegisterBufData(0, updatedbuf, updatedbuflen);
1248 }
1249
1250 recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_VACUUM);
1251
1252 PageSetLSN(page, recptr);
1253 }
1254
1256
1257 /* can't leak memory here */
1258 if (updatedbuf != NULL)
1259 pfree(updatedbuf);
1260 /* free tuples allocated within _bt_delitems_update() */
1261 for (int i = 0; i < nupdatable; i++)
1262 pfree(updatable[i]->itup);
1263}
1264
1265/*
1266 * Delete item(s) from a btree leaf page during single-page cleanup.
1267 *
1268 * This routine assumes that the caller has pinned and write locked the
1269 * buffer. Also, the given deletable and updatable arrays *must* be sorted in
1270 * ascending order.
1271 *
1272 * Routine deals with deleting TIDs when some (but not all) of the heap TIDs
1273 * in an existing posting list item are to be removed. This works by
1274 * updating/overwriting an existing item with caller's new version of the item
1275 * (a version that lacks the TIDs that are to be deleted).
1276 *
1277 * This is nearly the same as _bt_delitems_vacuum as far as what it does to
1278 * the page, but it needs its own snapshotConflictHorizon and isCatalogRel
1279 * (from the tableam). This is used by the REDO routine to generate recovery
1280 * conflicts. The other difference is that only _bt_delitems_vacuum will
1281 * clear page's VACUUM cycle ID.
1282 */
1283static void
1284_bt_delitems_delete(Relation rel, Buffer buf,
1285 TransactionId snapshotConflictHorizon, bool isCatalogRel,
1286 OffsetNumber *deletable, int ndeletable,
1287 BTVacuumPosting *updatable, int nupdatable)
1288{
1289 Page page = BufferGetPage(buf);
1290 BTPageOpaque opaque;
1291 bool needswal = RelationNeedsWAL(rel);
1292 char *updatedbuf = NULL;
1293 Size updatedbuflen = 0;
1294 OffsetNumber updatedoffsets[MaxIndexTuplesPerPage];
1295
1296 /* Shouldn't be called unless there's something to do */
1297 Assert(ndeletable > 0 || nupdatable > 0);
1298
1299 /* Generate new versions of posting lists without deleted TIDs */
1300 if (nupdatable > 0)
1301 updatedbuf = _bt_delitems_update(updatable, nupdatable,
1302 updatedoffsets, &updatedbuflen,
1303 needswal);
1304
1305 /* No ereport(ERROR) until changes are logged */
1307
1308 /* Handle updates and deletes just like _bt_delitems_vacuum */
1309 for (int i = 0; i < nupdatable; i++)
1310 {
1311 OffsetNumber updatedoffset = updatedoffsets[i];
1312 IndexTuple itup;
1313 Size itemsz;
1314
1315 itup = updatable[i]->itup;
1316 itemsz = MAXALIGN(IndexTupleSize(itup));
1317 if (!PageIndexTupleOverwrite(page, updatedoffset, itup, itemsz))
1318 elog(PANIC, "failed to update partially dead item in block %u of index \"%s\"",
1319 BufferGetBlockNumber(buf), RelationGetRelationName(rel));
1320 }
1321
1322 if (ndeletable > 0)
1323 PageIndexMultiDelete(page, deletable, ndeletable);
1324
1325 /*
1326 * Unlike _bt_delitems_vacuum, we *must not* clear the vacuum cycle ID at
1327 * this point. The VACUUM command alone controls vacuum cycle IDs.
1328 */
1329 opaque = BTPageGetOpaque(page);
1330
1331 /*
1332 * Clear the BTP_HAS_GARBAGE page flag.
1333 *
1334 * This flag indicates the presence of LP_DEAD items on the page (though
1335 * not reliably). Note that we only rely on it with pg_upgrade'd
1336 * !heapkeyspace indexes.
1337 */
1338 opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
1339
1341
1342 /* XLOG stuff */
1343 if (needswal)
1344 {
1345 XLogRecPtr recptr;
1346 xl_btree_delete xlrec_delete;
1347
1348 xlrec_delete.snapshotConflictHorizon = snapshotConflictHorizon;
1349 xlrec_delete.ndeleted = ndeletable;
1350 xlrec_delete.nupdated = nupdatable;
1351 xlrec_delete.isCatalogRel = isCatalogRel;
1352
1355 XLogRegisterData(&xlrec_delete, SizeOfBtreeDelete);
1356
1357 if (ndeletable > 0)
1358 XLogRegisterBufData(0, deletable,
1359 ndeletable * sizeof(OffsetNumber));
1360
1361 if (nupdatable > 0)
1362 {
1363 XLogRegisterBufData(0, updatedoffsets,
1364 nupdatable * sizeof(OffsetNumber));
1365 XLogRegisterBufData(0, updatedbuf, updatedbuflen);
1366 }
1367
1368 recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE);
1369
1370 PageSetLSN(page, recptr);
1371 }
1372
1374
1375 /* can't leak memory here */
1376 if (updatedbuf != NULL)
1377 pfree(updatedbuf);
1378 /* free tuples allocated within _bt_delitems_update() */
1379 for (int i = 0; i < nupdatable; i++)
1380 pfree(updatable[i]->itup);
1381}
1382
1383/*
1384 * Set up state needed to delete TIDs from posting list tuples via "updating"
1385 * the tuple. Performs steps common to both _bt_delitems_vacuum and
1386 * _bt_delitems_delete. These steps must take place before each function's
1387 * critical section begins.
1388 *
1389 * updatable and nupdatable are inputs, though note that we will use
1390 * _bt_update_posting() to replace the original itup with a pointer to a final
1391 * version in palloc()'d memory. Caller should free the tuples when its done.
1392 *
1393 * The first nupdatable entries from updatedoffsets are set to the page offset
1394 * number for posting list tuples that caller updates. This is mostly useful
1395 * because caller may need to WAL-log the page offsets (though we always do
1396 * this for caller out of convenience).
1397 *
1398 * Returns buffer consisting of an array of xl_btree_update structs that
1399 * describe the steps we perform here for caller (though only when needswal is
1400 * true). Also sets *updatedbuflen to the final size of the buffer. This
1401 * buffer is used by caller when WAL logging is required.
1402 */
1403static char *
1404_bt_delitems_update(BTVacuumPosting *updatable, int nupdatable,
1405 OffsetNumber *updatedoffsets, Size *updatedbuflen,
1406 bool needswal)
1407{
1408 char *updatedbuf = NULL;
1409 Size buflen = 0;
1410
1411 /* Shouldn't be called unless there's something to do */
1412 Assert(nupdatable > 0);
1413
1414 for (int i = 0; i < nupdatable; i++)
1415 {
1416 BTVacuumPosting vacposting = updatable[i];
1417 Size itemsz;
1418
1419 /* Replace work area IndexTuple with updated version */
1420 _bt_update_posting(vacposting);
1421
1422 /* Keep track of size of xl_btree_update for updatedbuf in passing */
1423 itemsz = SizeOfBtreeUpdate + vacposting->ndeletedtids * sizeof(uint16);
1424 buflen += itemsz;
1425
1426 /* Build updatedoffsets buffer in passing */
1427 updatedoffsets[i] = vacposting->updatedoffset;
1428 }
1429
1430 /* XLOG stuff */
1431 if (needswal)
1432 {
1433 Size offset = 0;
1434
1435 /* Allocate, set final size for caller */
1436 updatedbuf = palloc(buflen);
1437 *updatedbuflen = buflen;
1438 for (int i = 0; i < nupdatable; i++)
1439 {
1440 BTVacuumPosting vacposting = updatable[i];
1441 Size itemsz;
1442 xl_btree_update update;
1443
1444 update.ndeletedtids = vacposting->ndeletedtids;
1445 memcpy(updatedbuf + offset, &update.ndeletedtids,
1447 offset += SizeOfBtreeUpdate;
1448
1449 itemsz = update.ndeletedtids * sizeof(uint16);
1450 memcpy(updatedbuf + offset, vacposting->deletetids, itemsz);
1451 offset += itemsz;
1452 }
1453 }
1454
1455 return updatedbuf;
1456}
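/*
 * Added worked example (not part of the original file): a posting list
 * tuple that loses 3 TIDs contributes one xl_btree_update entry of
 * SizeOfBtreeUpdate + 3 * sizeof(uint16) bytes to updatedbuf -- the fixed
 * header followed by three uint16 deletetids offsets.
 */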
1457
1458/*
1459 * Comparator used by _bt_delitems_delete_check() to restore deltids array
1460 * back to its original leaf-page-wise sort order
1461 */
1462static int
1463_bt_delitems_cmp(const void *a, const void *b)
1464{
1465 TM_IndexDelete *indexdelete1 = (TM_IndexDelete *) a;
1466 TM_IndexDelete *indexdelete2 = (TM_IndexDelete *) b;
1467
1468 Assert(indexdelete1->id != indexdelete2->id);
1469
1470 return pg_cmp_s16(indexdelete1->id, indexdelete2->id);
1471}
1472
1473/*
1474 * Try to delete item(s) from a btree leaf page during single-page cleanup.
1475 *
1476 * nbtree interface to table_index_delete_tuples(). Deletes a subset of index
1477 * tuples from caller's deltids array: those whose TIDs are found safe to
1478 * delete by the tableam (or already marked LP_DEAD in index, and so already
1479 * known to be deletable by our simple index deletion caller). We physically
1480 * delete index tuples from buf leaf page last of all (for index tuples where
1481 * that is known to be safe following our table_index_delete_tuples() call).
1482 *
1483 * Simple index deletion caller only includes TIDs from index tuples marked
1484 * LP_DEAD, as well as extra TIDs it found on the same leaf page that can be
1485 * included without increasing the total number of distinct table blocks for
1486 * the deletion operation as a whole. This approach often allows us to delete
1487 * some extra index tuples that were practically free for tableam to check in
1488 * passing (when they actually turn out to be safe to delete). It probably
1489 * only makes sense for the tableam to go ahead with these extra checks when
1490 * it is block-oriented (otherwise the checks probably won't be practically
1491 * free, which we rely on). The tableam interface requires the tableam side
1492 * to handle the problem, though, so this is okay (we as an index AM are free
1493 * to make the simplifying assumption that all tableams must be block-based).
1494 *
1495 * Bottom-up index deletion caller provides all the TIDs from the leaf page,
1496 * without expecting that tableam will check most of them. The tableam has
1497 * considerable discretion around which entries/blocks it checks. Our role in
1498 * costing the bottom-up deletion operation is strictly advisory.
1499 *
1500 * Note: Caller must have added deltids entries (i.e. entries that go in
1501 * delstate's main array) in leaf-page-wise order: page offset number order,
1502 * TID order among entries taken from the same posting list tuple (tiebreak on
1503 * TID). This order is convenient to work with here.
1504 *
1505 * Note: We also rely on the id field of each deltids element "capturing" this
1506 * original leaf-page-wise order. That is, we expect to be able to get back
1507 * to the original leaf-page-wise order just by sorting deltids on the id
1508 * field (tableam will sort deltids for its own reasons, so we'll need to put
1509 * it back in leaf-page-wise order afterwards).
1510 */
1511void
1512_bt_delitems_delete_check(Relation rel, Buffer buf, Relation heapRel,
1513 TM_IndexDeleteOp *delstate)
1514{
1515 Page page = BufferGetPage(buf);
1516 TransactionId snapshotConflictHorizon;
1517 bool isCatalogRel;
1518 OffsetNumber postingidxoffnum = InvalidOffsetNumber;
1519 int ndeletable = 0,
1520 nupdatable = 0;
1521 OffsetNumber deletable[MaxIndexTuplesPerPage];
1522 BTVacuumPosting updatable[MaxIndexTuplesPerPage];
1523
1524 /* Use tableam interface to determine which tuples to delete first */
1525 snapshotConflictHorizon = table_index_delete_tuples(heapRel, delstate);
1526 isCatalogRel = RelationIsAccessibleInLogicalDecoding(heapRel);
1527
1528 /* Should not WAL-log snapshotConflictHorizon unless it's required */
1529 if (!XLogStandbyInfoActive())
1530 snapshotConflictHorizon = InvalidTransactionId;
1531
1532 /*
1533 * Construct a leaf-page-wise description of what _bt_delitems_delete()
1534 * needs to do to physically delete index tuples from the page.
1535 *
1536 * Must sort deltids array to restore leaf-page-wise order (original order
1537 * before call to tableam). This is the order that the loop expects.
1538 *
1539 * Note that deltids array might be a lot smaller now. It might even have
1540 * no entries at all (with bottom-up deletion caller), in which case there
1541 * is nothing left to do.
1542 */
1543 qsort(delstate->deltids, delstate->ndeltids, sizeof(TM_IndexDelete),
1545 if (delstate->ndeltids == 0)
1546 {
1547 Assert(delstate->bottomup);
1548 return;
1549 }
1550
1551 /* We definitely have to delete at least one index tuple (or one TID) */
1552 for (int i = 0; i < delstate->ndeltids; i++)
1553 {
1554 TM_IndexStatus *dstatus = delstate->status + delstate->deltids[i].id;
1555 OffsetNumber idxoffnum = dstatus->idxoffnum;
1556 ItemId itemid = PageGetItemId(page, idxoffnum);
1557 IndexTuple itup = (IndexTuple) PageGetItem(page, itemid);
1558 int nestedi,
1559 nitem;
1560 BTVacuumPosting vacposting;
1561
1562 Assert(OffsetNumberIsValid(idxoffnum));
1563
1564 if (idxoffnum == postingidxoffnum)
1565 {
1566 /*
1567 * This deltid entry is a TID from a posting list tuple that has
1568 * already been completely processed
1569 */
1570 Assert(BTreeTupleIsPosting(itup));
1571 Assert(ItemPointerCompare(BTreeTupleGetHeapTID(itup),
1572 &delstate->deltids[i].tid) < 0);
1573 Assert(ItemPointerCompare(BTreeTupleGetMaxHeapTID(itup),
1574 &delstate->deltids[i].tid) >= 0);
1575 continue;
1576 }
1577
1578 if (!BTreeTupleIsPosting(itup))
1579 {
1580 /* Plain non-pivot tuple */
1581 Assert(ItemPointerEquals(&itup->t_tid, &delstate->deltids[i].tid));
1582 if (dstatus->knowndeletable)
1583 deletable[ndeletable++] = idxoffnum;
1584 continue;
1585 }
1586
1587 /*
1588 * itup is a posting list tuple whose lowest deltids entry (which may
1589 * or may not be for the first TID from itup) is considered here now.
1590 * We should process all of the deltids entries for the posting list
1591 * together now, though (not just the lowest). Remember to skip over
1592 * later itup-related entries during later iterations of outermost
1593 * loop.
1594 */
1595 postingidxoffnum = idxoffnum; /* Remember work in outermost loop */
1596 nestedi = i; /* Initialize for first itup deltids entry */
1597 vacposting = NULL; /* Describes final action for itup */
1598 nitem = BTreeTupleGetNPosting(itup);
1599 for (int p = 0; p < nitem; p++)
1600 {
1601 ItemPointer ptid = BTreeTupleGetPostingN(itup, p);
1602 int ptidcmp = -1;
1603
1604 /*
1605 * This nested loop reuses work across ptid TIDs taken from itup.
1606 * We take advantage of the fact that both itup's TIDs and deltids
1607 * entries (within a single itup/posting list grouping) must both
1608 * be in ascending TID order.
1609 */
1610 for (; nestedi < delstate->ndeltids; nestedi++)
1611 {
1612 TM_IndexDelete *tcdeltid = &delstate->deltids[nestedi];
1613 TM_IndexStatus *tdstatus = (delstate->status + tcdeltid->id);
1614
1615 /* Stop once we get past all itup related deltids entries */
1616 Assert(tdstatus->idxoffnum >= idxoffnum);
1617 if (tdstatus->idxoffnum != idxoffnum)
1618 break;
1619
1620 /* Skip past non-deletable itup related entries up front */
1621 if (!tdstatus->knowndeletable)
1622 continue;
1623
1624 /* Entry is first partial ptid match (or an exact match)? */
1625 ptidcmp = ItemPointerCompare(&tcdeltid->tid, ptid);
1626 if (ptidcmp >= 0)
1627 {
1628 /* Greater than or equal (partial or exact) match... */
1629 break;
1630 }
1631 }
1632
1633 /* ...exact ptid match to a deletable deltids entry? */
1634 if (ptidcmp != 0)
1635 continue;
1636
1637 /* Exact match for deletable deltids entry -- ptid gets deleted */
1638 if (vacposting == NULL)
1639 {
1640 vacposting = palloc(offsetof(BTVacuumPostingData, deletetids) +
1641 nitem * sizeof(uint16));
1642 vacposting->itup = itup;
1643 vacposting->updatedoffset = idxoffnum;
1644 vacposting->ndeletedtids = 0;
1645 }
1646 vacposting->deletetids[vacposting->ndeletedtids++] = p;
1647 }
1648
1649 /* Final decision on itup, a posting list tuple */
1650
1651 if (vacposting == NULL)
1652 {
1653 /* No TIDs to delete from itup -- do nothing */
1654 }
1655 else if (vacposting->ndeletedtids == nitem)
1656 {
1657 /* Straight delete of itup (to delete all TIDs) */
1658 deletable[ndeletable++] = idxoffnum;
1659 /* Turns out we won't need granular information */
1660 pfree(vacposting);
1661 }
1662 else
1663 {
1664 /* Delete some (but not all) TIDs from itup */
1665 Assert(vacposting->ndeletedtids > 0 &&
1666 vacposting->ndeletedtids < nitem);
1667 updatable[nupdatable++] = vacposting;
1668 }
1669 }
1670
1671 /* Physically delete tuples (or TIDs) using deletable (or updatable) */
1672 _bt_delitems_delete(rel, buf, snapshotConflictHorizon, isCatalogRel,
1673 deletable, ndeletable, updatable, nupdatable);
1674
1675 /* be tidy */
1676 for (int i = 0; i < nupdatable; i++)
1677 pfree(updatable[i]);
1678}
1679
1680/*
1681 * Check that leftsib page (the btpo_prev of target page) is not marked with
1682 * INCOMPLETE_SPLIT flag. Used during page deletion.
1683 *
1684 * Returning true indicates that page flag is set in leftsib (which is
1685 * definitely still the left sibling of target). When that happens, the
1686 * target doesn't have a downlink in parent, and the page deletion algorithm
1687 * isn't prepared to handle that. Deletion of the target page (or the whole
1688 * subtree that contains the target page) cannot take place.
1689 *
1690 * Caller should not have a lock on the target page itself, since pages on the
1691 * same level must always be locked left to right to avoid deadlocks.
1692 */
1693static bool
1694_bt_leftsib_splitflag(Relation rel, BlockNumber leftsib, BlockNumber target)
1695{
1696 Buffer buf;
1697 Page page;
1698 BTPageOpaque opaque;
1699 bool result;
1700
1701 /* Easy case: No left sibling */
1702 if (leftsib == P_NONE)
1703 return false;
1704
1705 buf = _bt_getbuf(rel, leftsib, BT_READ);
1706 page = BufferGetPage(buf);
1707 opaque = BTPageGetOpaque(page);
1708
1709 /*
1710 * If the left sibling was concurrently split, so that its next-pointer
1711 * doesn't point to the current page anymore, the split that created
1712 * target must be completed. Caller can reasonably expect that there will
1713 * be a downlink to the target page that it can relocate using its stack.
1714 * (We don't allow splitting an incompletely split page again until the
1715 * previous split has been completed.)
1716 */
1717 result = (opaque->btpo_next == target && P_INCOMPLETE_SPLIT(opaque));
1718 _bt_relbuf(rel, buf);
1719
1720 return result;
1721}
1722
1723/*
1724 * Check that leafrightsib page (the btpo_next of target leaf page) is not
1725 * marked with ISHALFDEAD flag. Used during page deletion.
1726 *
1727 * Returning true indicates that page flag is set in leafrightsib, so page
1728 * deletion cannot go ahead. Our caller is not prepared to deal with the case
1729 * where the parent page does not have a pivot tuple whose downlink points to
1730 * leafrightsib (due to an earlier interrupted VACUUM operation). It doesn't
1731 * seem worth going to the trouble of teaching our caller to deal with it.
1732 * The situation will be resolved after VACUUM finishes the deletion of the
1733 * half-dead page (when a future VACUUM operation reaches the target page
1734 * again).
1735 *
1736 * _bt_leftsib_splitflag() is called for both leaf pages and internal pages.
1737 * _bt_rightsib_halfdeadflag() is only called for leaf pages, though. This is
1738 * okay because of the restriction on deleting pages that are the rightmost
1739 * page of their parent (i.e. that such deletions can only take place when the
1740 * entire subtree must be deleted). The leaf level check made here will apply
1741 * to a right "cousin" leaf page rather than a simple right sibling leaf page
1742 * in cases where caller actually goes on to attempt deleting pages that are
1743 * above the leaf page. The right cousin leaf page is representative of the
1744 * left edge of the subtree to the right of the to-be-deleted subtree as a
1745 * whole, which is exactly the condition that our caller cares about.
1746 * (Besides, internal pages are never marked half-dead, so it isn't even
1747 * possible to _directly_ assess if an internal page is part of some other
1748 * to-be-deleted subtree.)
1749 */
1750static bool
1751_bt_rightsib_halfdeadflag(Relation rel, BlockNumber leafrightsib)
1752{
1753 Buffer buf;
1754 Page page;
1755 BTPageOpaque opaque;
1756 bool result;
1757
1758 Assert(leafrightsib != P_NONE);
1759
1760 buf = _bt_getbuf(rel, leafrightsib, BT_READ);
1761 page = BufferGetPage(buf);
1762 opaque = BTPageGetOpaque(page);
1763
1764 Assert(P_ISLEAF(opaque) && !P_ISDELETED(opaque));
1765 result = P_ISHALFDEAD(opaque);
1766 _bt_relbuf(rel, buf);
1767
1768 return result;
1769}
1770
1771/*
1772 * _bt_pagedel() -- Delete a leaf page from the b-tree, if legal to do so.
1773 *
1774 * This action unlinks the leaf page from the b-tree structure, removing all
1775 * pointers leading to it --- but not touching its own left and right links.
1776 * The page cannot be physically reclaimed right away, since other processes
1777 * may currently be trying to follow links leading to the page; they have to
1778 * be allowed to use its right-link to recover. See nbtree/README.
1779 *
1780 * On entry, the target buffer must be pinned and locked (either read or write
1781 * lock is OK). The page must be an empty leaf page, which may be half-dead
1782 * already (a half-dead page should only be passed to us when an earlier
1783 * VACUUM operation was interrupted, though). Note in particular that caller
1784 * should never pass a buffer containing an existing deleted page here. The
1785 * lock and pin on caller's buffer will be dropped before we return.
1786 *
1787 * Maintains bulk delete stats for caller, which are taken from vstate. We
1788 * need to cooperate closely with caller here so that whole VACUUM operation
1789 * reliably avoids any double counting of subsidiary-to-leafbuf pages that we
1790 * delete in passing. If such pages happen to be from a block number that is
1791 * ahead of the current scanblkno position, then caller is expected to count
1792 * them directly later on. It's simpler for us to understand caller's
1793 * requirements than it would be for caller to understand when or how a
1794 * deleted page became deleted after the fact.
1795 *
1796 * NOTE: this leaks memory. Rather than trying to clean up everything
1797 * carefully, it's better to run it in a temp context that can be reset
1798 * frequently.
1799 */
1800void
1801_bt_pagedel(Relation rel, Buffer leafbuf, BTVacState *vstate)
1802{
1803 BlockNumber rightsib;
1804 bool rightsib_empty;
1805 Page page;
1806 BTPageOpaque opaque;
1807
1808 /*
1809 * Save original leafbuf block number from caller. Only deleted blocks
1810 * that are <= scanblkno are added to bulk delete stat's pages_deleted
1811 * count.
1812 */
1813 BlockNumber scanblkno = BufferGetBlockNumber(leafbuf);
1814
1815 /*
1816 * "stack" is a search stack leading (approximately) to the target page.
1817 * It is initially NULL, but when iterating, we keep it to avoid
1818 * duplicated search effort.
1819 *
1820 * Also, when "stack" is not NULL, we have already checked that the
1821 * current page is not the right half of an incomplete split, i.e. the
1822 * left sibling does not have its INCOMPLETE_SPLIT flag set, including
1823 * when the current target page is to the right of caller's initial page
1824 * (the scanblkno page).
1825 */
1826 BTStack stack = NULL;
1827
1828 for (;;)
1829 {
1830 page = BufferGetPage(leafbuf);
1831 opaque = BTPageGetOpaque(page);
1832
1833 /*
1834 * Internal pages are never deleted directly, only as part of deleting
1835 * the whole subtree all the way down to leaf level.
1836 *
1837 * Also check for deleted pages here. Caller never passes us a fully
1838 * deleted page. Only VACUUM can delete pages, so there can't have
1839 * been a concurrent deletion. Assume that we reached any deleted
1840 * page encountered here by following a sibling link, and that the
1841 * index is corrupt.
1842 */
1843 Assert(!P_ISDELETED(opaque));
1844 if (!P_ISLEAF(opaque) || P_ISDELETED(opaque))
1845 {
1846 /*
1847 * Pre-9.4 page deletion only marked internal pages as half-dead,
1848 * but now we only use that flag on leaf pages. The old algorithm
1849 * was never supposed to leave half-dead pages in the tree, it was
1850 * just a transient state, but it was nevertheless possible in
1851 * error scenarios. We don't know how to deal with them here. They
1852 * are harmless as far as searches are concerned, but inserts
1853 * into the deleted keyspace could add out-of-order downlinks in
1854 * the upper levels. Log a notice, hopefully the admin will notice
1855 * and reindex.
1856 */
1857 if (P_ISHALFDEAD(opaque))
1858 ereport(LOG,
1859 (errcode(ERRCODE_INDEX_CORRUPTED),
1860 errmsg("index \"%s\" contains a half-dead internal page",
1861 RelationGetRelationName(rel)),
1862 errhint("This can be caused by an interrupted VACUUM in version 9.3 or older, before upgrade. Please REINDEX it.")));
1863
1864 if (P_ISDELETED(opaque))
1865 ereport(LOG,
1866 (errcode(ERRCODE_INDEX_CORRUPTED),
1867 errmsg_internal("found deleted block %u while following right link from block %u in index \"%s\"",
1868 BufferGetBlockNumber(leafbuf),
1869 scanblkno,
1870 RelationGetRelationName(rel))));
1871
1872 _bt_relbuf(rel, leafbuf);
1873 return;
1874 }
1875
1876 /*
1877 * We can never delete rightmost pages nor root pages. While at it,
1878 * check that page is empty, since it's possible that the leafbuf page
1879 * was empty a moment ago, but has since had some inserts.
1880 *
1881 * To keep the algorithm simple, we also never delete an incompletely
1882 * split page (they should be rare enough that this doesn't make any
1883 * meaningful difference to disk usage):
1884 *
1885 * The INCOMPLETE_SPLIT flag on the page tells us if the page is the
1886 * left half of an incomplete split, but ensuring that it's not the
1887 * right half is more complicated. For that, we have to check that
1888 * the left sibling doesn't have its INCOMPLETE_SPLIT flag set using
1889 * _bt_leftsib_splitflag(). On the first iteration, we temporarily
1890 * release the lock on scanblkno/leafbuf, check the left sibling, and
1891 * construct a search stack to scanblkno. On subsequent iterations,
1892 * we know we stepped right from a page that passed these tests, so
1893 * it's OK.
1894 */
1895 if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) ||
1896 P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page) ||
1897 P_INCOMPLETE_SPLIT(opaque))
1898 {
1899 /* Should never fail to delete a half-dead page */
1900 Assert(!P_ISHALFDEAD(opaque));
1901
1902 _bt_relbuf(rel, leafbuf);
1903 return;
1904 }
1905
1906 /*
1907 * First, remove downlink pointing to the page (or a parent of the
1908 * page, if we are going to delete a taller subtree), and mark the
1909 * leafbuf page half-dead
1910 */
1911 if (!P_ISHALFDEAD(opaque))
1912 {
1913 /*
1914 * We need an approximate pointer to the page's parent page. We
1915 * use a variant of the standard search mechanism to search for
1916 * the page's high key; this will give us a link to either the
1917 * current parent or someplace to its left (if there are multiple
1918 * equal high keys, which is possible with !heapkeyspace indexes).
1919 *
1920 * Also check if this is the right-half of an incomplete split
1921 * (see comment above).
1922 */
1923 if (!stack)
1924 {
1925 BTScanInsert itup_key;
1926 ItemId itemid;
1927 IndexTuple targetkey;
1928 BlockNumber leftsib,
1929 leafblkno;
1930 Buffer sleafbuf;
1931
1932 itemid = PageGetItemId(page, P_HIKEY);
1933 targetkey = CopyIndexTuple((IndexTuple) PageGetItem(page, itemid));
1934
1935 leftsib = opaque->btpo_prev;
1936 leafblkno = BufferGetBlockNumber(leafbuf);
1937
1938 /*
1939 * To avoid deadlocks, we'd better drop the leaf page lock
1940 * before going further.
1941 */
1942 _bt_unlockbuf(rel, leafbuf);
1943
1944 /*
1945 * Check that the left sibling of leafbuf (if any) is not
1946 * marked with INCOMPLETE_SPLIT flag before proceeding
1947 */
1948 Assert(leafblkno == scanblkno);
1949 if (_bt_leftsib_splitflag(rel, leftsib, leafblkno))
1950 {
1951 ReleaseBuffer(leafbuf);
1952 return;
1953 }
1954
1955 /*
1956 * We need an insertion scan key, so build one.
1957 *
1958 * _bt_search searches for the leaf page that contains any
1959 * matching non-pivot tuples, but we need it to "search" for
1960 * the high key pivot from the page that we're set to delete.
1961 * Compensate for the mismatch by having _bt_search locate the
1962 * last position < equal-to-untruncated-prefix non-pivots.
1963 */
1964 itup_key = _bt_mkscankey(rel, targetkey);
1965
1966 /* Set up a BTLessStrategyNumber-like insertion scan key */
1967 itup_key->nextkey = false;
1968 itup_key->backward = true;
1969 stack = _bt_search(rel, NULL, itup_key, &sleafbuf, BT_READ);
1970 /* won't need a second lock or pin on leafbuf */
1971 _bt_relbuf(rel, sleafbuf);
1972
1973 /*
1974 * Re-lock the leaf page, and start over to use our stack
1975 * within _bt_mark_page_halfdead. We must do it that way
1976 * because it's possible that leafbuf can no longer be
1977 * deleted. We need to recheck.
1978 *
1979 * Note: We can't simply hold on to the sleafbuf lock instead,
1980 * because it's barely possible that sleafbuf is not the same
1981 * page as leafbuf. This happens when leafbuf split after our
1982 * original lock was dropped, but before _bt_search finished
1983 * its descent. We rely on the assumption that we'll find
1984 * leafbuf isn't safe to delete anymore in this scenario.
1985 * (Page deletion can cope with the stack being to the left of
1986 * leafbuf, but not to the right of leafbuf.)
1987 */
1988 _bt_lockbuf(rel, leafbuf, BT_WRITE);
1989 continue;
1990 }
1991
1992 /*
1993 * See if it's safe to delete the leaf page, and determine how
1994 * many parent/internal pages above the leaf level will be
1995 * deleted. If it's safe then _bt_mark_page_halfdead will also
1996 * perform the first phase of deletion, which includes marking the
1997 * leafbuf page half-dead.
1998 */
1999 Assert(P_ISLEAF(opaque) && !P_IGNORE(opaque));
2000 if (!_bt_mark_page_halfdead(rel, vstate->info->heaprel, leafbuf,
2001 stack))
2002 {
2003 _bt_relbuf(rel, leafbuf);
2004 return;
2005 }
2006 }
2007 else
2008 {
2009 INJECTION_POINT("nbtree-finish-half-dead-page-vacuum", NULL);
2010 }
2011
2012 /*
2013 * Then unlink it from its siblings. Each call to
2014 * _bt_unlink_halfdead_page unlinks the topmost page from the subtree,
2015 * making it shallower. Iterate until the leafbuf page is deleted.
2016 */
2017 rightsib_empty = false;
2018 Assert(P_ISLEAF(opaque) && P_ISHALFDEAD(opaque));
2019 while (P_ISHALFDEAD(opaque))
2020 {
2021 /* Check for interrupts in _bt_unlink_halfdead_page */
2022 if (!_bt_unlink_halfdead_page(rel, leafbuf, scanblkno,
2023 &rightsib_empty, vstate))
2024 {
2025 /*
2026 * _bt_unlink_halfdead_page should never fail, since we
2027 * established that deletion is generally safe in
2028 * _bt_mark_page_halfdead -- index must be corrupt.
2029 *
2030 * Note that _bt_unlink_halfdead_page already released the
2031 * lock and pin on leafbuf for us.
2032 */
2033 Assert(false);
2034 return;
2035 }
2036 }
2037
2038 Assert(P_ISLEAF(opaque) && P_ISDELETED(opaque));
2039
2040 rightsib = opaque->btpo_next;
2041
2042 _bt_relbuf(rel, leafbuf);
2043
2044 /*
2045 * Check here, as calling loops will have locks held, preventing
2046 * interrupts from being processed.
2047 */
2048 CHECK_FOR_INTERRUPTS();
2049
2050 /*
2051 * The page has now been deleted. If its right sibling is completely
2052 * empty, it's possible that the reason we haven't deleted it earlier
2053 * is that it was the rightmost child of the parent. Now that we
2054 * removed the downlink for this page, the right sibling might now be
2055 * the only child of the parent, and could be removed. It would be
2056 * picked up by the next vacuum anyway, but might as well try to
2057 * remove it now, so loop back to process the right sibling.
2058 *
2059 * Note: This relies on the assumption that _bt_getstackbuf() will be
2060 * able to reuse our original descent stack with a different child
2061 * block (provided that the child block is to the right of the
2062 * original leaf page reached by _bt_search()). It will even update
2063 * the descent stack each time we loop around, avoiding repeated work.
2064 */
2065 if (!rightsib_empty)
2066 break;
2067
2068 leafbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
2069 }
2070}
2071
2072/*
2073 * First stage of page deletion.
2074 *
2075 * Establish the height of the to-be-deleted subtree with leafbuf at its
2076 * lowest level, remove the downlink to the subtree, and mark leafbuf
2077 * half-dead. The final to-be-deleted subtree is usually just leafbuf itself,
2078 * but may include additional internal pages (at most one per level of the
2079 * tree below the root).
2080 *
2081 * Caller must pass a valid heaprel, since it's just about possible that our
2082 * call to _bt_lock_subtree_parent will need to allocate a new index page to
2083 * complete a page split. Every call to _bt_allocbuf needs to pass a heaprel.
2084 *
2085 * Returns 'false' if leafbuf is unsafe to delete, usually because leafbuf is
2086 * the rightmost child of its parent (and parent has more than one downlink).
2087 * Returns 'true' when the first stage of page deletion completed
2088 * successfully.
2089 */
2090static bool
2091_bt_mark_page_halfdead(Relation rel, Relation heaprel, Buffer leafbuf,
2092 BTStack stack)
2093{
2094 BlockNumber leafblkno;
2095 BlockNumber leafrightsib;
2096 BlockNumber topparent;
2097 BlockNumber topparentrightsib;
2098 ItemId itemid;
2099 Page page;
2100 BTPageOpaque opaque;
2101 Buffer subtreeparent;
2102 OffsetNumber poffset;
2103 OffsetNumber nextoffset;
2104 IndexTuple itup;
2105 IndexTupleData trunctuple;
2106
2107 page = BufferGetPage(leafbuf);
2108 opaque = BTPageGetOpaque(page);
2109
2110 Assert(!P_RIGHTMOST(opaque) && !P_ISROOT(opaque) &&
2111 P_ISLEAF(opaque) && !P_IGNORE(opaque) &&
2112 P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page));
2113 Assert(heaprel != NULL);
2114
2115 /*
2116 * Save info about the leaf page.
2117 */
2118 leafblkno = BufferGetBlockNumber(leafbuf);
2119 leafrightsib = opaque->btpo_next;
2120
2121 /*
2122 * Before attempting to lock the parent page, check that the right sibling
2123 * is not in half-dead state. A half-dead right sibling would have no
2124 * downlink in the parent, which would be highly confusing later when we
2125 * delete the downlink. It would fail the "right sibling of target page
2126 * is also the next child in parent page" cross-check below.
2127 */
2128 if (_bt_rightsib_halfdeadflag(rel, leafrightsib))
2129 {
2130 elog(DEBUG1, "could not delete page %u because its right sibling %u is half-dead",
2131 leafblkno, leafrightsib);
2132 return false;
2133 }
2134
2135 /*
2136 * We cannot delete a page that is the rightmost child of its immediate
2137 * parent, unless it is the only child --- in which case the parent has to
2138 * be deleted too, and the same condition applies recursively to it. We
2139 * have to check this condition all the way up before trying to delete,
2140 * and lock the parent of the root of the to-be-deleted subtree (the
2141 * "subtree parent"). _bt_lock_subtree_parent() locks the subtree parent
2142 * for us. We remove the downlink to the "top parent" page (subtree root
2143 * page) from the subtree parent page below.
2144 *
2145 * Initialize topparent to be leafbuf page now. The final to-be-deleted
2146 * subtree is often a degenerate one page subtree consisting only of the
2147 * leafbuf page. When that happens, the leafbuf page is the final subtree
2148 * root page/top parent page.
2149 */
2150 topparent = leafblkno;
2151 topparentrightsib = leafrightsib;
2152 if (!_bt_lock_subtree_parent(rel, heaprel, leafblkno, stack,
2153 &subtreeparent, &poffset,
2154 &topparent, &topparentrightsib))
2155 return false;
2156
2157 page = BufferGetPage(subtreeparent);
2158 opaque = BTPageGetOpaque(page);
2159
2160#ifdef USE_ASSERT_CHECKING
2161
2162 /*
2163 * This is just an assertion because _bt_lock_subtree_parent should have
2164 * guaranteed tuple has the expected contents
2165 */
2166 itemid = PageGetItemId(page, poffset);
2167 itup = (IndexTuple) PageGetItem(page, itemid);
2168 Assert(BTreeTupleGetDownLink(itup) == topparent);
2169#endif
2170
2171 nextoffset = OffsetNumberNext(poffset);
2172 itemid = PageGetItemId(page, nextoffset);
2173 itup = (IndexTuple) PageGetItem(page, itemid);
2174
2175 /*
2176 * Check that the parent-page index items we're about to delete/overwrite
2177 * in subtree parent page contain what we expect. This can fail if the
2178 * index has become corrupt for some reason. When that happens we back
2179 * out of deletion of the leafbuf subtree. (This is just like the case
2180 * where _bt_lock_subtree_parent() cannot "re-find" leafbuf's downlink.)
2181 */
2182 if (BTreeTupleGetDownLink(itup) != topparentrightsib)
2183 {
2184 ereport(LOG,
2185 (errcode(ERRCODE_INDEX_CORRUPTED),
2186 errmsg_internal("right sibling %u of block %u is not next child %u of block %u in index \"%s\"",
2187 topparentrightsib, topparent,
2188 BTreeTupleGetDownLink(itup),
2189 BufferGetBlockNumber(subtreeparent),
2190 RelationGetRelationName(rel))));
2191
2192 _bt_relbuf(rel, subtreeparent);
2193 Assert(false);
2194 return false;
2195 }
2196
2197 /*
2198 * Any insert which would have gone on the leaf block will now go to its
2199 * right sibling. In other words, the key space moves right.
2200 */
2201 PredicateLockPageCombine(rel, leafblkno, leafrightsib);
2202
2203 /* No ereport(ERROR) until changes are logged */
2204 START_CRIT_SECTION();
2205
2206 /*
2207 * Update parent of subtree. We want to delete the downlink to the top
2208 * parent page/root of the subtree, and the *following* key. Easiest way
2209 * is to copy the right sibling's downlink over the downlink that points
2210 * to top parent page, and then delete the right sibling's original pivot
2211 * tuple.
2212 *
2213 * Lanin and Shasha make the key space move left when deleting a page,
2214 * whereas the key space moves right here. That's why we cannot simply
2215 * delete the pivot tuple with the downlink to the top parent page. See
2216 * nbtree/README.
2217 */
2218 page = BufferGetPage(subtreeparent);
2219 opaque = BTPageGetOpaque(page);
2220
2221 itemid = PageGetItemId(page, poffset);
2222 itup = (IndexTuple) PageGetItem(page, itemid);
2223 BTreeTupleSetDownLink(itup, topparentrightsib);
2224
2225 nextoffset = OffsetNumberNext(poffset);
2226 PageIndexTupleDelete(page, nextoffset);
2227
2228 /*
2229 * Mark the leaf page as half-dead, and stamp it with a link to the top
2230 * parent page. When the leaf page is also the top parent page, the link
2231 * is set to InvalidBlockNumber.
2232 */
2233 page = BufferGetPage(leafbuf);
2234 opaque = BTPageGetOpaque(page);
2235 opaque->btpo_flags |= BTP_HALF_DEAD;
2236
2237 Assert(PageGetMaxOffsetNumber(page) == P_HIKEY);
2238 MemSet(&trunctuple, 0, sizeof(IndexTupleData));
2238 MemSet(&trunctuple, 0, sizeof(IndexTupleData));
2239 trunctuple.t_info = sizeof(IndexTupleData);
2240 if (topparent != leafblkno)
2241 BTreeTupleSetTopParent(&trunctuple, topparent);
2242 else
2243 BTreeTupleSetTopParent(&trunctuple, InvalidBlockNumber);
2244
2245 if (!PageIndexTupleOverwrite(page, P_HIKEY, &trunctuple, IndexTupleSize(&trunctuple)))
2246 elog(ERROR, "could not overwrite high key in half-dead page");
2247
2248 /* Must mark buffers dirty before XLogInsert */
2249 MarkBufferDirty(subtreeparent);
2250 MarkBufferDirty(leafbuf);
2251
2252 /* XLOG stuff */
2253 if (RelationNeedsWAL(rel))
2254 {
2255 xl_btree_mark_page_halfdead xlrec;
2256 XLogRecPtr recptr;
2257
2258 xlrec.poffset = poffset;
2259 xlrec.leafblk = leafblkno;
2260 if (topparent != leafblkno)
2261 xlrec.topparent = topparent;
2262 else
2263 xlrec.topparent = InvalidBlockNumber;
2264
2265 XLogBeginInsert();
2266 XLogRegisterBuffer(0, leafbuf, REGBUF_WILL_INIT);
2267 XLogRegisterBuffer(1, subtreeparent, REGBUF_STANDARD);
2268
2269 page = BufferGetPage(leafbuf);
2270 opaque = BTPageGetOpaque(page);
2271 xlrec.leftblk = opaque->btpo_prev;
2272 xlrec.rightblk = opaque->btpo_next;
2273
2274 XLogRegisterData(&xlrec, SizeOfBtreeMarkPageHalfDead);
2275
2276 recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_MARK_PAGE_HALFDEAD);
2277
2278 page = BufferGetPage(subtreeparent);
2279 PageSetLSN(page, recptr);
2280 page = BufferGetPage(leafbuf);
2281 PageSetLSN(page, recptr);
2282 }
2283
2284 END_CRIT_SECTION();
2285
2286 _bt_relbuf(rel, subtreeparent);
2287 return true;
2288}
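
The downlink manipulation above (copy the right sibling's downlink over the one pointing to the top parent, then delete the right sibling's pivot) can be pictured on a flat array of pivot entries. The following is a minimal standalone sketch, with a hypothetical PivotEntry struct standing in for the real pivot tuples and page layout:

    #include <stdio.h>
    #include <string.h>

    typedef struct
    {
        int      sepkey;      /* separator key for this entry's key space */
        unsigned downlink;    /* child block this entry points to */
    } PivotEntry;

    /*
     * Collapse the entry at "poffset" with its right neighbor: the surviving
     * entry keeps its separator key, takes over the neighbor's downlink, and
     * the neighbor is deleted.  This mirrors the BTreeTupleSetDownLink() +
     * PageIndexTupleDelete() pair above.
     */
    static void
    collapse_downlink(PivotEntry *entries, int *nentries, int poffset)
    {
        entries[poffset].downlink = entries[poffset + 1].downlink;
        memmove(&entries[poffset + 1], &entries[poffset + 2],
                (size_t) (*nentries - poffset - 2) * sizeof(PivotEntry));
        (*nentries)--;
    }

    int
    main(void)
    {
        PivotEntry parent[] = {{10, 100}, {20, 200}, {30, 300}};
        int        nentries = 3;

        /* delete the subtree whose top parent is block 100 */
        collapse_downlink(parent, &nentries, 0);

        for (int i = 0; i < nentries; i++)
            printf("sepkey %d -> block %u\n", parent[i].sepkey, parent[i].downlink);
        return 0;
    }

The surviving entry keeps its own separator key, so the key space of the deleted subtree moves right, as the comment above notes (in contrast to Lanin and Shasha's leftward move).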
2289
2290/*
2291 * Second stage of page deletion.
2292 *
2293 * Unlinks a single page (in the subtree undergoing deletion) from its
2294 * siblings. Also marks the page deleted.
2295 *
2296 * To get rid of the whole subtree, including the leaf page itself, call here
2297 * until the leaf page is deleted. The original "top parent" established in
2298 * the first stage of deletion is deleted in the first call here, while the
2299 * leaf page is deleted in the last call here. Note that the leaf page itself
2300 * is often the initial top parent page.
2301 *
2302 * Returns 'false' if the page could not be unlinked (shouldn't happen). If
2303 * the right sibling of the current target page is empty, *rightsib_empty is
2304 * set to true, allowing caller to delete the target's right sibling page in
2305 * passing. Note that *rightsib_empty is only actually used by caller when
2306 * target page is leafbuf, following last call here for leafbuf/the subtree
2307 * containing leafbuf. (We always set *rightsib_empty for caller, just to be
2308 * consistent.)
2309 *
2310 * Must hold pin and lock on leafbuf at entry (read or write doesn't matter).
2311 * On success exit, we'll be holding pin and write lock. On failure exit,
2312 * we'll release both pin and lock before returning (we define it that way
2313 * to avoid having to reacquire a lock we already released).
2314 */
2315static bool
2316_bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, BlockNumber scanblkno,
2317 bool *rightsib_empty, BTVacState *vstate)
2318{
2319 BlockNumber leafblkno = BufferGetBlockNumber(leafbuf);
2320 IndexBulkDeleteResult *stats = vstate->stats;
2321 BlockNumber leafleftsib;
2322 BlockNumber leafrightsib;
2323 BlockNumber target;
2324 BlockNumber leftsib;
2325 BlockNumber rightsib;
2326 Buffer lbuf = InvalidBuffer;
2327 Buffer buf;
2328 Buffer rbuf;
2329 Buffer metabuf = InvalidBuffer;
2330 Page metapg = NULL;
2331 BTMetaPageData *metad = NULL;
2332 ItemId itemid;
2333 Page page;
2334 BTPageOpaque opaque;
2335 FullTransactionId safexid;
2336 bool rightsib_is_rightmost;
2337 uint32 targetlevel;
2338 IndexTuple leafhikey;
2339 BlockNumber leaftopparent;
2340
2341 page = BufferGetPage(leafbuf);
2342 opaque = BTPageGetOpaque(page);
2343
2344 Assert(P_ISLEAF(opaque) && !P_ISDELETED(opaque) && P_ISHALFDEAD(opaque));
2345
2346 /*
2347 * Remember some information about the leaf page.
2348 */
2349 itemid = PageGetItemId(page, P_HIKEY);
2350 leafhikey = (IndexTuple) PageGetItem(page, itemid);
2351 target = BTreeTupleGetTopParent(leafhikey);
2352 leafleftsib = opaque->btpo_prev;
2353 leafrightsib = opaque->btpo_next;
2354
2355 _bt_unlockbuf(rel, leafbuf);
2356
2357 INJECTION_POINT("nbtree-leave-page-half-dead", NULL);
2358
2359 /*
2360 * Check here, as calling loops will have locks held, preventing
2361 * interrupts from being processed.
2362 */
2363 CHECK_FOR_INTERRUPTS();
2364
2365 /* Unlink the current top parent of the subtree */
2366 if (!BlockNumberIsValid(target))
2367 {
2368 /* Target is leaf page (or leaf page is top parent, if you prefer) */
2369 target = leafblkno;
2370
2371 buf = leafbuf;
2372 leftsib = leafleftsib;
2373 targetlevel = 0;
2374 }
2375 else
2376 {
2377 /* Target is the internal page taken from leaf's top parent link */
2378 Assert(target != leafblkno);
2379
2380 /* Fetch the block number of the target's left sibling */
2381 buf = _bt_getbuf(rel, target, BT_READ);
2382 page = BufferGetPage(buf);
2383 opaque = BTPageGetOpaque(page);
2384 leftsib = opaque->btpo_prev;
2385 targetlevel = opaque->btpo_level;
2386 Assert(targetlevel > 0);
2387
2388 /*
2389 * To avoid deadlocks, we'd better drop the target page lock before
2390 * going further.
2391 */
2392 _bt_unlockbuf(rel, buf);
2393 }
2394
2395 /*
2396 * We have to lock the pages we need to modify in the standard order:
2397 * moving right, then up. Else we will deadlock against other writers.
2398 *
2399 * So, first lock the leaf page, if it's not the target. Then find and
2400 * write-lock the current left sibling of the target page. The sibling
2401 * that was current a moment ago could have split, so we may have to move
2402 * right.
2403 */
2404 if (target != leafblkno)
2405 _bt_lockbuf(rel, leafbuf, BT_WRITE);
2406 if (leftsib != P_NONE)
2407 {
2408 lbuf = _bt_getbuf(rel, leftsib, BT_WRITE);
2409 page = BufferGetPage(lbuf);
2410 opaque = BTPageGetOpaque(page);
2411 while (P_ISDELETED(opaque) || opaque->btpo_next != target)
2412 {
2413 bool leftsibvalid = true;
2414
2415 /*
2416 * Before we follow the link from the page that was the left
2417 * sibling mere moments ago, validate its right link. This
2418 * reduces the opportunities for the loop to fail to ever make any
2419 * progress in the presence of index corruption.
2420 *
2421 * Note: we rely on the assumption that there can only be one
2422 * vacuum process running at a time (against the same index).
2423 */
2424 if (P_RIGHTMOST(opaque) || P_ISDELETED(opaque) ||
2425 leftsib == opaque->btpo_next)
2426 leftsibvalid = false;
2427
2428 leftsib = opaque->btpo_next;
2429 _bt_relbuf(rel, lbuf);
2430
2431 if (!leftsibvalid)
2432 {
2433 /*
2434 * This is known to fail in the field; sibling link corruption
2435 * is relatively common. Press on with vacuuming rather than
2436 * just throwing an ERROR.
2437 */
2438 ereport(LOG,
2439 (errcode(ERRCODE_INDEX_CORRUPTED),
2440 errmsg_internal("valid left sibling for deletion target could not be located: "
2441 "left sibling %u of target %u with leafblkno %u and scanblkno %u on level %u of index \"%s\"",
2442 leftsib, target, leafblkno, scanblkno,
2443 targetlevel, RelationGetRelationName(rel))));
2444
2445 /* Must release all pins and locks on failure exit */
2447 if (target != leafblkno)
2448 _bt_relbuf(rel, leafbuf);
2449
2450 return false;
2451 }
2452
2453 CHECK_FOR_INTERRUPTS();
2454
2455 /* step right one page */
2456 lbuf = _bt_getbuf(rel, leftsib, BT_WRITE);
2457 page = BufferGetPage(lbuf);
2458 opaque = BTPageGetOpaque(page);
2459 }
2460 }
2461 else
2462 lbuf = InvalidBuffer;
2463
2464 /* Next write-lock the target page itself */
2465 _bt_lockbuf(rel, buf, BT_WRITE);
2466 page = BufferGetPage(buf);
2467 opaque = BTPageGetOpaque(page);
2468
2469 /*
2470 * Check page is still empty etc, else abandon deletion. This is just for
2471 * paranoia's sake; a half-dead page cannot resurrect because there can be
2472 * only one vacuum process running at a time.
2473 */
2474 if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque))
2475 elog(ERROR, "target page changed status unexpectedly in block %u of index \"%s\"",
2476 target, RelationGetRelationName(rel));
2477
2478 if (opaque->btpo_prev != leftsib)
2479 ereport(ERROR,
2480 (errcode(ERRCODE_INDEX_CORRUPTED),
2481 errmsg_internal("target page left link unexpectedly changed from %u to %u in block %u of index \"%s\"",
2482 leftsib, opaque->btpo_prev, target,
2483 RelationGetRelationName(rel))));
2484
2485 if (target == leafblkno)
2486 {
2487 if (P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page) ||
2488 !P_ISLEAF(opaque) || !P_ISHALFDEAD(opaque))
2489 elog(ERROR, "target leaf page changed status unexpectedly in block %u of index \"%s\"",
2490 target, RelationGetRelationName(rel));
2491
2492 /* Leaf page is also target page: don't set leaftopparent */
2493 leaftopparent = InvalidBlockNumber;
2494 }
2495 else
2496 {
2497 IndexTuple finaldataitem;
2498
2499 if (P_FIRSTDATAKEY(opaque) != PageGetMaxOffsetNumber(page) ||
2500 P_ISLEAF(opaque))
2501 elog(ERROR, "target internal page on level %u changed status unexpectedly in block %u of index \"%s\"",
2502 targetlevel, target, RelationGetRelationName(rel));
2503
2504 /* Target is internal: set leaftopparent for next call here... */
2505 itemid = PageGetItemId(page, P_FIRSTDATAKEY(opaque));
2506 finaldataitem = (IndexTuple) PageGetItem(page, itemid);
2507 leaftopparent = BTreeTupleGetDownLink(finaldataitem);
2508 /* ...except when it would be a redundant pointer-to-self */
2509 if (leaftopparent == leafblkno)
2510 leaftopparent = InvalidBlockNumber;
2511 }
2512
2513 /* No leaftopparent for level 0 (leaf page) or level 1 target */
2514 Assert(!BlockNumberIsValid(leaftopparent) || targetlevel > 1);
2515
2516 /*
2517 * And next write-lock the (current) right sibling.
2518 */
2519 rightsib = opaque->btpo_next;
2520 rbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
2521 page = BufferGetPage(rbuf);
2522 opaque = BTPageGetOpaque(page);
2523
2524 /*
2525 * Validate target's right sibling page. Its left link must point back to
2526 * the target page.
2527 */
2528 if (opaque->btpo_prev != target)
2529 {
2530 /*
2531 * This is known to fail in the field; sibling link corruption is
2532 * relatively common. Press on with vacuuming rather than just
2533 * throwing an ERROR (same approach used for left-sibling's-right-link
2534 * validation check a moment ago).
2535 */
2536 ereport(LOG,
2537 (errcode(ERRCODE_INDEX_CORRUPTED),
2538 errmsg_internal("right sibling's left-link doesn't match: "
2539 "right sibling %u of target %u with leafblkno %u "
2540 "and scanblkno %u spuriously links to non-target %u "
2541 "on level %u of index \"%s\"",
2542 rightsib, target, leafblkno,
2543 scanblkno, opaque->btpo_prev,
2544 targetlevel, RelationGetRelationName(rel))));
2545
2546 /* Must release all pins and locks on failure exit */
2547 if (BufferIsValid(lbuf))
2548 _bt_relbuf(rel, lbuf);
2549 _bt_relbuf(rel, rbuf);
2550 _bt_relbuf(rel, buf);
2551 if (target != leafblkno)
2552 _bt_relbuf(rel, leafbuf);
2553
2554 return false;
2555 }
2556
2557 rightsib_is_rightmost = P_RIGHTMOST(opaque);
2558 *rightsib_empty = (P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page));
2559
2560 /*
2561 * If we are deleting the next-to-last page on the target's level, then
2562 * the rightsib is a candidate to become the new fast root. (In theory, it
2563 * might be possible to push the fast root even further down, but the odds
2564 * of doing so are slim, and the locking considerations daunting.)
2565 *
2566 * We can safely acquire a lock on the metapage here --- see comments for
2567 * _bt_newlevel().
2568 */
2569 if (leftsib == P_NONE && rightsib_is_rightmost)
2570 {
2571 page = BufferGetPage(rbuf);
2572 opaque = BTPageGetOpaque(page);
2573 if (P_RIGHTMOST(opaque))
2574 {
2575 /* rightsib will be the only one left on the level */
2576 metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
2577 metapg = BufferGetPage(metabuf);
2578 metad = BTPageGetMeta(metapg);
2579
2580 /*
2581 * The expected case here is btm_fastlevel == targetlevel+1; if
2582 * the fastlevel is <= targetlevel, something is wrong, and we
2583 * choose to overwrite it to fix it.
2584 */
2585 if (metad->btm_fastlevel > targetlevel + 1)
2586 {
2587 /* no update wanted */
2588 _bt_relbuf(rel, metabuf);
2589 metabuf = InvalidBuffer;
2590 }
2591 }
2592 }
2593
2594 /*
2595 * Here we begin doing the deletion.
2596 */
2597
2598 /* No ereport(ERROR) until changes are logged */
2599 START_CRIT_SECTION();
2600
2601 /*
2602 * Update siblings' side-links. Note the target page's side-links will
2603 * continue to point to the siblings. Asserts here are just rechecking
2604 * things we already verified above.
2605 */
2606 if (BufferIsValid(lbuf))
2607 {
2608 page = BufferGetPage(lbuf);
2609 opaque = BTPageGetOpaque(page);
2610 Assert(opaque->btpo_next == target);
2611 opaque->btpo_next = rightsib;
2612 }
2613 page = BufferGetPage(rbuf);
2614 opaque = BTPageGetOpaque(page);
2615 Assert(opaque->btpo_prev == target);
2616 opaque->btpo_prev = leftsib;
2617
2618 /*
2619 * If we deleted a parent of the targeted leaf page, instead of the leaf
2620 * itself, update the leaf to point to the next remaining child in the
2621 * subtree.
2622 *
2623 * Note: We rely on the fact that a buffer pin on the leaf page has been
2624 * held since leafhikey was initialized. This is safe, though only
2625 * because the page was already half-dead at that point. The leaf page
2626 * cannot have been modified by any other backend during the period when
2627 * no lock was held.
2628 */
2629 if (target != leafblkno)
2630 BTreeTupleSetTopParent(leafhikey, leaftopparent);
2631
2632 /*
2633 * Mark the page itself deleted. It can be recycled when all current
2634 * transactions are gone. Storing GetTopTransactionId() would work, but
2635 * we're in VACUUM and would not otherwise have an XID. Having already
2636 * updated links to the target, ReadNextFullTransactionId() suffices as an
2637 * upper bound. Any scan having retained a now-stale link is advertising
2638 * in its PGPROC an xmin less than or equal to the value we read here. It
2639 * will continue to do so, holding back the xmin horizon, for the duration
2640 * of that scan.
2641 */
2642 page = BufferGetPage(buf);
2643 opaque = BTPageGetOpaque(page);
2644 Assert(P_ISHALFDEAD(opaque) || !P_ISLEAF(opaque));
2645
2646 /*
2647 * Store upper bound XID that's used to determine when deleted page is no
2648 * longer needed as a tombstone
2649 */
2650 safexid = ReadNextFullTransactionId();
2651 BTPageSetDeleted(page, safexid);
2652 opaque->btpo_cycleid = 0;
2653
2654 /* And update the metapage, if needed */
2655 if (BufferIsValid(metabuf))
2656 {
2657 /* upgrade metapage if needed */
2658 if (metad->btm_version < BTREE_NOVAC_VERSION)
2659 _bt_upgrademetapage(metapg);
2660 metad->btm_fastroot = rightsib;
2661 metad->btm_fastlevel = targetlevel;
2662 MarkBufferDirty(metabuf);
2663 }
2664
2665 /* Must mark buffers dirty before XLogInsert */
2666 MarkBufferDirty(rbuf);
2667 MarkBufferDirty(buf);
2668 if (BufferIsValid(lbuf))
2669 MarkBufferDirty(lbuf);
2670 if (target != leafblkno)
2671 MarkBufferDirty(leafbuf);
2672
2673 /* XLOG stuff */
2674 if (RelationNeedsWAL(rel))
2675 {
2676 xl_btree_unlink_page xlrec;
2677 xl_btree_metadata xlmeta;
2678 uint8 xlinfo;
2679 XLogRecPtr recptr;
2680
2681 XLogBeginInsert();
2682
2683 XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
2684 if (BufferIsValid(lbuf))
2685 XLogRegisterBuffer(1, lbuf, REGBUF_STANDARD);
2686 XLogRegisterBuffer(2, rbuf, REGBUF_STANDARD);
2687 if (target != leafblkno)
2688 XLogRegisterBuffer(3, leafbuf, REGBUF_WILL_INIT);
2689
2690 /* information stored on the target/to-be-unlinked block */
2691 xlrec.leftsib = leftsib;
2692 xlrec.rightsib = rightsib;
2693 xlrec.level = targetlevel;
2694 xlrec.safexid = safexid;
2695
2696 /* information needed to recreate the leaf block (if not the target) */
2697 xlrec.leafleftsib = leafleftsib;
2698 xlrec.leafrightsib = leafrightsib;
2699 xlrec.leaftopparent = leaftopparent;
2700
2701 XLogRegisterData(&xlrec, SizeOfBtreeUnlinkPage);
2702
2703 if (BufferIsValid(metabuf))
2704 {
2705 XLogRegisterBuffer(4, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
2706
2707 Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
2708 xlmeta.version = metad->btm_version;
2709 xlmeta.root = metad->btm_root;
2710 xlmeta.level = metad->btm_level;
2711 xlmeta.fastroot = metad->btm_fastroot;
2712 xlmeta.fastlevel = metad->btm_fastlevel;
2713 xlmeta.last_cleanup_num_delpages = metad->btm_last_cleanup_num_delpages;
2714 xlmeta.allequalimage = metad->btm_allequalimage;
2715
2716 XLogRegisterBufData(4, &xlmeta, sizeof(xl_btree_metadata));
2717 xlinfo = XLOG_BTREE_UNLINK_PAGE_META;
2718 }
2719 else
2720 xlinfo = XLOG_BTREE_UNLINK_PAGE;
2721
2722 recptr = XLogInsert(RM_BTREE_ID, xlinfo);
2723
2724 if (BufferIsValid(metabuf))
2725 {
2726 PageSetLSN(metapg, recptr);
2727 }
2728 page = BufferGetPage(rbuf);
2729 PageSetLSN(page, recptr);
2730 page = BufferGetPage(buf);
2731 PageSetLSN(page, recptr);
2732 if (BufferIsValid(lbuf))
2733 {
2734 page = BufferGetPage(lbuf);
2735 PageSetLSN(page, recptr);
2736 }
2737 if (target != leafblkno)
2738 {
2739 page = BufferGetPage(leafbuf);
2740 PageSetLSN(page, recptr);
2741 }
2742 }
2743
2744 END_CRIT_SECTION();
2745
2746 /* release metapage */
2747 if (BufferIsValid(metabuf))
2748 _bt_relbuf(rel, metabuf);
2749
2750 /* release siblings */
2751 if (BufferIsValid(lbuf))
2752 _bt_relbuf(rel, lbuf);
2753 _bt_relbuf(rel, rbuf);
2754
2755 /* If the target is not leafbuf, we're done with it now -- release it */
2756 if (target != leafblkno)
2757 _bt_relbuf(rel, buf);
2758
2759 /*
2760 * Maintain pages_newly_deleted, which is simply the number of pages
2761 * deleted by the ongoing VACUUM operation.
2762 *
2763 * Maintain pages_deleted in a way that takes into account how
2764 * btvacuumpage() will count deleted pages that have yet to become
2765 * scanblkno -- only count page when it's not going to get that treatment
2766 * later on.
2767 */
2768 stats->pages_newly_deleted++;
2769 if (target <= scanblkno)
2770 stats->pages_deleted++;
2771
2772 /*
2773 * Remember information about the target page (now a newly deleted page)
2774 * in dedicated vstate space for later. The page will be considered as a
2775 * candidate to place in the FSM at the end of the current btvacuumscan()
2776 * call.
2777 */
2778 _bt_pendingfsm_add(vstate, target, safexid);
2779
2780 /* Success - hold on to lock on leafbuf (might also have been target) */
2781 return true;
2782}
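
The sibling-link update in the function above is ordinary doubly-linked-list unlinking, with the deliberate twist that the unlinked page keeps its own left and right links so that a concurrent scan holding a stale pointer can still step right and recover. A minimal standalone sketch with a hypothetical PageNode type (no pins, locks, or WAL):

    #include <assert.h>
    #include <stddef.h>

    typedef struct PageNode
    {
        struct PageNode *prev;
        struct PageNode *next;
    } PageNode;

    /* Unlink "target" from its level's chain; target keeps its own links. */
    static void
    unlink_from_siblings(PageNode *target)
    {
        if (target->prev != NULL)
            target->prev->next = target->next;
        if (target->next != NULL)
            target->next->prev = target->prev;
        /* target->prev and target->next are deliberately left untouched */
    }

    int
    main(void)
    {
        PageNode a = {NULL, NULL},
                 b = {NULL, NULL},
                 c = {NULL, NULL};

        a.next = &b;
        b.prev = &a;
        b.next = &c;
        c.prev = &b;

        unlink_from_siblings(&b);

        assert(a.next == &c && c.prev == &a);
        /* a reader still holding a stale pointer to b can step right to c */
        assert(b.next == &c);
        return 0;
    }

In the real code the deleted page is additionally stamped with a safexid, and it only becomes recyclable once no scan can still be holding such a stale pointer.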
2783
2784/*
2785 * Establish how tall the to-be-deleted subtree will be during the first stage
2786 * of page deletion.
2787 *
2788 * Caller's child argument is the block number of the page caller wants to
2789 * delete (this is leafbuf's block number, except when we're called
2790 * recursively). stack is a search stack leading to it. Note that we will
2791 * update the stack entry(s) to reflect current downlink positions --- this is
2792 * similar to the corresponding point in page split handling.
2793 *
2794 * If "first stage" caller cannot go ahead with deleting _any_ pages, returns
2795 * false. Returns true on success, in which case caller can use certain
2796 * details established here to perform the first stage of deletion. This
2797 * function is the last point at which page deletion may be deemed unsafe
2798 * (barring index corruption, or unexpected concurrent page deletions).
2799 *
2800 * We write lock the parent of the root of the to-be-deleted subtree for
2801 * caller on success (i.e. we leave our lock on the *subtreeparent buffer for
2802 * caller). Caller will have to remove a downlink from *subtreeparent. We
2803 * also set a *subtreeparent offset number in *poffset, to indicate the
2804 * location of the pivot tuple that contains the relevant downlink.
2805 *
2806 * The root of the to-be-deleted subtree is called the "top parent". Note
2807 * that the leafbuf page is often the final "top parent" page (you can think
2808 * of the leafbuf page as a degenerate single page subtree when that happens).
2809 * Caller should initialize *topparent to the target leafbuf page block number
2810 * (while *topparentrightsib should be set to leafbuf's right sibling block
2811 * number). We will update *topparent (and *topparentrightsib) for caller
2812 * here, though only when it turns out that caller will delete at least one
2813 * internal page (i.e. only when caller needs to store a valid link to the top
2814 * parent block in the leafbuf page using BTreeTupleSetTopParent()).
2815 */
2816static bool
2817_bt_lock_subtree_parent(Relation rel, Relation heaprel, BlockNumber child,
2818 BTStack stack, Buffer *subtreeparent,
2819 OffsetNumber *poffset, BlockNumber *topparent,
2820 BlockNumber *topparentrightsib)
2821{
2822 BlockNumber parent,
2823 leftsibparent;
2824 OffsetNumber parentoffset,
2825 maxoff;
2826 Buffer pbuf;
2827 Page page;
2828 BTPageOpaque opaque;
2829
2830 /*
2831 * Locate the pivot tuple whose downlink points to "child". Write lock
2832 * the parent page itself.
2833 */
2834 pbuf = _bt_getstackbuf(rel, heaprel, stack, child);
2835 if (pbuf == InvalidBuffer)
2836 {
2837 /*
2838 * Failed to "re-find" a pivot tuple whose downlink matched our child
2839 * block number on the parent level -- the index must be corrupt.
2840 * Don't even try to delete the leafbuf subtree. Just report the
2841 * issue and press on with vacuuming the index.
2842 *
2843 * Note: _bt_getstackbuf() recovers from concurrent page splits that
2844 * take place on the parent level. Its approach is a near-exhaustive
2845 * linear search. This also gives it a surprisingly good chance of
2846 * recovering in the event of a buggy or inconsistent opclass. But we
2847 * don't rely on that here.
2848 */
2849 ereport(LOG,
2850 (errcode(ERRCODE_INDEX_CORRUPTED),
2851 errmsg_internal("failed to re-find parent key in index \"%s\" for deletion target page %u",
2852 RelationGetRelationName(rel), child)));
2853 Assert(false);
2854 return false;
2855 }
2856
2857 parent = stack->bts_blkno;
2858 parentoffset = stack->bts_offset;
2859
2860 page = BufferGetPage(pbuf);
2861 opaque = BTPageGetOpaque(page);
2862 maxoff = PageGetMaxOffsetNumber(page);
2863 leftsibparent = opaque->btpo_prev;
2864
2865 /*
2866 * _bt_getstackbuf() completes page splits on returned parent buffer when
2867 * required.
2868 *
2869 * In general it's a bad idea for VACUUM to use up more disk space, which
2870 * is why page deletion does not finish incomplete page splits most of the
2871 * time. We allow this limited exception because the risk is much lower,
2872 * and the potential downside of not proceeding is much higher: A single
2873 * internal page with the INCOMPLETE_SPLIT flag set might otherwise
2874 * prevent us from deleting hundreds of empty leaf pages from one level
2875 * down.
2876 */
2877 Assert(!P_INCOMPLETE_SPLIT(opaque));
2878
2879 if (parentoffset < maxoff)
2880 {
2881 /*
2882 * Child is not the rightmost child in parent, so it's safe to delete
2883 * the subtree whose root/topparent is child page
2884 */
2885 *subtreeparent = pbuf;
2886 *poffset = parentoffset;
2887 return true;
2888 }
2889
2890 /*
2891 * Child is the rightmost child of parent.
2892 *
2893 * Since it's the rightmost child of parent, deleting the child (or
2894 * deleting the subtree whose root/topparent is the child page) is only
2895 * safe when it's also possible to delete the parent.
2896 */
2897 Assert(parentoffset == maxoff);
2898 if (parentoffset != P_FIRSTDATAKEY(opaque) || P_RIGHTMOST(opaque))
2899 {
2900 /*
2901 * Child isn't parent's only child, or parent is rightmost on its
2902 * entire level. Definitely cannot delete any pages.
2903 */
2904 _bt_relbuf(rel, pbuf);
2905 return false;
2906 }
2907
2908 /*
2909 * Now make sure that the parent deletion is itself safe by examining the
2910 * child's grandparent page. Recurse, passing the parent page as the
2911 * child page (child's grandparent is the parent on the next level up). If
2912 * parent deletion is unsafe, then child deletion must also be unsafe (in
2913 * which case caller cannot delete any pages at all).
2914 */
2915 *topparent = parent;
2916 *topparentrightsib = opaque->btpo_next;
2917
2918 /*
2919 * Release lock on parent before recursing.
2920 *
2921 * It's OK to release page locks on parent before recursive call locks
2922 * grandparent. An internal page can only acquire an entry if the child
2923 * is split, but that cannot happen as long as we still hold a lock on the
2924 * leafbuf page.
2925 */
2926 _bt_relbuf(rel, pbuf);
2927
2928 /*
2929 * Before recursing, check that the left sibling of parent (if any) is not
2930 * marked with INCOMPLETE_SPLIT flag first (must do so after we drop the
2931 * parent lock).
2932 *
2933 * Note: We deliberately avoid completing incomplete splits here.
2934 */
2935 if (_bt_leftsib_splitflag(rel, leftsibparent, parent))
2936 return false;
2937
2938 /* Recurse to examine child page's grandparent page */
2939 return _bt_lock_subtree_parent(rel, heaprel, parent, stack->bts_parent,
2940 subtreeparent, poffset,
2941 topparent, topparentrightsib);
2942}
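
The recursion above boils down to a simple rule: a page may be deleted if it is not its parent's rightmost child; if it is the rightmost child, it may only go when it is the parent's only child and the parent is itself deletable by the same rule. A simplified standalone sketch over a toy parent-pointer structure (hypothetical ToyPage type; it ignores locking, incomplete splits, and downlink re-finding):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct ToyPage
    {
        struct ToyPage *parent;     /* NULL for the root */
        int             childindex; /* 0-based position among parent's children */
        int             nchildren;  /* how many children the parent has */
        bool            rightmost;  /* rightmost page on its entire level? */
    } ToyPage;

    static bool
    can_delete_subtree(const ToyPage *child)
    {
        const ToyPage *parent = child->parent;

        if (parent == NULL)
            return false;           /* never delete the root */
        if (child->childindex < child->nchildren - 1)
            return true;            /* not the rightmost child: safe */
        if (child->nchildren > 1 || parent->rightmost)
            return false;           /* rightmost but not only child, or parent
                                     * is rightmost on its whole level */
        return can_delete_subtree(parent); /* only child: parent must go too */
    }

    int
    main(void)
    {
        ToyPage root = {NULL, 0, 1, true};
        ToyPage internal = {&root, 0, 3, false};
        ToyPage first_leaf = {&internal, 0, 2, false};
        ToyPage last_leaf = {&internal, 1, 2, false};

        printf("first leaf deletable: %d\n", can_delete_subtree(&first_leaf));
        printf("last leaf deletable: %d\n", can_delete_subtree(&last_leaf));
        return 0;
    }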
2943
2944/*
2945 * Initialize local memory state used by VACUUM for _bt_pendingfsm_finalize
2946 * optimization.
2947 *
2948 * Called at the start of a btvacuumscan(). Caller's cleanuponly argument
2949 * indicates if ongoing VACUUM has not (and will not) call btbulkdelete().
2950 *
2951 * We expect to allocate memory inside VACUUM's top-level memory context here.
2952 * The working buffer is subject to a limit based on work_mem. Our strategy
2953 * when the array can no longer grow within the bounds of that limit is to
2954 * stop saving additional newly deleted pages, while proceeding as usual with
2955 * the pages that we can fit.
2956 */
2957void
2958_bt_pendingfsm_init(Relation rel, BTVacState *vstate, bool cleanuponly)
2959{
2960 Size maxbufsize;
2961
2962 /*
2963 * Don't bother with optimization in cleanup-only case -- we don't expect
2964 * any newly deleted pages. Besides, cleanup-only calls to btvacuumscan()
2965 * can only take place because this optimization didn't work out during
2966 * the last VACUUM.
2967 */
2968 if (cleanuponly)
2969 return;
2970
2971 /*
2972 * Cap maximum size of array so that we always respect work_mem. Avoid
2973 * int overflow here.
2974 */
2975 vstate->bufsize = 256;
2976 maxbufsize = (work_mem * (Size) 1024) / sizeof(BTPendingFSM);
2977 maxbufsize = Min(maxbufsize, MaxAllocSize / sizeof(BTPendingFSM));
2978 /* BTVacState.maxbufsize has type int */
2979 maxbufsize = Min(maxbufsize, INT_MAX);
2980 /* Stay sane with small work_mem */
2981 maxbufsize = Max(maxbufsize, vstate->bufsize);
2982 vstate->maxbufsize = (int) maxbufsize;
2983
2984 /* Allocate buffer, indicate that there are currently 0 pending pages */
2985 vstate->pendingpages = palloc(sizeof(BTPendingFSM) * vstate->bufsize);
2986 vstate->npendingpages = 0;
2987}
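
The sizing arithmetic above clamps the pending-pages array three ways: a work_mem-based budget, the allocator's single-allocation limit, and INT_MAX (since maxbufsize is an int), with a floor at the initial 256-entry buffer. A standalone sketch of the same arithmetic using plain C types (TOY_MAX_ALLOC and ToyPendingFSM are stand-ins for the real MaxAllocSize and BTPendingFSM):

    #include <stddef.h>
    #include <stdio.h>

    #define TOY_MAX_ALLOC ((size_t) 0x3fffffff)   /* stand-in for MaxAllocSize */

    typedef struct
    {
        unsigned           target;
        unsigned long long safexid;
    } ToyPendingFSM;

    static int
    compute_maxbufsize(int work_mem_kb)
    {
        size_t maxbufsize;

        /* work_mem budget, expressed in array entries */
        maxbufsize = ((size_t) work_mem_kb * 1024) / sizeof(ToyPendingFSM);
        /* never ask for a single allocation larger than the allocator allows */
        if (maxbufsize > TOY_MAX_ALLOC / sizeof(ToyPendingFSM))
            maxbufsize = TOY_MAX_ALLOC / sizeof(ToyPendingFSM);
        /* the field holding this value is an int */
        if (maxbufsize > 0x7fffffff)
            maxbufsize = 0x7fffffff;
        /* stay sane with very small work_mem: keep at least the initial buffer */
        if (maxbufsize < 256)
            maxbufsize = 256;
        return (int) maxbufsize;
    }

    int
    main(void)
    {
        printf("work_mem = 64kB -> %d entries\n", compute_maxbufsize(64));
        printf("work_mem = 4MB  -> %d entries\n", compute_maxbufsize(4096));
        return 0;
    }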
2988
2989/*
2990 * Place any newly deleted pages (i.e. pages that _bt_pagedel() deleted during
2991 * the ongoing VACUUM operation) into the free space map -- though only when
2992 * it is actually safe to do so by now.
2993 *
2994 * Called at the end of a btvacuumscan(), just before free space map vacuuming
2995 * takes place.
2996 *
2997 * Frees memory allocated by _bt_pendingfsm_init(), if any.
2998 */
2999void
3000_bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
3001{
3002 IndexBulkDeleteResult *stats = vstate->stats;
3003 Relation heaprel = vstate->info->heaprel;
3004
3005 Assert(stats->pages_newly_deleted >= vstate->npendingpages);
3006 Assert(heaprel != NULL);
3007
3008 if (vstate->npendingpages == 0)
3009 {
3010 /* Just free memory when nothing to do */
3011 if (vstate->pendingpages)
3012 pfree(vstate->pendingpages);
3013
3014 return;
3015 }
3016
3017#ifdef DEBUG_BTREE_PENDING_FSM
3018
3019 /*
3020 * Debugging aid: Sleep for 5 seconds to greatly increase the chances of
3021 * placing pending pages in the FSM. Note that the optimization will
3022 * never be effective without some other backend concurrently consuming an
3023 * XID.
3024 */
3025 pg_usleep(5000000L);
3026#endif
3027
3028 /*
3029 * Recompute VACUUM XID boundaries.
3030 *
3031 * We don't actually care about the oldest non-removable XID. Computing
3032 * the oldest such XID has a useful side-effect that we rely on: it
3033 * forcibly updates the XID horizon state for this backend. This step is
3034 * essential; GlobalVisCheckRemovableFullXid() will not reliably recognize
3035 * that it is now safe to recycle newly deleted pages without this step.
3036 */
3037 GetOldestNonRemovableTransactionId(heaprel);
3038
3039 for (int i = 0; i < vstate->npendingpages; i++)
3040 {
3041 BlockNumber target = vstate->pendingpages[i].target;
3042 FullTransactionId safexid = vstate->pendingpages[i].safexid;
3043
3044 /*
3045 * Do the equivalent of checking BTPageIsRecyclable(), but without
3046 * accessing the page again a second time.
3047 *
3048 * Give up on finding the first non-recyclable page -- all later pages
3049 * must be non-recyclable too, since _bt_pendingfsm_add() adds pages
3050 * to the array in safexid order.
3051 */
3052 if (!GlobalVisCheckRemovableFullXid(heaprel, safexid))
3053 break;
3054
3055 RecordFreeIndexPage(rel, target);
3056 stats->pages_free++;
3057 }
3058
3059 pfree(vstate->pendingpages);
3060}
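
The loop above depends on the pending array being in safexid order: the first entry whose safexid is not yet old enough ends the scan, because every later entry must be at least as new. A standalone sketch of that early-exit pattern, with a hypothetical xid_is_old_enough() standing in for GlobalVisCheckRemovableFullXid():

    #include <stdio.h>

    typedef struct
    {
        unsigned           blkno;
        unsigned long long safexid;
    } ToyPending;

    /* stand-in for the real visibility-horizon test */
    static int
    xid_is_old_enough(unsigned long long safexid, unsigned long long horizon)
    {
        return safexid < horizon;
    }

    int
    main(void)
    {
        /* entries were appended in safexid order, so the array is sorted */
        ToyPending pending[] = {{7, 100}, {12, 104}, {3, 250}, {9, 300}};
        unsigned long long horizon = 200;
        int nfreed = 0;

        for (int i = 0; i < 4; i++)
        {
            if (!xid_is_old_enough(pending[i].safexid, horizon))
                break;          /* all later entries are at least as new */
            printf("block %u can be returned to the FSM\n", pending[i].blkno);
            nfreed++;
        }
        printf("%d of 4 newly deleted pages recorded as free\n", nfreed);
        return 0;
    }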
3061
3062/*
3063 * Maintain array of pages that were deleted during current btvacuumscan()
3064 * call, for use in _bt_pendingfsm_finalize()
3065 */
3066static void
3067_bt_pendingfsm_add(BTVacState *vstate,
3068 BlockNumber target,
3069 FullTransactionId safexid)
3070{
3071 Assert(vstate->npendingpages <= vstate->bufsize);
3072 Assert(vstate->bufsize <= vstate->maxbufsize);
3073
3074#ifdef USE_ASSERT_CHECKING
3075
3076 /*
3077 * Verify an assumption made by _bt_pendingfsm_finalize(): pages from the
3078 * array will always be in safexid order (since that is the order that we
3079 * save them in here)
3080 */
3081 if (vstate->npendingpages > 0)
3082 {
3083 FullTransactionId lastsafexid =
3084 vstate->pendingpages[vstate->npendingpages - 1].safexid;
3085
3086 Assert(FullTransactionIdFollowsOrEquals(safexid, lastsafexid));
3087 }
3088#endif
3089
3090 /*
3091 * If temp buffer reaches maxbufsize/work_mem capacity then we discard
3092 * information about this page.
3093 *
3094 * Note that this also covers the case where we opted to not use the
3095 * optimization in _bt_pendingfsm_init().
3096 */
3097 if (vstate->npendingpages == vstate->maxbufsize)
3098 return;
3099
3100 /* Consider enlarging buffer */
3101 if (vstate->npendingpages == vstate->bufsize)
3102 {
3103 int newbufsize = vstate->bufsize * 2;
3104
3105 /* Respect work_mem */
3106 if (newbufsize > vstate->maxbufsize)
3107 newbufsize = vstate->maxbufsize;
3108
3109 vstate->bufsize = newbufsize;
3110 vstate->pendingpages =
3111 repalloc(vstate->pendingpages,
3112 sizeof(BTPendingFSM) * vstate->bufsize);
3113 }
3114
3115 /* Save metadata for newly deleted page */
3116 vstate->pendingpages[vstate->npendingpages].target = target;
3117 vstate->pendingpages[vstate->npendingpages].safexid = safexid;
3118 vstate->npendingpages++;
3119}
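
The growth strategy above is plain capacity doubling with a hard cap: once npendingpages reaches maxbufsize, further entries are silently dropped rather than exceeding the memory budget. A standalone sketch of that append path using malloc/realloc in place of palloc/repalloc (hypothetical CappedArray type; error handling omitted):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct
    {
        int *items;
        int  nitems;
        int  bufsize;       /* current allocated capacity */
        int  maxbufsize;    /* hard cap derived from the memory budget */
    } CappedArray;

    /* Append one item, doubling capacity up to maxbufsize; drop item if full. */
    static void
    capped_append(CappedArray *arr, int item)
    {
        if (arr->nitems == arr->maxbufsize)
            return;                             /* at the cap: discard silently */

        if (arr->nitems == arr->bufsize)
        {
            int newbufsize = arr->bufsize * 2;

            if (newbufsize > arr->maxbufsize)
                newbufsize = arr->maxbufsize;
            arr->items = realloc(arr->items, sizeof(int) * (size_t) newbufsize);
            arr->bufsize = newbufsize;
        }
        arr->items[arr->nitems++] = item;
    }

    int
    main(void)
    {
        CappedArray arr = {malloc(sizeof(int) * 2), 0, 2, 5};

        for (int i = 0; i < 10; i++)
            capped_append(&arr, i);
        printf("kept %d of 10 items (capacity %d)\n", arr.nitems, arr.bufsize);
        free(arr.items);
        return 0;
    }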
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
int Buffer
Definition: buf.h:23
#define InvalidBuffer
Definition: buf.h:25
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:4223
Buffer ReleaseAndReadBuffer(Buffer buffer, Relation relation, BlockNumber blockNum)
Definition: bufmgr.c:3008
void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition: bufmgr.c:5604
Buffer ExtendBufferedRel(BufferManagerRelation bmr, ForkNumber forkNum, BufferAccessStrategy strategy, uint32 flags)
Definition: bufmgr.c:845
bool ConditionalLockBuffer(Buffer buffer)
Definition: bufmgr.c:5630
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:5366
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2943
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5684
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:745
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:294
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:436
@ BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:205
static Size BufferGetPageSize(Buffer buffer)
Definition: bufmgr.h:425
@ EB_LOCK_FIRST
Definition: bufmgr.h:87
#define BMR_REL(p_rel)
Definition: bufmgr.h:114
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:387
void PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
Definition: bufpage.c:1160
bool PageIndexTupleOverwrite(Page page, OffsetNumber offnum, const void *newtup, Size newsize)
Definition: bufpage.c:1404
void PageIndexTupleDelete(Page page, OffsetNumber offnum)
Definition: bufpage.c:1051
void PageInit(Page page, Size pageSize, Size specialSize)
Definition: bufpage.c:42
PageHeaderData * PageHeader
Definition: bufpage.h:173
static uint16 PageGetSpecialSize(const PageData *page)
Definition: bufpage.h:316
static void * PageGetItem(const PageData *page, const ItemIdData *itemId)
Definition: bufpage.h:353
static bool PageIsNew(const PageData *page)
Definition: bufpage.h:233
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:243
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:390
PageData * Page
Definition: bufpage.h:81
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition: bufpage.h:371
#define Min(x, y)
Definition: c.h:1006
#define MAXALIGN(LEN)
Definition: c.h:813
uint8_t uint8
Definition: c.h:539
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:228
#define Max(x, y)
Definition: c.h:1000
uint16_t uint16
Definition: c.h:540
uint32_t uint32
Definition: c.h:541
#define MemSet(start, val, len)
Definition: c.h:1022
uint32 TransactionId
Definition: c.h:660
size_t Size
Definition: c.h:613
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1170
int errhint(const char *fmt,...)
Definition: elog.c:1330
int errcode(int sqlerrcode)
Definition: elog.c:863
int errmsg(const char *fmt,...)
Definition: elog.c:1080
#define LOG
Definition: elog.h:31
#define DEBUG2
Definition: elog.h:29
#define PANIC
Definition: elog.h:42
#define DEBUG1
Definition: elog.h:30
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
#define ereport(elevel,...)
Definition: elog.h:150
#define MaxAllocSize
Definition: fe_memutils.h:22
int work_mem
Definition: globals.c:131
Assert(PointerIsAligned(start, uint64))
BlockNumber GetFreeIndexPage(Relation rel)
Definition: indexfsm.c:38
void RecordFreeIndexPage(Relation rel, BlockNumber freeBlock)
Definition: indexfsm.c:52
IndexTuple CopyIndexTuple(IndexTuple source)
Definition: indextuple.c:547
#define INJECTION_POINT(name, arg)
static int pg_cmp_s16(int16 a, int16 b)
Definition: int.h:701
int b
Definition: isn.c:74
int a
Definition: isn.c:73
int i
Definition: isn.c:77
int32 ItemPointerCompare(const ItemPointerData *arg1, const ItemPointerData *arg2)
Definition: itemptr.c:51
bool ItemPointerEquals(const ItemPointerData *pointer1, const ItemPointerData *pointer2)
Definition: itemptr.c:35
IndexTupleData * IndexTuple
Definition: itup.h:53
struct IndexTupleData IndexTupleData
static Size IndexTupleSize(const IndexTupleData *itup)
Definition: itup.h:71
#define MaxIndexTuplesPerPage
Definition: itup.h:181
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1229
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1610
void pfree(void *pointer)
Definition: mcxt.c:1594
void * palloc(Size size)
Definition: mcxt.c:1365
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
#define VALGRIND_CHECK_MEM_IS_DEFINED(addr, size)
Definition: memdebug.h:23
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
#define START_CRIT_SECTION()
Definition: miscadmin.h:150
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:123
#define END_CRIT_SECTION()
Definition: miscadmin.h:152
void _bt_update_posting(BTVacuumPosting vacposting)
Definition: nbtdedup.c:922
Buffer _bt_getstackbuf(Relation rel, Relation heaprel, BTStack stack, BlockNumber child)
Definition: nbtinsert.c:2335
Buffer _bt_relandgetbuf(Relation rel, Buffer obuf, BlockNumber blkno, int access)
Definition: nbtpage.c:1004
static bool _bt_lock_subtree_parent(Relation rel, Relation heaprel, BlockNumber child, BTStack stack, Buffer *subtreeparent, OffsetNumber *poffset, BlockNumber *topparent, BlockNumber *topparentrightsib)
Definition: nbtpage.c:2817
void _bt_upgrademetapage(Page page)
Definition: nbtpage.c:108
void _bt_relbuf(Relation rel, Buffer buf)
Definition: nbtpage.c:1024
Buffer _bt_gettrueroot(Relation rel)
Definition: nbtpage.c:581
int _bt_getrootheight(Relation rel)
Definition: nbtpage.c:676
void _bt_pageinit(Page page, Size size)
Definition: nbtpage.c:1130
static bool _bt_rightsib_halfdeadflag(Relation rel, BlockNumber leafrightsib)
Definition: nbtpage.c:1751
void _bt_pagedel(Relation rel, Buffer leafbuf, BTVacState *vstate)
Definition: nbtpage.c:1801
Buffer _bt_allocbuf(Relation rel, Relation heaprel)
Definition: nbtpage.c:870
void _bt_delitems_vacuum(Relation rel, Buffer buf, OffsetNumber *deletable, int ndeletable, BTVacuumPosting *updatable, int nupdatable)
Definition: nbtpage.c:1155
static bool _bt_leftsib_splitflag(Relation rel, BlockNumber leftsib, BlockNumber target)
Definition: nbtpage.c:1694
void _bt_checkpage(Relation rel, Buffer buf)
Definition: nbtpage.c:798
void _bt_metaversion(Relation rel, bool *heapkeyspace, bool *allequalimage)
Definition: nbtpage.c:740
static BTMetaPageData * _bt_getmeta(Relation rel, Buffer metabuf)
Definition: nbtpage.c:143
static void _bt_delitems_delete(Relation rel, Buffer buf, TransactionId snapshotConflictHorizon, bool isCatalogRel, OffsetNumber *deletable, int ndeletable, BTVacuumPosting *updatable, int nupdatable)
Definition: nbtpage.c:1284
void _bt_set_cleanup_info(Relation rel, BlockNumber num_delpages)
Definition: nbtpage.c:233
static bool _bt_mark_page_halfdead(Relation rel, Relation heaprel, Buffer leafbuf, BTStack stack)
Definition: nbtpage.c:2091
bool _bt_conditionallockbuf(Relation rel, Buffer buf)
Definition: nbtpage.c:1094
Buffer _bt_getbuf(Relation rel, BlockNumber blkno, int access)
Definition: nbtpage.c:846
void _bt_unlockbuf(Relation rel, Buffer buf)
Definition: nbtpage.c:1071
void _bt_upgradelockbufcleanup(Relation rel, Buffer buf)
Definition: nbtpage.c:1110
void _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level, bool allequalimage)
Definition: nbtpage.c:68
void _bt_delitems_delete_check(Relation rel, Buffer buf, Relation heapRel, TM_IndexDeleteOp *delstate)
Definition: nbtpage.c:1512
bool _bt_vacuum_needs_cleanup(Relation rel)
Definition: nbtpage.c:180
static char * _bt_delitems_update(BTVacuumPosting *updatable, int nupdatable, OffsetNumber *updatedoffsets, Size *updatedbuflen, bool needswal)
Definition: nbtpage.c:1404
static int _bt_delitems_cmp(const void *a, const void *b)
Definition: nbtpage.c:1463
void _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
Definition: nbtpage.c:3000
void _bt_lockbuf(Relation rel, Buffer buf, int access)
Definition: nbtpage.c:1040
Buffer _bt_getroot(Relation rel, Relation heaprel, int access)
Definition: nbtpage.c:345
void _bt_pendingfsm_init(Relation rel, BTVacState *vstate, bool cleanuponly)
Definition: nbtpage.c:2958
static void _bt_pendingfsm_add(BTVacState *vstate, BlockNumber target, FullTransactionId safexid)
Definition: nbtpage.c:3067
static bool _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, BlockNumber scanblkno, bool *rightsib_empty, BTVacState *vstate)
Definition: nbtpage.c:2316
#define P_ISHALFDEAD(opaque)
Definition: nbtree.h:225
static uint16 BTreeTupleGetNPosting(IndexTuple posting)
Definition: nbtree.h:519
#define BTPageGetMeta(p)
Definition: nbtree.h:122
#define P_ISLEAF(opaque)
Definition: nbtree.h:221
static FullTransactionId BTPageGetDeleteXid(Page page)
Definition: nbtree.h:261
#define BTREE_MIN_VERSION
Definition: nbtree.h:152
#define BTP_LEAF
Definition: nbtree.h:77
#define BTP_HALF_DEAD
Definition: nbtree.h:81
#define P_HIKEY
Definition: nbtree.h:368
static void BTreeTupleSetTopParent(IndexTuple leafhikey, BlockNumber blkno)
Definition: nbtree.h:627
#define P_ISMETA(opaque)
Definition: nbtree.h:224
#define P_LEFTMOST(opaque)
Definition: nbtree.h:219
#define BTPageGetOpaque(page)
Definition: nbtree.h:74
#define P_ISDELETED(opaque)
Definition: nbtree.h:223
#define BTREE_MAGIC
Definition: nbtree.h:150
#define BTP_META
Definition: nbtree.h:80
#define BTREE_VERSION
Definition: nbtree.h:151
static BlockNumber BTreeTupleGetTopParent(IndexTuple leafhikey)
Definition: nbtree.h:621
struct BTPendingFSM BTPendingFSM
#define BTP_ROOT
Definition: nbtree.h:78
static void BTreeTupleSetDownLink(IndexTuple pivot, BlockNumber blkno)
Definition: nbtree.h:563
#define P_FIRSTDATAKEY(opaque)
Definition: nbtree.h:370
#define P_ISROOT(opaque)
Definition: nbtree.h:222
#define P_NONE
Definition: nbtree.h:213
#define P_RIGHTMOST(opaque)
Definition: nbtree.h:220
#define P_INCOMPLETE_SPLIT(opaque)
Definition: nbtree.h:228
#define BTREE_METAPAGE
Definition: nbtree.h:149
static ItemPointer BTreeTupleGetPostingN(IndexTuple posting, int n)
Definition: nbtree.h:545
#define BT_READ
Definition: nbtree.h:730
static bool BTPageIsRecyclable(Page page, Relation heaprel)
Definition: nbtree.h:292
static BlockNumber BTreeTupleGetDownLink(IndexTuple pivot)
Definition: nbtree.h:557
#define P_IGNORE(opaque)
Definition: nbtree.h:226
static ItemPointer BTreeTupleGetMaxHeapTID(IndexTuple itup)
Definition: nbtree.h:665
static bool BTreeTupleIsPosting(IndexTuple itup)
Definition: nbtree.h:493
static void BTPageSetDeleted(Page page, FullTransactionId safexid)
Definition: nbtree.h:240
#define BTREE_NOVAC_VERSION
Definition: nbtree.h:153
static ItemPointer BTreeTupleGetHeapTID(IndexTuple itup)
Definition: nbtree.h:639
#define BT_WRITE
Definition: nbtree.h:731
BTStack _bt_search(Relation rel, Relation heaprel, BTScanInsert key, Buffer *bufP, int access)
Definition: nbtsearch.c:107
BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup)
Definition: nbtutils.c:97
#define SizeOfBtreeVacuum
Definition: nbtxlog.h:234
#define XLOG_BTREE_META_CLEANUP
Definition: nbtxlog.h:41
#define SizeOfBtreeUpdate
Definition: nbtxlog.h:268
#define XLOG_BTREE_VACUUM
Definition: nbtxlog.h:39
#define SizeOfBtreeDelete
Definition: nbtxlog.h:253
#define SizeOfBtreeMarkPageHalfDead
Definition: nbtxlog.h:291
#define XLOG_BTREE_UNLINK_PAGE
Definition: nbtxlog.h:35
#define XLOG_BTREE_UNLINK_PAGE_META
Definition: nbtxlog.h:36
#define SizeOfBtreeNewroot
Definition: nbtxlog.h:347
#define XLOG_BTREE_MARK_PAGE_HALFDEAD
Definition: nbtxlog.h:38
#define XLOG_BTREE_REUSE_PAGE
Definition: nbtxlog.h:40
#define SizeOfBtreeUnlinkPage
Definition: nbtxlog.h:328
#define SizeOfBtreeReusePage
Definition: nbtxlog.h:192
#define XLOG_BTREE_NEWROOT
Definition: nbtxlog.h:37
#define XLOG_BTREE_DELETE
Definition: nbtxlog.h:34
#define InvalidOffsetNumber
Definition: off.h:26
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define qsort(a, b, c, d)
Definition: port.h:500
void PredicateLockPageCombine(Relation relation, BlockNumber oldblkno, BlockNumber newblkno)
Definition: predicate.c:3229
TransactionId GetOldestNonRemovableTransactionId(Relation rel)
Definition: procarray.c:1953
bool GlobalVisCheckRemovableFullXid(Relation rel, FullTransactionId fxid)
Definition: procarray.c:4248
#define RelationGetRelationName(relation)
Definition: rel.h:549
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:694
#define RelationNeedsWAL(relation)
Definition: rel.h:638
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:647
@ MAIN_FORKNUM
Definition: relpath.h:58
void pg_usleep(long microsec)
Definition: signal.c:53
uint32 btm_last_cleanup_num_delpages
Definition: nbtree.h:115
uint32 btm_level
Definition: nbtree.h:109
float8 btm_last_cleanup_num_heap_tuples
Definition: nbtree.h:117
BlockNumber btm_fastroot
Definition: nbtree.h:110
uint32 btm_version
Definition: nbtree.h:107
uint32 btm_magic
Definition: nbtree.h:106
BlockNumber btm_root
Definition: nbtree.h:108
bool btm_allequalimage
Definition: nbtree.h:119
uint32 btm_fastlevel
Definition: nbtree.h:111
BlockNumber btpo_next
Definition: nbtree.h:66
BlockNumber btpo_prev
Definition: nbtree.h:65
uint16 btpo_flags
Definition: nbtree.h:68
uint32 btpo_level
Definition: nbtree.h:67
BTCycleId btpo_cycleid
Definition: nbtree.h:69
FullTransactionId safexid
Definition: nbtree.h:328
BlockNumber target
Definition: nbtree.h:327
BlockNumber bts_blkno
Definition: nbtree.h:745
struct BTStackData * bts_parent
Definition: nbtree.h:747
OffsetNumber bts_offset
Definition: nbtree.h:746
IndexBulkDeleteResult * stats
Definition: nbtree.h:334
BTPendingFSM * pendingpages
Definition: nbtree.h:345
int npendingpages
Definition: nbtree.h:346
IndexVacuumInfo * info
Definition: nbtree.h:333
int bufsize
Definition: nbtree.h:343
int maxbufsize
Definition: nbtree.h:344
uint16 deletetids[FLEXIBLE_ARRAY_MEMBER]
Definition: nbtree.h:922
uint16 ndeletedtids
Definition: nbtree.h:921
IndexTuple itup
Definition: nbtree.h:917
OffsetNumber updatedoffset
Definition: nbtree.h:918
BlockNumber pages_deleted
Definition: genam.h:109
BlockNumber pages_newly_deleted
Definition: genam.h:108
BlockNumber pages_free
Definition: genam.h:110
ItemPointerData t_tid
Definition: itup.h:37
unsigned short t_info
Definition: itup.h:49
Relation heaprel
Definition: genam.h:74
void * rd_amcache
Definition: rel.h:229
MemoryContext rd_indexcxt
Definition: rel.h:204
RelFileLocator rd_locator
Definition: rel.h:57
TM_IndexStatus * status
Definition: tableam.h:254
TM_IndexDelete * deltids
Definition: tableam.h:253
ItemPointerData tid
Definition: tableam.h:212
bool knowndeletable
Definition: tableam.h:219
OffsetNumber idxoffnum
Definition: tableam.h:218
TransactionId snapshotConflictHorizon
Definition: nbtxlog.h:238
bool isCatalogRel
Definition: nbtxlog.h:241
uint16 ndeleted
Definition: nbtxlog.h:239
uint16 nupdated
Definition: nbtxlog.h:240
uint32 level
Definition: nbtxlog.h:50
uint32 version
Definition: nbtxlog.h:48
bool allequalimage
Definition: nbtxlog.h:54
BlockNumber fastroot
Definition: nbtxlog.h:51
uint32 fastlevel
Definition: nbtxlog.h:52
BlockNumber root
Definition: nbtxlog.h:49
uint32 last_cleanup_num_delpages
Definition: nbtxlog.h:53
uint32 level
Definition: nbtxlog.h:344
BlockNumber rootblk
Definition: nbtxlog.h:343
FullTransactionId snapshotConflictHorizon
Definition: nbtxlog.h:187
RelFileLocator locator
Definition: nbtxlog.h:185
BlockNumber block
Definition: nbtxlog.h:186
uint16 ndeletedtids
Definition: nbtxlog.h:263
uint16 ndeleted
Definition: nbtxlog.h:222
uint16 nupdated
Definition: nbtxlog.h:223
static TransactionId table_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
Definition: tableam.h:1331
#define InvalidTransactionId
Definition: transam.h:31
#define FullTransactionIdFollowsOrEquals(a, b)
Definition: transam.h:54
FullTransactionId ReadNextFullTransactionId(void)
Definition: varsup.c:288
#define XLogStandbyInfoActive()
Definition: xlog.h:123
uint64 XLogRecPtr
Definition: xlogdefs.h:21
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:478
void XLogRegisterBufData(uint8 block_id, const void *data, uint32 len)
Definition: xloginsert.c:409
void XLogRegisterData(const void *data, uint32 len)
Definition: xloginsert.c:368
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:245
void XLogBeginInsert(void)
Definition: xloginsert.c:152
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define REGBUF_WILL_INIT
Definition: xloginsert.h:34
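The index above covers the buffer-access helpers that nbtpage.c defines and consumes (_bt_getbuf, _bt_lockbuf, _bt_unlockbuf, _bt_checkpage, plus the BTPageGetOpaque and P_* page-flag macros). As a rough illustration of how callers in the btree code typically combine them, here is a minimal sketch. The function inspect_btree_page is hypothetical and not part of PostgreSQL; the calls it makes follow the signatures listed on this page, plus _bt_relbuf, the companion release routine defined earlier in nbtpage.c.

#include "postgres.h"

#include "access/nbtree.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"

/*
 * inspect_btree_page() -- hypothetical helper, not part of nbtpage.c.
 *
 * Pins and read-locks one existing btree page using the helpers indexed
 * above, reports a few opaque-area flags, then releases the page.
 */
static void
inspect_btree_page(Relation rel, BlockNumber blkno)
{
	Buffer		buf;
	Page		page;
	BTPageOpaque opaque;

	/* pin the block, take a read lock, and sanity-check the page */
	buf = _bt_getbuf(rel, blkno, BT_READ);
	page = BufferGetPage(buf);
	opaque = BTPageGetOpaque(page);

	if (P_ISLEAF(opaque) && !P_IGNORE(opaque))
		elog(DEBUG1, "leaf page %u of \"%s\" is live at level %u",
			 blkno, RelationGetRelationName(rel), opaque->btpo_level);

	/* release both the lock and the pin */
	_bt_relbuf(rel, buf);
}

Write access follows the same shape with BT_WRITE in place of BT_READ; the higher-level routines indexed above (_bt_pagedel, _bt_delitems_vacuum, and friends) are essentially longer variants of this pin/lock, inspect-or-modify, release pattern with WAL logging added.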