PostgreSQL Source Code git master
brin_pageops.c File Reference
#include "postgres.h"
#include "access/brin_page.h"
#include "access/brin_pageops.h"
#include "access/brin_revmap.h"
#include "access/brin_xlog.h"
#include "access/xloginsert.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "utils/rel.h"
Include dependency graph for brin_pageops.c:

Go to the source code of this file.

Macros

#define BrinMaxItemSize
 

Functions

static Buffer brin_getinsertbuffer (Relation irel, Buffer oldbuf, Size itemsz, bool *extended)
 
static Size br_page_get_freespace (Page page)
 
static void brin_initialize_empty_new_buffer (Relation idxrel, Buffer buffer)
 
bool brin_doupdate (Relation idxrel, BlockNumber pagesPerRange, BrinRevmap *revmap, BlockNumber heapBlk, Buffer oldbuf, OffsetNumber oldoff, const BrinTuple *origtup, Size origsz, const BrinTuple *newtup, Size newsz, bool samepage)
 
bool brin_can_do_samepage_update (Buffer buffer, Size origsz, Size newsz)
 
OffsetNumber brin_doinsert (Relation idxrel, BlockNumber pagesPerRange, BrinRevmap *revmap, Buffer *buffer, BlockNumber heapBlk, const BrinTuple *tup, Size itemsz)
 
void brin_page_init (Page page, uint16 type)
 
void brin_metapage_init (Page page, BlockNumber pagesPerRange, uint16 version)
 
bool brin_start_evacuating_page (Relation idxRel, Buffer buf)
 
void brin_evacuate_page (Relation idxRel, BlockNumber pagesPerRange, BrinRevmap *revmap, Buffer buf)
 
void brin_page_cleanup (Relation idxrel, Buffer buf)
 

Macro Definition Documentation

◆ BrinMaxItemSize

#define BrinMaxItemSize
Value:
MAXALIGN_DOWN(BLCKSZ - \
              (MAXALIGN(SizeOfPageHeaderData + \
                        sizeof(ItemIdData)) + \
               MAXALIGN(sizeof(BrinSpecialSpace))))

Definition at line 28 of file brin_pageops.c.


Function Documentation

◆ br_page_get_freespace()

static Size br_page_get_freespace ( Page  page)
static

Return the amount of free space on a regular BRIN index page.

If the page is not a regular page, or has been marked with the BRIN_EVACUATE_PAGE flag, returns 0.

Definition at line 918 of file brin_pageops.c.

919{
920 if (!BRIN_IS_REGULAR_PAGE(page) ||
921 (BrinPageFlags(page) & BRIN_EVACUATE_PAGE) != 0)
922 return 0;
923 else
924 return PageGetFreeSpace(page);
925}

References BRIN_EVACUATE_PAGE, BRIN_IS_REGULAR_PAGE, BrinPageFlags, and PageGetFreeSpace().

Referenced by brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), and brin_page_cleanup().

◆ brin_can_do_samepage_update()

bool brin_can_do_samepage_update ( Buffer  buffer,
Size  origsz,
Size  newsz 
)

Return whether brin_doupdate can do a samepage update.

Definition at line 322 of file brin_pageops.c.

323{
324 return
325 ((newsz <= origsz) ||
326 PageGetExactFreeSpace(BufferGetPage(buffer)) >= (newsz - origsz));
327}

References BufferGetPage() and PageGetExactFreeSpace().

Referenced by brin_doupdate(), brininsert(), and summarize_range().
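
To make the calling convention concrete, here is a minimal caller-side sketch (the helper name update_summary_tuple and its argument list are invented for this example, loosely following what brininsert() and summarize_range() do): check whether the replacement tuple fits in place while holding only a share lock, then let brin_doupdate() do the actual work with just a pin held on the old buffer.

#include "postgres.h"

#include "access/brin_pageops.h"
#include "access/brin_revmap.h"
#include "access/brin_tuple.h"
#include "storage/bufmgr.h"

/* Hypothetical helper, not part of brin_pageops.c. */
static bool
update_summary_tuple(Relation idxrel, BlockNumber pagesPerRange,
                     BrinRevmap *revmap, BlockNumber heapBlk,
                     Buffer oldbuf, OffsetNumber oldoff,
                     const BrinTuple *origtup, Size origsz,
                     const BrinTuple *newtup, Size newsz)
{
    bool        samepage;

    /* brin_can_do_samepage_update() only reads the page; a share lock suffices */
    LockBuffer(oldbuf, BUFFER_LOCK_SHARE);
    samepage = brin_can_do_samepage_update(oldbuf, origsz, newsz);
    LockBuffer(oldbuf, BUFFER_LOCK_UNLOCK);

    /*
     * brin_doupdate() takes its own locks; oldbuf must only be pinned here.
     * A false return means the old tuple was changed or moved concurrently,
     * in which case real callers re-read it through the revmap and retry.
     */
    return brin_doupdate(idxrel, pagesPerRange, revmap, heapBlk,
                         oldbuf, oldoff, origtup, origsz,
                         newtup, newsz, samepage);
}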

◆ brin_doinsert()

OffsetNumber brin_doinsert ( Relation  idxrel,
BlockNumber  pagesPerRange,
BrinRevmap revmap,
Buffer buffer,
BlockNumber  heapBlk,
const BrinTuple tup,
Size  itemsz 
)

Insert an index tuple into the index relation. The revmap is updated to mark the range containing the given page as pointing to the inserted entry. A WAL record is written.

The buffer, if valid, is first checked for free space to insert the new entry; if there isn't enough, a new buffer is obtained and pinned. No buffer lock must be held on entry, no buffer lock is held on exit.

Return value is the offset number where the tuple was inserted.

Definition at line 341 of file brin_pageops.c.

344{
345 Page page;
346 BlockNumber blk;
347 OffsetNumber off;
348 Size freespace = 0;
349 Buffer revmapbuf;
350 ItemPointerData tid;
351 bool extended;
352
353 Assert(itemsz == MAXALIGN(itemsz));
354
355 /* If the item is oversized, don't even bother. */
356 if (itemsz > BrinMaxItemSize)
357 {
358 ereport(ERROR,
359 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
360 errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
361 itemsz, BrinMaxItemSize, RelationGetRelationName(idxrel))));
362 return InvalidOffsetNumber; /* keep compiler quiet */
363 }
364
365 /* Make sure the revmap is long enough to contain the entry we need */
366 brinRevmapExtend(revmap, heapBlk);
367
368 /*
369 * Acquire lock on buffer supplied by caller, if any. If it doesn't have
370 * enough space, unpin it to obtain a new one below.
371 */
372 if (BufferIsValid(*buffer))
373 {
374 /*
375 * It's possible that another backend (or ourselves!) extended the
376 * revmap over the page we held a pin on, so we cannot assume that
377 * it's still a regular page.
378 */
379 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
380 if (br_page_get_freespace(BufferGetPage(*buffer)) < itemsz)
381 {
382 UnlockReleaseBuffer(*buffer);
383 *buffer = InvalidBuffer;
384 }
385 }
386
387 /*
388 * If we still don't have a usable buffer, have brin_getinsertbuffer
389 * obtain one for us.
390 */
391 if (!BufferIsValid(*buffer))
392 {
393 do
394 *buffer = brin_getinsertbuffer(idxrel, InvalidBuffer, itemsz, &extended);
395 while (!BufferIsValid(*buffer));
396 }
397 else
398 extended = false;
399
400 /* Now obtain lock on revmap buffer */
401 revmapbuf = brinLockRevmapPageForUpdate(revmap, heapBlk);
402
403 page = BufferGetPage(*buffer);
404 blk = BufferGetBlockNumber(*buffer);
405
406 /* Execute the actual insertion */
407 START_CRIT_SECTION();
408 if (extended)
409 brin_page_init(page, BRIN_PAGETYPE_REGULAR);
410 off = PageAddItem(page, tup, itemsz, InvalidOffsetNumber, false, false);
411 if (off == InvalidOffsetNumber)
412 elog(ERROR, "failed to add BRIN tuple to new page");
413 MarkBufferDirty(*buffer);
414
415 /* needed to update FSM below */
416 if (extended)
417 freespace = br_page_get_freespace(page);
418
419 ItemPointerSet(&tid, blk, off);
420 brinSetHeapBlockItemptr(revmapbuf, pagesPerRange, heapBlk, tid);
421 MarkBufferDirty(revmapbuf);
422
423 /* XLOG stuff */
424 if (RelationNeedsWAL(idxrel))
425 {
426 xl_brin_insert xlrec;
427 XLogRecPtr recptr;
428 uint8 info;
429
430 info = XLOG_BRIN_INSERT | (extended ? XLOG_BRIN_INIT_PAGE : 0);
431 xlrec.heapBlk = heapBlk;
432 xlrec.pagesPerRange = pagesPerRange;
433 xlrec.offnum = off;
434
435 XLogBeginInsert();
436 XLogRegisterData(&xlrec, SizeOfBrinInsert);
437
438 XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD | (extended ? REGBUF_WILL_INIT : 0));
439 XLogRegisterBufData(0, tup, itemsz);
440
441 XLogRegisterBuffer(1, revmapbuf, 0);
442
443 recptr = XLogInsert(RM_BRIN_ID, info);
444
445 PageSetLSN(page, recptr);
446 PageSetLSN(BufferGetPage(revmapbuf), recptr);
447 }
448
449 END_CRIT_SECTION();
450
451 /* Tuple is firmly on buffer; we can release our locks */
452 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
453 LockBuffer(revmapbuf, BUFFER_LOCK_UNLOCK);
454
455 BRIN_elog((DEBUG2, "inserted tuple (%u,%u) for range starting at %u",
456 blk, off, heapBlk));
457
458 if (extended)
459 {
460 RecordPageWithFreeSpace(idxrel, blk, freespace);
461 FreeSpaceMapVacuumRange(idxrel, blk, blk + 1);
462 }
463
464 return off;
465}

References Assert, br_page_get_freespace(), BRIN_elog, brin_getinsertbuffer(), brin_page_init(), BRIN_PAGETYPE_REGULAR, brinLockRevmapPageForUpdate(), BrinMaxItemSize, brinRevmapExtend(), brinSetHeapBlockItemptr(), BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), DEBUG2, elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, FreeSpaceMapVacuumRange(), xl_brin_insert::heapBlk, InvalidBuffer, InvalidOffsetNumber, ItemPointerSet(), LockBuffer(), MarkBufferDirty(), MAXALIGN, PageAddItem, PageSetLSN(), RecordPageWithFreeSpace(), REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetRelationName, RelationNeedsWAL, SizeOfBrinInsert, START_CRIT_SECTION, UnlockReleaseBuffer(), XLOG_BRIN_INIT_PAGE, XLOG_BRIN_INSERT, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by _brin_parallel_merge(), brin_fill_empty_ranges(), form_and_insert_tuple(), and summarize_range().
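
As a usage illustration, the following sketch shows the calling pattern described above (the helper name insert_range_summary is invented; in PostgreSQL itself the same pattern appears in summarize_range() and form_and_insert_tuple(), which additionally keep the insertion buffer across many calls):

#include "postgres.h"

#include "access/brin_pageops.h"
#include "access/brin_revmap.h"
#include "access/brin_tuple.h"
#include "storage/bufmgr.h"

/* Hypothetical helper: insert one pre-built summary tuple for heapBlk. */
static void
insert_range_summary(Relation idxrel, BlockNumber pagesPerRange,
                     BrinRevmap *revmap, BlockNumber heapBlk,
                     BrinTuple *tup, Size size)
{
    Buffer      buf = InvalidBuffer;

    /*
     * No buffer lock may be held here; brin_doinsert() locks (and, if the
     * page lacks space, replaces) the buffer itself.  The length passed in
     * must be MAXALIGN'd, which brin_doinsert() asserts.
     */
    (void) brin_doinsert(idxrel, pagesPerRange, revmap, &buf,
                         heapBlk, tup, MAXALIGN(size));

    /*
     * The buffer comes back pinned but unlocked, so a caller inserting many
     * ranges could pass it in again; here we are done, so drop the pin.
     */
    if (BufferIsValid(buf))
        ReleaseBuffer(buf);
}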

◆ brin_doupdate()

bool brin_doupdate ( Relation  idxrel,
BlockNumber  pagesPerRange,
BrinRevmap revmap,
BlockNumber  heapBlk,
Buffer  oldbuf,
OffsetNumber  oldoff,
const BrinTuple origtup,
Size  origsz,
const BrinTuple newtup,
Size  newsz,
bool  samepage 
)

Definition at line 53 of file brin_pageops.c.

59{
63 Size oldsz;
66 bool extended;
67
69
70 /* If the item is oversized, don't bother. */
72 {
75 errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
77 return false; /* keep compiler quiet */
78 }
79
80 /* make sure the revmap is long enough to contain the entry we need */
81 brinRevmapExtend(revmap, heapBlk);
82
83 if (!samepage)
84 {
85 /* need a page on which to put the item */
88 {
89 Assert(!extended);
90 return false;
91 }
92
93 /*
94 * Note: it's possible (though unlikely) that the returned newbuf is
95 * the same as oldbuf, if brin_getinsertbuffer determined that the old
96 * buffer does in fact have enough space.
97 */
98 if (newbuf == oldbuf)
99 {
100 Assert(!extended);
102 }
103 else
105 }
106 else
107 {
110 extended = false;
111 }
114
115 /*
116 * Check that the old tuple wasn't updated concurrently: it might have
117 * moved someplace else entirely, and for that matter the whole page
118 * might've become a revmap page. Note that in the first two cases
119 * checked here, the "oldlp" we just calculated is garbage; but
120 * PageGetItemId() is simple enough that it was safe to do that
121 * calculation anyway.
122 */
126 {
128
129 /*
130 * If this happens, and the new buffer was obtained by extending the
131 * relation, then we need to ensure we don't leave it uninitialized or
132 * forget about it.
133 */
135 {
136 if (extended)
139 if (extended)
141 }
142 return false;
143 }
144
147
148 /*
149 * ... or it might have been updated in place to different contents.
150 */
152 {
155 {
156 /* As above, initialize and record new page if we got one */
157 if (extended)
160 if (extended)
162 }
163 return false;
164 }
165
166 /*
167 * Great, the old tuple is intact. We can proceed with the update.
168 *
169 * If there's enough room in the old page for the new tuple, replace it.
170 *
171 * Note that there might now be enough space on the page even though the
172 * caller told us there isn't, if a concurrent update moved another tuple
173 * elsewhere or replaced a tuple with a smaller one.
174 */
175 if (((BrinPageFlags(oldpage) & BRIN_EVACUATE_PAGE) == 0) &&
177 {
180 elog(ERROR, "failed to replace BRIN tuple");
182
183 /* XLOG stuff */
185 {
189
191
194
197
199
201 }
202
204
206
208 {
209 /* As above, initialize and record new page if we got one */
210 if (extended)
213 if (extended)
215 }
216
217 return true;
218 }
219 else if (newbuf == InvalidBuffer)
220 {
221 /*
222 * Not enough space, but caller said that there was. Tell them to
223 * start over.
224 */
226 return false;
227 }
228 else
229 {
230 /*
231 * Not enough free space on the oldpage. Put the new tuple on the new
232 * page, and update the revmap.
233 */
238 Size freespace = 0;
239
241
243
244 /*
245 * We need to initialize the page if it's newly obtained. Note we
246 * will WAL-log the initialization as part of the update, so we don't
247 * need to do that here.
248 */
249 if (extended)
251
255 elog(ERROR, "failed to add BRIN tuple to new page");
258
259 /* needed to update FSM below */
260 if (extended)
261 freespace = br_page_get_freespace(newpage);
262
264 brinSetHeapBlockItemptr(revmapbuf, pagesPerRange, heapBlk, newtid);
266
267 /* XLOG stuff */
269 {
272 uint8 info;
273
274 info = XLOG_BRIN_UPDATE | (extended ? XLOG_BRIN_INIT_PAGE : 0);
275
277 xlrec.insert.heapBlk = heapBlk;
278 xlrec.insert.pagesPerRange = pagesPerRange;
279 xlrec.oldOffnum = oldoff;
280
282
283 /* new page */
285
288
289 /* revmap page */
291
292 /* old page */
294
296
300 }
301
303
307
308 if (extended)
309 {
312 }
313
314 return true;
315 }
316}

References Assert, br_page_get_freespace(), brin_can_do_samepage_update(), BRIN_EVACUATE_PAGE, brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), BRIN_IS_REGULAR_PAGE, brin_page_init(), BRIN_PAGETYPE_REGULAR, brin_tuples_equal(), brinLockRevmapPageForUpdate(), BrinMaxItemSize, BrinPageFlags, brinRevmapExtend(), brinSetHeapBlockItemptr(), BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, FreeSpaceMapVacuumRange(), xl_brin_update::insert, InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, ItemIdGetLength, ItemIdIsNormal, ItemPointerSet(), LockBuffer(), MarkBufferDirty(), MAXALIGN, xl_brin_insert::offnum, xl_brin_samepage_update::offnum, PageAddItem, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageIndexTupleDeleteNoCompact(), PageIndexTupleOverwrite(), PageSetLSN(), RecordPageWithFreeSpace(), REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetRelationName, RelationNeedsWAL, SizeOfBrinSamepageUpdate, SizeOfBrinUpdate, START_CRIT_SECTION, UnlockReleaseBuffer(), XLOG_BRIN_INIT_PAGE, XLOG_BRIN_SAMEPAGE_UPDATE, XLOG_BRIN_UPDATE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by brin_evacuate_page(), brininsert(), and summarize_range().

◆ brin_evacuate_page()

void brin_evacuate_page ( Relation  idxRel,
BlockNumber  pagesPerRange,
BrinRevmap revmap,
Buffer  buf 
)

Move all tuples out of a page.

The caller must hold lock on the page. The lock and pin are released.

Definition at line 562 of file brin_pageops.c.

564{
565 OffsetNumber off;
566 OffsetNumber maxoff;
567 Page page;
568 BrinTuple *btup = NULL;
569 Size btupsz = 0;
570
571 page = BufferGetPage(buf);
572
573 Assert(BrinPageFlags(page) & BRIN_EVACUATE_PAGE);
574
575 maxoff = PageGetMaxOffsetNumber(page);
576 for (off = FirstOffsetNumber; off <= maxoff; off++)
577 {
578 BrinTuple *tup;
579 Size sz;
580 ItemId lp;
581
582 CHECK_FOR_INTERRUPTS();
583
584 lp = PageGetItemId(page, off);
585 if (ItemIdIsUsed(lp))
586 {
587 sz = ItemIdGetLength(lp);
588 tup = (BrinTuple *) PageGetItem(page, lp);
589 tup = brin_copy_tuple(tup, sz, btup, &btupsz);
590
591 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
592
593 if (!brin_doupdate(idxRel, pagesPerRange, revmap, tup->bt_blkno,
594 buf, off, tup, sz, tup, sz, false))
595 off--; /* retry */
596
597 LockBuffer(buf, BUFFER_LOCK_SHARE);
598
599 /* It's possible that someone extended the revmap over this page */
600 if (!BRIN_IS_REGULAR_PAGE(page))
601 break;
602 }
603 }
604
605 UnlockReleaseBuffer(buf);
606}

References Assert, brin_copy_tuple(), brin_doupdate(), BRIN_EVACUATE_PAGE, BRIN_IS_REGULAR_PAGE, BrinPageFlags, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage(), CHECK_FOR_INTERRUPTS, FirstOffsetNumber, ItemIdGetLength, ItemIdIsUsed, LockBuffer(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), and UnlockReleaseBuffer().

Referenced by revmap_physical_extend().

◆ brin_getinsertbuffer()

static Buffer brin_getinsertbuffer ( Relation  irel,
Buffer  oldbuf,
Size  itemsz,
bool extended 
)
static

Return a pinned and exclusively locked buffer which can be used to insert an index item of size itemsz (caller must ensure not to request sizes impossible to fulfill). If oldbuf is a valid buffer, it is also locked (in an order determined to avoid deadlocks).

If we find that the old page is no longer a regular index page (because of a revmap extension), the old buffer is unlocked and we return InvalidBuffer.

If there's no existing page with enough free space to accommodate the new item, the relation is extended. If this happens, *extended is set to true, and it is the caller's responsibility to initialize the page (and WAL-log that fact) prior to use. The caller should also update the FSM with the page's remaining free space after the insertion.

Note that the caller is not expected to update FSM unless *extended is set true. This policy means that we'll update FSM when a page is created, and when it's found to have too little space for a desired tuple insertion, but not every single time we add a tuple to the page.

Note that in some corner cases it is possible for this routine to extend the relation and then not return the new page. It is this routine's responsibility to WAL-log the page initialization and to record the page in FSM if that happens, since the caller certainly can't do it.

Definition at line 688 of file brin_pageops.c.

690{
693 Page page;
694 Size freespace;
695
696 /* callers must have checked */
697 Assert(itemsz <= BrinMaxItemSize);
698
701 else
703
704 /* Choose initial target page, re-using existing target if known */
707 newblk = GetPageWithFreeSpace(irel, itemsz);
708
709 /*
710 * Loop until we find a page with sufficient free space. By the time we
711 * return to caller out of this loop, both buffers are valid and locked;
712 * if we have to restart here, neither page is locked and newblk isn't
713 * pinned (if it's even valid).
714 */
715 for (;;)
716 {
717 Buffer buf;
718 bool extensionLockHeld = false;
719
721
722 *extended = false;
723
725 {
726 /*
727 * There's not enough free space in any existing index page,
728 * according to the FSM: extend the relation to obtain a shiny new
729 * page.
730 *
731 * XXX: It's likely possible to use RBM_ZERO_AND_LOCK here,
732 * which'd avoid the need to hold the extension lock during buffer
733 * reclaim.
734 */
735 if (!RELATION_IS_LOCAL(irel))
736 {
738 extensionLockHeld = true;
739 }
740 buf = ReadBuffer(irel, P_NEW);
742 *extended = true;
743
744 BRIN_elog((DEBUG2, "brin_getinsertbuffer: extending to page %u",
746 }
747 else if (newblk == oldblk)
748 {
749 /*
750 * There's an odd corner-case here where the FSM is out-of-date,
751 * and gave us the old page.
752 */
753 buf = oldbuf;
754 }
755 else
756 {
757 buf = ReadBuffer(irel, newblk);
758 }
759
760 /*
761 * We lock the old buffer first, if it's earlier than the new one; but
762 * then we need to check that it hasn't been turned into a revmap page
763 * concurrently. If we detect that that happened, give up and tell
764 * caller to start over.
765 */
767 {
770 {
772
773 /*
774 * It is possible that the new page was obtained from
775 * extending the relation. In that case, we must be sure to
776 * record it in the FSM before leaving, because otherwise the
777 * space would be lost forever. However, we cannot let an
778 * uninitialized page get in the FSM, so we need to initialize
779 * it first.
780 */
781 if (*extended)
783
786
788
789 if (*extended)
790 {
792 /* shouldn't matter, but don't confuse caller */
793 *extended = false;
794 }
795
796 return InvalidBuffer;
797 }
798 }
799
801
804
805 page = BufferGetPage(buf);
806
807 /*
808 * We have a new buffer to insert into. Check that the new page has
809 * enough free space, and return it if it does; otherwise start over.
810 * (br_page_get_freespace also checks that the FSM didn't hand us a
811 * page that has since been repurposed for the revmap.)
812 */
813 freespace = *extended ?
815 if (freespace >= itemsz)
816 {
818
819 /*
820 * Lock the old buffer if not locked already. Note that in this
821 * case we know for sure it's a regular page: it's later than the
822 * new page we just got, which is not a revmap page, and revmap
823 * pages are always consecutive.
824 */
826 {
829 }
830
831 return buf;
832 }
833
834 /* This page is no good. */
835
836 /*
837 * If an entirely new page does not contain enough free space for the
838 * new item, then surely that item is oversized. Complain loudly; but
839 * first make sure we initialize the page and record it as free, for
840 * next time.
841 */
842 if (*extended)
843 {
845 /* since this should not happen, skip FreeSpaceMapVacuum */
846
849 errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
850 itemsz, freespace, RelationGetRelationName(irel))));
851 return InvalidBuffer; /* keep compiler quiet */
852 }
853
854 if (newblk != oldblk)
858
859 /*
860 * Update the FSM with the new, presumably smaller, freespace value
861 * for this page, then search for a new target page.
862 */
863 newblk = RecordAndGetPageWithFreeSpace(irel, newblk, freespace, itemsz);
864 }
865}

References Assert, br_page_get_freespace(), BRIN_elog, brin_initialize_empty_new_buffer(), BRIN_IS_REGULAR_PAGE, BrinMaxItemSize, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), CHECK_FOR_INTERRUPTS, DEBUG2, ereport, errcode(), errmsg(), ERROR, ExclusiveLock, FreeSpaceMapVacuumRange(), GetPageWithFreeSpace(), InvalidBlockNumber, InvalidBuffer, LockBuffer(), LockRelationForExtension(), P_NEW, ReadBuffer(), RecordAndGetPageWithFreeSpace(), RELATION_IS_LOCAL, RelationGetRelationName, RelationGetTargetBlock, RelationSetTargetBlock, ReleaseBuffer(), UnlockRelationForExtension(), and UnlockReleaseBuffer().

Referenced by brin_doinsert(), and brin_doupdate().

◆ brin_initialize_empty_new_buffer()

static void brin_initialize_empty_new_buffer ( Relation  idxrel,
Buffer  buffer 
)
static

Initialize a page as an empty regular BRIN page, WAL-log this, and record the page in FSM.

There are several corner situations in which we extend the relation to obtain a new page and later find that we cannot use it immediately. When that happens, we don't want to leave the page go unrecorded in FSM, because there is no mechanism to get the space back and the index would bloat. Also, because we would not WAL-log the action that would initialize the page, the page would go uninitialized in a standby (or after recovery).

While we record the page in FSM here, caller is responsible for doing FSM upper-page update if that seems appropriate.

Definition at line 882 of file brin_pageops.c.

883{
884 Page page;
885
887 "brin_initialize_empty_new_buffer: initializing blank page %u",
888 BufferGetBlockNumber(buffer)));
889
890 START_CRIT_SECTION();
891 page = BufferGetPage(buffer);
892 brin_page_init(page, BRIN_PAGETYPE_REGULAR);
893 MarkBufferDirty(buffer);
894
895 /* XLOG stuff */
896 if (RelationNeedsWAL(idxrel))
897 log_newpage_buffer(buffer, true);
898
899 END_CRIT_SECTION();
900
901 /*
902 * We update the FSM for this page, but this is not WAL-logged. This is
903 * acceptable because VACUUM will scan the index and update the FSM with
904 * pages whose FSM records were forgotten in a crash.
905 */
906 RecordPageWithFreeSpace(idxrel, BufferGetBlockNumber(buffer),
907 br_page_get_freespace(page));
908}

References br_page_get_freespace(), BRIN_elog, brin_page_init(), BRIN_PAGETYPE_REGULAR, BufferGetBlockNumber(), BufferGetPage(), DEBUG2, END_CRIT_SECTION, log_newpage_buffer(), MarkBufferDirty(), RecordPageWithFreeSpace(), RelationNeedsWAL, and START_CRIT_SECTION.

Referenced by brin_doupdate(), brin_getinsertbuffer(), and brin_page_cleanup().

◆ brin_metapage_init()

void brin_metapage_init ( Page  page,
BlockNumber  pagesPerRange,
uint16  version 
)

Initialize a new BRIN index's metapage.

Definition at line 484 of file brin_pageops.c.

485{
486 BrinMetaPageData *metadata;
487
488 brin_page_init(page, BRIN_PAGETYPE_META);
489
490 metadata = (BrinMetaPageData *) PageGetContents(page);
491
492 metadata->brinMagic = BRIN_META_MAGIC;
493 metadata->brinVersion = version;
494 metadata->pagesPerRange = pagesPerRange;
495
496 /*
497 * Note we cheat here a little. 0 is not a valid revmap block number
498 * (because it's the metapage buffer), but doing this enables the first
499 * revmap page to be created when the index is.
500 */
501 metadata->lastRevmapPage = 0;
502
503 /*
504 * Set pd_lower just past the end of the metadata. This is essential,
505 * because without doing so, metadata will be lost if xlog.c compresses
506 * the page.
507 */
508 ((PageHeader) page)->pd_lower =
509 ((char *) metadata + sizeof(BrinMetaPageData)) - (char *) page;
510}

References BRIN_META_MAGIC, brin_page_init(), BRIN_PAGETYPE_META, BrinMetaPageData::brinMagic, BrinMetaPageData::brinVersion, BrinMetaPageData::lastRevmapPage, PageGetContents(), and BrinMetaPageData::pagesPerRange.

Referenced by brin_xlog_createidx(), brinbuild(), and brinbuildempty().
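
For context, here is a sketch of how an index build might set up block 0 as the metapage, loosely modelled on brinbuildempty(); the helper name is invented, the caller is assumed to supply a buffer for block 0 that is already pinned and exclusively locked, and BRIN_CURRENT_VERSION is assumed to be the metapage version constant declared in access/brin_page.h next to BRIN_META_MAGIC.

#include "postgres.h"

#include "access/brin_page.h"
#include "access/brin_pageops.h"
#include "access/xloginsert.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"

/* Hypothetical helper: turn an empty, locked buffer into the BRIN metapage. */
static void
init_brin_metapage(Buffer metabuf, BlockNumber pagesPerRange)
{
    START_CRIT_SECTION();

    brin_metapage_init(BufferGetPage(metabuf), pagesPerRange,
                       BRIN_CURRENT_VERSION);
    MarkBufferDirty(metabuf);

    /*
     * Because pd_lower covers the metadata (see above), the page can be
     * WAL-logged as a standard full-page image.
     */
    log_newpage_buffer(metabuf, true);

    END_CRIT_SECTION();
}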

◆ brin_page_cleanup()

void brin_page_cleanup ( Relation  idxrel,
Buffer  buf 
)

Given a BRIN index page, initialize it if necessary, and record its current free space in the FSM.

The main use for this is when, during vacuuming, an uninitialized page is found, which could be the result of relation extension followed by a crash before the page can be used.

Here, we don't bother to update upper FSM pages, instead expecting that our caller (brin_vacuum_scan) will fix them at the end of the scan. Elsewhere in this file, it's generally a good idea to propagate additions of free space into the upper FSM pages immediately.

Definition at line 622 of file brin_pageops.c.

623{
624 Page page = BufferGetPage(buf);
625
626 /*
627 * If a page was left uninitialized, initialize it now; also record it in
628 * FSM.
629 *
630 * Somebody else might be extending the relation concurrently. To avoid
631 * re-initializing the page before they can grab the buffer lock, we
632 * acquire the extension lock momentarily. Since they hold the extension
633 * lock from before getting the page and after its been initialized, we're
634 * sure to see their initialization.
635 */
636 if (PageIsNew(page))
637 {
638 LockRelationForExtension(idxrel, ShareLock);
639 UnlockRelationForExtension(idxrel, ShareLock);
640
641 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
642 if (PageIsNew(page))
643 {
644 brin_initialize_empty_new_buffer(idxrel, buf);
645 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
646 return;
647 }
648 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
649 }
650
651 /* Nothing to be done for non-regular index pages */
652 if (BRIN_IS_META_PAGE(BufferGetPage(buf)) ||
653 BRIN_IS_REVMAP_PAGE(BufferGetPage(buf)))
654 return;
655
656 /* Measure free space and record it */
657 RecordPageWithFreeSpace(idxrel, BufferGetBlockNumber(buf),
658 br_page_get_freespace(page));
659}

References br_page_get_freespace(), brin_initialize_empty_new_buffer(), BRIN_IS_META_PAGE, BRIN_IS_REVMAP_PAGE, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), LockBuffer(), LockRelationForExtension(), PageIsNew(), RecordPageWithFreeSpace(), ShareLock, and UnlockRelationForExtension().

Referenced by brin_vacuum_scan().
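
The vacuum-time usage the comment refers to looks roughly like the sketch below (the function name is invented and details such as buffer access strategies are omitted; the real loop lives in brin_vacuum_scan()): every block is passed through brin_page_cleanup(), and only afterwards are the upper FSM levels fixed in a single pass.

#include "postgres.h"

#include "access/brin_pageops.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "utils/rel.h"

/* Hypothetical sketch of a vacuum scan over a BRIN index. */
static void
vacuum_scan_index(Relation idxrel)
{
    BlockNumber nblocks = RelationGetNumberOfBlocks(idxrel);
    BlockNumber blkno;

    for (blkno = 0; blkno < nblocks; blkno++)
    {
        Buffer      buf = ReadBuffer(idxrel, blkno);

        /* initializes uninitialized pages and records free space in the FSM */
        brin_page_cleanup(idxrel, buf);
        ReleaseBuffer(buf);
    }

    /* propagate the per-page records into the upper FSM pages */
    FreeSpaceMapVacuum(idxrel);
}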

◆ brin_page_init()

void brin_page_init ( Page  page,
uint16  type 
)

Initialize a page with the given type.

Caller is responsible for marking it dirty, as appropriate.

Definition at line 473 of file brin_pageops.c.

474{
475 PageInit(page, BLCKSZ, sizeof(BrinSpecialSpace));
476
477 BrinPageType(page) = type;
478}

References BrinPageType, and PageInit().

◆ brin_start_evacuating_page()

bool brin_start_evacuating_page ( Relation  idxRel,
Buffer  buf 
)

Initiate page evacuation protocol.

The page must be locked in exclusive mode by the caller.

If the page is not yet initialized or empty, return false without doing anything; it can be used for revmap without any further changes. If it contains tuples, mark it for evacuation and return true.

Definition at line 522 of file brin_pageops.c.

523{
524 OffsetNumber off;
525 OffsetNumber maxoff;
526 Page page;
527
528 page = BufferGetPage(buf);
529
530 if (PageIsNew(page))
531 return false;
532
533 maxoff = PageGetMaxOffsetNumber(page);
534 for (off = FirstOffsetNumber; off <= maxoff; off++)
535 {
536 ItemId lp;
537
538 lp = PageGetItemId(page, off);
539 if (ItemIdIsUsed(lp))
540 {
541 /*
542 * Prevent other backends from adding more stuff to this page:
543 * BRIN_EVACUATE_PAGE informs br_page_get_freespace that this page
544 * can no longer be used to add new tuples. Note that this flag
545 * is not WAL-logged, except accidentally.
546 */
547 BrinPageFlags(page) |= BRIN_EVACUATE_PAGE;
548 MarkBufferDirtyHint(buf, true);
549
550 return true;
551 }
552 }
553 return false;
554}

References BRIN_EVACUATE_PAGE, BrinPageFlags, buf, BufferGetPage(), FirstOffsetNumber, ItemIdIsUsed, MarkBufferDirtyHint(), PageGetItemId(), PageGetMaxOffsetNumber(), and PageIsNew().
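
Taken together with brin_evacuate_page(), the protocol is used roughly as in the sketch below (the helper name is invented; the real caller is revmap_physical_extend() in brin_revmap.c, which also deals with the revmap metapage lock and retries):

#include "postgres.h"

#include "access/brin_pageops.h"
#include "access/brin_revmap.h"
#include "storage/bufmgr.h"

/* Hypothetical helper: try to free up a regular page for use by the revmap. */
static void
evacuate_for_revmap(Relation idxrel, BlockNumber pagesPerRange,
                    BrinRevmap *revmap, Buffer buf)
{
    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

    if (brin_start_evacuating_page(idxrel, buf))
    {
        /*
         * The page contains tuples and is now flagged BRIN_EVACUATE_PAGE, so
         * no new tuples can be added to it.  brin_evacuate_page() is called
         * with the lock still held; it moves every tuple elsewhere and
         * releases both the lock and the pin.
         */
        brin_evacuate_page(idxrel, pagesPerRange, revmap, buf);
    }
    else
    {
        /* not initialized or empty: usable for the revmap without changes */
        UnlockReleaseBuffer(buf);
    }
}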

Referenced by revmap_physical_extend().