PostgreSQL Source Code  git master
brin_revmap.c
Go to the documentation of this file.
1 /*
2  * brin_revmap.c
3  * Range map for BRIN indexes
4  *
5  * The range map (revmap) is a translation structure for BRIN indexes: for each
6  * page range there is one summary tuple, and its location is tracked by the
7  * revmap. Whenever a new tuple is inserted into a table that violates the
8  * previously recorded summary values, a new tuple is inserted into the index
9  * and the revmap is updated to point to it.
10  *
11  * The revmap is stored in the first pages of the index, immediately following
12  * the metapage. When the revmap needs to be expanded, all tuples on the
13  * regular BRIN page at that block (if any) are moved out of the way.
14  *
15  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
16  * Portions Copyright (c) 1994, Regents of the University of California
17  *
18  * IDENTIFICATION
19  * src/backend/access/brin/brin_revmap.c
20  */
21 #include "postgres.h"
22 
23 #include "access/brin_page.h"
24 #include "access/brin_pageops.h"
25 #include "access/brin_revmap.h"
26 #include "access/brin_tuple.h"
27 #include "access/brin_xlog.h"
28 #include "access/rmgr.h"
29 #include "access/xloginsert.h"
30 #include "miscadmin.h"
31 #include "storage/bufmgr.h"
32 #include "storage/lmgr.h"
33 #include "utils/rel.h"
34 
35 
/*
 * In revmap pages, each item stores an ItemPointerData.  These defines let one
 * find the logical revmap page number and index number of the revmap item for
 * the given heap block number.
 *
 * Arguments are fully parenthesized so that expression arguments (e.g.
 * "heapBlk + 1") expand with the intended precedence.
 */
#define HEAPBLK_TO_REVMAP_BLK(pagesPerRange, heapBlk) \
	(((heapBlk) / (pagesPerRange)) / REVMAP_PAGE_MAXITEMS)
#define HEAPBLK_TO_REVMAP_INDEX(pagesPerRange, heapBlk) \
	(((heapBlk) / (pagesPerRange)) % REVMAP_PAGE_MAXITEMS)
45 
46 
47 struct BrinRevmap
48 {
51  BlockNumber rm_lastRevmapPage; /* cached from the metapage */
54 };
55 
56 /* typedef appears in brin_revmap.h */
57 
58 
60  BlockNumber heapBlk);
61 static Buffer revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk);
63  BlockNumber heapBlk);
64 static void revmap_physical_extend(BrinRevmap *revmap);
65 
66 /*
67  * Initialize an access object for a range map. This must be freed by
68  * brinRevmapTerminate when caller is done with it.
69  */
70 BrinRevmap *
71 brinRevmapInitialize(Relation idxrel, BlockNumber *pagesPerRange)
72 {
73  BrinRevmap *revmap;
74  Buffer meta;
75  BrinMetaPageData *metadata;
76  Page page;
77 
78  meta = ReadBuffer(idxrel, BRIN_METAPAGE_BLKNO);
80  page = BufferGetPage(meta);
81  metadata = (BrinMetaPageData *) PageGetContents(page);
82 
83  revmap = palloc(sizeof(BrinRevmap));
84  revmap->rm_irel = idxrel;
85  revmap->rm_pagesPerRange = metadata->pagesPerRange;
86  revmap->rm_lastRevmapPage = metadata->lastRevmapPage;
87  revmap->rm_metaBuf = meta;
88  revmap->rm_currBuf = InvalidBuffer;
89 
90  *pagesPerRange = metadata->pagesPerRange;
91 
93 
94  return revmap;
95 }
96 
97 /*
98  * Release resources associated with a revmap access object.
99  */
100 void
102 {
103  ReleaseBuffer(revmap->rm_metaBuf);
104  if (revmap->rm_currBuf != InvalidBuffer)
105  ReleaseBuffer(revmap->rm_currBuf);
106  pfree(revmap);
107 }
108 
109 /*
110  * Extend the revmap to cover the given heap block number.
111  */
112 void
114 {
116 
117  mapBlk = revmap_extend_and_get_blkno(revmap, heapBlk);
118 
119  /* Ensure the buffer we got is in the expected range */
120  Assert(mapBlk != InvalidBlockNumber &&
121  mapBlk != BRIN_METAPAGE_BLKNO &&
122  mapBlk <= revmap->rm_lastRevmapPage);
123 }
124 
125 /*
126  * Prepare to insert an entry into the revmap; the revmap buffer in which the
127  * entry is to reside is locked and returned. Most callers should call
128  * brinRevmapExtend beforehand, as this routine does not extend the revmap if
129  * it's not long enough.
130  *
131  * The returned buffer is also recorded in the revmap struct; finishing that
132  * releases the buffer, therefore the caller needn't do it explicitly.
133  */
134 Buffer
136 {
137  Buffer rmBuf;
138 
139  rmBuf = revmap_get_buffer(revmap, heapBlk);
141 
142  return rmBuf;
143 }
144 
145 /*
146  * In the given revmap buffer (locked appropriately by caller), which is used
147  * in a BRIN index of pagesPerRange pages per range, set the element
148  * corresponding to heap block number heapBlk to the given TID.
149  *
150  * Once the operation is complete, the caller must update the LSN on the
151  * returned buffer.
152  *
153  * This is used both in regular operation and during WAL replay.
154  */
155 void
157  BlockNumber heapBlk, ItemPointerData tid)
158 {
159  RevmapContents *contents;
160  ItemPointerData *iptr;
161  Page page;
162 
163  /* The correct page should already be pinned and locked */
164  page = BufferGetPage(buf);
165  contents = (RevmapContents *) PageGetContents(page);
166  iptr = (ItemPointerData *) contents->rm_tids;
167  iptr += HEAPBLK_TO_REVMAP_INDEX(pagesPerRange, heapBlk);
168 
169  if (ItemPointerIsValid(&tid))
170  ItemPointerSet(iptr,
173  else
174  ItemPointerSetInvalid(iptr);
175 }
176 
177 /*
178  * Fetch the BrinTuple for a given heap block.
179  *
180  * The buffer containing the tuple is locked, and returned in *buf. The
181  * returned tuple points to the shared buffer and must not be freed; if caller
182  * wants to use it after releasing the buffer lock, it must create its own
183  * palloc'ed copy. As an optimization, the caller can pass a pinned buffer
184  * *buf on entry, which will avoid a pin-unpin cycle when the next tuple is on
185  * the same page as a previous one.
186  *
187  * If no tuple is found for the given heap range, returns NULL. In that case,
188  * *buf might still be updated (and pin must be released by caller), but it's
189  * not locked.
190  *
191  * The output tuple offset within the buffer is returned in *off, and its size
192  * is returned in *size.
193  */
194 BrinTuple *
196  Buffer *buf, OffsetNumber *off, Size *size, int mode)
197 {
198  Relation idxRel = revmap->rm_irel;
199  BlockNumber mapBlk;
200  RevmapContents *contents;
201  ItemPointerData *iptr;
202  BlockNumber blk;
203  Page page;
204  ItemId lp;
205  BrinTuple *tup;
206  ItemPointerData previptr;
207 
208  /* normalize the heap block number to be the first page in the range */
209  heapBlk = (heapBlk / revmap->rm_pagesPerRange) * revmap->rm_pagesPerRange;
210 
211  /*
212  * Compute the revmap page number we need. If Invalid is returned (i.e.,
213  * the revmap page hasn't been created yet), the requested page range is
214  * not summarized.
215  */
216  mapBlk = revmap_get_blkno(revmap, heapBlk);
217  if (mapBlk == InvalidBlockNumber)
218  {
219  *off = InvalidOffsetNumber;
220  return NULL;
221  }
222 
223  ItemPointerSetInvalid(&previptr);
224  for (;;)
225  {
227 
228  if (revmap->rm_currBuf == InvalidBuffer ||
229  BufferGetBlockNumber(revmap->rm_currBuf) != mapBlk)
230  {
231  if (revmap->rm_currBuf != InvalidBuffer)
232  ReleaseBuffer(revmap->rm_currBuf);
233 
234  Assert(mapBlk != InvalidBlockNumber);
235  revmap->rm_currBuf = ReadBuffer(revmap->rm_irel, mapBlk);
236  }
237 
239 
240  contents = (RevmapContents *)
242  iptr = contents->rm_tids;
243  iptr += HEAPBLK_TO_REVMAP_INDEX(revmap->rm_pagesPerRange, heapBlk);
244 
245  if (!ItemPointerIsValid(iptr))
246  {
248  return NULL;
249  }
250 
251  /*
252  * Check the TID we got in a previous iteration, if any, and save the
253  * current TID we got from the revmap; if we loop, we can sanity-check
254  * that the next one we get is different. Otherwise we might be stuck
255  * looping forever if the revmap is somehow badly broken.
256  */
257  if (ItemPointerIsValid(&previptr) && ItemPointerEquals(&previptr, iptr))
258  ereport(ERROR,
259  (errcode(ERRCODE_INDEX_CORRUPTED),
260  errmsg_internal("corrupted BRIN index: inconsistent range map")));
261  previptr = *iptr;
262 
263  blk = ItemPointerGetBlockNumber(iptr);
264  *off = ItemPointerGetOffsetNumber(iptr);
265 
267 
268  /* Ok, got a pointer to where the BrinTuple should be. Fetch it. */
269  if (!BufferIsValid(*buf) || BufferGetBlockNumber(*buf) != blk)
270  {
271  if (BufferIsValid(*buf))
272  ReleaseBuffer(*buf);
273  *buf = ReadBuffer(idxRel, blk);
274  }
275  LockBuffer(*buf, mode);
276  page = BufferGetPage(*buf);
277 
278  /* If we land on a revmap page, start over */
279  if (BRIN_IS_REGULAR_PAGE(page))
280  {
281  /*
282  * If the offset number is greater than what's in the page, it's
283  * possible that the range was desummarized concurrently. Just
284  * return NULL to handle that case.
285  */
286  if (*off > PageGetMaxOffsetNumber(page))
287  {
289  return NULL;
290  }
291 
292  lp = PageGetItemId(page, *off);
293  if (ItemIdIsUsed(lp))
294  {
295  tup = (BrinTuple *) PageGetItem(page, lp);
296 
297  if (tup->bt_blkno == heapBlk)
298  {
299  if (size)
300  *size = ItemIdGetLength(lp);
301  /* found it! */
302  return tup;
303  }
304  }
305  }
306 
307  /*
308  * No luck. Assume that the revmap was updated concurrently.
309  */
311  }
312  /* not reached, but keep compiler quiet */
313  return NULL;
314 }
315 
316 /*
317  * Delete an index tuple, marking a page range as unsummarized.
318  *
319  * Index must be locked in ShareUpdateExclusiveLock mode.
320  *
321  * Return false if caller should retry.
322  */
323 bool
325 {
326  BrinRevmap *revmap;
327  BlockNumber pagesPerRange;
328  RevmapContents *contents;
329  ItemPointerData *iptr;
330  ItemPointerData invalidIptr;
331  BlockNumber revmapBlk;
332  Buffer revmapBuf;
333  Buffer regBuf;
334  Page revmapPg;
335  Page regPg;
336  OffsetNumber revmapOffset;
337  OffsetNumber regOffset;
338  ItemId lp;
339 
340  revmap = brinRevmapInitialize(idxrel, &pagesPerRange);
341 
342  revmapBlk = revmap_get_blkno(revmap, heapBlk);
343  if (!BlockNumberIsValid(revmapBlk))
344  {
345  /* revmap page doesn't exist: range not summarized, we're done */
346  brinRevmapTerminate(revmap);
347  return true;
348  }
349 
350  /* Lock the revmap page, obtain the index tuple pointer from it */
351  revmapBuf = brinLockRevmapPageForUpdate(revmap, heapBlk);
352  revmapPg = BufferGetPage(revmapBuf);
353  revmapOffset = HEAPBLK_TO_REVMAP_INDEX(revmap->rm_pagesPerRange, heapBlk);
354 
355  contents = (RevmapContents *) PageGetContents(revmapPg);
356  iptr = contents->rm_tids;
357  iptr += revmapOffset;
358 
359  if (!ItemPointerIsValid(iptr))
360  {
361  /* no index tuple: range not summarized, we're done */
362  LockBuffer(revmapBuf, BUFFER_LOCK_UNLOCK);
363  brinRevmapTerminate(revmap);
364  return true;
365  }
366 
367  regBuf = ReadBuffer(idxrel, ItemPointerGetBlockNumber(iptr));
369  regPg = BufferGetPage(regBuf);
370 
371  /* if this is no longer a regular page, tell caller to start over */
372  if (!BRIN_IS_REGULAR_PAGE(regPg))
373  {
374  LockBuffer(revmapBuf, BUFFER_LOCK_UNLOCK);
376  brinRevmapTerminate(revmap);
377  return false;
378  }
379 
380  regOffset = ItemPointerGetOffsetNumber(iptr);
381  if (regOffset > PageGetMaxOffsetNumber(regPg))
382  ereport(ERROR,
383  (errcode(ERRCODE_INDEX_CORRUPTED),
384  errmsg("corrupted BRIN index: inconsistent range map")));
385 
386  lp = PageGetItemId(regPg, regOffset);
387  if (!ItemIdIsUsed(lp))
388  ereport(ERROR,
389  (errcode(ERRCODE_INDEX_CORRUPTED),
390  errmsg("corrupted BRIN index: inconsistent range map")));
391 
392  /*
393  * Placeholder tuples only appear during unfinished summarization, and we
394  * hold ShareUpdateExclusiveLock, so this function cannot run concurrently
395  * with that. So any placeholder tuples that exist are leftovers from a
396  * crashed or aborted summarization; remove them silently.
397  */
398 
400 
401  ItemPointerSetInvalid(&invalidIptr);
402  brinSetHeapBlockItemptr(revmapBuf, revmap->rm_pagesPerRange, heapBlk,
403  invalidIptr);
404  PageIndexTupleDeleteNoCompact(regPg, regOffset);
405  /* XXX record free space in FSM? */
406 
407  MarkBufferDirty(regBuf);
408  MarkBufferDirty(revmapBuf);
409 
410  if (RelationNeedsWAL(idxrel))
411  {
412  xl_brin_desummarize xlrec;
413  XLogRecPtr recptr;
414 
415  xlrec.pagesPerRange = revmap->rm_pagesPerRange;
416  xlrec.heapBlk = heapBlk;
417  xlrec.regOffset = regOffset;
418 
419  XLogBeginInsert();
420  XLogRegisterData((char *) &xlrec, SizeOfBrinDesummarize);
421  XLogRegisterBuffer(0, revmapBuf, 0);
423  recptr = XLogInsert(RM_BRIN_ID, XLOG_BRIN_DESUMMARIZE);
424  PageSetLSN(revmapPg, recptr);
425  PageSetLSN(regPg, recptr);
426  }
427 
429 
430  UnlockReleaseBuffer(regBuf);
431  LockBuffer(revmapBuf, BUFFER_LOCK_UNLOCK);
432  brinRevmapTerminate(revmap);
433 
434  return true;
435 }
436 
437 /*
438  * Given a heap block number, find the corresponding physical revmap block
439  * number and return it. If the revmap page hasn't been allocated yet, return
440  * InvalidBlockNumber.
441  */
442 static BlockNumber
444 {
445  BlockNumber targetblk;
446 
447  /* obtain revmap block number, skip 1 for metapage block */
448  targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1;
449 
450  /* Normal case: the revmap page is already allocated */
451  if (targetblk <= revmap->rm_lastRevmapPage)
452  return targetblk;
453 
454  return InvalidBlockNumber;
455 }
456 
457 /*
458  * Obtain and return a buffer containing the revmap page for the given heap
459  * page. The revmap must have been previously extended to cover that page.
460  * The returned buffer is also recorded in the revmap struct; finishing that
461  * releases the buffer, therefore the caller needn't do it explicitly.
462  */
463 static Buffer
465 {
466  BlockNumber mapBlk;
467 
468  /* Translate the heap block number to physical index location. */
469  mapBlk = revmap_get_blkno(revmap, heapBlk);
470 
471  if (mapBlk == InvalidBlockNumber)
472  elog(ERROR, "revmap does not cover heap block %u", heapBlk);
473 
474  /* Ensure the buffer we got is in the expected range */
475  Assert(mapBlk != BRIN_METAPAGE_BLKNO &&
476  mapBlk <= revmap->rm_lastRevmapPage);
477 
478  /*
479  * Obtain the buffer from which we need to read. If we already have the
480  * correct buffer in our access struct, use that; otherwise, release that,
481  * (if valid) and read the one we need.
482  */
483  if (revmap->rm_currBuf == InvalidBuffer ||
484  mapBlk != BufferGetBlockNumber(revmap->rm_currBuf))
485  {
486  if (revmap->rm_currBuf != InvalidBuffer)
487  ReleaseBuffer(revmap->rm_currBuf);
488 
489  revmap->rm_currBuf = ReadBuffer(revmap->rm_irel, mapBlk);
490  }
491 
492  return revmap->rm_currBuf;
493 }
494 
495 /*
496  * Given a heap block number, find the corresponding physical revmap block
497  * number and return it. If the revmap page hasn't been allocated yet, extend
498  * the revmap until it is.
499  */
500 static BlockNumber
502 {
503  BlockNumber targetblk;
504 
505  /* obtain revmap block number, skip 1 for metapage block */
506  targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1;
507 
508  /* Extend the revmap, if necessary */
509  while (targetblk > revmap->rm_lastRevmapPage)
510  {
512  revmap_physical_extend(revmap);
513  }
514 
515  return targetblk;
516 }
517 
518 /*
519  * Try to extend the revmap by one page. This might not happen for a number of
520  * reasons; caller is expected to retry until the expected outcome is obtained.
521  */
522 static void
524 {
525  Buffer buf;
526  Page page;
527  Page metapage;
528  BrinMetaPageData *metadata;
529  BlockNumber mapBlk;
530  BlockNumber nblocks;
531  Relation irel = revmap->rm_irel;
532 
533  /*
534  * Lock the metapage. This locks out concurrent extensions of the revmap,
535  * but note that we still need to grab the relation extension lock because
536  * another backend can extend the index with regular BRIN pages.
537  */
539  metapage = BufferGetPage(revmap->rm_metaBuf);
540  metadata = (BrinMetaPageData *) PageGetContents(metapage);
541 
542  /*
543  * Check that our cached lastRevmapPage value was up-to-date; if it
544  * wasn't, update the cached copy and have caller start over.
545  */
546  if (metadata->lastRevmapPage != revmap->rm_lastRevmapPage)
547  {
548  revmap->rm_lastRevmapPage = metadata->lastRevmapPage;
550  return;
551  }
552  mapBlk = metadata->lastRevmapPage + 1;
553 
554  nblocks = RelationGetNumberOfBlocks(irel);
555  if (mapBlk < nblocks)
556  {
557  buf = ReadBuffer(irel, mapBlk);
559  page = BufferGetPage(buf);
560  }
561  else
562  {
563  buf = ExtendBufferedRel(BMR_REL(irel), MAIN_FORKNUM, NULL,
564  EB_LOCK_FIRST);
565  if (BufferGetBlockNumber(buf) != mapBlk)
566  {
567  /*
568  * Very rare corner case: somebody extended the relation
569  * concurrently after we read its length. If this happens, give
570  * up and have caller start over. We will have to evacuate that
571  * page from under whoever is using it.
572  */
575  return;
576  }
577  page = BufferGetPage(buf);
578  }
579 
580  /* Check that it's a regular block (or an empty page) */
581  if (!PageIsNew(page) && !BRIN_IS_REGULAR_PAGE(page))
582  ereport(ERROR,
583  (errcode(ERRCODE_INDEX_CORRUPTED),
584  errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
585  BrinPageType(page),
588 
589  /* If the page is in use, evacuate it and restart */
590  if (brin_start_evacuating_page(irel, buf))
591  {
593  brin_evacuate_page(irel, revmap->rm_pagesPerRange, revmap, buf);
594 
595  /* have caller start over */
596  return;
597  }
598 
599  /*
600  * Ok, we have now locked the metapage and the target block. Re-initialize
601  * the target block as a revmap page, and update the metapage.
602  */
604 
605  /* the rm_tids array is initialized to all invalid by PageInit */
608 
609  metadata->lastRevmapPage = mapBlk;
610 
611  /*
612  * Set pd_lower just past the end of the metadata. This is essential,
613  * because without doing so, metadata will be lost if xlog.c compresses
614  * the page. (We must do this here because pre-v11 versions of PG did not
615  * set the metapage's pd_lower correctly, so a pg_upgraded index might
616  * contain the wrong value.)
617  */
618  ((PageHeader) metapage)->pd_lower =
619  ((char *) metadata + sizeof(BrinMetaPageData)) - (char *) metapage;
620 
621  MarkBufferDirty(revmap->rm_metaBuf);
622 
623  if (RelationNeedsWAL(revmap->rm_irel))
624  {
625  xl_brin_revmap_extend xlrec;
626  XLogRecPtr recptr;
627 
628  xlrec.targetBlk = mapBlk;
629 
630  XLogBeginInsert();
631  XLogRegisterData((char *) &xlrec, SizeOfBrinRevmapExtend);
633 
635 
636  recptr = XLogInsert(RM_BRIN_ID, XLOG_BRIN_REVMAP_EXTEND);
637  PageSetLSN(metapage, recptr);
638  PageSetLSN(page, recptr);
639  }
640 
642 
644 
646 }
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
#define BRIN_PAGETYPE_REVMAP
Definition: brin_page.h:52
#define BRIN_METAPAGE_BLKNO
Definition: brin_page.h:75
#define BrinPageType(page)
Definition: brin_page.h:42
#define BRIN_IS_REGULAR_PAGE(page)
Definition: brin_page.h:57
void brin_evacuate_page(Relation idxRel, BlockNumber pagesPerRange, BrinRevmap *revmap, Buffer buf)
Definition: brin_pageops.c:565
bool brin_start_evacuating_page(Relation idxRel, Buffer buf)
Definition: brin_pageops.c:525
void brin_page_init(Page page, uint16 type)
Definition: brin_pageops.c:476
bool brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk)
Definition: brin_revmap.c:324
static BlockNumber revmap_extend_and_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
Definition: brin_revmap.c:501
void brinRevmapTerminate(BrinRevmap *revmap)
Definition: brin_revmap.c:101
static void revmap_physical_extend(BrinRevmap *revmap)
Definition: brin_revmap.c:523
void brinRevmapExtend(BrinRevmap *revmap, BlockNumber heapBlk)
Definition: brin_revmap.c:113
BrinTuple * brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk, Buffer *buf, OffsetNumber *off, Size *size, int mode)
Definition: brin_revmap.c:195
static BlockNumber revmap_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
Definition: brin_revmap.c:443
BrinRevmap * brinRevmapInitialize(Relation idxrel, BlockNumber *pagesPerRange)
Definition: brin_revmap.c:71
void brinSetHeapBlockItemptr(Buffer buf, BlockNumber pagesPerRange, BlockNumber heapBlk, ItemPointerData tid)
Definition: brin_revmap.c:156
#define HEAPBLK_TO_REVMAP_INDEX(pagesPerRange, heapBlk)
Definition: brin_revmap.c:43
#define HEAPBLK_TO_REVMAP_BLK(pagesPerRange, heapBlk)
Definition: brin_revmap.c:41
static Buffer revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk)
Definition: brin_revmap.c:464
Buffer brinLockRevmapPageForUpdate(BrinRevmap *revmap, BlockNumber heapBlk)
Definition: brin_revmap.c:135
#define XLOG_BRIN_REVMAP_EXTEND
Definition: brin_xlog.h:35
#define SizeOfBrinRevmapExtend
Definition: brin_xlog.h:124
#define XLOG_BRIN_DESUMMARIZE
Definition: brin_xlog.h:36
#define SizeOfBrinDesummarize
Definition: brin_xlog.h:142
int Buffer
Definition: buf.h:23
#define InvalidBuffer
Definition: buf.h:25
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:3290
Buffer ExtendBufferedRel(BufferManagerRelation bmr, ForkNumber forkNum, BufferAccessStrategy strategy, uint32 flags)
Definition: bufmgr.c:812
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4480
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4497
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2111
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4715
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:708
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:157
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:158
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:227
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:350
@ EB_LOCK_FIRST
Definition: bufmgr.h:85
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:159
#define BMR_REL(p_rel)
Definition: bufmgr.h:106
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:301
void PageIndexTupleDeleteNoCompact(Page page, OffsetNumber offnum)
Definition: bufpage.c:1295
PageHeaderData * PageHeader
Definition: bufpage.h:170
static char * PageGetContents(Page page)
Definition: bufpage.h:254
Pointer Page
Definition: bufpage.h:78
static Item PageGetItem(Page page, ItemId itemId)
Definition: bufpage.h:351
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:240
static bool PageIsNew(Page page)
Definition: bufpage.h:230
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:388
static OffsetNumber PageGetMaxOffsetNumber(Page page)
Definition: bufpage.h:369
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:171
size_t Size
Definition: c.h:594
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1156
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:35
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition: itemptr.h:135
static void ItemPointerSetInvalid(ItemPointerData *pointer)
Definition: itemptr.h:184
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
static bool ItemPointerIsValid(const ItemPointerData *pointer)
Definition: itemptr.h:83
Assert(fmt[strlen(fmt) - 1] !='\n')
void pfree(void *pointer)
Definition: mcxt.c:1456
void * palloc(Size size)
Definition: mcxt.c:1226
#define START_CRIT_SECTION()
Definition: miscadmin.h:148
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:121
#define END_CRIT_SECTION()
Definition: miscadmin.h:150
#define InvalidOffsetNumber
Definition: off.h:26
uint16 OffsetNumber
Definition: off.h:24
static PgChecksumMode mode
Definition: pg_checksums.c:56
static char * buf
Definition: pg_test_fsync.c:67
#define RelationGetRelationName(relation)
Definition: rel.h:538
#define RelationNeedsWAL(relation)
Definition: rel.h:629
@ MAIN_FORKNUM
Definition: relpath.h:50
BlockNumber lastRevmapPage
Definition: brin_page.h:69
BlockNumber pagesPerRange
Definition: brin_page.h:68
BlockNumber rm_pagesPerRange
Definition: brin_revmap.c:50
BlockNumber rm_lastRevmapPage
Definition: brin_revmap.c:51
Buffer rm_metaBuf
Definition: brin_revmap.c:52
Buffer rm_currBuf
Definition: brin_revmap.c:53
Relation rm_irel
Definition: brin_revmap.c:49
BlockNumber bt_blkno
Definition: brin_tuple.h:66
ItemPointerData rm_tids[1]
Definition: brin_page.h:85
BlockNumber pagesPerRange
Definition: brin_xlog.h:135
BlockNumber heapBlk
Definition: brin_xlog.h:137
OffsetNumber regOffset
Definition: brin_xlog.h:139
BlockNumber targetBlk
Definition: brin_xlog.h:121
uint64 XLogRecPtr
Definition: xlogdefs.h:21
void XLogRegisterData(char *data, uint32 len)
Definition: xloginsert.c:351
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:461
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:243
void XLogBeginInsert(void)
Definition: xloginsert.c:150
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define REGBUF_WILL_INIT
Definition: xloginsert.h:33