PostgreSQL Source Code  git master
brin_revmap.c
Go to the documentation of this file.
1 /*
2  * brin_revmap.c
3  * Range map for BRIN indexes
4  *
5  * The range map (revmap) is a translation structure for BRIN indexes: for each
6  * page range there is one summary tuple, and its location is tracked by the
7  * revmap. Whenever a new tuple is inserted into a table that violates the
8  * previously recorded summary values, a new tuple is inserted into the index
9  * and the revmap is updated to point to it.
10  *
11  * The revmap is stored in the first pages of the index, immediately following
12  * the metapage. When the revmap needs to be expanded, all tuples on the
13  * regular BRIN page at that block (if any) are moved out of the way.
14  *
15  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
16  * Portions Copyright (c) 1994, Regents of the University of California
17  *
18  * IDENTIFICATION
19  * src/backend/access/brin/brin_revmap.c
20  */
21 #include "postgres.h"
22 
23 #include "access/brin_page.h"
24 #include "access/brin_pageops.h"
25 #include "access/brin_revmap.h"
26 #include "access/brin_tuple.h"
27 #include "access/brin_xlog.h"
28 #include "access/rmgr.h"
29 #include "access/xloginsert.h"
30 #include "miscadmin.h"
31 #include "storage/bufmgr.h"
32 #include "storage/lmgr.h"
33 #include "utils/rel.h"
34 
35 
/*
 * In revmap pages, each item stores an ItemPointerData.  These defines let
 * one find the logical revmap page number and index number of the revmap
 * item for the given heap block number.
 *
 * The arguments are fully parenthesized so the macros stay correct when
 * invoked with expression arguments (e.g. "a + b" as the heap block).
 */
#define HEAPBLK_TO_REVMAP_BLK(pagesPerRange, heapBlk) \
	(((heapBlk) / (pagesPerRange)) / REVMAP_PAGE_MAXITEMS)
#define HEAPBLK_TO_REVMAP_INDEX(pagesPerRange, heapBlk) \
	(((heapBlk) / (pagesPerRange)) % REVMAP_PAGE_MAXITEMS)
45 
46 
47 struct BrinRevmap
48 {
51  BlockNumber rm_lastRevmapPage; /* cached from the metapage */
54 };
55 
56 /* typedef appears in brin_revmap.h */
57 
58 
60  BlockNumber heapBlk);
61 static Buffer revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk);
63  BlockNumber heapBlk);
64 static void revmap_physical_extend(BrinRevmap *revmap);
65 
66 /*
67  * Initialize an access object for a range map. This must be freed by
68  * brinRevmapTerminate when caller is done with it.
69  */
70 BrinRevmap *
71 brinRevmapInitialize(Relation idxrel, BlockNumber *pagesPerRange,
72  Snapshot snapshot)
73 {
74  BrinRevmap *revmap;
75  Buffer meta;
76  BrinMetaPageData *metadata;
77  Page page;
78 
79  meta = ReadBuffer(idxrel, BRIN_METAPAGE_BLKNO);
81  page = BufferGetPage(meta);
82  TestForOldSnapshot(snapshot, idxrel, page);
83  metadata = (BrinMetaPageData *) PageGetContents(page);
84 
85  revmap = palloc(sizeof(BrinRevmap));
86  revmap->rm_irel = idxrel;
87  revmap->rm_pagesPerRange = metadata->pagesPerRange;
88  revmap->rm_lastRevmapPage = metadata->lastRevmapPage;
89  revmap->rm_metaBuf = meta;
90  revmap->rm_currBuf = InvalidBuffer;
91 
92  *pagesPerRange = metadata->pagesPerRange;
93 
95 
96  return revmap;
97 }
98 
99 /*
100  * Release resources associated with a revmap access object.
101  */
102 void
104 {
105  ReleaseBuffer(revmap->rm_metaBuf);
106  if (revmap->rm_currBuf != InvalidBuffer)
107  ReleaseBuffer(revmap->rm_currBuf);
108  pfree(revmap);
109 }
110 
111 /*
112  * Extend the revmap to cover the given heap block number.
113  */
114 void
116 {
118 
119  mapBlk = revmap_extend_and_get_blkno(revmap, heapBlk);
120 
121  /* Ensure the buffer we got is in the expected range */
122  Assert(mapBlk != InvalidBlockNumber &&
123  mapBlk != BRIN_METAPAGE_BLKNO &&
124  mapBlk <= revmap->rm_lastRevmapPage);
125 }
126 
127 /*
128  * Prepare to insert an entry into the revmap; the revmap buffer in which the
129  * entry is to reside is locked and returned. Most callers should call
130  * brinRevmapExtend beforehand, as this routine does not extend the revmap if
131  * it's not long enough.
132  *
133  * The returned buffer is also recorded in the revmap struct; finishing that
134  * releases the buffer, therefore the caller needn't do it explicitly.
135  */
136 Buffer
138 {
139  Buffer rmBuf;
140 
141  rmBuf = revmap_get_buffer(revmap, heapBlk);
143 
144  return rmBuf;
145 }
146 
147 /*
148  * In the given revmap buffer (locked appropriately by caller), which is used
149  * in a BRIN index of pagesPerRange pages per range, set the element
150  * corresponding to heap block number heapBlk to the given TID.
151  *
152  * Once the operation is complete, the caller must update the LSN on the
153  * returned buffer.
154  *
155  * This is used both in regular operation and during WAL replay.
156  */
157 void
159  BlockNumber heapBlk, ItemPointerData tid)
160 {
161  RevmapContents *contents;
162  ItemPointerData *iptr;
163  Page page;
164 
165  /* The correct page should already be pinned and locked */
166  page = BufferGetPage(buf);
167  contents = (RevmapContents *) PageGetContents(page);
168  iptr = (ItemPointerData *) contents->rm_tids;
169  iptr += HEAPBLK_TO_REVMAP_INDEX(pagesPerRange, heapBlk);
170 
171  if (ItemPointerIsValid(&tid))
172  ItemPointerSet(iptr,
175  else
176  ItemPointerSetInvalid(iptr);
177 }
178 
179 /*
180  * Fetch the BrinTuple for a given heap block.
181  *
182  * The buffer containing the tuple is locked, and returned in *buf. The
183  * returned tuple points to the shared buffer and must not be freed; if caller
184  * wants to use it after releasing the buffer lock, it must create its own
185  * palloc'ed copy. As an optimization, the caller can pass a pinned buffer
186  * *buf on entry, which will avoid a pin-unpin cycle when the next tuple is on
187  * the same page as a previous one.
188  *
189  * If no tuple is found for the given heap range, returns NULL. In that case,
190  * *buf might still be updated (and pin must be released by caller), but it's
191  * not locked.
192  *
193  * The output tuple offset within the buffer is returned in *off, and its size
194  * is returned in *size.
195  */
196 BrinTuple *
198  Buffer *buf, OffsetNumber *off, Size *size, int mode,
199  Snapshot snapshot)
200 {
201  Relation idxRel = revmap->rm_irel;
202  BlockNumber mapBlk;
203  RevmapContents *contents;
204  ItemPointerData *iptr;
205  BlockNumber blk;
206  Page page;
207  ItemId lp;
208  BrinTuple *tup;
209  ItemPointerData previptr;
210 
211  /* normalize the heap block number to be the first page in the range */
212  heapBlk = (heapBlk / revmap->rm_pagesPerRange) * revmap->rm_pagesPerRange;
213 
214  /*
215  * Compute the revmap page number we need. If Invalid is returned (i.e.,
216  * the revmap page hasn't been created yet), the requested page range is
217  * not summarized.
218  */
219  mapBlk = revmap_get_blkno(revmap, heapBlk);
220  if (mapBlk == InvalidBlockNumber)
221  {
222  *off = InvalidOffsetNumber;
223  return NULL;
224  }
225 
226  ItemPointerSetInvalid(&previptr);
227  for (;;)
228  {
230 
231  if (revmap->rm_currBuf == InvalidBuffer ||
232  BufferGetBlockNumber(revmap->rm_currBuf) != mapBlk)
233  {
234  if (revmap->rm_currBuf != InvalidBuffer)
235  ReleaseBuffer(revmap->rm_currBuf);
236 
237  Assert(mapBlk != InvalidBlockNumber);
238  revmap->rm_currBuf = ReadBuffer(revmap->rm_irel, mapBlk);
239  }
240 
242 
243  contents = (RevmapContents *)
245  iptr = contents->rm_tids;
246  iptr += HEAPBLK_TO_REVMAP_INDEX(revmap->rm_pagesPerRange, heapBlk);
247 
248  if (!ItemPointerIsValid(iptr))
249  {
251  return NULL;
252  }
253 
254  /*
255  * Check the TID we got in a previous iteration, if any, and save the
256  * current TID we got from the revmap; if we loop, we can sanity-check
257  * that the next one we get is different. Otherwise we might be stuck
258  * looping forever if the revmap is somehow badly broken.
259  */
260  if (ItemPointerIsValid(&previptr) && ItemPointerEquals(&previptr, iptr))
261  ereport(ERROR,
262  (errcode(ERRCODE_INDEX_CORRUPTED),
263  errmsg_internal("corrupted BRIN index: inconsistent range map")));
264  previptr = *iptr;
265 
266  blk = ItemPointerGetBlockNumber(iptr);
267  *off = ItemPointerGetOffsetNumber(iptr);
268 
270 
271  /* Ok, got a pointer to where the BrinTuple should be. Fetch it. */
272  if (!BufferIsValid(*buf) || BufferGetBlockNumber(*buf) != blk)
273  {
274  if (BufferIsValid(*buf))
275  ReleaseBuffer(*buf);
276  *buf = ReadBuffer(idxRel, blk);
277  }
278  LockBuffer(*buf, mode);
279  page = BufferGetPage(*buf);
280  TestForOldSnapshot(snapshot, idxRel, page);
281 
282  /* If we land on a revmap page, start over */
283  if (BRIN_IS_REGULAR_PAGE(page))
284  {
285  if (*off > PageGetMaxOffsetNumber(page))
286  ereport(ERROR,
287  (errcode(ERRCODE_INDEX_CORRUPTED),
288  errmsg_internal("corrupted BRIN index: inconsistent range map")));
289  lp = PageGetItemId(page, *off);
290  if (ItemIdIsUsed(lp))
291  {
292  tup = (BrinTuple *) PageGetItem(page, lp);
293 
294  if (tup->bt_blkno == heapBlk)
295  {
296  if (size)
297  *size = ItemIdGetLength(lp);
298  /* found it! */
299  return tup;
300  }
301  }
302  }
303 
304  /*
305  * No luck. Assume that the revmap was updated concurrently.
306  */
308  }
309  /* not reached, but keep compiler quiet */
310  return NULL;
311 }
312 
313 /*
314  * Delete an index tuple, marking a page range as unsummarized.
315  *
316  * Index must be locked in ShareUpdateExclusiveLock mode.
317  *
318  * Return false if caller should retry.
319  */
320 bool
322 {
323  BrinRevmap *revmap;
324  BlockNumber pagesPerRange;
325  RevmapContents *contents;
326  ItemPointerData *iptr;
327  ItemPointerData invalidIptr;
328  BlockNumber revmapBlk;
329  Buffer revmapBuf;
330  Buffer regBuf;
331  Page revmapPg;
332  Page regPg;
333  OffsetNumber revmapOffset;
334  OffsetNumber regOffset;
335  ItemId lp;
336  BrinTuple *tup;
337 
338  revmap = brinRevmapInitialize(idxrel, &pagesPerRange, NULL);
339 
340  revmapBlk = revmap_get_blkno(revmap, heapBlk);
341  if (!BlockNumberIsValid(revmapBlk))
342  {
343  /* revmap page doesn't exist: range not summarized, we're done */
344  brinRevmapTerminate(revmap);
345  return true;
346  }
347 
348  /* Lock the revmap page, obtain the index tuple pointer from it */
349  revmapBuf = brinLockRevmapPageForUpdate(revmap, heapBlk);
350  revmapPg = BufferGetPage(revmapBuf);
351  revmapOffset = HEAPBLK_TO_REVMAP_INDEX(revmap->rm_pagesPerRange, heapBlk);
352 
353  contents = (RevmapContents *) PageGetContents(revmapPg);
354  iptr = contents->rm_tids;
355  iptr += revmapOffset;
356 
357  if (!ItemPointerIsValid(iptr))
358  {
359  /* no index tuple: range not summarized, we're done */
360  LockBuffer(revmapBuf, BUFFER_LOCK_UNLOCK);
361  brinRevmapTerminate(revmap);
362  return true;
363  }
364 
365  regBuf = ReadBuffer(idxrel, ItemPointerGetBlockNumber(iptr));
367  regPg = BufferGetPage(regBuf);
368 
369  /* if this is no longer a regular page, tell caller to start over */
370  if (!BRIN_IS_REGULAR_PAGE(regPg))
371  {
372  LockBuffer(revmapBuf, BUFFER_LOCK_UNLOCK);
374  brinRevmapTerminate(revmap);
375  return false;
376  }
377 
378  regOffset = ItemPointerGetOffsetNumber(iptr);
379  if (regOffset > PageGetMaxOffsetNumber(regPg))
380  ereport(ERROR,
381  (errcode(ERRCODE_INDEX_CORRUPTED),
382  errmsg("corrupted BRIN index: inconsistent range map")));
383 
384  lp = PageGetItemId(regPg, regOffset);
385  if (!ItemIdIsUsed(lp))
386  ereport(ERROR,
387  (errcode(ERRCODE_INDEX_CORRUPTED),
388  errmsg("corrupted BRIN index: inconsistent range map")));
389  tup = (BrinTuple *) PageGetItem(regPg, lp);
390  /* XXX apply sanity checks? Might as well delete a bogus tuple ... */
391 
392  /*
393  * We're only removing data, not reading it, so there's no need to
394  * TestForOldSnapshot here.
395  */
396 
397  /*
398  * Because of ShareUpdateExclusive lock, this function shouldn't run
399  * concurrently with summarization. Placeholder tuples can only exist as
400  * leftovers from crashed summarization, so if we detect any, we complain
401  * but proceed.
402  */
403  if (BrinTupleIsPlaceholder(tup))
405  (errmsg("leftover placeholder tuple detected in BRIN index \"%s\", deleting",
406  RelationGetRelationName(idxrel))));
407 
409 
410  ItemPointerSetInvalid(&invalidIptr);
411  brinSetHeapBlockItemptr(revmapBuf, revmap->rm_pagesPerRange, heapBlk,
412  invalidIptr);
413  PageIndexTupleDeleteNoCompact(regPg, regOffset);
414  /* XXX record free space in FSM? */
415 
416  MarkBufferDirty(regBuf);
417  MarkBufferDirty(revmapBuf);
418 
419  if (RelationNeedsWAL(idxrel))
420  {
421  xl_brin_desummarize xlrec;
422  XLogRecPtr recptr;
423 
424  xlrec.pagesPerRange = revmap->rm_pagesPerRange;
425  xlrec.heapBlk = heapBlk;
426  xlrec.regOffset = regOffset;
427 
428  XLogBeginInsert();
429  XLogRegisterData((char *) &xlrec, SizeOfBrinDesummarize);
430  XLogRegisterBuffer(0, revmapBuf, 0);
432  recptr = XLogInsert(RM_BRIN_ID, XLOG_BRIN_DESUMMARIZE);
433  PageSetLSN(revmapPg, recptr);
434  PageSetLSN(regPg, recptr);
435  }
436 
438 
439  UnlockReleaseBuffer(regBuf);
440  LockBuffer(revmapBuf, BUFFER_LOCK_UNLOCK);
441  brinRevmapTerminate(revmap);
442 
443  return true;
444 }
445 
446 /*
447  * Given a heap block number, find the corresponding physical revmap block
448  * number and return it. If the revmap page hasn't been allocated yet, return
449  * InvalidBlockNumber.
450  */
451 static BlockNumber
453 {
454  BlockNumber targetblk;
455 
456  /* obtain revmap block number, skip 1 for metapage block */
457  targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1;
458 
459  /* Normal case: the revmap page is already allocated */
460  if (targetblk <= revmap->rm_lastRevmapPage)
461  return targetblk;
462 
463  return InvalidBlockNumber;
464 }
465 
466 /*
467  * Obtain and return a buffer containing the revmap page for the given heap
468  * page. The revmap must have been previously extended to cover that page.
469  * The returned buffer is also recorded in the revmap struct; finishing that
470  * releases the buffer, therefore the caller needn't do it explicitly.
471  */
472 static Buffer
474 {
475  BlockNumber mapBlk;
476 
477  /* Translate the heap block number to physical index location. */
478  mapBlk = revmap_get_blkno(revmap, heapBlk);
479 
480  if (mapBlk == InvalidBlockNumber)
481  elog(ERROR, "revmap does not cover heap block %u", heapBlk);
482 
483  /* Ensure the buffer we got is in the expected range */
484  Assert(mapBlk != BRIN_METAPAGE_BLKNO &&
485  mapBlk <= revmap->rm_lastRevmapPage);
486 
487  /*
488  * Obtain the buffer from which we need to read. If we already have the
489  * correct buffer in our access struct, use that; otherwise, release that,
490  * (if valid) and read the one we need.
491  */
492  if (revmap->rm_currBuf == InvalidBuffer ||
493  mapBlk != BufferGetBlockNumber(revmap->rm_currBuf))
494  {
495  if (revmap->rm_currBuf != InvalidBuffer)
496  ReleaseBuffer(revmap->rm_currBuf);
497 
498  revmap->rm_currBuf = ReadBuffer(revmap->rm_irel, mapBlk);
499  }
500 
501  return revmap->rm_currBuf;
502 }
503 
504 /*
505  * Given a heap block number, find the corresponding physical revmap block
506  * number and return it. If the revmap page hasn't been allocated yet, extend
507  * the revmap until it is.
508  */
509 static BlockNumber
511 {
512  BlockNumber targetblk;
513 
514  /* obtain revmap block number, skip 1 for metapage block */
515  targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1;
516 
517  /* Extend the revmap, if necessary */
518  while (targetblk > revmap->rm_lastRevmapPage)
519  {
521  revmap_physical_extend(revmap);
522  }
523 
524  return targetblk;
525 }
526 
527 /*
528  * Try to extend the revmap by one page. This might not happen for a number of
529  * reasons; caller is expected to retry until the expected outcome is obtained.
530  */
531 static void
533 {
534  Buffer buf;
535  Page page;
536  Page metapage;
537  BrinMetaPageData *metadata;
538  BlockNumber mapBlk;
539  BlockNumber nblocks;
540  Relation irel = revmap->rm_irel;
541  bool needLock = !RELATION_IS_LOCAL(irel);
542 
543  /*
544  * Lock the metapage. This locks out concurrent extensions of the revmap,
545  * but note that we still need to grab the relation extension lock because
546  * another backend can extend the index with regular BRIN pages.
547  */
549  metapage = BufferGetPage(revmap->rm_metaBuf);
550  metadata = (BrinMetaPageData *) PageGetContents(metapage);
551 
552  /*
553  * Check that our cached lastRevmapPage value was up-to-date; if it
554  * wasn't, update the cached copy and have caller start over.
555  */
556  if (metadata->lastRevmapPage != revmap->rm_lastRevmapPage)
557  {
558  revmap->rm_lastRevmapPage = metadata->lastRevmapPage;
560  return;
561  }
562  mapBlk = metadata->lastRevmapPage + 1;
563 
564  nblocks = RelationGetNumberOfBlocks(irel);
565  if (mapBlk < nblocks)
566  {
567  buf = ReadBuffer(irel, mapBlk);
569  page = BufferGetPage(buf);
570  }
571  else
572  {
573  if (needLock)
575 
576  buf = ReadBuffer(irel, P_NEW);
577  if (BufferGetBlockNumber(buf) != mapBlk)
578  {
579  /*
580  * Very rare corner case: somebody extended the relation
581  * concurrently after we read its length. If this happens, give
582  * up and have caller start over. We will have to evacuate that
583  * page from under whoever is using it.
584  */
585  if (needLock)
588  ReleaseBuffer(buf);
589  return;
590  }
592  page = BufferGetPage(buf);
593 
594  if (needLock)
596  }
597 
598  /* Check that it's a regular block (or an empty page) */
599  if (!PageIsNew(page) && !BRIN_IS_REGULAR_PAGE(page))
600  ereport(ERROR,
601  (errcode(ERRCODE_INDEX_CORRUPTED),
602  errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
603  BrinPageType(page),
605  BufferGetBlockNumber(buf))));
606 
607  /* If the page is in use, evacuate it and restart */
608  if (brin_start_evacuating_page(irel, buf))
609  {
611  brin_evacuate_page(irel, revmap->rm_pagesPerRange, revmap, buf);
612 
613  /* have caller start over */
614  return;
615  }
616 
617  /*
618  * Ok, we have now locked the metapage and the target block. Re-initialize
619  * the target block as a revmap page, and update the metapage.
620  */
622 
623  /* the rm_tids array is initialized to all invalid by PageInit */
625  MarkBufferDirty(buf);
626 
627  metadata->lastRevmapPage = mapBlk;
628 
629  /*
630  * Set pd_lower just past the end of the metadata. This is essential,
631  * because without doing so, metadata will be lost if xlog.c compresses
632  * the page. (We must do this here because pre-v11 versions of PG did not
633  * set the metapage's pd_lower correctly, so a pg_upgraded index might
634  * contain the wrong value.)
635  */
636  ((PageHeader) metapage)->pd_lower =
637  ((char *) metadata + sizeof(BrinMetaPageData)) - (char *) metapage;
638 
639  MarkBufferDirty(revmap->rm_metaBuf);
640 
641  if (RelationNeedsWAL(revmap->rm_irel))
642  {
643  xl_brin_revmap_extend xlrec;
644  XLogRecPtr recptr;
645 
646  xlrec.targetBlk = mapBlk;
647 
648  XLogBeginInsert();
649  XLogRegisterData((char *) &xlrec, SizeOfBrinRevmapExtend);
651 
653 
654  recptr = XLogInsert(RM_BRIN_ID, XLOG_BRIN_REVMAP_EXTEND);
655  PageSetLSN(metapage, recptr);
656  PageSetLSN(page, recptr);
657  }
658 
660 
662 
663  UnlockReleaseBuffer(buf);
664 }
BlockNumber rm_lastRevmapPage
Definition: brin_revmap.c:51
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define BrinTupleIsPlaceholder(tup)
Definition: brin_tuple.h:83
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:86
Relation rm_irel
Definition: brin_revmap.c:49
#define HEAPBLK_TO_REVMAP_INDEX(pagesPerRange, heapBlk)
Definition: brin_revmap.c:43
OffsetNumber regOffset
Definition: brin_xlog.h:139
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:264
static BlockNumber revmap_extend_and_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
Definition: brin_revmap.c:510
#define BRIN_METAPAGE_BLKNO
Definition: brin_page.h:75
BlockNumber pagesPerRange
Definition: brin_xlog.h:135
void brinRevmapTerminate(BrinRevmap *revmap)
Definition: brin_revmap.c:103
BrinTuple * brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk, Buffer *buf, OffsetNumber *off, Size *size, int mode, Snapshot snapshot)
Definition: brin_revmap.c:197
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1458
#define ExclusiveLock
Definition: lockdefs.h:44
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
#define RELATION_IS_LOCAL(relation)
Definition: rel.h:542
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
#define InvalidBuffer
Definition: buf.h:25
Buffer brinLockRevmapPageForUpdate(BrinRevmap *revmap, BlockNumber heapBlk)
Definition: brin_revmap.c:137
#define REGBUF_WILL_INIT
Definition: xloginsert.h:33
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
int errcode(int sqlerrcode)
Definition: elog.c:608
#define BRIN_IS_REGULAR_PAGE(page)
Definition: brin_page.h:57
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3365
#define P_NEW
Definition: bufmgr.h:81
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:88
#define SizeOfBrinRevmapExtend
Definition: brin_xlog.h:124
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
void pfree(void *pointer)
Definition: mcxt.c:1056
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3388
#define ERROR
Definition: elog.h:43
#define XLOG_BRIN_DESUMMARIZE
Definition: brin_xlog.h:36
void brinRevmapExtend(BrinRevmap *revmap, BlockNumber heapBlk)
Definition: brin_revmap.c:115
static char * buf
Definition: pg_test_fsync.c:67
static void revmap_physical_extend(BrinRevmap *revmap)
Definition: brin_revmap.c:532
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define RelationGetRelationName(relation)
Definition: rel.h:456
#define BufferGetPage(buffer)
Definition: bufmgr.h:159
BlockNumber targetBlk
Definition: brin_xlog.h:121
#define ereport(elevel, rest)
Definition: elog.h:141
#define XLOG_BRIN_REVMAP_EXTEND
Definition: brin_xlog.h:35
#define WARNING
Definition: elog.h:40
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockRelationForExtension(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:402
BlockNumber lastRevmapPage
Definition: brin_page.h:69
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
void UnlockRelationForExtension(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:452
#define PageGetContents(page)
Definition: bufpage.h:246
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3602
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:198
#define InvalidOffsetNumber
Definition: off.h:26
BlockNumber pagesPerRange
Definition: brin_page.h:68
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
BlockNumber bt_blkno
Definition: brin_tuple.h:57
void PageIndexTupleDeleteNoCompact(Page page, OffsetNumber offnum)
Definition: bufpage.c:957
int errmsg_internal(const char *fmt,...)
Definition: elog.c:909
PageHeaderData * PageHeader
Definition: bufpage.h:166
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:739
static Buffer revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk)
Definition: brin_revmap.c:473
Buffer rm_currBuf
Definition: brin_revmap.c:53
bool brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk)
Definition: brin_revmap.c:321
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:596
#define BRIN_PAGETYPE_REVMAP
Definition: brin_page.h:52
size_t Size
Definition: c.h:467
void brin_evacuate_page(Relation idxRel, BlockNumber pagesPerRange, BrinRevmap *revmap, Buffer buf)
Definition: brin_pageops.c:560
#define InvalidBlockNumber
Definition: block.h:33
BrinRevmap * brinRevmapInitialize(Relation idxrel, BlockNumber *pagesPerRange, Snapshot snapshot)
Definition: brin_revmap.c:71
#define BufferIsValid(bufnum)
Definition: bufmgr.h:113
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
BlockNumber heapBlk
Definition: brin_xlog.h:137
#define SizeOfBrinDesummarize
Definition: brin_xlog.h:142
#define RelationNeedsWAL(relation)
Definition: rel.h:524
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
Buffer rm_metaBuf
Definition: brin_revmap.c:52
#define BrinPageType(page)
Definition: brin_page.h:42
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2613
#define ItemPointerSetInvalid(pointer)
Definition: itemptr.h:172
#define PageIsNew(page)
Definition: bufpage.h:229
void * palloc(Size size)
Definition: mcxt.c:949
BlockNumber rm_pagesPerRange
Definition: brin_revmap.c:50
int errmsg(const char *fmt,...)
Definition: elog.c:822
#define elog(elevel,...)
Definition: elog.h:228
#define HEAPBLK_TO_REVMAP_BLK(pagesPerRange, heapBlk)
Definition: brin_revmap.c:41
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:87
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99
bool brin_start_evacuating_page(Relation idxRel, Buffer buf)
Definition: brin_pageops.c:525
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
static BlockNumber revmap_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
Definition: brin_revmap.c:452
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:123
void brinSetHeapBlockItemptr(Buffer buf, BlockNumber pagesPerRange, BlockNumber heapBlk, ItemPointerData tid)
Definition: brin_revmap.c:158
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127
ItemPointerData rm_tids[1]
Definition: brin_page.h:85
void brin_page_init(Page page, uint16 type)
Definition: brin_pageops.c:476