/*-------------------------------------------------------------------------
 *
 * visibilitymap.c
 *    bitmap for tracking visibility of heap tuples
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/access/heap/visibilitymap.c
 *
 * INTERFACE ROUTINES
 *    visibilitymap_clear  - clear bits for one page in the visibility map
 *    visibilitymap_pin    - pin a map page for setting a bit
 *    visibilitymap_pin_ok - check whether correct map page is already pinned
 *    visibilitymap_set    - set a bit in a previously pinned page
 *    visibilitymap_get_status - get status of bits
 *    visibilitymap_count  - count number of bits set in visibility map
 *    visibilitymap_prepare_truncate -
 *        prepare for truncation of the visibility map
 *
 * NOTES
 *
 * The visibility map is a bitmap with two bits (all-visible and all-frozen)
 * per heap page. A set all-visible bit means that all tuples on the page are
 * known visible to all transactions, and therefore the page doesn't need to
 * be vacuumed. A set all-frozen bit means that all tuples on the page are
 * completely frozen, and therefore the page doesn't need to be vacuumed even
 * when a whole-table scanning vacuum is required (e.g., an anti-wraparound
 * vacuum). The all-frozen bit must be set only when the page is already
 * all-visible.
 *
 * The map is conservative in the sense that we make sure that whenever a bit
 * is set, we know the condition is true, but if a bit is not set, it might or
 * might not be true.
 *
 * Clearing visibility map bits is not separately WAL-logged. The callers
 * must make sure that whenever a bit is cleared, the bit is cleared on WAL
 * replay of the updating operation as well.
 *
 * When we *set* a visibility map bit during VACUUM, we must write WAL. This
 * may seem counterintuitive, since the bit is basically a hint: if it is
 * clear, it may still be the case that every tuple on the page is visible to
 * all transactions; we just don't know that for certain. The difficulty is
 * that there are two bits which are typically set together: the
 * PD_ALL_VISIBLE bit on the page itself, and the visibility map bit. If a
 * crash occurs after the visibility map page makes it to disk and before the
 * updated heap page makes it to disk, redo must set the bit on the heap
 * page. Otherwise, the next insert, update, or delete on the heap page will
 * fail to realize that the visibility map bit must be cleared, possibly
 * causing index-only scans to return wrong answers.
 *
 * VACUUM will normally skip pages for which the visibility map bit is set;
 * such pages can't contain any dead tuples and therefore don't need
 * vacuuming.
 *
 * LOCKING
 *
 * In heapam.c, whenever a page is modified so that not all tuples on the
 * page are visible to everyone anymore, the corresponding bit in the
 * visibility map is cleared. In order to be crash-safe, we need to do this
 * while still holding a lock on the heap page and in the same critical
 * section that logs the page modification. However, we don't want to hold
 * the buffer lock over any I/O that may be required to read in the visibility
 * map page. To avoid this, we examine the heap page before locking it;
 * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
 * page. Then, we lock the buffer. But this creates a race condition: there
 * is a possibility that in the time it takes to lock the buffer, the
 * PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
 * buffer, pin the visibility map page, and relock the buffer. This shouldn't
 * happen often, because only VACUUM currently sets visibility map bits,
 * and the race will only occur if VACUUM processes a given page at almost
 * exactly the same time that someone tries to further modify it.
 *
 * To set a bit, you need to hold a lock on the heap page. That prevents
 * the race condition where VACUUM sees that all tuples on the page are
 * visible to everyone, but another backend modifies the page before VACUUM
 * sets the bit in the visibility map.
 *
 * When a bit is set, the LSN of the visibility map page is updated to make
 * sure that the visibility map update doesn't get written to disk before the
 * WAL record of the changes that made it possible to set the bit is flushed.
 * But when a bit is cleared, we don't have to do that because it's always
 * safe to clear a bit in the map from a correctness point of view.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam_xlog.h"
#include "access/visibilitymap.h"
#include "access/xlog.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
#include "utils/inval.h"


/*#define TRACE_VISIBILITYMAP */

/*
 * Size of the bitmap on each visibility map page, in bytes. There are no
 * extra headers, so the whole page minus the standard page header is
 * used for the bitmap.
 */
#define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))

/* Number of heap blocks we can represent in one byte */
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)

/* Number of heap blocks we can represent in one visibility map page. */
#define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE)

/* Mapping from heap block number to the right bit in the visibility map */
#define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE)
#define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
#define HEAPBLK_TO_OFFSET(x) (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK)

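/*----
 * Worked example of the mapping macros (an illustrative sketch, assuming
 * the standard BITS_PER_HEAPBLOCK = 2 from visibilitymap.h, 8 KB pages,
 * and a 24-byte MAXALIGN'd page header, so MAPSIZE = 8168 and
 * HEAPBLOCKS_PER_PAGE = 32672):
 *
 *    heapBlk = 70000
 *    HEAPBLK_TO_MAPBLOCK(70000) = 70000 / 32672 = 2
 *    HEAPBLK_TO_MAPBYTE(70000)  = (70000 % 32672) / 4 = 4656 / 4 = 1164
 *    HEAPBLK_TO_OFFSET(70000)   = (70000 % 4) * 2 = 0
 *
 * so the status of heap block 70000 lives in bits 0-1 of byte 1164 on
 * visibility map page 2.
 *----
 */
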
/* Masks for counting subsets of bits in the visibility map. */
#define VISIBLE_MASK64 UINT64CONST(0x5555555555555555)  /* The lower bit of
                                                         * each bit pair */
#define FROZEN_MASK64  UINT64CONST(0xaaaaaaaaaaaaaaaa)  /* The upper bit of
                                                         * each bit pair */

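/*----
 * Illustrative sketch of how the masks separate the two bit kinds. With
 * BITS_PER_HEAPBLOCK = 2 and the all-visible bit in the lower position of
 * each pair, a map byte of 0x0D (binary 00 00 11 01) says heap block 0 is
 * all-visible, heap block 1 is all-visible and all-frozen, and blocks 2-3
 * have no bits set. Over a whole uint64 word of map data:
 *
 *    pg_popcount64(word & VISIBLE_MASK64)  counts all-visible heap pages
 *    pg_popcount64(word & FROZEN_MASK64)   counts all-frozen heap pages
 *
 * which is exactly what visibilitymap_count() below does per word.
 *----
 */
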
/* prototypes for internal routines */
static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
static void vm_extend(Relation rel, BlockNumber vm_nblocks);


/*
 * visibilitymap_clear - clear specified bits for one page in visibility map
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one. This function doesn't do
 * any I/O. Returns true if any bits have been cleared and false otherwise.
 */
bool
visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    int         mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    int         mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    uint8       mask = flags << mapOffset;
    char       *map;
    bool        cleared = false;

    Assert(flags & VISIBILITYMAP_VALID_BITS);

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
#endif

    if (!BufferIsValid(buf) || BufferGetBlockNumber(buf) != mapBlock)
        elog(ERROR, "wrong buffer passed to visibilitymap_clear");

    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    map = PageGetContents(BufferGetPage(buf));

    if (map[mapByte] & mask)
    {
        map[mapByte] &= ~mask;

        MarkBufferDirty(buf);
        cleared = true;
    }

    LockBuffer(buf, BUFFER_LOCK_UNLOCK);

    return cleared;
}

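/*----
 * A minimal sketch of how a heapam.c-style caller clears the bit while
 * modifying a heap page, following the LOCKING rules in the header comment.
 * This is illustrative, not code from this file; "relation", "buffer",
 * "vmbuffer", and "blkno" are hypothetical names:
 *
 *    if (PageIsAllVisible(BufferGetPage(buffer)))
 *        visibilitymap_pin(relation, blkno, &vmbuffer);  -- pin before lock
 *    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *    if (PageIsAllVisible(BufferGetPage(buffer)) &&
 *        !visibilitymap_pin_ok(blkno, vmbuffer))
 *        -- the race described above: unlock, pin the map page, relock
 *    ...
 *    -- inside the critical section that WAL-logs the page modification:
 *    PageClearAllVisible(BufferGetPage(buffer));
 *    visibilitymap_clear(relation, blkno, vmbuffer,
 *                        VISIBILITYMAP_VALID_BITS);
 *----
 */
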
/*
 * visibilitymap_pin - pin a map page for setting a bit
 *
 * Setting a bit in the visibility map is a two-phase operation. First, call
 * visibilitymap_pin, to pin the visibility map page containing the bit for
 * the heap page. Because that can require I/O to read the map page, you
 * shouldn't hold a lock on the heap page while doing that. Then, call
 * visibilitymap_set to actually set the bit.
 *
 * On entry, *buf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. On return, *buf is a valid buffer with the map page containing
 * the bit for heapBlk.
 *
 * If the page doesn't exist in the map file yet, it is extended.
 */
void
visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

    /* Reuse the old pinned buffer if possible */
    if (BufferIsValid(*buf))
    {
        if (BufferGetBlockNumber(*buf) == mapBlock)
            return;

        ReleaseBuffer(*buf);
    }
    *buf = vm_readbuf(rel, mapBlock, true);
}

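/*----
 * A minimal sketch of the two-phase protocol described above, as a
 * VACUUM-like caller might use it. Hedged: the variable names and the
 * surrounding logic are illustrative, not taken from this file:
 *
 *    visibilitymap_pin(relation, blkno, &vmbuffer);  -- may do I/O, no lock
 *    LockBuffer(heapBuf, BUFFER_LOCK_EXCLUSIVE);
 *    -- recheck that all tuples on the page really are visible to all
 *    PageSetAllVisible(BufferGetPage(heapBuf));
 *    MarkBufferDirty(heapBuf);
 *    visibilitymap_set(relation, blkno, heapBuf, InvalidXLogRecPtr,
 *                      vmbuffer, cutoff_xid, VISIBILITYMAP_ALL_VISIBLE);
 *    LockBuffer(heapBuf, BUFFER_LOCK_UNLOCK);
 *    ReleaseBuffer(vmbuffer);
 *----
 */
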
/*
 * visibilitymap_pin_ok - do we already have the correct page pinned?
 *
 * On entry, buf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. The return value indicates whether the buffer covers the
 * given heapBlk.
 */
bool
visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

    return BufferIsValid(buf) && BufferGetBlockNumber(buf) == mapBlock;
}

/*
 * visibilitymap_set - set bit(s) on a previously pinned page
 *
 * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
 * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
 * one provided; in normal running, we generate a new XLOG record and set the
 * page LSN to that value. cutoff_xid is the largest xmin on the page being
 * marked all-visible; it is needed for Hot Standby, and can be
 * InvalidTransactionId if the page contains no tuples. It can also be set
 * to InvalidTransactionId when a page that is already all-visible is being
 * marked all-frozen.
 *
 * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
 * this function. Except in recovery, caller should also pass the heap
 * buffer. When checksums are enabled and we're not in recovery, we must add
 * the heap buffer to the WAL chain to protect it from being torn.
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one. This function doesn't do
 * any I/O.
 */
void
visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
                  XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
                  uint8 flags)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    uint8       mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    Page        page;
    uint8      *map;

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
#endif

    Assert(InRecovery || XLogRecPtrIsInvalid(recptr));
    Assert(InRecovery || BufferIsValid(heapBuf));
    Assert(flags & VISIBILITYMAP_VALID_BITS);

    /* Check that we have the right heap page pinned, if present */
    if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
        elog(ERROR, "wrong heap buffer passed to visibilitymap_set");

    /* Check that we have the right VM page pinned */
    if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
        elog(ERROR, "wrong VM buffer passed to visibilitymap_set");

    page = BufferGetPage(vmBuf);
    map = (uint8 *) PageGetContents(page);
    LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);

    if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS))
    {
        START_CRIT_SECTION();

        map[mapByte] |= (flags << mapOffset);
        MarkBufferDirty(vmBuf);

        if (RelationNeedsWAL(rel))
        {
            if (XLogRecPtrIsInvalid(recptr))
            {
                Assert(!InRecovery);
                recptr = log_heap_visible(rel->rd_node, heapBuf, vmBuf,
                                          cutoff_xid, flags);

                /*
                 * If data checksums are enabled (or wal_log_hints=on), we
                 * need to protect the heap page from being torn.
                 */
                if (XLogHintBitIsNeeded())
                {
                    Page        heapPage = BufferGetPage(heapBuf);

                    /* caller is expected to set PD_ALL_VISIBLE first */
                    Assert(PageIsAllVisible(heapPage));
                    PageSetLSN(heapPage, recptr);
                }
            }
            PageSetLSN(page, recptr);
        }

        END_CRIT_SECTION();
    }

    LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
}

/*
 * visibilitymap_get_status - get status of bits
 *
 * Are all tuples on heapBlk visible to all transactions, or all frozen,
 * according to the visibility map?
 *
 * On entry, *buf should be InvalidBuffer or a valid buffer returned by an
 * earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. On return, *buf is a valid buffer with the map page containing
 * the bit for heapBlk, or InvalidBuffer. The caller is responsible for
 * releasing *buf after it's done testing and setting bits.
 *
 * NOTE: This function is typically called without a lock on the heap page,
 * so somebody else could change the bit just after we look at it. In fact,
 * since we don't lock the visibility map page either, it's even possible that
 * someone else could have changed the bit just before we look at it, but yet
 * we might see the old value. It is the caller's responsibility to deal with
 * all concurrency issues!
 */
uint8
visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *buf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    uint8       mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    char       *map;
    uint8       result;

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_get_status %s %d", RelationGetRelationName(rel), heapBlk);
#endif

    /* Reuse the old pinned buffer if possible */
    if (BufferIsValid(*buf))
    {
        if (BufferGetBlockNumber(*buf) != mapBlock)
        {
            ReleaseBuffer(*buf);
            *buf = InvalidBuffer;
        }
    }

    if (!BufferIsValid(*buf))
    {
        *buf = vm_readbuf(rel, mapBlock, false);
        if (!BufferIsValid(*buf))
            return false;
    }

    map = PageGetContents(BufferGetPage(*buf));

    /*
     * A single byte read is atomic. There could be memory-ordering effects
     * here, but for performance reasons we make it the caller's job to worry
     * about that.
     */
    result = ((map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS);
    return result;
}

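/*----
 * A minimal sketch of how a reader, such as an index-only scan, might
 * consult the map. Illustrative only; "rel", "blkno", and "vmbuffer" are
 * hypothetical names, and the real callers live elsewhere:
 *
 *    Buffer    vmbuffer = InvalidBuffer;
 *
 *    if (visibilitymap_get_status(rel, blkno, &vmbuffer) &
 *        VISIBILITYMAP_ALL_VISIBLE)
 *        -- the tuple can be returned without visiting the heap page
 *    else
 *        -- fall back to checking visibility on the heap page itself
 *    ...
 *    if (BufferIsValid(vmbuffer))
 *        ReleaseBuffer(vmbuffer);
 *----
 */
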
/*
 * visibilitymap_count - count number of bits set in visibility map
 *
 * Note: we ignore the possibility of race conditions when the table is being
 * extended concurrently with the call. New pages added to the table aren't
 * going to be marked all-visible or all-frozen, so they won't affect the
 * result.
 */
void
visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
{
    BlockNumber mapBlock;
    BlockNumber nvisible = 0;
    BlockNumber nfrozen = 0;

    /* all_visible must be specified */
    Assert(all_visible);

    for (mapBlock = 0;; mapBlock++)
    {
        Buffer      mapBuffer;
        uint64     *map;
        int         i;

        /*
         * Read till we fall off the end of the map. We assume that any extra
         * bytes in the last page are zeroed, so we don't bother excluding
         * them from the count.
         */
        mapBuffer = vm_readbuf(rel, mapBlock, false);
        if (!BufferIsValid(mapBuffer))
            break;

        /*
         * We choose not to lock the page, since the result is going to be
         * immediately stale anyway if anyone is concurrently setting or
         * clearing bits, and we only really need an approximate value.
         */
        map = (uint64 *) PageGetContents(BufferGetPage(mapBuffer));

        StaticAssertStmt(MAPSIZE % sizeof(uint64) == 0,
                         "unsupported MAPSIZE");
        if (all_frozen == NULL)
        {
            for (i = 0; i < MAPSIZE / sizeof(uint64); i++)
                nvisible += pg_popcount64(map[i] & VISIBLE_MASK64);
        }
        else
        {
            for (i = 0; i < MAPSIZE / sizeof(uint64); i++)
            {
                nvisible += pg_popcount64(map[i] & VISIBLE_MASK64);
                nfrozen += pg_popcount64(map[i] & FROZEN_MASK64);
            }
        }

        ReleaseBuffer(mapBuffer);
    }

    *all_visible = nvisible;
    if (all_frozen)
        *all_frozen = nfrozen;
}

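/*----
 * A hedged sketch of a typical call: pass NULL for all_frozen when only
 * the all-visible count is wanted, for example when refreshing relation
 * statistics. "rel" and "relallvisible" are illustrative names:
 *
 *    BlockNumber relallvisible;
 *
 *    visibilitymap_count(rel, &relallvisible, NULL);
 *----
 */
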
/*
 * visibilitymap_prepare_truncate -
 *        prepare for truncation of the visibility map
 *
 * nheapblocks is the new size of the heap.
 *
 * Returns the number of blocks in the new visibility map.
 * If it's InvalidBlockNumber, there is nothing to truncate;
 * otherwise the caller is responsible for calling smgrtruncate()
 * to truncate the visibility map pages.
 */
BlockNumber
visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks)
{
    BlockNumber newnblocks;

    /* last remaining block, byte, and bit */
    BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks);
    uint32      truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
    uint8       truncOffset = HEAPBLK_TO_OFFSET(nheapblocks);

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
#endif

    RelationOpenSmgr(rel);

    /*
     * If no visibility map has been created yet for this relation, there's
     * nothing to truncate.
     */
    if (!smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
        return InvalidBlockNumber;

    /*
     * Unless the new size is exactly at a visibility map page boundary, the
     * tail bits in the last remaining map page, representing truncated heap
     * blocks, need to be cleared. This is not only tidy, but also necessary
     * because we don't get a chance to clear the bits if the heap is extended
     * again.
     */
    if (truncByte != 0 || truncOffset != 0)
    {
        Buffer      mapBuffer;
        Page        page;
        char       *map;

        newnblocks = truncBlock + 1;

        mapBuffer = vm_readbuf(rel, truncBlock, false);
        if (!BufferIsValid(mapBuffer))
        {
            /* nothing to do, the file was already smaller */
            return InvalidBlockNumber;
        }

        page = BufferGetPage(mapBuffer);
        map = PageGetContents(page);

        LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);

        /* NO EREPORT(ERROR) from here till changes are logged */
        START_CRIT_SECTION();

        /* Clear out the unwanted bytes. */
        MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1));

        /*----
         * Mask out the unwanted bits of the last remaining byte.
         *
         * ((1 << 0) - 1) = 00000000
         * ((1 << 1) - 1) = 00000001
         * ...
         * ((1 << 6) - 1) = 00111111
         * ((1 << 7) - 1) = 01111111
         *----
         */
        map[truncByte] &= (1 << truncOffset) - 1;

        /*
         * Truncation of a relation is WAL-logged at a higher level, and we
         * will be called at WAL replay. But if checksums are enabled, we
         * still need to write a WAL record to protect against a torn page,
         * in case the page is flushed to disk before the truncation WAL
         * record. We cannot use MarkBufferDirtyHint here, because that will
         * not dirty the page during recovery.
         */
        MarkBufferDirty(mapBuffer);
        if (XLogHintBitIsNeeded())
            log_newpage_buffer(mapBuffer, false);

        END_CRIT_SECTION();

        UnlockReleaseBuffer(mapBuffer);
    }
    else
        newnblocks = truncBlock;

    if (smgrnblocks(rel->rd_smgr, VISIBILITYMAP_FORKNUM) <= newnblocks)
    {
        /* nothing to do, the file was already smaller than requested size */
        return InvalidBlockNumber;
    }

    return newnblocks;
}

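/*----
 * A hedged sketch of the caller's side of truncation, per the contract
 * above; "rel", "nblocks", and "vm_nblocks" are illustrative names, and
 * the exact smgrtruncate() invocation is left abstract here:
 *
 *    BlockNumber vm_nblocks;
 *
 *    vm_nblocks = visibilitymap_prepare_truncate(rel, nblocks);
 *    if (BlockNumberIsValid(vm_nblocks))
 *        -- smgrtruncate the VISIBILITYMAP_FORKNUM down to vm_nblocks
 *----
 */
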
/*
 * Read a visibility map page.
 *
 * If the page doesn't exist, InvalidBuffer is returned, unless 'extend' is
 * true, in which case the visibility map file is extended first.
 */
static Buffer
vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
{
    Buffer      buf;

    /*
     * We might not have opened the relation at the smgr level yet, or we
     * might have been forced to close it by a sinval message. The code below
     * won't necessarily notice relation extension immediately when extend =
     * false, so we rely on sinval messages to ensure that our ideas about the
     * size of the map aren't too far out of date.
     */
    RelationOpenSmgr(rel);

    /*
     * If we haven't cached the size of the visibility map fork yet, check it
     * first.
     */
    if (rel->rd_smgr->smgr_vm_nblocks == InvalidBlockNumber)
    {
        if (smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
            rel->rd_smgr->smgr_vm_nblocks = smgrnblocks(rel->rd_smgr,
                                                        VISIBILITYMAP_FORKNUM);
        else
            rel->rd_smgr->smgr_vm_nblocks = 0;
    }

    /* Handle requests beyond EOF */
    if (blkno >= rel->rd_smgr->smgr_vm_nblocks)
    {
        if (extend)
            vm_extend(rel, blkno + 1);
        else
            return InvalidBuffer;
    }

    /*
     * Use ZERO_ON_ERROR mode, and initialize the page if necessary. It's
     * always safe to clear bits, so it's better to clear corrupt pages than
     * error out.
     *
     * The initialize-the-page part is trickier than it looks, because of the
     * possibility of multiple backends doing this concurrently, and our
     * desire to not uselessly take the buffer lock in the normal path where
     * the page is OK. We must take the lock to initialize the page, so
     * recheck page newness after we have the lock, in case someone else
     * already did it. Also, because we initially check PageIsNew with no
     * lock, it's possible to fall through and return the buffer while someone
     * else is still initializing the page (i.e., we might see pd_upper as set
     * but other page header fields are still zeroes). This is harmless for
     * callers that will take a buffer lock themselves, but some callers
     * inspect the page without any lock at all. The latter is OK only so
     * long as it doesn't depend on the page header having correct contents.
     * Current usage is safe because PageGetContents() does not require that.
     */
    buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
                             RBM_ZERO_ON_ERROR, NULL);
    if (PageIsNew(BufferGetPage(buf)))
    {
        LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
        if (PageIsNew(BufferGetPage(buf)))
            PageInit(BufferGetPage(buf), BLCKSZ, 0);
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    }
    return buf;
}

/*
 * Ensure that the visibility map fork is at least vm_nblocks long, extending
 * it if necessary with zeroed pages.
 */
static void
vm_extend(Relation rel, BlockNumber vm_nblocks)
{
    BlockNumber vm_nblocks_now;
    PGAlignedBlock pg;

    PageInit((Page) pg.data, BLCKSZ, 0);

    /*
     * We use the relation extension lock to lock out other backends trying to
     * extend the visibility map at the same time. It also locks out extension
     * of the main fork, unnecessarily, but extending the visibility map
     * happens seldom enough that it doesn't seem worthwhile to have a
     * separate lock tag type for it.
     *
     * Note that another backend might have extended or created the relation
     * by the time we get the lock.
     */
    LockRelationForExtension(rel, ExclusiveLock);

    /* Might have to re-open if a cache flush happened */
    RelationOpenSmgr(rel);

    /*
     * Create the file first if it doesn't exist. If smgr_vm_nblocks is
     * positive then it must exist, no need for an smgrexists call.
     */
    if ((rel->rd_smgr->smgr_vm_nblocks == 0 ||
         rel->rd_smgr->smgr_vm_nblocks == InvalidBlockNumber) &&
        !smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
        smgrcreate(rel->rd_smgr, VISIBILITYMAP_FORKNUM, false);

    vm_nblocks_now = smgrnblocks(rel->rd_smgr, VISIBILITYMAP_FORKNUM);

    /* Now extend the file */
    while (vm_nblocks_now < vm_nblocks)
    {
        PageSetChecksumInplace((Page) pg.data, vm_nblocks_now);

        smgrextend(rel->rd_smgr, VISIBILITYMAP_FORKNUM, vm_nblocks_now,
                   pg.data, false);
        vm_nblocks_now++;
    }

    /*
     * Send a shared-inval message to force other backends to close any smgr
     * references they may have for this rel, which we are about to change.
     * This is a useful optimization because it means that backends don't have
     * to keep checking for creation or extension of the file, which happens
     * infrequently.
     */
    CacheInvalidateSmgr(rel->rd_smgr->smgr_rnode);

    /* Update local cache with the up-to-date size */
    rel->rd_smgr->smgr_vm_nblocks = vm_nblocks_now;

    UnlockRelationForExtension(rel, ExclusiveLock);
}