/*-------------------------------------------------------------------------
 *
 * visibilitymap.c
 *      bitmap for tracking visibility of heap tuples
 *
 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *      src/backend/access/heap/visibilitymap.c
 *
 * INTERFACE ROUTINES
 *      visibilitymap_clear      - clear bits for one page in the visibility map
 *      visibilitymap_pin        - pin a map page for setting a bit
 *      visibilitymap_pin_ok     - check whether correct map page is already pinned
 *      visibilitymap_set        - set bit(s) in a previously pinned page and log
 *      visibilitymap_set_vmbits - set bit(s) in a pinned page
 *      visibilitymap_get_status - get status of bits
 *      visibilitymap_count      - count number of bits set in visibility map
 *      visibilitymap_prepare_truncate -
 *                                 prepare for truncation of the visibility map
 *
 * NOTES
 *
 * The visibility map is a bitmap with two bits (all-visible and all-frozen)
 * per heap page. A set all-visible bit means that all tuples on the page are
 * known visible to all transactions, and therefore the page doesn't need to
 * be vacuumed. A set all-frozen bit means that all tuples on the page are
 * completely frozen, and therefore the page doesn't need to be vacuumed even
 * if a whole-table-scanning vacuum is required (e.g. anti-wraparound vacuum).
 * The all-frozen bit must be set only when the page is already all-visible.
 *
 * The map is conservative in the sense that we make sure that whenever a bit
 * is set, we know the condition is true, but if a bit is not set, it might or
 * might not be true.
 *
 * Clearing visibility map bits is not separately WAL-logged. The callers
 * must make sure that whenever a bit is cleared, the bit is cleared on WAL
 * replay of the updating operation as well.
 *
 * When we *set* a visibility map bit during VACUUM, we must write WAL. This
 * may seem counterintuitive, since the bit is basically a hint: if it is
 * clear, it may still be the case that every tuple on the page is visible to
 * all transactions; we just don't know that for certain. The difficulty is
 * that there are two bits which are typically set together: the
 * PD_ALL_VISIBLE bit on the page itself, and the visibility map bit. If a
 * crash occurs after the visibility map page makes it to disk and before the
 * updated heap page makes it to disk, redo must set the bit on the heap page.
 * Otherwise, the next insert, update, or delete on the heap page will fail to
 * realize that the visibility map bit must be cleared, possibly causing
 * index-only scans to return wrong answers.
 *
 * VACUUM will normally skip pages for which the visibility map bit is set;
 * such pages can't contain any dead tuples and therefore don't need vacuuming.
 *
 * LOCKING
 *
 * In heapam.c, whenever a page is modified so that not all tuples on the
 * page are visible to everyone anymore, the corresponding bit in the
 * visibility map is cleared. In order to be crash-safe, we need to do this
 * while still holding a lock on the heap page and in the same critical
 * section that logs the page modification. However, we don't want to hold
 * the buffer lock over any I/O that may be required to read in the visibility
 * map page. To avoid this, we examine the heap page before locking it;
 * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
 * page. Then, we lock the buffer. But this creates a race condition: there
 * is a possibility that in the time it takes to lock the buffer, the
 * PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
 * buffer, pin the visibility map page, and relock the buffer. This shouldn't
 * happen often, because only VACUUM currently sets visibility map bits,
 * and the race will only occur if VACUUM processes a given page at almost
 * exactly the same time that someone tries to further modify it.
 *
 * To set a bit, you need to hold a lock on the heap page. That prevents
 * the race condition where VACUUM sees that all tuples on the page are
 * visible to everyone, but another backend modifies the page before VACUUM
 * sets the bit in the visibility map.
 *
 * When a bit is set, the LSN of the visibility map page is updated to make
 * sure that the visibility map update doesn't get written to disk before the
 * WAL record of the changes that made it possible to set the bit is flushed.
 * But when a bit is cleared, we don't have to do that because it's always
 * safe to clear a bit in the map from a correctness point of view.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam_xlog.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#include "storage/bufmgr.h"
#include "storage/smgr.h"
#include "utils/inval.h"
#include "utils/rel.h"


/*#define TRACE_VISIBILITYMAP */

/*
 * Size of the bitmap on each visibility map page, in bytes. There are no
 * extra headers, so the whole page minus the standard page header is
 * used for the bitmap.
 */
#define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))

/* Number of heap blocks we can represent in one byte */
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)

/* Number of heap blocks we can represent in one visibility map page. */
#define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE)

/* Mapping from heap block number to the right bit in the visibility map */
#define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE)
#define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
#define HEAPBLK_TO_OFFSET(x) (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK)

/* Masks for counting subsets of bits in the visibility map. */
#define VISIBLE_MASK8 (0x55)    /* The lower bit of each bit pair */
#define FROZEN_MASK8 (0xaa)     /* The upper bit of each bit pair */

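/*
 * Worked example (illustrative sketch only, not used by the code below):
 * with the default 8 kB BLCKSZ and 8-byte MAXALIGN, MAPSIZE is 8168 bytes,
 * so one map page covers 8168 * 4 = 32672 heap pages.  Heap block 40000
 * therefore maps to VM block 1, byte 1832, bit offset 0.  The hypothetical
 * helper below just evaluates the macros above for a given heap block.
 */
#ifdef NOT_USED
static void
vm_locate_bit_example(BlockNumber heapBlk)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    uint8       mapOffset = HEAPBLK_TO_OFFSET(heapBlk);

    elog(DEBUG1, "heap block %u -> map block %u, byte %u, bit offset %u",
         heapBlk, mapBlock, (unsigned) mapByte, (unsigned) mapOffset);
}
#endif
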
/* prototypes for internal routines */
static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks);


/*
 * visibilitymap_clear - clear specified bits for one page in visibility map
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one. This function doesn't do
 * any I/O. Returns true if any bits have been cleared and false otherwise.
 */
bool
visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    int         mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    int         mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    uint8       mask = flags << mapOffset;
    char       *map;
    bool        cleared = false;

    /* Must never clear all_visible bit while leaving all_frozen bit set */
    Assert(flags & VISIBILITYMAP_VALID_BITS);
    Assert(flags != VISIBILITYMAP_ALL_VISIBLE);

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
#endif

    if (!BufferIsValid(vmbuf) || BufferGetBlockNumber(vmbuf) != mapBlock)
        elog(ERROR, "wrong buffer passed to visibilitymap_clear");

    LockBuffer(vmbuf, BUFFER_LOCK_EXCLUSIVE);
    map = PageGetContents(BufferGetPage(vmbuf));

    if (map[mapByte] & mask)
    {
        map[mapByte] &= ~mask;

        MarkBufferDirty(vmbuf);
        cleared = true;
    }

    LockBuffer(vmbuf, BUFFER_LOCK_UNLOCK);

    return cleared;
}
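
/*
 * Illustrative sketch of a caller (hedged; modeled loosely on heapam.c, the
 * function and variable names here are hypothetical): when a heap
 * modification makes a page no longer all-visible, the bits are cleared
 * while the heap buffer is held exclusively locked, and the clearing is
 * covered by the WAL logging of that heap modification rather than being
 * logged separately (see NOTES above).
 */
#ifdef NOT_USED
static void
example_clear_on_modify(Relation rel, Buffer heapBuf, Buffer vmbuf)
{
    Page        page = BufferGetPage(heapBuf);

    /* caller holds an exclusive lock on heapBuf and has vmbuf pinned */
    if (PageIsAllVisible(page))
    {
        PageClearAllVisible(page);
        visibilitymap_clear(rel, BufferGetBlockNumber(heapBuf),
                            vmbuf, VISIBILITYMAP_VALID_BITS);
    }
}
#endif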

/*
 * visibilitymap_pin - pin a map page for setting a bit
 *
 * Setting a bit in the visibility map is a two-phase operation. First, call
 * visibilitymap_pin, to pin the visibility map page containing the bit for
 * the heap page. Because that can require I/O to read the map page, you
 * shouldn't hold a lock on the heap page while doing that. Then, call
 * visibilitymap_set to actually set the bit.
 *
 * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. On return, *vmbuf is a valid buffer with the map page containing
 * the bit for heapBlk.
 *
 * If the page doesn't exist in the map file yet, it is extended.
 */
void
visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

    /* Reuse the old pinned buffer if possible */
    if (BufferIsValid(*vmbuf))
    {
        if (BufferGetBlockNumber(*vmbuf) == mapBlock)
            return;

        ReleaseBuffer(*vmbuf);
    }
    *vmbuf = vm_readbuf(rel, mapBlock, true);
}

/*
 * visibilitymap_pin_ok - do we already have the correct page pinned?
 *
 * On entry, vmbuf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. The return value indicates whether the buffer covers the
 * given heapBlk.
 */
bool
visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

    return BufferIsValid(vmbuf) && BufferGetBlockNumber(vmbuf) == mapBlock;
}

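/*
 * Illustrative sketch (hedged) of the pin-before-lock dance described in the
 * LOCKING section above: pin the VM page before taking the heap buffer lock,
 * then recheck with visibilitymap_pin_ok() and repeat if the page became
 * all-visible while we were unlocked.  Function and variable names here are
 * hypothetical.
 */
#ifdef NOT_USED
static void
example_lock_heap_page(Relation rel, Buffer heapBuf, Buffer *vmbuf)
{
    BlockNumber blkno = BufferGetBlockNumber(heapBuf);

    if (PageIsAllVisible(BufferGetPage(heapBuf)))
        visibilitymap_pin(rel, blkno, vmbuf);

    LockBuffer(heapBuf, BUFFER_LOCK_EXCLUSIVE);

    /* PD_ALL_VISIBLE may have been set while we weren't holding the lock */
    while (PageIsAllVisible(BufferGetPage(heapBuf)) &&
           !visibilitymap_pin_ok(blkno, *vmbuf))
    {
        LockBuffer(heapBuf, BUFFER_LOCK_UNLOCK);
        visibilitymap_pin(rel, blkno, vmbuf);   /* may do I/O */
        LockBuffer(heapBuf, BUFFER_LOCK_EXCLUSIVE);
    }
}
#endif
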
/*
 * visibilitymap_set - set bit(s) on a previously pinned page
 *
 * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
 * or InvalidXLogRecPtr in normal running. The VM page LSN is advanced to the
 * one provided; in normal running, we generate a new XLOG record and set the
 * page LSN to that value (though the heap page's LSN may *not* be updated;
 * see below). cutoff_xid is the largest xmin on the page being marked
 * all-visible; it is needed for Hot Standby, and can be InvalidTransactionId
 * if the page contains no tuples. It can also be set to InvalidTransactionId
 * when a page that is already all-visible is being marked all-frozen.
 *
 * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
 * this function. Except in recovery, caller should also pass the heap
 * buffer. When checksums are enabled and we're not in recovery, we must add
 * the heap buffer to the WAL chain to protect it from being torn.
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one. This function doesn't do
 * any I/O.
 */
void
visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
                  XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
                  uint8 flags)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    uint8       mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    Page        page;
    uint8      *map;
    uint8       status;

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_set flags 0x%02X for %s %d",
         flags, RelationGetRelationName(rel), heapBlk);
#endif

    Assert(InRecovery || !XLogRecPtrIsValid(recptr));
    Assert(InRecovery || PageIsAllVisible((Page) BufferGetPage(heapBuf)));
    Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);

    /* Must never set all_frozen bit without also setting all_visible bit */
    Assert(flags != VISIBILITYMAP_ALL_FROZEN);

    /* Check that we have the right heap page pinned, if present */
    if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
        elog(ERROR, "wrong heap buffer passed to visibilitymap_set");

    Assert(!BufferIsValid(heapBuf) ||
           BufferIsLockedByMeInMode(heapBuf, BUFFER_LOCK_EXCLUSIVE));

    /* Check that we have the right VM page pinned */
    if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
        elog(ERROR, "wrong VM buffer passed to visibilitymap_set");

    page = BufferGetPage(vmBuf);
    map = (uint8 *) PageGetContents(page);
    LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);

    status = (map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS;
    if (flags != status)
    {
        START_CRIT_SECTION();

        map[mapByte] |= (flags << mapOffset);
        MarkBufferDirty(vmBuf);

        if (RelationNeedsWAL(rel))
        {
            if (!XLogRecPtrIsValid(recptr))
            {
                Assert(!InRecovery);
                recptr = log_heap_visible(rel, heapBuf, vmBuf, cutoff_xid, flags);

                /*
                 * If data checksums are enabled (or wal_log_hints=on), we
                 * need to protect the heap page from being torn.
                 *
                 * If not, then we must *not* update the heap page's LSN. In
                 * this case, the FPI for the heap page was omitted from the
                 * WAL record inserted above, so it would be incorrect to
                 * update the heap page's LSN.
                 */
                if (XLogHintBitIsNeeded())
                {
                    Page        heapPage = BufferGetPage(heapBuf);

                    PageSetLSN(heapPage, recptr);
                }
            }
            PageSetLSN(page, recptr);
        }

        END_CRIT_SECTION();
    }

    LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
}
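
/*
 * Illustrative sketch (hedged) of the expected calling pattern, loosely
 * modeled on VACUUM: the caller sets PD_ALL_VISIBLE on the heap page first,
 * then sets the VM bit through this function.  The function name, variable
 * names, and the cutoff xid handling here are hypothetical.
 */
#ifdef NOT_USED
static void
example_mark_all_visible(Relation rel, Buffer heapBuf, Buffer vmbuf,
                         TransactionId cutoff_xid)
{
    BlockNumber blkno = BufferGetBlockNumber(heapBuf);
    Page        page = BufferGetPage(heapBuf);

    /* caller holds an exclusive lock on heapBuf and has vmbuf pinned */
    PageSetAllVisible(page);
    MarkBufferDirty(heapBuf);
    visibilitymap_set(rel, blkno, heapBuf, InvalidXLogRecPtr,
                      vmbuf, cutoff_xid, VISIBILITYMAP_ALL_VISIBLE);
}
#endif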

/*
 * Set VM (visibility map) flags in the VM block in vmBuf.
 *
 * This function is intended for callers that log VM changes together
 * with the heap page modifications that rendered the page all-visible.
 * Callers that log VM changes separately should use visibilitymap_set().
 *
 * vmBuf must be pinned and exclusively locked, and it must cover the VM bits
 * corresponding to heapBlk.
 *
 * In normal operation (not recovery), this must be called inside a critical
 * section that also applies the necessary heap page changes and, if
 * applicable, emits WAL.
 *
 * The caller is responsible for ensuring consistency between the heap page
 * and the VM page by holding a pin and exclusive lock on the buffer
 * containing heapBlk.
 *
 * rlocator is used only for debugging messages.
 */
void
visibilitymap_set_vmbits(BlockNumber heapBlk,
                         Buffer vmBuf, uint8 flags,
                         const RelFileLocator rlocator)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    uint8       mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    Page        page;
    uint8      *map;
    uint8       status;

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_set flags 0x%02X for %s %d",
         flags,
         relpathbackend(rlocator, MyProcNumber, MAIN_FORKNUM).str,
         heapBlk);
#endif

    /* Call in same critical section where WAL is emitted. */
    Assert(InRecovery || CritSectionCount > 0);

    /* Flags should be valid. Also never clear bits with this function */
    Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);

    /* Must never set all_frozen bit without also setting all_visible bit */
    Assert(flags != VISIBILITYMAP_ALL_FROZEN);

    /* Check that we have the right VM page pinned */
    if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
        elog(ERROR, "wrong VM buffer passed to visibilitymap_set");

    Assert(BufferIsLockedByMeInMode(vmBuf, BUFFER_LOCK_EXCLUSIVE));

    page = BufferGetPage(vmBuf);
    map = (uint8 *) PageGetContents(page);

    status = (map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS;
    if (flags != status)
    {
        map[mapByte] |= (flags << mapOffset);
        MarkBufferDirty(vmBuf);
    }
}

/*
 * visibilitymap_get_status - get status of bits
 *
 * Are all tuples on heapBlk visible to all transactions, or all frozen,
 * according to the visibility map?
 *
 * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by an
 * earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. On return, *vmbuf is a valid buffer with the map page containing
 * the bit for heapBlk, or InvalidBuffer. The caller is responsible for
 * releasing *vmbuf after it's done testing and setting bits.
 *
 * NOTE: This function is typically called without a lock on the heap page,
 * so somebody else could change the bit just after we look at it. In fact,
 * since we don't lock the visibility map page either, it's even possible that
 * someone else could have changed the bit just before we look at it, but yet
 * we might see the old value. It is the caller's responsibility to deal with
 * all concurrency issues!
 */
uint8
visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    uint8       mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    char       *map;
    uint8       result;

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_get_status %s %d", RelationGetRelationName(rel), heapBlk);
#endif

    /* Reuse the old pinned buffer if possible */
    if (BufferIsValid(*vmbuf))
    {
        if (BufferGetBlockNumber(*vmbuf) != mapBlock)
        {
            ReleaseBuffer(*vmbuf);
            *vmbuf = InvalidBuffer;
        }
    }

    if (!BufferIsValid(*vmbuf))
    {
        *vmbuf = vm_readbuf(rel, mapBlock, false);
        if (!BufferIsValid(*vmbuf))
            return (uint8) 0;
    }

    map = PageGetContents(BufferGetPage(*vmbuf));

    /*
     * A single byte read is atomic. There could be memory-ordering effects
     * here, but for performance reasons we make it the caller's job to worry
     * about that.
     */
    result = ((map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS);
    return result;
}
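
/*
 * Illustrative sketch (hedged): an index-only-scan-style check.  If the
 * all-visible bit is set, the heap page does not need to be visited to test
 * tuple visibility; otherwise the caller must fall back to the heap.  The
 * function name is hypothetical.
 */
#ifdef NOT_USED
static bool
example_page_is_all_visible(Relation rel, BlockNumber blkno, Buffer *vmbuf)
{
    return (visibilitymap_get_status(rel, blkno, vmbuf) &
            VISIBILITYMAP_ALL_VISIBLE) != 0;
}
#endif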

/*
 * visibilitymap_count - count number of bits set in visibility map
 *
 * Note: we ignore the possibility of race conditions when the table is being
 * extended concurrently with the call. New pages added to the table aren't
 * going to be marked all-visible or all-frozen, so they won't affect the
 * result.
 */
void
visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
{
    BlockNumber mapBlock;
    BlockNumber nvisible = 0;
    BlockNumber nfrozen = 0;

    /* all_visible must be specified */
    Assert(all_visible);

    for (mapBlock = 0;; mapBlock++)
    {
        Buffer      mapBuffer;
        uint64     *map;

        /*
         * Read till we fall off the end of the map. We assume that any extra
         * bytes in the last page are zeroed, so we don't bother excluding
         * them from the count.
         */
        mapBuffer = vm_readbuf(rel, mapBlock, false);
        if (!BufferIsValid(mapBuffer))
            break;

        /*
         * We choose not to lock the page, since the result is going to be
         * immediately stale anyway if anyone is concurrently setting or
         * clearing bits, and we only really need an approximate value.
         */
        map = (uint64 *) PageGetContents(BufferGetPage(mapBuffer));

        nvisible += pg_popcount_masked((const char *) map, MAPSIZE, VISIBLE_MASK8);
        if (all_frozen)
            nfrozen += pg_popcount_masked((const char *) map, MAPSIZE, FROZEN_MASK8);

        ReleaseBuffer(mapBuffer);
    }

    *all_visible = nvisible;
    if (all_frozen)
        *all_frozen = nfrozen;
}
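
/*
 * Illustrative sketch (hedged): how a caller such as VACUUM or ANALYZE might
 * obtain the totals, e.g. to maintain pg_class.relallvisible.  The second
 * output argument may be passed as NULL if the all-frozen count is not
 * needed.  The function name is hypothetical.
 */
#ifdef NOT_USED
static void
example_report_vm_counts(Relation rel)
{
    BlockNumber all_visible;
    BlockNumber all_frozen;

    visibilitymap_count(rel, &all_visible, &all_frozen);
    elog(DEBUG1, "%s: %u all-visible, %u all-frozen pages",
         RelationGetRelationName(rel), all_visible, all_frozen);
}
#endif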

/*
 * visibilitymap_prepare_truncate -
 *      prepare for truncation of the visibility map
 *
 * nheapblocks is the new size of the heap.
 *
 * Return the number of blocks of new visibility map.
 * If it's InvalidBlockNumber, there is nothing to truncate;
 * otherwise the caller is responsible for calling smgrtruncate()
 * to truncate the visibility map pages.
 */
BlockNumber
visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks)
{
    BlockNumber newnblocks;

    /* last remaining block, byte, and bit */
    BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks);
    uint32      truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
    uint8       truncOffset = HEAPBLK_TO_OFFSET(nheapblocks);

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
#endif

    /*
     * If no visibility map has been created yet for this relation, there's
     * nothing to truncate.
     */
    if (!smgrexists(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM))
        return InvalidBlockNumber;

    /*
     * Unless the new size is exactly at a visibility map page boundary, the
     * tail bits in the last remaining map page, representing truncated heap
     * blocks, need to be cleared. This is not only tidy, but also necessary
     * because we don't get a chance to clear the bits if the heap is extended
     * again.
     */
    if (truncByte != 0 || truncOffset != 0)
    {
        Buffer      mapBuffer;
        Page        page;
        char       *map;

        newnblocks = truncBlock + 1;

        mapBuffer = vm_readbuf(rel, truncBlock, false);
        if (!BufferIsValid(mapBuffer))
        {
            /* nothing to do, the file was already smaller */
            return InvalidBlockNumber;
        }

        page = BufferGetPage(mapBuffer);
        map = PageGetContents(page);

        LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);

        /* NO EREPORT(ERROR) from here till changes are logged */
        START_CRIT_SECTION();

        /* Clear out the unwanted bytes. */
        MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1));

        /*----
         * Mask out the unwanted bits of the last remaining byte.
         *
         * ((1 << 0) - 1) = 00000000
         * ((1 << 1) - 1) = 00000001
         * ...
         * ((1 << 6) - 1) = 00111111
         * ((1 << 7) - 1) = 01111111
         *----
         */
        map[truncByte] &= (1 << truncOffset) - 1;

        /*
         * Truncation of a relation is WAL-logged at a higher level, and we
         * will be called at WAL replay. But if checksums are enabled, we need
         * to still write a WAL record to protect against a torn page, if the
         * page is flushed to disk before the truncation WAL record. We cannot
         * use MarkBufferDirtyHint here, because that will not dirty the page
         * during recovery.
         */
        MarkBufferDirty(mapBuffer);
        if (!InRecovery && RelationNeedsWAL(rel) && XLogHintBitIsNeeded())
            log_newpage_buffer(mapBuffer, false);

        END_CRIT_SECTION();

        UnlockReleaseBuffer(mapBuffer);
    }
    else
        newnblocks = truncBlock;

    if (smgrnblocks(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM) <= newnblocks)
    {
        /* nothing to do, the file was already smaller than requested size */
        return InvalidBlockNumber;
    }

    return newnblocks;
}

/*
 * Read a visibility map page.
 *
 * If the page doesn't exist, InvalidBuffer is returned, or if 'extend' is
 * true, the visibility map file is extended.
 */
static Buffer
vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
{
    Buffer      buf;
    SMgrRelation reln;

    /*
     * Caution: re-using this smgr pointer could fail if the relcache entry
     * gets closed. It's safe as long as we only do smgr-level operations
     * between here and the last use of the pointer.
     */
    reln = RelationGetSmgr(rel);

    /*
     * If we haven't cached the size of the visibility map fork yet, check it
     * first.
     */
    if (reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == InvalidBlockNumber)
    {
        if (smgrexists(reln, VISIBILITYMAP_FORKNUM))
            smgrnblocks(reln, VISIBILITYMAP_FORKNUM);
        else
            reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = 0;
    }

    /*
     * For reading we use ZERO_ON_ERROR mode, and initialize the page if
     * necessary. It's always safe to clear bits, so it's better to clear
     * corrupt pages than error out.
     *
     * We use the same path below to initialize pages when extending the
     * relation, as a concurrent extension can end up with vm_extend()
     * returning an already-initialized page.
     */
    if (blkno >= reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM])
    {
        if (extend)
            buf = vm_extend(rel, blkno + 1);
        else
            return InvalidBuffer;
    }
    else
        buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
                                 RBM_ZERO_ON_ERROR, NULL);

    /*
     * Initializing the page when needed is trickier than it looks, because of
     * the possibility of multiple backends doing this concurrently, and our
     * desire to not uselessly take the buffer lock in the normal path where
     * the page is OK. We must take the lock to initialize the page, so
     * recheck page newness after we have the lock, in case someone else
     * already did it. Also, because we initially check PageIsNew with no
     * lock, it's possible to fall through and return the buffer while someone
     * else is still initializing the page (i.e., we might see pd_upper as set
     * but other page header fields are still zeroes). This is harmless for
     * callers that will take a buffer lock themselves, but some callers
     * inspect the page without any lock at all. The latter is OK only so
     * long as it doesn't depend on the page header having correct contents.
     * Current usage is safe because PageGetContents() does not require that.
     */
    if (PageIsNew(BufferGetPage(buf)))
    {
        LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
        if (PageIsNew(BufferGetPage(buf)))
            PageInit(BufferGetPage(buf), BLCKSZ, 0);
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    }
    return buf;
}

/*
 * Ensure that the visibility map fork is at least vm_nblocks long, extending
 * it if necessary with zeroed pages.
 */
static Buffer
vm_extend(Relation rel, BlockNumber vm_nblocks)
{
    Buffer      buf;

    buf = ExtendBufferedRelTo(BMR_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
                              EB_CREATE_FORK_IF_NEEDED |
                              EB_CLEAR_SIZE_CACHE,
                              vm_nblocks,
                              RBM_ZERO_ON_ERROR);

    /*
     * Send a shared-inval message to force other backends to close any smgr
     * references they may have for this rel, which we are about to change.
     * This is a useful optimization because it means that backends don't have
     * to keep checking for creation or extension of the file, which happens
     * infrequently.
     */
    CacheInvalidateSmgr(RelationGetSmgr(rel)->smgr_rlocator);

    return buf;
}