PostgreSQL Source Code git master
Loading...
Searching...
No Matches
visibilitymap.c File Reference
#include "postgres.h"
#include "access/heapam_xlog.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#include "storage/bufmgr.h"
#include "storage/smgr.h"
#include "utils/inval.h"
#include "utils/rel.h"
Include dependency graph for visibilitymap.c:

Go to the source code of this file.

Macros

#define MAPSIZE   (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))
 
#define HEAPBLOCKS_PER_BYTE   (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)
 
#define HEAPBLOCKS_PER_PAGE   (MAPSIZE * HEAPBLOCKS_PER_BYTE)
 
#define HEAPBLK_TO_MAPBLOCK(x)   ((x) / HEAPBLOCKS_PER_PAGE)
 
#define HEAPBLK_TO_MAPBYTE(x)   (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
 
#define HEAPBLK_TO_OFFSET(x)   (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK)
 
#define VISIBLE_MASK8   (0x55) /* The lower bit of each bit pair */
 
#define FROZEN_MASK8   (0xaa) /* The upper bit of each bit pair */
 

Functions

static Buffer vm_readbuf (Relation rel, BlockNumber blkno, bool extend)
 
static Buffer vm_extend (Relation rel, BlockNumber vm_nblocks)
 
bool visibilitymap_clear (Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
 
void visibilitymap_pin (Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
 
bool visibilitymap_pin_ok (BlockNumber heapBlk, Buffer vmbuf)
 
void visibilitymap_set (Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
 
void visibilitymap_set_vmbits (BlockNumber heapBlk, Buffer vmBuf, uint8 flags, const RelFileLocator rlocator)
 
uint8 visibilitymap_get_status (Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
 
void visibilitymap_count (Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
 
BlockNumber visibilitymap_prepare_truncate (Relation rel, BlockNumber nheapblocks)
 

Macro Definition Documentation

◆ FROZEN_MASK8

#define FROZEN_MASK8   (0xaa) /* The upper bit of each bit pair */

Definition at line 124 of file visibilitymap.c.

◆ HEAPBLK_TO_MAPBLOCK

#define HEAPBLK_TO_MAPBLOCK (   x)    ((x) / HEAPBLOCKS_PER_PAGE)

Definition at line 118 of file visibilitymap.c.

◆ HEAPBLK_TO_MAPBYTE

#define HEAPBLK_TO_MAPBYTE (   x)    (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)

Definition at line 119 of file visibilitymap.c.

◆ HEAPBLK_TO_OFFSET

#define HEAPBLK_TO_OFFSET (   x)    (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK)

Definition at line 120 of file visibilitymap.c.

◆ HEAPBLOCKS_PER_BYTE

#define HEAPBLOCKS_PER_BYTE   (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)

Definition at line 112 of file visibilitymap.c.

◆ HEAPBLOCKS_PER_PAGE

#define HEAPBLOCKS_PER_PAGE   (MAPSIZE * HEAPBLOCKS_PER_BYTE)

Definition at line 115 of file visibilitymap.c.

◆ MAPSIZE

#define MAPSIZE   (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))

Definition at line 109 of file visibilitymap.c.

◆ VISIBLE_MASK8

#define VISIBLE_MASK8   (0x55) /* The lower bit of each bit pair */

Definition at line 123 of file visibilitymap.c.

Function Documentation

◆ visibilitymap_clear()

bool visibilitymap_clear ( Relation  rel,
BlockNumber  heapBlk,
Buffer  vmbuf,
uint8  flags 
)

Definition at line 139 of file visibilitymap.c.

140{
142 int mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
143 int mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
144 uint8 mask = flags << mapOffset;
145 char *map;
146 bool cleared = false;
147
148 /* Must never clear all_visible bit while leaving all_frozen bit set */
151
152#ifdef TRACE_VISIBILITYMAP
153 elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
154#endif
155
157 elog(ERROR, "wrong buffer passed to visibilitymap_clear");
158
161
162 if (map[mapByte] & mask)
163 {
164 map[mapByte] &= ~mask;
165
167 cleared = true;
168 }
169
171
172 return cleared;
173}
uint32 BlockNumber
Definition block.h:31
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition bufmgr.c:4356
void MarkBufferDirty(Buffer buffer)
Definition bufmgr.c:3056
static Page BufferGetPage(Buffer buffer)
Definition bufmgr.h:466
@ BUFFER_LOCK_EXCLUSIVE
Definition bufmgr.h:220
@ BUFFER_LOCK_UNLOCK
Definition bufmgr.h:205
static void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition bufmgr.h:328
static bool BufferIsValid(Buffer bufnum)
Definition bufmgr.h:417
static char * PageGetContents(Page page)
Definition bufpage.h:257
uint8_t uint8
Definition c.h:544
#define Assert(condition)
Definition c.h:873
#define DEBUG1
Definition elog.h:30
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
static int fb(int x)
#define RelationGetRelationName(relation)
Definition rel.h:548
#define HEAPBLK_TO_OFFSET(x)
#define HEAPBLK_TO_MAPBLOCK(x)
#define HEAPBLK_TO_MAPBYTE(x)
#define VISIBILITYMAP_VALID_BITS
#define VISIBILITYMAP_ALL_VISIBLE

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), DEBUG1, elog, ERROR, fb(), HEAPBLK_TO_MAPBLOCK, HEAPBLK_TO_MAPBYTE, HEAPBLK_TO_OFFSET, LockBuffer(), MarkBufferDirty(), PageGetContents(), RelationGetRelationName, VISIBILITYMAP_ALL_VISIBLE, and VISIBILITYMAP_VALID_BITS.

Referenced by heap_delete(), heap_force_common(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), and identify_and_fix_vm_corruption().

◆ visibilitymap_count()

void visibilitymap_count ( Relation  rel,
BlockNumber all_visible,
BlockNumber all_frozen 
)

Definition at line 456 of file visibilitymap.c.

457{
460 BlockNumber nfrozen = 0;
461
462 /* all_visible must be specified */
463 Assert(all_visible);
464
465 for (mapBlock = 0;; mapBlock++)
466 {
468 uint64 *map;
469
470 /*
471 * Read till we fall off the end of the map. We assume that any extra
472 * bytes in the last page are zeroed, so we don't bother excluding
473 * them from the count.
474 */
475 mapBuffer = vm_readbuf(rel, mapBlock, false);
477 break;
478
479 /*
480 * We choose not to lock the page, since the result is going to be
481 * immediately stale anyway if anyone is concurrently setting or
482 * clearing bits, and we only really need an approximate value.
483 */
485
486 nvisible += pg_popcount_masked((const char *) map, MAPSIZE, VISIBLE_MASK8);
487 if (all_frozen)
488 nfrozen += pg_popcount_masked((const char *) map, MAPSIZE, FROZEN_MASK8);
489
491 }
492
493 *all_visible = nvisible;
494 if (all_frozen)
495 *all_frozen = nfrozen;
496}
int Buffer
Definition buf.h:23
void ReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5501
uint64_t uint64
Definition c.h:547
static uint64 pg_popcount_masked(const char *buf, int bytes, bits8 mask)
#define MAPSIZE
#define FROZEN_MASK8
#define VISIBLE_MASK8
static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend)

References Assert, BufferGetPage(), BufferIsValid(), fb(), FROZEN_MASK8, MAPSIZE, PageGetContents(), pg_popcount_masked(), ReleaseBuffer(), VISIBLE_MASK8, and vm_readbuf().

Referenced by do_analyze_rel(), heap_vacuum_eager_scan_setup(), heap_vacuum_rel(), index_update_stats(), and pg_visibility_map_summary().

◆ visibilitymap_get_status()

uint8 visibilitymap_get_status ( Relation  rel,
BlockNumber  heapBlk,
Buffer vmbuf 
)

Definition at line 408 of file visibilitymap.c.

409{
413 char *map;
414 uint8 result;
415
416#ifdef TRACE_VISIBILITYMAP
417 elog(DEBUG1, "vm_get_status %s %d", RelationGetRelationName(rel), heapBlk);
418#endif
419
420 /* Reuse the old pinned buffer if possible */
421 if (BufferIsValid(*vmbuf))
422 {
424 {
427 }
428 }
429
430 if (!BufferIsValid(*vmbuf))
431 {
432 *vmbuf = vm_readbuf(rel, mapBlock, false);
433 if (!BufferIsValid(*vmbuf))
434 return (uint8) 0;
435 }
436
438
439 /*
440 * A single byte read is atomic. There could be memory-ordering effects
441 * here, but for performance reasons we make it the caller's job to worry
442 * about that.
443 */
444 result = ((map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS);
445 return result;
446}
#define InvalidBuffer
Definition buf.h:25
uint32_t uint32
Definition c.h:546

References BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), DEBUG1, elog, fb(), HEAPBLK_TO_MAPBLOCK, HEAPBLK_TO_MAPBYTE, HEAPBLK_TO_OFFSET, InvalidBuffer, PageGetContents(), RelationGetRelationName, ReleaseBuffer(), VISIBILITYMAP_VALID_BITS, and vm_readbuf().

Referenced by collect_visibility_data(), find_next_unskippable_block(), heapcheck_read_stream_next_unskippable(), identify_and_fix_vm_corruption(), lazy_scan_prune(), pg_visibility(), and pg_visibility_map().

◆ visibilitymap_pin()

void visibilitymap_pin ( Relation  rel,
BlockNumber  heapBlk,
Buffer * vmbuf 
)

◆ visibilitymap_pin_ok()

bool visibilitymap_pin_ok ( BlockNumber  heapBlk,
Buffer  vmbuf 
)

◆ visibilitymap_prepare_truncate()

BlockNumber visibilitymap_prepare_truncate ( Relation  rel,
BlockNumber  nheapblocks 
)

Definition at line 510 of file visibilitymap.c.

511{
513
514 /* last remaining block, byte, and bit */
518
519#ifdef TRACE_VISIBILITYMAP
520 elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
521#endif
522
523 /*
524 * If no visibility map has been created yet for this relation, there's
525 * nothing to truncate.
526 */
528 return InvalidBlockNumber;
529
530 /*
531 * Unless the new size is exactly at a visibility map page boundary, the
532 * tail bits in the last remaining map page, representing truncated heap
533 * blocks, need to be cleared. This is not only tidy, but also necessary
534 * because we don't get a chance to clear the bits if the heap is extended
535 * again.
536 */
537 if (truncByte != 0 || truncOffset != 0)
538 {
540 Page page;
541 char *map;
542
544
545 mapBuffer = vm_readbuf(rel, truncBlock, false);
547 {
548 /* nothing to do, the file was already smaller */
549 return InvalidBlockNumber;
550 }
551
552 page = BufferGetPage(mapBuffer);
553 map = PageGetContents(page);
554
556
557 /* NO EREPORT(ERROR) from here till changes are logged */
559
560 /* Clear out the unwanted bytes. */
561 MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1));
562
563 /*----
564 * Mask out the unwanted bits of the last remaining byte.
565 *
566 * ((1 << 0) - 1) = 00000000
567 * ((1 << 1) - 1) = 00000001
568 * ...
569 * ((1 << 6) - 1) = 00111111
570 * ((1 << 7) - 1) = 01111111
571 *----
572 */
573 map[truncByte] &= (1 << truncOffset) - 1;
574
575 /*
576 * Truncation of a relation is WAL-logged at a higher-level, and we
577 * will be called at WAL replay. But if checksums are enabled, we need
578 * to still write a WAL record to protect against a torn page, if the
579 * page is flushed to disk before the truncation WAL record. We cannot
580 * use MarkBufferDirtyHint here, because that will not dirty the page
581 * during recovery.
582 */
586
588
590 }
591 else
593
595 {
596 /* nothing to do, the file was already smaller than requested size */
597 return InvalidBlockNumber;
598 }
599
600 return newnblocks;
601}
#define InvalidBlockNumber
Definition block.h:33
void UnlockReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5518
PageData * Page
Definition bufpage.h:81
#define MemSet(start, val, len)
Definition c.h:1013
#define START_CRIT_SECTION()
Definition miscadmin.h:150
#define END_CRIT_SECTION()
Definition miscadmin.h:152
static SMgrRelation RelationGetSmgr(Relation rel)
Definition rel.h:576
#define RelationNeedsWAL(relation)
Definition rel.h:637
@ VISIBILITYMAP_FORKNUM
Definition relpath.h:60
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition smgr.c:819
bool smgrexists(SMgrRelation reln, ForkNumber forknum)
Definition smgr.c:462
#define XLogHintBitIsNeeded()
Definition xlog.h:122
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)
bool InRecovery
Definition xlogutils.c:50

References BUFFER_LOCK_EXCLUSIVE, BufferGetPage(), BufferIsValid(), DEBUG1, elog, END_CRIT_SECTION, fb(), HEAPBLK_TO_MAPBLOCK, HEAPBLK_TO_MAPBYTE, HEAPBLK_TO_OFFSET, InRecovery, InvalidBlockNumber, LockBuffer(), log_newpage_buffer(), MAPSIZE, MarkBufferDirty(), MemSet, PageGetContents(), RelationGetRelationName, RelationGetSmgr(), RelationNeedsWAL, smgrexists(), smgrnblocks(), START_CRIT_SECTION, UnlockReleaseBuffer(), VISIBILITYMAP_FORKNUM, vm_readbuf(), and XLogHintBitIsNeeded.

Referenced by pg_truncate_visibility_map(), RelationTruncate(), and smgr_redo().

◆ visibilitymap_set()

void visibilitymap_set ( Relation  rel,
BlockNumber  heapBlk,
Buffer  heapBuf,
XLogRecPtr  recptr,
Buffer  vmBuf,
TransactionId  cutoff_xid,
uint8  flags 
)

Definition at line 245 of file visibilitymap.c.

248{
252 Page page;
253 uint8 *map;
254 uint8 status;
255
256#ifdef TRACE_VISIBILITYMAP
257 elog(DEBUG1, "vm_set flags 0x%02X for %s %d",
258 flags, RelationGetRelationName(rel), heapBlk);
259#endif
260
263 Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);
264
265 /* Must never set all_frozen bit without also setting all_visible bit */
267
268 /* Check that we have the right heap page pinned, if present */
270 elog(ERROR, "wrong heap buffer passed to visibilitymap_set");
271
274
275 /* Check that we have the right VM page pinned */
277 elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
278
279 page = BufferGetPage(vmBuf);
280 map = (uint8 *) PageGetContents(page);
282
283 status = (map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS;
284 if (flags != status)
285 {
287
288 map[mapByte] |= (flags << mapOffset);
290
291 if (RelationNeedsWAL(rel))
292 {
294 {
297
298 /*
299 * If data checksums are enabled (or wal_log_hints=on), we
300 * need to protect the heap page from being torn.
301 *
302 * If not, then we must *not* update the heap page's LSN. In
303 * this case, the FPI for the heap page was omitted from the
304 * WAL record inserted above, so it would be incorrect to
305 * update the heap page's LSN.
306 */
308 {
310
312 }
313 }
314 PageSetLSN(page, recptr);
315 }
316
318 }
319
321}
bool BufferIsLockedByMeInMode(Buffer buffer, BufferLockMode mode)
Definition bufmgr.c:2997
static bool PageIsAllVisible(const PageData *page)
Definition bufpage.h:428
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition bufpage.h:390
XLogRecPtr log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer, TransactionId snapshotConflictHorizon, uint8 vmflags)
Definition heapam.c:8893
#define VISIBILITYMAP_ALL_FROZEN
#define XLogRecPtrIsValid(r)
Definition xlogdefs.h:29

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage(), BufferIsLockedByMeInMode(), BufferIsValid(), DEBUG1, elog, END_CRIT_SECTION, ERROR, fb(), HEAPBLK_TO_MAPBLOCK, HEAPBLK_TO_MAPBYTE, HEAPBLK_TO_OFFSET, InRecovery, LockBuffer(), log_heap_visible(), MarkBufferDirty(), PageGetContents(), PageIsAllVisible(), PageSetLSN(), RelationGetRelationName, RelationNeedsWAL, START_CRIT_SECTION, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_VALID_BITS, XLogHintBitIsNeeded, and XLogRecPtrIsValid.

Referenced by heap_xlog_visible(), lazy_scan_new_or_empty(), and lazy_scan_prune().

◆ visibilitymap_set_vmbits()

void visibilitymap_set_vmbits ( BlockNumber  heapBlk,
Buffer  vmBuf,
uint8  flags,
const RelFileLocator  rlocator 
)

Definition at line 344 of file visibilitymap.c.

347{
351 Page page;
352 uint8 *map;
353 uint8 status;
354
355#ifdef TRACE_VISIBILITYMAP
356 elog(DEBUG1, "vm_set flags 0x%02X for %s %d",
357 flags,
359 heapBlk);
360#endif
361
362 /* Call in same critical section where WAL is emitted. */
364
365 /* Flags should be valid. Also never clear bits with this function */
366 Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);
367
368 /* Must never set all_frozen bit without also setting all_visible bit */
370
371 /* Check that we have the right VM page pinned */
373 elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
374
376
377 page = BufferGetPage(vmBuf);
378 map = (uint8 *) PageGetContents(page);
379
380 status = (map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS;
381 if (flags != status)
382 {
383 map[mapByte] |= (flags << mapOffset);
385 }
386}
ProcNumber MyProcNumber
Definition globals.c:90
volatile uint32 CritSectionCount
Definition globals.c:45
const char * str
@ MAIN_FORKNUM
Definition relpath.h:58
#define relpathbackend(rlocator, backend, forknum)
Definition relpath.h:141

References Assert, BUFFER_LOCK_EXCLUSIVE, BufferGetBlockNumber(), BufferGetPage(), BufferIsLockedByMeInMode(), BufferIsValid(), CritSectionCount, DEBUG1, elog, ERROR, fb(), HEAPBLK_TO_MAPBLOCK, HEAPBLK_TO_MAPBYTE, HEAPBLK_TO_OFFSET, InRecovery, MAIN_FORKNUM, MarkBufferDirty(), MyProcNumber, PageGetContents(), relpathbackend, str, VISIBILITYMAP_ALL_FROZEN, and VISIBILITYMAP_VALID_BITS.

Referenced by heap_multi_insert(), heap_xlog_multi_insert(), heap_xlog_prune_freeze(), and lazy_vacuum_heap_page().

◆ vm_extend()

static Buffer vm_extend ( Relation  rel,
BlockNumber  vm_nblocks 
)
static

Definition at line 684 of file visibilitymap.c.

685{
686 Buffer buf;
687
693
694 /*
695 * Send a shared-inval message to force other backends to close any smgr
696 * references they may have for this rel, which we are about to change.
697 * This is a useful optimization because it means that backends don't have
698 * to keep checking for creation or extension of the file, which happens
699 * infrequently.
700 */
701 CacheInvalidateSmgr(RelationGetSmgr(rel)->smgr_rlocator);
702
703 return buf;
704}
Buffer ExtendBufferedRelTo(BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, BlockNumber extend_to, ReadBufferMode mode)
Definition bufmgr.c:1025
@ EB_CLEAR_SIZE_CACHE
Definition bufmgr.h:90
@ EB_CREATE_FORK_IF_NEEDED
Definition bufmgr.h:84
@ RBM_ZERO_ON_ERROR
Definition bufmgr.h:51
#define BMR_REL(p_rel)
Definition bufmgr.h:114
void CacheInvalidateSmgr(RelFileLocatorBackend rlocator)
Definition inval.c:1755
static char buf[DEFAULT_XLOG_SEG_SIZE]

References BMR_REL, buf, CacheInvalidateSmgr(), EB_CLEAR_SIZE_CACHE, EB_CREATE_FORK_IF_NEEDED, ExtendBufferedRelTo(), fb(), RBM_ZERO_ON_ERROR, RelationGetSmgr(), and VISIBILITYMAP_FORKNUM.

Referenced by vm_readbuf().

◆ vm_readbuf()

static Buffer vm_readbuf ( Relation  rel,
BlockNumber  blkno,
bool  extend 
)
static

Definition at line 610 of file visibilitymap.c.

611{
612 Buffer buf;
614
615 /*
616 * Caution: re-using this smgr pointer could fail if the relcache entry
617 * gets closed. It's safe as long as we only do smgr-level operations
618 * between here and the last use of the pointer.
619 */
620 reln = RelationGetSmgr(rel);
621
622 /*
623 * If we haven't cached the size of the visibility map fork yet, check it
624 * first.
625 */
626 if (reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == InvalidBlockNumber)
627 {
630 else
631 reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = 0;
632 }
633
634 /*
635 * For reading we use ZERO_ON_ERROR mode, and initialize the page if
636 * necessary. It's always safe to clear bits, so it's better to clear
637 * corrupt pages than error out.
638 *
639 * We use the same path below to initialize pages when extending the
640 * relation, as a concurrent extension can end up with vm_extend()
641 * returning an already-initialized page.
642 */
643 if (blkno >= reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM])
644 {
645 if (extend)
646 buf = vm_extend(rel, blkno + 1);
647 else
648 return InvalidBuffer;
649 }
650 else
653
654 /*
655 * Initializing the page when needed is trickier than it looks, because of
656 * the possibility of multiple backends doing this concurrently, and our
657 * desire to not uselessly take the buffer lock in the normal path where
658 * the page is OK. We must take the lock to initialize the page, so
659 * recheck page newness after we have the lock, in case someone else
660 * already did it. Also, because we initially check PageIsNew with no
661 * lock, it's possible to fall through and return the buffer while someone
662 * else is still initializing the page (i.e., we might see pd_upper as set
663 * but other page header fields are still zeroes). This is harmless for
664 * callers that will take a buffer lock themselves, but some callers
665 * inspect the page without any lock at all. The latter is OK only so
666 * long as it doesn't depend on the page header having correct contents.
667 * Current usage is safe because PageGetContents() does not require that.
668 */
670 {
675 }
676 return buf;
677}
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition bufmgr.c:911
void PageInit(Page page, Size pageSize, Size specialSize)
Definition bufpage.c:42
static bool PageIsNew(const PageData *page)
Definition bufpage.h:233
static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks)

References buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage(), fb(), InvalidBlockNumber, InvalidBuffer, LockBuffer(), PageInit(), PageIsNew(), RBM_ZERO_ON_ERROR, ReadBufferExtended(), RelationGetSmgr(), smgrexists(), smgrnblocks(), VISIBILITYMAP_FORKNUM, and vm_extend().

Referenced by visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), and visibilitymap_prepare_truncate().