/*
 * Size of the bitmap on each visibility map page, in bytes. There's no
 * extra header on the page, so the whole page minus the standard page
 * header is available for the map.
 */
#define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))

/* Number of heap blocks we can represent in one byte */
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)

/* Number of heap blocks we can represent in one visibility map page */
#define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE)

/* Mapping from heap block number to the right bit in the visibility map */
#define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE)
#define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
#define HEAPBLK_TO_OFFSET(x) (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK)

/*
 * Masks for counting subsets of bits in the visibility map: each heap
 * block is represented by a pair of adjacent bits, the lower one meaning
 * "all-visible" and the upper one "all-frozen".
 */
#define VISIBLE_MASK8 (0x55)	/* The lower bit of each bit pair */
#define FROZEN_MASK8 (0xaa)		/* The upper bit of each bit pair */
143 uint8 mask = flags << mapOffset;
145 bool cleared =
false;
151#ifdef TRACE_VISIBILITYMAP
156 elog(
ERROR,
"wrong buffer passed to visibilitymap_clear");
161 if (map[mapByte] & mask)
163 map[mapByte] &= ~mask;
257#ifdef TRACE_VISIBILITYMAP
270 elog(
ERROR,
"wrong heap buffer passed to visibilitymap_set");
274 elog(
ERROR,
"wrong VM buffer passed to visibilitymap_set");
285 map[mapByte] |= (flags << mapOffset);
349#ifdef TRACE_VISIBILITYMAP
398 for (mapBlock = 0;; mapBlock++)
426 *all_visible = nvisible;
428 *all_frozen = nfrozen;
452#ifdef TRACE_VISIBILITYMAP
470 if (truncByte != 0 || truncOffset != 0)
476 newnblocks = truncBlock + 1;
478 mapBuffer =
vm_readbuf(rel, truncBlock,
false);
506 map[truncByte] &= (1 << truncOffset) - 1;
525 newnblocks = truncBlock;
#define InvalidBlockNumber
BlockNumber BufferGetBlockNumber(Buffer buffer)
Buffer ExtendBufferedRelTo(BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, BlockNumber extend_to, ReadBufferMode mode)
void ReleaseBuffer(Buffer buffer)
void UnlockReleaseBuffer(Buffer buffer)
void MarkBufferDirty(Buffer buffer)
void LockBuffer(Buffer buffer, int mode)
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
#define BUFFER_LOCK_UNLOCK
static Page BufferGetPage(Buffer buffer)
@ EB_CREATE_FORK_IF_NEEDED
#define BUFFER_LOCK_EXCLUSIVE
static bool BufferIsValid(Buffer bufnum)
void PageInit(Page page, Size pageSize, Size specialSize)
static bool PageIsAllVisible(const PageData *page)
static bool PageIsNew(const PageData *page)
static char * PageGetContents(Page page)
static void PageSetLSN(Page page, XLogRecPtr lsn)
#define Assert(condition)
#define MemSet(start, val, len)
XLogRecPtr log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer, TransactionId snapshotConflictHorizon, uint8 vmflags)
void CacheInvalidateSmgr(RelFileLocatorBackend rlocator)
#define START_CRIT_SECTION()
#define END_CRIT_SECTION()
static uint64 pg_popcount_masked(const char *buf, int bytes, bits8 mask)
static SMgrRelation RelationGetSmgr(Relation rel)
#define RelationGetRelationName(relation)
#define RelationNeedsWAL(relation)
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
bool smgrexists(SMgrRelation reln, ForkNumber forknum)
BlockNumber smgr_cached_nblocks[MAX_FORKNUM+1]
bool visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
#define HEAPBLK_TO_OFFSET(x)
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks)
BlockNumber visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks)
void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
uint8 visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
#define HEAPBLK_TO_MAPBLOCK(x)
#define HEAPBLK_TO_MAPBYTE(x)
#define VISIBILITYMAP_VALID_BITS
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE
#define XLogHintBitIsNeeded()
#define XLogRecPtrIsInvalid(r)
XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std)