PostgreSQL Source Code  git master
bufmgr.h File Reference
#include "storage/block.h"
#include "storage/buf.h"
#include "storage/bufpage.h"
#include "storage/relfilenode.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"

Data Structures

struct  PrefetchBufferResult
 

Macros

#define MAX_IO_CONCURRENCY   1000
 
#define P_NEW   InvalidBlockNumber /* grow the file to get a new page */
 
#define BUFFER_LOCK_UNLOCK   0
 
#define BUFFER_LOCK_SHARE   1
 
#define BUFFER_LOCK_EXCLUSIVE   2
 
#define BufferIsValid(bufnum)
 
#define BufferGetBlock(buffer)
 
#define BufferGetPageSize(buffer)
 
#define BufferGetPage(buffer)   ((Page)BufferGetBlock(buffer))
 
#define RelationGetNumberOfBlocks(reln)   RelationGetNumberOfBlocksInFork(reln, MAIN_FORKNUM)
 

Typedefs

typedef void * Block
 
typedef enum BufferAccessStrategyType BufferAccessStrategyType
 
typedef struct PrefetchBufferResult PrefetchBufferResult
 

Enumerations

enum  BufferAccessStrategyType { BAS_NORMAL, BAS_BULKREAD, BAS_BULKWRITE, BAS_VACUUM }
 
enum  ReadBufferMode {
  RBM_NORMAL, RBM_ZERO_AND_LOCK, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_ON_ERROR,
  RBM_NORMAL_NO_LOG
}
 

Functions

PrefetchBufferResult PrefetchSharedBuffer (struct SMgrRelationData *smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
 
PrefetchBufferResult PrefetchBuffer (Relation reln, ForkNumber forkNum, BlockNumber blockNum)
 
bool ReadRecentBuffer (RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, Buffer recent_buffer)
 
Buffer ReadBuffer (Relation reln, BlockNumber blockNum)
 
Buffer ReadBufferExtended (Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
 
Buffer ReadBufferWithoutRelcache (RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
 
void ReleaseBuffer (Buffer buffer)
 
void UnlockReleaseBuffer (Buffer buffer)
 
void MarkBufferDirty (Buffer buffer)
 
void IncrBufferRefCount (Buffer buffer)
 
Buffer ReleaseAndReadBuffer (Buffer buffer, Relation relation, BlockNumber blockNum)
 
void InitBufferPool (void)
 
void InitBufferPoolAccess (void)
 
void AtEOXact_Buffers (bool isCommit)
 
void PrintBufferLeakWarning (Buffer buffer)
 
void CheckPointBuffers (int flags)
 
BlockNumber BufferGetBlockNumber (Buffer buffer)
 
BlockNumber RelationGetNumberOfBlocksInFork (Relation relation, ForkNumber forkNum)
 
void FlushOneBuffer (Buffer buffer)
 
void FlushRelationBuffers (Relation rel)
 
void FlushRelationsAllBuffers (struct SMgrRelationData **smgrs, int nrels)
 
void FlushDatabaseBuffers (Oid dbid)
 
void DropRelFileNodeBuffers (struct SMgrRelationData *smgr_reln, ForkNumber *forkNum, int nforks, BlockNumber *firstDelBlock)
 
void DropRelFileNodesAllBuffers (struct SMgrRelationData **smgr_reln, int nnodes)
 
void DropDatabaseBuffers (Oid dbid)
 
bool BufferIsPermanent (Buffer buffer)
 
XLogRecPtr BufferGetLSNAtomic (Buffer buffer)
 
Size BufferShmemSize (void)
 
void BufferGetTag (Buffer buffer, RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum)
 
void MarkBufferDirtyHint (Buffer buffer, bool buffer_std)
 
void UnlockBuffers (void)
 
void LockBuffer (Buffer buffer, int mode)
 
bool ConditionalLockBuffer (Buffer buffer)
 
void LockBufferForCleanup (Buffer buffer)
 
bool ConditionalLockBufferForCleanup (Buffer buffer)
 
bool IsBufferCleanupOK (Buffer buffer)
 
bool HoldingBufferPinThatDelaysRecovery (void)
 
void AbortBufferIO (void)
 
void BufmgrCommit (void)
 
bool BgBufferSync (struct WritebackContext *wb_context)
 
void AtProcExit_LocalBuffers (void)
 
void TestForOldSnapshot_impl (Snapshot snapshot, Relation relation)
 
BufferAccessStrategy GetAccessStrategy (BufferAccessStrategyType btype)
 
void FreeAccessStrategy (BufferAccessStrategy strategy)
 
static void TestForOldSnapshot (Snapshot snapshot, Relation relation, Page page)
 

Variables

PGDLLIMPORT int NBuffers
 
bool zero_damaged_pages
 
int bgwriter_lru_maxpages
 
double bgwriter_lru_multiplier
 
bool track_io_timing
 
int effective_io_concurrency
 
int maintenance_io_concurrency
 
int checkpoint_flush_after
 
int backend_flush_after
 
int bgwriter_flush_after
 
PGDLLIMPORT char * BufferBlocks
 
PGDLLIMPORT int NLocBuffer
 
PGDLLIMPORT Block * LocalBufferBlockPointers
 
PGDLLIMPORT int32 * LocalRefCount
 

Macro Definition Documentation

◆ BUFFER_LOCK_EXCLUSIVE

◆ BUFFER_LOCK_SHARE

◆ BUFFER_LOCK_UNLOCK

#define BUFFER_LOCK_UNLOCK   0

Definition at line 96 of file bufmgr.h.

Referenced by _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_getcachedmetap(), _hash_init(), _hash_kill_items(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), blinsert(), BloomNewBuffer(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_page_cleanup(), bringetbitmap(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinsummarize(), ConditionalLockBufferForCleanup(), fsm_readbuf(), fsm_search_avail(), fsm_vacuum_page(), get_raw_page_internal(), GetVisibilityMapPins(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_page_prune_opt(), heap_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_for_cluster(), heapam_scan_bitmap_next_block(), heapam_scan_sample_next_tuple(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), lazy_scan_heap(), LockBuffer(), LockBufferForCleanup(), pgrowlocks(), pgstat_heap(), pgstatindex_impl(), RelationGetBufferForTuple(), revmap_physical_extend(), SpGistNewBuffer(), summarize_range(), UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_set(), vm_readbuf(), and XLogReadBufferExtended().
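
These constants are the mode argument of LockBuffer(). The following is a minimal sketch of the usual pin-then-lock-then-unlock sequence; the helper name and its relation/block arguments are hypothetical, not part of the header.

#include "postgres.h"
#include "storage/bufmgr.h"

/* Hypothetical helper: read one block and inspect it under a share lock. */
static void
inspect_block(Relation rel, BlockNumber blkno)
{
    Buffer      buf = ReadBuffer(rel, blkno);   /* pins the buffer */

    LockBuffer(buf, BUFFER_LOCK_SHARE);         /* content lock for reading */
    /* ... examine BufferGetPage(buf) here ... */
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);        /* drop the lock, keep the pin */

    ReleaseBuffer(buf);                         /* drop the pin */
}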

◆ BufferGetBlock

#define BufferGetBlock (   buffer)
Value:
( \
AssertMacro(BufferIsValid(buffer)), \
BufferIsLocal(buffer) ? \
LocalBufferBlockPointers[-(buffer) - 1] \
: \
(Block) (BufferBlocks + ((Size) ((buffer) - 1)) * BLCKSZ) \
)

Definition at line 136 of file bufmgr.h.

Referenced by XLogSaveBufferForHint().

◆ BufferGetPage

#define BufferGetPage (   buffer)    ((Page)BufferGetBlock(buffer))

Definition at line 169 of file bufmgr.h.

Referenced by _bt_binsrch(), _bt_binsrch_insert(), _bt_bottomupdel_pass(), _bt_check_unique(), _bt_checkpage(), _bt_clear_incomplete_split(), _bt_conditionallockbuf(), _bt_dedup_pass(), _bt_delete_or_dedup_one_page(), _bt_delitems_delete(), _bt_delitems_delete_check(), _bt_delitems_vacuum(), _bt_endpoint(), _bt_findinsertloc(), _bt_finish_split(), _bt_get_endpoint(), _bt_getbuf(), _bt_getmeta(), _bt_getroot(), _bt_getstackbuf(), _bt_gettrueroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_killitems(), _bt_leftsib_splitflag(), _bt_lock_subtree_parent(), _bt_lockbuf(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newroot(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_rightsib_halfdeadflag(), _bt_search(), _bt_search_insert(), _bt_set_cleanup_info(), _bt_simpledel_pass(), _bt_split(), _bt_stepright(), _bt_unlink_halfdead_page(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _bt_vacuum_needs_cleanup(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_get_newblock_from_oldbucket(), _hash_get_oldblock_from_newbucket(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_getinitbuf(), _hash_getnewbuf(), _hash_init(), _hash_init_metabuffer(), _hash_initbitmapbuffer(), _hash_initbuf(), _hash_kill_items(), _hash_next(), _hash_pgaddmultitup(), _hash_pgaddtup(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), allocNewBuffer(), blgetbitmap(), blinsert(), BloomNewBuffer(), blvacuumcleanup(), brin_can_do_samepage_update(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_start_evacuating_page(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinSetHeapBlockItemptr(), bt_metap(), bt_page_items_internal(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), btvacuumpage(), BufferGetLSNAtomic(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), collectMatchesForHeapRow(), count_nondeletable_pages(), createPostingTree(), dataBeginPlaceToPage(), dataBeginPlaceToPageInternal(), dataBeginPlaceToPageLeaf(), dataExecPlaceToPage(), dataExecPlaceToPageInternal(), dataLocateItem(), dataPlaceToPageLeafRecompress(), dataPrepareDownlink(), dataSplitPageInternal(), do_setval(), doPickSplit(), entryExecPlaceToPage(), entryIsEnoughSpace(), entryLoadMoreItems(), entryLocateEntry(), entryLocateLeafEntry(), entryPrepareDownlink(), entrySplitPage(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), generic_redo(), GenericXLogFinish(), GenericXLogRegisterBuffer(), get_raw_page_internal(), GetBTPageStatistics(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginbulkdelete(), ginDeletePage(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), GinInitBuffer(), GinInitMetabuffer(), 
ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertData(), ginRedoInsertEntry(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumEntryPage(), ginVacuumPostingTreeLeaf(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), GISTInitBuffer(), gistkillitems(), gistMemorizeAllDownlinks(), gistNewBuffer(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hash_bitmap_info(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_hot_search_buffer(), heap_index_delete_tuples(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_is_all_visible(), heap_page_prune(), heap_page_prune_execute(), heap_page_prune_opt(), heap_prune_chain(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_validate_scan(), heapam_scan_analyze_next_tuple(), heapam_scan_bitmap_next_block(), heapam_scan_bitmap_next_tuple(), heapam_scan_sample_next_tuple(), heapgetpage(), heapgettup(), heapgettup_pagemode(), index_compute_xid_horizon_for_tuples(), initBloomState(), lazy_check_needs_freeze(), lazy_scan_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), log_heap_update(), log_newpage_buffer(), log_newpage_range(), log_split_page(), MarkBufferDirtyHint(), moveLeafs(), moveRightIfItNeeded(), nextval_internal(), palloc_btree_page(), pg_visibility(), pgstat_btree_page(), pgstat_gist_page(), pgstat_hash_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistInitBuffer(), SpGistNewBuffer(), SpGistSetLastUsedPage(), SpGistUpdateMetaPage(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), terminate_brin_buildstate(), vacuumLeafPage(), vacuumLeafRoot(), 
vacuumRedirectAndPlaceholder(), verify_heapam(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), writeListPage(), XLogCheckBufferNeedsBackup(), XLogReadBufferExtended(), XLogReadBufferForRedoExtended(), XLogRecordPageWithFreeSpace(), XLogRegisterBuffer(), XLogSaveBufferForHint(), and xlogVacuumPage().
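
BufferGetPage() merely casts the buffer's block to a Page; callers still need a pin and normally a content lock before dereferencing it. A hedged sketch with a hypothetical helper that counts the line pointers on one block:

#include "postgres.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"

/* Hypothetical helper: return the number of line pointers on one block. */
static OffsetNumber
count_line_pointers(Relation rel, BlockNumber blkno)
{
    Buffer       buf = ReadBuffer(rel, blkno);
    OffsetNumber maxoff;

    LockBuffer(buf, BUFFER_LOCK_SHARE);
    maxoff = PageGetMaxOffsetNumber(BufferGetPage(buf));
    UnlockReleaseBuffer(buf);       /* unlock and unpin in one call */

    return maxoff;
}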

◆ BufferGetPageSize

◆ BufferIsValid

#define BufferIsValid (   bufnum)
Value:
( \
AssertMacro((bufnum) <= NBuffers && (bufnum) >= -NLocBuffer), \
(bufnum) != InvalidBuffer \
)

Definition at line 123 of file bufmgr.h.

Referenced by _bt_clear_incomplete_split(), _bt_endpoint(), _bt_first(), _bt_get_endpoint(), _bt_insertonpg(), _bt_readpage(), _bt_relandgetbuf(), _bt_search(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_dropscanbuf(), _hash_freeovflpage(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_readnext(), _hash_readpage(), _hash_readprev(), autoprewarm_database_main(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinGetTupleForHeapBlock(), brininsert(), brinsummarize(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), BufferGetLSNAtomic(), BufferIsPermanent(), checkXLogConsistency(), ConditionalLockBufferForCleanup(), doPickSplit(), entryGetItem(), entryLoadMoreItems(), ExecStoreBufferHeapTuple(), ExecStorePinnedBufferHeapTuple(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_vacuum_page(), generic_redo(), GetPrivateRefCount(), GetPrivateRefCountEntry(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageUpdateRecord(), gistXLogSplit(), gistXLogUpdate(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_endscan(), heap_index_delete_tuples(), heap_lock_tuple(), heap_rescan(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), heapgettup_pagemode(), IsBufferCleanupOK(), lazy_scan_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), log_heap_visible(), MarkBufferDirty(), MarkBufferDirtyHint(), PrintBufferLeakWarning(), ReadRecentBuffer(), ReleaseAndReadBuffer(), ReleaseBuffer(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), statapprox_heap(), tts_buffer_heap_clear(), tts_buffer_heap_copyslot(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), visibilitymap_prepare_truncate(), visibilitymap_set(), and XLogReadBufferForRedoExtended().

◆ MAX_IO_CONCURRENCY

#define MAX_IO_CONCURRENCY   1000

Definition at line 88 of file bufmgr.h.

◆ P_NEW

◆ RelationGetNumberOfBlocks

Typedef Documentation

◆ Block

typedef void* Block

Definition at line 24 of file bufmgr.h.

◆ BufferAccessStrategyType

◆ PrefetchBufferResult

Enumeration Type Documentation

◆ BufferAccessStrategyType

Enumerator
BAS_NORMAL 
BAS_BULKREAD 
BAS_BULKWRITE 
BAS_VACUUM 

Definition at line 27 of file bufmgr.h.

28 {
29  BAS_NORMAL, /* Normal random access */
30  BAS_BULKREAD, /* Large read-only scan (hint bit updates are
31  * ok) */
32  BAS_BULKWRITE, /* Large multi-block write (e.g. COPY IN) */
33  BAS_VACUUM /* VACUUM */
34 } BufferAccessStrategyType;
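
A strategy confines a large scan to a small ring of buffers instead of letting it evict most of shared_buffers. A minimal sketch of the usual lifecycle, using a hypothetical helper that walks every block of a relation under BAS_BULKREAD:

#include "postgres.h"
#include "storage/bufmgr.h"

/* Hypothetical helper: read every block of a relation through a BAS_BULKREAD ring. */
static void
scan_with_strategy(Relation rel)
{
    BufferAccessStrategy strategy = GetAccessStrategy(BAS_BULKREAD);
    BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
    BlockNumber blkno;

    for (blkno = 0; blkno < nblocks; blkno++)
    {
        Buffer      buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
                                             RBM_NORMAL, strategy);

        /* ... look at the page under a content lock here ... */
        ReleaseBuffer(buf);
    }

    FreeAccessStrategy(strategy);
}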

◆ ReadBufferMode

Enumerator
RBM_NORMAL 
RBM_ZERO_AND_LOCK 
RBM_ZERO_AND_CLEANUP_LOCK 
RBM_ZERO_ON_ERROR 
RBM_NORMAL_NO_LOG 

Definition at line 37 of file bufmgr.h.

38 {
39  RBM_NORMAL, /* Normal read */
40  RBM_ZERO_AND_LOCK, /* Don't read from disk, caller will
41  * initialize. Also locks the page. */
42  RBM_ZERO_AND_CLEANUP_LOCK, /* Like RBM_ZERO_AND_LOCK, but locks the page
43  * in "cleanup" mode */
44  RBM_ZERO_ON_ERROR, /* Read, but return an all-zeros page on error */
45  RBM_NORMAL_NO_LOG /* Don't log page as invalid during WAL
46  * replay; otherwise same as RBM_NORMAL */
47 } ReadBufferMode;
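
RBM_ZERO_AND_LOCK avoids a useless disk read when the caller is about to overwrite the whole page. A hedged sketch, assuming the caller intends to rewrite an existing block completely; the helper is hypothetical and a real caller would also WAL-log the rewritten page:

#include "postgres.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"

/* Hypothetical helper: zero and reinitialize a block the caller will rewrite. */
static Buffer
reinit_block(Relation rel, BlockNumber blkno)
{
    /* No disk read: the buffer comes back zero-filled and exclusively locked. */
    Buffer      buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
                                         RBM_ZERO_AND_LOCK, NULL);

    PageInit(BufferGetPage(buf), BufferGetPageSize(buf), 0);
    MarkBufferDirty(buf);       /* a real caller would also WAL-log the page */

    return buf;                 /* caller unlocks and releases when finished */
}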

Function Documentation

◆ AbortBufferIO()

void AbortBufferIO ( void  )

Definition at line 4477 of file bufmgr.c.

References Assert, buftag::blockNum, BM_DIRTY, BM_IO_ERROR, BM_IO_IN_PROGRESS, BM_VALID, buf, ereport, errcode(), errdetail(), errmsg(), buftag::forkNum, InProgressBuf, IsForInput, LockBufHdr(), pfree(), relpathperm, buftag::rnode, BufferDesc::tag, TerminateBufferIO(), UnlockBufHdr, and WARNING.

Referenced by AbortSubTransaction(), AbortTransaction(), AtProcExit_Buffers(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), and WalWriterMain().

4478 {
4479  BufferDesc *buf = InProgressBuf;
4480 
4481  if (buf)
4482  {
4483  uint32 buf_state;
4484 
4485  buf_state = LockBufHdr(buf);
4486  Assert(buf_state & BM_IO_IN_PROGRESS);
4487  if (IsForInput)
4488  {
4489  Assert(!(buf_state & BM_DIRTY));
4490 
4491  /* We'd better not think buffer is valid yet */
4492  Assert(!(buf_state & BM_VALID));
4493  UnlockBufHdr(buf, buf_state);
4494  }
4495  else
4496  {
4497  Assert(buf_state & BM_DIRTY);
4498  UnlockBufHdr(buf, buf_state);
4499  /* Issue notice if this is not the first failure... */
4500  if (buf_state & BM_IO_ERROR)
4501  {
4502  /* Buffer is pinned, so we can read tag without spinlock */
4503  char *path;
4504 
4505  path = relpathperm(buf->tag.rnode, buf->tag.forkNum);
4506  ereport(WARNING,
4507  (errcode(ERRCODE_IO_ERROR),
4508  errmsg("could not write block %u of %s",
4509  buf->tag.blockNum, path),
4510  errdetail("Multiple failures --- write error might be permanent.")));
4511  pfree(path);
4512  }
4513  }
4514  TerminateBufferIO(buf, false, BM_IO_ERROR);
4515  }
4516 }

◆ AtEOXact_Buffers()

void AtEOXact_Buffers ( bool  isCommit)

Definition at line 2579 of file bufmgr.c.

References Assert, AtEOXact_LocalBuffers(), CheckForBufferLeaks(), and PrivateRefCountOverflowed.

Referenced by AbortTransaction(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), CommitTransaction(), PrepareTransaction(), and WalWriterMain().

2580 {
2581  CheckForBufferLeaks();
2582 
2583  AtEOXact_LocalBuffers(isCommit);
2584 
2585  Assert(PrivateRefCountOverflowed == 0);
2586 }

◆ AtProcExit_LocalBuffers()

void AtProcExit_LocalBuffers ( void  )

Definition at line 588 of file localbuf.c.

References CheckForLocalBufferLeaks().

Referenced by AtProcExit_Buffers().

589 {
590  /*
591  * We shouldn't be holding any remaining pins; if we are, and assertions
592  * aren't enabled, we'll fail later in DropRelFileNodeBuffers while trying
593  * to drop the temp rels.
594  */
595  CheckForLocalBufferLeaks();
596 }

◆ BgBufferSync()

bool BgBufferSync ( struct WritebackContext *wb_context )

Definition at line 2209 of file bufmgr.c.

References Assert, bgwriter_lru_maxpages, bgwriter_lru_multiplier, BgWriterDelay, BUF_REUSABLE, BUF_WRITTEN, CurrentResourceOwner, DEBUG1, DEBUG2, elog, PgStat_MsgBgWriter::m_buf_alloc, PgStat_MsgBgWriter::m_buf_written_clean, PgStat_MsgBgWriter::m_maxwritten_clean, NBuffers, PendingBgWriterStats, ResourceOwnerEnlargeBuffers(), StrategySyncStart(), and SyncOneBuffer().

Referenced by BackgroundWriterMain().

2210 {
2211  /* info obtained from freelist.c */
2212  int strategy_buf_id;
2213  uint32 strategy_passes;
2214  uint32 recent_alloc;
2215 
2216  /*
2217  * Information saved between calls so we can determine the strategy
2218  * point's advance rate and avoid scanning already-cleaned buffers.
2219  */
2220  static bool saved_info_valid = false;
2221  static int prev_strategy_buf_id;
2222  static uint32 prev_strategy_passes;
2223  static int next_to_clean;
2224  static uint32 next_passes;
2225 
2226  /* Moving averages of allocation rate and clean-buffer density */
2227  static float smoothed_alloc = 0;
2228  static float smoothed_density = 10.0;
2229 
2230  /* Potentially these could be tunables, but for now, not */
2231  float smoothing_samples = 16;
2232  float scan_whole_pool_milliseconds = 120000.0;
2233 
2234  /* Used to compute how far we scan ahead */
2235  long strategy_delta;
2236  int bufs_to_lap;
2237  int bufs_ahead;
2238  float scans_per_alloc;
2239  int reusable_buffers_est;
2240  int upcoming_alloc_est;
2241  int min_scan_buffers;
2242 
2243  /* Variables for the scanning loop proper */
2244  int num_to_scan;
2245  int num_written;
2246  int reusable_buffers;
2247 
2248  /* Variables for final smoothed_density update */
2249  long new_strategy_delta;
2250  uint32 new_recent_alloc;
2251 
2252  /*
2253  * Find out where the freelist clock sweep currently is, and how many
2254  * buffer allocations have happened since our last call.
2255  */
2256  strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
2257 
2258  /* Report buffer alloc counts to pgstat */
2259  PendingBgWriterStats.m_buf_alloc += recent_alloc;
2260 
2261  /*
2262  * If we're not running the LRU scan, just stop after doing the stats
2263  * stuff. We mark the saved state invalid so that we can recover sanely
2264  * if LRU scan is turned back on later.
2265  */
2266  if (bgwriter_lru_maxpages <= 0)
2267  {
2268  saved_info_valid = false;
2269  return true;
2270  }
2271 
2272  /*
2273  * Compute strategy_delta = how many buffers have been scanned by the
2274  * clock sweep since last time. If first time through, assume none. Then
2275  * see if we are still ahead of the clock sweep, and if so, how many
2276  * buffers we could scan before we'd catch up with it and "lap" it. Note:
2277  * weird-looking coding of xxx_passes comparisons are to avoid bogus
2278  * behavior when the passes counts wrap around.
2279  */
2280  if (saved_info_valid)
2281  {
2282  int32 passes_delta = strategy_passes - prev_strategy_passes;
2283 
2284  strategy_delta = strategy_buf_id - prev_strategy_buf_id;
2285  strategy_delta += (long) passes_delta * NBuffers;
2286 
2287  Assert(strategy_delta >= 0);
2288 
2289  if ((int32) (next_passes - strategy_passes) > 0)
2290  {
2291  /* we're one pass ahead of the strategy point */
2292  bufs_to_lap = strategy_buf_id - next_to_clean;
2293 #ifdef BGW_DEBUG
2294  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2295  next_passes, next_to_clean,
2296  strategy_passes, strategy_buf_id,
2297  strategy_delta, bufs_to_lap);
2298 #endif
2299  }
2300  else if (next_passes == strategy_passes &&
2301  next_to_clean >= strategy_buf_id)
2302  {
2303  /* on same pass, but ahead or at least not behind */
2304  bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
2305 #ifdef BGW_DEBUG
2306  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2307  next_passes, next_to_clean,
2308  strategy_passes, strategy_buf_id,
2309  strategy_delta, bufs_to_lap);
2310 #endif
2311  }
2312  else
2313  {
2314  /*
2315  * We're behind, so skip forward to the strategy point and start
2316  * cleaning from there.
2317  */
2318 #ifdef BGW_DEBUG
2319  elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
2320  next_passes, next_to_clean,
2321  strategy_passes, strategy_buf_id,
2322  strategy_delta);
2323 #endif
2324  next_to_clean = strategy_buf_id;
2325  next_passes = strategy_passes;
2326  bufs_to_lap = NBuffers;
2327  }
2328  }
2329  else
2330  {
2331  /*
2332  * Initializing at startup or after LRU scanning had been off. Always
2333  * start at the strategy point.
2334  */
2335 #ifdef BGW_DEBUG
2336  elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
2337  strategy_passes, strategy_buf_id);
2338 #endif
2339  strategy_delta = 0;
2340  next_to_clean = strategy_buf_id;
2341  next_passes = strategy_passes;
2342  bufs_to_lap = NBuffers;
2343  }
2344 
2345  /* Update saved info for next time */
2346  prev_strategy_buf_id = strategy_buf_id;
2347  prev_strategy_passes = strategy_passes;
2348  saved_info_valid = true;
2349 
2350  /*
2351  * Compute how many buffers had to be scanned for each new allocation, ie,
2352  * 1/density of reusable buffers, and track a moving average of that.
2353  *
2354  * If the strategy point didn't move, we don't update the density estimate
2355  */
2356  if (strategy_delta > 0 && recent_alloc > 0)
2357  {
2358  scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
2359  smoothed_density += (scans_per_alloc - smoothed_density) /
2360  smoothing_samples;
2361  }
2362 
2363  /*
2364  * Estimate how many reusable buffers there are between the current
2365  * strategy point and where we've scanned ahead to, based on the smoothed
2366  * density estimate.
2367  */
2368  bufs_ahead = NBuffers - bufs_to_lap;
2369  reusable_buffers_est = (float) bufs_ahead / smoothed_density;
2370 
2371  /*
2372  * Track a moving average of recent buffer allocations. Here, rather than
2373  * a true average we want a fast-attack, slow-decline behavior: we
2374  * immediately follow any increase.
2375  */
2376  if (smoothed_alloc <= (float) recent_alloc)
2377  smoothed_alloc = recent_alloc;
2378  else
2379  smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
2380  smoothing_samples;
2381 
2382  /* Scale the estimate by a GUC to allow more aggressive tuning. */
2383  upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
2384 
2385  /*
2386  * If recent_alloc remains at zero for many cycles, smoothed_alloc will
2387  * eventually underflow to zero, and the underflows produce annoying
2388  * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
2389  * zero, there's no point in tracking smaller and smaller values of
2390  * smoothed_alloc, so just reset it to exactly zero to avoid this
2391  * syndrome. It will pop back up as soon as recent_alloc increases.
2392  */
2393  if (upcoming_alloc_est == 0)
2394  smoothed_alloc = 0;
2395 
2396  /*
2397  * Even in cases where there's been little or no buffer allocation
2398  * activity, we want to make a small amount of progress through the buffer
2399  * cache so that as many reusable buffers as possible are clean after an
2400  * idle period.
2401  *
2402  * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
2403  * the BGW will be called during the scan_whole_pool time; slice the
2404  * buffer pool into that many sections.
2405  */
2406  min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
2407 
2408  if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
2409  {
2410 #ifdef BGW_DEBUG
2411  elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
2412  upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
2413 #endif
2414  upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
2415  }
2416 
2417  /*
2418  * Now write out dirty reusable buffers, working forward from the
2419  * next_to_clean point, until we have lapped the strategy scan, or cleaned
2420  * enough buffers to match our estimate of the next cycle's allocation
2421  * requirements, or hit the bgwriter_lru_maxpages limit.
2422  */
2423 
2424  /* Make sure we can handle the pin inside SyncOneBuffer */
2425  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
2426 
2427  num_to_scan = bufs_to_lap;
2428  num_written = 0;
2429  reusable_buffers = reusable_buffers_est;
2430 
2431  /* Execute the LRU scan */
2432  while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
2433  {
2434  int sync_state = SyncOneBuffer(next_to_clean, true,
2435  wb_context);
2436 
2437  if (++next_to_clean >= NBuffers)
2438  {
2439  next_to_clean = 0;
2440  next_passes++;
2441  }
2442  num_to_scan--;
2443 
2444  if (sync_state & BUF_WRITTEN)
2445  {
2446  reusable_buffers++;
2447  if (++num_written >= bgwriter_lru_maxpages)
2448  {
2449  PendingBgWriterStats.m_maxwritten_clean++;
2450  break;
2451  }
2452  }
2453  else if (sync_state & BUF_REUSABLE)
2454  reusable_buffers++;
2455  }
2456 
2457  PendingBgWriterStats.m_buf_written_clean += num_written;
2458 
2459 #ifdef BGW_DEBUG
2460  elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
2461  recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
2462  smoothed_density, reusable_buffers_est, upcoming_alloc_est,
2463  bufs_to_lap - num_to_scan,
2464  num_written,
2465  reusable_buffers - reusable_buffers_est);
2466 #endif
2467 
2468  /*
2469  * Consider the above scan as being like a new allocation scan.
2470  * Characterize its density and update the smoothed one based on it. This
2471  * effectively halves the moving average period in cases where both the
2472  * strategy and the background writer are doing some useful scanning,
2473  * which is helpful because a long memory isn't as desirable on the
2474  * density estimates.
2475  */
2476  new_strategy_delta = bufs_to_lap - num_to_scan;
2477  new_recent_alloc = reusable_buffers - reusable_buffers_est;
2478  if (new_strategy_delta > 0 && new_recent_alloc > 0)
2479  {
2480  scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
2481  smoothed_density += (scans_per_alloc - smoothed_density) /
2482  smoothing_samples;
2483 
2484 #ifdef BGW_DEBUG
2485  elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
2486  new_recent_alloc, new_strategy_delta,
2487  scans_per_alloc, smoothed_density);
2488 #endif
2489  }
2490 
2491  /* Return true if OK to hibernate */
2492  return (bufs_to_lap == 0 && recent_alloc == 0);
2493 }
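
For orientation: with the default bgwriter_delay of 200 ms, scan_whole_pool_milliseconds / BgWriterDelay is 120000 / 200 = 600, so min_scan_buffers is NBuffers / 600 and an otherwise idle background writer still works its way through the whole buffer pool roughly once every two minutes.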

◆ BufferGetBlockNumber()

BlockNumber BufferGetBlockNumber ( Buffer  buffer)

Definition at line 2748 of file bufmgr.c.

References Assert, buftag::blockNum, BufferIsLocal, BufferIsPinned, GetBufferDescriptor, GetLocalBufferDescriptor, and BufferDesc::tag.

Referenced by _bt_binsrch_insert(), _bt_check_unique(), _bt_checkpage(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_doinsert(), _bt_endpoint(), _bt_finish_split(), _bt_first(), _bt_getroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newroot(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_search(), _bt_split(), _bt_unlink_halfdead_page(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_first(), _hash_freeovflpage(), _hash_getnewbuf(), _hash_readnext(), _hash_readpage(), _hash_splitbucket(), allocNewBuffer(), blinsert(), BloomInitMetapage(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_xlog_insert_update(), brinbuild(), brinGetTupleForHeapBlock(), createPostingTree(), dataBeginPlaceToPageLeaf(), dataPrepareDownlink(), doPickSplit(), entryPrepareDownlink(), fill_seq_with_data(), ginEntryInsert(), ginFindParents(), ginFinishSplit(), ginPlaceToPage(), ginRedoDeleteListPages(), ginRedoUpdateMetapage(), ginScanToDelete(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistformdownlink(), gistinserttuples(), gistMemorizeAllDownlinks(), gistplacetopage(), gistRelocateBuildBuffersOnSplit(), gistScanPage(), hash_xlog_add_ovfl_page(), heap_delete(), heap_hot_search_buffer(), heap_insert(), heap_multi_insert(), heap_page_is_all_visible(), heap_prune_chain(), heap_update(), heap_xlog_confirm(), heap_xlog_lock(), makeSublist(), moveLeafs(), moveRightIfItNeeded(), pgstathashindex(), ReadBufferBI(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_get_buffer(), revmap_physical_extend(), spgAddNodeAction(), spgbuild(), spgdoinsert(), SpGistSetLastUsedPage(), spgSplitNodeAction(), spgWalk(), startScanEntry(), terminate_brin_buildstate(), vacuumLeafPage(), visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), visibilitymap_set(), and XLogReadBufferExtended().

2749 {
2750  BufferDesc *bufHdr;
2751 
2752  Assert(BufferIsPinned(buffer));
2753 
2754  if (BufferIsLocal(buffer))
2755  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2756  else
2757  bufHdr = GetBufferDescriptor(buffer - 1);
2758 
2759  /* pinned, so OK to read tag without spinlock */
2760  return bufHdr->tag.blockNum;
2761 }

◆ BufferGetLSNAtomic()

XLogRecPtr BufferGetLSNAtomic ( Buffer  buffer)

Definition at line 3008 of file bufmgr.c.

References Assert, BufferGetPage, BufferIsLocal, BufferIsPinned, BufferIsValid, GetBufferDescriptor, LockBufHdr(), PageGetLSN, UnlockBufHdr, and XLogHintBitIsNeeded.

Referenced by _bt_killitems(), _bt_readpage(), gistdoinsert(), gistFindPath(), gistkillitems(), gistScanPage(), SetHintBits(), and XLogSaveBufferForHint().

3009 {
3010  BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
3011  char *page = BufferGetPage(buffer);
3012  XLogRecPtr lsn;
3013  uint32 buf_state;
3014 
3015  /*
3016  * If we don't need locking for correctness, fastpath out.
3017  */
3018  if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
3019  return PageGetLSN(page);
3020 
3021  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3022  Assert(BufferIsValid(buffer));
3023  Assert(BufferIsPinned(buffer));
3024 
3025  buf_state = LockBufHdr(bufHdr);
3026  lsn = PageGetLSN(page);
3027  UnlockBufHdr(bufHdr, buf_state);
3028 
3029  return lsn;
3030 }

◆ BufferGetTag()

void BufferGetTag ( Buffer buffer, RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum )

Definition at line 2769 of file bufmgr.c.

References Assert, buftag::blockNum, BufferIsLocal, BufferIsPinned, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, buftag::rnode, and BufferDesc::tag.

Referenced by fsm_search_avail(), ginRedoInsertEntry(), log_newpage_buffer(), ResolveCminCmaxDuringDecoding(), XLogRegisterBuffer(), and XLogSaveBufferForHint().

2771 {
2772  BufferDesc *bufHdr;
2773 
2774  /* Do the same checks as BufferGetBlockNumber. */
2775  Assert(BufferIsPinned(buffer));
2776 
2777  if (BufferIsLocal(buffer))
2778  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2779  else
2780  bufHdr = GetBufferDescriptor(buffer - 1);
2781 
2782  /* pinned, so OK to read tag without spinlock */
2783  *rnode = bufHdr->tag.rnode;
2784  *forknum = bufHdr->tag.forkNum;
2785  *blknum = bufHdr->tag.blockNum;
2786 }
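
A minimal sketch using BufferGetTag() to report which block a pinned buffer currently maps to; the helper name is hypothetical:

#include "postgres.h"
#include "storage/bufmgr.h"

/* Hypothetical helper: report which block a pinned buffer currently holds. */
static void
report_buffer_identity(Buffer buf)
{
    RelFileNode rnode;
    ForkNumber  forknum;
    BlockNumber blkno;

    BufferGetTag(buf, &rnode, &forknum, &blkno);
    elog(DEBUG1, "buffer %d holds block %u of fork %d of relation %u",
         buf, blkno, forknum, rnode.relNode);
}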

◆ BufferIsPermanent()

bool BufferIsPermanent ( Buffer  buffer)

Definition at line 2978 of file bufmgr.c.

References Assert, BM_PERMANENT, BufferIsLocal, BufferIsPinned, BufferIsValid, GetBufferDescriptor, pg_atomic_read_u32(), and BufferDesc::state.

Referenced by SetHintBits().

2979 {
2980  BufferDesc *bufHdr;
2981 
2982  /* Local buffers are used only for temp relations. */
2983  if (BufferIsLocal(buffer))
2984  return false;
2985 
2986  /* Make sure we've got a real buffer, and that we hold a pin on it. */
2987  Assert(BufferIsValid(buffer));
2988  Assert(BufferIsPinned(buffer));
2989 
2990  /*
2991  * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
2992  * need not bother with the buffer header spinlock. Even if someone else
2993  * changes the buffer header state while we're doing this, the state is
2994  * changed atomically, so we'll read the old value or the new value, but
2995  * not random garbage.
2996  */
2997  bufHdr = GetBufferDescriptor(buffer - 1);
2998  return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
2999 }

◆ BufferShmemSize()

Size BufferShmemSize ( void  )

Definition at line 156 of file buf_init.c.

References add_size(), mul_size(), NBuffers, PG_CACHE_LINE_SIZE, and StrategyShmemSize().

Referenced by CalculateShmemSize().

157 {
158  Size size = 0;
159 
160  /* size of buffer descriptors */
161  size = add_size(size, mul_size(NBuffers, sizeof(BufferDescPadded)));
162  /* to allow aligning buffer descriptors */
163  size = add_size(size, PG_CACHE_LINE_SIZE);
164 
165  /* size of data pages */
166  size = add_size(size, mul_size(NBuffers, BLCKSZ));
167 
168  /* size of stuff controlled by freelist.c */
169  size = add_size(size, StrategyShmemSize());
170 
171  /* size of I/O condition variables */
172  size = add_size(size, mul_size(NBuffers,
173  sizeof(ConditionVariableMinimallyPadded)));
174  /* to allow aligning the above */
175  size = add_size(size, PG_CACHE_LINE_SIZE);
176 
177  /* size of checkpoint sort array in bufmgr.c */
178  size = add_size(size, mul_size(NBuffers, sizeof(CkptSortItem)));
179 
180  return size;
181 }
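
For orientation: with shared_buffers = 128MB and the default 8 kB BLCKSZ, NBuffers is 16384, so the mul_size(NBuffers, BLCKSZ) term alone reserves 128 MB of shared memory; the descriptor, condition-variable, and checkpoint-sort arrays add a comparatively small amount (on the order of a couple of megabytes) on top of that.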

◆ BufmgrCommit()

void BufmgrCommit ( void  )

Definition at line 2734 of file bufmgr.c.

Referenced by PrepareTransaction(), and RecordTransactionCommit().

2735 {
2736  /* Nothing to do in bufmgr anymore... */
2737 }

◆ CheckPointBuffers()

void CheckPointBuffers ( int  flags)

Definition at line 2724 of file bufmgr.c.

References BufferSync().

Referenced by CheckPointGuts().

2725 {
2726  BufferSync(flags);
2727 }

◆ ConditionalLockBuffer()

bool ConditionalLockBuffer ( Buffer  buffer)

Definition at line 4033 of file bufmgr.c.

References Assert, buf, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, GetBufferDescriptor, LW_EXCLUSIVE, and LWLockConditionalAcquire().

Referenced by _bt_conditionallockbuf(), BloomNewBuffer(), ConditionalLockBufferForCleanup(), GinNewBuffer(), gistNewBuffer(), RelationGetBufferForTuple(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), and SpGistUpdateMetaPage().

4034 {
4035  BufferDesc *buf;
4036 
4037  Assert(BufferIsPinned(buffer));
4038  if (BufferIsLocal(buffer))
4039  return true; /* act as though we got it */
4040 
4041  buf = GetBufferDescriptor(buffer - 1);
4042 
4043  return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
4044  LW_EXCLUSIVE);
4045 }
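
ConditionalLockBuffer() takes the exclusive content lock only if it is immediately available, returning false instead of sleeping. A hedged sketch of the try-or-skip pattern, assuming the caller already holds a pin; the helper name is made up:

#include "postgres.h"
#include "storage/bufmgr.h"

/* Hypothetical helper: modify a pinned page only if its lock is free right now. */
static bool
try_update_page(Buffer buf)
{
    if (!ConditionalLockBuffer(buf))
        return false;           /* someone else holds the content lock; skip */

    /* ... change the page and MarkBufferDirty(buf) here ... */

    LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    return true;
}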

◆ ConditionalLockBufferForCleanup()

bool ConditionalLockBufferForCleanup ( Buffer  buffer)

Definition at line 4241 of file bufmgr.c.

References Assert, BUF_STATE_GET_REFCOUNT, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsValid, ConditionalLockBuffer(), GetBufferDescriptor, GetPrivateRefCount(), LocalRefCount, LockBuffer(), LockBufHdr(), PrivateRefCountEntry::refcount, and UnlockBufHdr.

Referenced by _hash_finish_split(), _hash_getbuf_with_condlock_cleanup(), heap_page_prune_opt(), and lazy_scan_heap().

4242 {
4243  BufferDesc *bufHdr;
4244  uint32 buf_state,
4245  refcount;
4246 
4247  Assert(BufferIsValid(buffer));
4248 
4249  if (BufferIsLocal(buffer))
4250  {
4251  refcount = LocalRefCount[-buffer - 1];
4252  /* There should be exactly one pin */
4253  Assert(refcount > 0);
4254  if (refcount != 1)
4255  return false;
4256  /* Nobody else to wait for */
4257  return true;
4258  }
4259 
4260  /* There should be exactly one local pin */
4261  refcount = GetPrivateRefCount(buffer);
4262  Assert(refcount);
4263  if (refcount != 1)
4264  return false;
4265 
4266  /* Try to acquire lock */
4267  if (!ConditionalLockBuffer(buffer))
4268  return false;
4269 
4270  bufHdr = GetBufferDescriptor(buffer - 1);
4271  buf_state = LockBufHdr(bufHdr);
4272  refcount = BUF_STATE_GET_REFCOUNT(buf_state);
4273 
4274  Assert(refcount > 0);
4275  if (refcount == 1)
4276  {
4277  /* Successfully acquired exclusive lock with pincount 1 */
4278  UnlockBufHdr(bufHdr, buf_state);
4279  return true;
4280  }
4281 
4282  /* Failed, so release the lock */
4283  UnlockBufHdr(bufHdr, buf_state);
4284  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4285  return false;
4286 }
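
VACUUM-style callers use this to avoid waiting for a cleanup lock. A minimal sketch, assuming the caller already holds the only pin on the buffer; the helper name is made up:

#include "postgres.h"
#include "storage/bufmgr.h"

/* Hypothetical helper: clean up a page only if a cleanup lock is free. */
static bool
try_cleanup_page(Buffer buf)
{
    if (!ConditionalLockBufferForCleanup(buf))
        return false;           /* the page is pinned elsewhere; retry later */

    /* Exclusive lock with pin count 1: safe to move tuples around on the page. */
    /* ... prune/defragment here ... */

    LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    return true;
}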

◆ DropDatabaseBuffers()

void DropDatabaseBuffers ( Oid  dbid)

Definition at line 3408 of file bufmgr.c.

References buftag::blockNum, buf, BufferDescriptorGetBuffer, RelFileNode::dbNode, elog, buftag::forkNum, BufferDesc::freeNext, GetBufferDescriptor, GetPrivateRefCount(), i, InvalidateBuffer(), InvalidBackendId, LockBufHdr(), LOG, NBuffers, relpathbackend, relpathperm, buftag::rnode, BufferDesc::tag, and UnlockBufHdr.

Referenced by dbase_redo(), dropdb(), and movedb().

3409 {
3410  int i;
3411 
3412  /*
3413  * We needn't consider local buffers, since by assumption the target
3414  * database isn't our own.
3415  */
3416 
3417  for (i = 0; i < NBuffers; i++)
3418  {
3419  BufferDesc *bufHdr = GetBufferDescriptor(i);
3420  uint32 buf_state;
3421 
3422  /*
3423  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3424  * and saves some cycles.
3425  */
3426  if (bufHdr->tag.rnode.dbNode != dbid)
3427  continue;
3428 
3429  buf_state = LockBufHdr(bufHdr);
3430  if (bufHdr->tag.rnode.dbNode == dbid)
3431  InvalidateBuffer(bufHdr); /* releases spinlock */
3432  else
3433  UnlockBufHdr(bufHdr, buf_state);
3434  }
3435 }

◆ DropRelFileNodeBuffers()

void DropRelFileNodeBuffers ( struct SMgrRelationData *smgr_reln, ForkNumber *forkNum, int nforks, BlockNumber *firstDelBlock )

◆ DropRelFileNodesAllBuffers()

void DropRelFileNodesAllBuffers ( struct SMgrRelationData **  smgr_reln,
int  nnodes 
)

◆ FlushDatabaseBuffers()

void FlushDatabaseBuffers ( Oid  dbid)

Definition at line 3705 of file bufmgr.c.

References BM_DIRTY, BM_VALID, BufferDescriptorGetContentLock, CurrentResourceOwner, RelFileNode::dbNode, FlushBuffer(), GetBufferDescriptor, i, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, PinBuffer_Locked(), ReservePrivateRefCountEntry(), ResourceOwnerEnlargeBuffers(), buftag::rnode, BufferDesc::tag, UnlockBufHdr, and UnpinBuffer().

Referenced by dbase_redo().

3706 {
3707  int i;
3708  BufferDesc *bufHdr;
3709 
3710  /* Make sure we can handle the pin inside the loop */
3711  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3712 
3713  for (i = 0; i < NBuffers; i++)
3714  {
3715  uint32 buf_state;
3716 
3717  bufHdr = GetBufferDescriptor(i);
3718 
3719  /*
3720  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3721  * and saves some cycles.
3722  */
3723  if (bufHdr->tag.rnode.dbNode != dbid)
3724  continue;
3725 
3726  ReservePrivateRefCountEntry();
3727 
3728  buf_state = LockBufHdr(bufHdr);
3729  if (bufHdr->tag.rnode.dbNode == dbid &&
3730  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3731  {
3732  PinBuffer_Locked(bufHdr);
3733  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3734  FlushBuffer(bufHdr, NULL);
3735  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3736  UnpinBuffer(bufHdr, true);
3737  }
3738  else
3739  UnlockBufHdr(bufHdr, buf_state);
3740  }
3741 }

◆ FlushOneBuffer()

void FlushOneBuffer ( Buffer  buffer)

Definition at line 3748 of file bufmgr.c.

References Assert, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, FlushBuffer(), GetBufferDescriptor, and LWLockHeldByMe().

Referenced by hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), and XLogReadBufferForRedoExtended().

3749 {
3750  BufferDesc *bufHdr;
3751 
3752  /* currently not needed, but no fundamental reason not to support */
3753  Assert(!BufferIsLocal(buffer));
3754 
3755  Assert(BufferIsPinned(buffer));
3756 
3757  bufHdr = GetBufferDescriptor(buffer - 1);
3758 
3759  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
3760 
3761  FlushBuffer(bufHdr, NULL);
3762 }

◆ FlushRelationBuffers()

void FlushRelationBuffers ( Relation  rel)

Definition at line 3512 of file bufmgr.c.

References ErrorContextCallback::arg, buftag::blockNum, BM_DIRTY, BM_JUST_DIRTIED, BM_VALID, BufferDescriptorGetContentLock, ErrorContextCallback::callback, CurrentResourceOwner, error_context_stack, FlushBuffer(), buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, i, local_buffer_write_error_callback(), LocalBufHdrGetBlock, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, NLocBuffer, PageSetChecksumInplace(), pg_atomic_read_u32(), pg_atomic_unlocked_write_u32(), PinBuffer_Locked(), ErrorContextCallback::previous, RelationData::rd_node, RelationGetSmgr(), RelationUsesLocalBuffers, RelFileNodeEquals, ReservePrivateRefCountEntry(), ResourceOwnerEnlargeBuffers(), buftag::rnode, smgrwrite(), BufferDesc::state, BufferDesc::tag, UnlockBufHdr, and UnpinBuffer().

Referenced by heapam_relation_copy_data(), and index_copy_data().

3513 {
3514  int i;
3515  BufferDesc *bufHdr;
3516 
3517  if (RelationUsesLocalBuffers(rel))
3518  {
3519  for (i = 0; i < NLocBuffer; i++)
3520  {
3521  uint32 buf_state;
3522 
3523  bufHdr = GetLocalBufferDescriptor(i);
3524  if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
3525  ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
3526  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3527  {
3528  ErrorContextCallback errcallback;
3529  Page localpage;
3530 
3531  localpage = (char *) LocalBufHdrGetBlock(bufHdr);
3532 
3533  /* Setup error traceback support for ereport() */
3534  errcallback.callback = local_buffer_write_error_callback;
3535  errcallback.arg = (void *) bufHdr;
3536  errcallback.previous = error_context_stack;
3537  error_context_stack = &errcallback;
3538 
3539  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
3540 
3541  smgrwrite(RelationGetSmgr(rel),
3542  bufHdr->tag.forkNum,
3543  bufHdr->tag.blockNum,
3544  localpage,
3545  false);
3546 
3547  buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
3548  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
3549 
3550  /* Pop the error context stack */
3551  error_context_stack = errcallback.previous;
3552  }
3553  }
3554 
3555  return;
3556  }
3557 
3558  /* Make sure we can handle the pin inside the loop */
3559  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3560 
3561  for (i = 0; i < NBuffers; i++)
3562  {
3563  uint32 buf_state;
3564 
3565  bufHdr = GetBufferDescriptor(i);
3566 
3567  /*
3568  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3569  * and saves some cycles.
3570  */
3571  if (!RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node))
3572  continue;
3573 
3574  ReservePrivateRefCountEntry();
3575 
3576  buf_state = LockBufHdr(bufHdr);
3577  if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
3578  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3579  {
3580  PinBuffer_Locked(bufHdr);
3581  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3582  FlushBuffer(bufHdr, RelationGetSmgr(rel));
3583  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3584  UnpinBuffer(bufHdr, true);
3585  }
3586  else
3587  UnlockBufHdr(bufHdr, buf_state);
3588  }
3589 }

◆ FlushRelationsAllBuffers()

void FlushRelationsAllBuffers ( struct SMgrRelationData **  smgrs,
int  nrels 
)

◆ FreeAccessStrategy()

void FreeAccessStrategy ( BufferAccessStrategy  strategy)

Definition at line 597 of file freelist.c.

References pfree().

Referenced by blgetbitmap(), FreeBulkInsertState(), heap_endscan(), initscan(), and parallel_vacuum_main().

598 {
599  /* don't crash if called on a "default" strategy */
600  if (strategy != NULL)
601  pfree(strategy);
602 }
void pfree(void *pointer)
Definition: mcxt.c:1169

◆ GetAccessStrategy()

BufferAccessStrategy GetAccessStrategy ( BufferAccessStrategyType  btype)

Definition at line 542 of file freelist.c.

References BAS_BULKREAD, BAS_BULKWRITE, BAS_NORMAL, BAS_VACUUM, BufferAccessStrategyData::btype, BufferAccessStrategyData::buffers, elog, ERROR, Min, NBuffers, offsetof, palloc0(), and BufferAccessStrategyData::ring_size.

Referenced by blgetbitmap(), bt_check_every_level(), collect_corrupt_items(), collect_visibility_data(), do_autovacuum(), GetBulkInsertState(), initscan(), parallel_vacuum_main(), pgstat_index(), pgstathashindex(), pgstatindex_impl(), statapprox_heap(), vacuum(), and verify_heapam().

543 {
544  BufferAccessStrategy strategy;
545  int ring_size;
546 
547  /*
548  * Select ring size to use. See buffer/README for rationales.
549  *
550  * Note: if you change the ring size for BAS_BULKREAD, see also
551  * SYNC_SCAN_REPORT_INTERVAL in access/heap/syncscan.c.
552  */
553  switch (btype)
554  {
555  case BAS_NORMAL:
556  /* if someone asks for NORMAL, just give 'em a "default" object */
557  return NULL;
558 
559  case BAS_BULKREAD:
560  ring_size = 256 * 1024 / BLCKSZ;
561  break;
562  case BAS_BULKWRITE:
563  ring_size = 16 * 1024 * 1024 / BLCKSZ;
564  break;
565  case BAS_VACUUM:
566  ring_size = 256 * 1024 / BLCKSZ;
567  break;
568 
569  default:
570  elog(ERROR, "unrecognized buffer access strategy: %d",
571  (int) btype);
572  return NULL; /* keep compiler quiet */
573  }
574 
575  /* Make sure ring isn't an undue fraction of shared buffers */
576  ring_size = Min(NBuffers / 8, ring_size);
577 
578  /* Allocate the object and initialize all elements to zeroes */
579  strategy = (BufferAccessStrategy)
 580  palloc0(offsetof(BufferAccessStrategyData, buffers) +
 581  ring_size * sizeof(Buffer));
582 
583  /* Set fields that don't start out zero */
584  strategy->btype = btype;
585  strategy->ring_size = ring_size;
586 
587  return strategy;
588 }
struct BufferAccessStrategyData * BufferAccessStrategy
Definition: buf.h:44
#define Min(x, y)
Definition: c.h:986
#define ERROR
Definition: elog.h:46
BufferAccessStrategyType btype
Definition: freelist.c:74
void * palloc0(Size size)
Definition: mcxt.c:1093
#define elog(elevel,...)
Definition: elog.h:232
int NBuffers
Definition: globals.c:135
int Buffer
Definition: buf.h:23
#define offsetof(type, field)
Definition: c.h:727
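
A minimal usage sketch (not part of freelist.c or bufmgr.c), assuming a caller that wants to scan a whole relation without flooding shared buffers; the helper name scan_relation_blocks is illustrative only:

/* Illustrative sketch only: scan a relation through a BAS_BULKREAD ring. */
static void
scan_relation_blocks(Relation rel)
{
    BufferAccessStrategy strategy = GetAccessStrategy(BAS_BULKREAD);
    BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, MAIN_FORKNUM);
    BlockNumber blkno;

    for (blkno = 0; blkno < nblocks; blkno++)
    {
        Buffer      buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
                                             RBM_NORMAL, strategy);

        /* ... examine BufferGetPage(buf) under a content lock here ... */

        ReleaseBuffer(buf);
    }

    /* Safe even when GetAccessStrategy() returned NULL for BAS_NORMAL. */
    FreeAccessStrategy(strategy);
}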

◆ HoldingBufferPinThatDelaysRecovery()

bool HoldingBufferPinThatDelaysRecovery ( void  )

Definition at line 4215 of file bufmgr.c.

References GetPrivateRefCount(), and GetStartupBufferPinWaitBufId().

Referenced by CheckRecoveryConflictDeadlock(), and RecoveryConflictInterrupt().

4216 {
4217  int bufid = GetStartupBufferPinWaitBufId();
4218 
4219  /*
4220  * If we get woken slowly then it's possible that the Startup process was
4221  * already woken by other backends before we got here. Also possible that
4222  * we get here by multiple interrupts or interrupts at inappropriate
4223  * times, so make sure we do nothing if the bufid is not set.
4224  */
4225  if (bufid < 0)
4226  return false;
4227 
4228  if (GetPrivateRefCount(bufid + 1) > 0)
4229  return true;
4230 
4231  return false;
4232 }
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
int GetStartupBufferPinWaitBufId(void)
Definition: proc.c:662

◆ IncrBufferRefCount()

void IncrBufferRefCount ( Buffer  buffer)

Definition at line 3806 of file bufmgr.c.

References Assert, BufferIsLocal, BufferIsPinned, CurrentResourceOwner, GetPrivateRefCountEntry(), LocalRefCount, PrivateRefCountEntry::refcount, ResourceOwnerEnlargeBuffers(), and ResourceOwnerRememberBuffer().

Referenced by _bt_steppage(), btrestrpos(), entryLoadMoreItems(), ReadBufferBI(), scanPostingTree(), startScanEntry(), and tts_buffer_heap_store_tuple().

3807 {
3808  Assert(BufferIsPinned(buffer));
 3809  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
 3810  if (BufferIsLocal(buffer))
3811  LocalRefCount[-buffer - 1]++;
3812  else
3813  {
3814  PrivateRefCountEntry *ref;
3815 
3816  ref = GetPrivateRefCountEntry(buffer, true);
3817  Assert(ref != NULL);
3818  ref->refcount++;
3819  }
 3820  ResourceOwnerRememberBuffer(CurrentResourceOwner, buffer);
 3821 }
static PrivateRefCountEntry * GetPrivateRefCountEntry(Buffer buffer, bool do_move)
Definition: bufmgr.c:307
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:959
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:946
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
int32 * LocalRefCount
Definition: localbuf.c:45

◆ InitBufferPool()

void InitBufferPool ( void  )

Definition at line 67 of file buf_init.c.

References Assert, backend_flush_after, buf, BufferDesc::buf_id, BufferBlocks, BufferDescriptorGetContentLock, BufferDescriptorGetIOCV, CLEAR_BUFFERTAG, ConditionVariableInit(), BufferDesc::freeNext, FREENEXT_END_OF_LIST, GetBufferDescriptor, i, LWLockInitialize(), LWTRANCHE_BUFFER_CONTENT, NBuffers, pg_atomic_init_u32(), ShmemInitStruct(), BufferDesc::state, StrategyInitialize(), BufferDesc::tag, BufferDesc::wait_backend_pid, and WritebackContextInit().

Referenced by CreateSharedMemoryAndSemaphores().

68 {
69  bool foundBufs,
70  foundDescs,
71  foundIOCV,
72  foundBufCkpt;
73 
74  /* Align descriptors to a cacheline boundary. */
76  ShmemInitStruct("Buffer Descriptors",
77  NBuffers * sizeof(BufferDescPadded),
78  &foundDescs);
79 
80  BufferBlocks = (char *)
81  ShmemInitStruct("Buffer Blocks",
82  NBuffers * (Size) BLCKSZ, &foundBufs);
83 
84  /* Align condition variables to cacheline boundary. */
86  ShmemInitStruct("Buffer IO Condition Variables",
88  &foundIOCV);
89 
90  /*
91  * The array used to sort to-be-checkpointed buffer ids is located in
92  * shared memory, to avoid having to allocate significant amounts of
93  * memory at runtime. As that'd be in the middle of a checkpoint, or when
94  * the checkpointer is restarted, memory allocation failures would be
95  * painful.
96  */
98  ShmemInitStruct("Checkpoint BufferIds",
99  NBuffers * sizeof(CkptSortItem), &foundBufCkpt);
100 
101  if (foundDescs || foundBufs || foundIOCV || foundBufCkpt)
102  {
103  /* should find all of these, or none of them */
104  Assert(foundDescs && foundBufs && foundIOCV && foundBufCkpt);
105  /* note: this path is only taken in EXEC_BACKEND case */
106  }
107  else
108  {
109  int i;
110 
111  /*
112  * Initialize all the buffer headers.
113  */
114  for (i = 0; i < NBuffers; i++)
115  {
 116  BufferDesc *buf = GetBufferDescriptor(i);
 117 
118  CLEAR_BUFFERTAG(buf->tag);
119 
120  pg_atomic_init_u32(&buf->state, 0);
121  buf->wait_backend_pid = 0;
122 
123  buf->buf_id = i;
124 
125  /*
126  * Initially link all the buffers together as unused. Subsequent
127  * management of this list is done by freelist.c.
128  */
129  buf->freeNext = i + 1;
130 
 130 
 131  LWLockInitialize(BufferDescriptorGetContentLock(buf),
 132  LWTRANCHE_BUFFER_CONTENT);
 133 
 134  ConditionVariableInit(BufferDescriptorGetIOCV(buf));
135  }
136 
137  /* Correct last entry of linked list */
138  GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST;
139  }
140 
141  /* Init other shared buffer-management stuff */
142  StrategyInitialize(!foundDescs);
143 
144  /* Initialize per-backend file flush context */
 145  WritebackContextInit(&BackendWritebackContext,
 146  &backend_flush_after);
 147 }
#define FREENEXT_END_OF_LIST
int wait_backend_pid
int backend_flush_after
Definition: bufmgr.c:158
void StrategyInitialize(bool init)
Definition: freelist.c:475
void WritebackContextInit(WritebackContext *context, int *max_pending)
Definition: bufmgr.c:4722
void ConditionVariableInit(ConditionVariable *cv)
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:396
WritebackContext BackendWritebackContext
Definition: buf_init.c:23
static char * buf
Definition: pg_test_fsync.c:68
BufferDescPadded * BufferDescriptors
Definition: buf_init.c:20
#define GetBufferDescriptor(id)
#define BufferDescriptorGetIOCV(bdesc)
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:736
ConditionVariableMinimallyPadded * BufferIOCVArray
Definition: buf_init.c:22
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:804
#define CLEAR_BUFFERTAG(a)
Definition: buf_internals.h:98
CkptSortItem * CkptBufferIds
Definition: buf_init.c:24
size_t Size
Definition: c.h:540
BufferTag tag
int i
int NBuffers
Definition: globals.c:135
pg_atomic_uint32 state
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:223
char * BufferBlocks
Definition: buf_init.c:21

◆ InitBufferPoolAccess()

void InitBufferPoolAccess ( void  )

Definition at line 2596 of file bufmgr.c.

References Assert, AtProcExit_Buffers(), HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, MyProc, on_shmem_exit(), and PrivateRefCountArray.

Referenced by BaseInit().

2597 {
2598  HASHCTL hash_ctl;
2599 
2600  memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
2601 
2602  hash_ctl.keysize = sizeof(int32);
2603  hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
2604 
2605  PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
2606  HASH_ELEM | HASH_BLOBS);
2607 
2608  /*
2609  * AtProcExit_Buffers needs LWLock access, and thereby has to be called at
2610  * the corresponding phase of backend shutdown.
2611  */
2612  Assert(MyProc != NULL);
 2613  on_shmem_exit(AtProcExit_Buffers, 0);
 2614 }
struct PrivateRefCountEntry PrivateRefCountEntry
#define HASH_ELEM
Definition: hsearch.h:95
PGPROC * MyProc
Definition: proc.c:68
Size entrysize
Definition: hsearch.h:76
signed int int32
Definition: c.h:429
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:361
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:349
static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES]
Definition: bufmgr.c:197
#define HASH_BLOBS
Definition: hsearch.h:97
Size keysize
Definition: hsearch.h:75
#define Assert(condition)
Definition: c.h:804
static HTAB * PrivateRefCountHash
Definition: bufmgr.c:198
static void AtProcExit_Buffers(int code, Datum arg)
Definition: bufmgr.c:2621

◆ IsBufferCleanupOK()

bool IsBufferCleanupOK ( Buffer  buffer)

Definition at line 4297 of file bufmgr.c.

References Assert, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsValid, GetBufferDescriptor, GetPrivateRefCount(), LocalRefCount, LockBufHdr(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), and UnlockBufHdr.

Referenced by _hash_doinsert(), _hash_expandtable(), _hash_splitbucket(), hash_xlog_split_allocate_page(), and hashbucketcleanup().

4298 {
4299  BufferDesc *bufHdr;
4300  uint32 buf_state;
4301 
4302  Assert(BufferIsValid(buffer));
4303 
4304  if (BufferIsLocal(buffer))
4305  {
4306  /* There should be exactly one pin */
4307  if (LocalRefCount[-buffer - 1] != 1)
4308  return false;
4309  /* Nobody else to wait for */
4310  return true;
4311  }
4312 
4313  /* There should be exactly one local pin */
4314  if (GetPrivateRefCount(buffer) != 1)
4315  return false;
4316 
4317  bufHdr = GetBufferDescriptor(buffer - 1);
4318 
4319  /* caller must hold exclusive lock on buffer */
 4320  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
 4321  LW_EXCLUSIVE));
4322 
4323  buf_state = LockBufHdr(bufHdr);
4324 
4325  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4326  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4327  {
4328  /* pincount is OK. */
4329  UnlockBufHdr(bufHdr, buf_state);
4330  return true;
4331  }
4332 
4333  UnlockBufHdr(bufHdr, buf_state);
4334  return false;
4335 }
bool LWLockHeldByMeInMode(LWLock *l, LWLockMode mode)
Definition: lwlock.c:1937
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4587
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define UnlockBufHdr(desc, s)
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:49
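
A minimal usage sketch (not part of bufmgr.c), assuming a caller that already pinned the buffer and wants to clean the page only when no other backend holds a pin; the helper name try_cleanup_page and the cleanup action are illustrative only:

/* Illustrative sketch only: attempt page cleanup only when it is safe. */
static bool
try_cleanup_page(Buffer buf)
{
    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

    if (!IsBufferCleanupOK(buf))
    {
        /* Another backend still has the page pinned; give up for now. */
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
        return false;
    }

    /* ... safe to defragment or otherwise clean the page here ... */

    LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    return true;
}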

◆ LockBuffer()

void LockBuffer ( Buffer  buffer,
int  mode 
)

Definition at line 4007 of file bufmgr.c.

References Assert, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, elog, ERROR, GetBufferDescriptor, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), and LWLockRelease().

Referenced by _bt_lockbuf(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_getbuf(), _hash_getbuf_with_strategy(), _hash_getcachedmetap(), _hash_getnewbuf(), _hash_init(), _hash_kill_items(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), blbulkdelete(), blgetbitmap(), blinsert(), BloomNewBuffer(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_page_cleanup(), brinbuild(), brinbuildempty(), bringetbitmap(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinLockRevmapPageForUpdate(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinsummarize(), bt_metap(), bt_page_items_internal(), bt_page_stats_internal(), bt_recheck_sibling_links(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), ConditionalLockBufferForCleanup(), count_nondeletable_pages(), entryLoadMoreItems(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), get_raw_page_internal(), GetVisibilityMapPins(), ginbuildempty(), ginbulkdelete(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistbuildempty(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfinishsplit(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_page_prune_opt(), heap_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_for_cluster(), heapam_scan_analyze_next_block(), heapam_scan_bitmap_next_block(), heapam_scan_sample_next_tuple(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), initBloomState(), lazy_scan_heap(), lazy_vacuum_heap_rel(), LockBufferForCleanup(), log_newpage_range(), palloc_btree_page(), pg_visibility(), pgrowlocks(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationGetBufferForTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), shiftList(), spgdoinsert(), spgGetCache(), SpGistNewBuffer(), spgprocesspending(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), summarize_range(), UnlockReleaseBuffer(), verify_heapam(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), XLogReadBufferExtended(), XLogReadBufferForRedoExtended(), and XLogRecordPageWithFreeSpace().

4008 {
4009  BufferDesc *buf;
4010 
4011  Assert(BufferIsPinned(buffer));
4012  if (BufferIsLocal(buffer))
4013  return; /* local buffers need no lock */
4014 
4015  buf = GetBufferDescriptor(buffer - 1);
4016 
4017  if (mode == BUFFER_LOCK_UNLOCK)
 4018  LWLockRelease(BufferDescriptorGetContentLock(buf));
 4019  else if (mode == BUFFER_LOCK_SHARE)
 4020  LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_SHARED);
 4021  else if (mode == BUFFER_LOCK_EXCLUSIVE)
 4022  LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_EXCLUSIVE);
 4023  else
4024  elog(ERROR, "unrecognized buffer lock mode: %d", mode);
4025 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
static PgChecksumMode mode
Definition: pg_checksums.c:65
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1803
#define ERROR
Definition: elog.h:46
static char * buf
Definition: pg_test_fsync.c:68
#define GetBufferDescriptor(id)
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1199
#define elog(elevel,...)
Definition: elog.h:232
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
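
A minimal usage sketch (not part of bufmgr.c) of the usual pin-then-lock pattern, assuming a read-only caller; the helper name inspect_block is illustrative only:

/* Illustrative sketch only: read one block under a shared content lock. */
static void
inspect_block(Relation rel, BlockNumber blkno)
{
    Buffer  buf = ReadBuffer(rel, blkno);
    Page    page;

    LockBuffer(buf, BUFFER_LOCK_SHARE);
    page = BufferGetPage(buf);

    /* ... read-only examination of the page contents ... */
    (void) page;

    UnlockReleaseBuffer(buf);   /* drops the content lock and the pin together */
}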

◆ LockBufferForCleanup()

void LockBufferForCleanup ( Buffer  buffer)

Definition at line 4064 of file bufmgr.c.

References Assert, BM_PIN_COUNT_WAITER, BUF_STATE_GET_REFCOUNT, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsPinned, DeadlockTimeout, elog, ERROR, get_ps_display(), GetBufferDescriptor, GetCurrentTimestamp(), GetPrivateRefCount(), InHotStandby, LocalRefCount, LockBuffer(), LockBufHdr(), log_recovery_conflict_waits, LogRecoveryConflict(), MyProcPid, now(), palloc(), pfree(), PG_WAIT_BUFFER_PIN, PROCSIG_RECOVERY_CONFLICT_BUFFERPIN, ProcWaitForSignal(), ResolveRecoveryConflictWithBufferPin(), set_ps_display(), SetStartupBufferPinWaitBufId(), TimestampDifferenceExceeds(), UnlockBufHdr, update_process_title, and BufferDesc::wait_backend_pid.

Referenced by _bt_upgradelockbufcleanup(), ginVacuumPostingTree(), hashbulkdelete(), heap_force_common(), lazy_scan_heap(), ReadBuffer_common(), and XLogReadBufferForRedoExtended().

4065 {
4066  BufferDesc *bufHdr;
4067  char *new_status = NULL;
4068  TimestampTz waitStart = 0;
4069  bool logged_recovery_conflict = false;
4070 
4071  Assert(BufferIsPinned(buffer));
4072  Assert(PinCountWaitBuf == NULL);
4073 
4074  if (BufferIsLocal(buffer))
4075  {
4076  /* There should be exactly one pin */
4077  if (LocalRefCount[-buffer - 1] != 1)
4078  elog(ERROR, "incorrect local pin count: %d",
4079  LocalRefCount[-buffer - 1]);
4080  /* Nobody else to wait for */
4081  return;
4082  }
4083 
4084  /* There should be exactly one local pin */
4085  if (GetPrivateRefCount(buffer) != 1)
4086  elog(ERROR, "incorrect local pin count: %d",
4087  GetPrivateRefCount(buffer));
4088 
4089  bufHdr = GetBufferDescriptor(buffer - 1);
4090 
4091  for (;;)
4092  {
4093  uint32 buf_state;
4094 
4095  /* Try to acquire lock */
 4096  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 4097  buf_state = LockBufHdr(bufHdr);
4098 
4099  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4100  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4101  {
4102  /* Successfully acquired exclusive lock with pincount 1 */
4103  UnlockBufHdr(bufHdr, buf_state);
4104 
4105  /*
4106  * Emit the log message if recovery conflict on buffer pin was
4107  * resolved but the startup process waited longer than
4108  * deadlock_timeout for it.
4109  */
4110  if (logged_recovery_conflict)
 4111  LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
 4112  waitStart, GetCurrentTimestamp(),
4113  NULL, false);
4114 
4115  /* Report change to non-waiting status */
4116  if (new_status)
4117  {
4118  set_ps_display(new_status);
4119  pfree(new_status);
4120  }
4121  return;
4122  }
4123  /* Failed, so mark myself as waiting for pincount 1 */
4124  if (buf_state & BM_PIN_COUNT_WAITER)
4125  {
4126  UnlockBufHdr(bufHdr, buf_state);
4127  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4128  elog(ERROR, "multiple backends attempting to wait for pincount 1");
4129  }
4130  bufHdr->wait_backend_pid = MyProcPid;
4131  PinCountWaitBuf = bufHdr;
4132  buf_state |= BM_PIN_COUNT_WAITER;
4133  UnlockBufHdr(bufHdr, buf_state);
4134  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4135 
4136  /* Wait to be signaled by UnpinBuffer() */
4137  if (InHotStandby)
4138  {
4139  /* Report change to waiting status */
4140  if (update_process_title && new_status == NULL)
4141  {
4142  const char *old_status;
4143  int len;
4144 
4145  old_status = get_ps_display(&len);
4146  new_status = (char *) palloc(len + 8 + 1);
4147  memcpy(new_status, old_status, len);
4148  strcpy(new_status + len, " waiting");
4149  set_ps_display(new_status);
4150  new_status[len] = '\0'; /* truncate off " waiting" */
4151  }
4152 
4153  /*
4154  * Emit the log message if the startup process is waiting longer
4155  * than deadlock_timeout for recovery conflict on buffer pin.
4156  *
4157  * Skip this if first time through because the startup process has
4158  * not started waiting yet in this case. So, the wait start
4159  * timestamp is set after this logic.
4160  */
4161  if (waitStart != 0 && !logged_recovery_conflict)
4162  {
 4163  TimestampTz now = GetCurrentTimestamp();
 4164 
4165  if (TimestampDifferenceExceeds(waitStart, now,
4166  DeadlockTimeout))
4167  {
 4168  LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
 4169  waitStart, now, NULL, true);
4170  logged_recovery_conflict = true;
4171  }
4172  }
4173 
4174  /*
4175  * Set the wait start timestamp if logging is enabled and first
4176  * time through.
4177  */
4178  if (log_recovery_conflict_waits && waitStart == 0)
4179  waitStart = GetCurrentTimestamp();
4180 
4181  /* Publish the bufid that Startup process waits on */
4182  SetStartupBufferPinWaitBufId(buffer - 1);
4183  /* Set alarm and then wait to be signaled by UnpinBuffer() */
 4184  ResolveRecoveryConflictWithBufferPin();
 4185  /* Reset the published bufid */
 4186  SetStartupBufferPinWaitBufId(-1);
4187  }
4188  else
 4189  ProcWaitForSignal(PG_WAIT_BUFFER_PIN);
 4190 
4191  /*
4192  * Remove flag marking us as waiter. Normally this will not be set
4193  * anymore, but ProcWaitForSignal() can return for other signals as
4194  * well. We take care to only reset the flag if we're the waiter, as
4195  * theoretically another backend could have started waiting. That's
4196  * impossible with the current usages due to table level locking, but
4197  * better be safe.
4198  */
4199  buf_state = LockBufHdr(bufHdr);
4200  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4201  bufHdr->wait_backend_pid == MyProcPid)
4202  buf_state &= ~BM_PIN_COUNT_WAITER;
4203  UnlockBufHdr(bufHdr, buf_state);
4204 
4205  PinCountWaitBuf = NULL;
4206  /* Loop back and try again */
4207  }
4208 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
int MyProcPid
Definition: globals.c:43
int wait_backend_pid
bool update_process_title
Definition: ps_status.c:36
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1580
int64 TimestampTz
Definition: timestamp.h:39
void LogRecoveryConflict(ProcSignalReason reason, TimestampTz wait_start, TimestampTz now, VirtualTransactionId *wait_list, bool still_waiting)
Definition: standby.c:249
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1711
void set_ps_display(const char *activity)
Definition: ps_status.c:349
void pfree(void *pointer)
Definition: mcxt.c:1169
#define ERROR
Definition: elog.h:46
void ResolveRecoveryConflictWithBufferPin(void)
Definition: standby.c:753
const char * get_ps_display(int *displen)
Definition: ps_status.c:430
void SetStartupBufferPinWaitBufId(int bufid)
Definition: proc.c:650
#define InHotStandby
Definition: xlogutils.h:57
#define GetBufferDescriptor(id)
#define PG_WAIT_BUFFER_PIN
Definition: wait_event.h:20
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
bool log_recovery_conflict_waits
Definition: standby.c:42
void ProcWaitForSignal(uint32 wait_event_info)
Definition: proc.c:1897
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4007
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4587
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
void * palloc(Size size)
Definition: mcxt.c:1062
#define UnlockBufHdr(desc, s)
#define elog(elevel,...)
Definition: elog.h:232
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:165
int DeadlockTimeout
Definition: proc.c:60
Datum now(PG_FUNCTION_ARGS)
Definition: timestamp.c:1544
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:49
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:65

◆ MarkBufferDirty()

void MarkBufferDirty ( Buffer  buffer)

Definition at line 1565 of file bufmgr.c.

References Assert, BM_DIRTY, BM_JUST_DIRTIED, BM_LOCKED, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, BufferIsValid, elog, ERROR, GetBufferDescriptor, LW_EXCLUSIVE, LWLockHeldByMeInMode(), MarkLocalBufferDirty(), pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pgBufferUsage, BufferUsage::shared_blks_dirtied, BufferDesc::state, VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, VacuumPageDirty, and WaitBufHdrUnlocked().

Referenced by _bt_clear_incomplete_split(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newroot(), _bt_restore_meta(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), brin_doinsert(), brin_doupdate(), brin_initialize_empty_new_buffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinRevmapDesummarizeRange(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), createPostingTree(), do_setval(), doPickSplit(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), generic_redo(), GenericXLogFinish(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginDeletePage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistbuild(), gistbuildempty(), gistdeletepage(), gistplacetopage(), gistprunepage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistvacuumpage(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_prune(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), lazy_scan_heap(), lazy_scan_prune(), lazy_vacuum_heap_page(), log_newpage_range(), moveLeafs(), nextval_internal(), RelationGetBufferForTuple(), revmap_physical_extend(), saveNodeLink(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), SpGistUpdateMetaPage(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), writeListPage(), and XLogReadBufferForRedoExtended().

1566 {
1567  BufferDesc *bufHdr;
1568  uint32 buf_state;
1569  uint32 old_buf_state;
1570 
1571  if (!BufferIsValid(buffer))
1572  elog(ERROR, "bad buffer ID: %d", buffer);
1573 
1574  if (BufferIsLocal(buffer))
1575  {
1576  MarkLocalBufferDirty(buffer);
1577  return;
1578  }
1579 
1580  bufHdr = GetBufferDescriptor(buffer - 1);
1581 
1582  Assert(BufferIsPinned(buffer));
 1583  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
 1584  LW_EXCLUSIVE));
1585 
1586  old_buf_state = pg_atomic_read_u32(&bufHdr->state);
1587  for (;;)
1588  {
1589  if (old_buf_state & BM_LOCKED)
1590  old_buf_state = WaitBufHdrUnlocked(bufHdr);
1591 
1592  buf_state = old_buf_state;
1593 
1594  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1595  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
1596 
1597  if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
1598  buf_state))
1599  break;
1600  }
1601 
1602  /*
1603  * If the buffer was not dirty already, do vacuum accounting.
1604  */
1605  if (!(old_buf_state & BM_DIRTY))
1606  {
1607  VacuumPageDirty++;
 1608  pgBufferUsage.shared_blks_dirtied++;
 1609  if (VacuumCostActive)
 1610  VacuumCostBalance += VacuumCostPageDirty;
 1611  }
1612 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
bool LWLockHeldByMeInMode(LWLock *l, LWLockMode mode)
Definition: lwlock.c:1937
int VacuumCostBalance
Definition: globals.c:151
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:311
int64 VacuumPageDirty
Definition: globals.c:149
int64 shared_blks_dirtied
Definition: instrument.h:28
#define BM_DIRTY
Definition: buf_internals.h:59
int VacuumCostPageDirty
Definition: globals.c:143
#define ERROR
Definition: elog.h:46
#define GetBufferDescriptor(id)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:64
unsigned int uint32
Definition: c.h:441
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:286
#define BM_LOCKED
Definition: buf_internals.h:58
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
static uint32 WaitBufHdrUnlocked(BufferDesc *buf)
Definition: bufmgr.c:4615
#define elog(elevel,...)
Definition: elog.h:232
pg_atomic_uint32 state
BufferUsage pgBufferUsage
Definition: instrument.c:20
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:49
bool VacuumCostActive
Definition: globals.c:152
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241
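
A minimal usage sketch (not part of bufmgr.c), assuming a caller that modifies a page it holds exclusively locked; the helper name modify_block is illustrative, and real WAL-logged callers also call XLogInsert() inside the same critical section (see heap_insert() and friends):

/* Illustrative sketch only: modify a page and mark its buffer dirty. */
static void
modify_block(Relation rel, BlockNumber blkno)
{
    Buffer  buf = ReadBuffer(rel, blkno);

    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

    START_CRIT_SECTION();
    /* ... change the page returned by BufferGetPage(buf) here ... */
    MarkBufferDirty(buf);
    /* ... XLogInsert() would go here for a WAL-logged relation ... */
    END_CRIT_SECTION();

    UnlockReleaseBuffer(buf);
}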

◆ MarkBufferDirtyHint()

void MarkBufferDirtyHint ( Buffer  buffer,
bool  buffer_std 
)

Definition at line 3838 of file bufmgr.c.

References Assert, BM_DIRTY, BM_JUST_DIRTIED, BM_PERMANENT, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferGetPage, BufferIsLocal, BufferIsValid, PGPROC::delayChkpt, elog, ERROR, GetBufferDescriptor, GetPrivateRefCount(), InvalidXLogRecPtr, LockBufHdr(), LWLockHeldByMe(), MarkLocalBufferDirty(), MyProc, PageSetLSN, pg_atomic_read_u32(), pgBufferUsage, RecoveryInProgress(), RelFileNodeSkippingWAL(), buftag::rnode, BufferUsage::shared_blks_dirtied, BufferDesc::state, BufferDesc::tag, UnlockBufHdr, VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, VacuumPageDirty, XLogHintBitIsNeeded, XLogRecPtrIsInvalid, and XLogSaveBufferForHint().

Referenced by _bt_check_unique(), _bt_killitems(), _hash_kill_items(), brin_start_evacuating_page(), btvacuumpage(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), gistkillitems(), heap_page_prune(), read_seq_tuple(), SetHintBits(), and XLogRecordPageWithFreeSpace().

3839 {
3840  BufferDesc *bufHdr;
3841  Page page = BufferGetPage(buffer);
3842 
3843  if (!BufferIsValid(buffer))
3844  elog(ERROR, "bad buffer ID: %d", buffer);
3845 
3846  if (BufferIsLocal(buffer))
3847  {
3848  MarkLocalBufferDirty(buffer);
3849  return;
3850  }
3851 
3852  bufHdr = GetBufferDescriptor(buffer - 1);
3853 
3854  Assert(GetPrivateRefCount(buffer) > 0);
3855  /* here, either share or exclusive lock is OK */
 3856  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
 3857 
3858  /*
3859  * This routine might get called many times on the same page, if we are
3860  * making the first scan after commit of an xact that added/deleted many
3861  * tuples. So, be as quick as we can if the buffer is already dirty. We
3862  * do this by not acquiring spinlock if it looks like the status bits are
3863  * already set. Since we make this test unlocked, there's a chance we
3864  * might fail to notice that the flags have just been cleared, and failed
3865  * to reset them, due to memory-ordering issues. But since this function
3866  * is only intended to be used in cases where failing to write out the
3867  * data would be harmless anyway, it doesn't really matter.
3868  */
3869  if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
 3870  (BM_DIRTY | BM_JUST_DIRTIED))
 3871  {
 3872  XLogRecPtr lsn = InvalidXLogRecPtr;
 3873  bool dirtied = false;
3874  bool delayChkpt = false;
3875  uint32 buf_state;
3876 
3877  /*
3878  * If we need to protect hint bit updates from torn writes, WAL-log a
3879  * full page image of the page. This full page image is only necessary
3880  * if the hint bit update is the first change to the page since the
3881  * last checkpoint.
3882  *
3883  * We don't check full_page_writes here because that logic is included
3884  * when we call XLogInsert() since the value changes dynamically.
3885  */
3886  if (XLogHintBitIsNeeded() &&
3887  (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
3888  {
3889  /*
3890  * If we must not write WAL, due to a relfilenode-specific
3891  * condition or being in recovery, don't dirty the page. We can
3892  * set the hint, just not dirty the page as a result so the hint
3893  * is lost when we evict the page or shutdown.
3894  *
3895  * See src/backend/storage/page/README for longer discussion.
3896  */
3897  if (RecoveryInProgress() ||
3898  RelFileNodeSkippingWAL(bufHdr->tag.rnode))
3899  return;
3900 
3901  /*
3902  * If the block is already dirty because we either made a change
3903  * or set a hint already, then we don't need to write a full page
3904  * image. Note that aggressive cleaning of blocks dirtied by hint
3905  * bit setting would increase the call rate. Bulk setting of hint
3906  * bits would reduce the call rate...
3907  *
3908  * We must issue the WAL record before we mark the buffer dirty.
3909  * Otherwise we might write the page before we write the WAL. That
3910  * causes a race condition, since a checkpoint might occur between
3911  * writing the WAL record and marking the buffer dirty. We solve
3912  * that with a kluge, but one that is already in use during
3913  * transaction commit to prevent race conditions. Basically, we
3914  * simply prevent the checkpoint WAL record from being written
3915  * until we have marked the buffer dirty. We don't start the
3916  * checkpoint flush until we have marked dirty, so our checkpoint
3917  * must flush the change to disk successfully or the checkpoint
3918  * never gets written, so crash recovery will fix.
3919  *
3920  * It's possible we may enter here without an xid, so it is
3921  * essential that CreateCheckpoint waits for virtual transactions
3922  * rather than full transactionids.
3923  */
3924  MyProc->delayChkpt = delayChkpt = true;
3925  lsn = XLogSaveBufferForHint(buffer, buffer_std);
3926  }
3927 
3928  buf_state = LockBufHdr(bufHdr);
3929 
3930  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
3931 
3932  if (!(buf_state & BM_DIRTY))
3933  {
3934  dirtied = true; /* Means "will be dirtied by this action" */
3935 
3936  /*
3937  * Set the page LSN if we wrote a backup block. We aren't supposed
3938  * to set this when only holding a share lock but as long as we
3939  * serialise it somehow we're OK. We choose to set LSN while
3940  * holding the buffer header lock, which causes any reader of an
3941  * LSN who holds only a share lock to also obtain a buffer header
3942  * lock before using PageGetLSN(), which is enforced in
3943  * BufferGetLSNAtomic().
3944  *
3945  * If checksums are enabled, you might think we should reset the
3946  * checksum here. That will happen when the page is written
3947  * sometime later in this checkpoint cycle.
3948  */
3949  if (!XLogRecPtrIsInvalid(lsn))
3950  PageSetLSN(page, lsn);
3951  }
3952 
3953  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
3954  UnlockBufHdr(bufHdr, buf_state);
3955 
3956  if (delayChkpt)
3957  MyProc->delayChkpt = false;
3958 
3959  if (dirtied)
3960  {
3961  VacuumPageDirty++;
 3962  pgBufferUsage.shared_blks_dirtied++;
 3963  if (VacuumCostActive)
 3964  VacuumCostBalance += VacuumCostPageDirty;
 3965  }
3966  }
3967 }
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
#define BM_PERMANENT
Definition: buf_internals.h:67
int VacuumCostBalance
Definition: globals.c:151
bool LWLockHeldByMe(LWLock *l)
Definition: lwlock.c:1919
PGPROC * MyProc
Definition: proc.c:68
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
Definition: xloginsert.c:972
int64 VacuumPageDirty
Definition: globals.c:149
int64 shared_blks_dirtied
Definition: instrument.h:28
bool RecoveryInProgress(void)
Definition: xlog.c:8341
#define BM_DIRTY
Definition: buf_internals.h:59
int VacuumCostPageDirty
Definition: globals.c:143
#define ERROR
Definition: elog.h:46
bool delayChkpt
Definition: proc.h:187
#define GetBufferDescriptor(id)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:64
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:286
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4587
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:93
bool RelFileNodeSkippingWAL(RelFileNode rnode)
Definition: storage.c:513
BufferTag tag
#define UnlockBufHdr(desc, s)
#define elog(elevel,...)
Definition: elog.h:232
pg_atomic_uint32 state
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
#define XLogHintBitIsNeeded()
Definition: xlog.h:177
Pointer Page
Definition: bufpage.h:78
BufferUsage pgBufferUsage
Definition: instrument.c:20
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:49
bool VacuumCostActive
Definition: globals.c:152
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241
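
A minimal usage sketch (not part of bufmgr.c), assuming a caller in the same situation as SetHintBits(): it holds at least a shared content lock and records information that may safely be lost; the helper name set_page_hint is illustrative only:

/* Illustrative sketch only: record a lose-able hint on an already locked page. */
static void
set_page_hint(Buffer buf)
{
    /* Caller is assumed to hold a share or exclusive content lock. */
    /* ... set a hint on BufferGetPage(buf) that is safe to lose ... */

    /* buffer_std = true: the page uses the standard header/hole layout. */
    MarkBufferDirtyHint(buf, true);
}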

◆ PrefetchBuffer()

PrefetchBufferResult PrefetchBuffer ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

Definition at line 587 of file bufmgr.c.

References Assert, BlockNumberIsValid, ereport, errcode(), errmsg(), ERROR, PrefetchLocalBuffer(), PrefetchSharedBuffer(), RELATION_IS_OTHER_TEMP, RelationGetSmgr(), RelationIsValid, and RelationUsesLocalBuffers.

Referenced by acquire_sample_rows(), BitmapPrefetch(), count_nondeletable_pages(), HeapTupleHeaderAdvanceLatestRemovedXid(), and pg_prewarm().

588 {
589  Assert(RelationIsValid(reln));
590  Assert(BlockNumberIsValid(blockNum));
591 
592  if (RelationUsesLocalBuffers(reln))
593  {
594  /* see comments in ReadBufferExtended */
595  if (RELATION_IS_OTHER_TEMP(reln))
596  ereport(ERROR,
597  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
598  errmsg("cannot access temporary tables of other sessions")));
599 
600  /* pass it off to localbuf.c */
601  return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
602  }
603  else
604  {
605  /* pass it to the shared buffer version */
606  return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
607  }
608 }
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition: localbuf.c:64
int errcode(int sqlerrcode)
Definition: elog.c:698
#define ERROR
Definition: elog.h:46
#define RelationIsValid(relation)
Definition: rel.h:450
PrefetchBufferResult PrefetchSharedBuffer(SMgrRelation smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:500
#define ereport(elevel,...)
Definition: elog.h:157
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
#define Assert(condition)
Definition: c.h:804
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:631
static SMgrRelation RelationGetSmgr(Relation rel)
Definition: rel.h:544
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:610
int errmsg(const char *fmt,...)
Definition: elog.c:909
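
A minimal usage sketch (not part of bufmgr.c), assuming a sequential reader that issues prefetch requests ahead of the block it is about to read, similar in spirit to pg_prewarm and bitmap heap scans; the helper name read_with_prefetch and the lookahead distance of 8 blocks are illustrative only:

/* Illustrative sketch only: prefetch a few blocks ahead of the current read. */
static void
read_with_prefetch(Relation rel, BlockNumber nblocks)
{
    BlockNumber blkno;

    for (blkno = 0; blkno < nblocks; blkno++)
    {
        Buffer  buf;

        if (blkno + 8 < nblocks)
            (void) PrefetchBuffer(rel, MAIN_FORKNUM, blkno + 8);

        buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, NULL);
        /* ... use the page ... */
        ReleaseBuffer(buf);
    }
}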

◆ PrefetchSharedBuffer()

PrefetchBufferResult PrefetchSharedBuffer ( struct SMgrRelationData smgr_reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

◆ PrintBufferLeakWarning()

void PrintBufferLeakWarning ( Buffer  buffer)

Definition at line 2681 of file bufmgr.c.

References Assert, buftag::blockNum, buf, BUF_FLAG_MASK, BUF_STATE_GET_REFCOUNT, BufferIsLocal, BufferIsValid, elog, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, GetPrivateRefCount(), InvalidBackendId, LocalRefCount, MyBackendId, pfree(), pg_atomic_read_u32(), relpathbackend, buftag::rnode, BufferDesc::state, BufferDesc::tag, and WARNING.

Referenced by CheckForBufferLeaks(), CheckForLocalBufferLeaks(), and ResourceOwnerReleaseInternal().

2682 {
2683  BufferDesc *buf;
2684  int32 loccount;
2685  char *path;
2686  BackendId backend;
2687  uint32 buf_state;
2688 
2689  Assert(BufferIsValid(buffer));
2690  if (BufferIsLocal(buffer))
2691  {
2692  buf = GetLocalBufferDescriptor(-buffer - 1);
2693  loccount = LocalRefCount[-buffer - 1];
2694  backend = MyBackendId;
2695  }
2696  else
2697  {
2698  buf = GetBufferDescriptor(buffer - 1);
2699  loccount = GetPrivateRefCount(buffer);
2700  backend = InvalidBackendId;
2701  }
2702 
2703  /* theoretically we should lock the bufhdr here */
2704  path = relpathbackend(buf->tag.rnode, backend, buf->tag.forkNum);
2705  buf_state = pg_atomic_read_u32(&buf->state);
2706  elog(WARNING,
2707  "buffer refcount leak: [%03d] "
2708  "(rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
2709  buffer, path,
2710  buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
2711  BUF_STATE_GET_REFCOUNT(buf_state), loccount);
2712  pfree(path);
2713 }
BackendId MyBackendId
Definition: globals.c:84
ForkNumber forkNum
Definition: buf_internals.h:94
#define GetLocalBufferDescriptor(id)
signed int int32
Definition: c.h:429
void pfree(void *pointer)
Definition: mcxt.c:1169
#define BUF_FLAG_MASK
Definition: buf_internals.h:46
static char * buf
Definition: pg_test_fsync.c:68
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
#define WARNING
Definition: elog.h:40
#define InvalidBackendId
Definition: backendid.h:23
int BackendId
Definition: backendid.h:21
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:95
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:93
BufferTag tag
#define elog(elevel,...)
Definition: elog.h:232
pg_atomic_uint32 state
#define relpathbackend(rnode, backend, forknum)
Definition: relpath.h:78
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:49
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ ReadBuffer()

Buffer ReadBuffer ( Relation  reln,
BlockNumber  blockNum 
)

Definition at line 694 of file bufmgr.c.

References MAIN_FORKNUM, RBM_NORMAL, and ReadBufferExtended().

Referenced by _bt_getbuf(), _bt_search_insert(), _hash_getbuf(), _hash_getbuf_with_condlock_cleanup(), blbulkdelete(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brinbuild(), brinGetStats(), brinGetTupleForHeapBlock(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), bt_metap(), bt_page_items_internal(), bt_page_stats_internal(), fill_seq_with_data(), ginFindLeafPage(), ginFindParents(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), GinNewBuffer(), ginStepRight(), ginUpdateStats(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistGetMaxLevel(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_lock_tuple(), heap_update(), initBloomState(), pg_visibility(), pgstatginindex_internal(), read_seq_tuple(), RelationGetBufferForTuple(), ReleaseAndReadBuffer(), revmap_get_buffer(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), shiftList(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), and spgWalk().

695 {
696  return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
697 }
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:741

◆ ReadBufferExtended()

Buffer ReadBufferExtended ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy 
)

Definition at line 741 of file bufmgr.c.

References buf, ereport, errcode(), errmsg(), ERROR, pgstat_count_buffer_hit, pgstat_count_buffer_read, RelationData::rd_rel, ReadBuffer_common(), RELATION_IS_OTHER_TEMP, and RelationGetSmgr().

Referenced by _hash_getbuf_with_strategy(), _hash_getinitbuf(), _hash_getnewbuf(), autoprewarm_database_main(), blbulkdelete(), blgetbitmap(), blvacuumcleanup(), brin_vacuum_scan(), brinbuildempty(), bt_recheck_sibling_links(), btvacuumpage(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), fsm_readbuf(), get_raw_page_internal(), ginbuildempty(), ginbulkdelete(), ginDeletePage(), ginScanToDelete(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistbuildempty(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbulkdelete(), heapam_scan_analyze_next_block(), heapgetpage(), lazy_scan_heap(), lazy_vacuum_heap_rel(), log_newpage_range(), palloc_btree_page(), pg_prewarm(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstathashindex(), pgstatindex_impl(), ReadBuffer(), ReadBufferBI(), spgprocesspending(), spgvacuumpage(), statapprox_heap(), verify_heapam(), and vm_readbuf().

743 {
744  bool hit;
745  Buffer buf;
746 
747  /*
748  * Reject attempts to read non-local temporary relations; we would be
749  * likely to get wrong data since we have no visibility into the owning
750  * session's local buffers.
751  */
752  if (RELATION_IS_OTHER_TEMP(reln))
753  ereport(ERROR,
754  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
755  errmsg("cannot access temporary tables of other sessions")));
756 
757  /*
758  * Read the buffer, and update pgstat counters to reflect a cache hit or
759  * miss.
760  */
 761  pgstat_count_buffer_read(reln);
 762  buf = ReadBuffer_common(RelationGetSmgr(reln), reln->rd_rel->relpersistence,
763  forkNum, blockNum, mode, strategy, &hit);
764  if (hit)
 765  pgstat_count_buffer_hit(reln);
 766  return buf;
767 }
static PgChecksumMode mode
Definition: pg_checksums.c:65
int errcode(int sqlerrcode)
Definition: elog.c:698
Form_pg_class rd_rel
Definition: rel.h:109
#define ERROR
Definition: elog.h:46
#define pgstat_count_buffer_read(rel)
Definition: pgstat.h:1077
static char * buf
Definition: pg_test_fsync.c:68
#define pgstat_count_buffer_hit(rel)
Definition: pgstat.h:1082
static Buffer ReadBuffer_common(SMgrRelation reln, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit)
Definition: bufmgr.c:801
#define ereport(elevel,...)
Definition: elog.h:157
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:631
static SMgrRelation RelationGetSmgr(Relation rel)
Definition: rel.h:544
int errmsg(const char *fmt,...)
Definition: elog.c:909
int Buffer
Definition: buf.h:23
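
A minimal usage sketch (not part of bufmgr.c) of reading a non-main fork, assuming the same approach fsm_readbuf() takes of tolerating an uninitialized page with RBM_ZERO_ON_ERROR; the helper name read_fsm_block is illustrative only:

/* Illustrative sketch only: read a block of the free-space-map fork. */
static Buffer
read_fsm_block(Relation rel, BlockNumber blkno)
{
    return ReadBufferExtended(rel, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR, NULL);
}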

◆ ReadBufferWithoutRelcache()

Buffer ReadBufferWithoutRelcache ( RelFileNode  rnode,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy 
)

Definition at line 780 of file bufmgr.c.

References Assert, InRecovery, InvalidBackendId, ReadBuffer_common(), and smgropen().

Referenced by XLogReadBufferExtended().

783 {
784  bool hit;
785 
786  SMgrRelation smgr = smgropen(rnode, InvalidBackendId);
787 
 788  Assert(InRecovery);
 789 
790  return ReadBuffer_common(smgr, RELPERSISTENCE_PERMANENT, forkNum, blockNum,
791  mode, strategy, &hit);
792 }
static PgChecksumMode mode
Definition: pg_checksums.c:65
SMgrRelation smgropen(RelFileNode rnode, BackendId backend)
Definition: smgr.c:146
#define InvalidBackendId
Definition: backendid.h:23
static Buffer ReadBuffer_common(SMgrRelation reln, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit)
Definition: bufmgr.c:801
bool InRecovery
Definition: xlogutils.c:52
#define Assert(condition)
Definition: c.h:804

◆ ReadRecentBuffer()

bool ReadRecentBuffer ( RelFileNode  rnode,
ForkNumber  forkNum,
BlockNumber  blockNum,
Buffer  recent_buffer 
)

Definition at line 618 of file bufmgr.c.

References Assert, BM_MAX_USAGE_COUNT, BM_VALID, BUF_STATE_GET_USAGECOUNT, BUF_USAGECOUNT_ONE, BufferIsLocal, BufferIsValid, BUFFERTAGS_EQUAL, CurrentResourceOwner, GetBufferDescriptor, GetPrivateRefCount(), INIT_BUFFERTAG, LocalRefCount, LockBufHdr(), pg_atomic_read_u32(), pg_atomic_write_u32(), PinBuffer(), PinBuffer_Locked(), ReservePrivateRefCountEntry(), ResourceOwnerEnlargeBuffers(), ResourceOwnerRememberBuffer(), BufferDesc::state, BufferDesc::tag, and UnlockBufHdr.

620 {
621  BufferDesc *bufHdr;
622  BufferTag tag;
623  uint32 buf_state;
624  bool have_private_ref;
625 
626  Assert(BufferIsValid(recent_buffer));
627 
 628  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
 629  ReservePrivateRefCountEntry();
 630  INIT_BUFFERTAG(tag, rnode, forkNum, blockNum);
631 
632  if (BufferIsLocal(recent_buffer))
633  {
634  bufHdr = GetBufferDescriptor(-recent_buffer - 1);
635  buf_state = pg_atomic_read_u32(&bufHdr->state);
636 
637  /* Is it still valid and holding the right tag? */
638  if ((buf_state & BM_VALID) && BUFFERTAGS_EQUAL(tag, bufHdr->tag))
639  {
640  /* Bump local buffer's ref and usage counts. */
 641  ResourceOwnerRememberBuffer(CurrentResourceOwner, recent_buffer);
 642  LocalRefCount[-recent_buffer - 1]++;
 643  if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
 644  pg_atomic_write_u32(&bufHdr->state,
645  buf_state + BUF_USAGECOUNT_ONE);
646 
647  return true;
648  }
649  }
650  else
651  {
652  bufHdr = GetBufferDescriptor(recent_buffer - 1);
653  have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
654 
655  /*
656  * Do we already have this buffer pinned with a private reference? If
657  * so, it must be valid and it is safe to check the tag without
658  * locking. If not, we have to lock the header first and then check.
659  */
660  if (have_private_ref)
661  buf_state = pg_atomic_read_u32(&bufHdr->state);
662  else
663  buf_state = LockBufHdr(bufHdr);
664 
665  if ((buf_state & BM_VALID) && BUFFERTAGS_EQUAL(tag, bufHdr->tag))
666  {
667  /*
668  * It's now safe to pin the buffer. We can't pin first and ask
669  * questions later, because it might confuse code paths
670  * like InvalidateBuffer() if we pinned a random non-matching
671  * buffer.
672  */
673  if (have_private_ref)
674  PinBuffer(bufHdr, NULL); /* bump pin count */
675  else
676  PinBuffer_Locked(bufHdr); /* pin for first time */
677 
678  return true;
679  }
680 
681  /* If we locked the header above, now unlock. */
682  if (!have_private_ref)
683  UnlockBufHdr(bufHdr, buf_state);
684  }
685 
686  return false;
687 }
static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
Definition: bufmgr.c:1686
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:959
#define BUF_USAGECOUNT_ONE
Definition: buf_internals.h:44
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
#define BUFFERTAGS_EQUAL(a, b)
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:946
#define BM_VALID
Definition: buf_internals.h:60
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4587
#define Assert(condition)
Definition: c.h:804
#define INIT_BUFFERTAG(a, xx_rnode, xx_forkNum, xx_blockNum)
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:1789
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define BM_MAX_USAGE_COUNT
Definition: buf_internals.h:77
BufferTag tag
#define UnlockBufHdr(desc, s)
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:215
pg_atomic_uint32 state
#define BUF_STATE_GET_USAGECOUNT(state)
Definition: buf_internals.h:50
static void pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:258
int32 * LocalRefCount
Definition: localbuf.c:45
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241
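
A minimal usage sketch (not part of bufmgr.c), assuming a caller that remembered a buffer id from an earlier pin of the same block and wants to retry it before doing a full lookup; the helper name read_block_with_hint is illustrative only:

/* Illustrative sketch only: try a remembered buffer, else do a normal read. */
static Buffer
read_block_with_hint(Relation rel, BlockNumber blkno, Buffer recent)
{
    if (BufferIsValid(recent) &&
        ReadRecentBuffer(rel->rd_node, MAIN_FORKNUM, blkno, recent))
        return recent;          /* pinned again; tag still matched */

    return ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, NULL);
}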

◆ RelationGetNumberOfBlocksInFork()

BlockNumber RelationGetNumberOfBlocksInFork ( Relation  relation,
ForkNumber  forkNum 
)

Definition at line 2935 of file bufmgr.c.

References Assert, RelationData::rd_rel, RelationGetSmgr(), smgrnblocks(), and table_relation_size().

Referenced by _hash_getnewbuf(), _hash_init(), autoprewarm_database_main(), get_raw_page_internal(), and pg_prewarm().

2936 {
2937  switch (relation->rd_rel->relkind)
2938  {
2939  case RELKIND_SEQUENCE:
2940  case RELKIND_INDEX:
2941  return smgrnblocks(RelationGetSmgr(relation), forkNum);
2942 
2943  case RELKIND_RELATION:
2944  case RELKIND_TOASTVALUE:
2945  case RELKIND_MATVIEW:
2946  {
2947  /*
2948  * Not every table AM uses BLCKSZ wide fixed size blocks.
2949  * Therefore tableam returns the size in bytes - but for the
2950  * purpose of this routine, we want the number of blocks.
2951  * Therefore divide, rounding up.
2952  */
2953  uint64 szbytes;
2954 
2955  szbytes = table_relation_size(relation, forkNum);
2956 
2957  return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
2958  }
2959  case RELKIND_VIEW:
2960  case RELKIND_COMPOSITE_TYPE:
2961  case RELKIND_FOREIGN_TABLE:
2962  case RELKIND_PARTITIONED_INDEX:
2963  case RELKIND_PARTITIONED_TABLE:
2964  default:
2965  Assert(false);
2966  break;
2967  }
2968 
2969  return 0; /* keep compiler quiet */
2970 }
Form_pg_class rd_rel
Definition: rel.h:109
static uint64 table_relation_size(Relation rel, ForkNumber forkNumber)
Definition: tableam.h:1838
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:548
#define Assert(condition)
Definition: c.h:804
static SMgrRelation RelationGetSmgr(Relation rel)
Definition: rel.h:544

◆ ReleaseAndReadBuffer()

Buffer ReleaseAndReadBuffer ( Buffer  buffer,
Relation  relation,
BlockNumber  blockNum 
)

Definition at line 1628 of file bufmgr.c.

References Assert, buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufferIsValid, CurrentResourceOwner, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, LocalRefCount, MAIN_FORKNUM, RelationData::rd_node, ReadBuffer(), RelFileNodeEquals, ResourceOwnerForgetBuffer(), buftag::rnode, BufferDesc::tag, and UnpinBuffer().

Referenced by _bt_relandgetbuf(), ginFindLeafPage(), heapam_index_fetch_tuple(), and heapam_scan_bitmap_next_block().

1631 {
1632  ForkNumber forkNum = MAIN_FORKNUM;
1633  BufferDesc *bufHdr;
1634 
1635  if (BufferIsValid(buffer))
1636  {
1637  Assert(BufferIsPinned(buffer));
1638  if (BufferIsLocal(buffer))
1639  {
1640  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
1641  if (bufHdr->tag.blockNum == blockNum &&
1642  RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
1643  bufHdr->tag.forkNum == forkNum)
1644  return buffer;
1645  ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
1646  LocalRefCount[-buffer - 1]--;
1647  }
1648  else
1649  {
1650  bufHdr = GetBufferDescriptor(buffer - 1);
1651  /* we have pin, so it's ok to examine tag without spinlock */
1652  if (bufHdr->tag.blockNum == blockNum &&
1653  RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
1654  bufHdr->tag.forkNum == forkNum)
1655  return buffer;
1656  UnpinBuffer(bufHdr, true);
1657  }
1658  }
1659 
1660  return ReadBuffer(relation, blockNum);
1661 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
ForkNumber forkNum
Definition: buf_internals.h:94
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
#define GetLocalBufferDescriptor(id)
#define GetBufferDescriptor(id)
ForkNumber
Definition: relpath.h:40
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1834
RelFileNode rd_node
Definition: rel.h:56
#define Assert(condition)
Definition: c.h:804
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:694
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:95
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:93
BufferTag tag
int32 * LocalRefCount
Definition: localbuf.c:45
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88
void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:968
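
An illustrative scan loop (not from the server sources; assumes the caller already holds a suitable lock on the relation): ReleaseAndReadBuffer() lets the loop trade the previous pin for a pin on the next block, and degenerates to a plain ReadBuffer() on the first iteration when InvalidBuffer is passed.

#include "postgres.h"

#include "storage/bufmgr.h"
#include "utils/rel.h"

/* hypothetical helper: visit every block of rel's main fork in order */
static void
walk_main_fork(Relation rel)
{
    BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
    Buffer      buf = InvalidBuffer;
    BlockNumber blkno;

    for (blkno = 0; blkno < nblocks; blkno++)
    {
        /* drops the pin on the previous block (if any) and pins blkno */
        buf = ReleaseAndReadBuffer(buf, rel, blkno);

        LockBuffer(buf, BUFFER_LOCK_SHARE);
        /* ... examine BufferGetPage(buf) here ... */
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    }

    if (BufferIsValid(buf))
        ReleaseBuffer(buf);
}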

◆ ReleaseBuffer()

void ReleaseBuffer ( Buffer  buffer)

Definition at line 3768 of file bufmgr.c.

References Assert, BufferIsLocal, BufferIsValid, CurrentResourceOwner, elog, ERROR, GetBufferDescriptor, LocalRefCount, ResourceOwnerForgetBuffer(), and UnpinBuffer().

Referenced by _bt_drop_lock_and_maybe_pin(), _bt_getbuf(), _bt_pagedel(), _bt_relbuf(), _bt_search_insert(), _bt_unlink_halfdead_page(), _hash_dropbuf(), _hash_getbuf_with_condlock_cleanup(), autoprewarm_database_main(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brin_vacuum_scan(), bringetbitmap(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapTerminate(), brinsummarize(), collect_corrupt_items(), collect_visibility_data(), entryLoadMoreItems(), ExecEndBitmapHeapScan(), ExecEndIndexOnlyScan(), ExecReScanBitmapHeapScan(), FreeBulkInsertState(), freeGinBtreeStack(), fsm_vacuum_page(), get_actual_variable_endpoint(), get_raw_page_internal(), GetRecordedFreeSpace(), ginDeletePage(), ginFindParents(), ginFinishSplit(), ginFreeScanKeys(), ginInsertCleanup(), GinNewBuffer(), ginScanToDelete(), gistdoinsert(), gistFindCorrectParent(), gistNewBuffer(), gistvacuum_delete_empty_pages(), heap_abort_speculative(), heap_delete(), heap_endscan(), heap_fetch(), heap_force_common(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_rescan(), heap_update(), heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapgetpage(), heapgettup(), heapgettup_pagemode(), lazy_scan_heap(), lazy_vacuum_heap_rel(), pg_prewarm(), pg_visibility(), pg_visibility_map(), pg_visibility_map_summary(), pgstatindex_impl(), ReadBufferBI(), RelationGetBufferForTuple(), ReleaseBulkInsertStatePin(), ResourceOwnerReleaseInternal(), revmap_get_buffer(), revmap_physical_extend(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), statapprox_heap(), summarize_range(), terminate_brin_buildstate(), tts_buffer_heap_clear(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), UnlockReleaseBuffer(), verify_heapam(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), and XLogReadBufferExtended().

3769 {
3770  if (!BufferIsValid(buffer))
3771  elog(ERROR, "bad buffer ID: %d", buffer);
3772 
3773  if (BufferIsLocal(buffer))
3774  {
3775  ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
3776 
3777  Assert(LocalRefCount[-buffer - 1] > 0);
3778  LocalRefCount[-buffer - 1]--;
3779  return;
3780  }
3781 
3782  UnpinBuffer(GetBufferDescriptor(buffer - 1), true);
3783 }
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
#define ERROR
Definition: elog.h:46
#define GetBufferDescriptor(id)
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1834
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define elog(elevel,...)
Definition: elog.h:232
int32 * LocalRefCount
Definition: localbuf.c:45
void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:968
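
A sketch of the usual pin/lock discipline (hypothetical helper; "rel" and "blkno" are assumed valid): the content lock is dropped as soon as the page has been examined, while the pin, released here by ReleaseBuffer(), keeps the page resident for as long as its contents are still referenced.

#include "postgres.h"

#include "storage/bufmgr.h"
#include "storage/bufpage.h"

/* hypothetical helper: report the free space on one block of "rel" */
static void
report_free_space(Relation rel, BlockNumber blkno)
{
    Buffer  buf = ReadBuffer(rel, blkno);
    Size    freespace;

    LockBuffer(buf, BUFFER_LOCK_SHARE);
    freespace = PageGetFreeSpace(BufferGetPage(buf));
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);

    /* the pin alone is still held here; the page cannot be evicted */
    elog(DEBUG1, "block %u: %zu bytes free", blkno, freespace);

    ReleaseBuffer(buf);
}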

◆ TestForOldSnapshot()

static void TestForOldSnapshot ( Snapshot  snapshot,
Relation  relation,
Page  page 
)
inline static

Definition at line 278 of file bufmgr.h.

References Assert, old_snapshot_threshold, PageGetLSN, SNAPSHOT_MVCC, SNAPSHOT_TOAST, TestForOldSnapshot_impl(), and XLogRecPtrIsInvalid.

Referenced by _bt_get_endpoint(), _bt_moveright(), _bt_readnextpage(), _bt_walk_left(), _hash_first(), _hash_next(), _hash_readnext(), _hash_readprev(), blgetbitmap(), brinGetTupleForHeapBlock(), brinRevmapInitialize(), collectMatchBitmap(), collectMatchesForHeapRow(), ginFindLeafPage(), gistScanPage(), heap_fetch(), heap_get_latest_tid(), heapgetpage(), heapgettup(), heapgettup_pagemode(), scanGetCandidate(), scanPendingInsert(), and spgWalk().

279 {
280  Assert(relation != NULL);
281 
282  if (old_snapshot_threshold >= 0
283  && (snapshot) != NULL
284  && ((snapshot)->snapshot_type == SNAPSHOT_MVCC
285  || (snapshot)->snapshot_type == SNAPSHOT_TOAST)
286  && !XLogRecPtrIsInvalid((snapshot)->lsn)
287  && PageGetLSN(page) > (snapshot)->lsn)
288  TestForOldSnapshot_impl(snapshot, relation);
289 }
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define Assert(condition)
Definition: c.h:804
#define PageGetLSN(page)
Definition: bufpage.h:366
int old_snapshot_threshold
Definition: snapmgr.c:78
void TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
Definition: bufmgr.c:4848
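
A sketch of the calling pattern used by the scan routines listed above (the helper name is illustrative): the check is made after the page has been locked and before any of its tuples are returned, so that a scan using an over-aged MVCC snapshot errors out with "snapshot too old" instead of silently missing early-pruned rows.

#include "postgres.h"

#include "storage/bufmgr.h"
#include "utils/snapmgr.h"

/* hypothetical helper: lock a pinned page and vet it against the snapshot */
static Page
lock_and_check_page(Relation rel, Snapshot snapshot, Buffer buf)
{
    Page    page;

    LockBuffer(buf, BUFFER_LOCK_SHARE);
    page = BufferGetPage(buf);

    /* ereports ERRCODE_SNAPSHOT_TOO_OLD if the snapshot is too old */
    TestForOldSnapshot(snapshot, rel, page);

    return page;    /* caller reads tuples, then unlocks and unpins */
}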

◆ TestForOldSnapshot_impl()

void TestForOldSnapshot_impl ( Snapshot  snapshot,
Relation  relation 
)

Definition at line 4848 of file bufmgr.c.

References ereport, errcode(), errmsg(), ERROR, GetOldSnapshotThresholdTimestamp(), and RelationAllowsEarlyPruning.

Referenced by TestForOldSnapshot().

4849 {
4850  if (RelationAllowsEarlyPruning(relation)
4851  && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
4852  ereport(ERROR,
4853  (errcode(ERRCODE_SNAPSHOT_TOO_OLD),
4854  errmsg("snapshot too old")));
4855 }
TimestampTz GetOldSnapshotThresholdTimestamp(void)
Definition: snapmgr.c:1675
#define RelationAllowsEarlyPruning(rel)
Definition: snapmgr.h:38
int errcode(int sqlerrcode)
Definition: elog.c:698
#define ERROR
Definition: elog.h:46
#define ereport(elevel,...)
Definition: elog.h:157
int errmsg(const char *fmt,...)
Definition: elog.c:909

◆ UnlockBuffers()

void UnlockBuffers ( void  )

Definition at line 3979 of file bufmgr.c.

References BM_PIN_COUNT_WAITER, buf, LockBufHdr(), MyProcPid, PinCountWaitBuf, UnlockBufHdr, and BufferDesc::wait_backend_pid.

Referenced by AbortSubTransaction(), AbortTransaction(), AtProcExit_Buffers(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), and WalWriterMain().

3980 {
3981  BufferDesc *buf = PinCountWaitBuf;
3982 
3983  if (buf)
3984  {
3985  uint32 buf_state;
3986 
3987  buf_state = LockBufHdr(buf);
3988 
3989  /*
3990  * Don't complain if flag bit not set; it could have been reset but we
3991  * got a cancel/die interrupt before getting the signal.
3992  */
3993  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
3994  buf->wait_backend_pid == MyProcPid)
3995  buf_state &= ~BM_PIN_COUNT_WAITER;
3996 
3997  UnlockBufHdr(buf, buf_state);
3998 
3999  PinCountWaitBuf = NULL;
4000  }
4001 }
int MyProcPid
Definition: globals.c:43
int wait_backend_pid
static char * buf
Definition: pg_test_fsync.c:68
unsigned int uint32
Definition: c.h:441
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4587
#define UnlockBufHdr(desc, s)
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:165
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:65
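
A heavily abridged sketch of the error-recovery pattern in the auxiliary-process callers listed above (modelled on BackgroundWriterMain(); the helper and most cleanup steps are placeholders): after the top-level sigsetjmp handler regains control, UnlockBuffers() clears any BM_PIN_COUNT_WAITER flag this backend may have left set when an error interrupted a LockBufferForCleanup() wait.

#include "postgres.h"

#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/lwlock.h"

/* hypothetical auxiliary-process main loop, heavily abridged */
static void
toy_aux_process_main(void)
{
    sigjmp_buf  local_sigjmp_buf;

    if (sigsetjmp(local_sigjmp_buf, 1) != 0)
    {
        /* an error landed here: minimal subset of AbortTransaction() */
        error_context_stack = NULL;
        HOLD_INTERRUPTS();
        EmitErrorReport();

        LWLockReleaseAll();
        UnlockBuffers();        /* clear any pin-count-waiter flag we set */

        /* ... further per-process cleanup elided ... */
        RESUME_INTERRUPTS();
    }

    /* arrange for elog(ERROR) to longjmp back to the handler above */
    PG_exception_stack = &local_sigjmp_buf;

    for (;;)
    {
        /* ... normal work that may pin/lock buffers and can error out ... */
    }
}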

◆ UnlockReleaseBuffer()

void UnlockReleaseBuffer ( Buffer  buffer)

Definition at line 3791 of file bufmgr.c.

References BUFFER_LOCK_UNLOCK, LockBuffer(), and ReleaseBuffer().

Referenced by _bt_clear_incomplete_split(), _bt_restore_meta(), _hash_relbuf(), allocNewBuffer(), AlterSequence(), blbulkdelete(), blgetbitmap(), blinsert(), BloomInitMetapage(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinGetStats(), brinRevmapDesummarizeRange(), bt_metap(), bt_page_items_internal(), bt_page_stats_internal(), bt_recheck_sibling_links(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), createPostingTree(), do_setval(), doPickSplit(), entryLoadMoreItems(), fill_seq_with_data(), flushCachedPage(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_set_and_search(), generic_redo(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginFindParents(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), ginScanToDelete(), ginStepRight(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistbufferinginserttuples(), gistbuild(), gistbuildempty(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_insert(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), heapam_scan_analyze_next_tuple(), initBloomState(), lazy_scan_heap(), lazy_vacuum_heap_rel(), log_newpage_range(), moveLeafs(), nextval_internal(), palloc_btree_page(), pg_sequence_last_value(), pg_visibility(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), ResetSequence(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistUpdateMetaPage(), spgMatchNodeAction(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), 
spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), statapprox_heap(), verify_heapam(), visibilitymap_prepare_truncate(), writeListPage(), xlog_redo(), and XLogRecordPageWithFreeSpace().

3792 {
3793  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3794  ReleaseBuffer(buffer);
3795 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3768
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4007
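
A minimal sketch of the read-modify pattern this convenience routine supports (hypothetical helper; real callers also write WAL for their changes): UnlockReleaseBuffer(buf) is equivalent to LockBuffer(buf, BUFFER_LOCK_UNLOCK) followed by ReleaseBuffer(buf).

#include "postgres.h"

#include "storage/bufmgr.h"

/* hypothetical helper: exclusive-lock one block, modify it, then let go */
static void
touch_block(Relation rel, BlockNumber blkno)
{
    Buffer  buf = ReadBuffer(rel, blkno);

    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

    /* ... modify BufferGetPage(buf), then MarkBufferDirty(buf) ... */

    UnlockReleaseBuffer(buf);   /* drop the content lock, then the pin */
}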

Variable Documentation

◆ backend_flush_after

int backend_flush_after

Definition at line 158 of file bufmgr.c.

Referenced by InitBufferPool().

◆ bgwriter_flush_after

int bgwriter_flush_after

Definition at line 157 of file bufmgr.c.

Referenced by BackgroundWriterMain().

◆ bgwriter_lru_maxpages

int bgwriter_lru_maxpages

Definition at line 133 of file bufmgr.c.

Referenced by BgBufferSync().

◆ bgwriter_lru_multiplier

double bgwriter_lru_multiplier

Definition at line 134 of file bufmgr.c.

Referenced by BgBufferSync().
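
bgwriter_lru_maxpages and bgwriter_lru_multiplier work together in BgBufferSync(). A heavily simplified restatement of that relationship (the real function adds allocation smoothing, scan-point bookkeeping, and hibernation logic; the helper below is purely illustrative):

#include "postgres.h"

#include "storage/bufmgr.h"

/*
 * Simplified restatement of the BgBufferSync() write budget: try to keep
 * bgwriter_lru_multiplier times the recently observed buffer allocation
 * rate clean ahead of the clock sweep, but never write more than
 * bgwriter_lru_maxpages buffers in one round (0 disables background writes).
 */
static int
bgwriter_round_budget(int recent_alloc)
{
    int     upcoming_alloc_est = (int) (recent_alloc * bgwriter_lru_multiplier);

    if (bgwriter_lru_maxpages <= 0)
        return 0;               /* background writing disabled */

    return Min(upcoming_alloc_est, bgwriter_lru_maxpages);
}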

◆ BufferBlocks

PGDLLIMPORT char* BufferBlocks

Definition at line 21 of file buf_init.c.

Referenced by InitBufferPool().

◆ checkpoint_flush_after

int checkpoint_flush_after

Definition at line 156 of file bufmgr.c.

Referenced by BufferSync().

◆ effective_io_concurrency

int effective_io_concurrency

Definition at line 143 of file bufmgr.c.

Referenced by get_tablespace_io_concurrency(), and tablespace_reloptions().
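
effective_io_concurrency (and its per-tablespace override, fetched through get_tablespace_io_concurrency()) bounds how far read-ahead is issued. An illustrative sketch using PrefetchBuffer() from this header, with a hypothetical helper name:

#include "postgres.h"

#include "storage/bufmgr.h"
#include "utils/rel.h"
#include "utils/spccache.h"

/* hypothetical helper: prefetch up to the tablespace's io_concurrency blocks */
static void
prefetch_ahead(Relation rel, BlockNumber next_block, BlockNumber nblocks)
{
    int         distance = get_tablespace_io_concurrency(rel->rd_rel->reltablespace);
    BlockNumber stop = Min(next_block + (BlockNumber) distance, nblocks);
    BlockNumber blkno;

    for (blkno = next_block; blkno < stop; blkno++)
        PrefetchBuffer(rel, MAIN_FORKNUM, blkno);
}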

◆ LocalBufferBlockPointers

PGDLLIMPORT Block* LocalBufferBlockPointers

Definition at line 44 of file localbuf.c.

Referenced by InitLocalBuffers().

◆ LocalRefCount

int32* LocalRefCount

Definition at line 45 of file localbuf.c.

◆ maintenance_io_concurrency

int maintenance_io_concurrency

◆ NBuffers

◆ NLocBuffer

◆ track_io_timing

bool track_io_timing

◆ zero_damaged_pages

bool zero_damaged_pages

Definition at line 132 of file bufmgr.c.

Referenced by mdread(), and ReadBuffer_common().
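
A condensed sketch of how ReadBuffer_common() consults this flag (simplified from the server code; variable names are those of the surrounding function): when a freshly read page fails verification, zero_damaged_pages downgrades the error to a warning and substitutes an all-zeroes page so the rest of the relation remains readable.

/* inside ReadBuffer_common(), after the block has been read (condensed) */
if (!PageIsVerifiedExtended((Page) bufBlock, blockNum,
                            PIV_LOG_WARNING | PIV_REPORT_STAT))
{
    if (mode == RBM_ZERO_ON_ERROR || zero_damaged_pages)
    {
        ereport(WARNING,
                (errcode(ERRCODE_DATA_CORRUPTED),
                 errmsg("invalid page in block %u of relation %s; zeroing out page",
                        blockNum, relpath(smgr->smgr_rnode, forkNum))));
        MemSet((char *) bufBlock, 0, BLCKSZ);
    }
    else
        ereport(ERROR,
                (errcode(ERRCODE_DATA_CORRUPTED),
                 errmsg("invalid page in block %u of relation %s",
                        blockNum, relpath(smgr->smgr_rnode, forkNum))));
}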