PostgreSQL Source Code  git master
bufmgr.h File Reference
#include "storage/block.h"
#include "storage/buf.h"
#include "storage/bufpage.h"
#include "storage/relfilenode.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
Include dependency graph for bufmgr.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  PrefetchBufferResult
 

Macros

#define MAX_IO_CONCURRENCY   1000
 
#define P_NEW   InvalidBlockNumber /* grow the file to get a new page */
 
#define BUFFER_LOCK_UNLOCK   0
 
#define BUFFER_LOCK_SHARE   1
 
#define BUFFER_LOCK_EXCLUSIVE   2
 
#define BufferIsValid(bufnum)
 
#define BufferGetBlock(buffer)
 
#define BufferGetPageSize(buffer)
 
#define BufferGetPage(buffer)   ((Page)BufferGetBlock(buffer))
 
#define RelationGetNumberOfBlocks(reln)   RelationGetNumberOfBlocksInFork(reln, MAIN_FORKNUM)
 

Typedefs

typedef void * Block
 
typedef enum BufferAccessStrategyType BufferAccessStrategyType
 
typedef struct PrefetchBufferResult PrefetchBufferResult
 

Enumerations

enum  BufferAccessStrategyType { BAS_NORMAL, BAS_BULKREAD, BAS_BULKWRITE, BAS_VACUUM }
 
enum  ReadBufferMode {
  RBM_NORMAL, RBM_ZERO_AND_LOCK, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_ON_ERROR,
  RBM_NORMAL_NO_LOG
}
 

Functions

PrefetchBufferResult PrefetchSharedBuffer (struct SMgrRelationData *smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
 
PrefetchBufferResult PrefetchBuffer (Relation reln, ForkNumber forkNum, BlockNumber blockNum)
 
bool ReadRecentBuffer (RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, Buffer recent_buffer)
 
Buffer ReadBuffer (Relation reln, BlockNumber blockNum)
 
Buffer ReadBufferExtended (Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
 
Buffer ReadBufferWithoutRelcache (RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
 
void ReleaseBuffer (Buffer buffer)
 
void UnlockReleaseBuffer (Buffer buffer)
 
void MarkBufferDirty (Buffer buffer)
 
void IncrBufferRefCount (Buffer buffer)
 
Buffer ReleaseAndReadBuffer (Buffer buffer, Relation relation, BlockNumber blockNum)
 
void InitBufferPool (void)
 
void InitBufferPoolAccess (void)
 
void InitBufferPoolBackend (void)
 
void AtEOXact_Buffers (bool isCommit)
 
void PrintBufferLeakWarning (Buffer buffer)
 
void CheckPointBuffers (int flags)
 
BlockNumber BufferGetBlockNumber (Buffer buffer)
 
BlockNumber RelationGetNumberOfBlocksInFork (Relation relation, ForkNumber forkNum)
 
void FlushOneBuffer (Buffer buffer)
 
void FlushRelationBuffers (Relation rel)
 
void FlushRelationsAllBuffers (struct SMgrRelationData **smgrs, int nrels)
 
void FlushDatabaseBuffers (Oid dbid)
 
void DropRelFileNodeBuffers (struct SMgrRelationData *smgr_reln, ForkNumber *forkNum, int nforks, BlockNumber *firstDelBlock)
 
void DropRelFileNodesAllBuffers (struct SMgrRelationData **smgr_reln, int nnodes)
 
void DropDatabaseBuffers (Oid dbid)
 
bool BufferIsPermanent (Buffer buffer)
 
XLogRecPtr BufferGetLSNAtomic (Buffer buffer)
 
Size BufferShmemSize (void)
 
void BufferGetTag (Buffer buffer, RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum)
 
void MarkBufferDirtyHint (Buffer buffer, bool buffer_std)
 
void UnlockBuffers (void)
 
void LockBuffer (Buffer buffer, int mode)
 
bool ConditionalLockBuffer (Buffer buffer)
 
void LockBufferForCleanup (Buffer buffer)
 
bool ConditionalLockBufferForCleanup (Buffer buffer)
 
bool IsBufferCleanupOK (Buffer buffer)
 
bool HoldingBufferPinThatDelaysRecovery (void)
 
void AbortBufferIO (void)
 
void BufmgrCommit (void)
 
bool BgBufferSync (struct WritebackContext *wb_context)
 
void AtProcExit_LocalBuffers (void)
 
void TestForOldSnapshot_impl (Snapshot snapshot, Relation relation)
 
BufferAccessStrategy GetAccessStrategy (BufferAccessStrategyType btype)
 
void FreeAccessStrategy (BufferAccessStrategy strategy)
 
static void TestForOldSnapshot (Snapshot snapshot, Relation relation, Page page)
 

Variables

PGDLLIMPORT int NBuffers
 
bool zero_damaged_pages
 
int bgwriter_lru_maxpages
 
double bgwriter_lru_multiplier
 
bool track_io_timing
 
int effective_io_concurrency
 
int maintenance_io_concurrency
 
int checkpoint_flush_after
 
int backend_flush_after
 
int bgwriter_flush_after
 
PGDLLIMPORT char * BufferBlocks
 
PGDLLIMPORT int NLocBuffer
 
PGDLLIMPORT Block * LocalBufferBlockPointers
 
PGDLLIMPORT int32 * LocalRefCount
 

Macro Definition Documentation

◆ BUFFER_LOCK_EXCLUSIVE

◆ BUFFER_LOCK_SHARE

◆ BUFFER_LOCK_UNLOCK

#define BUFFER_LOCK_UNLOCK   0

Definition at line 96 of file bufmgr.h.

Referenced by _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_getcachedmetap(), _hash_init(), _hash_kill_items(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), blinsert(), BloomNewBuffer(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_page_cleanup(), bringetbitmap(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinsummarize(), ConditionalLockBufferForCleanup(), fsm_readbuf(), fsm_search_avail(), fsm_vacuum_page(), get_raw_page_internal(), GetVisibilityMapPins(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_page_prune_opt(), heap_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_for_cluster(), heapam_scan_bitmap_next_block(), heapam_scan_sample_next_tuple(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), lazy_scan_heap(), LockBuffer(), LockBufferForCleanup(), pgrowlocks(), pgstat_heap(), pgstatindex_impl(), RelationGetBufferForTuple(), revmap_physical_extend(), SpGistNewBuffer(), summarize_range(), UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_set(), vm_readbuf(), and XLogReadBufferExtended().

◆ BufferGetBlock

#define BufferGetBlock (   buffer)
Value:
( \
AssertMacro(BufferIsValid(buffer)), \
BufferIsLocal(buffer) ? \
LocalBufferBlockPointers[-(buffer) - 1] \
: \
(Block) (BufferBlocks + ((Size) ((buffer) - 1)) * BLCKSZ) \
)
PGDLLIMPORT char * BufferBlocks
Definition: buf_init.c:21
size_t Size
Definition: c.h:540
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
void * Block
Definition: bufmgr.h:24

Definition at line 136 of file bufmgr.h.

Referenced by XLogSaveBufferForHint().

◆ BufferGetPage

#define BufferGetPage (   buffer)    ((Page)BufferGetBlock(buffer))

Definition at line 169 of file bufmgr.h.

Referenced by _bt_binsrch(), _bt_binsrch_insert(), _bt_bottomupdel_pass(), _bt_check_unique(), _bt_checkpage(), _bt_clear_incomplete_split(), _bt_conditionallockbuf(), _bt_dedup_pass(), _bt_delete_or_dedup_one_page(), _bt_delitems_delete(), _bt_delitems_delete_check(), _bt_delitems_vacuum(), _bt_endpoint(), _bt_findinsertloc(), _bt_finish_split(), _bt_get_endpoint(), _bt_getbuf(), _bt_getmeta(), _bt_getroot(), _bt_getstackbuf(), _bt_gettrueroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_killitems(), _bt_leftsib_splitflag(), _bt_lock_subtree_parent(), _bt_lockbuf(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newroot(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_rightsib_halfdeadflag(), _bt_search(), _bt_search_insert(), _bt_set_cleanup_info(), _bt_simpledel_pass(), _bt_split(), _bt_stepright(), _bt_unlink_halfdead_page(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _bt_vacuum_needs_cleanup(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_get_newblock_from_oldbucket(), _hash_get_oldblock_from_newbucket(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_getinitbuf(), _hash_getnewbuf(), _hash_init(), _hash_init_metabuffer(), _hash_initbitmapbuffer(), _hash_initbuf(), _hash_kill_items(), _hash_next(), _hash_pgaddmultitup(), _hash_pgaddtup(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), allocNewBuffer(), blgetbitmap(), blinsert(), BloomNewBuffer(), blvacuumcleanup(), brin_can_do_samepage_update(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_start_evacuating_page(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), 
brinbuild(), brinbuildempty(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinSetHeapBlockItemptr(), bt_metap(), bt_page_items_internal(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), btvacuumpage(), BufferGetLSNAtomic(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), collectMatchesForHeapRow(), count_nondeletable_pages(), createPostingTree(), dataBeginPlaceToPage(), dataBeginPlaceToPageInternal(), dataBeginPlaceToPageLeaf(), dataExecPlaceToPage(), dataExecPlaceToPageInternal(), dataLocateItem(), dataPlaceToPageLeafRecompress(), dataPrepareDownlink(), dataSplitPageInternal(), do_setval(), doPickSplit(), entryExecPlaceToPage(), entryIsEnoughSpace(), entryLoadMoreItems(), entryLocateEntry(), entryLocateLeafEntry(), entryPrepareDownlink(), entrySplitPage(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), generic_redo(), GenericXLogFinish(), GenericXLogRegisterBuffer(), get_raw_page_internal(), GetBTPageStatistics(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginbulkdelete(), ginDeletePage(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), GinInitBuffer(), GinInitMetabuffer(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertData(), ginRedoInsertEntry(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumEntryPage(), 
ginVacuumPostingTreeLeaf(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), GISTInitBuffer(), gistkillitems(), gistMemorizeAllDownlinks(), gistNewBuffer(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hash_bitmap_info(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_hot_search_buffer(), heap_index_delete_tuples(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_is_all_visible(), heap_page_prune(), heap_page_prune_execute(), heap_page_prune_opt(), heap_prune_chain(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_validate_scan(), heapam_scan_analyze_next_tuple(), heapam_scan_bitmap_next_block(), heapam_scan_bitmap_next_tuple(), heapam_scan_sample_next_tuple(), heapgetpage(), heapgettup(), heapgettup_pagemode(), index_compute_xid_horizon_for_tuples(), initBloomState(), lazy_check_needs_freeze(), 
lazy_scan_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), log_heap_update(), log_newpage_buffer(), log_newpage_range(), log_split_page(), MarkBufferDirtyHint(), moveLeafs(), moveRightIfItNeeded(), nextval_internal(), palloc_btree_page(), pg_visibility(), pgstat_btree_page(), pgstat_gist_page(), pgstat_hash_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistInitBuffer(), SpGistNewBuffer(), SpGistSetLastUsedPage(), SpGistUpdateMetaPage(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), terminate_brin_buildstate(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), verify_heapam(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), writeListPage(), XLogCheckBufferNeedsBackup(), XLogReadBufferExtended(), XLogReadBufferForRedoExtended(), XLogRecordPageWithFreeSpace(), XLogRegisterBuffer(), XLogSaveBufferForHint(), and xlogVacuumPage().

◆ BufferGetPageSize

◆ BufferIsValid

#define BufferIsValid (   bufnum)
Value:
( \
AssertMacro((bufnum) <= NBuffers && (bufnum) >= -NLocBuffer), \
(bufnum) != InvalidBuffer \
)
#define InvalidBuffer
Definition: buf.h:25
PGDLLIMPORT int NLocBuffer
Definition: localbuf.c:41
PGDLLIMPORT int NBuffers
Definition: globals.c:135

Definition at line 123 of file bufmgr.h.

Referenced by _bt_clear_incomplete_split(), _bt_endpoint(), _bt_first(), _bt_get_endpoint(), _bt_insertonpg(), _bt_readpage(), _bt_relandgetbuf(), _bt_search(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_dropscanbuf(), _hash_freeovflpage(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_readnext(), _hash_readpage(), _hash_readprev(), autoprewarm_database_main(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinGetTupleForHeapBlock(), brininsert(), brinsummarize(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), BufferGetLSNAtomic(), BufferIsPermanent(), checkXLogConsistency(), ConditionalLockBufferForCleanup(), doPickSplit(), entryGetItem(), entryLoadMoreItems(), ExecStoreBufferHeapTuple(), ExecStorePinnedBufferHeapTuple(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_vacuum_page(), generic_redo(), GetPrivateRefCount(), GetPrivateRefCountEntry(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageUpdateRecord(), gistXLogSplit(), gistXLogUpdate(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_endscan(), heap_index_delete_tuples(), 
heap_lock_tuple(), heap_rescan(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), heapgettup_pagemode(), IsBufferCleanupOK(), lazy_scan_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), log_heap_visible(), MarkBufferDirty(), MarkBufferDirtyHint(), PrintBufferLeakWarning(), ReadRecentBuffer(), ReleaseAndReadBuffer(), ReleaseBuffer(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), statapprox_heap(), tts_buffer_heap_clear(), tts_buffer_heap_copyslot(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), visibilitymap_prepare_truncate(), visibilitymap_set(), XLogPrefetcherScanBlocks(), XLogReadBufferExtended(), and XLogReadBufferForRedoExtended().

◆ MAX_IO_CONCURRENCY

#define MAX_IO_CONCURRENCY   1000

Definition at line 88 of file bufmgr.h.

◆ P_NEW

◆ RelationGetNumberOfBlocks

Typedef Documentation

◆ Block

typedef void* Block

Definition at line 24 of file bufmgr.h.

◆ BufferAccessStrategyType

◆ PrefetchBufferResult

Enumeration Type Documentation

◆ BufferAccessStrategyType

Enumerator
BAS_NORMAL 
BAS_BULKREAD 
BAS_BULKWRITE 
BAS_VACUUM 

Definition at line 27 of file bufmgr.h.

28 {
29  BAS_NORMAL, /* Normal random access */
30  BAS_BULKREAD, /* Large read-only scan (hint bit updates are
31  * ok) */
32  BAS_BULKWRITE, /* Large multi-block write (e.g. COPY IN) */
33  BAS_VACUUM /* VACUUM */
BufferAccessStrategyType
Definition: bufmgr.h:27

◆ ReadBufferMode

Enumerator
RBM_NORMAL 
RBM_ZERO_AND_LOCK 
RBM_ZERO_AND_CLEANUP_LOCK 
RBM_ZERO_ON_ERROR 
RBM_NORMAL_NO_LOG 

Definition at line 37 of file bufmgr.h.

38 {
39  RBM_NORMAL, /* Normal read */
40  RBM_ZERO_AND_LOCK, /* Don't read from disk, caller will
41  * initialize. Also locks the page. */
42  RBM_ZERO_AND_CLEANUP_LOCK, /* Like RBM_ZERO_AND_LOCK, but locks the page
43  * in "cleanup" mode */
44  RBM_ZERO_ON_ERROR, /* Read, but return an all-zeros page on error */
45  RBM_NORMAL_NO_LOG /* Don't log page as invalid during WAL
46  * replay; otherwise same as RBM_NORMAL */
ReadBufferMode
Definition: bufmgr.h:37

Function Documentation

◆ AbortBufferIO()

void AbortBufferIO ( void  )

Definition at line 4493 of file bufmgr.c.

References Assert, buftag::blockNum, BM_DIRTY, BM_IO_ERROR, BM_IO_IN_PROGRESS, BM_VALID, buf, ereport, errcode(), errdetail(), errmsg(), buftag::forkNum, InProgressBuf, IsForInput, LockBufHdr(), pfree(), relpathperm, buftag::rnode, BufferDesc::tag, TerminateBufferIO(), UnlockBufHdr, and WARNING.

Referenced by AbortSubTransaction(), AbortTransaction(), AtProcExit_Buffers(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), and WalWriterMain().

4494 {
4496 
4497  if (buf)
4498  {
4499  uint32 buf_state;
4500 
4501  buf_state = LockBufHdr(buf);
4502  Assert(buf_state & BM_IO_IN_PROGRESS);
4503  if (IsForInput)
4504  {
4505  Assert(!(buf_state & BM_DIRTY));
4506 
4507  /* We'd better not think buffer is valid yet */
4508  Assert(!(buf_state & BM_VALID));
4509  UnlockBufHdr(buf, buf_state);
4510  }
4511  else
4512  {
4513  Assert(buf_state & BM_DIRTY);
4514  UnlockBufHdr(buf, buf_state);
4515  /* Issue notice if this is not the first failure... */
4516  if (buf_state & BM_IO_ERROR)
4517  {
4518  /* Buffer is pinned, so we can read tag without spinlock */
4519  char *path;
4520 
4521  path = relpathperm(buf->tag.rnode, buf->tag.forkNum);
4522  ereport(WARNING,
4523  (errcode(ERRCODE_IO_ERROR),
4524  errmsg("could not write block %u of %s",
4525  buf->tag.blockNum, path),
4526  errdetail("Multiple failures --- write error might be permanent.")));
4527  pfree(path);
4528  }
4529  }
4530  TerminateBufferIO(buf, false, BM_IO_ERROR);
4531  }
4532 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:83
ForkNumber forkNum
Definition: buf_internals.h:94
int errcode(int sqlerrcode)
Definition: elog.c:698
#define BM_DIRTY
Definition: buf_internals.h:59
static BufferDesc * InProgressBuf
Definition: bufmgr.c:161
void pfree(void *pointer)
Definition: mcxt.c:1169
static char * buf
Definition: pg_test_fsync.c:68
int errdetail(const char *fmt,...)
Definition: elog.c:1042
unsigned int uint32
Definition: c.h:441
static bool IsForInput
Definition: bufmgr.c:162
#define WARNING
Definition: elog.h:40
static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits)
Definition: bufmgr.c:4461
#define BM_VALID
Definition: buf_internals.h:60
#define ereport(elevel,...)
Definition: elog.h:157
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4603
#define Assert(condition)
Definition: c.h:804
BlockNumber blockNum
Definition: buf_internals.h:95
RelFileNode rnode
Definition: buf_internals.h:93
#define BM_IO_ERROR
Definition: buf_internals.h:63
BufferTag tag
int errmsg(const char *fmt,...)
Definition: elog.c:909
#define UnlockBufHdr(desc, s)
#define BM_IO_IN_PROGRESS
Definition: buf_internals.h:62

◆ AtEOXact_Buffers()

void AtEOXact_Buffers ( bool  isCommit)

Definition at line 2576 of file bufmgr.c.

References Assert, AtEOXact_LocalBuffers(), CheckForBufferLeaks(), and PrivateRefCountOverflowed.

Referenced by AbortTransaction(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), CommitTransaction(), PrepareTransaction(), and WalWriterMain().

2577 {
2579 
2580  AtEOXact_LocalBuffers(isCommit);
2581 
2583 }
static int32 PrivateRefCountOverflowed
Definition: bufmgr.c:199
#define Assert(condition)
Definition: c.h:804
static void CheckForBufferLeaks(void)
Definition: bufmgr.c:2650
void AtEOXact_LocalBuffers(bool isCommit)
Definition: localbuf.c:577

◆ AtProcExit_LocalBuffers()

void AtProcExit_LocalBuffers ( void  )

Definition at line 588 of file localbuf.c.

References CheckForLocalBufferLeaks().

Referenced by AtProcExit_Buffers().

589 {
590  /*
591  * We shouldn't be holding any remaining pins; if we are, and assertions
592  * aren't enabled, we'll fail later in DropRelFileNodeBuffers while trying
593  * to drop the temp rels.
594  */
596 }
static void CheckForLocalBufferLeaks(void)
Definition: localbuf.c:548

◆ BgBufferSync()

bool BgBufferSync ( struct WritebackContext wb_context)

Definition at line 2206 of file bufmgr.c.

References Assert, bgwriter_lru_maxpages, bgwriter_lru_multiplier, BgWriterDelay, BgWriterStats, BUF_REUSABLE, BUF_WRITTEN, CurrentResourceOwner, DEBUG1, DEBUG2, elog, PgStat_MsgBgWriter::m_buf_alloc, PgStat_MsgBgWriter::m_buf_written_clean, PgStat_MsgBgWriter::m_maxwritten_clean, NBuffers, ResourceOwnerEnlargeBuffers(), StrategySyncStart(), and SyncOneBuffer().

Referenced by BackgroundWriterMain().

2207 {
2208  /* info obtained from freelist.c */
2209  int strategy_buf_id;
2210  uint32 strategy_passes;
2211  uint32 recent_alloc;
2212 
2213  /*
2214  * Information saved between calls so we can determine the strategy
2215  * point's advance rate and avoid scanning already-cleaned buffers.
2216  */
2217  static bool saved_info_valid = false;
2218  static int prev_strategy_buf_id;
2219  static uint32 prev_strategy_passes;
2220  static int next_to_clean;
2221  static uint32 next_passes;
2222 
2223  /* Moving averages of allocation rate and clean-buffer density */
2224  static float smoothed_alloc = 0;
2225  static float smoothed_density = 10.0;
2226 
2227  /* Potentially these could be tunables, but for now, not */
2228  float smoothing_samples = 16;
2229  float scan_whole_pool_milliseconds = 120000.0;
2230 
2231  /* Used to compute how far we scan ahead */
2232  long strategy_delta;
2233  int bufs_to_lap;
2234  int bufs_ahead;
2235  float scans_per_alloc;
2236  int reusable_buffers_est;
2237  int upcoming_alloc_est;
2238  int min_scan_buffers;
2239 
2240  /* Variables for the scanning loop proper */
2241  int num_to_scan;
2242  int num_written;
2243  int reusable_buffers;
2244 
2245  /* Variables for final smoothed_density update */
2246  long new_strategy_delta;
2247  uint32 new_recent_alloc;
2248 
2249  /*
2250  * Find out where the freelist clock sweep currently is, and how many
2251  * buffer allocations have happened since our last call.
2252  */
2253  strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
2254 
2255  /* Report buffer alloc counts to pgstat */
2256  BgWriterStats.m_buf_alloc += recent_alloc;
2257 
2258  /*
2259  * If we're not running the LRU scan, just stop after doing the stats
2260  * stuff. We mark the saved state invalid so that we can recover sanely
2261  * if LRU scan is turned back on later.
2262  */
2263  if (bgwriter_lru_maxpages <= 0)
2264  {
2265  saved_info_valid = false;
2266  return true;
2267  }
2268 
2269  /*
2270  * Compute strategy_delta = how many buffers have been scanned by the
2271  * clock sweep since last time. If first time through, assume none. Then
2272  * see if we are still ahead of the clock sweep, and if so, how many
2273  * buffers we could scan before we'd catch up with it and "lap" it. Note:
2274  * weird-looking coding of xxx_passes comparisons are to avoid bogus
2275  * behavior when the passes counts wrap around.
2276  */
2277  if (saved_info_valid)
2278  {
2279  int32 passes_delta = strategy_passes - prev_strategy_passes;
2280 
2281  strategy_delta = strategy_buf_id - prev_strategy_buf_id;
2282  strategy_delta += (long) passes_delta * NBuffers;
2283 
2284  Assert(strategy_delta >= 0);
2285 
2286  if ((int32) (next_passes - strategy_passes) > 0)
2287  {
2288  /* we're one pass ahead of the strategy point */
2289  bufs_to_lap = strategy_buf_id - next_to_clean;
2290 #ifdef BGW_DEBUG
2291  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2292  next_passes, next_to_clean,
2293  strategy_passes, strategy_buf_id,
2294  strategy_delta, bufs_to_lap);
2295 #endif
2296  }
2297  else if (next_passes == strategy_passes &&
2298  next_to_clean >= strategy_buf_id)
2299  {
2300  /* on same pass, but ahead or at least not behind */
2301  bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
2302 #ifdef BGW_DEBUG
2303  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2304  next_passes, next_to_clean,
2305  strategy_passes, strategy_buf_id,
2306  strategy_delta, bufs_to_lap);
2307 #endif
2308  }
2309  else
2310  {
2311  /*
2312  * We're behind, so skip forward to the strategy point and start
2313  * cleaning from there.
2314  */
2315 #ifdef BGW_DEBUG
2316  elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
2317  next_passes, next_to_clean,
2318  strategy_passes, strategy_buf_id,
2319  strategy_delta);
2320 #endif
2321  next_to_clean = strategy_buf_id;
2322  next_passes = strategy_passes;
2323  bufs_to_lap = NBuffers;
2324  }
2325  }
2326  else
2327  {
2328  /*
2329  * Initializing at startup or after LRU scanning had been off. Always
2330  * start at the strategy point.
2331  */
2332 #ifdef BGW_DEBUG
2333  elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
2334  strategy_passes, strategy_buf_id);
2335 #endif
2336  strategy_delta = 0;
2337  next_to_clean = strategy_buf_id;
2338  next_passes = strategy_passes;
2339  bufs_to_lap = NBuffers;
2340  }
2341 
2342  /* Update saved info for next time */
2343  prev_strategy_buf_id = strategy_buf_id;
2344  prev_strategy_passes = strategy_passes;
2345  saved_info_valid = true;
2346 
2347  /*
2348  * Compute how many buffers had to be scanned for each new allocation, ie,
2349  * 1/density of reusable buffers, and track a moving average of that.
2350  *
2351  * If the strategy point didn't move, we don't update the density estimate
2352  */
2353  if (strategy_delta > 0 && recent_alloc > 0)
2354  {
2355  scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
2356  smoothed_density += (scans_per_alloc - smoothed_density) /
2357  smoothing_samples;
2358  }
2359 
2360  /*
2361  * Estimate how many reusable buffers there are between the current
2362  * strategy point and where we've scanned ahead to, based on the smoothed
2363  * density estimate.
2364  */
2365  bufs_ahead = NBuffers - bufs_to_lap;
2366  reusable_buffers_est = (float) bufs_ahead / smoothed_density;
2367 
2368  /*
2369  * Track a moving average of recent buffer allocations. Here, rather than
2370  * a true average we want a fast-attack, slow-decline behavior: we
2371  * immediately follow any increase.
2372  */
2373  if (smoothed_alloc <= (float) recent_alloc)
2374  smoothed_alloc = recent_alloc;
2375  else
2376  smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
2377  smoothing_samples;
2378 
2379  /* Scale the estimate by a GUC to allow more aggressive tuning. */
2380  upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
2381 
2382  /*
2383  * If recent_alloc remains at zero for many cycles, smoothed_alloc will
2384  * eventually underflow to zero, and the underflows produce annoying
2385  * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
2386  * zero, there's no point in tracking smaller and smaller values of
2387  * smoothed_alloc, so just reset it to exactly zero to avoid this
2388  * syndrome. It will pop back up as soon as recent_alloc increases.
2389  */
2390  if (upcoming_alloc_est == 0)
2391  smoothed_alloc = 0;
2392 
2393  /*
2394  * Even in cases where there's been little or no buffer allocation
2395  * activity, we want to make a small amount of progress through the buffer
2396  * cache so that as many reusable buffers as possible are clean after an
2397  * idle period.
2398  *
2399  * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
2400  * the BGW will be called during the scan_whole_pool time; slice the
2401  * buffer pool into that many sections.
2402  */
2403  min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
2404 
2405  if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
2406  {
2407 #ifdef BGW_DEBUG
2408  elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
2409  upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
2410 #endif
2411  upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
2412  }
2413 
2414  /*
2415  * Now write out dirty reusable buffers, working forward from the
2416  * next_to_clean point, until we have lapped the strategy scan, or cleaned
2417  * enough buffers to match our estimate of the next cycle's allocation
2418  * requirements, or hit the bgwriter_lru_maxpages limit.
2419  */
2420 
2421  /* Make sure we can handle the pin inside SyncOneBuffer */
2423 
2424  num_to_scan = bufs_to_lap;
2425  num_written = 0;
2426  reusable_buffers = reusable_buffers_est;
2427 
2428  /* Execute the LRU scan */
2429  while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
2430  {
2431  int sync_state = SyncOneBuffer(next_to_clean, true,
2432  wb_context);
2433 
2434  if (++next_to_clean >= NBuffers)
2435  {
2436  next_to_clean = 0;
2437  next_passes++;
2438  }
2439  num_to_scan--;
2440 
2441  if (sync_state & BUF_WRITTEN)
2442  {
2443  reusable_buffers++;
2444  if (++num_written >= bgwriter_lru_maxpages)
2445  {
2447  break;
2448  }
2449  }
2450  else if (sync_state & BUF_REUSABLE)
2451  reusable_buffers++;
2452  }
2453 
2454  BgWriterStats.m_buf_written_clean += num_written;
2455 
2456 #ifdef BGW_DEBUG
2457  elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
2458  recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
2459  smoothed_density, reusable_buffers_est, upcoming_alloc_est,
2460  bufs_to_lap - num_to_scan,
2461  num_written,
2462  reusable_buffers - reusable_buffers_est);
2463 #endif
2464 
2465  /*
2466  * Consider the above scan as being like a new allocation scan.
2467  * Characterize its density and update the smoothed one based on it. This
2468  * effectively halves the moving average period in cases where both the
2469  * strategy and the background writer are doing some useful scanning,
2470  * which is helpful because a long memory isn't as desirable on the
2471  * density estimates.
2472  */
2473  new_strategy_delta = bufs_to_lap - num_to_scan;
2474  new_recent_alloc = reusable_buffers - reusable_buffers_est;
2475  if (new_strategy_delta > 0 && new_recent_alloc > 0)
2476  {
2477  scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
2478  smoothed_density += (scans_per_alloc - smoothed_density) /
2479  smoothing_samples;
2480 
2481 #ifdef BGW_DEBUG
2482  elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
2483  new_recent_alloc, new_strategy_delta,
2484  scans_per_alloc, smoothed_density);
2485 #endif
2486  }
2487 
2488  /* Return true if OK to hibernate */
2489  return (bufs_to_lap == 0 && recent_alloc == 0);
2490 }
PgStat_Counter m_buf_alloc
Definition: pgstat.h:494
int StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
Definition: freelist.c:395
#define DEBUG1
Definition: elog.h:25
int BgWriterDelay
Definition: bgwriter.c:64
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
PgStat_Counter m_maxwritten_clean
Definition: pgstat.h:491
PgStat_Counter m_buf_written_clean
Definition: pgstat.h:490
PgStat_MsgBgWriter BgWriterStats
Definition: pgstat.c:132
double bgwriter_lru_multiplier
Definition: bufmgr.c:134
static int SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
Definition: bufmgr.c:2509
signed int int32
Definition: c.h:429
#define BUF_REUSABLE
Definition: bufmgr.c:69
int bgwriter_lru_maxpages
Definition: bufmgr.c:133
#define DEBUG2
Definition: elog.h:24
unsigned int uint32
Definition: c.h:441
#define BUF_WRITTEN
Definition: bufmgr.c:68
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:946
#define Assert(condition)
Definition: c.h:804
#define elog(elevel,...)
Definition: elog.h:232
int NBuffers
Definition: globals.c:135

◆ BufferGetBlockNumber()

BlockNumber BufferGetBlockNumber ( Buffer  buffer)

Definition at line 2758 of file bufmgr.c.

References Assert, buftag::blockNum, BufferIsLocal, BufferIsPinned, GetBufferDescriptor, GetLocalBufferDescriptor, and BufferDesc::tag.

Referenced by _bt_check_unique(), _bt_checkpage(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_doinsert(), _bt_endpoint(), _bt_finish_split(), _bt_first(), _bt_getroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newroot(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_search(), _bt_split(), _bt_unlink_halfdead_page(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_first(), _hash_freeovflpage(), _hash_getnewbuf(), _hash_readnext(), _hash_readpage(), _hash_splitbucket(), allocNewBuffer(), blinsert(), BloomInitMetapage(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_xlog_insert_update(), brinbuild(), brinGetTupleForHeapBlock(), createPostingTree(), dataBeginPlaceToPageLeaf(), dataPrepareDownlink(), doPickSplit(), entryPrepareDownlink(), fill_seq_with_data(), ginEntryInsert(), ginFindParents(), ginFinishSplit(), ginPlaceToPage(), ginRedoDeleteListPages(), ginRedoUpdateMetapage(), ginScanToDelete(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistformdownlink(), gistinserttuples(), gistMemorizeAllDownlinks(), gistplacetopage(), gistRelocateBuildBuffersOnSplit(), gistScanPage(), hash_xlog_add_ovfl_page(), heap_delete(), heap_hot_search_buffer(), heap_insert(), heap_multi_insert(), heap_page_is_all_visible(), heap_prune_chain(), heap_update(), heap_xlog_confirm(), heap_xlog_lock(), makeSublist(), moveLeafs(), moveRightIfItNeeded(), pgstathashindex(), ReadBufferBI(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_get_buffer(), revmap_physical_extend(), spgAddNodeAction(), spgbuild(), spgdoinsert(), SpGistSetLastUsedPage(), spgSplitNodeAction(), spgWalk(), startScanEntry(), terminate_brin_buildstate(), vacuumLeafPage(), visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), 
visibilitymap_set(), and XLogReadBufferExtended().

2759 {
2760  BufferDesc *bufHdr;
2761 
2762  Assert(BufferIsPinned(buffer));
2763 
2764  if (BufferIsLocal(buffer))
2765  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2766  else
2767  bufHdr = GetBufferDescriptor(buffer - 1);
2768 
2769  /* pinned, so OK to read tag without spinlock */
2770  return bufHdr->tag.blockNum;
2771 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
#define GetLocalBufferDescriptor(id)
#define GetBufferDescriptor(id)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:95
BufferTag tag

◆ BufferGetLSNAtomic()

XLogRecPtr BufferGetLSNAtomic ( Buffer  buffer)

Definition at line 3021 of file bufmgr.c.

References Assert, BufferGetPage, BufferIsLocal, BufferIsPinned, BufferIsValid, GetBufferDescriptor, LockBufHdr(), PageGetLSN, UnlockBufHdr, and XLogHintBitIsNeeded.

Referenced by _bt_killitems(), _bt_readpage(), gistdoinsert(), gistFindPath(), gistkillitems(), gistScanPage(), SetHintBits(), and XLogSaveBufferForHint().

3022 {
3023  BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
3024  char *page = BufferGetPage(buffer);
3025  XLogRecPtr lsn;
3026  uint32 buf_state;
3027 
3028  /*
3029  * If we don't need locking for correctness, fastpath out.
3030  */
3031  if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
3032  return PageGetLSN(page);
3033 
3034  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3035  Assert(BufferIsValid(buffer));
3036  Assert(BufferIsPinned(buffer));
3037 
3038  buf_state = LockBufHdr(bufHdr);
3039  lsn = PageGetLSN(page);
3040  UnlockBufHdr(bufHdr, buf_state);
3041 
3042  return lsn;
3043 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:441
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4603
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define PageGetLSN(page)
Definition: bufpage.h:366
#define UnlockBufHdr(desc, s)
#define XLogHintBitIsNeeded()
Definition: xlog.h:212

◆ BufferGetTag()

void BufferGetTag ( Buffer  buffer,
RelFileNode rnode,
ForkNumber forknum,
BlockNumber blknum 
)

Definition at line 2779 of file bufmgr.c.

References Assert, buftag::blockNum, BufferIsLocal, BufferIsPinned, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, buftag::rnode, and BufferDesc::tag.

Referenced by fsm_search_avail(), ginRedoInsertEntry(), log_newpage_buffer(), ResolveCminCmaxDuringDecoding(), XLogRegisterBuffer(), and XLogSaveBufferForHint().

2781 {
2782  BufferDesc *bufHdr;
2783 
2784  /* Do the same checks as BufferGetBlockNumber. */
2785  Assert(BufferIsPinned(buffer));
2786 
2787  if (BufferIsLocal(buffer))
2788  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2789  else
2790  bufHdr = GetBufferDescriptor(buffer - 1);
2791 
2792  /* pinned, so OK to read tag without spinlock */
2793  *rnode = bufHdr->tag.rnode;
2794  *forknum = bufHdr->tag.forkNum;
2795  *blknum = bufHdr->tag.blockNum;
2796 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
ForkNumber forkNum
Definition: buf_internals.h:94
#define GetLocalBufferDescriptor(id)
#define GetBufferDescriptor(id)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:95
RelFileNode rnode
Definition: buf_internals.h:93
BufferTag tag

◆ BufferIsPermanent()

bool BufferIsPermanent ( Buffer  buffer)

Definition at line 2991 of file bufmgr.c.

References Assert, BM_PERMANENT, BufferIsLocal, BufferIsPinned, BufferIsValid, GetBufferDescriptor, pg_atomic_read_u32(), and BufferDesc::state.

Referenced by SetHintBits().

2992 {
2993  BufferDesc *bufHdr;
2994 
2995  /* Local buffers are used only for temp relations. */
2996  if (BufferIsLocal(buffer))
2997  return false;
2998 
2999  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3000  Assert(BufferIsValid(buffer));
3001  Assert(BufferIsPinned(buffer));
3002 
3003  /*
3004  * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
3005  * need not bother with the buffer header spinlock. Even if someone else
3006  * changes the buffer header state while we're doing this, the state is
3007  * changed atomically, so we'll read the old value or the new value, but
3008  * not random garbage.
3009  */
3010  bufHdr = GetBufferDescriptor(buffer - 1);
3011  return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
3012 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
#define BM_PERMANENT
Definition: buf_internals.h:67
#define GetBufferDescriptor(id)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
pg_atomic_uint32 state
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ BufferShmemSize()

Size BufferShmemSize ( void  )

Definition at line 156 of file buf_init.c.

References add_size(), mul_size(), NBuffers, PG_CACHE_LINE_SIZE, and StrategyShmemSize().

Referenced by CreateSharedMemoryAndSemaphores().

157 {
158  Size size = 0;
159 
160  /* size of buffer descriptors */
161  size = add_size(size, mul_size(NBuffers, sizeof(BufferDescPadded)));
162  /* to allow aligning buffer descriptors */
163  size = add_size(size, PG_CACHE_LINE_SIZE);
164 
165  /* size of data pages */
166  size = add_size(size, mul_size(NBuffers, BLCKSZ));
167 
168  /* size of stuff controlled by freelist.c */
169  size = add_size(size, StrategyShmemSize());
170 
171  /* size of I/O condition variables */
172  size = add_size(size, mul_size(NBuffers,
 173  sizeof(ConditionVariableMinimallyPadded)));
 174 
 175  /* to allow aligning the above */
175  size = add_size(size, PG_CACHE_LINE_SIZE);
176 
177  /* size of checkpoint sort array in bufmgr.c */
178  size = add_size(size, mul_size(NBuffers, sizeof(CkptSortItem)));
179 
180  return size;
181 }
#define PG_CACHE_LINE_SIZE
Size mul_size(Size s1, Size s2)
Definition: shmem.c:519
Size add_size(Size s1, Size s2)
Definition: shmem.c:502
size_t Size
Definition: c.h:540
int NBuffers
Definition: globals.c:135
Size StrategyShmemSize(void)
Definition: freelist.c:454

◆ BufmgrCommit()

void BufmgrCommit ( void  )

Definition at line 2744 of file bufmgr.c.

Referenced by PrepareTransaction(), and RecordTransactionCommit().

2745 {
2746  /* Nothing to do in bufmgr anymore... */
2747 }

◆ CheckPointBuffers()

void CheckPointBuffers ( int  flags)

Definition at line 2734 of file bufmgr.c.

References BufferSync().

Referenced by CheckPointGuts().

2735 {
2736  BufferSync(flags);
2737 }
static void BufferSync(int flags)
Definition: bufmgr.c:1930

◆ ConditionalLockBuffer()

bool ConditionalLockBuffer ( Buffer  buffer)

Definition at line 4049 of file bufmgr.c.

References Assert, buf, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, GetBufferDescriptor, LW_EXCLUSIVE, and LWLockConditionalAcquire().

Referenced by _bt_conditionallockbuf(), BloomNewBuffer(), ConditionalLockBufferForCleanup(), GinNewBuffer(), gistNewBuffer(), RelationGetBufferForTuple(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), and SpGistUpdateMetaPage().

4050 {
4051  BufferDesc *buf;
4052 
4053  Assert(BufferIsPinned(buffer));
4054  if (BufferIsLocal(buffer))
4055  return true; /* act as though we got it */
4056 
4057  buf = GetBufferDescriptor(buffer - 1);
4058 
4058 
4059  return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
4060  LW_EXCLUSIVE);
4061 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1378
static char * buf
Definition: pg_test_fsync.c:68
#define GetBufferDescriptor(id)
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37

◆ ConditionalLockBufferForCleanup()

bool ConditionalLockBufferForCleanup ( Buffer  buffer)

Definition at line 4257 of file bufmgr.c.

References Assert, BUF_STATE_GET_REFCOUNT, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsValid, ConditionalLockBuffer(), GetBufferDescriptor, GetPrivateRefCount(), LocalRefCount, LockBuffer(), LockBufHdr(), PrivateRefCountEntry::refcount, and UnlockBufHdr.

Referenced by _hash_finish_split(), _hash_getbuf_with_condlock_cleanup(), heap_page_prune_opt(), and lazy_scan_heap().

4258 {
4259  BufferDesc *bufHdr;
4260  uint32 buf_state,
4261  refcount;
4262 
4263  Assert(BufferIsValid(buffer));
4264 
4265  if (BufferIsLocal(buffer))
4266  {
4267  refcount = LocalRefCount[-buffer - 1];
4268  /* There should be exactly one pin */
4269  Assert(refcount > 0);
4270  if (refcount != 1)
4271  return false;
4272  /* Nobody else to wait for */
4273  return true;
4274  }
4275 
4276  /* There should be exactly one local pin */
4277  refcount = GetPrivateRefCount(buffer);
4278  Assert(refcount);
4279  if (refcount != 1)
4280  return false;
4281 
4282  /* Try to acquire lock */
4283  if (!ConditionalLockBuffer(buffer))
4284  return false;
4285 
4286  bufHdr = GetBufferDescriptor(buffer - 1);
4287  buf_state = LockBufHdr(bufHdr);
4288  refcount = BUF_STATE_GET_REFCOUNT(buf_state);
4289 
4290  Assert(refcount > 0);
4291  if (refcount == 1)
4292  {
4293  /* Successfully acquired exclusive lock with pincount 1 */
4294  UnlockBufHdr(bufHdr, buf_state);
4295  return true;
4296  }
4297 
4298  /* Failed, so release the lock */
4299  UnlockBufHdr(bufHdr, buf_state);
4300  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4301  return false;
4302 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
bool ConditionalLockBuffer(Buffer buffer)
Definition: bufmgr.c:4049
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4023
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4603
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define UnlockBufHdr(desc, s)
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:49

◆ DropDatabaseBuffers()

void DropDatabaseBuffers ( Oid  dbid)

Definition at line 3421 of file bufmgr.c.

References buftag::blockNum, buf, BufferDescriptorGetBuffer, RelFileNode::dbNode, elog, buftag::forkNum, BufferDesc::freeNext, GetBufferDescriptor, GetPrivateRefCount(), i, InvalidateBuffer(), InvalidBackendId, LockBufHdr(), LOG, NBuffers, relpathbackend, relpathperm, buftag::rnode, BufferDesc::tag, and UnlockBufHdr.

Referenced by dbase_redo(), dropdb(), and movedb().

3422 {
3423  int i;
3424 
3425  /*
3426  * We needn't consider local buffers, since by assumption the target
3427  * database isn't our own.
3428  */
3429 
3430  for (i = 0; i < NBuffers; i++)
3431  {
3432  BufferDesc *bufHdr = GetBufferDescriptor(i);
3433  uint32 buf_state;
3434 
3435  /*
3436  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3437  * and saves some cycles.
3438  */
3439  if (bufHdr->tag.rnode.dbNode != dbid)
3440  continue;
3441 
3442  buf_state = LockBufHdr(bufHdr);
3443  if (bufHdr->tag.rnode.dbNode == dbid)
3444  InvalidateBuffer(bufHdr); /* releases spinlock */
3445  else
3446  UnlockBufHdr(bufHdr, buf_state);
3447  }
3448 }
static void InvalidateBuffer(BufferDesc *buf)
Definition: bufmgr.c:1464
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:441
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4603
RelFileNode rnode
Definition: buf_internals.h:93
BufferTag tag
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:135

◆ DropRelFileNodeBuffers()

void DropRelFileNodeBuffers ( struct SMgrRelationData smgr_reln,
ForkNumber forkNum,
int  nforks,
BlockNumber firstDelBlock 
)

◆ DropRelFileNodesAllBuffers()

void DropRelFileNodesAllBuffers ( struct SMgrRelationData **  smgr_reln,
int  nnodes 
)

◆ FlushDatabaseBuffers()

void FlushDatabaseBuffers ( Oid  dbid)

Definition at line 3721 of file bufmgr.c.

References BM_DIRTY, BM_VALID, BufferDescriptorGetContentLock, CurrentResourceOwner, RelFileNode::dbNode, FlushBuffer(), GetBufferDescriptor, i, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, PinBuffer_Locked(), ReservePrivateRefCountEntry(), ResourceOwnerEnlargeBuffers(), buftag::rnode, BufferDesc::tag, UnlockBufHdr, and UnpinBuffer().

Referenced by dbase_redo().

3722 {
3723  int i;
3724  BufferDesc *bufHdr;
3725 
3726  /* Make sure we can handle the pin inside the loop */
3727  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3728 
3729  for (i = 0; i < NBuffers; i++)
3730  {
3731  uint32 buf_state;
3732 
3733  bufHdr = GetBufferDescriptor(i);
3734 
3735  /*
3736  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3737  * and saves some cycles.
3738  */
3739  if (bufHdr->tag.rnode.dbNode != dbid)
3740  continue;
3741 
3742  ReservePrivateRefCountEntry();
3743 
3744  buf_state = LockBufHdr(bufHdr);
3745  if (bufHdr->tag.rnode.dbNode == dbid &&
3746  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3747  {
3748  PinBuffer_Locked(bufHdr);
3749  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3750  FlushBuffer(bufHdr, NULL);
3751  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3752  UnpinBuffer(bufHdr, true);
3753  }
3754  else
3755  UnlockBufHdr(bufHdr, buf_state);
3756  }
3757 }
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
#define BM_DIRTY
Definition: buf_internals.h:59
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln)
Definition: bufmgr.c:2818
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1816
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:441
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1831
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:946
#define BM_VALID
Definition: buf_internals.h:60
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4603
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:1786
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1203
RelFileNode rnode
Definition: buf_internals.h:93
BufferTag tag
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:135
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:215

◆ FlushOneBuffer()

void FlushOneBuffer ( Buffer  buffer)

Definition at line 3764 of file bufmgr.c.

References Assert, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, FlushBuffer(), GetBufferDescriptor, and LWLockHeldByMe().

Referenced by hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), and XLogReadBufferForRedoExtended().

3765 {
3766  BufferDesc *bufHdr;
3767 
3768  /* currently not needed, but no fundamental reason not to support */
3769  Assert(!BufferIsLocal(buffer));
3770 
3771  Assert(BufferIsPinned(buffer));
3772 
3773  bufHdr = GetBufferDescriptor(buffer - 1);
3774 
3776 
3777  FlushBuffer(bufHdr, NULL);
3778 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
bool LWLockHeldByMe(LWLock *l)
Definition: lwlock.c:1932
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln)
Definition: bufmgr.c:2818
#define GetBufferDescriptor(id)
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37

◆ FlushRelationBuffers()

void FlushRelationBuffers ( Relation  rel)

Definition at line 3525 of file bufmgr.c.

References ErrorContextCallback::arg, buftag::blockNum, BM_DIRTY, BM_JUST_DIRTIED, BM_VALID, BufferDescriptorGetContentLock, ErrorContextCallback::callback, CurrentResourceOwner, error_context_stack, FlushBuffer(), buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, i, local_buffer_write_error_callback(), LocalBufHdrGetBlock, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, NLocBuffer, PageSetChecksumInplace(), pg_atomic_read_u32(), pg_atomic_unlocked_write_u32(), PinBuffer_Locked(), ErrorContextCallback::previous, RelationData::rd_node, RelationData::rd_smgr, RelationOpenSmgr, RelationUsesLocalBuffers, RelFileNodeEquals, ReservePrivateRefCountEntry(), ResourceOwnerEnlargeBuffers(), buftag::rnode, smgrwrite(), BufferDesc::state, BufferDesc::tag, UnlockBufHdr, and UnpinBuffer().

Referenced by heapam_relation_copy_data(), and index_copy_data().

3526 {
3527  int i;
3528  BufferDesc *bufHdr;
3529 
3530  /* Open rel at the smgr level if not already done */
3531  RelationOpenSmgr(rel);
3532 
3533  if (RelationUsesLocalBuffers(rel))
3534  {
3535  for (i = 0; i < NLocBuffer; i++)
3536  {
3537  uint32 buf_state;
3538 
3539  bufHdr = GetLocalBufferDescriptor(i);
3540  if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
3541  ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
3542  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3543  {
3544  ErrorContextCallback errcallback;
3545  Page localpage;
3546 
3547  localpage = (char *) LocalBufHdrGetBlock(bufHdr);
3548 
3549  /* Setup error traceback support for ereport() */
3550  errcallback.callback = local_buffer_write_error_callback;
3551  errcallback.arg = (void *) bufHdr;
3552  errcallback.previous = error_context_stack;
3553  error_context_stack = &errcallback;
3554 
3555  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
3556 
3557  smgrwrite(rel->rd_smgr,
3558  bufHdr->tag.forkNum,
3559  bufHdr->tag.blockNum,
3560  localpage,
3561  false);
3562 
3563  buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
3564  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
3565 
3566  /* Pop the error context stack */
3567  error_context_stack = errcallback.previous;
3568  }
3569  }
3570 
3571  return;
3572  }
3573 
3574  /* Make sure we can handle the pin inside the loop */
3575  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3576 
3577  for (i = 0; i < NBuffers; i++)
3578  {
3579  uint32 buf_state;
3580 
3581  bufHdr = GetBufferDescriptor(i);
3582 
3583  /*
3584  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3585  * and saves some cycles.
3586  */
3587  if (!RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node))
3588  continue;
3589 
3590  ReservePrivateRefCountEntry();
3591 
3592  buf_state = LockBufHdr(bufHdr);
3593  if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
3594  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3595  {
3596  PinBuffer_Locked(bufHdr);
3597  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3598  FlushBuffer(bufHdr, rel->rd_smgr);
3599  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3600  UnpinBuffer(bufHdr, true);
3601  }
3602  else
3603  UnlockBufHdr(bufHdr, buf_state);
3604  }
3605 }
#define LocalBufHdrGetBlock(bufHdr)
Definition: bufmgr.c:64
ForkNumber forkNum
Definition: buf_internals.h:94
static void local_buffer_write_error_callback(void *arg)
Definition: bufmgr.c:4557
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
#define GetLocalBufferDescriptor(id)
#define BM_DIRTY
Definition: buf_internals.h:59
void(* callback)(void *arg)
Definition: elog.h:247
struct ErrorContextCallback * previous
Definition: elog.h:246
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln)
Definition: bufmgr.c:2818
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1816
ErrorContextCallback * error_context_stack
Definition: elog.c:93
#define RelationOpenSmgr(relation)
Definition: rel.h:529
int NLocBuffer
Definition: localbuf.c:41
void smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer, bool skipFsync)
Definition: smgr.c:523
#define GetBufferDescriptor(id)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:64
unsigned int uint32
Definition: c.h:441
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1831
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:946
#define BM_VALID
Definition: buf_internals.h:60
RelFileNode rd_node
Definition: rel.h:55
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4603
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:1786
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1532
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1203
BlockNumber blockNum
Definition: buf_internals.h:95
RelFileNode rnode
Definition: buf_internals.h:93
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:594
BufferTag tag
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:135
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:277
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:215
pg_atomic_uint32 state
Pointer Page
Definition: bufpage.h:78
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ FlushRelationsAllBuffers()

void FlushRelationsAllBuffers ( struct SMgrRelationData **  smgrs,
int  nrels 
)

◆ FreeAccessStrategy()

void FreeAccessStrategy ( BufferAccessStrategy  strategy)

Definition at line 597 of file freelist.c.

References pfree().

Referenced by blgetbitmap(), FreeBulkInsertState(), heap_endscan(), initscan(), and parallel_vacuum_main().

598 {
599  /* don't crash if called on a "default" strategy */
600  if (strategy != NULL)
601  pfree(strategy);
602 }
void pfree(void *pointer)
Definition: mcxt.c:1169

◆ GetAccessStrategy()

BufferAccessStrategy GetAccessStrategy ( BufferAccessStrategyType  btype)

Definition at line 542 of file freelist.c.

References BAS_BULKREAD, BAS_BULKWRITE, BAS_NORMAL, BAS_VACUUM, BufferAccessStrategyData::btype, BufferAccessStrategyData::buffers, elog, ERROR, Min, NBuffers, offsetof, palloc0(), and BufferAccessStrategyData::ring_size.

Referenced by blgetbitmap(), bt_check_every_level(), collect_corrupt_items(), collect_visibility_data(), do_autovacuum(), GetBulkInsertState(), initscan(), parallel_vacuum_main(), pgstat_index(), pgstathashindex(), pgstatindex_impl(), statapprox_heap(), vacuum(), and verify_heapam().

543 {
544  BufferAccessStrategy strategy;
545  int ring_size;
546 
547  /*
548  * Select ring size to use. See buffer/README for rationales.
549  *
550  * Note: if you change the ring size for BAS_BULKREAD, see also
551  * SYNC_SCAN_REPORT_INTERVAL in access/heap/syncscan.c.
552  */
553  switch (btype)
554  {
555  case BAS_NORMAL:
556  /* if someone asks for NORMAL, just give 'em a "default" object */
557  return NULL;
558 
559  case BAS_BULKREAD:
560  ring_size = 256 * 1024 / BLCKSZ;
561  break;
562  case BAS_BULKWRITE:
563  ring_size = 16 * 1024 * 1024 / BLCKSZ;
564  break;
565  case BAS_VACUUM:
566  ring_size = 256 * 1024 / BLCKSZ;
567  break;
568 
569  default:
570  elog(ERROR, "unrecognized buffer access strategy: %d",
571  (int) btype);
572  return NULL; /* keep compiler quiet */
573  }
574 
575  /* Make sure ring isn't an undue fraction of shared buffers */
576  ring_size = Min(NBuffers / 8, ring_size);
577 
578  /* Allocate the object and initialize all elements to zeroes */
579  strategy = (BufferAccessStrategy)
 580  palloc0(offsetof(BufferAccessStrategyData, buffers) +
 581  ring_size * sizeof(Buffer));
582 
583  /* Set fields that don't start out zero */
584  strategy->btype = btype;
585  strategy->ring_size = ring_size;
586 
587  return strategy;
588 }
struct BufferAccessStrategyData * BufferAccessStrategy
Definition: buf.h:44
#define Min(x, y)
Definition: c.h:986
#define ERROR
Definition: elog.h:46
BufferAccessStrategyType btype
Definition: freelist.c:74
void * palloc0(Size size)
Definition: mcxt.c:1093
#define elog(elevel,...)
Definition: elog.h:232
int NBuffers
Definition: globals.c:135
int Buffer
Definition: buf.h:23
#define offsetof(type, field)
Definition: c.h:727

◆ HoldingBufferPinThatDelaysRecovery()

bool HoldingBufferPinThatDelaysRecovery ( void  )

Definition at line 4231 of file bufmgr.c.

References GetPrivateRefCount(), and GetStartupBufferPinWaitBufId().

Referenced by CheckRecoveryConflictDeadlock(), and RecoveryConflictInterrupt().

4232 {
4233  int bufid = GetStartupBufferPinWaitBufId();
4234 
4235  /*
4236  * If we get woken slowly then it's possible that the Startup process was
4237  * already woken by other backends before we got here. Also possible that
4238  * we get here by multiple interrupts or interrupts at inappropriate
4239  * times, so make sure we do nothing if the bufid is not set.
4240  */
4241  if (bufid < 0)
4242  return false;
4243 
4244  if (GetPrivateRefCount(bufid + 1) > 0)
4245  return true;
4246 
4247  return false;
4248 }
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
int GetStartupBufferPinWaitBufId(void)
Definition: proc.c:662

◆ IncrBufferRefCount()

void IncrBufferRefCount ( Buffer  buffer)

Definition at line 3822 of file bufmgr.c.

References Assert, BufferIsLocal, BufferIsPinned, CurrentResourceOwner, GetPrivateRefCountEntry(), LocalRefCount, PrivateRefCountEntry::refcount, ResourceOwnerEnlargeBuffers(), and ResourceOwnerRememberBuffer().

Referenced by _bt_steppage(), btrestrpos(), entryLoadMoreItems(), ReadBufferBI(), scanPostingTree(), startScanEntry(), and tts_buffer_heap_store_tuple().

3823 {
3824  Assert(BufferIsPinned(buffer));
3825  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3826  if (BufferIsLocal(buffer))
3827  LocalRefCount[-buffer - 1]++;
3828  else
3829  {
3830  PrivateRefCountEntry *ref;
3831 
3832  ref = GetPrivateRefCountEntry(buffer, true);
3833  Assert(ref != NULL);
3834  ref->refcount++;
3835  }
3836  ResourceOwnerRememberBuffer(CurrentResourceOwner, buffer);
3837 }
static PrivateRefCountEntry * GetPrivateRefCountEntry(Buffer buffer, bool do_move)
Definition: bufmgr.c:307
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:959
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:946
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
int32 * LocalRefCount
Definition: localbuf.c:45

◆ InitBufferPool()

void InitBufferPool ( void  )

Definition at line 67 of file buf_init.c.

References Assert, backend_flush_after, buf, BufferDesc::buf_id, BufferBlocks, BufferDescriptorGetContentLock, BufferDescriptorGetIOCV, CLEAR_BUFFERTAG, ConditionVariableInit(), BufferDesc::freeNext, FREENEXT_END_OF_LIST, GetBufferDescriptor, i, LWLockInitialize(), LWTRANCHE_BUFFER_CONTENT, NBuffers, pg_atomic_init_u32(), ShmemInitStruct(), BufferDesc::state, StrategyInitialize(), BufferDesc::tag, BufferDesc::wait_backend_pid, and WritebackContextInit().

Referenced by CreateSharedMemoryAndSemaphores().

68 {
69  bool foundBufs,
70  foundDescs,
71  foundIOCV,
72  foundBufCkpt;
73 
74  /* Align descriptors to a cacheline boundary. */
76  ShmemInitStruct("Buffer Descriptors",
77  NBuffers * sizeof(BufferDescPadded),
78  &foundDescs);
79 
80  BufferBlocks = (char *)
81  ShmemInitStruct("Buffer Blocks",
82  NBuffers * (Size) BLCKSZ, &foundBufs);
83 
84  /* Align condition variables to cacheline boundary. */
 85  BufferIOCVArray = (ConditionVariableMinimallyPadded *)
 86  ShmemInitStruct("Buffer IO Condition Variables",
 87  NBuffers * sizeof(ConditionVariableMinimallyPadded),
 88  &foundIOCV);
89 
90  /*
91  * The array used to sort to-be-checkpointed buffer ids is located in
92  * shared memory, to avoid having to allocate significant amounts of
93  * memory at runtime. As that'd be in the middle of a checkpoint, or when
94  * the checkpointer is restarted, memory allocation failures would be
95  * painful.
96  */
 97  CkptBufferIds = (CkptSortItem *)
 98  ShmemInitStruct("Checkpoint BufferIds",
99  NBuffers * sizeof(CkptSortItem), &foundBufCkpt);
100 
101  if (foundDescs || foundBufs || foundIOCV || foundBufCkpt)
102  {
103  /* should find all of these, or none of them */
104  Assert(foundDescs && foundBufs && foundIOCV && foundBufCkpt);
105  /* note: this path is only taken in EXEC_BACKEND case */
106  }
107  else
108  {
109  int i;
110 
111  /*
112  * Initialize all the buffer headers.
113  */
114  for (i = 0; i < NBuffers; i++)
115  {
 116  BufferDesc *buf = GetBufferDescriptor(i);
 117 
118  CLEAR_BUFFERTAG(buf->tag);
119 
120  pg_atomic_init_u32(&buf->state, 0);
121  buf->wait_backend_pid = 0;
122 
123  buf->buf_id = i;
124 
125  /*
126  * Initially link all the buffers together as unused. Subsequent
127  * management of this list is done by freelist.c.
128  */
129  buf->freeNext = i + 1;
130 
 131  LWLockInitialize(BufferDescriptorGetContentLock(buf),
 132  LWTRANCHE_BUFFER_CONTENT);
 133 
 134  ConditionVariableInit(BufferDescriptorGetIOCV(buf));
 135  }
136 
137  /* Correct last entry of linked list */
138  GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST;
139  }
140 
141  /* Init other shared buffer-management stuff */
142  StrategyInitialize(!foundDescs);
143 
144  /* Initialize per-backend file flush context */
 145  WritebackContextInit(&BackendWritebackContext,
 146  &backend_flush_after);
 147 }
#define FREENEXT_END_OF_LIST
int wait_backend_pid
int backend_flush_after
Definition: bufmgr.c:158
void StrategyInitialize(bool init)
Definition: freelist.c:475
void WritebackContextInit(WritebackContext *context, int *max_pending)
Definition: bufmgr.c:4738
void ConditionVariableInit(ConditionVariable *cv)
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:396
WritebackContext BackendWritebackContext
Definition: buf_init.c:23
static char * buf
Definition: pg_test_fsync.c:68
BufferDescPadded * BufferDescriptors
Definition: buf_init.c:20
#define GetBufferDescriptor(id)
#define BufferDescriptorGetIOCV(bdesc)
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:740
ConditionVariableMinimallyPadded * BufferIOCVArray
Definition: buf_init.c:22
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:804
#define CLEAR_BUFFERTAG(a)
Definition: buf_internals.h:98
CkptSortItem * CkptBufferIds
Definition: buf_init.c:24
size_t Size
Definition: c.h:540
BufferTag tag
int i
int NBuffers
Definition: globals.c:135
pg_atomic_uint32 state
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:223
char * BufferBlocks
Definition: buf_init.c:21

◆ InitBufferPoolAccess()

void InitBufferPoolAccess ( void  )

Definition at line 2598 of file bufmgr.c.

References HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, and PrivateRefCountArray.

Referenced by BaseInit().

/*
 * InitBufferPoolAccess — per-backend initialization of the private buffer
 * refcount bookkeeping (called from BaseInit(), per the cross-reference above).
 * Excerpt from bufmgr.c; the leading numbers are the original source line
 * numbers preserved by the Doxygen rendering.
 */
2599 {
2600  HASHCTL hash_ctl;
2601 
/* Zero the small fixed-size fast-path array of private refcount entries. */
2602  memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
2603 
/* Overflow hash table keyed by buffer id (int32), one entry per pinned buffer. */
2604  hash_ctl.keysize = sizeof(int32);
2605  hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
2606 
/* 100 is only the initial size hint; dynahash grows the table as needed. */
2607  PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
2608  HASH_ELEM | HASH_BLOBS);
2609 }
struct PrivateRefCountEntry PrivateRefCountEntry
#define HASH_ELEM
Definition: hsearch.h:95
Size entrysize
Definition: hsearch.h:76
signed int int32
Definition: c.h:429
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:349
static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES]
Definition: bufmgr.c:197
#define HASH_BLOBS
Definition: hsearch.h:97
Size keysize
Definition: hsearch.h:75
static HTAB * PrivateRefCountHash
Definition: bufmgr.c:198

◆ InitBufferPoolBackend()

void InitBufferPoolBackend ( void  )

Definition at line 2621 of file bufmgr.c.

References AtProcExit_Buffers(), and on_shmem_exit().

Referenced by AuxiliaryProcessMain(), and InitPostgres().

/*
 * InitBufferPoolBackend — registers buffer-pool cleanup for backend exit.
 * NOTE(review): the single statement at original line 2623 was dropped by the
 * Doxygen extraction (hyperlinked lines lost their text); per the
 * "References" list above it was the on_shmem_exit(AtProcExit_Buffers, ...)
 * registration — confirm against bufmgr.c.
 */
2622 {
2624 }
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:361
static void AtProcExit_Buffers(int code, Datum arg)
Definition: bufmgr.c:2631

◆ IsBufferCleanupOK()

bool IsBufferCleanupOK ( Buffer  buffer)

Definition at line 4313 of file bufmgr.c.

References Assert, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsValid, GetBufferDescriptor, GetPrivateRefCount(), LocalRefCount, LockBufHdr(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), and UnlockBufHdr.

Referenced by _hash_doinsert(), _hash_expandtable(), _hash_splitbucket(), hash_xlog_split_allocate_page(), and hashbucketcleanup().

/*
 * IsBufferCleanupOK — returns true iff the caller could legitimately perform
 * a "cleanup" operation on the buffer right now: the buffer must be pinned
 * exactly once (by us) and, for shared buffers, exclusively content-locked.
 * Excerpt from bufmgr.c with original line numbers preserved.
 */
4314 {
4315  BufferDesc *bufHdr;
4316  uint32 buf_state;
4317 
4318  Assert(BufferIsValid(buffer));
4319 
/* Local (temp-table) buffers: only our own backend can pin them. */
4320  if (BufferIsLocal(buffer))
4321  {
4322  /* There should be exactly one pin */
4323  if (LocalRefCount[-buffer - 1] != 1)
4324  return false;
4325  /* Nobody else to wait for */
4326  return true;
4327  }
4328 
4329  /* There should be exactly one local pin */
4330  if (GetPrivateRefCount(buffer) != 1)
4331  return false;
4332 
4333  bufHdr = GetBufferDescriptor(buffer - 1);
4334 
4335  /* caller must hold exclusive lock on buffer */
/*
 * NOTE(review): original line 4336 was dropped by the extraction; per the
 * "References" list it opened the Assert(LWLockHeldByMeInMode(
 * BufferDescriptorGetContentLock(bufHdr), ...) call completed on the next
 * line — confirm against bufmgr.c.
 */
4337  LW_EXCLUSIVE));
4338 
/* Take the buffer header spinlock to read the shared refcount safely. */
4339  buf_state = LockBufHdr(bufHdr);
4340 
4341  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4342  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4343  {
4344  /* pincount is OK. */
4345  UnlockBufHdr(bufHdr, buf_state);
4346  return true;
4347  }
4348 
/* Someone else also holds a pin: cleanup not possible now. */
4349  UnlockBufHdr(bufHdr, buf_state);
4350  return false;
4351 }
bool LWLockHeldByMeInMode(LWLock *l, LWLockMode mode)
Definition: lwlock.c:1950
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4603
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define UnlockBufHdr(desc, s)
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:49

◆ LockBuffer()

void LockBuffer ( Buffer  buffer,
int  mode 
)

Definition at line 4023 of file bufmgr.c.

References Assert, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, elog, ERROR, GetBufferDescriptor, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), and LWLockRelease().

Referenced by _bt_lockbuf(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_getbuf(), _hash_getbuf_with_strategy(), _hash_getcachedmetap(), _hash_getnewbuf(), _hash_init(), _hash_kill_items(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), blbulkdelete(), blgetbitmap(), blinsert(), BloomNewBuffer(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_page_cleanup(), brinbuild(), brinbuildempty(), bringetbitmap(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinLockRevmapPageForUpdate(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinsummarize(), bt_metap(), bt_page_items_internal(), bt_page_stats_internal(), bt_recheck_sibling_links(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), ConditionalLockBufferForCleanup(), count_nondeletable_pages(), entryLoadMoreItems(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), get_raw_page_internal(), GetVisibilityMapPins(), ginbuildempty(), ginbulkdelete(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistbuildempty(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfinishsplit(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbucketcleanup(), 
hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_page_prune_opt(), heap_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_for_cluster(), heapam_scan_analyze_next_block(), heapam_scan_bitmap_next_block(), heapam_scan_sample_next_tuple(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), initBloomState(), lazy_scan_heap(), lazy_vacuum_heap_rel(), LockBufferForCleanup(), log_newpage_range(), palloc_btree_page(), pg_visibility(), pgrowlocks(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationGetBufferForTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), shiftList(), spgdoinsert(), spgGetCache(), SpGistNewBuffer(), spgprocesspending(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), summarize_range(), UnlockReleaseBuffer(), verify_heapam(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), XLogReadBufferExtended(), XLogReadBufferForRedoExtended(), and XLogRecordPageWithFreeSpace().

/*
 * LockBuffer — acquire or release the content lock of a (pinned) buffer in
 * the requested mode. Excerpt from bufmgr.c with original line numbers.
 * NOTE(review): the extraction dropped the hyperlinked statement lines 4034,
 * 4036 and 4038 — per the "References" list these were the
 * LWLockRelease / LWLockAcquire(..., LW_SHARED) / LWLockAcquire(...,
 * LW_EXCLUSIVE) calls on BufferDescriptorGetContentLock(buf); confirm
 * against bufmgr.c.
 */
4024 {
4025  BufferDesc *buf;
4026 
4027  Assert(BufferIsPinned(buffer));
4028  if (BufferIsLocal(buffer))
4029  return; /* local buffers need no lock */
4030 
4031  buf = GetBufferDescriptor(buffer - 1);
4032 
4033  if (mode == BUFFER_LOCK_UNLOCK)
4035  else if (mode == BUFFER_LOCK_SHARE)
4037  else if (mode == BUFFER_LOCK_EXCLUSIVE)
4039  else
4040  elog(ERROR, "unrecognized buffer lock mode: %d", mode);
4041 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1816
#define ERROR
Definition: elog.h:46
static char * buf
Definition: pg_test_fsync.c:68
#define GetBufferDescriptor(id)
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1203
#define elog(elevel,...)
Definition: elog.h:232
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97

◆ LockBufferForCleanup()

void LockBufferForCleanup ( Buffer  buffer)

Definition at line 4080 of file bufmgr.c.

References Assert, BM_PIN_COUNT_WAITER, BUF_STATE_GET_REFCOUNT, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsPinned, DeadlockTimeout, elog, ERROR, get_ps_display(), GetBufferDescriptor, GetCurrentTimestamp(), GetPrivateRefCount(), InHotStandby, LocalRefCount, LockBuffer(), LockBufHdr(), log_recovery_conflict_waits, LogRecoveryConflict(), MyProcPid, now(), palloc(), pfree(), PG_WAIT_BUFFER_PIN, PROCSIG_RECOVERY_CONFLICT_BUFFERPIN, ProcWaitForSignal(), ResolveRecoveryConflictWithBufferPin(), set_ps_display(), SetStartupBufferPinWaitBufId(), TimestampDifferenceExceeds(), UnlockBufHdr, update_process_title, and BufferDesc::wait_backend_pid.

Referenced by _bt_upgradelockbufcleanup(), ginVacuumPostingTree(), hashbulkdelete(), heap_force_common(), lazy_scan_heap(), ReadBuffer_common(), and XLogReadBufferForRedoExtended().

/*
 * LockBufferForCleanup — acquire the buffer's content lock exclusively AND
 * wait until we are the only pinner, looping via the BM_PIN_COUNT_WAITER
 * protocol. In hot standby the wait also handles recovery-conflict
 * resolution and logging. Excerpt from bufmgr.c with original line numbers.
 * NOTE(review): the extraction dropped the hyperlinked statement lines 4112,
 * 4127, 4179, 4184, 4200, 4202 and 4205 (noted inline below); confirm the
 * reconstructed intent against bufmgr.c.
 */
4081 {
4082  BufferDesc *bufHdr;
4083  char *new_status = NULL;
4084  TimestampTz waitStart = 0;
4085  bool logged_recovery_conflict = false;
4086 
4087  Assert(BufferIsPinned(buffer));
4088  Assert(PinCountWaitBuf == NULL);
4089 
4090  if (BufferIsLocal(buffer))
4091  {
4092  /* There should be exactly one pin */
4093  if (LocalRefCount[-buffer - 1] != 1)
4094  elog(ERROR, "incorrect local pin count: %d",
4095  LocalRefCount[-buffer - 1]);
4096  /* Nobody else to wait for */
4097  return;
4098  }
4099 
4100  /* There should be exactly one local pin */
4101  if (GetPrivateRefCount(buffer) != 1)
4102  elog(ERROR, "incorrect local pin count: %d",
4103  GetPrivateRefCount(buffer));
4104 
4105  bufHdr = GetBufferDescriptor(buffer - 1);
4106 
4107  for (;;)
4108  {
4109  uint32 buf_state;
4110 
4111  /* Try to acquire lock */
/* NOTE(review): dropped line 4112 — presumably LockBuffer(buffer,
 * BUFFER_LOCK_EXCLUSIVE); matching the unlock calls below. */
4113  buf_state = LockBufHdr(bufHdr);
4114 
4115  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4116  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4117  {
4118  /* Successfully acquired exclusive lock with pincount 1 */
4119  UnlockBufHdr(bufHdr, buf_state);
4120 
4121  /*
4122  * Emit the log message if recovery conflict on buffer pin was
4123  * resolved but the startup process waited longer than
4124  * deadlock_timeout for it.
4125  */
4126  if (logged_recovery_conflict)
/* NOTE(review): dropped line 4127 — the LogRecoveryConflict(
 * PROCSIG_RECOVERY_CONFLICT_BUFFERPIN, ...) call head. */
4128  waitStart, GetCurrentTimestamp(),
4129  NULL, false);
4130 
4131  /* Report change to non-waiting status */
4132  if (new_status)
4133  {
4134  set_ps_display(new_status);
4135  pfree(new_status);
4136  }
4137  return;
4138  }
4139  /* Failed, so mark myself as waiting for pincount 1 */
4140  if (buf_state & BM_PIN_COUNT_WAITER)
4141  {
4142  UnlockBufHdr(bufHdr, buf_state);
4143  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4144  elog(ERROR, "multiple backends attempting to wait for pincount 1");
4145  }
4146  bufHdr->wait_backend_pid = MyProcPid;
4147  PinCountWaitBuf = bufHdr;
4148  buf_state |= BM_PIN_COUNT_WAITER;
4149  UnlockBufHdr(bufHdr, buf_state);
4150  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4151 
4152  /* Wait to be signaled by UnpinBuffer() */
4153  if (InHotStandby)
4154  {
4155  /* Report change to waiting status */
4156  if (update_process_title && new_status == NULL)
4157  {
4158  const char *old_status;
4159  int len;
4160 
/* Append " waiting" (8 chars) to the ps display; the saved copy is
 * truncated back afterwards so it can be restored on exit. */
4161  old_status = get_ps_display(&len);
4162  new_status = (char *) palloc(len + 8 + 1);
4163  memcpy(new_status, old_status, len);
4164  strcpy(new_status + len, " waiting");
4165  set_ps_display(new_status);
4166  new_status[len] = '\0'; /* truncate off " waiting" */
4167  }
4168 
4169  /*
4170  * Emit the log message if the startup process is waiting longer
4171  * than deadlock_timeout for recovery conflict on buffer pin.
4172  *
4173  * Skip this if first time through because the startup process has
4174  * not started waiting yet in this case. So, the wait start
4175  * timestamp is set after this logic.
4176  */
4177  if (waitStart != 0 && !logged_recovery_conflict)
4178  {
/* NOTE(review): dropped line 4179 — presumably the declaration
 * TimestampTz now = GetCurrentTimestamp(); used just below. */
4180 
4181  if (TimestampDifferenceExceeds(waitStart, now,
4182  DeadlockTimeout))
4183  {
/* NOTE(review): dropped line 4184 — the LogRecoveryConflict(...) call head. */
4185  waitStart, now, NULL, true);
4186  logged_recovery_conflict = true;
4187  }
4188  }
4189 
4190  /*
4191  * Set the wait start timestamp if logging is enabled and first
4192  * time through.
4193  */
4194  if (log_recovery_conflict_waits && waitStart == 0)
4195  waitStart = GetCurrentTimestamp();
4196 
4197  /* Publish the bufid that Startup process waits on */
4198  SetStartupBufferPinWaitBufId(buffer - 1);
4199  /* Set alarm and then wait to be signaled by UnpinBuffer() */
/* NOTE(review): dropped line 4200 — presumably
 * ResolveRecoveryConflictWithBufferPin(); per the "References" list. */
4201  /* Reset the published bufid */
/* NOTE(review): dropped line 4202 — presumably
 * SetStartupBufferPinWaitBufId(-1); undoing the publish above. */
4203  }
4204  else
/* NOTE(review): dropped line 4205 — presumably
 * ProcWaitForSignal(PG_WAIT_BUFFER_PIN); per the "References" list. */
4206 
4207  /*
4208  * Remove flag marking us as waiter. Normally this will not be set
4209  * anymore, but ProcWaitForSignal() can return for other signals as
4210  * well. We take care to only reset the flag if we're the waiter, as
4211  * theoretically another backend could have started waiting. That's
4212  * impossible with the current usages due to table level locking, but
4213  * better be safe.
4214  */
4215  buf_state = LockBufHdr(bufHdr);
4216  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4217  bufHdr->wait_backend_pid == MyProcPid)
4218  buf_state &= ~BM_PIN_COUNT_WAITER;
4219  UnlockBufHdr(bufHdr, buf_state);
4220 
4221  PinCountWaitBuf = NULL;
4222  /* Loop back and try again */
4223  }
4224 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
int MyProcPid
Definition: globals.c:43
int wait_backend_pid
bool update_process_title
Definition: ps_status.c:36
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1580
int64 TimestampTz
Definition: timestamp.h:39
void LogRecoveryConflict(ProcSignalReason reason, TimestampTz wait_start, TimestampTz now, VirtualTransactionId *wait_list, bool still_waiting)
Definition: standby.c:249
#define InHotStandby
Definition: xlog.h:74
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1711
void set_ps_display(const char *activity)
Definition: ps_status.c:349
void pfree(void *pointer)
Definition: mcxt.c:1169
#define ERROR
Definition: elog.h:46
void ResolveRecoveryConflictWithBufferPin(void)
Definition: standby.c:753
const char * get_ps_display(int *displen)
Definition: ps_status.c:430
void SetStartupBufferPinWaitBufId(int bufid)
Definition: proc.c:650
#define GetBufferDescriptor(id)
#define PG_WAIT_BUFFER_PIN
Definition: wait_event.h:20
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
bool log_recovery_conflict_waits
Definition: standby.c:42
void ProcWaitForSignal(uint32 wait_event_info)
Definition: proc.c:1896
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4023
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4603
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
void * palloc(Size size)
Definition: mcxt.c:1062
#define UnlockBufHdr(desc, s)
#define elog(elevel,...)
Definition: elog.h:232
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:165
int DeadlockTimeout
Definition: proc.c:60
Datum now(PG_FUNCTION_ARGS)
Definition: timestamp.c:1544
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:49
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:65

◆ MarkBufferDirty()

void MarkBufferDirty ( Buffer  buffer)

Definition at line 1562 of file bufmgr.c.

References Assert, BM_DIRTY, BM_JUST_DIRTIED, BM_LOCKED, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, BufferIsValid, elog, ERROR, GetBufferDescriptor, LW_EXCLUSIVE, LWLockHeldByMeInMode(), MarkLocalBufferDirty(), pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pgBufferUsage, BufferUsage::shared_blks_dirtied, BufferDesc::state, VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, VacuumPageDirty, and WaitBufHdrUnlocked().

Referenced by _bt_clear_incomplete_split(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newroot(), _bt_restore_meta(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), brin_doinsert(), brin_doupdate(), brin_initialize_empty_new_buffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinRevmapDesummarizeRange(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), createPostingTree(), do_setval(), doPickSplit(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), generic_redo(), GenericXLogFinish(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginDeletePage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistbuild(), gistbuildempty(), gistdeletepage(), gistplacetopage(), gistprunepage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistvacuumpage(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), 
hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_prune(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), lazy_scan_heap(), lazy_scan_prune(), lazy_vacuum_heap_page(), log_newpage_range(), moveLeafs(), nextval_internal(), RelationGetBufferForTuple(), revmap_physical_extend(), saveNodeLink(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), SpGistUpdateMetaPage(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), writeListPage(), and XLogReadBufferForRedoExtended().

/*
 * MarkBufferDirty — mark a buffer dirty after modifying its contents. The
 * caller must hold a pin and (per the dropped Assert, see note below) an
 * exclusive content lock. Sets BM_DIRTY | BM_JUST_DIRTIED via an atomic
 * compare-and-exchange loop, and does vacuum cost accounting on the
 * clean->dirty transition. Excerpt from bufmgr.c with original line numbers.
 */
1563 {
1564  BufferDesc *bufHdr;
1565  uint32 buf_state;
1566  uint32 old_buf_state;
1567 
1568  if (!BufferIsValid(buffer))
1569  elog(ERROR, "bad buffer ID: %d", buffer);
1570 
/* Local buffers are handled entirely in localbuf.c. */
1571  if (BufferIsLocal(buffer))
1572  {
1573  MarkLocalBufferDirty(buffer);
1574  return;
1575  }
1576 
1577  bufHdr = GetBufferDescriptor(buffer - 1);
1578 
1579  Assert(BufferIsPinned(buffer));
/* NOTE(review): dropped line 1580 — per the "References" list this opened
 * Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
 * completed by the next line; confirm against bufmgr.c. */
1581  LW_EXCLUSIVE));
1582 
/* Lock-free CAS loop: retry while the header spinlock bit (BM_LOCKED) is
 * set or another backend changes the state word under us. */
1583  old_buf_state = pg_atomic_read_u32(&bufHdr->state);
1584  for (;;)
1585  {
1586  if (old_buf_state & BM_LOCKED)
1587  old_buf_state = WaitBufHdrUnlocked(bufHdr);
1588 
1589  buf_state = old_buf_state;
1590 
1591  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1592  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
1593 
1594  if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
1595  buf_state))
1596  break;
1597  }
1598 
1599  /*
1600  * If the buffer was not dirty already, do vacuum accounting.
1601  */
1602  if (!(old_buf_state & BM_DIRTY))
1603  {
1604  VacuumPageDirty++;
/* NOTE(review): dropped line 1605 — presumably
 * pgBufferUsage.shared_blks_dirtied++; per the "References" list. */
1606  if (VacuumCostActive)
/* NOTE(review): dropped line 1607 — presumably
 * VacuumCostBalance += VacuumCostPageDirty; per the "References" list. */
1608  }
1609 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
bool LWLockHeldByMeInMode(LWLock *l, LWLockMode mode)
Definition: lwlock.c:1950
int VacuumCostBalance
Definition: globals.c:151
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:311
int64 VacuumPageDirty
Definition: globals.c:149
#define BM_DIRTY
Definition: buf_internals.h:59
int VacuumCostPageDirty
Definition: globals.c:143
#define ERROR
Definition: elog.h:46
#define GetBufferDescriptor(id)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:64
long shared_blks_dirtied
Definition: instrument.h:23
unsigned int uint32
Definition: c.h:441
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:286
#define BM_LOCKED
Definition: buf_internals.h:58
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
static uint32 WaitBufHdrUnlocked(BufferDesc *buf)
Definition: bufmgr.c:4631
#define elog(elevel,...)
Definition: elog.h:232
pg_atomic_uint32 state
BufferUsage pgBufferUsage
Definition: instrument.c:20
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:49
bool VacuumCostActive
Definition: globals.c:152
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ MarkBufferDirtyHint()

void MarkBufferDirtyHint ( Buffer  buffer,
bool  buffer_std 
)

Definition at line 3854 of file bufmgr.c.

References Assert, BM_DIRTY, BM_JUST_DIRTIED, BM_PERMANENT, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferGetPage, BufferIsLocal, BufferIsValid, PGPROC::delayChkpt, elog, ERROR, GetBufferDescriptor, GetPrivateRefCount(), InvalidXLogRecPtr, LockBufHdr(), LWLockHeldByMe(), MarkLocalBufferDirty(), MyProc, PageSetLSN, pg_atomic_read_u32(), pgBufferUsage, RecoveryInProgress(), RelFileNodeSkippingWAL(), buftag::rnode, BufferUsage::shared_blks_dirtied, BufferDesc::state, BufferDesc::tag, UnlockBufHdr, VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, VacuumPageDirty, XLogHintBitIsNeeded, XLogRecPtrIsInvalid, and XLogSaveBufferForHint().

Referenced by _bt_check_unique(), _bt_killitems(), _hash_kill_items(), brin_start_evacuating_page(), btvacuumpage(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), gistkillitems(), heap_page_prune(), read_seq_tuple(), SetHintBits(), and XLogRecordPageWithFreeSpace().

/*
 * MarkBufferDirtyHint — mark a buffer dirty for a hint-bit update, which is
 * allowed under a mere share lock. When checksums/wal_log_hints require it
 * (XLogHintBitIsNeeded), a full-page image is WAL-logged first so a torn
 * write cannot corrupt the page. Excerpt from bufmgr.c with original line
 * numbers; dropped hyperlinked lines are flagged inline.
 */
3855 {
3856  BufferDesc *bufHdr;
3857  Page page = BufferGetPage(buffer);
3858 
3859  if (!BufferIsValid(buffer))
3860  elog(ERROR, "bad buffer ID: %d", buffer);
3861 
3862  if (BufferIsLocal(buffer))
3863  {
3864  MarkLocalBufferDirty(buffer);
3865  return;
3866  }
3867 
3868  bufHdr = GetBufferDescriptor(buffer - 1);
3869 
3870  Assert(GetPrivateRefCount(buffer) > 0);
3871  /* here, either share or exclusive lock is OK */
/* NOTE(review): dropped line 3872 — per the "References" list, presumably
 * Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr))); */
3873 
3874  /*
3875  * This routine might get called many times on the same page, if we are
3876  * making the first scan after commit of an xact that added/deleted many
3877  * tuples. So, be as quick as we can if the buffer is already dirty. We
3878  * do this by not acquiring spinlock if it looks like the status bits are
3879  * already set. Since we make this test unlocked, there's a chance we
3880  * might fail to notice that the flags have just been cleared, and failed
3881  * to reset them, due to memory-ordering issues. But since this function
3882  * is only intended to be used in cases where failing to write out the
3883  * data would be harmless anyway, it doesn't really matter.
3884  */
3885  if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
/* NOTE(review): dropped line 3886 — the right-hand side of the comparison,
 * presumably (BM_DIRTY | BM_JUST_DIRTIED)). */
3887  {
/* NOTE(review): dropped line 3888 — presumably
 * XLogRecPtr lsn = InvalidXLogRecPtr; used below. */
3889  bool dirtied = false;
3890  bool delayChkpt = false;
3891  uint32 buf_state;
3892 
3893  /*
3894  * If we need to protect hint bit updates from torn writes, WAL-log a
3895  * full page image of the page. This full page image is only necessary
3896  * if the hint bit update is the first change to the page since the
3897  * last checkpoint.
3898  *
3899  * We don't check full_page_writes here because that logic is included
3900  * when we call XLogInsert() since the value changes dynamically.
3901  */
3902  if (XLogHintBitIsNeeded() &&
3903  (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
3904  {
3905  /*
3906  * If we must not write WAL, due to a relfilenode-specific
3907  * condition or being in recovery, don't dirty the page. We can
3908  * set the hint, just not dirty the page as a result so the hint
3909  * is lost when we evict the page or shutdown.
3910  *
3911  * See src/backend/storage/page/README for longer discussion.
3912  */
3913  if (RecoveryInProgress() ||
3914  RelFileNodeSkippingWAL(bufHdr->tag.rnode))
3915  return;
3916 
3917  /*
3918  * If the block is already dirty because we either made a change
3919  * or set a hint already, then we don't need to write a full page
3920  * image. Note that aggressive cleaning of blocks dirtied by hint
3921  * bit setting would increase the call rate. Bulk setting of hint
3922  * bits would reduce the call rate...
3923  *
3924  * We must issue the WAL record before we mark the buffer dirty.
3925  * Otherwise we might write the page before we write the WAL. That
3926  * causes a race condition, since a checkpoint might occur between
3927  * writing the WAL record and marking the buffer dirty. We solve
3928  * that with a kluge, but one that is already in use during
3929  * transaction commit to prevent race conditions. Basically, we
3930  * simply prevent the checkpoint WAL record from being written
3931  * until we have marked the buffer dirty. We don't start the
3932  * checkpoint flush until we have marked dirty, so our checkpoint
3933  * must flush the change to disk successfully or the checkpoint
3934  * never gets written, so crash recovery will fix.
3935  *
3936  * It's possible we may enter here without an xid, so it is
3937  * essential that CreateCheckpoint waits for virtual transactions
3938  * rather than full transactionids.
3939  */
3940  MyProc->delayChkpt = delayChkpt = true;
3941  lsn = XLogSaveBufferForHint(buffer, buffer_std);
3942  }
3943 
3944  buf_state = LockBufHdr(bufHdr);
3945 
3946  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
3947 
3948  if (!(buf_state & BM_DIRTY))
3949  {
3950  dirtied = true; /* Means "will be dirtied by this action" */
3951 
3952  /*
3953  * Set the page LSN if we wrote a backup block. We aren't supposed
3954  * to set this when only holding a share lock but as long as we
3955  * serialise it somehow we're OK. We choose to set LSN while
3956  * holding the buffer header lock, which causes any reader of an
3957  * LSN who holds only a share lock to also obtain a buffer header
3958  * lock before using PageGetLSN(), which is enforced in
3959  * BufferGetLSNAtomic().
3960  *
3961  * If checksums are enabled, you might think we should reset the
3962  * checksum here. That will happen when the page is written
3963  * sometime later in this checkpoint cycle.
3964  */
3965  if (!XLogRecPtrIsInvalid(lsn))
3966  PageSetLSN(page, lsn);
3967  }
3968 
3969  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
3970  UnlockBufHdr(bufHdr, buf_state);
3971 
/* Re-allow the checkpointer once the buffer is marked dirty. */
3972  if (delayChkpt)
3973  MyProc->delayChkpt = false;
3974 
3975  if (dirtied)
3976  {
3977  VacuumPageDirty++;
/* NOTE(review): dropped line 3978 — presumably
 * pgBufferUsage.shared_blks_dirtied++; per the "References" list. */
3979  if (VacuumCostActive)
/* NOTE(review): dropped line 3980 — presumably
 * VacuumCostBalance += VacuumCostPageDirty; per the "References" list. */
3981  }
3982  }
3983 }
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
#define BM_PERMANENT
Definition: buf_internals.h:67
int VacuumCostBalance
Definition: globals.c:151
bool LWLockHeldByMe(LWLock *l)
Definition: lwlock.c:1932
PGPROC * MyProc
Definition: proc.c:68
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
Definition: xloginsert.c:917
int64 VacuumPageDirty
Definition: globals.c:149
bool RecoveryInProgress(void)
Definition: xlog.c:8237
#define BM_DIRTY
Definition: buf_internals.h:59
int VacuumCostPageDirty
Definition: globals.c:143
#define ERROR
Definition: elog.h:46
bool delayChkpt
Definition: proc.h:187
#define GetBufferDescriptor(id)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:64
long shared_blks_dirtied
Definition: instrument.h:23
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:286
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4603
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:93
bool RelFileNodeSkippingWAL(RelFileNode rnode)
Definition: storage.c:497
BufferTag tag
#define UnlockBufHdr(desc, s)
#define elog(elevel,...)
Definition: elog.h:232
pg_atomic_uint32 state
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
#define XLogHintBitIsNeeded()
Definition: xlog.h:212
Pointer Page
Definition: bufpage.h:78
BufferUsage pgBufferUsage
Definition: instrument.c:20
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:49
bool VacuumCostActive
Definition: globals.c:152
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ PrefetchBuffer()

PrefetchBufferResult PrefetchBuffer ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

Definition at line 587 of file bufmgr.c.

References Assert, BlockNumberIsValid, ereport, errcode(), errmsg(), ERROR, PrefetchLocalBuffer(), PrefetchSharedBuffer(), RelationData::rd_smgr, RELATION_IS_OTHER_TEMP, RelationIsValid, RelationOpenSmgr, and RelationUsesLocalBuffers.

Referenced by acquire_sample_rows(), BitmapPrefetch(), count_nondeletable_pages(), HeapTupleHeaderAdvanceLatestRemovedXid(), and pg_prewarm().

/*
 * PrefetchBuffer — initiate an asynchronous read of the given block of the
 * relation, dispatching to the local- or shared-buffer implementation
 * depending on whether the relation uses local (temp) buffers. This excerpt
 * (bufmgr.c lines 588-611) is complete in the Doxygen rendering.
 */
588 {
589  Assert(RelationIsValid(reln));
590  Assert(BlockNumberIsValid(blockNum));
591 
592  /* Open it at the smgr level if not already done */
593  RelationOpenSmgr(reln);
594 
595  if (RelationUsesLocalBuffers(reln))
596  {
597  /* see comments in ReadBufferExtended */
/* Another session's temp table cannot be read here: its pages may only
 * exist in that session's local buffers. */
598  if (RELATION_IS_OTHER_TEMP(reln))
599  ereport(ERROR,
600  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
601  errmsg("cannot access temporary tables of other sessions")));
602 
603  /* pass it off to localbuf.c */
604  return PrefetchLocalBuffer(reln->rd_smgr, forkNum, blockNum);
605  }
606  else
607  {
608  /* pass it to the shared buffer version */
609  return PrefetchSharedBuffer(reln->rd_smgr, forkNum, blockNum);
610  }
611 }
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition: localbuf.c:64
int errcode(int sqlerrcode)
Definition: elog.c:698
#define RelationOpenSmgr(relation)
Definition: rel.h:529
#define ERROR
Definition: elog.h:46
#define RelationIsValid(relation)
Definition: rel.h:445
PrefetchBufferResult PrefetchSharedBuffer(SMgrRelation smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:500
#define ereport(elevel,...)
Definition: elog.h:157
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
#define Assert(condition)
Definition: c.h:804
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:615
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:594
int errmsg(const char *fmt,...)
Definition: elog.c:909

◆ PrefetchSharedBuffer()

PrefetchBufferResult PrefetchSharedBuffer ( struct SMgrRelationData smgr_reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

◆ PrintBufferLeakWarning()

void PrintBufferLeakWarning ( Buffer  buffer)

Definition at line 2691 of file bufmgr.c.

References Assert, buftag::blockNum, buf, BUF_FLAG_MASK, BUF_STATE_GET_REFCOUNT, BufferIsLocal, BufferIsValid, elog, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, GetPrivateRefCount(), InvalidBackendId, LocalRefCount, MyBackendId, pfree(), pg_atomic_read_u32(), relpathbackend, buftag::rnode, BufferDesc::state, BufferDesc::tag, and WARNING.

Referenced by CheckForBufferLeaks(), CheckForLocalBufferLeaks(), and ResourceOwnerReleaseInternal().

2692 {
2693  BufferDesc *buf;
2694  int32 loccount;
2695  char *path;
2696  BackendId backend;
2697  uint32 buf_state;
2698 
2699  Assert(BufferIsValid(buffer));
2700  if (BufferIsLocal(buffer))
2701  {
2702  buf = GetLocalBufferDescriptor(-buffer - 1);
2703  loccount = LocalRefCount[-buffer - 1];
2704  backend = MyBackendId;
2705  }
2706  else
2707  {
2708  buf = GetBufferDescriptor(buffer - 1);
2709  loccount = GetPrivateRefCount(buffer);
2710  backend = InvalidBackendId;
2711  }
2712 
2713  /* theoretically we should lock the bufhdr here */
2714  path = relpathbackend(buf->tag.rnode, backend, buf->tag.forkNum);
2715  buf_state = pg_atomic_read_u32(&buf->state);
2716  elog(WARNING,
2717  "buffer refcount leak: [%03d] "
2718  "(rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
2719  buffer, path,
2720  buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
2721  BUF_STATE_GET_REFCOUNT(buf_state), loccount);
2722  pfree(path);
2723 }
BackendId MyBackendId
Definition: globals.c:84
ForkNumber forkNum
Definition: buf_internals.h:94
#define GetLocalBufferDescriptor(id)
signed int int32
Definition: c.h:429
void pfree(void *pointer)
Definition: mcxt.c:1169
#define BUF_FLAG_MASK
Definition: buf_internals.h:46
static char * buf
Definition: pg_test_fsync.c:68
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
#define WARNING
Definition: elog.h:40
#define InvalidBackendId
Definition: backendid.h:23
int BackendId
Definition: backendid.h:21
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:95
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:93
BufferTag tag
#define elog(elevel,...)
Definition: elog.h:232
pg_atomic_uint32 state
#define relpathbackend(rnode, backend, forknum)
Definition: relpath.h:78
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:49
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ ReadBuffer()

Buffer ReadBuffer ( Relation  reln,
BlockNumber  blockNum 
)

Definition at line 697 of file bufmgr.c.

References MAIN_FORKNUM, RBM_NORMAL, and ReadBufferExtended().

Referenced by _bt_getbuf(), _bt_search_insert(), _hash_getbuf(), _hash_getbuf_with_condlock_cleanup(), blbulkdelete(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brinbuild(), brinGetStats(), brinGetTupleForHeapBlock(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), bt_metap(), bt_page_items_internal(), bt_page_stats_internal(), fill_seq_with_data(), ginFindLeafPage(), ginFindParents(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), GinNewBuffer(), ginStepRight(), ginUpdateStats(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistGetMaxLevel(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_lock_tuple(), heap_update(), initBloomState(), pg_visibility(), pgstatginindex_internal(), read_seq_tuple(), RelationGetBufferForTuple(), ReleaseAndReadBuffer(), revmap_get_buffer(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), shiftList(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), and spgWalk().

698 {
699  return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
700 }
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:744

◆ ReadBufferExtended()

Buffer ReadBufferExtended ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy 
)

Definition at line 744 of file bufmgr.c.

References buf, ereport, errcode(), errmsg(), ERROR, pgstat_count_buffer_hit, pgstat_count_buffer_read, RelationData::rd_rel, RelationData::rd_smgr, ReadBuffer_common(), RELATION_IS_OTHER_TEMP, and RelationOpenSmgr.

Referenced by _hash_getbuf_with_strategy(), _hash_getinitbuf(), _hash_getnewbuf(), autoprewarm_database_main(), blbulkdelete(), blgetbitmap(), blvacuumcleanup(), brin_vacuum_scan(), brinbuildempty(), bt_recheck_sibling_links(), btvacuumpage(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), fsm_readbuf(), get_raw_page_internal(), ginbuildempty(), ginbulkdelete(), ginDeletePage(), ginScanToDelete(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistbuildempty(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbulkdelete(), heapam_scan_analyze_next_block(), heapgetpage(), lazy_scan_heap(), lazy_vacuum_heap_rel(), log_newpage_range(), palloc_btree_page(), pg_prewarm(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstathashindex(), pgstatindex_impl(), ReadBuffer(), ReadBufferBI(), spgprocesspending(), spgvacuumpage(), statapprox_heap(), verify_heapam(), and vm_readbuf().

746 {
747  bool hit;
748  Buffer buf;
749 
750  /* Open it at the smgr level if not already done */
751  RelationOpenSmgr(reln);
752 
753  /*
754  * Reject attempts to read non-local temporary relations; we would be
755  * likely to get wrong data since we have no visibility into the owning
756  * session's local buffers.
757  */
758  if (RELATION_IS_OTHER_TEMP(reln))
759  ereport(ERROR,
760  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
761  errmsg("cannot access temporary tables of other sessions")));
762 
763  /*
764  * Read the buffer, and update pgstat counters to reflect a cache hit or
765  * miss.
766  */
768  buf = ReadBuffer_common(reln->rd_smgr, reln->rd_rel->relpersistence,
769  forkNum, blockNum, mode, strategy, &hit);
770  if (hit)
772  return buf;
773 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
int errcode(int sqlerrcode)
Definition: elog.c:698
Form_pg_class rd_rel
Definition: rel.h:110
#define RelationOpenSmgr(relation)
Definition: rel.h:529
#define ERROR
Definition: elog.h:46
#define pgstat_count_buffer_read(rel)
Definition: pgstat.h:1074
static char * buf
Definition: pg_test_fsync.c:68
#define pgstat_count_buffer_hit(rel)
Definition: pgstat.h:1079
static Buffer ReadBuffer_common(SMgrRelation reln, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit)
Definition: bufmgr.c:807
#define ereport(elevel,...)
Definition: elog.h:157
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:615
int errmsg(const char *fmt,...)
Definition: elog.c:909
int Buffer
Definition: buf.h:23

◆ ReadBufferWithoutRelcache()

Buffer ReadBufferWithoutRelcache ( RelFileNode  rnode,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy 
)

Definition at line 786 of file bufmgr.c.

References Assert, InRecovery, InvalidBackendId, ReadBuffer_common(), and smgropen().

Referenced by XLogReadBufferExtended().

789 {
790  bool hit;
791 
792  SMgrRelation smgr = smgropen(rnode, InvalidBackendId);
793 
795 
796  return ReadBuffer_common(smgr, RELPERSISTENCE_PERMANENT, forkNum, blockNum,
797  mode, strategy, &hit);
798 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
bool InRecovery
Definition: xlog.c:209
SMgrRelation smgropen(RelFileNode rnode, BackendId backend)
Definition: smgr.c:146
#define InvalidBackendId
Definition: backendid.h:23
static Buffer ReadBuffer_common(SMgrRelation reln, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit)
Definition: bufmgr.c:807
#define Assert(condition)
Definition: c.h:804

◆ ReadRecentBuffer()

bool ReadRecentBuffer ( RelFileNode  rnode,
ForkNumber  forkNum,
BlockNumber  blockNum,
Buffer  recent_buffer 
)

Definition at line 621 of file bufmgr.c.

References Assert, BM_MAX_USAGE_COUNT, BM_VALID, BUF_STATE_GET_USAGECOUNT, BUF_USAGECOUNT_ONE, BufferIsLocal, BufferIsValid, BUFFERTAGS_EQUAL, CurrentResourceOwner, GetBufferDescriptor, GetPrivateRefCount(), INIT_BUFFERTAG, LocalRefCount, LockBufHdr(), pg_atomic_read_u32(), pg_atomic_write_u32(), PinBuffer(), PinBuffer_Locked(), ReservePrivateRefCountEntry(), ResourceOwnerEnlargeBuffers(), ResourceOwnerRememberBuffer(), BufferDesc::state, BufferDesc::tag, and UnlockBufHdr.

Referenced by XLogReadBufferExtended().

623 {
624  BufferDesc *bufHdr;
625  BufferTag tag;
626  uint32 buf_state;
627  bool have_private_ref;
628 
629  Assert(BufferIsValid(recent_buffer));
630 
633  INIT_BUFFERTAG(tag, rnode, forkNum, blockNum);
634 
635  if (BufferIsLocal(recent_buffer))
636  {
637  bufHdr = GetBufferDescriptor(-recent_buffer - 1);
638  buf_state = pg_atomic_read_u32(&bufHdr->state);
639 
640  /* Is it still valid and holding the right tag? */
641  if ((buf_state & BM_VALID) && BUFFERTAGS_EQUAL(tag, bufHdr->tag))
642  {
643  /* Bump local buffer's ref and usage counts. */
645  LocalRefCount[-recent_buffer - 1]++;
647  pg_atomic_write_u32(&bufHdr->state,
648  buf_state + BUF_USAGECOUNT_ONE);
649 
650  return true;
651  }
652  }
653  else
654  {
655  bufHdr = GetBufferDescriptor(recent_buffer - 1);
656  have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
657 
658  /*
659  * Do we already have this buffer pinned with a private reference? If
660  * so, it must be valid and it is safe to check the tag without
661  * locking. If not, we have to lock the header first and then check.
662  */
663  if (have_private_ref)
664  buf_state = pg_atomic_read_u32(&bufHdr->state);
665  else
666  buf_state = LockBufHdr(bufHdr);
667 
668  if ((buf_state & BM_VALID) && BUFFERTAGS_EQUAL(tag, bufHdr->tag))
669  {
670  /*
671  * It's now safe to pin the buffer. We can't pin first and ask
672  * questions later, because because it might confuse code paths
673  * like InvalidateBuffer() if we pinned a random non-matching
674  * buffer.
675  */
676  if (have_private_ref)
677  PinBuffer(bufHdr, NULL); /* bump pin count */
678  else
679  PinBuffer_Locked(bufHdr); /* pin for first time */
680 
681  return true;
682  }
683 
684  /* If we locked the header above, now unlock. */
685  if (!have_private_ref)
686  UnlockBufHdr(bufHdr, buf_state);
687  }
688 
689  return false;
690 }
static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
Definition: bufmgr.c:1683
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:959
#define BUF_USAGECOUNT_ONE
Definition: buf_internals.h:44
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
#define BUFFERTAGS_EQUAL(a, b)
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:946
#define BM_VALID
Definition: buf_internals.h:60
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4603
#define Assert(condition)
Definition: c.h:804
#define INIT_BUFFERTAG(a, xx_rnode, xx_forkNum, xx_blockNum)
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:1786
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define BM_MAX_USAGE_COUNT
Definition: buf_internals.h:77
BufferTag tag
#define UnlockBufHdr(desc, s)
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:215
pg_atomic_uint32 state
#define BUF_STATE_GET_USAGECOUNT(state)
Definition: buf_internals.h:50
static void pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:258
int32 * LocalRefCount
Definition: localbuf.c:45
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ RelationGetNumberOfBlocksInFork()

BlockNumber RelationGetNumberOfBlocksInFork ( Relation  relation,
ForkNumber  forkNum 
)

Definition at line 2945 of file bufmgr.c.

References Assert, RelationData::rd_rel, RelationData::rd_smgr, RelationOpenSmgr, smgrnblocks(), and table_relation_size().

Referenced by _hash_getnewbuf(), _hash_init(), autoprewarm_database_main(), get_raw_page_internal(), and pg_prewarm().

2946 {
2947  switch (relation->rd_rel->relkind)
2948  {
2949  case RELKIND_SEQUENCE:
2950  case RELKIND_INDEX:
2951  case RELKIND_PARTITIONED_INDEX:
2952  /* Open it at the smgr level if not already done */
2953  RelationOpenSmgr(relation);
2954 
2955  return smgrnblocks(relation->rd_smgr, forkNum);
2956 
2957  case RELKIND_RELATION:
2958  case RELKIND_TOASTVALUE:
2959  case RELKIND_MATVIEW:
2960  {
2961  /*
2962  * Not every table AM uses BLCKSZ wide fixed size blocks.
2963  * Therefore tableam returns the size in bytes - but for the
2964  * purpose of this routine, we want the number of blocks.
2965  * Therefore divide, rounding up.
2966  */
2967  uint64 szbytes;
2968 
2969  szbytes = table_relation_size(relation, forkNum);
2970 
2971  return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
2972  }
2973  case RELKIND_VIEW:
2974  case RELKIND_COMPOSITE_TYPE:
2975  case RELKIND_FOREIGN_TABLE:
2976  case RELKIND_PARTITIONED_TABLE:
2977  default:
2978  Assert(false);
2979  break;
2980  }
2981 
2982  return 0; /* keep compiler quiet */
2983 }
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
Form_pg_class rd_rel
Definition: rel.h:110
#define RelationOpenSmgr(relation)
Definition: rel.h:529
static uint64 table_relation_size(Relation rel, ForkNumber forkNumber)
Definition: tableam.h:1838
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:548
#define Assert(condition)
Definition: c.h:804

◆ ReleaseAndReadBuffer()

Buffer ReleaseAndReadBuffer ( Buffer  buffer,
Relation  relation,
BlockNumber  blockNum 
)

Definition at line 1625 of file bufmgr.c.

References Assert, buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufferIsValid, CurrentResourceOwner, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, LocalRefCount, MAIN_FORKNUM, RelationData::rd_node, ReadBuffer(), RelFileNodeEquals, ResourceOwnerForgetBuffer(), buftag::rnode, BufferDesc::tag, and UnpinBuffer().

Referenced by _bt_relandgetbuf(), ginFindLeafPage(), heapam_index_fetch_tuple(), and heapam_scan_bitmap_next_block().

1628 {
1629  ForkNumber forkNum = MAIN_FORKNUM;
1630  BufferDesc *bufHdr;
1631 
1632  if (BufferIsValid(buffer))
1633  {
1634  Assert(BufferIsPinned(buffer));
1635  if (BufferIsLocal(buffer))
1636  {
1637  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
1638  if (bufHdr->tag.blockNum == blockNum &&
1639  RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
1640  bufHdr->tag.forkNum == forkNum)
1641  return buffer;
1643  LocalRefCount[-buffer - 1]--;
1644  }
1645  else
1646  {
1647  bufHdr = GetBufferDescriptor(buffer - 1);
1648  /* we have pin, so it's ok to examine tag without spinlock */
1649  if (bufHdr->tag.blockNum == blockNum &&
1650  RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
1651  bufHdr->tag.forkNum == forkNum)
1652  return buffer;
1653  UnpinBuffer(bufHdr, true);
1654  }
1655  }
1656 
1657  return ReadBuffer(relation, blockNum);
1658 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
ForkNumber forkNum
Definition: buf_internals.h:94
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
#define GetLocalBufferDescriptor(id)
#define GetBufferDescriptor(id)
ForkNumber
Definition: relpath.h:40
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1831
RelFileNode rd_node
Definition: rel.h:55
#define Assert(condition)
Definition: c.h:804
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:697
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:95
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:93
BufferTag tag
int32 * LocalRefCount
Definition: localbuf.c:45
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88
void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:968

◆ ReleaseBuffer()

void ReleaseBuffer ( Buffer  buffer)

Definition at line 3784 of file bufmgr.c.

References Assert, BufferIsLocal, BufferIsValid, CurrentResourceOwner, elog, ERROR, GetBufferDescriptor, LocalRefCount, ResourceOwnerForgetBuffer(), and UnpinBuffer().

Referenced by _bt_drop_lock_and_maybe_pin(), _bt_getbuf(), _bt_pagedel(), _bt_relbuf(), _bt_search_insert(), _bt_unlink_halfdead_page(), _hash_dropbuf(), _hash_getbuf_with_condlock_cleanup(), autoprewarm_database_main(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brin_vacuum_scan(), bringetbitmap(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapTerminate(), brinsummarize(), collect_corrupt_items(), collect_visibility_data(), entryLoadMoreItems(), ExecEndBitmapHeapScan(), ExecEndIndexOnlyScan(), ExecReScanBitmapHeapScan(), FreeBulkInsertState(), freeGinBtreeStack(), fsm_vacuum_page(), get_actual_variable_endpoint(), get_raw_page_internal(), GetRecordedFreeSpace(), ginDeletePage(), ginFindParents(), ginFinishSplit(), ginFreeScanKeys(), ginInsertCleanup(), GinNewBuffer(), ginScanToDelete(), gistdoinsert(), gistFindCorrectParent(), gistNewBuffer(), gistvacuum_delete_empty_pages(), heap_abort_speculative(), heap_delete(), heap_endscan(), heap_fetch(), heap_force_common(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_rescan(), heap_update(), heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapgetpage(), heapgettup(), heapgettup_pagemode(), lazy_scan_heap(), lazy_vacuum_heap_rel(), pg_prewarm(), pg_visibility(), pg_visibility_map(), pg_visibility_map_summary(), pgstatindex_impl(), ReadBufferBI(), RelationGetBufferForTuple(), ReleaseBulkInsertStatePin(), ResourceOwnerReleaseInternal(), revmap_get_buffer(), revmap_physical_extend(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), statapprox_heap(), summarize_range(), terminate_brin_buildstate(), tts_buffer_heap_clear(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), UnlockReleaseBuffer(), verify_heapam(), visibilitymap_count(), 
visibilitymap_get_status(), visibilitymap_pin(), and XLogReadBufferExtended().

3785 {
3786  if (!BufferIsValid(buffer))
3787  elog(ERROR, "bad buffer ID: %d", buffer);
3788 
3789  if (BufferIsLocal(buffer))
3790  {
3792 
3793  Assert(LocalRefCount[-buffer - 1] > 0);
3794  LocalRefCount[-buffer - 1]--;
3795  return;
3796  }
3797 
3798  UnpinBuffer(GetBufferDescriptor(buffer - 1), true);
3799 }
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
#define ERROR
Definition: elog.h:46
#define GetBufferDescriptor(id)
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1831
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define elog(elevel,...)
Definition: elog.h:232
int32 * LocalRefCount
Definition: localbuf.c:45
void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:968

◆ TestForOldSnapshot()

static void TestForOldSnapshot ( Snapshot  snapshot,
Relation  relation,
Page  page 
)
inlinestatic

Definition at line 279 of file bufmgr.h.

References Assert, old_snapshot_threshold, PageGetLSN, SNAPSHOT_MVCC, SNAPSHOT_TOAST, TestForOldSnapshot_impl(), and XLogRecPtrIsInvalid.

Referenced by _bt_get_endpoint(), _bt_moveright(), _bt_readnextpage(), _bt_walk_left(), _hash_first(), _hash_next(), _hash_readnext(), _hash_readprev(), blgetbitmap(), brinGetTupleForHeapBlock(), brinRevmapInitialize(), collectMatchBitmap(), collectMatchesForHeapRow(), ginFindLeafPage(), gistScanPage(), heap_fetch(), heap_get_latest_tid(), heapgetpage(), heapgettup(), heapgettup_pagemode(), scanGetCandidate(), scanPendingInsert(), and spgWalk().

280 {
281  Assert(relation != NULL);
282 
283  if (old_snapshot_threshold >= 0
284  && (snapshot) != NULL
285  && ((snapshot)->snapshot_type == SNAPSHOT_MVCC
286  || (snapshot)->snapshot_type == SNAPSHOT_TOAST)
287  && !XLogRecPtrIsInvalid((snapshot)->lsn)
288  && PageGetLSN(page) > (snapshot)->lsn)
289  TestForOldSnapshot_impl(snapshot, relation);
290 }
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define Assert(condition)
Definition: c.h:804
#define PageGetLSN(page)
Definition: bufpage.h:366
int old_snapshot_threshold
Definition: snapmgr.c:78
void TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
Definition: bufmgr.c:4864

◆ TestForOldSnapshot_impl()

void TestForOldSnapshot_impl ( Snapshot  snapshot,
Relation  relation 
)

Definition at line 4864 of file bufmgr.c.

References ereport, errcode(), errmsg(), ERROR, GetOldSnapshotThresholdTimestamp(), and RelationAllowsEarlyPruning.

Referenced by TestForOldSnapshot().

4865 {
4866  if (RelationAllowsEarlyPruning(relation)
4867  && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
4868  ereport(ERROR,
4869  (errcode(ERRCODE_SNAPSHOT_TOO_OLD),
4870  errmsg("snapshot too old")));
4871 }
TimestampTz GetOldSnapshotThresholdTimestamp(void)
Definition: snapmgr.c:1660
#define RelationAllowsEarlyPruning(rel)
Definition: snapmgr.h:38
int errcode(int sqlerrcode)
Definition: elog.c:698
#define ERROR
Definition: elog.h:46
#define ereport(elevel,...)
Definition: elog.h:157
int errmsg(const char *fmt,...)
Definition: elog.c:909

◆ UnlockBuffers()

void UnlockBuffers ( void  )

Definition at line 3995 of file bufmgr.c.

References BM_PIN_COUNT_WAITER, buf, LockBufHdr(), MyProcPid, PinCountWaitBuf, UnlockBufHdr, and BufferDesc::wait_backend_pid.

Referenced by AbortSubTransaction(), AbortTransaction(), AtProcExit_Buffers(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), and WalWriterMain().

3996 {
3998 
3999  if (buf)
4000  {
4001  uint32 buf_state;
4002 
4003  buf_state = LockBufHdr(buf);
4004 
4005  /*
4006  * Don't complain if flag bit not set; it could have been reset but we
4007  * got a cancel/die interrupt before getting the signal.
4008  */
4009  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4010  buf->wait_backend_pid == MyProcPid)
4011  buf_state &= ~BM_PIN_COUNT_WAITER;
4012 
4013  UnlockBufHdr(buf, buf_state);
4014 
4015  PinCountWaitBuf = NULL;
4016  }
4017 }
int MyProcPid
Definition: globals.c:43
int wait_backend_pid
static char * buf
Definition: pg_test_fsync.c:68
unsigned int uint32
Definition: c.h:441
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4603
#define UnlockBufHdr(desc, s)
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:165
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:65

◆ UnlockReleaseBuffer()

void UnlockReleaseBuffer ( Buffer  buffer)

Definition at line 3807 of file bufmgr.c.

References BUFFER_LOCK_UNLOCK, LockBuffer(), and ReleaseBuffer().

Referenced by _bt_clear_incomplete_split(), _bt_restore_meta(), _hash_relbuf(), allocNewBuffer(), AlterSequence(), blbulkdelete(), blgetbitmap(), blinsert(), BloomInitMetapage(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinGetStats(), brinRevmapDesummarizeRange(), bt_metap(), bt_page_items_internal(), bt_page_stats_internal(), bt_recheck_sibling_links(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), createPostingTree(), do_setval(), doPickSplit(), entryLoadMoreItems(), fill_seq_with_data(), flushCachedPage(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_set_and_search(), generic_redo(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginFindParents(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), ginScanToDelete(), ginStepRight(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistbufferinginserttuples(), gistbuild(), gistbuildempty(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), 
hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_insert(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), heapam_scan_analyze_next_tuple(), initBloomState(), lazy_scan_heap(), lazy_vacuum_heap_rel(), log_newpage_range(), moveLeafs(), nextval_internal(), palloc_btree_page(), pg_sequence_last_value(), pg_visibility(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), ResetSequence(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistUpdateMetaPage(), spgMatchNodeAction(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), statapprox_heap(), verify_heapam(), visibilitymap_prepare_truncate(), writeListPage(), xlog_redo(), and XLogRecordPageWithFreeSpace().

3808 {
3809  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3810  ReleaseBuffer(buffer);
3811 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3784
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4023

Variable Documentation

◆ backend_flush_after

int backend_flush_after

Definition at line 158 of file bufmgr.c.

Referenced by InitBufferPool().

◆ bgwriter_flush_after

int bgwriter_flush_after

Definition at line 157 of file bufmgr.c.

Referenced by BackgroundWriterMain().

◆ bgwriter_lru_maxpages

int bgwriter_lru_maxpages

Definition at line 133 of file bufmgr.c.

Referenced by BgBufferSync().

◆ bgwriter_lru_multiplier

double bgwriter_lru_multiplier

Definition at line 134 of file bufmgr.c.

Referenced by BgBufferSync().

◆ BufferBlocks

PGDLLIMPORT char* BufferBlocks

Definition at line 21 of file buf_init.c.

Referenced by InitBufferPool().

◆ checkpoint_flush_after

int checkpoint_flush_after

Definition at line 156 of file bufmgr.c.

Referenced by BufferSync().

◆ effective_io_concurrency

int effective_io_concurrency

Definition at line 143 of file bufmgr.c.

Referenced by get_tablespace_io_concurrency(), and tablespace_reloptions().

◆ LocalBufferBlockPointers

PGDLLIMPORT Block* LocalBufferBlockPointers

Definition at line 44 of file localbuf.c.

Referenced by InitLocalBuffers().

◆ LocalRefCount

◆ maintenance_io_concurrency

◆ NBuffers

◆ NLocBuffer

◆ track_io_timing

bool track_io_timing

◆ zero_damaged_pages

bool zero_damaged_pages

Definition at line 132 of file bufmgr.c.

Referenced by mdread(), and ReadBuffer_common().