PostgreSQL Source Code  git master
bufmgr.h File Reference
#include "storage/block.h"
#include "storage/buf.h"
#include "storage/bufpage.h"
#include "storage/relfilenode.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
Include dependency graph for bufmgr.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  PrefetchBufferResult
 

Macros

#define MAX_IO_CONCURRENCY   1000
 
#define P_NEW   InvalidBlockNumber /* grow the file to get a new page */
 
#define BUFFER_LOCK_UNLOCK   0
 
#define BUFFER_LOCK_SHARE   1
 
#define BUFFER_LOCK_EXCLUSIVE   2
 
#define BufferIsValid(bufnum)
 
#define BufferGetBlock(buffer)
 
#define BufferGetPageSize(buffer)
 
#define BufferGetPage(buffer)   ((Page)BufferGetBlock(buffer))
 
#define RelationGetNumberOfBlocks(reln)   RelationGetNumberOfBlocksInFork(reln, MAIN_FORKNUM)
 

Typedefs

typedef void * Block
 
typedef enum BufferAccessStrategyType BufferAccessStrategyType
 
typedef struct PrefetchBufferResult PrefetchBufferResult
 

Enumerations

enum  BufferAccessStrategyType { BAS_NORMAL, BAS_BULKREAD, BAS_BULKWRITE, BAS_VACUUM }
 
enum  ReadBufferMode {
  RBM_NORMAL, RBM_ZERO_AND_LOCK, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_ON_ERROR,
  RBM_NORMAL_NO_LOG
}
 

Functions

PrefetchBufferResult PrefetchSharedBuffer (struct SMgrRelationData *smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
 
PrefetchBufferResult PrefetchBuffer (Relation reln, ForkNumber forkNum, BlockNumber blockNum)
 
Buffer ReadBuffer (Relation reln, BlockNumber blockNum)
 
Buffer ReadBufferExtended (Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
 
Buffer ReadBufferWithoutRelcache (RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
 
void ReleaseBuffer (Buffer buffer)
 
void UnlockReleaseBuffer (Buffer buffer)
 
void MarkBufferDirty (Buffer buffer)
 
void IncrBufferRefCount (Buffer buffer)
 
Buffer ReleaseAndReadBuffer (Buffer buffer, Relation relation, BlockNumber blockNum)
 
void InitBufferPool (void)
 
void InitBufferPoolAccess (void)
 
void InitBufferPoolBackend (void)
 
void AtEOXact_Buffers (bool isCommit)
 
void PrintBufferLeakWarning (Buffer buffer)
 
void CheckPointBuffers (int flags)
 
BlockNumber BufferGetBlockNumber (Buffer buffer)
 
BlockNumber RelationGetNumberOfBlocksInFork (Relation relation, ForkNumber forkNum)
 
void FlushOneBuffer (Buffer buffer)
 
void FlushRelationBuffers (Relation rel)
 
void FlushRelationsAllBuffers (struct SMgrRelationData **smgrs, int nrels)
 
void FlushDatabaseBuffers (Oid dbid)
 
void DropRelFileNodeBuffers (struct SMgrRelationData *smgr_reln, ForkNumber *forkNum, int nforks, BlockNumber *firstDelBlock)
 
void DropRelFileNodesAllBuffers (struct SMgrRelationData **smgr_reln, int nnodes)
 
void DropDatabaseBuffers (Oid dbid)
 
bool BufferIsPermanent (Buffer buffer)
 
XLogRecPtr BufferGetLSNAtomic (Buffer buffer)
 
Size BufferShmemSize (void)
 
void BufferGetTag (Buffer buffer, RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum)
 
void MarkBufferDirtyHint (Buffer buffer, bool buffer_std)
 
void UnlockBuffers (void)
 
void LockBuffer (Buffer buffer, int mode)
 
bool ConditionalLockBuffer (Buffer buffer)
 
void LockBufferForCleanup (Buffer buffer)
 
bool ConditionalLockBufferForCleanup (Buffer buffer)
 
bool IsBufferCleanupOK (Buffer buffer)
 
bool HoldingBufferPinThatDelaysRecovery (void)
 
void AbortBufferIO (void)
 
void BufmgrCommit (void)
 
bool BgBufferSync (struct WritebackContext *wb_context)
 
void AtProcExit_LocalBuffers (void)
 
void TestForOldSnapshot_impl (Snapshot snapshot, Relation relation)
 
BufferAccessStrategy GetAccessStrategy (BufferAccessStrategyType btype)
 
void FreeAccessStrategy (BufferAccessStrategy strategy)
 
static void TestForOldSnapshot (Snapshot snapshot, Relation relation, Page page)
 

Variables

PGDLLIMPORT int NBuffers
 
bool zero_damaged_pages
 
int bgwriter_lru_maxpages
 
double bgwriter_lru_multiplier
 
bool track_io_timing
 
int effective_io_concurrency
 
int maintenance_io_concurrency
 
int checkpoint_flush_after
 
int backend_flush_after
 
int bgwriter_flush_after
 
PGDLLIMPORT char * BufferBlocks
 
PGDLLIMPORT int NLocBuffer
 
PGDLLIMPORT Block * LocalBufferBlockPointers
 
PGDLLIMPORT int32 * LocalRefCount
 

Macro Definition Documentation

◆ BUFFER_LOCK_EXCLUSIVE

◆ BUFFER_LOCK_SHARE

◆ BUFFER_LOCK_UNLOCK

#define BUFFER_LOCK_UNLOCK   0

Definition at line 96 of file bufmgr.h.

Referenced by _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_getcachedmetap(), _hash_init(), _hash_kill_items(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), blinsert(), BloomNewBuffer(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_page_cleanup(), bringetbitmap(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinsummarize(), ConditionalLockBufferForCleanup(), fsm_readbuf(), fsm_search_avail(), fsm_vacuum_page(), get_raw_page_internal(), GetVisibilityMapPins(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_page_prune_opt(), heap_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_for_cluster(), heapam_scan_bitmap_next_block(), heapam_scan_sample_next_tuple(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), lazy_scan_heap(), LockBuffer(), LockBufferForCleanup(), pgrowlocks(), pgstat_heap(), pgstatindex_impl(), RelationGetBufferForTuple(), revmap_physical_extend(), SpGistNewBuffer(), summarize_range(), UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_set(), vm_readbuf(), and XLogReadBufferExtended().

◆ BufferGetBlock

#define BufferGetBlock (   buffer)
Value:
( \
AssertMacro(BufferIsValid(buffer)), \
BufferIsLocal(buffer) ? \
LocalBufferBlockPointers[-(buffer) - 1] \
: \
(Block) (BufferBlocks + ((Size) ((buffer) - 1)) * BLCKSZ) \
)
PGDLLIMPORT char * BufferBlocks
Definition: buf_init.c:21
size_t Size
Definition: c.h:540
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
void * Block
Definition: bufmgr.h:24

Definition at line 136 of file bufmgr.h.

Referenced by XLogSaveBufferForHint().

◆ BufferGetPage

#define BufferGetPage (   buffer)    ((Page)BufferGetBlock(buffer))

Definition at line 169 of file bufmgr.h.

Referenced by _bt_binsrch(), _bt_binsrch_insert(), _bt_bottomupdel_pass(), _bt_check_unique(), _bt_checkpage(), _bt_clear_incomplete_split(), _bt_conditionallockbuf(), _bt_dedup_pass(), _bt_delete_or_dedup_one_page(), _bt_delitems_delete(), _bt_delitems_delete_check(), _bt_delitems_vacuum(), _bt_endpoint(), _bt_findinsertloc(), _bt_finish_split(), _bt_get_endpoint(), _bt_getbuf(), _bt_getmeta(), _bt_getroot(), _bt_getstackbuf(), _bt_gettrueroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_killitems(), _bt_leftsib_splitflag(), _bt_lock_subtree_parent(), _bt_lockbuf(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newroot(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_rightsib_halfdeadflag(), _bt_search(), _bt_search_insert(), _bt_set_cleanup_info(), _bt_simpledel_pass(), _bt_split(), _bt_stepright(), _bt_unlink_halfdead_page(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _bt_vacuum_needs_cleanup(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_get_newblock_from_oldbucket(), _hash_get_oldblock_from_newbucket(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_getinitbuf(), _hash_getnewbuf(), _hash_init(), _hash_init_metabuffer(), _hash_initbitmapbuffer(), _hash_initbuf(), _hash_kill_items(), _hash_next(), _hash_pgaddmultitup(), _hash_pgaddtup(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), allocNewBuffer(), blgetbitmap(), blinsert(), BloomNewBuffer(), blvacuumcleanup(), brin_can_do_samepage_update(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_start_evacuating_page(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), 
brinbuild(), brinbuildempty(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinSetHeapBlockItemptr(), bt_metap(), bt_page_items_internal(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), btvacuumpage(), BufferGetLSNAtomic(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), collectMatchesForHeapRow(), count_nondeletable_pages(), createPostingTree(), dataBeginPlaceToPage(), dataBeginPlaceToPageInternal(), dataBeginPlaceToPageLeaf(), dataExecPlaceToPage(), dataExecPlaceToPageInternal(), dataLocateItem(), dataPlaceToPageLeafRecompress(), dataPrepareDownlink(), dataSplitPageInternal(), do_setval(), doPickSplit(), entryExecPlaceToPage(), entryIsEnoughSpace(), entryLoadMoreItems(), entryLocateEntry(), entryLocateLeafEntry(), entryPrepareDownlink(), entrySplitPage(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), generic_redo(), GenericXLogFinish(), GenericXLogRegisterBuffer(), get_raw_page_internal(), GetBTPageStatistics(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginbulkdelete(), ginDeletePage(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), GinInitBuffer(), GinInitMetabuffer(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertData(), ginRedoInsertEntry(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumEntryPage(), 
ginVacuumPostingTreeLeaf(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), GISTInitBuffer(), gistkillitems(), gistMemorizeAllDownlinks(), gistNewBuffer(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hash_bitmap_info(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_hot_search_buffer(), heap_index_delete_tuples(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_is_all_visible(), heap_page_prune(), heap_page_prune_execute(), heap_page_prune_opt(), heap_prune_chain(), heap_update(), heap_xlog_clean(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_validate_scan(), heapam_scan_analyze_next_tuple(), heapam_scan_bitmap_next_block(), heapam_scan_bitmap_next_tuple(), heapam_scan_sample_next_tuple(), heapgetpage(), heapgettup(), heapgettup_pagemode(), index_compute_xid_horizon_for_tuples(), initBloomState(), lazy_check_needs_freeze(), lazy_scan_heap(), 
lazy_vacuum_heap(), lazy_vacuum_page(), log_heap_update(), log_newpage_buffer(), log_newpage_range(), log_split_page(), MarkBufferDirtyHint(), moveLeafs(), moveRightIfItNeeded(), nextval_internal(), palloc_btree_page(), pg_visibility(), pgstat_btree_page(), pgstat_gist_page(), pgstat_hash_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistInitBuffer(), SpGistNewBuffer(), SpGistSetLastUsedPage(), SpGistUpdateMetaPage(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), terminate_brin_buildstate(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), verify_heapam(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), writeListPage(), XLogCheckBufferNeedsBackup(), XLogReadBufferExtended(), XLogReadBufferForRedoExtended(), XLogRecordPageWithFreeSpace(), XLogRegisterBuffer(), XLogSaveBufferForHint(), and xlogVacuumPage().

◆ BufferGetPageSize

◆ BufferIsValid

#define BufferIsValid (   bufnum)
Value:
( \
AssertMacro((bufnum) <= NBuffers && (bufnum) >= -NLocBuffer), \
(bufnum) != InvalidBuffer \
)
#define InvalidBuffer
Definition: buf.h:25
PGDLLIMPORT int NLocBuffer
Definition: localbuf.c:41
PGDLLIMPORT int NBuffers
Definition: globals.c:133

Definition at line 123 of file bufmgr.h.

Referenced by _bt_clear_incomplete_split(), _bt_endpoint(), _bt_first(), _bt_get_endpoint(), _bt_insertonpg(), _bt_readpage(), _bt_relandgetbuf(), _bt_search(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_dropscanbuf(), _hash_freeovflpage(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_readnext(), _hash_readpage(), _hash_readprev(), autoprewarm_database_main(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinGetTupleForHeapBlock(), brininsert(), brinsummarize(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), BufferGetLSNAtomic(), BufferIsPermanent(), checkXLogConsistency(), ConditionalLockBufferForCleanup(), doPickSplit(), entryGetItem(), entryLoadMoreItems(), ExecStoreBufferHeapTuple(), ExecStorePinnedBufferHeapTuple(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_vacuum_page(), generic_redo(), GetPrivateRefCount(), GetPrivateRefCountEntry(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageUpdateRecord(), gistXLogSplit(), gistXLogUpdate(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_endscan(), heap_index_delete_tuples(), 
heap_lock_tuple(), heap_rescan(), heap_update(), heap_xlog_clean(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), heapgettup_pagemode(), IsBufferCleanupOK(), lazy_scan_heap(), lazy_vacuum_heap(), lazy_vacuum_page(), log_heap_visible(), MarkBufferDirty(), MarkBufferDirtyHint(), PrintBufferLeakWarning(), ReleaseAndReadBuffer(), ReleaseBuffer(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), statapprox_heap(), tts_buffer_heap_clear(), tts_buffer_heap_copyslot(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), visibilitymap_prepare_truncate(), visibilitymap_set(), and XLogReadBufferForRedoExtended().

◆ MAX_IO_CONCURRENCY

#define MAX_IO_CONCURRENCY   1000

Definition at line 88 of file bufmgr.h.

◆ P_NEW

◆ RelationGetNumberOfBlocks

Typedef Documentation

◆ Block

typedef void* Block

Definition at line 24 of file bufmgr.h.

◆ BufferAccessStrategyType

◆ PrefetchBufferResult

Enumeration Type Documentation

◆ BufferAccessStrategyType

Enumerator
BAS_NORMAL 
BAS_BULKREAD 
BAS_BULKWRITE 
BAS_VACUUM 

Definition at line 27 of file bufmgr.h.

28 {
29  BAS_NORMAL, /* Normal random access */
30  BAS_BULKREAD, /* Large read-only scan (hint bit updates are
31  * ok) */
32  BAS_BULKWRITE, /* Large multi-block write (e.g. COPY IN) */
33  BAS_VACUUM /* VACUUM */
BufferAccessStrategyType
Definition: bufmgr.h:27

◆ ReadBufferMode

Enumerator
RBM_NORMAL 
RBM_ZERO_AND_LOCK 
RBM_ZERO_AND_CLEANUP_LOCK 
RBM_ZERO_ON_ERROR 
RBM_NORMAL_NO_LOG 

Definition at line 37 of file bufmgr.h.

38 {
39  RBM_NORMAL, /* Normal read */
40  RBM_ZERO_AND_LOCK, /* Don't read from disk, caller will
41  * initialize. Also locks the page. */
42  RBM_ZERO_AND_CLEANUP_LOCK, /* Like RBM_ZERO_AND_LOCK, but locks the page
43  * in "cleanup" mode */
44  RBM_ZERO_ON_ERROR, /* Read, but return an all-zeros page on error */
45  RBM_NORMAL_NO_LOG /* Don't log page as invalid during WAL
46  * replay; otherwise same as RBM_NORMAL */
ReadBufferMode
Definition: bufmgr.h:37

Function Documentation

◆ AbortBufferIO()

void AbortBufferIO ( void  )

Definition at line 4429 of file bufmgr.c.

References Assert, buftag::blockNum, BM_DIRTY, BM_IO_ERROR, BM_IO_IN_PROGRESS, BM_VALID, buf, BufferDescriptorGetIOLock, ereport, errcode(), errdetail(), errmsg(), buftag::forkNum, InProgressBuf, IsForInput, LockBufHdr(), LW_EXCLUSIVE, LWLockAcquire(), pfree(), relpathperm, buftag::rnode, BufferDesc::tag, TerminateBufferIO(), UnlockBufHdr, and WARNING.

Referenced by AbortSubTransaction(), AbortTransaction(), AtProcExit_Buffers(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), and WalWriterMain().

4430 {
4432 
4433  if (buf)
4434  {
4435  uint32 buf_state;
4436 
4437  /*
4438  * Since LWLockReleaseAll has already been called, we're not holding
4439  * the buffer's io_in_progress_lock. We have to re-acquire it so that
4440  * we can use TerminateBufferIO. Anyone who's executing WaitIO on the
4441  * buffer will be in a busy spin until we succeed in doing this.
4442  */
4444 
4445  buf_state = LockBufHdr(buf);
4446  Assert(buf_state & BM_IO_IN_PROGRESS);
4447  if (IsForInput)
4448  {
4449  Assert(!(buf_state & BM_DIRTY));
4450 
4451  /* We'd better not think buffer is valid yet */
4452  Assert(!(buf_state & BM_VALID));
4453  UnlockBufHdr(buf, buf_state);
4454  }
4455  else
4456  {
4457  Assert(buf_state & BM_DIRTY);
4458  UnlockBufHdr(buf, buf_state);
4459  /* Issue notice if this is not the first failure... */
4460  if (buf_state & BM_IO_ERROR)
4461  {
4462  /* Buffer is pinned, so we can read tag without spinlock */
4463  char *path;
4464 
4465  path = relpathperm(buf->tag.rnode, buf->tag.forkNum);
4466  ereport(WARNING,
4467  (errcode(ERRCODE_IO_ERROR),
4468  errmsg("could not write block %u of %s",
4469  buf->tag.blockNum, path),
4470  errdetail("Multiple failures --- write error might be permanent.")));
4471  pfree(path);
4472  }
4473  }
4474  TerminateBufferIO(buf, false, BM_IO_ERROR);
4475  }
4476 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:83
ForkNumber forkNum
Definition: buf_internals.h:93
int errcode(int sqlerrcode)
Definition: elog.c:694
#define BM_DIRTY
Definition: buf_internals.h:58
#define BufferDescriptorGetIOLock(bdesc)
static BufferDesc * InProgressBuf
Definition: bufmgr.c:161
void pfree(void *pointer)
Definition: mcxt.c:1057
static char * buf
Definition: pg_test_fsync.c:68
int errdetail(const char *fmt,...)
Definition: elog.c:1038
unsigned int uint32
Definition: c.h:441
static bool IsForInput
Definition: bufmgr.c:162
#define WARNING
Definition: elog.h:40
static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits)
Definition: bufmgr.c:4397
#define BM_VALID
Definition: buf_internals.h:59
#define ereport(elevel,...)
Definition: elog.h:155
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4547
#define Assert(condition)
Definition: c.h:804
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1206
BlockNumber blockNum
Definition: buf_internals.h:94
RelFileNode rnode
Definition: buf_internals.h:92
#define BM_IO_ERROR
Definition: buf_internals.h:62
BufferTag tag
int errmsg(const char *fmt,...)
Definition: elog.c:905
#define UnlockBufHdr(desc, s)
#define BM_IO_IN_PROGRESS
Definition: buf_internals.h:61

◆ AtEOXact_Buffers()

void AtEOXact_Buffers ( bool  isCommit)

Definition at line 2492 of file bufmgr.c.

References Assert, AtEOXact_LocalBuffers(), CheckForBufferLeaks(), and PrivateRefCountOverflowed.

Referenced by AbortTransaction(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), CommitTransaction(), PrepareTransaction(), and WalWriterMain().

2493 {
2495 
2496  AtEOXact_LocalBuffers(isCommit);
2497 
2499 }
static int32 PrivateRefCountOverflowed
Definition: bufmgr.c:199
#define Assert(condition)
Definition: c.h:804
static void CheckForBufferLeaks(void)
Definition: bufmgr.c:2566
void AtEOXact_LocalBuffers(bool isCommit)
Definition: localbuf.c:577

◆ AtProcExit_LocalBuffers()

void AtProcExit_LocalBuffers ( void  )

Definition at line 588 of file localbuf.c.

References CheckForLocalBufferLeaks().

Referenced by AtProcExit_Buffers().

589 {
590  /*
591  * We shouldn't be holding any remaining pins; if we are, and assertions
592  * aren't enabled, we'll fail later in DropRelFileNodeBuffers while trying
593  * to drop the temp rels.
594  */
596 }
static void CheckForLocalBufferLeaks(void)
Definition: localbuf.c:548

◆ BgBufferSync()

bool BgBufferSync ( struct WritebackContext wb_context)

Definition at line 2122 of file bufmgr.c.

References Assert, bgwriter_lru_maxpages, bgwriter_lru_multiplier, BgWriterDelay, BgWriterStats, BUF_REUSABLE, BUF_WRITTEN, CurrentResourceOwner, DEBUG1, DEBUG2, elog, PgStat_MsgBgWriter::m_buf_alloc, PgStat_MsgBgWriter::m_buf_written_clean, PgStat_MsgBgWriter::m_maxwritten_clean, NBuffers, ResourceOwnerEnlargeBuffers(), StrategySyncStart(), and SyncOneBuffer().

Referenced by BackgroundWriterMain().

2123 {
2124  /* info obtained from freelist.c */
2125  int strategy_buf_id;
2126  uint32 strategy_passes;
2127  uint32 recent_alloc;
2128 
2129  /*
2130  * Information saved between calls so we can determine the strategy
2131  * point's advance rate and avoid scanning already-cleaned buffers.
2132  */
2133  static bool saved_info_valid = false;
2134  static int prev_strategy_buf_id;
2135  static uint32 prev_strategy_passes;
2136  static int next_to_clean;
2137  static uint32 next_passes;
2138 
2139  /* Moving averages of allocation rate and clean-buffer density */
2140  static float smoothed_alloc = 0;
2141  static float smoothed_density = 10.0;
2142 
2143  /* Potentially these could be tunables, but for now, not */
2144  float smoothing_samples = 16;
2145  float scan_whole_pool_milliseconds = 120000.0;
2146 
2147  /* Used to compute how far we scan ahead */
2148  long strategy_delta;
2149  int bufs_to_lap;
2150  int bufs_ahead;
2151  float scans_per_alloc;
2152  int reusable_buffers_est;
2153  int upcoming_alloc_est;
2154  int min_scan_buffers;
2155 
2156  /* Variables for the scanning loop proper */
2157  int num_to_scan;
2158  int num_written;
2159  int reusable_buffers;
2160 
2161  /* Variables for final smoothed_density update */
2162  long new_strategy_delta;
2163  uint32 new_recent_alloc;
2164 
2165  /*
2166  * Find out where the freelist clock sweep currently is, and how many
2167  * buffer allocations have happened since our last call.
2168  */
2169  strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
2170 
2171  /* Report buffer alloc counts to pgstat */
2172  BgWriterStats.m_buf_alloc += recent_alloc;
2173 
2174  /*
2175  * If we're not running the LRU scan, just stop after doing the stats
2176  * stuff. We mark the saved state invalid so that we can recover sanely
2177  * if LRU scan is turned back on later.
2178  */
2179  if (bgwriter_lru_maxpages <= 0)
2180  {
2181  saved_info_valid = false;
2182  return true;
2183  }
2184 
2185  /*
2186  * Compute strategy_delta = how many buffers have been scanned by the
2187  * clock sweep since last time. If first time through, assume none. Then
2188  * see if we are still ahead of the clock sweep, and if so, how many
2189  * buffers we could scan before we'd catch up with it and "lap" it. Note:
2190  * weird-looking coding of xxx_passes comparisons are to avoid bogus
2191  * behavior when the passes counts wrap around.
2192  */
2193  if (saved_info_valid)
2194  {
2195  int32 passes_delta = strategy_passes - prev_strategy_passes;
2196 
2197  strategy_delta = strategy_buf_id - prev_strategy_buf_id;
2198  strategy_delta += (long) passes_delta * NBuffers;
2199 
2200  Assert(strategy_delta >= 0);
2201 
2202  if ((int32) (next_passes - strategy_passes) > 0)
2203  {
2204  /* we're one pass ahead of the strategy point */
2205  bufs_to_lap = strategy_buf_id - next_to_clean;
2206 #ifdef BGW_DEBUG
2207  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2208  next_passes, next_to_clean,
2209  strategy_passes, strategy_buf_id,
2210  strategy_delta, bufs_to_lap);
2211 #endif
2212  }
2213  else if (next_passes == strategy_passes &&
2214  next_to_clean >= strategy_buf_id)
2215  {
2216  /* on same pass, but ahead or at least not behind */
2217  bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
2218 #ifdef BGW_DEBUG
2219  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2220  next_passes, next_to_clean,
2221  strategy_passes, strategy_buf_id,
2222  strategy_delta, bufs_to_lap);
2223 #endif
2224  }
2225  else
2226  {
2227  /*
2228  * We're behind, so skip forward to the strategy point and start
2229  * cleaning from there.
2230  */
2231 #ifdef BGW_DEBUG
2232  elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
2233  next_passes, next_to_clean,
2234  strategy_passes, strategy_buf_id,
2235  strategy_delta);
2236 #endif
2237  next_to_clean = strategy_buf_id;
2238  next_passes = strategy_passes;
2239  bufs_to_lap = NBuffers;
2240  }
2241  }
2242  else
2243  {
2244  /*
2245  * Initializing at startup or after LRU scanning had been off. Always
2246  * start at the strategy point.
2247  */
2248 #ifdef BGW_DEBUG
2249  elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
2250  strategy_passes, strategy_buf_id);
2251 #endif
2252  strategy_delta = 0;
2253  next_to_clean = strategy_buf_id;
2254  next_passes = strategy_passes;
2255  bufs_to_lap = NBuffers;
2256  }
2257 
2258  /* Update saved info for next time */
2259  prev_strategy_buf_id = strategy_buf_id;
2260  prev_strategy_passes = strategy_passes;
2261  saved_info_valid = true;
2262 
2263  /*
2264  * Compute how many buffers had to be scanned for each new allocation, ie,
2265  * 1/density of reusable buffers, and track a moving average of that.
2266  *
2267  * If the strategy point didn't move, we don't update the density estimate
2268  */
2269  if (strategy_delta > 0 && recent_alloc > 0)
2270  {
2271  scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
2272  smoothed_density += (scans_per_alloc - smoothed_density) /
2273  smoothing_samples;
2274  }
2275 
2276  /*
2277  * Estimate how many reusable buffers there are between the current
2278  * strategy point and where we've scanned ahead to, based on the smoothed
2279  * density estimate.
2280  */
2281  bufs_ahead = NBuffers - bufs_to_lap;
2282  reusable_buffers_est = (float) bufs_ahead / smoothed_density;
2283 
2284  /*
2285  * Track a moving average of recent buffer allocations. Here, rather than
2286  * a true average we want a fast-attack, slow-decline behavior: we
2287  * immediately follow any increase.
2288  */
2289  if (smoothed_alloc <= (float) recent_alloc)
2290  smoothed_alloc = recent_alloc;
2291  else
2292  smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
2293  smoothing_samples;
2294 
2295  /* Scale the estimate by a GUC to allow more aggressive tuning. */
2296  upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
2297 
2298  /*
2299  * If recent_alloc remains at zero for many cycles, smoothed_alloc will
2300  * eventually underflow to zero, and the underflows produce annoying
2301  * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
2302  * zero, there's no point in tracking smaller and smaller values of
2303  * smoothed_alloc, so just reset it to exactly zero to avoid this
2304  * syndrome. It will pop back up as soon as recent_alloc increases.
2305  */
2306  if (upcoming_alloc_est == 0)
2307  smoothed_alloc = 0;
2308 
2309  /*
2310  * Even in cases where there's been little or no buffer allocation
2311  * activity, we want to make a small amount of progress through the buffer
2312  * cache so that as many reusable buffers as possible are clean after an
2313  * idle period.
2314  *
2315  * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
2316  * the BGW will be called during the scan_whole_pool time; slice the
2317  * buffer pool into that many sections.
2318  */
2319  min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
2320 
2321  if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
2322  {
2323 #ifdef BGW_DEBUG
2324  elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
2325  upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
2326 #endif
2327  upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
2328  }
2329 
2330  /*
2331  * Now write out dirty reusable buffers, working forward from the
2332  * next_to_clean point, until we have lapped the strategy scan, or cleaned
2333  * enough buffers to match our estimate of the next cycle's allocation
2334  * requirements, or hit the bgwriter_lru_maxpages limit.
2335  */
2336 
2337  /* Make sure we can handle the pin inside SyncOneBuffer */
2339 
2340  num_to_scan = bufs_to_lap;
2341  num_written = 0;
2342  reusable_buffers = reusable_buffers_est;
2343 
2344  /* Execute the LRU scan */
2345  while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
2346  {
2347  int sync_state = SyncOneBuffer(next_to_clean, true,
2348  wb_context);
2349 
2350  if (++next_to_clean >= NBuffers)
2351  {
2352  next_to_clean = 0;
2353  next_passes++;
2354  }
2355  num_to_scan--;
2356 
2357  if (sync_state & BUF_WRITTEN)
2358  {
2359  reusable_buffers++;
2360  if (++num_written >= bgwriter_lru_maxpages)
2361  {
2363  break;
2364  }
2365  }
2366  else if (sync_state & BUF_REUSABLE)
2367  reusable_buffers++;
2368  }
2369 
2370  BgWriterStats.m_buf_written_clean += num_written;
2371 
2372 #ifdef BGW_DEBUG
2373  elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
2374  recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
2375  smoothed_density, reusable_buffers_est, upcoming_alloc_est,
2376  bufs_to_lap - num_to_scan,
2377  num_written,
2378  reusable_buffers - reusable_buffers_est);
2379 #endif
2380 
2381  /*
2382  * Consider the above scan as being like a new allocation scan.
2383  * Characterize its density and update the smoothed one based on it. This
2384  * effectively halves the moving average period in cases where both the
2385  * strategy and the background writer are doing some useful scanning,
2386  * which is helpful because a long memory isn't as desirable on the
2387  * density estimates.
2388  */
2389  new_strategy_delta = bufs_to_lap - num_to_scan;
2390  new_recent_alloc = reusable_buffers - reusable_buffers_est;
2391  if (new_strategy_delta > 0 && new_recent_alloc > 0)
2392  {
2393  scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
2394  smoothed_density += (scans_per_alloc - smoothed_density) /
2395  smoothing_samples;
2396 
2397 #ifdef BGW_DEBUG
2398  elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
2399  new_recent_alloc, new_strategy_delta,
2400  scans_per_alloc, smoothed_density);
2401 #endif
2402  }
2403 
2404  /* Return true if OK to hibernate */
2405  return (bufs_to_lap == 0 && recent_alloc == 0);
2406 }
PgStat_Counter m_buf_alloc
Definition: pgstat.h:461
int StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
Definition: freelist.c:395
#define DEBUG1
Definition: elog.h:25
int BgWriterDelay
Definition: bgwriter.c:64
ResourceOwner CurrentResourceOwner
Definition: resowner.c:144
PgStat_Counter m_maxwritten_clean
Definition: pgstat.h:458
PgStat_Counter m_buf_written_clean
Definition: pgstat.h:457
PgStat_MsgBgWriter BgWriterStats
Definition: pgstat.c:144
double bgwriter_lru_multiplier
Definition: bufmgr.c:134
static int SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
Definition: bufmgr.c:2425
signed int int32
Definition: c.h:429
#define BUF_REUSABLE
Definition: bufmgr.c:69
int bgwriter_lru_maxpages
Definition: bufmgr.c:133
#define DEBUG2
Definition: elog.h:24
unsigned int uint32
Definition: c.h:441
#define BUF_WRITTEN
Definition: bufmgr.c:68
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:930
#define Assert(condition)
Definition: c.h:804
#define elog(elevel,...)
Definition: elog.h:227
int NBuffers
Definition: globals.c:133

◆ BufferGetBlockNumber()

BlockNumber BufferGetBlockNumber ( Buffer  buffer)

Definition at line 2674 of file bufmgr.c.

References Assert, buftag::blockNum, BufferIsLocal, BufferIsPinned, GetBufferDescriptor, GetLocalBufferDescriptor, and BufferDesc::tag.

Referenced by _bt_check_unique(), _bt_checkpage(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_doinsert(), _bt_endpoint(), _bt_finish_split(), _bt_first(), _bt_getroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newroot(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_search(), _bt_split(), _bt_unlink_halfdead_page(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_first(), _hash_freeovflpage(), _hash_getnewbuf(), _hash_readnext(), _hash_readpage(), _hash_splitbucket(), allocNewBuffer(), blinsert(), BloomInitMetapage(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_xlog_insert_update(), brinbuild(), brinGetTupleForHeapBlock(), createPostingTree(), dataBeginPlaceToPageLeaf(), dataPrepareDownlink(), doPickSplit(), entryPrepareDownlink(), fill_seq_with_data(), ginEntryInsert(), ginFindParents(), ginFinishSplit(), ginPlaceToPage(), ginRedoDeleteListPages(), ginRedoUpdateMetapage(), ginScanToDelete(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistformdownlink(), gistinserttuples(), gistMemorizeAllDownlinks(), gistplacetopage(), gistRelocateBuildBuffersOnSplit(), gistScanPage(), hash_xlog_add_ovfl_page(), heap_delete(), heap_hot_search_buffer(), heap_insert(), heap_multi_insert(), heap_page_is_all_visible(), heap_prune_chain(), heap_update(), heap_xlog_confirm(), heap_xlog_lock(), makeSublist(), moveLeafs(), moveRightIfItNeeded(), pgstathashindex(), ReadBufferBI(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_get_buffer(), revmap_physical_extend(), spgAddNodeAction(), spgbuild(), spgdoinsert(), SpGistSetLastUsedPage(), spgSplitNodeAction(), spgWalk(), startScanEntry(), terminate_brin_buildstate(), vacuumLeafPage(), visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), 
visibilitymap_set(), and XLogReadBufferExtended().

2675 {
2676  BufferDesc *bufHdr;
2677 
2678  Assert(BufferIsPinned(buffer));
2679 
2680  if (BufferIsLocal(buffer))
2681  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2682  else
2683  bufHdr = GetBufferDescriptor(buffer - 1);
2684 
2685  /* pinned, so OK to read tag without spinlock */
2686  return bufHdr->tag.blockNum;
2687 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
#define GetLocalBufferDescriptor(id)
#define GetBufferDescriptor(id)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:94
BufferTag tag

◆ BufferGetLSNAtomic()

XLogRecPtr BufferGetLSNAtomic ( Buffer  buffer)

Definition at line 2937 of file bufmgr.c.

References Assert, BufferGetPage, BufferIsLocal, BufferIsPinned, BufferIsValid, GetBufferDescriptor, LockBufHdr(), PageGetLSN, UnlockBufHdr, and XLogHintBitIsNeeded.

Referenced by _bt_killitems(), _bt_readpage(), gistdoinsert(), gistFindPath(), gistkillitems(), gistScanPage(), SetHintBits(), and XLogSaveBufferForHint().

2938 {
2939  BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
2940  char *page = BufferGetPage(buffer);
2941  XLogRecPtr lsn;
2942  uint32 buf_state;
2943 
2944  /*
2945  * If we don't need locking for correctness, fastpath out.
2946  */
2947  if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
2948  return PageGetLSN(page);
2949 
2950  /* Make sure we've got a real buffer, and that we hold a pin on it. */
2951  Assert(BufferIsValid(buffer));
2952  Assert(BufferIsPinned(buffer));
2953 
2954  buf_state = LockBufHdr(bufHdr);
2955  lsn = PageGetLSN(page);
2956  UnlockBufHdr(bufHdr, buf_state);
2957 
2958  return lsn;
2959 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:441
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4547
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define PageGetLSN(page)
Definition: bufpage.h:366
#define UnlockBufHdr(desc, s)
#define XLogHintBitIsNeeded()
Definition: xlog.h:202

◆ BufferGetTag()

void BufferGetTag ( Buffer  buffer,
RelFileNode rnode,
ForkNumber forknum,
BlockNumber blknum 
)

Definition at line 2695 of file bufmgr.c.

References Assert, buftag::blockNum, BufferIsLocal, BufferIsPinned, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, buftag::rnode, and BufferDesc::tag.

Referenced by fsm_search_avail(), ginRedoInsertEntry(), log_newpage_buffer(), ResolveCminCmaxDuringDecoding(), XLogRegisterBuffer(), and XLogSaveBufferForHint().

2697 {
2698  BufferDesc *bufHdr;
2699 
2700  /* Do the same checks as BufferGetBlockNumber. */
2701  Assert(BufferIsPinned(buffer));
2702 
2703  if (BufferIsLocal(buffer))
2704  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2705  else
2706  bufHdr = GetBufferDescriptor(buffer - 1);
2707 
2708  /* pinned, so OK to read tag without spinlock */
2709  *rnode = bufHdr->tag.rnode;
2710  *forknum = bufHdr->tag.forkNum;
2711  *blknum = bufHdr->tag.blockNum;
2712 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
ForkNumber forkNum
Definition: buf_internals.h:93
#define GetLocalBufferDescriptor(id)
#define GetBufferDescriptor(id)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:94
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag

◆ BufferIsPermanent()

bool BufferIsPermanent ( Buffer  buffer)

Definition at line 2907 of file bufmgr.c.

References Assert, BM_PERMANENT, BufferIsLocal, BufferIsPinned, BufferIsValid, GetBufferDescriptor, pg_atomic_read_u32(), and BufferDesc::state.

Referenced by SetHintBits().

2908 {
2909  BufferDesc *bufHdr;
2910 
2911  /* Local buffers are used only for temp relations. */
2912  if (BufferIsLocal(buffer))
2913  return false;
2914 
2915  /* Make sure we've got a real buffer, and that we hold a pin on it. */
2916  Assert(BufferIsValid(buffer));
2917  Assert(BufferIsPinned(buffer));
2918 
2919  /*
2920  * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
2921  * need not bother with the buffer header spinlock. Even if someone else
2922  * changes the buffer header state while we're doing this, the state is
2923  * changed atomically, so we'll read the old value or the new value, but
2924  * not random garbage.
2925  */
2926  bufHdr = GetBufferDescriptor(buffer - 1);
2927  return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
2928 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
#define BM_PERMANENT
Definition: buf_internals.h:66
#define GetBufferDescriptor(id)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
pg_atomic_uint32 state
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ BufferShmemSize()

Size BufferShmemSize ( void  )

Definition at line 157 of file buf_init.c.

References add_size(), mul_size(), NBuffers, PG_CACHE_LINE_SIZE, and StrategyShmemSize().

Referenced by CreateSharedMemoryAndSemaphores().

158 {
159  Size size = 0;
160 
161  /* size of buffer descriptors */
162  size = add_size(size, mul_size(NBuffers, sizeof(BufferDescPadded)));
163  /* to allow aligning buffer descriptors */
164  size = add_size(size, PG_CACHE_LINE_SIZE);
165 
166  /* size of data pages */
167  size = add_size(size, mul_size(NBuffers, BLCKSZ));
168 
169  /* size of stuff controlled by freelist.c */
170  size = add_size(size, StrategyShmemSize());
171 
172  /*
173  * It would be nice to include the I/O locks in the BufferDesc, but that
174  * would increase the size of a BufferDesc to more than one cache line,
175  * and benchmarking has shown that keeping every BufferDesc aligned on a
176  * cache line boundary is important for performance. So, instead, the
177  * array of I/O locks is allocated in a separate tranche. Because those
178  * locks are not highly contended, we lay out the array with minimal
179  * padding.
180  */
181  size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded)));
182  /* to allow aligning the above */
183  size = add_size(size, PG_CACHE_LINE_SIZE);
184 
185  /* size of checkpoint sort array in bufmgr.c */
186  size = add_size(size, mul_size(NBuffers, sizeof(CkptSortItem)));
187 
188  return size;
189 }
#define PG_CACHE_LINE_SIZE
Size mul_size(Size s1, Size s2)
Definition: shmem.c:519
Size add_size(Size s1, Size s2)
Definition: shmem.c:502
size_t Size
Definition: c.h:540
int NBuffers
Definition: globals.c:133
Size StrategyShmemSize(void)
Definition: freelist.c:454

◆ BufmgrCommit()

void BufmgrCommit ( void  )

Definition at line 2660 of file bufmgr.c.

Referenced by PrepareTransaction(), and RecordTransactionCommit().

/*
 * BufmgrCommit
 *		Transaction-commit hook for the buffer manager; retained for the
 *		callers in PrepareTransaction()/RecordTransactionCommit() even
 *		though no work remains here.
 */
void
BufmgrCommit(void)
{
	/* Nothing to do in bufmgr anymore... */
}

◆ CheckPointBuffers()

void CheckPointBuffers ( int  flags)

Definition at line 2650 of file bufmgr.c.

References BufferSync().

Referenced by CheckPointGuts().

/*
 * CheckPointBuffers
 *		Checkpoint entry point: flush all dirty shared buffers by
 *		delegating to BufferSync(), passing the checkpoint flags through.
 */
void
CheckPointBuffers(int flags)
{
	BufferSync(flags);
}
static void BufferSync(int flags)
Definition: bufmgr.c:1845

◆ ConditionalLockBuffer()

bool ConditionalLockBuffer ( Buffer  buffer)

Definition at line 3965 of file bufmgr.c.

References Assert, buf, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, GetBufferDescriptor, LW_EXCLUSIVE, and LWLockConditionalAcquire().

Referenced by _bt_conditionallockbuf(), BloomNewBuffer(), ConditionalLockBufferForCleanup(), GinNewBuffer(), gistNewBuffer(), RelationGetBufferForTuple(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), and SpGistUpdateMetaPage().

/*
 * ConditionalLockBuffer — try to take the buffer's content lock in
 * exclusive mode without blocking; returns true on success.  A local
 * buffer needs no lock, so we act as though we got it.
 *
 * NOTE(review): this Doxygen-rendered snippet omits source line 3975,
 * which appears as a hyperlink on the page — presumably the
 * "return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf), ..."
 * call (cf. the References list above); the fragment below is incomplete
 * as shown, so verify against bufmgr.c.
 */
3966 {
3967  BufferDesc *buf;
3968 
3969  Assert(BufferIsPinned(buffer));
3970  if (BufferIsLocal(buffer))
3971  return true; /* act as though we got it */
3972 
3973  buf = GetBufferDescriptor(buffer - 1);
3974 
3976  LW_EXCLUSIVE);
3977 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1378
static char * buf
Definition: pg_test_fsync.c:68
#define GetBufferDescriptor(id)
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37

◆ ConditionalLockBufferForCleanup()

bool ConditionalLockBufferForCleanup ( Buffer  buffer)

Definition at line 4173 of file bufmgr.c.

References Assert, BUF_STATE_GET_REFCOUNT, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsValid, ConditionalLockBuffer(), GetBufferDescriptor, GetPrivateRefCount(), LocalRefCount, LockBuffer(), LockBufHdr(), PrivateRefCountEntry::refcount, and UnlockBufHdr.

Referenced by _hash_finish_split(), _hash_getbuf_with_condlock_cleanup(), heap_page_prune_opt(), lazy_scan_heap(), and lazy_vacuum_heap().

4174 {
4175  BufferDesc *bufHdr;
4176  uint32 buf_state,
4177  refcount;
4178 
4179  Assert(BufferIsValid(buffer));
4180 
4181  if (BufferIsLocal(buffer))
4182  {
4183  refcount = LocalRefCount[-buffer - 1];
4184  /* There should be exactly one pin */
4185  Assert(refcount > 0);
4186  if (refcount != 1)
4187  return false;
4188  /* Nobody else to wait for */
4189  return true;
4190  }
4191 
4192  /* There should be exactly one local pin */
4193  refcount = GetPrivateRefCount(buffer);
4194  Assert(refcount);
4195  if (refcount != 1)
4196  return false;
4197 
4198  /* Try to acquire lock */
4199  if (!ConditionalLockBuffer(buffer))
4200  return false;
4201 
4202  bufHdr = GetBufferDescriptor(buffer - 1);
4203  buf_state = LockBufHdr(bufHdr);
4204  refcount = BUF_STATE_GET_REFCOUNT(buf_state);
4205 
4206  Assert(refcount > 0);
4207  if (refcount == 1)
4208  {
4209  /* Successfully acquired exclusive lock with pincount 1 */
4210  UnlockBufHdr(bufHdr, buf_state);
4211  return true;
4212  }
4213 
4214  /* Failed, so release the lock */
4215  UnlockBufHdr(bufHdr, buf_state);
4216  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4217  return false;
4218 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
bool ConditionalLockBuffer(Buffer buffer)
Definition: bufmgr.c:3965
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3939
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4547
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define UnlockBufHdr(desc, s)
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48

◆ DropDatabaseBuffers()

void DropDatabaseBuffers ( Oid  dbid)

Definition at line 3337 of file bufmgr.c.

References buftag::blockNum, buf, BufferDescriptorGetBuffer, RelFileNode::dbNode, elog, buftag::forkNum, BufferDesc::freeNext, GetBufferDescriptor, GetPrivateRefCount(), i, InvalidateBuffer(), InvalidBackendId, LockBufHdr(), LOG, NBuffers, relpathbackend, relpathperm, buftag::rnode, BufferDesc::tag, and UnlockBufHdr.

Referenced by dbase_redo(), dropdb(), and movedb().

3338 {
3339  int i;
3340 
3341  /*
3342  * We needn't consider local buffers, since by assumption the target
3343  * database isn't our own.
3344  */
3345 
3346  for (i = 0; i < NBuffers; i++)
3347  {
3348  BufferDesc *bufHdr = GetBufferDescriptor(i);
3349  uint32 buf_state;
3350 
3351  /*
3352  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3353  * and saves some cycles.
3354  */
3355  if (bufHdr->tag.rnode.dbNode != dbid)
3356  continue;
3357 
3358  buf_state = LockBufHdr(bufHdr);
3359  if (bufHdr->tag.rnode.dbNode == dbid)
3360  InvalidateBuffer(bufHdr); /* releases spinlock */
3361  else
3362  UnlockBufHdr(bufHdr, buf_state);
3363  }
3364 }
static void InvalidateBuffer(BufferDesc *buf)
Definition: bufmgr.c:1385
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:441
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4547
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:133

◆ DropRelFileNodeBuffers()

void DropRelFileNodeBuffers ( struct SMgrRelationData smgr_reln,
ForkNumber forkNum,
int  nforks,
BlockNumber firstDelBlock 
)

◆ DropRelFileNodesAllBuffers()

void DropRelFileNodesAllBuffers ( struct SMgrRelationData **  smgr_reln,
int  nnodes 
)

◆ FlushDatabaseBuffers()

void FlushDatabaseBuffers ( Oid  dbid)

Definition at line 3637 of file bufmgr.c.

References BM_DIRTY, BM_VALID, BufferDescriptorGetContentLock, CurrentResourceOwner, RelFileNode::dbNode, FlushBuffer(), GetBufferDescriptor, i, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, PinBuffer_Locked(), ReservePrivateRefCountEntry(), ResourceOwnerEnlargeBuffers(), buftag::rnode, BufferDesc::tag, UnlockBufHdr, and UnpinBuffer().

Referenced by dbase_redo().

/*
 * FlushDatabaseBuffers — write out (without dropping) all dirty, valid
 * shared buffers belonging to database 'dbid'.  Referenced by dbase_redo().
 *
 * NOTE(review): the Doxygen rendering omits hyperlinked source lines 3643,
 * 3658, 3665 and 3667 — judging by the References list these are the
 * ResourceOwnerEnlargeBuffers()/ReservePrivateRefCountEntry() calls and the
 * LWLockAcquire/LWLockRelease pair around FlushBuffer(); the fragment below
 * is therefore incomplete as shown.
 */
3638 {
3639  int i;
3640  BufferDesc *bufHdr;
3641 
3642  /* Make sure we can handle the pin inside the loop */
3644 
3645  for (i = 0; i < NBuffers; i++)
3646  {
3647  uint32 buf_state;
3648 
3649  bufHdr = GetBufferDescriptor(i);
3650 
3651  /*
3652  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3653  * and saves some cycles.
3654  */
3655  if (bufHdr->tag.rnode.dbNode != dbid)
3656  continue;
3657 
3659 
3660  buf_state = LockBufHdr(bufHdr);
3661  if (bufHdr->tag.rnode.dbNode == dbid &&
3662  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3663  {
3664  PinBuffer_Locked(bufHdr);
3666  FlushBuffer(bufHdr, NULL);
3668  UnpinBuffer(bufHdr, true);
3669  }
3670  else
3671  UnlockBufHdr(bufHdr, buf_state);
3672  }
3673 }
ResourceOwner CurrentResourceOwner
Definition: resowner.c:144
#define BM_DIRTY
Definition: buf_internals.h:58
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln)
Definition: bufmgr.c:2734
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1808
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:441
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1752
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:930
#define BM_VALID
Definition: buf_internals.h:59
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4547
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:1707
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1206
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:133
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:215

◆ FlushOneBuffer()

void FlushOneBuffer ( Buffer  buffer)

Definition at line 3680 of file bufmgr.c.

References Assert, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, FlushBuffer(), GetBufferDescriptor, and LWLockHeldByMe().

Referenced by hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), and XLogReadBufferForRedoExtended().

/*
 * FlushOneBuffer — write a single pinned shared buffer's page to storage.
 * Local buffers are not supported (asserted below).
 *
 * NOTE(review): source line 3691 is omitted by the Doxygen rendering
 * (hyperlinked on the page) — presumably the
 * Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr))) check,
 * given the References list; the fragment below is incomplete as shown.
 */
3681 {
3682  BufferDesc *bufHdr;
3683 
3684  /* currently not needed, but no fundamental reason not to support */
3685  Assert(!BufferIsLocal(buffer));
3686 
3687  Assert(BufferIsPinned(buffer));
3688 
3689  bufHdr = GetBufferDescriptor(buffer - 1);
3690 
3692 
3693  FlushBuffer(bufHdr, NULL);
3694 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
bool LWLockHeldByMe(LWLock *l)
Definition: lwlock.c:1924
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln)
Definition: bufmgr.c:2734
#define GetBufferDescriptor(id)
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37

◆ FlushRelationBuffers()

void FlushRelationBuffers ( Relation  rel)

Definition at line 3441 of file bufmgr.c.

References ErrorContextCallback::arg, buftag::blockNum, BM_DIRTY, BM_JUST_DIRTIED, BM_VALID, BufferDescriptorGetContentLock, ErrorContextCallback::callback, CurrentResourceOwner, error_context_stack, FlushBuffer(), buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, i, local_buffer_write_error_callback(), LocalBufHdrGetBlock, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, NLocBuffer, PageSetChecksumInplace(), pg_atomic_read_u32(), pg_atomic_unlocked_write_u32(), PinBuffer_Locked(), ErrorContextCallback::previous, RelationData::rd_node, RelationData::rd_smgr, RelationOpenSmgr, RelationUsesLocalBuffers, RelFileNodeEquals, ReservePrivateRefCountEntry(), ResourceOwnerEnlargeBuffers(), buftag::rnode, smgrwrite(), BufferDesc::state, BufferDesc::tag, UnlockBufHdr, and UnpinBuffer().

Referenced by heapam_relation_copy_data(), and index_copy_data().

/*
 * FlushRelationBuffers — write out (without dropping) all dirty, valid
 * buffers belonging to relation 'rel'.  Temp relations use backend-local
 * buffers, which are written directly via smgrwrite(); shared buffers are
 * pinned and flushed through FlushBuffer().
 *
 * NOTE(review): the Doxygen rendering omits several hyperlinked source
 * lines (3466, 3491, 3506, 3513, 3515) — among them, per the References
 * list, the error-callback assignment, ResourceOwnerEnlargeBuffers()/
 * ReservePrivateRefCountEntry(), and the LWLockAcquire/LWLockRelease pair
 * around FlushBuffer(); the fragment below is incomplete as shown.
 */
3442 {
3443  int i;
3444  BufferDesc *bufHdr;
3445 
3446  /* Open rel at the smgr level if not already done */
3447  RelationOpenSmgr(rel);
3448 
3449  if (RelationUsesLocalBuffers(rel))
3450  {
3451  for (i = 0; i < NLocBuffer; i++)
3452  {
3453  uint32 buf_state;
3454 
3455  bufHdr = GetLocalBufferDescriptor(i);
3456  if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
3457  ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
3458  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3459  {
3460  ErrorContextCallback errcallback;
3461  Page localpage;
3462 
3463  localpage = (char *) LocalBufHdrGetBlock(bufHdr);
3464 
3465  /* Setup error traceback support for ereport() */
3467  errcallback.arg = (void *) bufHdr;
3468  errcallback.previous = error_context_stack;
3469  error_context_stack = &errcallback;
3470 
3471  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
3472 
3473  smgrwrite(rel->rd_smgr,
3474  bufHdr->tag.forkNum,
3475  bufHdr->tag.blockNum,
3476  localpage,
3477  false);
3478 
3479  buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
3480  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
3481 
3482  /* Pop the error context stack */
3483  error_context_stack = errcallback.previous;
3484  }
3485  }
3486 
3487  return;
3488  }
3489 
3490  /* Make sure we can handle the pin inside the loop */
3492 
3493  for (i = 0; i < NBuffers; i++)
3494  {
3495  uint32 buf_state;
3496 
3497  bufHdr = GetBufferDescriptor(i);
3498 
3499  /*
3500  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3501  * and saves some cycles.
3502  */
3503  if (!RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node))
3504  continue;
3505 
3507 
3508  buf_state = LockBufHdr(bufHdr);
3509  if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
3510  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3511  {
3512  PinBuffer_Locked(bufHdr);
3514  FlushBuffer(bufHdr, rel->rd_smgr);
3516  UnpinBuffer(bufHdr, true);
3517  }
3518  else
3519  UnlockBufHdr(bufHdr, buf_state);
3520  }
3521 }
#define LocalBufHdrGetBlock(bufHdr)
Definition: bufmgr.c:64
ForkNumber forkNum
Definition: buf_internals.h:93
static void local_buffer_write_error_callback(void *arg)
Definition: bufmgr.c:4501
ResourceOwner CurrentResourceOwner
Definition: resowner.c:144
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
#define GetLocalBufferDescriptor(id)
#define BM_DIRTY
Definition: buf_internals.h:58
void(* callback)(void *arg)
Definition: elog.h:242
struct ErrorContextCallback * previous
Definition: elog.h:241
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln)
Definition: bufmgr.c:2734
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1808
ErrorContextCallback * error_context_stack
Definition: elog.c:93
#define RelationOpenSmgr(relation)
Definition: rel.h:514
int NLocBuffer
Definition: localbuf.c:41
void smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer, bool skipFsync)
Definition: smgr.c:523
#define GetBufferDescriptor(id)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:63
unsigned int uint32
Definition: c.h:441
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1752
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:930
#define BM_VALID
Definition: buf_internals.h:59
RelFileNode rd_node
Definition: rel.h:55
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4547
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:1707
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1422
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1206
BlockNumber blockNum
Definition: buf_internals.h:94
RelFileNode rnode
Definition: buf_internals.h:92
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:573
BufferTag tag
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:133
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:277
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:215
pg_atomic_uint32 state
Pointer Page
Definition: bufpage.h:78
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ FlushRelationsAllBuffers()

void FlushRelationsAllBuffers ( struct SMgrRelationData **  smgrs,
int  nrels 
)

◆ FreeAccessStrategy()

void FreeAccessStrategy ( BufferAccessStrategy  strategy)

Definition at line 597 of file freelist.c.

References pfree().

Referenced by blgetbitmap(), FreeBulkInsertState(), heap_endscan(), and initscan().

598 {
599  /* don't crash if called on a "default" strategy */
600  if (strategy != NULL)
601  pfree(strategy);
602 }
void pfree(void *pointer)
Definition: mcxt.c:1057

◆ GetAccessStrategy()

BufferAccessStrategy GetAccessStrategy ( BufferAccessStrategyType  btype)

Definition at line 542 of file freelist.c.

References BAS_BULKREAD, BAS_BULKWRITE, BAS_NORMAL, BAS_VACUUM, BufferAccessStrategyData::btype, BufferAccessStrategyData::buffers, elog, ERROR, Min, NBuffers, offsetof, palloc0(), and BufferAccessStrategyData::ring_size.

Referenced by blgetbitmap(), bt_check_every_level(), collect_corrupt_items(), collect_visibility_data(), do_autovacuum(), GetBulkInsertState(), initscan(), pgstat_index(), pgstathashindex(), pgstatindex_impl(), statapprox_heap(), vacuum(), and verify_heapam().

/*
 * GetAccessStrategy — allocate and initialize a BufferAccessStrategy
 * ("buffer ring") object of the requested type.  BAS_NORMAL returns NULL,
 * which stands for the default strategy.
 *
 * NOTE(review): the Doxygen rendering omits hyperlinked source line 580 —
 * presumably the palloc0(offsetof(BufferAccessStrategyData, buffers) +
 * ... ) call, given the References list; the fragment below is incomplete
 * as shown.
 */
543 {
544  BufferAccessStrategy strategy;
545  int ring_size;
546 
547  /*
548  * Select ring size to use. See buffer/README for rationales.
549  *
550  * Note: if you change the ring size for BAS_BULKREAD, see also
551  * SYNC_SCAN_REPORT_INTERVAL in access/heap/syncscan.c.
552  */
553  switch (btype)
554  {
555  case BAS_NORMAL:
556  /* if someone asks for NORMAL, just give 'em a "default" object */
557  return NULL;
558 
559  case BAS_BULKREAD:
560  ring_size = 256 * 1024 / BLCKSZ;
561  break;
562  case BAS_BULKWRITE:
563  ring_size = 16 * 1024 * 1024 / BLCKSZ;
564  break;
565  case BAS_VACUUM:
566  ring_size = 256 * 1024 / BLCKSZ;
567  break;
568 
569  default:
570  elog(ERROR, "unrecognized buffer access strategy: %d",
571  (int) btype);
572  return NULL; /* keep compiler quiet */
573  }
574 
575  /* Make sure ring isn't an undue fraction of shared buffers */
576  ring_size = Min(NBuffers / 8, ring_size);
577 
578  /* Allocate the object and initialize all elements to zeroes */
579  strategy = (BufferAccessStrategy)
581  ring_size * sizeof(Buffer));
582 
583  /* Set fields that don't start out zero */
584  strategy->btype = btype;
585  strategy->ring_size = ring_size;
586 
587  return strategy;
588 }
struct BufferAccessStrategyData * BufferAccessStrategy
Definition: buf.h:44
#define Min(x, y)
Definition: c.h:986
#define ERROR
Definition: elog.h:45
BufferAccessStrategyType btype
Definition: freelist.c:74
void * palloc0(Size size)
Definition: mcxt.c:981
#define elog(elevel,...)
Definition: elog.h:227
int NBuffers
Definition: globals.c:133
int Buffer
Definition: buf.h:23
#define offsetof(type, field)
Definition: c.h:727

◆ HoldingBufferPinThatDelaysRecovery()

bool HoldingBufferPinThatDelaysRecovery ( void  )

Definition at line 4147 of file bufmgr.c.

References GetPrivateRefCount(), and GetStartupBufferPinWaitBufId().

Referenced by CheckRecoveryConflictDeadlock(), and RecoveryConflictInterrupt().

4148 {
4149  int bufid = GetStartupBufferPinWaitBufId();
4150 
4151  /*
4152  * If we get woken slowly then it's possible that the Startup process was
4153  * already woken by other backends before we got here. Also possible that
4154  * we get here by multiple interrupts or interrupts at inappropriate
4155  * times, so make sure we do nothing if the bufid is not set.
4156  */
4157  if (bufid < 0)
4158  return false;
4159 
4160  if (GetPrivateRefCount(bufid + 1) > 0)
4161  return true;
4162 
4163  return false;
4164 }
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
int GetStartupBufferPinWaitBufId(void)
Definition: proc.c:656

◆ IncrBufferRefCount()

void IncrBufferRefCount ( Buffer  buffer)

Definition at line 3738 of file bufmgr.c.

References Assert, BufferIsLocal, BufferIsPinned, CurrentResourceOwner, GetPrivateRefCountEntry(), LocalRefCount, PrivateRefCountEntry::refcount, ResourceOwnerEnlargeBuffers(), and ResourceOwnerRememberBuffer().

Referenced by _bt_steppage(), btrestrpos(), entryLoadMoreItems(), ReadBufferBI(), scanPostingTree(), startScanEntry(), and tts_buffer_heap_store_tuple().

/*
 * IncrBufferRefCount — bump this backend's pin count on an already-pinned
 * buffer (local or shared).
 *
 * NOTE(review): the Doxygen rendering omits hyperlinked source lines 3741
 * and 3752 — presumably the ResourceOwnerEnlargeBuffers() and
 * ResourceOwnerRememberBuffer() calls, given the References list; the
 * fragment below is incomplete as shown.
 */
3739 {
3740  Assert(BufferIsPinned(buffer));
3742  if (BufferIsLocal(buffer))
3743  LocalRefCount[-buffer - 1]++;
3744  else
3745  {
3746  PrivateRefCountEntry *ref;
3747 
3748  ref = GetPrivateRefCountEntry(buffer, true);
3749  Assert(ref != NULL);
3750  ref->refcount++;
3751  }
3753 }
static PrivateRefCountEntry * GetPrivateRefCountEntry(Buffer buffer, bool do_move)
Definition: bufmgr.c:307
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
ResourceOwner CurrentResourceOwner
Definition: resowner.c:144
void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:943
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:930
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
int32 * LocalRefCount
Definition: localbuf.c:45

◆ InitBufferPool()

void InitBufferPool ( void  )

Definition at line 67 of file buf_init.c.

References Assert, backend_flush_after, buf, BufferDesc::buf_id, BufferBlocks, BufferDescriptorGetContentLock, BufferDescriptorGetIOLock, CLEAR_BUFFERTAG, BufferDesc::freeNext, FREENEXT_END_OF_LIST, GetBufferDescriptor, i, LWLockInitialize(), LWTRANCHE_BUFFER_CONTENT, LWTRANCHE_BUFFER_IO, NBuffers, pg_atomic_init_u32(), ShmemInitStruct(), BufferDesc::state, StrategyInitialize(), BufferDesc::tag, BufferDesc::wait_backend_pid, and WritebackContextInit().

Referenced by CreateSharedMemoryAndSemaphores().

/*
 * InitBufferPool — allocate (or attach to) the shared-memory structures of
 * the buffer pool: descriptors, data blocks, I/O locks and the checkpoint
 * sort array, then initialize each buffer header and the freelist.
 *
 * NOTE(review): the Doxygen rendering omits many hyperlinked source lines
 * (75, 85, 87, 97, 116, 131-132, 134-135, 146-147) — among them, per the
 * References list, the assignments to BufferDescriptors/BufferIOLWLockArray/
 * CkptBufferIds, the GetBufferDescriptor(i) lookup, the LWLockInitialize
 * calls for content and I/O locks, and the WritebackContextInit call; the
 * fragment below is incomplete as shown.
 */
68 {
69  bool foundBufs,
70  foundDescs,
71  foundIOLocks,
72  foundBufCkpt;
73 
74  /* Align descriptors to a cacheline boundary. */
76  ShmemInitStruct("Buffer Descriptors",
77  NBuffers * sizeof(BufferDescPadded),
78  &foundDescs);
79 
80  BufferBlocks = (char *)
81  ShmemInitStruct("Buffer Blocks",
82  NBuffers * (Size) BLCKSZ, &foundBufs);
83 
84  /* Align lwlocks to cacheline boundary */
86  ShmemInitStruct("Buffer IO Locks",
88  &foundIOLocks);
89 
90  /*
91  * The array used to sort to-be-checkpointed buffer ids is located in
92  * shared memory, to avoid having to allocate significant amounts of
93  * memory at runtime. As that'd be in the middle of a checkpoint, or when
94  * the checkpointer is restarted, memory allocation failures would be
95  * painful.
96  */
98  ShmemInitStruct("Checkpoint BufferIds",
99  NBuffers * sizeof(CkptSortItem), &foundBufCkpt);
100 
101  if (foundDescs || foundBufs || foundIOLocks || foundBufCkpt)
102  {
103  /* should find all of these, or none of them */
104  Assert(foundDescs && foundBufs && foundIOLocks && foundBufCkpt);
105  /* note: this path is only taken in EXEC_BACKEND case */
106  }
107  else
108  {
109  int i;
110 
111  /*
112  * Initialize all the buffer headers.
113  */
114  for (i = 0; i < NBuffers; i++)
115  {
117 
118  CLEAR_BUFFERTAG(buf->tag);
119 
120  pg_atomic_init_u32(&buf->state, 0);
121  buf->wait_backend_pid = 0;
122 
123  buf->buf_id = i;
124 
125  /*
126  * Initially link all the buffers together as unused. Subsequent
127  * management of this list is done by freelist.c.
128  */
129  buf->freeNext = i + 1;
130 
133 
136  }
137 
138  /* Correct last entry of linked list */
139  GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST;
140  }
141 
142  /* Init other shared buffer-management stuff */
143  StrategyInitialize(!foundDescs);
144 
145  /* Initialize per-backend file flush context */
148 }
#define FREENEXT_END_OF_LIST
LWLockMinimallyPadded * BufferIOLWLockArray
Definition: buf_init.c:22
int wait_backend_pid
int backend_flush_after
Definition: bufmgr.c:158
#define BufferDescriptorGetIOLock(bdesc)
void StrategyInitialize(bool init)
Definition: freelist.c:475
void WritebackContextInit(WritebackContext *context, int *max_pending)
Definition: bufmgr.c:4687
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:396
WritebackContext BackendWritebackContext
Definition: buf_init.c:23
static char * buf
Definition: pg_test_fsync.c:68
BufferDescPadded * BufferDescriptors
Definition: buf_init.c:20
#define GetBufferDescriptor(id)
union LWLockMinimallyPadded LWLockMinimallyPadded
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:743
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:804
#define CLEAR_BUFFERTAG(a)
Definition: buf_internals.h:97
CkptSortItem * CkptBufferIds
Definition: buf_init.c:24
size_t Size
Definition: c.h:540
BufferTag tag
int i
int NBuffers
Definition: globals.c:133
pg_atomic_uint32 state
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:223
char * BufferBlocks
Definition: buf_init.c:21

◆ InitBufferPoolAccess()

void InitBufferPoolAccess ( void  )

Definition at line 2514 of file bufmgr.c.

References HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, and PrivateRefCountArray.

Referenced by BaseInit().

/*
 * InitBufferPoolAccess -- initialize this backend's private bookkeeping for
 * buffer pins: clear the small fixed-size PrivateRefCountArray fast path and
 * create the PrivateRefCountHash overflow table (dynahash, int32 keys --
 * presumably buffer IDs; confirm against GetPrivateRefCount callers).
 * Per the reference text above, called once per backend from BaseInit().
 */
2515 {
2516  HASHCTL hash_ctl;
2517 
2518  memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray)); /* mark all fast-path slots empty */
2519 
2520  hash_ctl.keysize = sizeof(int32);
2521  hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
2522 
2523  PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
2524  HASH_ELEM | HASH_BLOBS); /* HASH_BLOBS: keys hashed as raw bytes */
2525 }
struct PrivateRefCountEntry PrivateRefCountEntry
#define HASH_ELEM
Definition: hsearch.h:95
Size entrysize
Definition: hsearch.h:76
signed int int32
Definition: c.h:429
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:349
static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES]
Definition: bufmgr.c:197
#define HASH_BLOBS
Definition: hsearch.h:97
Size keysize
Definition: hsearch.h:75
static HTAB * PrivateRefCountHash
Definition: bufmgr.c:198

◆ InitBufferPoolBackend()

void InitBufferPoolBackend ( void  )

Definition at line 2537 of file bufmgr.c.

References AtProcExit_Buffers(), and on_shmem_exit().

Referenced by AuxiliaryProcessMain(), and InitPostgres().

2538 {
2540 }
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:361
static void AtProcExit_Buffers(int code, Datum arg)
Definition: bufmgr.c:2547

◆ IsBufferCleanupOK()

bool IsBufferCleanupOK ( Buffer  buffer)

Definition at line 4229 of file bufmgr.c.

References Assert, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsValid, GetBufferDescriptor, GetPrivateRefCount(), LocalRefCount, LockBufHdr(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), and UnlockBufHdr.

Referenced by _hash_doinsert(), _hash_expandtable(), _hash_splitbucket(), hash_xlog_split_allocate_page(), and hashbucketcleanup().

4230 {
4231  BufferDesc *bufHdr;
4232  uint32 buf_state;
4233 
4234  Assert(BufferIsValid(buffer));
4235 
4236  if (BufferIsLocal(buffer))
4237  {
4238  /* There should be exactly one pin */
4239  if (LocalRefCount[-buffer - 1] != 1)
4240  return false;
4241  /* Nobody else to wait for */
4242  return true;
4243  }
4244 
4245  /* There should be exactly one local pin */
4246  if (GetPrivateRefCount(buffer) != 1)
4247  return false;
4248 
4249  bufHdr = GetBufferDescriptor(buffer - 1);
4250 
4251  /* caller must hold exclusive lock on buffer */
4253  LW_EXCLUSIVE));
4254 
4255  buf_state = LockBufHdr(bufHdr);
4256 
4257  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4258  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4259  {
4260  /* pincount is OK. */
4261  UnlockBufHdr(bufHdr, buf_state);
4262  return true;
4263  }
4264 
4265  UnlockBufHdr(bufHdr, buf_state);
4266  return false;
4267 }
bool LWLockHeldByMeInMode(LWLock *l, LWLockMode mode)
Definition: lwlock.c:1942
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4547
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define UnlockBufHdr(desc, s)
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48

◆ LockBuffer()

void LockBuffer ( Buffer  buffer,
int  mode 
)

Definition at line 3939 of file bufmgr.c.

References Assert, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, elog, ERROR, GetBufferDescriptor, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), and LWLockRelease().

Referenced by _bt_lockbuf(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_getbuf(), _hash_getbuf_with_strategy(), _hash_getcachedmetap(), _hash_getnewbuf(), _hash_init(), _hash_kill_items(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), blbulkdelete(), blgetbitmap(), blinsert(), BloomNewBuffer(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_page_cleanup(), brinbuild(), brinbuildempty(), bringetbitmap(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinLockRevmapPageForUpdate(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinsummarize(), bt_metap(), bt_page_items_internal(), bt_page_stats_internal(), bt_recheck_sibling_links(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), ConditionalLockBufferForCleanup(), count_nondeletable_pages(), entryLoadMoreItems(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), get_raw_page_internal(), GetVisibilityMapPins(), ginbuildempty(), ginbulkdelete(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistbuildempty(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfinishsplit(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbucketcleanup(), 
hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_page_prune_opt(), heap_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_for_cluster(), heapam_scan_analyze_next_block(), heapam_scan_bitmap_next_block(), heapam_scan_sample_next_tuple(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), initBloomState(), lazy_scan_heap(), LockBufferForCleanup(), log_newpage_range(), palloc_btree_page(), pg_visibility(), pgrowlocks(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationGetBufferForTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), shiftList(), spgdoinsert(), spgGetCache(), SpGistNewBuffer(), spgprocesspending(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), summarize_range(), UnlockReleaseBuffer(), verify_heapam(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), XLogReadBufferExtended(), XLogReadBufferForRedoExtended(), and XLogRecordPageWithFreeSpace().

3940 {
3941  BufferDesc *buf;
3942 
3943  Assert(BufferIsPinned(buffer));
3944  if (BufferIsLocal(buffer))
3945  return; /* local buffers need no lock */
3946 
3947  buf = GetBufferDescriptor(buffer - 1);
3948 
3949  if (mode == BUFFER_LOCK_UNLOCK)
3951  else if (mode == BUFFER_LOCK_SHARE)
3953  else if (mode == BUFFER_LOCK_EXCLUSIVE)
3955  else
3956  elog(ERROR, "unrecognized buffer lock mode: %d", mode);
3957 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1808
#define ERROR
Definition: elog.h:45
static char * buf
Definition: pg_test_fsync.c:68
#define GetBufferDescriptor(id)
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1206
#define elog(elevel,...)
Definition: elog.h:227
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97

◆ LockBufferForCleanup()

void LockBufferForCleanup ( Buffer  buffer)

Definition at line 3996 of file bufmgr.c.

References Assert, BM_PIN_COUNT_WAITER, BUF_STATE_GET_REFCOUNT, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsPinned, DeadlockTimeout, elog, ERROR, get_ps_display(), GetBufferDescriptor, GetCurrentTimestamp(), GetPrivateRefCount(), InHotStandby, LocalRefCount, LockBuffer(), LockBufHdr(), log_recovery_conflict_waits, LogRecoveryConflict(), MyProcPid, now(), palloc(), pfree(), PG_WAIT_BUFFER_PIN, PROCSIG_RECOVERY_CONFLICT_BUFFERPIN, ProcWaitForSignal(), ResolveRecoveryConflictWithBufferPin(), set_ps_display(), SetStartupBufferPinWaitBufId(), TimestampDifferenceExceeds(), UnlockBufHdr, update_process_title, and BufferDesc::wait_backend_pid.

Referenced by _bt_upgradelockbufcleanup(), ginVacuumPostingTree(), hashbulkdelete(), heap_force_common(), lazy_scan_heap(), ReadBuffer_common(), and XLogReadBufferForRedoExtended().

3997 {
3998  BufferDesc *bufHdr;
3999  char *new_status = NULL;
4000  TimestampTz waitStart = 0;
4001  bool logged_recovery_conflict = false;
4002 
4003  Assert(BufferIsPinned(buffer));
4004  Assert(PinCountWaitBuf == NULL);
4005 
4006  if (BufferIsLocal(buffer))
4007  {
4008  /* There should be exactly one pin */
4009  if (LocalRefCount[-buffer - 1] != 1)
4010  elog(ERROR, "incorrect local pin count: %d",
4011  LocalRefCount[-buffer - 1]);
4012  /* Nobody else to wait for */
4013  return;
4014  }
4015 
4016  /* There should be exactly one local pin */
4017  if (GetPrivateRefCount(buffer) != 1)
4018  elog(ERROR, "incorrect local pin count: %d",
4019  GetPrivateRefCount(buffer));
4020 
4021  bufHdr = GetBufferDescriptor(buffer - 1);
4022 
4023  for (;;)
4024  {
4025  uint32 buf_state;
4026 
4027  /* Try to acquire lock */
4029  buf_state = LockBufHdr(bufHdr);
4030 
4031  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4032  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4033  {
4034  /* Successfully acquired exclusive lock with pincount 1 */
4035  UnlockBufHdr(bufHdr, buf_state);
4036 
4037  /*
4038  * Emit the log message if recovery conflict on buffer pin was
4039  * resolved but the startup process waited longer than
4040  * deadlock_timeout for it.
4041  */
4042  if (logged_recovery_conflict)
4044  waitStart, GetCurrentTimestamp(),
4045  NULL, false);
4046 
4047  /* Report change to non-waiting status */
4048  if (new_status)
4049  {
4050  set_ps_display(new_status);
4051  pfree(new_status);
4052  }
4053  return;
4054  }
4055  /* Failed, so mark myself as waiting for pincount 1 */
4056  if (buf_state & BM_PIN_COUNT_WAITER)
4057  {
4058  UnlockBufHdr(bufHdr, buf_state);
4059  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4060  elog(ERROR, "multiple backends attempting to wait for pincount 1");
4061  }
4062  bufHdr->wait_backend_pid = MyProcPid;
4063  PinCountWaitBuf = bufHdr;
4064  buf_state |= BM_PIN_COUNT_WAITER;
4065  UnlockBufHdr(bufHdr, buf_state);
4066  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4067 
4068  /* Wait to be signaled by UnpinBuffer() */
4069  if (InHotStandby)
4070  {
4071  /* Report change to waiting status */
4072  if (update_process_title && new_status == NULL)
4073  {
4074  const char *old_status;
4075  int len;
4076 
4077  old_status = get_ps_display(&len);
4078  new_status = (char *) palloc(len + 8 + 1);
4079  memcpy(new_status, old_status, len);
4080  strcpy(new_status + len, " waiting");
4081  set_ps_display(new_status);
4082  new_status[len] = '\0'; /* truncate off " waiting" */
4083  }
4084 
4085  /*
4086  * Emit the log message if the startup process is waiting longer
4087  * than deadlock_timeout for recovery conflict on buffer pin.
4088  *
4089  * Skip this if first time through because the startup process has
4090  * not started waiting yet in this case. So, the wait start
4091  * timestamp is set after this logic.
4092  */
4093  if (waitStart != 0 && !logged_recovery_conflict)
4094  {
4096 
4097  if (TimestampDifferenceExceeds(waitStart, now,
4098  DeadlockTimeout))
4099  {
4101  waitStart, now, NULL, true);
4102  logged_recovery_conflict = true;
4103  }
4104  }
4105 
4106  /*
4107  * Set the wait start timestamp if logging is enabled and first
4108  * time through.
4109  */
4110  if (log_recovery_conflict_waits && waitStart == 0)
4111  waitStart = GetCurrentTimestamp();
4112 
4113  /* Publish the bufid that Startup process waits on */
4114  SetStartupBufferPinWaitBufId(buffer - 1);
4115  /* Set alarm and then wait to be signaled by UnpinBuffer() */
4117  /* Reset the published bufid */
4119  }
4120  else
4122 
4123  /*
4124  * Remove flag marking us as waiter. Normally this will not be set
4125  * anymore, but ProcWaitForSignal() can return for other signals as
4126  * well. We take care to only reset the flag if we're the waiter, as
4127  * theoretically another backend could have started waiting. That's
4128  * impossible with the current usages due to table level locking, but
4129  * better be safe.
4130  */
4131  buf_state = LockBufHdr(bufHdr);
4132  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4133  bufHdr->wait_backend_pid == MyProcPid)
4134  buf_state &= ~BM_PIN_COUNT_WAITER;
4135  UnlockBufHdr(bufHdr, buf_state);
4136 
4137  PinCountWaitBuf = NULL;
4138  /* Loop back and try again */
4139  }
4140 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
int MyProcPid
Definition: globals.c:41
int wait_backend_pid
bool update_process_title
Definition: ps_status.c:36
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1578
int64 TimestampTz
Definition: timestamp.h:39
void LogRecoveryConflict(ProcSignalReason reason, TimestampTz wait_start, TimestampTz now, VirtualTransactionId *wait_list, bool still_waiting)
Definition: standby.c:234
#define InHotStandby
Definition: xlog.h:74
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1709
void set_ps_display(const char *activity)
Definition: ps_status.c:349
void pfree(void *pointer)
Definition: mcxt.c:1057
#define ERROR
Definition: elog.h:45
void ResolveRecoveryConflictWithBufferPin(void)
Definition: standby.c:738
const char * get_ps_display(int *displen)
Definition: ps_status.c:430
void SetStartupBufferPinWaitBufId(int bufid)
Definition: proc.c:644
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
bool log_recovery_conflict_waits
Definition: standby.c:42
void ProcWaitForSignal(uint32 wait_event_info)
Definition: proc.c:1888
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3939
#define PG_WAIT_BUFFER_PIN
Definition: pgstat.h:897
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4547
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
void * palloc(Size size)
Definition: mcxt.c:950
#define UnlockBufHdr(desc, s)
#define elog(elevel,...)
Definition: elog.h:227
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:165
int DeadlockTimeout
Definition: proc.c:60
Datum now(PG_FUNCTION_ARGS)
Definition: timestamp.c:1542
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:64

◆ MarkBufferDirty()

void MarkBufferDirty ( Buffer  buffer)

Definition at line 1483 of file bufmgr.c.

References Assert, BM_DIRTY, BM_JUST_DIRTIED, BM_LOCKED, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, BufferIsValid, elog, ERROR, GetBufferDescriptor, LW_EXCLUSIVE, LWLockHeldByMeInMode(), MarkLocalBufferDirty(), pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pgBufferUsage, BufferUsage::shared_blks_dirtied, BufferDesc::state, VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, VacuumPageDirty, and WaitBufHdrUnlocked().

Referenced by _bt_clear_incomplete_split(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newroot(), _bt_restore_meta(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), brin_doinsert(), brin_doupdate(), brin_initialize_empty_new_buffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinRevmapDesummarizeRange(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), createPostingTree(), do_setval(), doPickSplit(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), generic_redo(), GenericXLogFinish(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginDeletePage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistbuild(), gistbuildempty(), gistdeletepage(), gistplacetopage(), gistprunepage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistvacuumpage(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), 
hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_prune(), heap_update(), heap_xlog_clean(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), lazy_scan_heap(), lazy_vacuum_page(), log_newpage_range(), moveLeafs(), nextval_internal(), RelationGetBufferForTuple(), revmap_physical_extend(), saveNodeLink(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), SpGistUpdateMetaPage(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), writeListPage(), and XLogReadBufferForRedoExtended().

1484 {
1485  BufferDesc *bufHdr;
1486  uint32 buf_state;
1487  uint32 old_buf_state;
1488 
1489  if (!BufferIsValid(buffer))
1490  elog(ERROR, "bad buffer ID: %d", buffer);
1491 
1492  if (BufferIsLocal(buffer))
1493  {
1494  MarkLocalBufferDirty(buffer);
1495  return;
1496  }
1497 
1498  bufHdr = GetBufferDescriptor(buffer - 1);
1499 
1500  Assert(BufferIsPinned(buffer));
1502  LW_EXCLUSIVE));
1503 
1504  old_buf_state = pg_atomic_read_u32(&bufHdr->state);
1505  for (;;)
1506  {
1507  if (old_buf_state & BM_LOCKED)
1508  old_buf_state = WaitBufHdrUnlocked(bufHdr);
1509 
1510  buf_state = old_buf_state;
1511 
1512  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1513  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
1514 
1515  if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
1516  buf_state))
1517  break;
1518  }
1519 
1520  /*
1521  * If the buffer was not dirty already, do vacuum accounting.
1522  */
1523  if (!(old_buf_state & BM_DIRTY))
1524  {
1525  VacuumPageDirty++;
1527  if (VacuumCostActive)
1529  }
1530 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
bool LWLockHeldByMeInMode(LWLock *l, LWLockMode mode)
Definition: lwlock.c:1942
int VacuumCostBalance
Definition: globals.c:149
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:311
int64 VacuumPageDirty
Definition: globals.c:147
#define BM_DIRTY
Definition: buf_internals.h:58
int VacuumCostPageDirty
Definition: globals.c:141
#define ERROR
Definition: elog.h:45
#define GetBufferDescriptor(id)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:63
long shared_blks_dirtied
Definition: instrument.h:23
unsigned int uint32
Definition: c.h:441
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:286
#define BM_LOCKED
Definition: buf_internals.h:57
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
static uint32 WaitBufHdrUnlocked(BufferDesc *buf)
Definition: bufmgr.c:4575
#define elog(elevel,...)
Definition: elog.h:227
pg_atomic_uint32 state
BufferUsage pgBufferUsage
Definition: instrument.c:20
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48
bool VacuumCostActive
Definition: globals.c:150
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ MarkBufferDirtyHint()

void MarkBufferDirtyHint ( Buffer  buffer,
bool  buffer_std 
)

Definition at line 3770 of file bufmgr.c.

References Assert, BM_DIRTY, BM_JUST_DIRTIED, BM_PERMANENT, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferGetPage, BufferIsLocal, BufferIsValid, PGPROC::delayChkpt, elog, ERROR, GetBufferDescriptor, GetPrivateRefCount(), InvalidXLogRecPtr, LockBufHdr(), LWLockHeldByMe(), MarkLocalBufferDirty(), MyProc, PageSetLSN, pg_atomic_read_u32(), pgBufferUsage, RecoveryInProgress(), RelFileNodeSkippingWAL(), buftag::rnode, BufferUsage::shared_blks_dirtied, BufferDesc::state, BufferDesc::tag, UnlockBufHdr, VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, VacuumPageDirty, XLogHintBitIsNeeded, XLogRecPtrIsInvalid, and XLogSaveBufferForHint().

Referenced by _bt_check_unique(), _bt_killitems(), _hash_kill_items(), brin_start_evacuating_page(), btvacuumpage(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), gistkillitems(), heap_page_prune(), read_seq_tuple(), SetHintBits(), and XLogRecordPageWithFreeSpace().

3771 {
3772  BufferDesc *bufHdr;
3773  Page page = BufferGetPage(buffer);
3774 
3775  if (!BufferIsValid(buffer))
3776  elog(ERROR, "bad buffer ID: %d", buffer);
3777 
3778  if (BufferIsLocal(buffer))
3779  {
3780  MarkLocalBufferDirty(buffer);
3781  return;
3782  }
3783 
3784  bufHdr = GetBufferDescriptor(buffer - 1);
3785 
3786  Assert(GetPrivateRefCount(buffer) > 0);
3787  /* here, either share or exclusive lock is OK */
3789 
3790  /*
3791  * This routine might get called many times on the same page, if we are
3792  * making the first scan after commit of an xact that added/deleted many
3793  * tuples. So, be as quick as we can if the buffer is already dirty. We
3794  * do this by not acquiring spinlock if it looks like the status bits are
3795  * already set. Since we make this test unlocked, there's a chance we
3796  * might fail to notice that the flags have just been cleared, and failed
3797  * to reset them, due to memory-ordering issues. But since this function
3798  * is only intended to be used in cases where failing to write out the
3799  * data would be harmless anyway, it doesn't really matter.
3800  */
3801  if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
3803  {
3805  bool dirtied = false;
3806  bool delayChkpt = false;
3807  uint32 buf_state;
3808 
3809  /*
3810  * If we need to protect hint bit updates from torn writes, WAL-log a
3811  * full page image of the page. This full page image is only necessary
3812  * if the hint bit update is the first change to the page since the
3813  * last checkpoint.
3814  *
3815  * We don't check full_page_writes here because that logic is included
3816  * when we call XLogInsert() since the value changes dynamically.
3817  */
3818  if (XLogHintBitIsNeeded() &&
3819  (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
3820  {
3821  /*
3822  * If we must not write WAL, due to a relfilenode-specific
3823  * condition or being in recovery, don't dirty the page. We can
3824  * set the hint, just not dirty the page as a result so the hint
3825  * is lost when we evict the page or shutdown.
3826  *
3827  * See src/backend/storage/page/README for longer discussion.
3828  */
3829  if (RecoveryInProgress() ||
3830  RelFileNodeSkippingWAL(bufHdr->tag.rnode))
3831  return;
3832 
3833  /*
3834  * If the block is already dirty because we either made a change
3835  * or set a hint already, then we don't need to write a full page
3836  * image. Note that aggressive cleaning of blocks dirtied by hint
3837  * bit setting would increase the call rate. Bulk setting of hint
3838  * bits would reduce the call rate...
3839  *
3840  * We must issue the WAL record before we mark the buffer dirty.
3841  * Otherwise we might write the page before we write the WAL. That
3842  * causes a race condition, since a checkpoint might occur between
3843  * writing the WAL record and marking the buffer dirty. We solve
3844  * that with a kluge, but one that is already in use during
3845  * transaction commit to prevent race conditions. Basically, we
3846  * simply prevent the checkpoint WAL record from being written
3847  * until we have marked the buffer dirty. We don't start the
3848  * checkpoint flush until we have marked dirty, so our checkpoint
3849  * must flush the change to disk successfully or the checkpoint
3850  * never gets written, so crash recovery will fix.
3851  *
3852  * It's possible we may enter here without an xid, so it is
3853  * essential that CreateCheckpoint waits for virtual transactions
3854  * rather than full transactionids.
3855  */
3856  MyProc->delayChkpt = delayChkpt = true;
3857  lsn = XLogSaveBufferForHint(buffer, buffer_std);
3858  }
3859 
3860  buf_state = LockBufHdr(bufHdr);
3861 
3862  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
3863 
3864  if (!(buf_state & BM_DIRTY))
3865  {
3866  dirtied = true; /* Means "will be dirtied by this action" */
3867 
3868  /*
3869  * Set the page LSN if we wrote a backup block. We aren't supposed
3870  * to set this when only holding a share lock but as long as we
3871  * serialise it somehow we're OK. We choose to set LSN while
3872  * holding the buffer header lock, which causes any reader of an
3873  * LSN who holds only a share lock to also obtain a buffer header
3874  * lock before using PageGetLSN(), which is enforced in
3875  * BufferGetLSNAtomic().
3876  *
3877  * If checksums are enabled, you might think we should reset the
3878  * checksum here. That will happen when the page is written
3879  * sometime later in this checkpoint cycle.
3880  */
3881  if (!XLogRecPtrIsInvalid(lsn))
3882  PageSetLSN(page, lsn);
3883  }
3884 
3885  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
3886  UnlockBufHdr(bufHdr, buf_state);
3887 
3888  if (delayChkpt)
3889  MyProc->delayChkpt = false;
3890 
3891  if (dirtied)
3892  {
3893  VacuumPageDirty++;
3895  if (VacuumCostActive)
3897  }
3898  }
3899 }
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
#define BM_PERMANENT
Definition: buf_internals.h:66
int VacuumCostBalance
Definition: globals.c:149
bool LWLockHeldByMe(LWLock *l)
Definition: lwlock.c:1924
PGPROC * MyProc
Definition: proc.c:68
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
Definition: xloginsert.c:917
int64 VacuumPageDirty
Definition: globals.c:147
bool RecoveryInProgress(void)
Definition: xlog.c:8132
#define BM_DIRTY
Definition: buf_internals.h:58
int VacuumCostPageDirty
Definition: globals.c:141
#define ERROR
Definition: elog.h:45
bool delayChkpt
Definition: proc.h:187
#define GetBufferDescriptor(id)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:63
long shared_blks_dirtied
Definition: instrument.h:23
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:286
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4547
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:92
bool RelFileNodeSkippingWAL(RelFileNode rnode)
Definition: storage.c:497
BufferTag tag
#define UnlockBufHdr(desc, s)
#define elog(elevel,...)
Definition: elog.h:227
pg_atomic_uint32 state
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
#define XLogHintBitIsNeeded()
Definition: xlog.h:202
Pointer Page
Definition: bufpage.h:78
BufferUsage pgBufferUsage
Definition: instrument.c:20
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48
bool VacuumCostActive
Definition: globals.c:150
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ PrefetchBuffer()

PrefetchBufferResult PrefetchBuffer ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

Definition at line 587 of file bufmgr.c.

References Assert, BlockNumberIsValid, ereport, errcode(), errmsg(), ERROR, PrefetchLocalBuffer(), PrefetchSharedBuffer(), RelationData::rd_smgr, RELATION_IS_OTHER_TEMP, RelationIsValid, RelationOpenSmgr, and RelationUsesLocalBuffers.

Referenced by BitmapPrefetch(), count_nondeletable_pages(), HeapTupleHeaderAdvanceLatestRemovedXid(), and pg_prewarm().

/*
 * PrefetchBuffer -- request a prefetch of the given block of a fork of the
 * relation, dispatching to the local-buffer path for relations that use
 * local buffers and to the shared-buffer path otherwise.  Errors out rather
 * than touching another session's temporary relation.
 */
588 {
589  Assert(RelationIsValid(reln));
590  Assert(BlockNumberIsValid(blockNum));
591 
592  /* Open it at the smgr level if not already done */
593  RelationOpenSmgr(reln);
594 
595  if (RelationUsesLocalBuffers(reln))
596  {
597  /* see comments in ReadBufferExtended */
598  if (RELATION_IS_OTHER_TEMP(reln))
599  ereport(ERROR,
600  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
601  errmsg("cannot access temporary tables of other sessions")));
602 
603  /* pass it off to localbuf.c */
604  return PrefetchLocalBuffer(reln->rd_smgr, forkNum, blockNum);
605  }
606  else
607  {
608  /* pass it to the shared buffer version */
609  return PrefetchSharedBuffer(reln->rd_smgr, forkNum, blockNum);
610  }
611 }
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition: localbuf.c:64
int errcode(int sqlerrcode)
Definition: elog.c:694
#define RelationOpenSmgr(relation)
Definition: rel.h:514
#define ERROR
Definition: elog.h:45
#define RelationIsValid(relation)
Definition: rel.h:430
PrefetchBufferResult PrefetchSharedBuffer(SMgrRelation smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:500
#define ereport(elevel,...)
Definition: elog.h:155
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
#define Assert(condition)
Definition: c.h:804
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:594
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:573
int errmsg(const char *fmt,...)
Definition: elog.c:905

◆ PrefetchSharedBuffer()

PrefetchBufferResult PrefetchSharedBuffer ( struct SMgrRelationData smgr_reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

◆ PrintBufferLeakWarning()

void PrintBufferLeakWarning ( Buffer  buffer)

Definition at line 2607 of file bufmgr.c.

References Assert, buftag::blockNum, buf, BUF_FLAG_MASK, BUF_STATE_GET_REFCOUNT, BufferIsLocal, BufferIsValid, elog, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, GetPrivateRefCount(), InvalidBackendId, LocalRefCount, MyBackendId, pfree(), pg_atomic_read_u32(), relpathbackend, buftag::rnode, BufferDesc::state, BufferDesc::tag, and WARNING.

Referenced by CheckForBufferLeaks(), CheckForLocalBufferLeaks(), and ResourceOwnerReleaseInternal().

2608 {
2609  BufferDesc *buf;
2610  int32 loccount;
2611  char *path;
2612  BackendId backend;
2613  uint32 buf_state;
2614 
2615  Assert(BufferIsValid(buffer));
2616  if (BufferIsLocal(buffer))
2617  {
2618  buf = GetLocalBufferDescriptor(-buffer - 1);
2619  loccount = LocalRefCount[-buffer - 1];
2620  backend = MyBackendId;
2621  }
2622  else
2623  {
2624  buf = GetBufferDescriptor(buffer - 1);
2625  loccount = GetPrivateRefCount(buffer);
2626  backend = InvalidBackendId;
2627  }
2628 
2629  /* theoretically we should lock the bufhdr here */
2630  path = relpathbackend(buf->tag.rnode, backend, buf->tag.forkNum);
2631  buf_state = pg_atomic_read_u32(&buf->state);
2632  elog(WARNING,
2633  "buffer refcount leak: [%03d] "
2634  "(rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
2635  buffer, path,
2636  buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
2637  BUF_STATE_GET_REFCOUNT(buf_state), loccount);
2638  pfree(path);
2639 }
BackendId MyBackendId
Definition: globals.c:82
ForkNumber forkNum
Definition: buf_internals.h:93
#define GetLocalBufferDescriptor(id)
signed int int32
Definition: c.h:429
void pfree(void *pointer)
Definition: mcxt.c:1057
#define BUF_FLAG_MASK
Definition: buf_internals.h:45
static char * buf
Definition: pg_test_fsync.c:68
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:387
unsigned int uint32
Definition: c.h:441
#define WARNING
Definition: elog.h:40
#define InvalidBackendId
Definition: backendid.h:23
int BackendId
Definition: backendid.h:21
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:94
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
#define elog(elevel,...)
Definition: elog.h:227
pg_atomic_uint32 state
#define relpathbackend(rnode, backend, forknum)
Definition: relpath.h:78
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ ReadBuffer()

Buffer ReadBuffer ( Relation  reln,
BlockNumber  blockNum 
)

Definition at line 619 of file bufmgr.c.

References MAIN_FORKNUM, RBM_NORMAL, and ReadBufferExtended().

Referenced by _bt_getbuf(), _bt_search_insert(), _hash_getbuf(), _hash_getbuf_with_condlock_cleanup(), blbulkdelete(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brinbuild(), brinGetStats(), brinGetTupleForHeapBlock(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), bt_metap(), bt_page_items_internal(), bt_page_stats_internal(), fill_seq_with_data(), ginFindLeafPage(), ginFindParents(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), GinNewBuffer(), ginStepRight(), ginUpdateStats(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistGetMaxLevel(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_lock_tuple(), heap_update(), initBloomState(), pg_visibility(), pgstatginindex_internal(), read_seq_tuple(), RelationGetBufferForTuple(), ReleaseAndReadBuffer(), revmap_get_buffer(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), shiftList(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), and spgWalk().

620 {
621  return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
622 }
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:666

◆ ReadBufferExtended()

Buffer ReadBufferExtended ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy 
)

Definition at line 666 of file bufmgr.c.

References buf, ereport, errcode(), errmsg(), ERROR, pgstat_count_buffer_hit, pgstat_count_buffer_read, RelationData::rd_rel, RelationData::rd_smgr, ReadBuffer_common(), RELATION_IS_OTHER_TEMP, and RelationOpenSmgr.

Referenced by _hash_getbuf_with_strategy(), _hash_getinitbuf(), _hash_getnewbuf(), autoprewarm_database_main(), blbulkdelete(), blgetbitmap(), blvacuumcleanup(), brin_vacuum_scan(), brinbuildempty(), bt_recheck_sibling_links(), btvacuumpage(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), fsm_readbuf(), get_raw_page_internal(), ginbuildempty(), ginbulkdelete(), ginDeletePage(), ginScanToDelete(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistbuildempty(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbulkdelete(), heapam_scan_analyze_next_block(), heapgetpage(), lazy_scan_heap(), lazy_vacuum_heap(), log_newpage_range(), palloc_btree_page(), pg_prewarm(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstathashindex(), pgstatindex_impl(), ReadBuffer(), ReadBufferBI(), spgprocesspending(), spgvacuumpage(), statapprox_heap(), verify_heapam(), and vm_readbuf().

668 {
669  bool hit;
670  Buffer buf;
671 
672  /* Open it at the smgr level if not already done */
673  RelationOpenSmgr(reln);
674 
675  /*
676  * Reject attempts to read non-local temporary relations; we would be
677  * likely to get wrong data since we have no visibility into the owning
678  * session's local buffers.
679  */
680  if (RELATION_IS_OTHER_TEMP(reln))
681  ereport(ERROR,
682  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
683  errmsg("cannot access temporary tables of other sessions")));
684 
685  /*
686  * Read the buffer, and update pgstat counters to reflect a cache hit or
687  * miss.
688  */
690  buf = ReadBuffer_common(reln->rd_smgr, reln->rd_rel->relpersistence,
691  forkNum, blockNum, mode, strategy, &hit);
692  if (hit)
694  return buf;
695 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
int errcode(int sqlerrcode)
Definition: elog.c:694
Form_pg_class rd_rel
Definition: rel.h:110
#define RelationOpenSmgr(relation)
Definition: rel.h:514
#define ERROR
Definition: elog.h:45
#define pgstat_count_buffer_read(rel)
Definition: pgstat.h:1553
static char * buf
Definition: pg_test_fsync.c:68
#define pgstat_count_buffer_hit(rel)
Definition: pgstat.h:1558
static Buffer ReadBuffer_common(SMgrRelation reln, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit)
Definition: bufmgr.c:729
#define ereport(elevel,...)
Definition: elog.h:155
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:594
int errmsg(const char *fmt,...)
Definition: elog.c:905
int Buffer
Definition: buf.h:23

◆ ReadBufferWithoutRelcache()

Buffer ReadBufferWithoutRelcache ( RelFileNode  rnode,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy 
)

Definition at line 708 of file bufmgr.c.

References Assert, InRecovery, InvalidBackendId, ReadBuffer_common(), and smgropen().

Referenced by XLogReadBufferExtended().

711 {
712  bool hit;
713 
714  SMgrRelation smgr = smgropen(rnode, InvalidBackendId);
715 
717 
718  return ReadBuffer_common(smgr, RELPERSISTENCE_PERMANENT, forkNum, blockNum,
719  mode, strategy, &hit);
720 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
bool InRecovery
Definition: xlog.c:206
SMgrRelation smgropen(RelFileNode rnode, BackendId backend)
Definition: smgr.c:146
#define InvalidBackendId
Definition: backendid.h:23
static Buffer ReadBuffer_common(SMgrRelation reln, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit)
Definition: bufmgr.c:729
#define Assert(condition)
Definition: c.h:804

◆ RelationGetNumberOfBlocksInFork()

BlockNumber RelationGetNumberOfBlocksInFork ( Relation  relation,
ForkNumber  forkNum 
)

Definition at line 2861 of file bufmgr.c.

References Assert, RelationData::rd_rel, RelationData::rd_smgr, RelationOpenSmgr, smgrnblocks(), and table_relation_size().

Referenced by _hash_getnewbuf(), _hash_init(), autoprewarm_database_main(), get_raw_page_internal(), and pg_prewarm().

2862 {
2863  switch (relation->rd_rel->relkind)
2864  {
2865  case RELKIND_SEQUENCE:
2866  case RELKIND_INDEX:
2867  case RELKIND_PARTITIONED_INDEX:
2868  /* Open it at the smgr level if not already done */
2869  RelationOpenSmgr(relation);
2870 
2871  return smgrnblocks(relation->rd_smgr, forkNum);
2872 
2873  case RELKIND_RELATION:
2874  case RELKIND_TOASTVALUE:
2875  case RELKIND_MATVIEW:
2876  {
2877  /*
2878  * Not every table AM uses BLCKSZ wide fixed size blocks.
2879  * Therefore tableam returns the size in bytes - but for the
2880  * purpose of this routine, we want the number of blocks.
2881  * Therefore divide, rounding up.
2882  */
2883  uint64 szbytes;
2884 
2885  szbytes = table_relation_size(relation, forkNum);
2886 
2887  return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
2888  }
2889  case RELKIND_VIEW:
2890  case RELKIND_COMPOSITE_TYPE:
2891  case RELKIND_FOREIGN_TABLE:
2892  case RELKIND_PARTITIONED_TABLE:
2893  default:
2894  Assert(false);
2895  break;
2896  }
2897 
2898  return 0; /* keep compiler quiet */
2899 }
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
Form_pg_class rd_rel
Definition: rel.h:110
#define RelationOpenSmgr(relation)
Definition: rel.h:514
static uint64 table_relation_size(Relation rel, ForkNumber forkNumber)
Definition: tableam.h:1838
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:548
#define Assert(condition)
Definition: c.h:804

◆ ReleaseAndReadBuffer()

Buffer ReleaseAndReadBuffer ( Buffer  buffer,
Relation  relation,
BlockNumber  blockNum 
)

Definition at line 1546 of file bufmgr.c.

References Assert, buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufferIsValid, CurrentResourceOwner, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, LocalRefCount, MAIN_FORKNUM, RelationData::rd_node, ReadBuffer(), RelFileNodeEquals, ResourceOwnerForgetBuffer(), buftag::rnode, BufferDesc::tag, and UnpinBuffer().

Referenced by _bt_relandgetbuf(), ginFindLeafPage(), heapam_index_fetch_tuple(), and heapam_scan_bitmap_next_block().

1549 {
1550  ForkNumber forkNum = MAIN_FORKNUM;
1551  BufferDesc *bufHdr;
1552 
1553  if (BufferIsValid(buffer))
1554  {
1555  Assert(BufferIsPinned(buffer));
1556  if (BufferIsLocal(buffer))
1557  {
1558  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
1559  if (bufHdr->tag.blockNum == blockNum &&
1560  RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
1561  bufHdr->tag.forkNum == forkNum)
1562  return buffer;
1564  LocalRefCount[-buffer - 1]--;
1565  }
1566  else
1567  {
1568  bufHdr = GetBufferDescriptor(buffer - 1);
1569  /* we have pin, so it's ok to examine tag without spinlock */
1570  if (bufHdr->tag.blockNum == blockNum &&
1571  RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
1572  bufHdr->tag.forkNum == forkNum)
1573  return buffer;
1574  UnpinBuffer(bufHdr, true);
1575  }
1576  }
1577 
1578  return ReadBuffer(relation, blockNum);
1579 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:448
ForkNumber forkNum
Definition: buf_internals.h:93
ResourceOwner CurrentResourceOwner
Definition: resowner.c:144
#define GetLocalBufferDescriptor(id)
#define GetBufferDescriptor(id)
ForkNumber
Definition: relpath.h:40
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1752
RelFileNode rd_node
Definition: rel.h:55
#define Assert(condition)
Definition: c.h:804
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:619
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:94
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
int32 * LocalRefCount
Definition: localbuf.c:45
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88
void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:952

◆ ReleaseBuffer()

void ReleaseBuffer ( Buffer  buffer)

Definition at line 3700 of file bufmgr.c.

References Assert, BufferIsLocal, BufferIsValid, CurrentResourceOwner, elog, ERROR, GetBufferDescriptor, LocalRefCount, ResourceOwnerForgetBuffer(), and UnpinBuffer().

Referenced by _bt_drop_lock_and_maybe_pin(), _bt_getbuf(), _bt_pagedel(), _bt_relbuf(), _bt_search_insert(), _bt_unlink_halfdead_page(), _hash_dropbuf(), _hash_getbuf_with_condlock_cleanup(), autoprewarm_database_main(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brin_vacuum_scan(), bringetbitmap(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapTerminate(), brinsummarize(), collect_corrupt_items(), collect_visibility_data(), entryLoadMoreItems(), ExecEndBitmapHeapScan(), ExecEndIndexOnlyScan(), ExecReScanBitmapHeapScan(), FreeBulkInsertState(), freeGinBtreeStack(), fsm_vacuum_page(), get_actual_variable_endpoint(), get_raw_page_internal(), GetRecordedFreeSpace(), ginDeletePage(), ginFindParents(), ginFinishSplit(), ginFreeScanKeys(), ginInsertCleanup(), GinNewBuffer(), ginScanToDelete(), gistdoinsert(), gistFindCorrectParent(), gistNewBuffer(), gistvacuum_delete_empty_pages(), heap_abort_speculative(), heap_delete(), heap_endscan(), heap_fetch(), heap_force_common(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_rescan(), heap_update(), heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapgetpage(), heapgettup(), heapgettup_pagemode(), lazy_scan_heap(), lazy_vacuum_heap(), pg_prewarm(), pg_visibility(), pg_visibility_map(), pg_visibility_map_summary(), pgstatindex_impl(), ReadBufferBI(), RelationGetBufferForTuple(), ReleaseBulkInsertStatePin(), ResourceOwnerReleaseInternal(), revmap_get_buffer(), revmap_physical_extend(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), statapprox_heap(), summarize_range(), terminate_brin_buildstate(), tts_buffer_heap_clear(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), UnlockReleaseBuffer(), verify_heapam(), visibilitymap_count(), 
visibilitymap_get_status(), visibilitymap_pin(), and XLogReadBufferExtended().

3701 {
3702  if (!BufferIsValid(buffer))
3703  elog(ERROR, "bad buffer ID: %d", buffer);
3704 
3705  if (BufferIsLocal(buffer))
3706  {
3708 
3709  Assert(LocalRefCount[-buffer - 1] > 0);
3710  LocalRefCount[-buffer - 1]--;
3711  return;
3712  }
3713 
3714  UnpinBuffer(GetBufferDescriptor(buffer - 1), true);
3715 }
ResourceOwner CurrentResourceOwner
Definition: resowner.c:144
#define ERROR
Definition: elog.h:45
#define GetBufferDescriptor(id)
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1752
#define Assert(condition)
Definition: c.h:804
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define elog(elevel,...)
Definition: elog.h:227
int32 * LocalRefCount
Definition: localbuf.c:45
void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:952

◆ TestForOldSnapshot()

static void TestForOldSnapshot ( Snapshot  snapshot,
Relation  relation,
Page  page 
)
inlinestatic

Definition at line 277 of file bufmgr.h.

References Assert, old_snapshot_threshold, PageGetLSN, SNAPSHOT_MVCC, SNAPSHOT_TOAST, TestForOldSnapshot_impl(), and XLogRecPtrIsInvalid.

Referenced by _bt_get_endpoint(), _bt_moveright(), _bt_readnextpage(), _bt_walk_left(), _hash_first(), _hash_next(), _hash_readnext(), _hash_readprev(), blgetbitmap(), brinGetTupleForHeapBlock(), brinRevmapInitialize(), collectMatchBitmap(), collectMatchesForHeapRow(), ginFindLeafPage(), gistScanPage(), heap_fetch(), heap_get_latest_tid(), heapgetpage(), heapgettup(), heapgettup_pagemode(), scanGetCandidate(), scanPendingInsert(), and spgWalk().

278 {
279  Assert(relation != NULL);
280 
281  if (old_snapshot_threshold >= 0
282  && (snapshot) != NULL
283  && ((snapshot)->snapshot_type == SNAPSHOT_MVCC
284  || (snapshot)->snapshot_type == SNAPSHOT_TOAST)
285  && !XLogRecPtrIsInvalid((snapshot)->lsn)
286  && PageGetLSN(page) > (snapshot)->lsn)
287  TestForOldSnapshot_impl(snapshot, relation);
288 }
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define Assert(condition)
Definition: c.h:804
#define PageGetLSN(page)
Definition: bufpage.h:366
int old_snapshot_threshold
Definition: snapmgr.c:78
void TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
Definition: bufmgr.c:4807

◆ TestForOldSnapshot_impl()

void TestForOldSnapshot_impl ( Snapshot  snapshot,
Relation  relation 
)

Definition at line 4807 of file bufmgr.c.

References ereport, errcode(), errmsg(), ERROR, GetOldSnapshotThresholdTimestamp(), and RelationAllowsEarlyPruning.

Referenced by TestForOldSnapshot().

4808 {
4809  if (RelationAllowsEarlyPruning(relation)
4810  && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
4811  ereport(ERROR,
4812  (errcode(ERRCODE_SNAPSHOT_TOO_OLD),
4813  errmsg("snapshot too old")));
4814 }
TimestampTz GetOldSnapshotThresholdTimestamp(void)
Definition: snapmgr.c:1660
#define RelationAllowsEarlyPruning(rel)
Definition: snapmgr.h:38
int errcode(int sqlerrcode)
Definition: elog.c:694
#define ERROR
Definition: elog.h:45
#define ereport(elevel,...)
Definition: elog.h:155
int errmsg(const char *fmt,...)
Definition: elog.c:905

◆ UnlockBuffers()

void UnlockBuffers ( void  )

Definition at line 3911 of file bufmgr.c.

References BM_PIN_COUNT_WAITER, buf, LockBufHdr(), MyProcPid, PinCountWaitBuf, UnlockBufHdr, and BufferDesc::wait_backend_pid.

Referenced by AbortSubTransaction(), AbortTransaction(), AtProcExit_Buffers(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), and WalWriterMain().

3912 {
3914 
3915  if (buf)
3916  {
3917  uint32 buf_state;
3918 
3919  buf_state = LockBufHdr(buf);
3920 
3921  /*
3922  * Don't complain if flag bit not set; it could have been reset but we
3923  * got a cancel/die interrupt before getting the signal.
3924  */
3925  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
3926  buf->wait_backend_pid == MyProcPid)
3927  buf_state &= ~BM_PIN_COUNT_WAITER;
3928 
3929  UnlockBufHdr(buf, buf_state);
3930 
3931  PinCountWaitBuf = NULL;
3932  }
3933 }
int MyProcPid
Definition: globals.c:41
int wait_backend_pid
static char * buf
Definition: pg_test_fsync.c:68
unsigned int uint32
Definition: c.h:441
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4547
#define UnlockBufHdr(desc, s)
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:165
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:64

◆ UnlockReleaseBuffer()

void UnlockReleaseBuffer ( Buffer  buffer)

Definition at line 3723 of file bufmgr.c.

References BUFFER_LOCK_UNLOCK, LockBuffer(), and ReleaseBuffer().

Referenced by _bt_clear_incomplete_split(), _bt_restore_meta(), _hash_relbuf(), allocNewBuffer(), AlterSequence(), blbulkdelete(), blgetbitmap(), blinsert(), BloomInitMetapage(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinGetStats(), brinRevmapDesummarizeRange(), bt_metap(), bt_page_items_internal(), bt_page_stats_internal(), bt_recheck_sibling_links(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), createPostingTree(), do_setval(), doPickSplit(), entryLoadMoreItems(), fill_seq_with_data(), flushCachedPage(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_set_and_search(), generic_redo(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginFindParents(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), ginScanToDelete(), ginStepRight(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistbufferinginserttuples(), gistbuild(), gistbuildempty(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), 
hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_insert(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), heap_xlog_clean(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_scan_analyze_next_tuple(), initBloomState(), lazy_scan_heap(), lazy_vacuum_heap(), log_newpage_range(), moveLeafs(), nextval_internal(), palloc_btree_page(), pg_sequence_last_value(), pg_visibility(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), ResetSequence(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistUpdateMetaPage(), spgMatchNodeAction(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), statapprox_heap(), verify_heapam(), visibilitymap_prepare_truncate(), writeListPage(), xlog_redo(), and XLogRecordPageWithFreeSpace().

3724 {
3725  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3726  ReleaseBuffer(buffer);
3727 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3700
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3939

Variable Documentation

◆ backend_flush_after

int backend_flush_after

Definition at line 158 of file bufmgr.c.

Referenced by InitBufferPool().

◆ bgwriter_flush_after

int bgwriter_flush_after

Definition at line 157 of file bufmgr.c.

Referenced by BackgroundWriterMain().

◆ bgwriter_lru_maxpages

int bgwriter_lru_maxpages

Definition at line 133 of file bufmgr.c.

Referenced by BgBufferSync().

◆ bgwriter_lru_multiplier

double bgwriter_lru_multiplier

Definition at line 134 of file bufmgr.c.

Referenced by BgBufferSync().

◆ BufferBlocks

PGDLLIMPORT char* BufferBlocks

Definition at line 21 of file buf_init.c.

Referenced by InitBufferPool().

◆ checkpoint_flush_after

int checkpoint_flush_after

Definition at line 156 of file bufmgr.c.

Referenced by BufferSync().

◆ effective_io_concurrency

int effective_io_concurrency

Definition at line 143 of file bufmgr.c.

Referenced by get_tablespace_io_concurrency(), and tablespace_reloptions().

◆ LocalBufferBlockPointers

PGDLLIMPORT Block* LocalBufferBlockPointers

Definition at line 44 of file localbuf.c.

Referenced by InitLocalBuffers().

◆ LocalRefCount

◆ maintenance_io_concurrency

int maintenance_io_concurrency

◆ NBuffers

◆ NLocBuffer

◆ track_io_timing

bool track_io_timing

Definition at line 135 of file bufmgr.c.

Referenced by FlushBuffer(), ReadBuffer_common(), and show_buffer_usage().

◆ zero_damaged_pages

bool zero_damaged_pages

Definition at line 132 of file bufmgr.c.

Referenced by mdread(), and ReadBuffer_common().