PostgreSQL Source Code  git master
bufmgr.h File Reference
#include "storage/block.h"
#include "storage/buf.h"
#include "storage/bufpage.h"
#include "storage/relfilenode.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
Include dependency graph for bufmgr.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  PrefetchBufferResult
 

Macros

#define MAX_IO_CONCURRENCY   1000
 
#define P_NEW   InvalidBlockNumber /* grow the file to get a new page */
 
#define BUFFER_LOCK_UNLOCK   0
 
#define BUFFER_LOCK_SHARE   1
 
#define BUFFER_LOCK_EXCLUSIVE   2
 
#define BufferIsValid(bufnum)
 
#define BufferGetBlock(buffer)
 
#define BufferGetPageSize(buffer)
 
#define BufferGetPage(buffer)   ((Page)BufferGetBlock(buffer))
 
#define RelationGetNumberOfBlocks(reln)   RelationGetNumberOfBlocksInFork(reln, MAIN_FORKNUM)
 

Typedefs

typedef void * Block
 
typedef enum BufferAccessStrategyType BufferAccessStrategyType
 
typedef struct PrefetchBufferResult PrefetchBufferResult
 

Enumerations

enum  BufferAccessStrategyType { BAS_NORMAL, BAS_BULKREAD, BAS_BULKWRITE, BAS_VACUUM }
 
enum  ReadBufferMode {
  RBM_NORMAL, RBM_ZERO_AND_LOCK, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_ON_ERROR,
  RBM_NORMAL_NO_LOG
}
 

Functions

PrefetchBufferResult PrefetchSharedBuffer (struct SMgrRelationData *smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
 
PrefetchBufferResult PrefetchBuffer (Relation reln, ForkNumber forkNum, BlockNumber blockNum)
 
Buffer ReadBuffer (Relation reln, BlockNumber blockNum)
 
Buffer ReadBufferExtended (Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
 
Buffer ReadBufferWithoutRelcache (RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
 
void ReleaseBuffer (Buffer buffer)
 
void UnlockReleaseBuffer (Buffer buffer)
 
void MarkBufferDirty (Buffer buffer)
 
void IncrBufferRefCount (Buffer buffer)
 
Buffer ReleaseAndReadBuffer (Buffer buffer, Relation relation, BlockNumber blockNum)
 
void InitBufferPool (void)
 
void InitBufferPoolAccess (void)
 
void InitBufferPoolBackend (void)
 
void AtEOXact_Buffers (bool isCommit)
 
void PrintBufferLeakWarning (Buffer buffer)
 
void CheckPointBuffers (int flags)
 
BlockNumber BufferGetBlockNumber (Buffer buffer)
 
BlockNumber RelationGetNumberOfBlocksInFork (Relation relation, ForkNumber forkNum)
 
void FlushOneBuffer (Buffer buffer)
 
void FlushRelationBuffers (Relation rel)
 
void FlushRelationsAllBuffers (struct SMgrRelationData **smgrs, int nrels)
 
void FlushDatabaseBuffers (Oid dbid)
 
void DropRelFileNodeBuffers (RelFileNodeBackend rnode, ForkNumber *forkNum, int nforks, BlockNumber *firstDelBlock)
 
void DropRelFileNodesAllBuffers (RelFileNodeBackend *rnodes, int nnodes)
 
void DropDatabaseBuffers (Oid dbid)
 
bool BufferIsPermanent (Buffer buffer)
 
XLogRecPtr BufferGetLSNAtomic (Buffer buffer)
 
Size BufferShmemSize (void)
 
void BufferGetTag (Buffer buffer, RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum)
 
void MarkBufferDirtyHint (Buffer buffer, bool buffer_std)
 
void UnlockBuffers (void)
 
void LockBuffer (Buffer buffer, int mode)
 
bool ConditionalLockBuffer (Buffer buffer)
 
void LockBufferForCleanup (Buffer buffer)
 
bool ConditionalLockBufferForCleanup (Buffer buffer)
 
bool IsBufferCleanupOK (Buffer buffer)
 
bool HoldingBufferPinThatDelaysRecovery (void)
 
void AbortBufferIO (void)
 
void BufmgrCommit (void)
 
bool BgBufferSync (struct WritebackContext *wb_context)
 
void AtProcExit_LocalBuffers (void)
 
void TestForOldSnapshot_impl (Snapshot snapshot, Relation relation)
 
BufferAccessStrategy GetAccessStrategy (BufferAccessStrategyType btype)
 
void FreeAccessStrategy (BufferAccessStrategy strategy)
 
static void TestForOldSnapshot (Snapshot snapshot, Relation relation, Page page)
 

Variables

PGDLLIMPORT int NBuffers
 
bool zero_damaged_pages
 
int bgwriter_lru_maxpages
 
double bgwriter_lru_multiplier
 
bool track_io_timing
 
int effective_io_concurrency
 
int maintenance_io_concurrency
 
int checkpoint_flush_after
 
int backend_flush_after
 
int bgwriter_flush_after
 
PGDLLIMPORT char * BufferBlocks
 
PGDLLIMPORT int NLocBuffer
 
PGDLLIMPORT Block * LocalBufferBlockPointers
 
PGDLLIMPORT int32 * LocalRefCount
 

Macro Definition Documentation

◆ BUFFER_LOCK_EXCLUSIVE

◆ BUFFER_LOCK_SHARE

◆ BUFFER_LOCK_UNLOCK

#define BUFFER_LOCK_UNLOCK   0

Definition at line 96 of file bufmgr.h.

Referenced by _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_getcachedmetap(), _hash_init(), _hash_kill_items(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), blinsert(), BloomNewBuffer(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_page_cleanup(), bringetbitmap(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinsummarize(), ConditionalLockBufferForCleanup(), fsm_readbuf(), fsm_search_avail(), fsm_vacuum_page(), get_raw_page_internal(), GetVisibilityMapPins(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_compute_xid_horizon_for_tuples(), heap_delete(), heap_fetch(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_page_prune_opt(), heap_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_for_cluster(), heapam_scan_bitmap_next_block(), heapam_scan_sample_next_tuple(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), lazy_scan_heap(), LockBuffer(), LockBufferForCleanup(), pgrowlocks(), pgstat_heap(), pgstatindex_impl(), RelationGetBufferForTuple(), revmap_physical_extend(), SpGistNewBuffer(), summarize_range(), UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_set(), vm_readbuf(), and XLogReadBufferExtended().

◆ BufferGetBlock

#define BufferGetBlock (   buffer)
Value:
( \
AssertMacro(BufferIsValid(buffer)), \
BufferIsLocal(buffer) ? \
LocalBufferBlockPointers[-(buffer) - 1] \
: \
(Block) (BufferBlocks + ((Size) ((buffer) - 1)) * BLCKSZ) \
)
PGDLLIMPORT char * BufferBlocks
Definition: buf_init.c:21
size_t Size
Definition: c.h:528
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
void * Block
Definition: bufmgr.h:24

Definition at line 136 of file bufmgr.h.

Referenced by XLogSaveBufferForHint().

◆ BufferGetPage

#define BufferGetPage (   buffer)    ((Page)BufferGetBlock(buffer))

Definition at line 169 of file bufmgr.h.

Referenced by _bt_binsrch(), _bt_binsrch_insert(), _bt_check_unique(), _bt_checkpage(), _bt_clear_incomplete_split(), _bt_conditionallockbuf(), _bt_dedup_pass(), _bt_delete_or_dedup_one_page(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_endpoint(), _bt_findinsertloc(), _bt_finish_split(), _bt_get_endpoint(), _bt_getbuf(), _bt_getmeta(), _bt_getroot(), _bt_getstackbuf(), _bt_gettrueroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_killitems(), _bt_leftsib_splitflag(), _bt_lock_subtree_parent(), _bt_lockbuf(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newroot(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_rightsib_halfdeadflag(), _bt_search(), _bt_search_insert(), _bt_split(), _bt_stepright(), _bt_unlink_halfdead_page(), _bt_unlockbuf(), _bt_update_meta_cleanup_info(), _bt_upgradelockbufcleanup(), _bt_vacuum_needs_cleanup(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_get_newblock_from_oldbucket(), _hash_get_oldblock_from_newbucket(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_getinitbuf(), _hash_getnewbuf(), _hash_init(), _hash_init_metabuffer(), _hash_initbitmapbuffer(), _hash_initbuf(), _hash_kill_items(), _hash_next(), _hash_pgaddmultitup(), _hash_pgaddtup(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), allocNewBuffer(), blgetbitmap(), blinsert(), BloomNewBuffer(), blvacuumcleanup(), brin_can_do_samepage_update(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_start_evacuating_page(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinGetStats(), brinGetTupleForHeapBlock(), 
brininsert(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinSetHeapBlockItemptr(), bt_metap(), bt_page_items(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), btvacuumpage(), BufferGetLSNAtomic(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), collectMatchesForHeapRow(), count_nondeletable_pages(), createPostingTree(), dataBeginPlaceToPage(), dataBeginPlaceToPageInternal(), dataBeginPlaceToPageLeaf(), dataExecPlaceToPage(), dataExecPlaceToPageInternal(), dataLocateItem(), dataPlaceToPageLeafRecompress(), dataPrepareDownlink(), dataSplitPageInternal(), do_setval(), doPickSplit(), entryExecPlaceToPage(), entryIsEnoughSpace(), entryLoadMoreItems(), entryLocateEntry(), entryLocateLeafEntry(), entryPrepareDownlink(), entrySplitPage(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), generic_redo(), GenericXLogFinish(), GenericXLogRegisterBuffer(), get_raw_page_internal(), GetBTPageStatistics(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginbulkdelete(), ginDeletePage(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), GinInitBuffer(), GinInitMetabuffer(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertData(), ginRedoInsertEntry(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumEntryPage(), ginVacuumPostingTreeLeaf(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), 
gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), GISTInitBuffer(), gistkillitems(), gistMemorizeAllDownlinks(), gistNewBuffer(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hash_bitmap_info(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_compute_xid_horizon_for_tuples(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_hot_search_buffer(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_is_all_visible(), heap_page_prune(), heap_page_prune_execute(), heap_page_prune_opt(), heap_prune_chain(), heap_update(), heap_xlog_clean(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_validate_scan(), heapam_scan_analyze_next_tuple(), heapam_scan_bitmap_next_block(), heapam_scan_bitmap_next_tuple(), heapam_scan_sample_next_tuple(), heapgetpage(), heapgettup(), heapgettup_pagemode(), index_compute_xid_horizon_for_tuples(), initBloomState(), lazy_check_needs_freeze(), lazy_scan_heap(), lazy_vacuum_heap(), lazy_vacuum_page(), log_heap_update(), log_newpage_buffer(), 
log_newpage_range(), log_split_page(), MarkBufferDirtyHint(), moveLeafs(), moveRightIfItNeeded(), nextval_internal(), palloc_btree_page(), pg_visibility(), pgstat_btree_page(), pgstat_gist_page(), pgstat_hash_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistInitBuffer(), SpGistNewBuffer(), SpGistSetLastUsedPage(), SpGistUpdateMetaPage(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), terminate_brin_buildstate(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), verify_heapam(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), writeListPage(), XLogCheckBufferNeedsBackup(), XLogReadBufferExtended(), XLogReadBufferForRedoExtended(), XLogRecordPageWithFreeSpace(), XLogRegisterBuffer(), XLogSaveBufferForHint(), and xlogVacuumPage().

◆ BufferGetPageSize

◆ BufferIsValid

#define BufferIsValid (   bufnum)
Value:
( \
AssertMacro((bufnum) <= NBuffers && (bufnum) >= -NLocBuffer), \
(bufnum) != InvalidBuffer \
)
#define InvalidBuffer
Definition: buf.h:25
PGDLLIMPORT int NLocBuffer
Definition: localbuf.c:41
PGDLLIMPORT int NBuffers
Definition: globals.c:132

Definition at line 123 of file bufmgr.h.

Referenced by _bt_clear_incomplete_split(), _bt_endpoint(), _bt_first(), _bt_get_endpoint(), _bt_insertonpg(), _bt_readpage(), _bt_relandgetbuf(), _bt_search(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_dropscanbuf(), _hash_freeovflpage(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_readnext(), _hash_readpage(), _hash_readprev(), autoprewarm_database_main(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinGetTupleForHeapBlock(), brininsert(), brinsummarize(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), BufferGetLSNAtomic(), BufferIsPermanent(), checkXLogConsistency(), ConditionalLockBufferForCleanup(), doPickSplit(), entryGetItem(), entryLoadMoreItems(), ExecStoreBufferHeapTuple(), ExecStorePinnedBufferHeapTuple(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_vacuum_page(), generic_redo(), GetPrivateRefCount(), GetPrivateRefCountEntry(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageUpdateRecord(), gistXLogSplit(), gistXLogUpdate(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_compute_xid_horizon_for_tuples(), 
heap_endscan(), heap_lock_tuple(), heap_rescan(), heap_update(), heap_xlog_clean(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), heapgettup_pagemode(), IsBufferCleanupOK(), lazy_scan_heap(), lazy_vacuum_heap(), lazy_vacuum_page(), log_heap_visible(), MarkBufferDirty(), MarkBufferDirtyHint(), PrintBufferLeakWarning(), ReleaseAndReadBuffer(), ReleaseBuffer(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), statapprox_heap(), tts_buffer_heap_clear(), tts_buffer_heap_copyslot(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), visibilitymap_prepare_truncate(), visibilitymap_set(), and XLogReadBufferForRedoExtended().

◆ MAX_IO_CONCURRENCY

#define MAX_IO_CONCURRENCY   1000

Definition at line 88 of file bufmgr.h.

◆ P_NEW

◆ RelationGetNumberOfBlocks

Typedef Documentation

◆ Block

typedef void* Block

Definition at line 24 of file bufmgr.h.

◆ BufferAccessStrategyType

◆ PrefetchBufferResult

Enumeration Type Documentation

◆ BufferAccessStrategyType

Enumerator
BAS_NORMAL 
BAS_BULKREAD 
BAS_BULKWRITE 
BAS_VACUUM 

Definition at line 27 of file bufmgr.h.

28 {
29  BAS_NORMAL, /* Normal random access */
30  BAS_BULKREAD, /* Large read-only scan (hint bit updates are
31  * ok) */
32  BAS_BULKWRITE, /* Large multi-block write (e.g. COPY IN) */
33  BAS_VACUUM /* VACUUM */
BufferAccessStrategyType
Definition: bufmgr.h:27

◆ ReadBufferMode

Enumerator
RBM_NORMAL 
RBM_ZERO_AND_LOCK 
RBM_ZERO_AND_CLEANUP_LOCK 
RBM_ZERO_ON_ERROR 
RBM_NORMAL_NO_LOG 

Definition at line 37 of file bufmgr.h.

38 {
39  RBM_NORMAL, /* Normal read */
40  RBM_ZERO_AND_LOCK, /* Don't read from disk, caller will
41  * initialize. Also locks the page. */
42  RBM_ZERO_AND_CLEANUP_LOCK, /* Like RBM_ZERO_AND_LOCK, but locks the page
43  * in "cleanup" mode */
44  RBM_ZERO_ON_ERROR, /* Read, but return an all-zeros page on error */
45  RBM_NORMAL_NO_LOG /* Don't log page as invalid during WAL
46  * replay; otherwise same as RBM_NORMAL */
ReadBufferMode
Definition: bufmgr.h:37

Function Documentation

◆ AbortBufferIO()

void AbortBufferIO ( void  )

Definition at line 4202 of file bufmgr.c.

References Assert, buftag::blockNum, BM_DIRTY, BM_IO_ERROR, BM_IO_IN_PROGRESS, BM_VALID, buf, BufferDescriptorGetIOLock, ereport, errcode(), errdetail(), errmsg(), buftag::forkNum, InProgressBuf, IsForInput, LockBufHdr(), LW_EXCLUSIVE, LWLockAcquire(), pfree(), relpathperm, buftag::rnode, BufferDesc::tag, TerminateBufferIO(), UnlockBufHdr, and WARNING.

Referenced by AbortSubTransaction(), AbortTransaction(), AtProcExit_Buffers(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), and WalWriterMain().

4203 {
4205 
4206  if (buf)
4207  {
4208  uint32 buf_state;
4209 
4210  /*
4211  * Since LWLockReleaseAll has already been called, we're not holding
4212  * the buffer's io_in_progress_lock. We have to re-acquire it so that
4213  * we can use TerminateBufferIO. Anyone who's executing WaitIO on the
4214  * buffer will be in a busy spin until we succeed in doing this.
4215  */
4217 
4218  buf_state = LockBufHdr(buf);
4219  Assert(buf_state & BM_IO_IN_PROGRESS);
4220  if (IsForInput)
4221  {
4222  Assert(!(buf_state & BM_DIRTY));
4223 
4224  /* We'd better not think buffer is valid yet */
4225  Assert(!(buf_state & BM_VALID));
4226  UnlockBufHdr(buf, buf_state);
4227  }
4228  else
4229  {
4230  Assert(buf_state & BM_DIRTY);
4231  UnlockBufHdr(buf, buf_state);
4232  /* Issue notice if this is not the first failure... */
4233  if (buf_state & BM_IO_ERROR)
4234  {
4235  /* Buffer is pinned, so we can read tag without spinlock */
4236  char *path;
4237 
4238  path = relpathperm(buf->tag.rnode, buf->tag.forkNum);
4239  ereport(WARNING,
4240  (errcode(ERRCODE_IO_ERROR),
4241  errmsg("could not write block %u of %s",
4242  buf->tag.blockNum, path),
4243  errdetail("Multiple failures --- write error might be permanent.")));
4244  pfree(path);
4245  }
4246  }
4247  TerminateBufferIO(buf, false, BM_IO_ERROR);
4248  }
4249 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:83
ForkNumber forkNum
Definition: buf_internals.h:93
int errcode(int sqlerrcode)
Definition: elog.c:691
#define BM_DIRTY
Definition: buf_internals.h:58
#define BufferDescriptorGetIOLock(bdesc)
static BufferDesc * InProgressBuf
Definition: bufmgr.c:153
void pfree(void *pointer)
Definition: mcxt.c:1057
static char * buf
Definition: pg_test_fsync.c:68
int errdetail(const char *fmt,...)
Definition: elog.c:1035
unsigned int uint32
Definition: c.h:429
static bool IsForInput
Definition: bufmgr.c:154
#define WARNING
Definition: elog.h:40
static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits)
Definition: bufmgr.c:4170
#define BM_VALID
Definition: buf_internals.h:59
#define ereport(elevel,...)
Definition: elog.h:155
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4320
#define Assert(condition)
Definition: c.h:800
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1207
BlockNumber blockNum
Definition: buf_internals.h:94
RelFileNode rnode
Definition: buf_internals.h:92
#define BM_IO_ERROR
Definition: buf_internals.h:62
BufferTag tag
int errmsg(const char *fmt,...)
Definition: elog.c:902
#define UnlockBufHdr(desc, s)
#define BM_IO_IN_PROGRESS
Definition: buf_internals.h:61

◆ AtEOXact_Buffers()

void AtEOXact_Buffers ( bool  isCommit)

Definition at line 2480 of file bufmgr.c.

References Assert, AtEOXact_LocalBuffers(), CheckForBufferLeaks(), and PrivateRefCountOverflowed.

Referenced by AbortTransaction(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), CommitTransaction(), PrepareTransaction(), and WalWriterMain().

2481 {
2483 
2484  AtEOXact_LocalBuffers(isCommit);
2485 
2487 }
static int32 PrivateRefCountOverflowed
Definition: bufmgr.c:191
#define Assert(condition)
Definition: c.h:800
static void CheckForBufferLeaks(void)
Definition: bufmgr.c:2555
void AtEOXact_LocalBuffers(bool isCommit)
Definition: localbuf.c:578

◆ AtProcExit_LocalBuffers()

void AtProcExit_LocalBuffers ( void  )

Definition at line 589 of file localbuf.c.

References CheckForLocalBufferLeaks().

Referenced by AtProcExit_Buffers().

590 {
591  /*
592  * We shouldn't be holding any remaining pins; if we are, and assertions
593  * aren't enabled, we'll fail later in DropRelFileNodeBuffers while trying
594  * to drop the temp rels.
595  */
597 }
static void CheckForLocalBufferLeaks(void)
Definition: localbuf.c:549

◆ BgBufferSync()

bool BgBufferSync ( struct WritebackContext wb_context)

Definition at line 2110 of file bufmgr.c.

References Assert, bgwriter_lru_maxpages, bgwriter_lru_multiplier, BgWriterDelay, BgWriterStats, BUF_REUSABLE, BUF_WRITTEN, CurrentResourceOwner, DEBUG1, DEBUG2, elog, PgStat_MsgBgWriter::m_buf_alloc, PgStat_MsgBgWriter::m_buf_written_clean, PgStat_MsgBgWriter::m_maxwritten_clean, NBuffers, ResourceOwnerEnlargeBuffers(), StrategySyncStart(), and SyncOneBuffer().

Referenced by BackgroundWriterMain().

2111 {
2112  /* info obtained from freelist.c */
2113  int strategy_buf_id;
2114  uint32 strategy_passes;
2115  uint32 recent_alloc;
2116 
2117  /*
2118  * Information saved between calls so we can determine the strategy
2119  * point's advance rate and avoid scanning already-cleaned buffers.
2120  */
2121  static bool saved_info_valid = false;
2122  static int prev_strategy_buf_id;
2123  static uint32 prev_strategy_passes;
2124  static int next_to_clean;
2125  static uint32 next_passes;
2126 
2127  /* Moving averages of allocation rate and clean-buffer density */
2128  static float smoothed_alloc = 0;
2129  static float smoothed_density = 10.0;
2130 
2131  /* Potentially these could be tunables, but for now, not */
2132  float smoothing_samples = 16;
2133  float scan_whole_pool_milliseconds = 120000.0;
2134 
2135  /* Used to compute how far we scan ahead */
2136  long strategy_delta;
2137  int bufs_to_lap;
2138  int bufs_ahead;
2139  float scans_per_alloc;
2140  int reusable_buffers_est;
2141  int upcoming_alloc_est;
2142  int min_scan_buffers;
2143 
2144  /* Variables for the scanning loop proper */
2145  int num_to_scan;
2146  int num_written;
2147  int reusable_buffers;
2148 
2149  /* Variables for final smoothed_density update */
2150  long new_strategy_delta;
2151  uint32 new_recent_alloc;
2152 
2153  /*
2154  * Find out where the freelist clock sweep currently is, and how many
2155  * buffer allocations have happened since our last call.
2156  */
2157  strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
2158 
2159  /* Report buffer alloc counts to pgstat */
2160  BgWriterStats.m_buf_alloc += recent_alloc;
2161 
2162  /*
2163  * If we're not running the LRU scan, just stop after doing the stats
2164  * stuff. We mark the saved state invalid so that we can recover sanely
2165  * if LRU scan is turned back on later.
2166  */
2167  if (bgwriter_lru_maxpages <= 0)
2168  {
2169  saved_info_valid = false;
2170  return true;
2171  }
2172 
2173  /*
2174  * Compute strategy_delta = how many buffers have been scanned by the
2175  * clock sweep since last time. If first time through, assume none. Then
2176  * see if we are still ahead of the clock sweep, and if so, how many
2177  * buffers we could scan before we'd catch up with it and "lap" it. Note:
2178  * weird-looking coding of xxx_passes comparisons are to avoid bogus
2179  * behavior when the passes counts wrap around.
2180  */
2181  if (saved_info_valid)
2182  {
2183  int32 passes_delta = strategy_passes - prev_strategy_passes;
2184 
2185  strategy_delta = strategy_buf_id - prev_strategy_buf_id;
2186  strategy_delta += (long) passes_delta * NBuffers;
2187 
2188  Assert(strategy_delta >= 0);
2189 
2190  if ((int32) (next_passes - strategy_passes) > 0)
2191  {
2192  /* we're one pass ahead of the strategy point */
2193  bufs_to_lap = strategy_buf_id - next_to_clean;
2194 #ifdef BGW_DEBUG
2195  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2196  next_passes, next_to_clean,
2197  strategy_passes, strategy_buf_id,
2198  strategy_delta, bufs_to_lap);
2199 #endif
2200  }
2201  else if (next_passes == strategy_passes &&
2202  next_to_clean >= strategy_buf_id)
2203  {
2204  /* on same pass, but ahead or at least not behind */
2205  bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
2206 #ifdef BGW_DEBUG
2207  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2208  next_passes, next_to_clean,
2209  strategy_passes, strategy_buf_id,
2210  strategy_delta, bufs_to_lap);
2211 #endif
2212  }
2213  else
2214  {
2215  /*
2216  * We're behind, so skip forward to the strategy point and start
2217  * cleaning from there.
2218  */
2219 #ifdef BGW_DEBUG
2220  elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
2221  next_passes, next_to_clean,
2222  strategy_passes, strategy_buf_id,
2223  strategy_delta);
2224 #endif
2225  next_to_clean = strategy_buf_id;
2226  next_passes = strategy_passes;
2227  bufs_to_lap = NBuffers;
2228  }
2229  }
2230  else
2231  {
2232  /*
2233  * Initializing at startup or after LRU scanning had been off. Always
2234  * start at the strategy point.
2235  */
2236 #ifdef BGW_DEBUG
2237  elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
2238  strategy_passes, strategy_buf_id);
2239 #endif
2240  strategy_delta = 0;
2241  next_to_clean = strategy_buf_id;
2242  next_passes = strategy_passes;
2243  bufs_to_lap = NBuffers;
2244  }
2245 
2246  /* Update saved info for next time */
2247  prev_strategy_buf_id = strategy_buf_id;
2248  prev_strategy_passes = strategy_passes;
2249  saved_info_valid = true;
2250 
2251  /*
2252  * Compute how many buffers had to be scanned for each new allocation, ie,
2253  * 1/density of reusable buffers, and track a moving average of that.
2254  *
2255  * If the strategy point didn't move, we don't update the density estimate
2256  */
2257  if (strategy_delta > 0 && recent_alloc > 0)
2258  {
2259  scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
2260  smoothed_density += (scans_per_alloc - smoothed_density) /
2261  smoothing_samples;
2262  }
2263 
2264  /*
2265  * Estimate how many reusable buffers there are between the current
2266  * strategy point and where we've scanned ahead to, based on the smoothed
2267  * density estimate.
2268  */
2269  bufs_ahead = NBuffers - bufs_to_lap;
2270  reusable_buffers_est = (float) bufs_ahead / smoothed_density;
2271 
2272  /*
2273  * Track a moving average of recent buffer allocations. Here, rather than
2274  * a true average we want a fast-attack, slow-decline behavior: we
2275  * immediately follow any increase.
2276  */
2277  if (smoothed_alloc <= (float) recent_alloc)
2278  smoothed_alloc = recent_alloc;
2279  else
2280  smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
2281  smoothing_samples;
2282 
2283  /* Scale the estimate by a GUC to allow more aggressive tuning. */
2284  upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
2285 
2286  /*
2287  * If recent_alloc remains at zero for many cycles, smoothed_alloc will
2288  * eventually underflow to zero, and the underflows produce annoying
2289  * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
2290  * zero, there's no point in tracking smaller and smaller values of
2291  * smoothed_alloc, so just reset it to exactly zero to avoid this
2292  * syndrome. It will pop back up as soon as recent_alloc increases.
2293  */
2294  if (upcoming_alloc_est == 0)
2295  smoothed_alloc = 0;
2296 
2297  /*
2298  * Even in cases where there's been little or no buffer allocation
2299  * activity, we want to make a small amount of progress through the buffer
2300  * cache so that as many reusable buffers as possible are clean after an
2301  * idle period.
2302  *
2303  * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
2304  * the BGW will be called during the scan_whole_pool time; slice the
2305  * buffer pool into that many sections.
2306  */
2307  min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
2308 
2309  if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
2310  {
2311 #ifdef BGW_DEBUG
2312  elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
2313  upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
2314 #endif
2315  upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
2316  }
2317 
2318  /*
2319  * Now write out dirty reusable buffers, working forward from the
2320  * next_to_clean point, until we have lapped the strategy scan, or cleaned
2321  * enough buffers to match our estimate of the next cycle's allocation
2322  * requirements, or hit the bgwriter_lru_maxpages limit.
2323  */
2324 
2325  /* Make sure we can handle the pin inside SyncOneBuffer */
2327 
2328  num_to_scan = bufs_to_lap;
2329  num_written = 0;
2330  reusable_buffers = reusable_buffers_est;
2331 
2332  /* Execute the LRU scan */
2333  while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
2334  {
2335  int sync_state = SyncOneBuffer(next_to_clean, true,
2336  wb_context);
2337 
2338  if (++next_to_clean >= NBuffers)
2339  {
2340  next_to_clean = 0;
2341  next_passes++;
2342  }
2343  num_to_scan--;
2344 
2345  if (sync_state & BUF_WRITTEN)
2346  {
2347  reusable_buffers++;
2348  if (++num_written >= bgwriter_lru_maxpages)
2349  {
2351  break;
2352  }
2353  }
2354  else if (sync_state & BUF_REUSABLE)
2355  reusable_buffers++;
2356  }
2357 
2358  BgWriterStats.m_buf_written_clean += num_written;
2359 
2360 #ifdef BGW_DEBUG
2361  elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
2362  recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
2363  smoothed_density, reusable_buffers_est, upcoming_alloc_est,
2364  bufs_to_lap - num_to_scan,
2365  num_written,
2366  reusable_buffers - reusable_buffers_est);
2367 #endif
2368 
2369  /*
2370  * Consider the above scan as being like a new allocation scan.
2371  * Characterize its density and update the smoothed one based on it. This
2372  * effectively halves the moving average period in cases where both the
2373  * strategy and the background writer are doing some useful scanning,
2374  * which is helpful because a long memory isn't as desirable on the
2375  * density estimates.
2376  */
2377  new_strategy_delta = bufs_to_lap - num_to_scan;
2378  new_recent_alloc = reusable_buffers - reusable_buffers_est;
2379  if (new_strategy_delta > 0 && new_recent_alloc > 0)
2380  {
2381  scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
2382  smoothed_density += (scans_per_alloc - smoothed_density) /
2383  smoothing_samples;
2384 
2385 #ifdef BGW_DEBUG
2386  elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
2387  new_recent_alloc, new_strategy_delta,
2388  scans_per_alloc, smoothed_density);
2389 #endif
2390  }
2391 
2392  /* Return true if OK to hibernate */
2393  return (bufs_to_lap == 0 && recent_alloc == 0);
2394 }
PgStat_Counter m_buf_alloc
Definition: pgstat.h:450
int StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
Definition: freelist.c:395
#define DEBUG1
Definition: elog.h:25
int BgWriterDelay
Definition: bgwriter.c:64
ResourceOwner CurrentResourceOwner
Definition: resowner.c:144
PgStat_Counter m_maxwritten_clean
Definition: pgstat.h:447
PgStat_Counter m_buf_written_clean
Definition: pgstat.h:446
PgStat_MsgBgWriter BgWriterStats
Definition: pgstat.c:144
double bgwriter_lru_multiplier
Definition: bufmgr.c:126
static int SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
Definition: bufmgr.c:2413
signed int int32
Definition: c.h:417
#define BUF_REUSABLE
Definition: bufmgr.c:69
int bgwriter_lru_maxpages
Definition: bufmgr.c:125
#define DEBUG2
Definition: elog.h:24
unsigned int uint32
Definition: c.h:429
#define BUF_WRITTEN
Definition: bufmgr.c:68
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:934
#define Assert(condition)
Definition: c.h:800
#define elog(elevel,...)
Definition: elog.h:228
int NBuffers
Definition: globals.c:132

◆ BufferGetBlockNumber()

BlockNumber BufferGetBlockNumber ( Buffer  buffer)

Definition at line 2663 of file bufmgr.c.

References Assert, buftag::blockNum, BufferIsLocal, BufferIsPinned, GetBufferDescriptor, GetLocalBufferDescriptor, and BufferDesc::tag.

Referenced by _bt_check_unique(), _bt_checkpage(), _bt_delitems_vacuum(), _bt_doinsert(), _bt_endpoint(), _bt_finish_split(), _bt_first(), _bt_getroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newroot(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_search(), _bt_split(), _bt_unlink_halfdead_page(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_first(), _hash_freeovflpage(), _hash_getnewbuf(), _hash_readnext(), _hash_readpage(), _hash_splitbucket(), allocNewBuffer(), blinsert(), BloomInitMetapage(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_xlog_insert_update(), brinbuild(), brinGetTupleForHeapBlock(), createPostingTree(), dataBeginPlaceToPageLeaf(), dataPrepareDownlink(), doPickSplit(), entryPrepareDownlink(), fill_seq_with_data(), ginEntryInsert(), ginFindParents(), ginFinishSplit(), ginPlaceToPage(), ginRedoDeleteListPages(), ginRedoUpdateMetapage(), ginScanToDelete(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistformdownlink(), gistinserttuples(), gistMemorizeAllDownlinks(), gistplacetopage(), gistRelocateBuildBuffersOnSplit(), gistScanPage(), hash_xlog_add_ovfl_page(), heap_delete(), heap_hot_search_buffer(), heap_insert(), heap_multi_insert(), heap_page_is_all_visible(), heap_prune_chain(), heap_update(), heap_xlog_confirm(), heap_xlog_lock(), makeSublist(), moveLeafs(), moveRightIfItNeeded(), pgstathashindex(), ReadBufferBI(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_get_buffer(), revmap_physical_extend(), spgAddNodeAction(), spgbuild(), spgdoinsert(), SpGistSetLastUsedPage(), spgSplitNodeAction(), spgWalk(), startScanEntry(), terminate_brin_buildstate(), vacuumLeafPage(), visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), visibilitymap_set(), and 
XLogReadBufferExtended().

2664 {
2665  BufferDesc *bufHdr;
2666 
2667  Assert(BufferIsPinned(buffer));
2668 
2669  if (BufferIsLocal(buffer))
2670  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2671  else
2672  bufHdr = GetBufferDescriptor(buffer - 1);
2673 
2674  /* pinned, so OK to read tag without spinlock */
2675  return bufHdr->tag.blockNum;
2676 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
#define GetLocalBufferDescriptor(id)
#define GetBufferDescriptor(id)
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:94
BufferTag tag

◆ BufferGetLSNAtomic()

XLogRecPtr BufferGetLSNAtomic ( Buffer  buffer)

Definition at line 2926 of file bufmgr.c.

References Assert, BufferGetPage, BufferIsLocal, BufferIsPinned, BufferIsValid, GetBufferDescriptor, LockBufHdr(), PageGetLSN, UnlockBufHdr, and XLogHintBitIsNeeded.

Referenced by _bt_killitems(), _bt_readpage(), gistdoinsert(), gistFindPath(), gistkillitems(), gistScanPage(), SetHintBits(), and XLogSaveBufferForHint().

2927 {
2928  BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
2929  char *page = BufferGetPage(buffer);
2930  XLogRecPtr lsn;
2931  uint32 buf_state;
2932 
2933  /*
2934  * If we don't need locking for correctness, fastpath out.
2935  */
2936  if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
2937  return PageGetLSN(page);
2938 
2939  /* Make sure we've got a real buffer, and that we hold a pin on it. */
2940  Assert(BufferIsValid(buffer));
2941  Assert(BufferIsPinned(buffer));
2942 
2943  buf_state = LockBufHdr(bufHdr);
2944  lsn = PageGetLSN(page);
2945  UnlockBufHdr(bufHdr, buf_state);
2946 
2947  return lsn;
2948 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:429
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4320
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define PageGetLSN(page)
Definition: bufpage.h:366
#define UnlockBufHdr(desc, s)
#define XLogHintBitIsNeeded()
Definition: xlog.h:202

◆ BufferGetTag()

void BufferGetTag ( Buffer  buffer,
RelFileNode rnode,
ForkNumber forknum,
BlockNumber blknum 
)

Definition at line 2684 of file bufmgr.c.

References Assert, buftag::blockNum, BufferIsLocal, BufferIsPinned, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, buftag::rnode, and BufferDesc::tag.

Referenced by fsm_search_avail(), ginRedoInsertEntry(), log_newpage_buffer(), ResolveCminCmaxDuringDecoding(), XLogRegisterBuffer(), and XLogSaveBufferForHint().

2686 {
2687  BufferDesc *bufHdr;
2688 
2689  /* Do the same checks as BufferGetBlockNumber. */
2690  Assert(BufferIsPinned(buffer));
2691 
2692  if (BufferIsLocal(buffer))
2693  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2694  else
2695  bufHdr = GetBufferDescriptor(buffer - 1);
2696 
2697  /* pinned, so OK to read tag without spinlock */
2698  *rnode = bufHdr->tag.rnode;
2699  *forknum = bufHdr->tag.forkNum;
2700  *blknum = bufHdr->tag.blockNum;
2701 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
ForkNumber forkNum
Definition: buf_internals.h:93
#define GetLocalBufferDescriptor(id)
#define GetBufferDescriptor(id)
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:94
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag

◆ BufferIsPermanent()

bool BufferIsPermanent ( Buffer  buffer)

Definition at line 2896 of file bufmgr.c.

References Assert, BM_PERMANENT, BufferIsLocal, BufferIsPinned, BufferIsValid, GetBufferDescriptor, pg_atomic_read_u32(), and BufferDesc::state.

Referenced by SetHintBits().

2897 {
2898  BufferDesc *bufHdr;
2899 
2900  /* Local buffers are used only for temp relations. */
2901  if (BufferIsLocal(buffer))
2902  return false;
2903 
2904  /* Make sure we've got a real buffer, and that we hold a pin on it. */
2905  Assert(BufferIsValid(buffer));
2906  Assert(BufferIsPinned(buffer));
2907 
2908  /*
2909  * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
2910  * need not bother with the buffer header spinlock. Even if someone else
2911  * changes the buffer header state while we're doing this, the state is
2912  * changed atomically, so we'll read the old value or the new value, but
2913  * not random garbage.
2914  */
2915  bufHdr = GetBufferDescriptor(buffer - 1);
2916  return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
2917 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
#define BM_PERMANENT
Definition: buf_internals.h:66
#define GetBufferDescriptor(id)
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
pg_atomic_uint32 state
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ BufferShmemSize()

Size BufferShmemSize ( void  )

Definition at line 157 of file buf_init.c.

References add_size(), mul_size(), NBuffers, PG_CACHE_LINE_SIZE, and StrategyShmemSize().

Referenced by CreateSharedMemoryAndSemaphores().

158 {
159  Size size = 0;
160 
161  /* size of buffer descriptors */
162  size = add_size(size, mul_size(NBuffers, sizeof(BufferDescPadded)));
163  /* to allow aligning buffer descriptors */
164  size = add_size(size, PG_CACHE_LINE_SIZE);
165 
166  /* size of data pages */
167  size = add_size(size, mul_size(NBuffers, BLCKSZ));
168 
169  /* size of stuff controlled by freelist.c */
170  size = add_size(size, StrategyShmemSize());
171 
172  /*
173  * It would be nice to include the I/O locks in the BufferDesc, but that
174  * would increase the size of a BufferDesc to more than one cache line,
175  * and benchmarking has shown that keeping every BufferDesc aligned on a
176  * cache line boundary is important for performance. So, instead, the
177  * array of I/O locks is allocated in a separate tranche. Because those
178  * locks are not highly contended, we lay out the array with minimal
179  * padding.
180  */
181  size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded)));
182  /* to allow aligning the above */
183  size = add_size(size, PG_CACHE_LINE_SIZE);
184 
185  /* size of checkpoint sort array in bufmgr.c */
186  size = add_size(size, mul_size(NBuffers, sizeof(CkptSortItem)));
187 
188  return size;
189 }
#define PG_CACHE_LINE_SIZE
Size mul_size(Size s1, Size s2)
Definition: shmem.c:515
Size add_size(Size s1, Size s2)
Definition: shmem.c:498
size_t Size
Definition: c.h:528
int NBuffers
Definition: globals.c:132
Size StrategyShmemSize(void)
Definition: freelist.c:454

◆ BufmgrCommit()

void BufmgrCommit ( void  )

Definition at line 2649 of file bufmgr.c.

Referenced by PrepareTransaction(), and RecordTransactionCommit().

2650 {
2651  /* Nothing to do in bufmgr anymore... */
2652 }

◆ CheckPointBuffers()

void CheckPointBuffers ( int  flags)

Definition at line 2639 of file bufmgr.c.

References BufferSync().

Referenced by CheckPointGuts().

2640 {
2641  BufferSync(flags);
2642 }
static void BufferSync(int flags)
Definition: bufmgr.c:1833

◆ ConditionalLockBuffer()

bool ConditionalLockBuffer ( Buffer  buffer)

Definition at line 3778 of file bufmgr.c.

References Assert, buf, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, GetBufferDescriptor, LW_EXCLUSIVE, and LWLockConditionalAcquire().

Referenced by _bt_conditionallockbuf(), BloomNewBuffer(), ConditionalLockBufferForCleanup(), GinNewBuffer(), gistNewBuffer(), RelationGetBufferForTuple(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), and SpGistUpdateMetaPage().

3779 {
3780  BufferDesc *buf;
3781 
3782  Assert(BufferIsPinned(buffer));
3783  if (BufferIsLocal(buffer))
3784  return true; /* act as though we got it */
3785 
3786  buf = GetBufferDescriptor(buffer - 1);
3787 
3788  return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
3789  LW_EXCLUSIVE);
3790 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1379
static char * buf
Definition: pg_test_fsync.c:68
#define GetBufferDescriptor(id)
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37

◆ ConditionalLockBufferForCleanup()

bool ConditionalLockBufferForCleanup ( Buffer  buffer)

Definition at line 3946 of file bufmgr.c.

References Assert, BUF_STATE_GET_REFCOUNT, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsValid, ConditionalLockBuffer(), GetBufferDescriptor, GetPrivateRefCount(), LocalRefCount, LockBuffer(), LockBufHdr(), PrivateRefCountEntry::refcount, and UnlockBufHdr.

Referenced by _hash_finish_split(), _hash_getbuf_with_condlock_cleanup(), heap_page_prune_opt(), lazy_scan_heap(), and lazy_vacuum_heap().

3947 {
3948  BufferDesc *bufHdr;
3949  uint32 buf_state,
3950  refcount;
3951 
3952  Assert(BufferIsValid(buffer));
3953 
3954  if (BufferIsLocal(buffer))
3955  {
3956  refcount = LocalRefCount[-buffer - 1];
3957  /* There should be exactly one pin */
3958  Assert(refcount > 0);
3959  if (refcount != 1)
3960  return false;
3961  /* Nobody else to wait for */
3962  return true;
3963  }
3964 
3965  /* There should be exactly one local pin */
3966  refcount = GetPrivateRefCount(buffer);
3967  Assert(refcount);
3968  if (refcount != 1)
3969  return false;
3970 
3971  /* Try to acquire lock */
3972  if (!ConditionalLockBuffer(buffer))
3973  return false;
3974 
3975  bufHdr = GetBufferDescriptor(buffer - 1);
3976  buf_state = LockBufHdr(bufHdr);
3977  refcount = BUF_STATE_GET_REFCOUNT(buf_state);
3978 
3979  Assert(refcount > 0);
3980  if (refcount == 1)
3981  {
3982  /* Successfully acquired exclusive lock with pincount 1 */
3983  UnlockBufHdr(bufHdr, buf_state);
3984  return true;
3985  }
3986 
3987  /* Failed, so release the lock */
3988  UnlockBufHdr(bufHdr, buf_state);
3989  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3990  return false;
3991 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:379
unsigned int uint32
Definition: c.h:429
bool ConditionalLockBuffer(Buffer buffer)
Definition: bufmgr.c:3778
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3752
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4320
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define UnlockBufHdr(desc, s)
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48

◆ DropDatabaseBuffers()

void DropDatabaseBuffers ( Oid  dbid)

Definition at line 3149 of file bufmgr.c.

References buftag::blockNum, buf, BufferDescriptorGetBuffer, RelFileNode::dbNode, elog, buftag::forkNum, BufferDesc::freeNext, GetBufferDescriptor, GetPrivateRefCount(), i, InvalidateBuffer(), InvalidBackendId, LockBufHdr(), LOG, NBuffers, relpathbackend, relpathperm, buftag::rnode, BufferDesc::tag, and UnlockBufHdr.

Referenced by dbase_redo(), dropdb(), and movedb().

3150 {
3151  int i;
3152 
3153  /*
3154  * We needn't consider local buffers, since by assumption the target
3155  * database isn't our own.
3156  */
3157 
3158  for (i = 0; i < NBuffers; i++)
3159  {
3160  BufferDesc *bufHdr = GetBufferDescriptor(i);
3161  uint32 buf_state;
3162 
3163  /*
3164  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3165  * and saves some cycles.
3166  */
3167  if (bufHdr->tag.rnode.dbNode != dbid)
3168  continue;
3169 
3170  buf_state = LockBufHdr(bufHdr);
3171  if (bufHdr->tag.rnode.dbNode == dbid)
3172  InvalidateBuffer(bufHdr); /* releases spinlock */
3173  else
3174  UnlockBufHdr(bufHdr, buf_state);
3175  }
3176 }
static void InvalidateBuffer(BufferDesc *buf)
Definition: bufmgr.c:1373
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:429
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4320
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:132

◆ DropRelFileNodeBuffers()

void DropRelFileNodeBuffers ( RelFileNodeBackend  rnode,
ForkNumber forkNum,
int  nforks,
BlockNumber firstDelBlock 
)

Definition at line 2977 of file bufmgr.c.

References RelFileNodeBackend::backend, buftag::blockNum, DropRelFileNodeLocalBuffers(), buftag::forkNum, GetBufferDescriptor, i, InvalidateBuffer(), LockBufHdr(), MyBackendId, NBuffers, RelFileNodeBackend::node, RelFileNodeBackendIsTemp, RelFileNodeEquals, buftag::rnode, BufferDesc::tag, and UnlockBufHdr.

Referenced by smgrtruncate().

2979 {
2980  int i;
2981  int j;
2982 
2983  /* If it's a local relation, it's localbuf.c's problem. */
2984  if (RelFileNodeBackendIsTemp(rnode))
2985  {
2986  if (rnode.backend == MyBackendId)
2987  {
2988  for (j = 0; j < nforks; j++)
2989  DropRelFileNodeLocalBuffers(rnode.node, forkNum[j],
2990  firstDelBlock[j]);
2991  }
2992  return;
2993  }
2994 
2995  for (i = 0; i < NBuffers; i++)
2996  {
2997  BufferDesc *bufHdr = GetBufferDescriptor(i);
2998  uint32 buf_state;
2999 
3000  /*
3001  * We can make this a tad faster by prechecking the buffer tag before
3002  * we attempt to lock the buffer; this saves a lot of lock
3003  * acquisitions in typical cases. It should be safe because the
3004  * caller must have AccessExclusiveLock on the relation, or some other
3005  * reason to be certain that no one is loading new pages of the rel
3006  * into the buffer pool. (Otherwise we might well miss such pages
3007  * entirely.) Therefore, while the tag might be changing while we
3008  * look at it, it can't be changing *to* a value we care about, only
3009  * *away* from such a value. So false negatives are impossible, and
3010  * false positives are safe because we'll recheck after getting the
3011  * buffer lock.
3012  *
3013  * We could check forkNum and blockNum as well as the rnode, but the
3014  * incremental win from doing so seems small.
3015  */
3016  if (!RelFileNodeEquals(bufHdr->tag.rnode, rnode.node))
3017  continue;
3018 
3019  buf_state = LockBufHdr(bufHdr);
3020 
3021  for (j = 0; j < nforks; j++)
3022  {
3023  if (RelFileNodeEquals(bufHdr->tag.rnode, rnode.node) &&
3024  bufHdr->tag.forkNum == forkNum[j] &&
3025  bufHdr->tag.blockNum >= firstDelBlock[j])
3026  {
3027  InvalidateBuffer(bufHdr); /* releases spinlock */
3028  break;
3029  }
3030  }
3031  if (j >= nforks)
3032  UnlockBufHdr(bufHdr, buf_state);
3033  }
3034 }
BackendId MyBackendId
Definition: globals.c:81
#define RelFileNodeBackendIsTemp(rnode)
Definition: relfilenode.h:78
ForkNumber forkNum
Definition: buf_internals.h:93
static void InvalidateBuffer(BufferDesc *buf)
Definition: bufmgr.c:1373
void DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum, BlockNumber firstDelBlock)
Definition: localbuf.c:326
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:429
RelFileNode node
Definition: relfilenode.h:74
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4320
BackendId backend
Definition: relfilenode.h:75
BlockNumber blockNum
Definition: buf_internals.h:94
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:132
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88

◆ DropRelFileNodesAllBuffers()

void DropRelFileNodesAllBuffers ( RelFileNodeBackend rnodes,
int  nnodes 
)

Definition at line 3046 of file bufmgr.c.

References DropRelFileNodeAllLocalBuffers(), GetBufferDescriptor, i, InvalidateBuffer(), LockBufHdr(), MyBackendId, NBuffers, RelFileNodeBackend::node, palloc(), pfree(), pg_qsort(), RelFileNodeBackendIsTemp, RelFileNodeEquals, RELS_BSEARCH_THRESHOLD, buftag::rnode, rnode_comparator(), BufferDesc::tag, and UnlockBufHdr.

Referenced by smgrdounlinkall().

3047 {
3048  int i,
3049  n = 0;
3050  RelFileNode *nodes;
3051  bool use_bsearch;
3052 
3053  if (nnodes == 0)
3054  return;
3055 
3056  nodes = palloc(sizeof(RelFileNode) * nnodes); /* non-local relations */
3057 
3058  /* If it's a local relation, it's localbuf.c's problem. */
3059  for (i = 0; i < nnodes; i++)
3060  {
3061  if (RelFileNodeBackendIsTemp(rnodes[i]))
3062  {
3063  if (rnodes[i].backend == MyBackendId)
3064  DropRelFileNodeAllLocalBuffers(rnodes[i].node);
3065  }
3066  else
3067  nodes[n++] = rnodes[i].node;
3068  }
3069 
3070  /*
3071  * If there are no non-local relations, then we're done. Release the
3072  * memory and return.
3073  */
3074  if (n == 0)
3075  {
3076  pfree(nodes);
3077  return;
3078  }
3079 
3080  /*
3081  * For low number of relations to drop just use a simple walk through, to
3082  * save the bsearch overhead. The threshold to use is rather a guess than
3083  * an exactly determined value, as it depends on many factors (CPU and RAM
3084  * speeds, amount of shared buffers etc.).
3085  */
3086  use_bsearch = n > RELS_BSEARCH_THRESHOLD;
3087 
3088  /* sort the list of rnodes if necessary */
3089  if (use_bsearch)
3090  pg_qsort(nodes, n, sizeof(RelFileNode), rnode_comparator);
3091 
3092  for (i = 0; i < NBuffers; i++)
3093  {
3094  RelFileNode *rnode = NULL;
3095  BufferDesc *bufHdr = GetBufferDescriptor(i);
3096  uint32 buf_state;
3097 
3098  /*
3099  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3100  * and saves some cycles.
3101  */
3102 
3103  if (!use_bsearch)
3104  {
3105  int j;
3106 
3107  for (j = 0; j < n; j++)
3108  {
3109  if (RelFileNodeEquals(bufHdr->tag.rnode, nodes[j]))
3110  {
3111  rnode = &nodes[j];
3112  break;
3113  }
3114  }
3115  }
3116  else
3117  {
3118  rnode = bsearch((const void *) &(bufHdr->tag.rnode),
3119  nodes, n, sizeof(RelFileNode),
3120  rnode_comparator);
3121  }
3122 
3123  /* buffer doesn't belong to any of the given relfilenodes; skip it */
3124  if (rnode == NULL)
3125  continue;
3126 
3127  buf_state = LockBufHdr(bufHdr);
3128  if (RelFileNodeEquals(bufHdr->tag.rnode, (*rnode)))
3129  InvalidateBuffer(bufHdr); /* releases spinlock */
3130  else
3131  UnlockBufHdr(bufHdr, buf_state);
3132  }
3133 
3134  pfree(nodes);
3135 }
BackendId MyBackendId
Definition: globals.c:81
#define RelFileNodeBackendIsTemp(rnode)
Definition: relfilenode.h:78
static void InvalidateBuffer(BufferDesc *buf)
Definition: bufmgr.c:1373
#define RELS_BSEARCH_THRESHOLD
Definition: bufmgr.c:71
void DropRelFileNodeAllLocalBuffers(RelFileNode rnode)
Definition: localbuf.c:373
void pfree(void *pointer)
Definition: mcxt.c:1057
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:429
static int rnode_comparator(const void *p1, const void *p2)
Definition: bufmgr.c:4293
RelFileNode node
Definition: relfilenode.h:74
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4320
void pg_qsort(void *base, size_t nel, size_t elsize, int(*cmp)(const void *, const void *))
Definition: qsort.c:113
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
void * palloc(Size size)
Definition: mcxt.c:950
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:132
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88

◆ FlushDatabaseBuffers()

void FlushDatabaseBuffers ( Oid  dbid)

Definition at line 3450 of file bufmgr.c.

References BM_DIRTY, BM_VALID, BufferDescriptorGetContentLock, CurrentResourceOwner, RelFileNode::dbNode, FlushBuffer(), GetBufferDescriptor, i, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, PinBuffer_Locked(), ReservePrivateRefCountEntry(), ResourceOwnerEnlargeBuffers(), buftag::rnode, BufferDesc::tag, UnlockBufHdr, and UnpinBuffer().

Referenced by dbase_redo().

3451 {
3452  int i;
3453  BufferDesc *bufHdr;
3454 
3455  /* Make sure we can handle the pin inside the loop */
3456  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3457 
3458  for (i = 0; i < NBuffers; i++)
3459  {
3460  uint32 buf_state;
3461 
3462  bufHdr = GetBufferDescriptor(i);
3463 
3464  /*
3465  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3466  * and saves some cycles.
3467  */
3468  if (bufHdr->tag.rnode.dbNode != dbid)
3469  continue;
3470 
3471  ReservePrivateRefCountEntry();
3472 
3473  buf_state = LockBufHdr(bufHdr);
3474  if (bufHdr->tag.rnode.dbNode == dbid &&
3475  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3476  {
3477  PinBuffer_Locked(bufHdr);
3478  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3479  FlushBuffer(bufHdr, NULL);
3480  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3481  UnpinBuffer(bufHdr, true);
3482  }
3483  else
3484  UnlockBufHdr(bufHdr, buf_state);
3485  }
3486 }
ResourceOwner CurrentResourceOwner
Definition: resowner.c:144
#define BM_DIRTY
Definition: buf_internals.h:58
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln)
Definition: bufmgr.c:2723
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1811
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:429
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1740
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:934
#define BM_VALID
Definition: buf_internals.h:59
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4320
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:1695
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1207
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:132
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:207

◆ FlushOneBuffer()

void FlushOneBuffer ( Buffer  buffer)

Definition at line 3493 of file bufmgr.c.

References Assert, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, FlushBuffer(), GetBufferDescriptor, and LWLockHeldByMe().

Referenced by hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), and XLogReadBufferForRedoExtended().

3494 {
3495  BufferDesc *bufHdr;
3496 
3497  /* currently not needed, but no fundamental reason not to support */
3498  Assert(!BufferIsLocal(buffer));
3499 
3500  Assert(BufferIsPinned(buffer));
3501 
3502  bufHdr = GetBufferDescriptor(buffer - 1);
3503 
3504  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
3505 
3506  FlushBuffer(bufHdr, NULL);
3507 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
bool LWLockHeldByMe(LWLock *l)
Definition: lwlock.c:1927
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln)
Definition: bufmgr.c:2723
#define GetBufferDescriptor(id)
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37

◆ FlushRelationBuffers()

void FlushRelationBuffers ( Relation  rel)

Definition at line 3254 of file bufmgr.c.

References ErrorContextCallback::arg, buftag::blockNum, BM_DIRTY, BM_JUST_DIRTIED, BM_VALID, BufferDescriptorGetContentLock, ErrorContextCallback::callback, CurrentResourceOwner, error_context_stack, FlushBuffer(), buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, i, local_buffer_write_error_callback(), LocalBufHdrGetBlock, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, NLocBuffer, PageSetChecksumInplace(), pg_atomic_read_u32(), pg_atomic_unlocked_write_u32(), PinBuffer_Locked(), ErrorContextCallback::previous, RelationData::rd_node, RelationData::rd_smgr, RelationOpenSmgr, RelationUsesLocalBuffers, RelFileNodeEquals, ReservePrivateRefCountEntry(), ResourceOwnerEnlargeBuffers(), buftag::rnode, smgrwrite(), BufferDesc::state, BufferDesc::tag, UnlockBufHdr, and UnpinBuffer().

Referenced by heapam_relation_copy_data(), and index_copy_data().

3255 {
3256  int i;
3257  BufferDesc *bufHdr;
3258 
3259  /* Open rel at the smgr level if not already done */
3260  RelationOpenSmgr(rel);
3261 
3262  if (RelationUsesLocalBuffers(rel))
3263  {
3264  for (i = 0; i < NLocBuffer; i++)
3265  {
3266  uint32 buf_state;
3267 
3268  bufHdr = GetLocalBufferDescriptor(i);
3269  if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
3270  ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
3271  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3272  {
3273  ErrorContextCallback errcallback;
3274  Page localpage;
3275 
3276  localpage = (char *) LocalBufHdrGetBlock(bufHdr);
3277 
3278  /* Setup error traceback support for ereport() */
3279  errcallback.callback = local_buffer_write_error_callback;
3280  errcallback.arg = (void *) bufHdr;
3281  errcallback.previous = error_context_stack;
3282  error_context_stack = &errcallback;
3283 
3284  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
3285 
3286  smgrwrite(rel->rd_smgr,
3287  bufHdr->tag.forkNum,
3288  bufHdr->tag.blockNum,
3289  localpage,
3290  false);
3291 
3292  buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
3293  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
3294 
3295  /* Pop the error context stack */
3296  error_context_stack = errcallback.previous;
3297  }
3298  }
3299 
3300  return;
3301  }
3302 
3303  /* Make sure we can handle the pin inside the loop */
3304  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3305 
3306  for (i = 0; i < NBuffers; i++)
3307  {
3308  uint32 buf_state;
3309 
3310  bufHdr = GetBufferDescriptor(i);
3311 
3312  /*
3313  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3314  * and saves some cycles.
3315  */
3316  if (!RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node))
3317  continue;
3318 
3319  ReservePrivateRefCountEntry();
3320 
3321  buf_state = LockBufHdr(bufHdr);
3322  if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
3323  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3324  {
3325  PinBuffer_Locked(bufHdr);
3326  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3327  FlushBuffer(bufHdr, rel->rd_smgr);
3328  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3329  UnpinBuffer(bufHdr, true);
3330  }
3331  else
3332  UnlockBufHdr(bufHdr, buf_state);
3333  }
3334 }
#define LocalBufHdrGetBlock(bufHdr)
Definition: bufmgr.c:64
ForkNumber forkNum
Definition: buf_internals.h:93
static void local_buffer_write_error_callback(void *arg)
Definition: bufmgr.c:4274
ResourceOwner CurrentResourceOwner
Definition: resowner.c:144
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
#define GetLocalBufferDescriptor(id)
#define BM_DIRTY
Definition: buf_internals.h:58
void(* callback)(void *arg)
Definition: elog.h:243
struct ErrorContextCallback * previous
Definition: elog.h:242
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln)
Definition: bufmgr.c:2723
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1811
ErrorContextCallback * error_context_stack
Definition: elog.c:92
#define RelationOpenSmgr(relation)
Definition: rel.h:514
int NLocBuffer
Definition: localbuf.c:41
void smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer, bool skipFsync)
Definition: smgr.c:524
#define GetBufferDescriptor(id)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:63
unsigned int uint32
Definition: c.h:429
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1740
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:934
#define BM_VALID
Definition: buf_internals.h:59
RelFileNode rd_node
Definition: rel.h:55
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4320
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:1695
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1422
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1207
BlockNumber blockNum
Definition: buf_internals.h:94
RelFileNode rnode
Definition: buf_internals.h:92
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:573
BufferTag tag
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:132
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:277
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:207
pg_atomic_uint32 state
Pointer Page
Definition: bufpage.h:78
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ FlushRelationsAllBuffers()

void FlushRelationsAllBuffers ( struct SMgrRelationData **  smgrs,
int  nrels 
)

◆ FreeAccessStrategy()

void FreeAccessStrategy ( BufferAccessStrategy  strategy)

Definition at line 597 of file freelist.c.

References pfree().

Referenced by blgetbitmap(), FreeBulkInsertState(), heap_endscan(), and initscan().

598 {
599  /* don't crash if called on a "default" strategy */
600  if (strategy != NULL)
601  pfree(strategy);
602 }
void pfree(void *pointer)
Definition: mcxt.c:1057

◆ GetAccessStrategy()

BufferAccessStrategy GetAccessStrategy ( BufferAccessStrategyType  btype)

Definition at line 542 of file freelist.c.

References BAS_BULKREAD, BAS_BULKWRITE, BAS_NORMAL, BAS_VACUUM, BufferAccessStrategyData::btype, BufferAccessStrategyData::buffers, elog, ERROR, Min, NBuffers, offsetof, palloc0(), and BufferAccessStrategyData::ring_size.

Referenced by blgetbitmap(), bt_check_every_level(), collect_corrupt_items(), collect_visibility_data(), do_autovacuum(), GetBulkInsertState(), initscan(), pgstat_index(), pgstathashindex(), pgstatindex_impl(), statapprox_heap(), vacuum(), and verify_heapam().

543 {
544  BufferAccessStrategy strategy;
545  int ring_size;
546 
547  /*
548  * Select ring size to use. See buffer/README for rationales.
549  *
550  * Note: if you change the ring size for BAS_BULKREAD, see also
551  * SYNC_SCAN_REPORT_INTERVAL in access/heap/syncscan.c.
552  */
553  switch (btype)
554  {
555  case BAS_NORMAL:
556  /* if someone asks for NORMAL, just give 'em a "default" object */
557  return NULL;
558 
559  case BAS_BULKREAD:
560  ring_size = 256 * 1024 / BLCKSZ;
561  break;
562  case BAS_BULKWRITE:
563  ring_size = 16 * 1024 * 1024 / BLCKSZ;
564  break;
565  case BAS_VACUUM:
566  ring_size = 256 * 1024 / BLCKSZ;
567  break;
568 
569  default:
570  elog(ERROR, "unrecognized buffer access strategy: %d",
571  (int) btype);
572  return NULL; /* keep compiler quiet */
573  }
574 
575  /* Make sure ring isn't an undue fraction of shared buffers */
576  ring_size = Min(NBuffers / 8, ring_size);
577 
578  /* Allocate the object and initialize all elements to zeroes */
579  strategy = (BufferAccessStrategy)
581  ring_size * sizeof(Buffer));
582 
583  /* Set fields that don't start out zero */
584  strategy->btype = btype;
585  strategy->ring_size = ring_size;
586 
587  return strategy;
588 }
struct BufferAccessStrategyData * BufferAccessStrategy
Definition: buf.h:44
#define Min(x, y)
Definition: c.h:982
#define ERROR
Definition: elog.h:43
BufferAccessStrategyType btype
Definition: freelist.c:74
void * palloc0(Size size)
Definition: mcxt.c:981
#define elog(elevel,...)
Definition: elog.h:228
int NBuffers
Definition: globals.c:132
int Buffer
Definition: buf.h:23
#define offsetof(type, field)
Definition: c.h:723

◆ HoldingBufferPinThatDelaysRecovery()

bool HoldingBufferPinThatDelaysRecovery ( void  )

Definition at line 3920 of file bufmgr.c.

References GetPrivateRefCount(), and GetStartupBufferPinWaitBufId().

Referenced by CheckRecoveryConflictDeadlock(), and RecoveryConflictInterrupt().

3921 {
3922  int bufid = GetStartupBufferPinWaitBufId();
3923 
3924  /*
3925  * If we get woken slowly then it's possible that the Startup process was
3926  * already woken by other backends before we got here. Also possible that
3927  * we get here by multiple interrupts or interrupts at inappropriate
3928  * times, so make sure we do nothing if the bufid is not set.
3929  */
3930  if (bufid < 0)
3931  return false;
3932 
3933  if (GetPrivateRefCount(bufid + 1) > 0)
3934  return true;
3935 
3936  return false;
3937 }
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:379
int GetStartupBufferPinWaitBufId(void)
Definition: proc.c:652

◆ IncrBufferRefCount()

void IncrBufferRefCount ( Buffer  buffer)

Definition at line 3551 of file bufmgr.c.

References Assert, BufferIsLocal, BufferIsPinned, CurrentResourceOwner, GetPrivateRefCountEntry(), LocalRefCount, PrivateRefCountEntry::refcount, ResourceOwnerEnlargeBuffers(), and ResourceOwnerRememberBuffer().

Referenced by _bt_steppage(), btrestrpos(), entryLoadMoreItems(), ReadBufferBI(), scanPostingTree(), startScanEntry(), and tts_buffer_heap_store_tuple().

3552 {
3553  Assert(BufferIsPinned(buffer));
3555  if (BufferIsLocal(buffer))
3556  LocalRefCount[-buffer - 1]++;
3557  else
3558  {
3559  PrivateRefCountEntry *ref;
3560 
3561  ref = GetPrivateRefCountEntry(buffer, true);
3562  Assert(ref != NULL);
3563  ref->refcount++;
3564  }
3566 }
static PrivateRefCountEntry * GetPrivateRefCountEntry(Buffer buffer, bool do_move)
Definition: bufmgr.c:299
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
ResourceOwner CurrentResourceOwner
Definition: resowner.c:144
void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:947
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:934
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37
int32 * LocalRefCount
Definition: localbuf.c:45

◆ InitBufferPool()

void InitBufferPool ( void  )

Definition at line 67 of file buf_init.c.

References Assert, backend_flush_after, buf, BufferDesc::buf_id, BufferBlocks, BufferDescriptorGetContentLock, BufferDescriptorGetIOLock, CLEAR_BUFFERTAG, BufferDesc::freeNext, FREENEXT_END_OF_LIST, GetBufferDescriptor, i, LWLockInitialize(), LWTRANCHE_BUFFER_CONTENT, LWTRANCHE_BUFFER_IO, NBuffers, pg_atomic_init_u32(), ShmemInitStruct(), BufferDesc::state, StrategyInitialize(), BufferDesc::tag, BufferDesc::wait_backend_pid, and WritebackContextInit().

Referenced by CreateSharedMemoryAndSemaphores().

68 {
69  bool foundBufs,
70  foundDescs,
71  foundIOLocks,
72  foundBufCkpt;
73 
74  /* Align descriptors to a cacheline boundary. */
76  ShmemInitStruct("Buffer Descriptors",
77  NBuffers * sizeof(BufferDescPadded),
78  &foundDescs);
79 
80  BufferBlocks = (char *)
81  ShmemInitStruct("Buffer Blocks",
82  NBuffers * (Size) BLCKSZ, &foundBufs);
83 
84  /* Align lwlocks to cacheline boundary */
86  ShmemInitStruct("Buffer IO Locks",
88  &foundIOLocks);
89 
90  /*
91  * The array used to sort to-be-checkpointed buffer ids is located in
92  * shared memory, to avoid having to allocate significant amounts of
93  * memory at runtime. As that'd be in the middle of a checkpoint, or when
94  * the checkpointer is restarted, memory allocation failures would be
95  * painful.
96  */
98  ShmemInitStruct("Checkpoint BufferIds",
99  NBuffers * sizeof(CkptSortItem), &foundBufCkpt);
100 
101  if (foundDescs || foundBufs || foundIOLocks || foundBufCkpt)
102  {
103  /* should find all of these, or none of them */
104  Assert(foundDescs && foundBufs && foundIOLocks && foundBufCkpt);
105  /* note: this path is only taken in EXEC_BACKEND case */
106  }
107  else
108  {
109  int i;
110 
111  /*
112  * Initialize all the buffer headers.
113  */
114  for (i = 0; i < NBuffers; i++)
115  {
117 
118  CLEAR_BUFFERTAG(buf->tag);
119 
120  pg_atomic_init_u32(&buf->state, 0);
121  buf->wait_backend_pid = 0;
122 
123  buf->buf_id = i;
124 
125  /*
126  * Initially link all the buffers together as unused. Subsequent
127  * management of this list is done by freelist.c.
128  */
129  buf->freeNext = i + 1;
130 
133 
136  }
137 
138  /* Correct last entry of linked list */
139  GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST;
140  }
141 
142  /* Init other shared buffer-management stuff */
143  StrategyInitialize(!foundDescs);
144 
145  /* Initialize per-backend file flush context */
148 }
#define FREENEXT_END_OF_LIST
LWLockMinimallyPadded * BufferIOLWLockArray
Definition: buf_init.c:22
int wait_backend_pid
int backend_flush_after
Definition: bufmgr.c:150
#define BufferDescriptorGetIOLock(bdesc)
void StrategyInitialize(bool init)
Definition: freelist.c:475
void WritebackContextInit(WritebackContext *context, int *max_pending)
Definition: bufmgr.c:4460
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:392
WritebackContext BackendWritebackContext
Definition: buf_init.c:23
static char * buf
Definition: pg_test_fsync.c:68
BufferDescPadded * BufferDescriptors
Definition: buf_init.c:20
#define GetBufferDescriptor(id)
union LWLockMinimallyPadded LWLockMinimallyPadded
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:744
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:800
#define CLEAR_BUFFERTAG(a)
Definition: buf_internals.h:97
CkptSortItem * CkptBufferIds
Definition: buf_init.c:24
size_t Size
Definition: c.h:528
BufferTag tag
int i
int NBuffers
Definition: globals.c:132
pg_atomic_uint32 state
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:223
char * BufferBlocks
Definition: buf_init.c:21

◆ InitBufferPoolAccess()

void InitBufferPoolAccess ( void  )

Definition at line 2502 of file bufmgr.c.

References HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, MemSet, and PrivateRefCountArray.

Referenced by BaseInit().

2503 {
2504  HASHCTL hash_ctl;
2505 
2506  memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
2507 
2508  MemSet(&hash_ctl, 0, sizeof(hash_ctl));
2509  hash_ctl.keysize = sizeof(int32);
2510  hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
2511 
2512  PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
2513  HASH_ELEM | HASH_BLOBS);
2514 }
struct PrivateRefCountEntry PrivateRefCountEntry
#define HASH_ELEM
Definition: hsearch.h:85
Size entrysize
Definition: hsearch.h:72
#define MemSet(start, val, len)
Definition: c.h:1004
signed int int32
Definition: c.h:417
static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES]
Definition: bufmgr.c:189
#define HASH_BLOBS
Definition: hsearch.h:86
HTAB * hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition: dynahash.c:326
Size keysize
Definition: hsearch.h:71
static HTAB * PrivateRefCountHash
Definition: bufmgr.c:190

◆ InitBufferPoolBackend()

void InitBufferPoolBackend ( void  )

Definition at line 2526 of file bufmgr.c.

References AtProcExit_Buffers(), and on_shmem_exit().

Referenced by AuxiliaryProcessMain(), and InitPostgres().

2527 {
2529 }
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:361
static void AtProcExit_Buffers(int code, Datum arg)
Definition: bufmgr.c:2536

◆ IsBufferCleanupOK()

bool IsBufferCleanupOK ( Buffer  buffer)

Definition at line 4002 of file bufmgr.c.

References Assert, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsValid, GetBufferDescriptor, GetPrivateRefCount(), LocalRefCount, LockBufHdr(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), and UnlockBufHdr.

Referenced by _hash_doinsert(), _hash_expandtable(), _hash_splitbucket(), hash_xlog_split_allocate_page(), and hashbucketcleanup().

4003 {
4004  BufferDesc *bufHdr;
4005  uint32 buf_state;
4006 
4007  Assert(BufferIsValid(buffer));
4008 
4009  if (BufferIsLocal(buffer))
4010  {
4011  /* There should be exactly one pin */
4012  if (LocalRefCount[-buffer - 1] != 1)
4013  return false;
4014  /* Nobody else to wait for */
4015  return true;
4016  }
4017 
4018  /* There should be exactly one local pin */
4019  if (GetPrivateRefCount(buffer) != 1)
4020  return false;
4021 
4022  bufHdr = GetBufferDescriptor(buffer - 1);
4023 
4024  /* caller must hold exclusive lock on buffer */
4026  LW_EXCLUSIVE));
4027 
4028  buf_state = LockBufHdr(bufHdr);
4029 
4030  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4031  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4032  {
4033  /* pincount is OK. */
4034  UnlockBufHdr(bufHdr, buf_state);
4035  return true;
4036  }
4037 
4038  UnlockBufHdr(bufHdr, buf_state);
4039  return false;
4040 }
bool LWLockHeldByMeInMode(LWLock *l, LWLockMode mode)
Definition: lwlock.c:1945
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:379
unsigned int uint32
Definition: c.h:429
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4320
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define UnlockBufHdr(desc, s)
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48

◆ LockBuffer()

void LockBuffer ( Buffer  buffer,
int  mode 
)

Definition at line 3752 of file bufmgr.c.

References Assert, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, elog, ERROR, GetBufferDescriptor, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), and LWLockRelease().

Referenced by _bt_lockbuf(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_getbuf(), _hash_getbuf_with_strategy(), _hash_getcachedmetap(), _hash_getnewbuf(), _hash_init(), _hash_kill_items(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), blbulkdelete(), blgetbitmap(), blinsert(), BloomNewBuffer(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_page_cleanup(), brinbuild(), brinbuildempty(), bringetbitmap(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinLockRevmapPageForUpdate(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinsummarize(), bt_metap(), bt_page_items(), bt_page_stats(), bt_recheck_sibling_links(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), ConditionalLockBufferForCleanup(), count_nondeletable_pages(), entryLoadMoreItems(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), get_raw_page_internal(), GetVisibilityMapPins(), ginbuildempty(), ginbulkdelete(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistbuildempty(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfinishsplit(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbucketcleanup(), hashbulkdelete(), 
heap_abort_speculative(), heap_compute_xid_horizon_for_tuples(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_get_latest_tid(), heap_inplace_update(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_page_prune_opt(), heap_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_for_cluster(), heapam_scan_analyze_next_block(), heapam_scan_bitmap_next_block(), heapam_scan_sample_next_tuple(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), initBloomState(), lazy_scan_heap(), LockBufferForCleanup(), log_newpage_range(), palloc_btree_page(), pg_visibility(), pgrowlocks(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationGetBufferForTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), shiftList(), spgdoinsert(), spgGetCache(), SpGistNewBuffer(), spgprocesspending(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), summarize_range(), UnlockReleaseBuffer(), verify_heapam(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), XLogReadBufferExtended(), XLogReadBufferForRedoExtended(), and XLogRecordPageWithFreeSpace().

3753 {
3754  BufferDesc *buf;
3755 
3756  Assert(BufferIsPinned(buffer));
3757  if (BufferIsLocal(buffer))
3758  return; /* local buffers need no lock */
3759 
3760  buf = GetBufferDescriptor(buffer - 1);
3761 
3762  if (mode == BUFFER_LOCK_UNLOCK)
3764  else if (mode == BUFFER_LOCK_SHARE)
3766  else if (mode == BUFFER_LOCK_EXCLUSIVE)
3768  else
3769  elog(ERROR, "unrecognized buffer lock mode: %d", mode);
3770 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1811
#define ERROR
Definition: elog.h:43
static char * buf
Definition: pg_test_fsync.c:68
#define GetBufferDescriptor(id)
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1207
#define elog(elevel,...)
Definition: elog.h:228
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97

◆ LockBufferForCleanup()

void LockBufferForCleanup ( Buffer  buffer)

Definition at line 3809 of file bufmgr.c.

References Assert, BM_PIN_COUNT_WAITER, BUF_STATE_GET_REFCOUNT, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsPinned, elog, ERROR, get_ps_display(), GetBufferDescriptor, GetPrivateRefCount(), InHotStandby, LocalRefCount, LockBuffer(), LockBufHdr(), MyProcPid, palloc(), pfree(), PG_WAIT_BUFFER_PIN, ProcWaitForSignal(), ResolveRecoveryConflictWithBufferPin(), set_ps_display(), SetStartupBufferPinWaitBufId(), UnlockBufHdr, update_process_title, and BufferDesc::wait_backend_pid.

Referenced by _bt_upgradelockbufcleanup(), ginVacuumPostingTree(), hashbulkdelete(), heap_force_common(), lazy_scan_heap(), ReadBuffer_common(), and XLogReadBufferForRedoExtended().

3810 {
3811  BufferDesc *bufHdr;
3812  char *new_status = NULL;
3813 
3814  Assert(BufferIsPinned(buffer));
3815  Assert(PinCountWaitBuf == NULL);
3816 
3817  if (BufferIsLocal(buffer))
3818  {
3819  /* There should be exactly one pin */
3820  if (LocalRefCount[-buffer - 1] != 1)
3821  elog(ERROR, "incorrect local pin count: %d",
3822  LocalRefCount[-buffer - 1]);
3823  /* Nobody else to wait for */
3824  return;
3825  }
3826 
3827  /* There should be exactly one local pin */
3828  if (GetPrivateRefCount(buffer) != 1)
3829  elog(ERROR, "incorrect local pin count: %d",
3830  GetPrivateRefCount(buffer));
3831 
3832  bufHdr = GetBufferDescriptor(buffer - 1);
3833 
3834  for (;;)
3835  {
3836  uint32 buf_state;
3837 
3838  /* Try to acquire lock */
3840  buf_state = LockBufHdr(bufHdr);
3841 
3842  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
3843  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
3844  {
3845  /* Successfully acquired exclusive lock with pincount 1 */
3846  UnlockBufHdr(bufHdr, buf_state);
3847 
3848  /* Report change to non-waiting status */
3849  if (new_status)
3850  {
3851  set_ps_display(new_status);
3852  pfree(new_status);
3853  }
3854  return;
3855  }
3856  /* Failed, so mark myself as waiting for pincount 1 */
3857  if (buf_state & BM_PIN_COUNT_WAITER)
3858  {
3859  UnlockBufHdr(bufHdr, buf_state);
3860  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3861  elog(ERROR, "multiple backends attempting to wait for pincount 1");
3862  }
3863  bufHdr->wait_backend_pid = MyProcPid;
3864  PinCountWaitBuf = bufHdr;
3865  buf_state |= BM_PIN_COUNT_WAITER;
3866  UnlockBufHdr(bufHdr, buf_state);
3867  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3868 
3869  /* Wait to be signaled by UnpinBuffer() */
3870  if (InHotStandby)
3871  {
3872  /* Report change to waiting status */
3873  if (update_process_title && new_status == NULL)
3874  {
3875  const char *old_status;
3876  int len;
3877 
3878  old_status = get_ps_display(&len);
3879  new_status = (char *) palloc(len + 8 + 1);
3880  memcpy(new_status, old_status, len);
3881  strcpy(new_status + len, " waiting");
3882  set_ps_display(new_status);
3883  new_status[len] = '\0'; /* truncate off " waiting" */
3884  }
3885 
3886  /* Publish the bufid that Startup process waits on */
3887  SetStartupBufferPinWaitBufId(buffer - 1);
3888  /* Set alarm and then wait to be signaled by UnpinBuffer() */
3890  /* Reset the published bufid */
3892  }
3893  else
3895 
3896  /*
3897  * Remove flag marking us as waiter. Normally this will not be set
3898  * anymore, but ProcWaitForSignal() can return for other signals as
3899  * well. We take care to only reset the flag if we're the waiter, as
3900  * theoretically another backend could have started waiting. That's
3901  * impossible with the current usages due to table level locking, but
3902  * better be safe.
3903  */
3904  buf_state = LockBufHdr(bufHdr);
3905  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
3906  bufHdr->wait_backend_pid == MyProcPid)
3907  buf_state &= ~BM_PIN_COUNT_WAITER;
3908  UnlockBufHdr(bufHdr, buf_state);
3909 
3910  PinCountWaitBuf = NULL;
3911  /* Loop back and try again */
3912  }
3913 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
int MyProcPid
Definition: globals.c:40
int wait_backend_pid
bool update_process_title
Definition: ps_status.c:36
#define InHotStandby
Definition: xlog.h:74
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
void set_ps_display(const char *activity)
Definition: ps_status.c:349
void pfree(void *pointer)
Definition: mcxt.c:1057
#define ERROR
Definition: elog.h:43
void ResolveRecoveryConflictWithBufferPin(void)
Definition: standby.c:483
const char * get_ps_display(int *displen)
Definition: ps_status.c:430
void SetStartupBufferPinWaitBufId(int bufid)
Definition: proc.c:640
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:379
unsigned int uint32
Definition: c.h:429
void ProcWaitForSignal(uint32 wait_event_info)
Definition: proc.c:1809
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3752
#define PG_WAIT_BUFFER_PIN
Definition: pgstat.h:863
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4320
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37
void * palloc(Size size)
Definition: mcxt.c:950
#define UnlockBufHdr(desc, s)
#define elog(elevel,...)
Definition: elog.h:228
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:157
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:64

◆ MarkBufferDirty()

void MarkBufferDirty ( Buffer  buffer)

Definition at line 1471 of file bufmgr.c.

References Assert, BM_DIRTY, BM_JUST_DIRTIED, BM_LOCKED, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, BufferIsValid, elog, ERROR, GetBufferDescriptor, LW_EXCLUSIVE, LWLockHeldByMeInMode(), MarkLocalBufferDirty(), pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pgBufferUsage, BufferUsage::shared_blks_dirtied, BufferDesc::state, VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, VacuumPageDirty, and WaitBufHdrUnlocked().

Referenced by _bt_clear_incomplete_split(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newroot(), _bt_restore_meta(), _bt_split(), _bt_unlink_halfdead_page(), _bt_update_meta_cleanup_info(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), brin_doinsert(), brin_doupdate(), brin_initialize_empty_new_buffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinRevmapDesummarizeRange(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), createPostingTree(), do_setval(), doPickSplit(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), generic_redo(), GenericXLogFinish(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginDeletePage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistbuild(), gistbuildempty(), gistdeletepage(), gistplacetopage(), gistprunepage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistvacuumpage(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), 
hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_prune(), heap_update(), heap_xlog_clean(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), lazy_scan_heap(), lazy_vacuum_page(), log_newpage_range(), moveLeafs(), nextval_internal(), RelationGetBufferForTuple(), revmap_physical_extend(), saveNodeLink(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), SpGistUpdateMetaPage(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), writeListPage(), and XLogReadBufferForRedoExtended().

1472 {
1473  BufferDesc *bufHdr;
1474  uint32 buf_state;
1475  uint32 old_buf_state;
1476 
1477  if (!BufferIsValid(buffer))
1478  elog(ERROR, "bad buffer ID: %d", buffer);
1479 
1480  if (BufferIsLocal(buffer))
1481  {
1482  MarkLocalBufferDirty(buffer);
1483  return;
1484  }
1485 
1486  bufHdr = GetBufferDescriptor(buffer - 1);
1487 
1488  Assert(BufferIsPinned(buffer));
1490  LW_EXCLUSIVE));
1491 
1492  old_buf_state = pg_atomic_read_u32(&bufHdr->state);
1493  for (;;)
1494  {
1495  if (old_buf_state & BM_LOCKED)
1496  old_buf_state = WaitBufHdrUnlocked(bufHdr);
1497 
1498  buf_state = old_buf_state;
1499 
1500  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1501  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
1502 
1503  if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
1504  buf_state))
1505  break;
1506  }
1507 
1508  /*
1509  * If the buffer was not dirty already, do vacuum accounting.
1510  */
1511  if (!(old_buf_state & BM_DIRTY))
1512  {
1513  VacuumPageDirty++;
1515  if (VacuumCostActive)
1517  }
1518 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
bool LWLockHeldByMeInMode(LWLock *l, LWLockMode mode)
Definition: lwlock.c:1945
int VacuumCostBalance
Definition: globals.c:148
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:311
int64 VacuumPageDirty
Definition: globals.c:146
#define BM_DIRTY
Definition: buf_internals.h:58
int VacuumCostPageDirty
Definition: globals.c:140
#define ERROR
Definition: elog.h:43
#define GetBufferDescriptor(id)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:63
long shared_blks_dirtied
Definition: instrument.h:23
unsigned int uint32
Definition: c.h:429
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:286
#define BM_LOCKED
Definition: buf_internals.h:57
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
static uint32 WaitBufHdrUnlocked(BufferDesc *buf)
Definition: bufmgr.c:4348
#define elog(elevel,...)
Definition: elog.h:228
pg_atomic_uint32 state
BufferUsage pgBufferUsage
Definition: instrument.c:20
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48
bool VacuumCostActive
Definition: globals.c:149
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ MarkBufferDirtyHint()

void MarkBufferDirtyHint ( Buffer  buffer,
bool  buffer_std 
)

Definition at line 3583 of file bufmgr.c.

References Assert, BM_DIRTY, BM_JUST_DIRTIED, BM_PERMANENT, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferGetPage, BufferIsLocal, BufferIsValid, PGPROC::delayChkpt, elog, ERROR, GetBufferDescriptor, GetPrivateRefCount(), InvalidXLogRecPtr, LockBufHdr(), LWLockHeldByMe(), MarkLocalBufferDirty(), MyProc, PageSetLSN, pg_atomic_read_u32(), pgBufferUsage, RecoveryInProgress(), RelFileNodeSkippingWAL(), buftag::rnode, BufferUsage::shared_blks_dirtied, BufferDesc::state, BufferDesc::tag, UnlockBufHdr, VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, VacuumPageDirty, XLogHintBitIsNeeded, XLogRecPtrIsInvalid, and XLogSaveBufferForHint().

Referenced by _bt_check_unique(), _bt_killitems(), _hash_kill_items(), brin_start_evacuating_page(), btvacuumpage(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), gistkillitems(), heap_page_prune(), read_seq_tuple(), SetHintBits(), and XLogRecordPageWithFreeSpace().

3584 {
3585  BufferDesc *bufHdr;
3586  Page page = BufferGetPage(buffer);
3587 
3588  if (!BufferIsValid(buffer))
3589  elog(ERROR, "bad buffer ID: %d", buffer);
3590 
3591  if (BufferIsLocal(buffer))
3592  {
3593  MarkLocalBufferDirty(buffer);
3594  return;
3595  }
3596 
3597  bufHdr = GetBufferDescriptor(buffer - 1);
3598 
3599  Assert(GetPrivateRefCount(buffer) > 0);
3600  /* here, either share or exclusive lock is OK */
3602 
3603  /*
3604  * This routine might get called many times on the same page, if we are
3605  * making the first scan after commit of an xact that added/deleted many
3606  * tuples. So, be as quick as we can if the buffer is already dirty. We
3607  * do this by not acquiring spinlock if it looks like the status bits are
3608  * already set. Since we make this test unlocked, there's a chance we
3609  * might fail to notice that the flags have just been cleared, and failed
3610  * to reset them, due to memory-ordering issues. But since this function
3611  * is only intended to be used in cases where failing to write out the
3612  * data would be harmless anyway, it doesn't really matter.
3613  */
3614  if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
3616  {
3618  bool dirtied = false;
3619  bool delayChkpt = false;
3620  uint32 buf_state;
3621 
3622  /*
3623  * If we need to protect hint bit updates from torn writes, WAL-log a
3624  * full page image of the page. This full page image is only necessary
3625  * if the hint bit update is the first change to the page since the
3626  * last checkpoint.
3627  *
3628  * We don't check full_page_writes here because that logic is included
3629  * when we call XLogInsert() since the value changes dynamically.
3630  */
3631  if (XLogHintBitIsNeeded() &&
3632  (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
3633  {
3634  /*
3635  * If we must not write WAL, due to a relfilenode-specific
3636  * condition or being in recovery, don't dirty the page. We can
3637  * set the hint, just not dirty the page as a result so the hint
3638  * is lost when we evict the page or shutdown.
3639  *
3640  * See src/backend/storage/page/README for longer discussion.
3641  */
3642  if (RecoveryInProgress() ||
3643  RelFileNodeSkippingWAL(bufHdr->tag.rnode))
3644  return;
3645 
3646  /*
3647  * If the block is already dirty because we either made a change
3648  * or set a hint already, then we don't need to write a full page
3649  * image. Note that aggressive cleaning of blocks dirtied by hint
3650  * bit setting would increase the call rate. Bulk setting of hint
3651  * bits would reduce the call rate...
3652  *
3653  * We must issue the WAL record before we mark the buffer dirty.
3654  * Otherwise we might write the page before we write the WAL. That
3655  * causes a race condition, since a checkpoint might occur between
3656  * writing the WAL record and marking the buffer dirty. We solve
3657  * that with a kluge, but one that is already in use during
3658  * transaction commit to prevent race conditions. Basically, we
3659  * simply prevent the checkpoint WAL record from being written
3660  * until we have marked the buffer dirty. We don't start the
3661  * checkpoint flush until we have marked dirty, so our checkpoint
3662  * must flush the change to disk successfully or the checkpoint
3663  * never gets written, so crash recovery will fix.
3664  *
3665  * It's possible we may enter here without an xid, so it is
3666  * essential that CreateCheckpoint waits for virtual transactions
3667  * rather than full transactionids.
3668  */
3669  MyProc->delayChkpt = delayChkpt = true;
3670  lsn = XLogSaveBufferForHint(buffer, buffer_std);
3671  }
3672 
3673  buf_state = LockBufHdr(bufHdr);
3674 
3675  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
3676 
3677  if (!(buf_state & BM_DIRTY))
3678  {
3679  dirtied = true; /* Means "will be dirtied by this action" */
3680 
3681  /*
3682  * Set the page LSN if we wrote a backup block. We aren't supposed
3683  * to set this when only holding a share lock but as long as we
3684  * serialise it somehow we're OK. We choose to set LSN while
3685  * holding the buffer header lock, which causes any reader of an
3686  * LSN who holds only a share lock to also obtain a buffer header
3687  * lock before using PageGetLSN(), which is enforced in
3688  * BufferGetLSNAtomic().
3689  *
3690  * If checksums are enabled, you might think we should reset the
3691  * checksum here. That will happen when the page is written
3692  * sometime later in this checkpoint cycle.
3693  */
3694  if (!XLogRecPtrIsInvalid(lsn))
3695  PageSetLSN(page, lsn);
3696  }
3697 
3698  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
3699  UnlockBufHdr(bufHdr, buf_state);
3700 
3701  if (delayChkpt)
3702  MyProc->delayChkpt = false;
3703 
3704  if (dirtied)
3705  {
3706  VacuumPageDirty++;
3708  if (VacuumCostActive)
3710  }
3711  }
3712 }
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
#define BM_PERMANENT
Definition: buf_internals.h:66
int VacuumCostBalance
Definition: globals.c:148
bool LWLockHeldByMe(LWLock *l)
Definition: lwlock.c:1927
PGPROC * MyProc
Definition: proc.c:67
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
Definition: xloginsert.c:917
int64 VacuumPageDirty
Definition: globals.c:146
bool RecoveryInProgress(void)
Definition: xlog.c:8071
#define BM_DIRTY
Definition: buf_internals.h:58
int VacuumCostPageDirty
Definition: globals.c:140
#define ERROR
Definition: elog.h:43
bool delayChkpt
Definition: proc.h:184
#define GetBufferDescriptor(id)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:63
long shared_blks_dirtied
Definition: instrument.h:23
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:379
unsigned int uint32
Definition: c.h:429
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:286
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4320
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:92
bool RelFileNodeSkippingWAL(RelFileNode rnode)
Definition: storage.c:497
BufferTag tag
#define UnlockBufHdr(desc, s)
#define elog(elevel,...)
Definition: elog.h:228
pg_atomic_uint32 state
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
#define XLogHintBitIsNeeded()
Definition: xlog.h:202
Pointer Page
Definition: bufpage.h:78
BufferUsage pgBufferUsage
Definition: instrument.c:20
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48
bool VacuumCostActive
Definition: globals.c:149
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ PrefetchBuffer()

PrefetchBufferResult PrefetchBuffer ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

Definition at line 575 of file bufmgr.c.

References Assert, BlockNumberIsValid, ereport, errcode(), errmsg(), ERROR, PrefetchLocalBuffer(), PrefetchSharedBuffer(), RelationData::rd_smgr, RELATION_IS_OTHER_TEMP, RelationIsValid, RelationOpenSmgr, and RelationUsesLocalBuffers.

Referenced by BitmapPrefetch(), count_nondeletable_pages(), HeapTupleHeaderAdvanceLatestRemovedXid(), and pg_prewarm().

576 {
577  Assert(RelationIsValid(reln));
578  Assert(BlockNumberIsValid(blockNum));
579 
580  /* Open it at the smgr level if not already done */
581  RelationOpenSmgr(reln);
582 
583  if (RelationUsesLocalBuffers(reln))
584  {
585  /* see comments in ReadBufferExtended */
586  if (RELATION_IS_OTHER_TEMP(reln))
587  ereport(ERROR,
588  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
589  errmsg("cannot access temporary tables of other sessions")));
590 
591  /* pass it off to localbuf.c */
592  return PrefetchLocalBuffer(reln->rd_smgr, forkNum, blockNum);
593  }
594  else
595  {
596  /* pass it to the shared buffer version */
597  return PrefetchSharedBuffer(reln->rd_smgr, forkNum, blockNum);
598  }
599 }
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition: localbuf.c:64
int errcode(int sqlerrcode)
Definition: elog.c:691
#define RelationOpenSmgr(relation)
Definition: rel.h:514
#define ERROR
Definition: elog.h:43
#define RelationIsValid(relation)
Definition: rel.h:430
PrefetchBufferResult PrefetchSharedBuffer(SMgrRelation smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:488
#define ereport(elevel,...)
Definition: elog.h:155
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
#define Assert(condition)
Definition: c.h:800
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:594
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:573
int errmsg(const char *fmt,...)
Definition: elog.c:902

◆ PrefetchSharedBuffer()

PrefetchBufferResult PrefetchSharedBuffer ( struct SMgrRelationData smgr_reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

◆ PrintBufferLeakWarning()

void PrintBufferLeakWarning ( Buffer  buffer)

Definition at line 2596 of file bufmgr.c.

References Assert, buftag::blockNum, buf, BUF_FLAG_MASK, BUF_STATE_GET_REFCOUNT, BufferIsLocal, BufferIsValid, elog, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, GetPrivateRefCount(), InvalidBackendId, LocalRefCount, MyBackendId, pfree(), pg_atomic_read_u32(), relpathbackend, buftag::rnode, BufferDesc::state, BufferDesc::tag, and WARNING.

Referenced by CheckForBufferLeaks(), CheckForLocalBufferLeaks(), and ResourceOwnerReleaseInternal().

2597 {
2598  BufferDesc *buf;
2599  int32 loccount;
2600  char *path;
2601  BackendId backend;
2602  uint32 buf_state;
2603 
2604  Assert(BufferIsValid(buffer));
2605  if (BufferIsLocal(buffer))
2606  {
2607  buf = GetLocalBufferDescriptor(-buffer - 1);
2608  loccount = LocalRefCount[-buffer - 1];
2609  backend = MyBackendId;
2610  }
2611  else
2612  {
2613  buf = GetBufferDescriptor(buffer - 1);
2614  loccount = GetPrivateRefCount(buffer);
2615  backend = InvalidBackendId;
2616  }
2617 
2618  /* theoretically we should lock the bufhdr here */
2619  path = relpathbackend(buf->tag.rnode, backend, buf->tag.forkNum);
2620  buf_state = pg_atomic_read_u32(&buf->state);
2621  elog(WARNING,
2622  "buffer refcount leak: [%03d] "
2623  "(rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
2624  buffer, path,
2625  buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
2626  BUF_STATE_GET_REFCOUNT(buf_state), loccount);
2627  pfree(path);
2628 }
BackendId MyBackendId
Definition: globals.c:81
ForkNumber forkNum
Definition: buf_internals.h:93
#define GetLocalBufferDescriptor(id)
signed int int32
Definition: c.h:417
void pfree(void *pointer)
Definition: mcxt.c:1057
#define BUF_FLAG_MASK
Definition: buf_internals.h:45
static char * buf
Definition: pg_test_fsync.c:68
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:379
unsigned int uint32
Definition: c.h:429
#define WARNING
Definition: elog.h:40
#define InvalidBackendId
Definition: backendid.h:23
int BackendId
Definition: backendid.h:21
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:94
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
#define elog(elevel,...)
Definition: elog.h:228
pg_atomic_uint32 state
#define relpathbackend(rnode, backend, forknum)
Definition: relpath.h:78
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ ReadBuffer()

Buffer ReadBuffer ( Relation  reln,
BlockNumber  blockNum 
)

Definition at line 607 of file bufmgr.c.

References MAIN_FORKNUM, RBM_NORMAL, and ReadBufferExtended().

Referenced by _bt_getbuf(), _bt_search_insert(), _hash_getbuf(), _hash_getbuf_with_condlock_cleanup(), blbulkdelete(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brinbuild(), brinGetStats(), brinGetTupleForHeapBlock(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), bt_metap(), bt_page_items(), bt_page_stats(), fill_seq_with_data(), ginFindLeafPage(), ginFindParents(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), GinNewBuffer(), ginStepRight(), ginUpdateStats(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistGetMaxLevel(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), heap_abort_speculative(), heap_compute_xid_horizon_for_tuples(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_inplace_update(), heap_lock_tuple(), heap_update(), initBloomState(), pg_visibility(), pgstatginindex_internal(), read_seq_tuple(), RelationGetBufferForTuple(), ReleaseAndReadBuffer(), revmap_get_buffer(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), shiftList(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), and spgWalk().

608 {
609  return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
610 }
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:654

◆ ReadBufferExtended()

Buffer ReadBufferExtended ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy 
)

Definition at line 654 of file bufmgr.c.

References buf, ereport, errcode(), errmsg(), ERROR, pgstat_count_buffer_hit, pgstat_count_buffer_read, RelationData::rd_rel, RelationData::rd_smgr, ReadBuffer_common(), RELATION_IS_OTHER_TEMP, and RelationOpenSmgr.

Referenced by _hash_getbuf_with_strategy(), _hash_getinitbuf(), _hash_getnewbuf(), autoprewarm_database_main(), blbulkdelete(), blgetbitmap(), blvacuumcleanup(), brin_vacuum_scan(), brinbuildempty(), bt_recheck_sibling_links(), btvacuumpage(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), fsm_readbuf(), get_raw_page_internal(), ginbuildempty(), ginbulkdelete(), ginDeletePage(), ginScanToDelete(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistbuildempty(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbulkdelete(), heapam_scan_analyze_next_block(), heapgetpage(), lazy_scan_heap(), lazy_vacuum_heap(), log_newpage_range(), palloc_btree_page(), pg_prewarm(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstathashindex(), pgstatindex_impl(), ReadBuffer(), ReadBufferBI(), spgprocesspending(), spgvacuumpage(), statapprox_heap(), verify_heapam(), and vm_readbuf().

656 {
657  bool hit;
658  Buffer buf;
659 
660  /* Open it at the smgr level if not already done */
661  RelationOpenSmgr(reln);
662 
663  /*
664  * Reject attempts to read non-local temporary relations; we would be
665  * likely to get wrong data since we have no visibility into the owning
666  * session's local buffers.
667  */
668  if (RELATION_IS_OTHER_TEMP(reln))
669  ereport(ERROR,
670  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
671  errmsg("cannot access temporary tables of other sessions")));
672 
673  /*
674  * Read the buffer, and update pgstat counters to reflect a cache hit or
675  * miss.
676  */
678  buf = ReadBuffer_common(reln->rd_smgr, reln->rd_rel->relpersistence,
679  forkNum, blockNum, mode, strategy, &hit);
680  if (hit)
682  return buf;
683 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
int errcode(int sqlerrcode)
Definition: elog.c:691
Form_pg_class rd_rel
Definition: rel.h:110
#define RelationOpenSmgr(relation)
Definition: rel.h:514
#define ERROR
Definition: elog.h:43
#define pgstat_count_buffer_read(rel)
Definition: pgstat.h:1513
static char * buf
Definition: pg_test_fsync.c:68
#define pgstat_count_buffer_hit(rel)
Definition: pgstat.h:1518
static Buffer ReadBuffer_common(SMgrRelation reln, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit)
Definition: bufmgr.c:717
#define ereport(elevel,...)
Definition: elog.h:155
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:594
int errmsg(const char *fmt,...)
Definition: elog.c:902
int Buffer
Definition: buf.h:23

◆ ReadBufferWithoutRelcache()

Buffer ReadBufferWithoutRelcache ( RelFileNode  rnode,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy 
)

Definition at line 696 of file bufmgr.c.

References Assert, InRecovery, InvalidBackendId, ReadBuffer_common(), and smgropen().

Referenced by XLogReadBufferExtended().

699 {
700  bool hit;
701 
702  SMgrRelation smgr = smgropen(rnode, InvalidBackendId);
703 
705 
706  return ReadBuffer_common(smgr, RELPERSISTENCE_PERMANENT, forkNum, blockNum,
707  mode, strategy, &hit);
708 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
bool InRecovery
Definition: xlog.c:205
SMgrRelation smgropen(RelFileNode rnode, BackendId backend)
Definition: smgr.c:146
#define InvalidBackendId
Definition: backendid.h:23
static Buffer ReadBuffer_common(SMgrRelation reln, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit)
Definition: bufmgr.c:717
#define Assert(condition)
Definition: c.h:800

◆ RelationGetNumberOfBlocksInFork()

BlockNumber RelationGetNumberOfBlocksInFork ( Relation  relation,
ForkNumber  forkNum 
)

Definition at line 2850 of file bufmgr.c.

References Assert, RelationData::rd_rel, RelationData::rd_smgr, RelationOpenSmgr, smgrnblocks(), and table_relation_size().

Referenced by _hash_getnewbuf(), _hash_init(), autoprewarm_database_main(), get_raw_page_internal(), and pg_prewarm().

2851 {
2852  switch (relation->rd_rel->relkind)
2853  {
2854  case RELKIND_SEQUENCE:
2855  case RELKIND_INDEX:
2856  case RELKIND_PARTITIONED_INDEX:
2857  /* Open it at the smgr level if not already done */
2858  RelationOpenSmgr(relation);
2859 
2860  return smgrnblocks(relation->rd_smgr, forkNum);
2861 
2862  case RELKIND_RELATION:
2863  case RELKIND_TOASTVALUE:
2864  case RELKIND_MATVIEW:
2865  {
2866  /*
2867  * Not every table AM uses BLCKSZ wide fixed size blocks.
2868  * Therefore tableam returns the size in bytes - but for the
2869  * purpose of this routine, we want the number of blocks.
2870  * Therefore divide, rounding up.
2871  */
2872  uint64 szbytes;
2873 
2874  szbytes = table_relation_size(relation, forkNum);
2875 
2876  return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
2877  }
2878  case RELKIND_VIEW:
2879  case RELKIND_COMPOSITE_TYPE:
2880  case RELKIND_FOREIGN_TABLE:
2881  case RELKIND_PARTITIONED_TABLE:
2882  default:
2883  Assert(false);
2884  break;
2885  }
2886 
2887  return 0; /* keep compiler quiet */
2888 }
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
Form_pg_class rd_rel
Definition: rel.h:110
#define RelationOpenSmgr(relation)
Definition: rel.h:514
static uint64 table_relation_size(Relation rel, ForkNumber forkNumber)
Definition: tableam.h:1645
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:549
#define Assert(condition)
Definition: c.h:800

◆ ReleaseAndReadBuffer()

Buffer ReleaseAndReadBuffer ( Buffer  buffer,
Relation  relation,
BlockNumber  blockNum 
)

Definition at line 1534 of file bufmgr.c.

References Assert, buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufferIsValid, CurrentResourceOwner, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, LocalRefCount, MAIN_FORKNUM, RelationData::rd_node, ReadBuffer(), RelFileNodeEquals, ResourceOwnerForgetBuffer(), buftag::rnode, BufferDesc::tag, and UnpinBuffer().

Referenced by _bt_relandgetbuf(), ginFindLeafPage(), heapam_index_fetch_tuple(), and heapam_scan_bitmap_next_block().

1537 {
1538  ForkNumber forkNum = MAIN_FORKNUM;
1539  BufferDesc *bufHdr;
1540 
1541  if (BufferIsValid(buffer))
1542  {
1543  Assert(BufferIsPinned(buffer));
1544  if (BufferIsLocal(buffer))
1545  {
1546  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
1547  if (bufHdr->tag.blockNum == blockNum &&
1548  RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
1549  bufHdr->tag.forkNum == forkNum)
1550  return buffer;
1552  LocalRefCount[-buffer - 1]--;
1553  }
1554  else
1555  {
1556  bufHdr = GetBufferDescriptor(buffer - 1);
1557  /* we have pin, so it's ok to examine tag without spinlock */
1558  if (bufHdr->tag.blockNum == blockNum &&
1559  RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
1560  bufHdr->tag.forkNum == forkNum)
1561  return buffer;
1562  UnpinBuffer(bufHdr, true);
1563  }
1564  }
1565 
1566  return ReadBuffer(relation, blockNum);
1567 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
ForkNumber forkNum
Definition: buf_internals.h:93
ResourceOwner CurrentResourceOwner
Definition: resowner.c:144
#define GetLocalBufferDescriptor(id)
#define GetBufferDescriptor(id)
ForkNumber
Definition: relpath.h:40
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1740
RelFileNode rd_node
Definition: rel.h:55
#define Assert(condition)
Definition: c.h:800
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:607
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:94
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
int32 * LocalRefCount
Definition: localbuf.c:45
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88
void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:956

◆ ReleaseBuffer()

void ReleaseBuffer ( Buffer  buffer)

Definition at line 3513 of file bufmgr.c.

References Assert, BufferIsLocal, BufferIsValid, CurrentResourceOwner, elog, ERROR, GetBufferDescriptor, LocalRefCount, ResourceOwnerForgetBuffer(), and UnpinBuffer().

Referenced by _bt_drop_lock_and_maybe_pin(), _bt_getbuf(), _bt_pagedel(), _bt_relbuf(), _bt_search_insert(), _bt_unlink_halfdead_page(), _hash_dropbuf(), _hash_getbuf_with_condlock_cleanup(), autoprewarm_database_main(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brin_vacuum_scan(), bringetbitmap(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapTerminate(), brinsummarize(), collect_corrupt_items(), collect_visibility_data(), entryLoadMoreItems(), ExecEndBitmapHeapScan(), ExecEndIndexOnlyScan(), ExecReScanBitmapHeapScan(), FreeBulkInsertState(), freeGinBtreeStack(), fsm_vacuum_page(), get_actual_variable_endpoint(), get_raw_page_internal(), GetRecordedFreeSpace(), ginDeletePage(), ginFindParents(), ginFinishSplit(), ginFreeScanKeys(), ginInsertCleanup(), GinNewBuffer(), ginScanToDelete(), gistdoinsert(), gistFindCorrectParent(), gistNewBuffer(), gistvacuum_delete_empty_pages(), heap_abort_speculative(), heap_compute_xid_horizon_for_tuples(), heap_delete(), heap_endscan(), heap_fetch(), heap_force_common(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_rescan(), heap_update(), heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapgetpage(), heapgettup(), heapgettup_pagemode(), lazy_scan_heap(), lazy_vacuum_heap(), pg_prewarm(), pg_visibility(), pg_visibility_map(), pg_visibility_map_summary(), pgstatindex_impl(), ReadBufferBI(), RelationGetBufferForTuple(), ReleaseBulkInsertStatePin(), ResourceOwnerReleaseInternal(), revmap_get_buffer(), revmap_physical_extend(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), statapprox_heap(), summarize_range(), terminate_brin_buildstate(), tts_buffer_heap_clear(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), UnlockReleaseBuffer(), 
verify_heapam(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), and XLogReadBufferExtended().

3514 {
3515  if (!BufferIsValid(buffer))
3516  elog(ERROR, "bad buffer ID: %d", buffer);
3517 
3518  if (BufferIsLocal(buffer))
3519  {
3521 
3522  Assert(LocalRefCount[-buffer - 1] > 0);
3523  LocalRefCount[-buffer - 1]--;
3524  return;
3525  }
3526 
3527  UnpinBuffer(GetBufferDescriptor(buffer - 1), true);
3528 }
ResourceOwner CurrentResourceOwner
Definition: resowner.c:144
#define ERROR
Definition: elog.h:43
#define GetBufferDescriptor(id)
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1740
#define Assert(condition)
Definition: c.h:800
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define elog(elevel,...)
Definition: elog.h:228
int32 * LocalRefCount
Definition: localbuf.c:45
void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:956

◆ TestForOldSnapshot()

static void TestForOldSnapshot ( Snapshot  snapshot,
Relation  relation,
Page  page 
)
inlinestatic

Definition at line 277 of file bufmgr.h.

References Assert, old_snapshot_threshold, PageGetLSN, SNAPSHOT_MVCC, SNAPSHOT_TOAST, TestForOldSnapshot_impl(), and XLogRecPtrIsInvalid.

Referenced by _bt_get_endpoint(), _bt_moveright(), _bt_readnextpage(), _bt_walk_left(), _hash_first(), _hash_next(), _hash_readnext(), _hash_readprev(), blgetbitmap(), brinGetTupleForHeapBlock(), brinRevmapInitialize(), collectMatchBitmap(), collectMatchesForHeapRow(), ginFindLeafPage(), gistScanPage(), heap_fetch(), heap_get_latest_tid(), heapgetpage(), heapgettup(), heapgettup_pagemode(), scanGetCandidate(), scanPendingInsert(), and spgWalk().

278 {
279  Assert(relation != NULL);
280 
281  if (old_snapshot_threshold >= 0
282  && (snapshot) != NULL
283  && ((snapshot)->snapshot_type == SNAPSHOT_MVCC
284  || (snapshot)->snapshot_type == SNAPSHOT_TOAST)
285  && !XLogRecPtrIsInvalid((snapshot)->lsn)
286  && PageGetLSN(page) > (snapshot)->lsn)
287  TestForOldSnapshot_impl(snapshot, relation);
288 }
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define Assert(condition)
Definition: c.h:800
#define PageGetLSN(page)
Definition: bufpage.h:366
int old_snapshot_threshold
Definition: snapmgr.c:78
void TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
Definition: bufmgr.c:4580

◆ TestForOldSnapshot_impl()

void TestForOldSnapshot_impl ( Snapshot  snapshot,
Relation  relation 
)

Definition at line 4580 of file bufmgr.c.

References ereport, errcode(), errmsg(), ERROR, GetOldSnapshotThresholdTimestamp(), and RelationAllowsEarlyPruning.

Referenced by TestForOldSnapshot().

4581 {
4582  if (RelationAllowsEarlyPruning(relation)
4583  && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
4584  ereport(ERROR,
4585  (errcode(ERRCODE_SNAPSHOT_TOO_OLD),
4586  errmsg("snapshot too old")));
4587 }
TimestampTz GetOldSnapshotThresholdTimestamp(void)
Definition: snapmgr.c:1660
#define RelationAllowsEarlyPruning(rel)
Definition: snapmgr.h:38
int errcode(int sqlerrcode)
Definition: elog.c:691
#define ERROR
Definition: elog.h:43
#define ereport(elevel,...)
Definition: elog.h:155
int errmsg(const char *fmt,...)
Definition: elog.c:902

◆ UnlockBuffers()

void UnlockBuffers ( void  )

Definition at line 3724 of file bufmgr.c.

References BM_PIN_COUNT_WAITER, buf, LockBufHdr(), MyProcPid, PinCountWaitBuf, UnlockBufHdr, and BufferDesc::wait_backend_pid.

Referenced by AbortSubTransaction(), AbortTransaction(), AtProcExit_Buffers(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), and WalWriterMain().

3725 {
3727 
3728  if (buf)
3729  {
3730  uint32 buf_state;
3731 
3732  buf_state = LockBufHdr(buf);
3733 
3734  /*
3735  * Don't complain if flag bit not set; it could have been reset but we
3736  * got a cancel/die interrupt before getting the signal.
3737  */
3738  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
3739  buf->wait_backend_pid == MyProcPid)
3740  buf_state &= ~BM_PIN_COUNT_WAITER;
3741 
3742  UnlockBufHdr(buf, buf_state);
3743 
3744  PinCountWaitBuf = NULL;
3745  }
3746 }
int MyProcPid
Definition: globals.c:40
int wait_backend_pid
static char * buf
Definition: pg_test_fsync.c:68
unsigned int uint32
Definition: c.h:429
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4320
#define UnlockBufHdr(desc, s)
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:157
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:64

◆ UnlockReleaseBuffer()

void UnlockReleaseBuffer ( Buffer  buffer)

Definition at line 3536 of file bufmgr.c.

References BUFFER_LOCK_UNLOCK, LockBuffer(), and ReleaseBuffer().

Referenced by _bt_clear_incomplete_split(), _bt_restore_meta(), _hash_relbuf(), allocNewBuffer(), AlterSequence(), blbulkdelete(), blgetbitmap(), blinsert(), BloomInitMetapage(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinGetStats(), brinRevmapDesummarizeRange(), bt_metap(), bt_page_items(), bt_page_stats(), bt_recheck_sibling_links(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), createPostingTree(), do_setval(), doPickSplit(), entryLoadMoreItems(), fill_seq_with_data(), flushCachedPage(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_set_and_search(), generic_redo(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginFindParents(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), ginScanToDelete(), ginStepRight(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistbufferinginserttuples(), gistbuild(), gistbuildempty(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), 
hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_inplace_update(), heap_insert(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), heap_xlog_clean(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_scan_analyze_next_tuple(), initBloomState(), lazy_scan_heap(), lazy_vacuum_heap(), log_newpage_range(), moveLeafs(), nextval_internal(), palloc_btree_page(), pg_sequence_last_value(), pg_visibility(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), ResetSequence(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistUpdateMetaPage(), spgMatchNodeAction(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), statapprox_heap(), verify_heapam(), visibilitymap_prepare_truncate(), writeListPage(), xlog_redo(), and XLogRecordPageWithFreeSpace().

3537 {
3538  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3539  ReleaseBuffer(buffer);
3540 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3513
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3752

Variable Documentation

◆ backend_flush_after

int backend_flush_after

Definition at line 150 of file bufmgr.c.

Referenced by InitBufferPool().

◆ bgwriter_flush_after

int bgwriter_flush_after

Definition at line 149 of file bufmgr.c.

Referenced by BackgroundWriterMain().

◆ bgwriter_lru_maxpages

int bgwriter_lru_maxpages

Definition at line 125 of file bufmgr.c.

Referenced by BgBufferSync().

◆ bgwriter_lru_multiplier

double bgwriter_lru_multiplier

Definition at line 126 of file bufmgr.c.

Referenced by BgBufferSync().

◆ BufferBlocks

PGDLLIMPORT char* BufferBlocks

Definition at line 21 of file buf_init.c.

Referenced by InitBufferPool().

◆ checkpoint_flush_after

int checkpoint_flush_after

Definition at line 148 of file bufmgr.c.

Referenced by BufferSync().

◆ effective_io_concurrency

int effective_io_concurrency

Definition at line 135 of file bufmgr.c.

Referenced by get_tablespace_io_concurrency(), and tablespace_reloptions().

◆ LocalBufferBlockPointers

PGDLLIMPORT Block* LocalBufferBlockPointers

Definition at line 44 of file localbuf.c.

Referenced by InitLocalBuffers().

◆ LocalRefCount

◆ maintenance_io_concurrency

int maintenance_io_concurrency

◆ NBuffers

◆ NLocBuffer

◆ track_io_timing

bool track_io_timing

Definition at line 127 of file bufmgr.c.

Referenced by FlushBuffer(), ReadBuffer_common(), and show_buffer_usage().

◆ zero_damaged_pages

bool zero_damaged_pages

Definition at line 124 of file bufmgr.c.

Referenced by mdread(), and ReadBuffer_common().