PostgreSQL Source Code  git master
bufmgr.h File Reference
#include "storage/block.h"
#include "storage/buf.h"
#include "storage/bufpage.h"
#include "storage/relfilenode.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
Include dependency graph for bufmgr.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  PrefetchBufferResult
 

Macros

#define MAX_IO_CONCURRENCY   1000
 
#define P_NEW   InvalidBlockNumber /* grow the file to get a new page */
 
#define BUFFER_LOCK_UNLOCK   0
 
#define BUFFER_LOCK_SHARE   1
 
#define BUFFER_LOCK_EXCLUSIVE   2
 
#define BufferIsValid(bufnum)
 
#define BufferGetBlock(buffer)
 
#define BufferGetPageSize(buffer)
 
#define BufferGetPage(buffer)   ((Page)BufferGetBlock(buffer))
 
#define RelationGetNumberOfBlocks(reln)   RelationGetNumberOfBlocksInFork(reln, MAIN_FORKNUM)
 

Typedefs

typedef void * Block
 
typedef enum BufferAccessStrategyType BufferAccessStrategyType
 
typedef struct PrefetchBufferResult PrefetchBufferResult
 

Enumerations

enum  BufferAccessStrategyType { BAS_NORMAL, BAS_BULKREAD, BAS_BULKWRITE, BAS_VACUUM }
 
enum  ReadBufferMode {
  RBM_NORMAL, RBM_ZERO_AND_LOCK, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_ON_ERROR,
  RBM_NORMAL_NO_LOG
}
 

Functions

PrefetchBufferResult PrefetchSharedBuffer (struct SMgrRelationData *smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
 
PrefetchBufferResult PrefetchBuffer (Relation reln, ForkNumber forkNum, BlockNumber blockNum)
 
Buffer ReadBuffer (Relation reln, BlockNumber blockNum)
 
Buffer ReadBufferExtended (Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
 
Buffer ReadBufferWithoutRelcache (RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
 
void ReleaseBuffer (Buffer buffer)
 
void UnlockReleaseBuffer (Buffer buffer)
 
void MarkBufferDirty (Buffer buffer)
 
void IncrBufferRefCount (Buffer buffer)
 
Buffer ReleaseAndReadBuffer (Buffer buffer, Relation relation, BlockNumber blockNum)
 
void InitBufferPool (void)
 
void InitBufferPoolAccess (void)
 
void InitBufferPoolBackend (void)
 
void AtEOXact_Buffers (bool isCommit)
 
void PrintBufferLeakWarning (Buffer buffer)
 
void CheckPointBuffers (int flags)
 
BlockNumber BufferGetBlockNumber (Buffer buffer)
 
BlockNumber RelationGetNumberOfBlocksInFork (Relation relation, ForkNumber forkNum)
 
void FlushOneBuffer (Buffer buffer)
 
void FlushRelationBuffers (Relation rel)
 
void FlushRelationsAllBuffers (struct SMgrRelationData **smgrs, int nrels)
 
void FlushDatabaseBuffers (Oid dbid)
 
void DropRelFileNodeBuffers (RelFileNodeBackend rnode, ForkNumber *forkNum, int nforks, BlockNumber *firstDelBlock)
 
void DropRelFileNodesAllBuffers (RelFileNodeBackend *rnodes, int nnodes)
 
void DropDatabaseBuffers (Oid dbid)
 
bool BufferIsPermanent (Buffer buffer)
 
XLogRecPtr BufferGetLSNAtomic (Buffer buffer)
 
Size BufferShmemSize (void)
 
void BufferGetTag (Buffer buffer, RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum)
 
void MarkBufferDirtyHint (Buffer buffer, bool buffer_std)
 
void UnlockBuffers (void)
 
void LockBuffer (Buffer buffer, int mode)
 
bool ConditionalLockBuffer (Buffer buffer)
 
void LockBufferForCleanup (Buffer buffer)
 
bool ConditionalLockBufferForCleanup (Buffer buffer)
 
bool IsBufferCleanupOK (Buffer buffer)
 
bool HoldingBufferPinThatDelaysRecovery (void)
 
void AbortBufferIO (void)
 
void BufmgrCommit (void)
 
bool BgBufferSync (struct WritebackContext *wb_context)
 
void AtProcExit_LocalBuffers (void)
 
void TestForOldSnapshot_impl (Snapshot snapshot, Relation relation)
 
BufferAccessStrategy GetAccessStrategy (BufferAccessStrategyType btype)
 
void FreeAccessStrategy (BufferAccessStrategy strategy)
 
static void TestForOldSnapshot (Snapshot snapshot, Relation relation, Page page)
 

Variables

PGDLLIMPORT int NBuffers
 
bool zero_damaged_pages
 
int bgwriter_lru_maxpages
 
double bgwriter_lru_multiplier
 
bool track_io_timing
 
int effective_io_concurrency
 
int maintenance_io_concurrency
 
int checkpoint_flush_after
 
int backend_flush_after
 
int bgwriter_flush_after
 
PGDLLIMPORT char * BufferBlocks
 
PGDLLIMPORT int NLocBuffer
 
PGDLLIMPORT Block * LocalBufferBlockPointers
 
PGDLLIMPORT int32 * LocalRefCount
 

Macro Definition Documentation

◆ BUFFER_LOCK_EXCLUSIVE

◆ BUFFER_LOCK_SHARE

◆ BUFFER_LOCK_UNLOCK

#define BUFFER_LOCK_UNLOCK   0

Definition at line 96 of file bufmgr.h.

Referenced by _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_getcachedmetap(), _hash_init(), _hash_kill_items(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), blinsert(), BloomNewBuffer(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_page_cleanup(), bringetbitmap(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinsummarize(), ConditionalLockBufferForCleanup(), fsm_readbuf(), fsm_search_avail(), fsm_vacuum_page(), get_raw_page_internal(), GetVisibilityMapPins(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_compute_xid_horizon_for_tuples(), heap_delete(), heap_fetch(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_page_prune_opt(), heap_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_for_cluster(), heapam_scan_bitmap_next_block(), heapam_scan_sample_next_tuple(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), lazy_scan_heap(), LockBuffer(), LockBufferForCleanup(), pgrowlocks(), pgstat_heap(), pgstatindex_impl(), RelationGetBufferForTuple(), revmap_physical_extend(), SpGistNewBuffer(), summarize_range(), UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_set(), vm_readbuf(), and XLogReadBufferExtended().

◆ BufferGetBlock

#define BufferGetBlock (   buffer)
Value:
( \
AssertMacro(BufferIsValid(buffer)), \
BufferIsLocal(buffer) ? \
LocalBufferBlockPointers[-(buffer) - 1] \
: \
(Block) (BufferBlocks + ((Size) ((buffer) - 1)) * BLCKSZ) \
)
PGDLLIMPORT char * BufferBlocks
Definition: buf_init.c:21
size_t Size
Definition: c.h:473
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
void * Block
Definition: bufmgr.h:24

Definition at line 136 of file bufmgr.h.

Referenced by XLogSaveBufferForHint().

◆ BufferGetPage

#define BufferGetPage (   buffer)    ((Page)BufferGetBlock(buffer))

Definition at line 169 of file bufmgr.h.

Referenced by _bt_binsrch(), _bt_binsrch_insert(), _bt_check_unique(), _bt_checkpage(), _bt_clear_incomplete_split(), _bt_conditionallockbuf(), _bt_dedup_one_page(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_endpoint(), _bt_findinsertloc(), _bt_finish_split(), _bt_get_endpoint(), _bt_getbuf(), _bt_getmeta(), _bt_getroot(), _bt_getstackbuf(), _bt_gettrueroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_killitems(), _bt_leftsib_splitflag(), _bt_lock_subtree_parent(), _bt_lockbuf(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newroot(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_rightsib_halfdeadflag(), _bt_search(), _bt_search_insert(), _bt_split(), _bt_stepright(), _bt_unlink_halfdead_page(), _bt_unlockbuf(), _bt_update_meta_cleanup_info(), _bt_upgradelockbufcleanup(), _bt_vacuum_needs_cleanup(), _bt_vacuum_one_page(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_get_newblock_from_oldbucket(), _hash_get_oldblock_from_newbucket(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_getinitbuf(), _hash_getnewbuf(), _hash_init(), _hash_init_metabuffer(), _hash_initbitmapbuffer(), _hash_initbuf(), _hash_kill_items(), _hash_next(), _hash_pgaddmultitup(), _hash_pgaddtup(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), allocNewBuffer(), blgetbitmap(), blinsert(), BloomNewBuffer(), blvacuumcleanup(), brin_can_do_samepage_update(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_start_evacuating_page(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinGetStats(), brinGetTupleForHeapBlock(), 
brininsert(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinSetHeapBlockItemptr(), bt_metap(), bt_page_items(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), btvacuumpage(), BufferGetLSNAtomic(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), collectMatchesForHeapRow(), count_nondeletable_pages(), createPostingTree(), dataBeginPlaceToPage(), dataBeginPlaceToPageInternal(), dataBeginPlaceToPageLeaf(), dataExecPlaceToPage(), dataExecPlaceToPageInternal(), dataLocateItem(), dataPlaceToPageLeafRecompress(), dataPrepareDownlink(), dataSplitPageInternal(), do_setval(), doPickSplit(), entryExecPlaceToPage(), entryIsEnoughSpace(), entryLoadMoreItems(), entryLocateEntry(), entryLocateLeafEntry(), entryPrepareDownlink(), entrySplitPage(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), generic_redo(), GenericXLogFinish(), GenericXLogRegisterBuffer(), get_raw_page_internal(), GetBTPageStatistics(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginbulkdelete(), ginDeletePage(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), GinInitBuffer(), GinInitMetabuffer(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertData(), ginRedoInsertEntry(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumEntryPage(), ginVacuumPostingTreeLeaf(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), 
gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), GISTInitBuffer(), gistkillitems(), gistMemorizeAllDownlinks(), gistNewBuffer(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hash_bitmap_info(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_compute_xid_horizon_for_tuples(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_hot_search_buffer(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_is_all_visible(), heap_page_prune(), heap_page_prune_execute(), heap_page_prune_opt(), heap_prune_chain(), heap_update(), heap_xlog_clean(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_validate_scan(), heapam_scan_analyze_next_tuple(), heapam_scan_bitmap_next_block(), heapam_scan_bitmap_next_tuple(), heapam_scan_sample_next_tuple(), heapgetpage(), heapgettup(), heapgettup_pagemode(), index_compute_xid_horizon_for_tuples(), initBloomState(), lazy_check_needs_freeze(), lazy_scan_heap(), lazy_vacuum_heap(), lazy_vacuum_page(), log_heap_update(), log_newpage_buffer(), 
log_newpage_range(), log_split_page(), MarkBufferDirtyHint(), moveLeafs(), moveRightIfItNeeded(), nextval_internal(), palloc_btree_page(), pg_visibility(), pgstat_btree_page(), pgstat_gist_page(), pgstat_hash_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistInitBuffer(), SpGistNewBuffer(), SpGistSetLastUsedPage(), SpGistUpdateMetaPage(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), terminate_brin_buildstate(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), writeListPage(), XLogCheckBufferNeedsBackup(), XLogReadBufferExtended(), XLogReadBufferForRedoExtended(), XLogRecordPageWithFreeSpace(), XLogRegisterBuffer(), XLogSaveBufferForHint(), and xlogVacuumPage().

◆ BufferGetPageSize

◆ BufferIsValid

#define BufferIsValid (   bufnum)
Value:
( \
AssertMacro((bufnum) <= NBuffers && (bufnum) >= -NLocBuffer), \
(bufnum) != InvalidBuffer \
)
#define InvalidBuffer
Definition: buf.h:25
PGDLLIMPORT int NLocBuffer
Definition: localbuf.c:41
PGDLLIMPORT int NBuffers
Definition: globals.c:132

Definition at line 123 of file bufmgr.h.

Referenced by _bt_clear_incomplete_split(), _bt_endpoint(), _bt_first(), _bt_get_endpoint(), _bt_insertonpg(), _bt_readpage(), _bt_relandgetbuf(), _bt_search(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_dropscanbuf(), _hash_freeovflpage(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_readnext(), _hash_readpage(), _hash_readprev(), autoprewarm_database_main(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinGetTupleForHeapBlock(), brininsert(), brinsummarize(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), BufferGetLSNAtomic(), BufferIsPermanent(), checkXLogConsistency(), ConditionalLockBufferForCleanup(), doPickSplit(), entryGetItem(), entryLoadMoreItems(), ExecStoreBufferHeapTuple(), ExecStorePinnedBufferHeapTuple(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_vacuum_page(), generic_redo(), GetPrivateRefCount(), GetPrivateRefCountEntry(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageUpdateRecord(), gistXLogSplit(), gistXLogUpdate(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_compute_xid_horizon_for_tuples(), 
heap_endscan(), heap_lock_tuple(), heap_rescan(), heap_update(), heap_xlog_clean(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), heapgettup_pagemode(), IsBufferCleanupOK(), lazy_scan_heap(), lazy_vacuum_heap(), lazy_vacuum_page(), log_heap_visible(), MarkBufferDirty(), MarkBufferDirtyHint(), PrintBufferLeakWarning(), ReleaseAndReadBuffer(), ReleaseBuffer(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), statapprox_heap(), tts_buffer_heap_clear(), tts_buffer_heap_copyslot(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), visibilitymap_prepare_truncate(), visibilitymap_set(), and XLogReadBufferForRedoExtended().

◆ MAX_IO_CONCURRENCY

#define MAX_IO_CONCURRENCY   1000

Definition at line 88 of file bufmgr.h.

◆ P_NEW

◆ RelationGetNumberOfBlocks

Typedef Documentation

◆ Block

typedef void* Block

Definition at line 24 of file bufmgr.h.

◆ BufferAccessStrategyType

◆ PrefetchBufferResult

Enumeration Type Documentation

◆ BufferAccessStrategyType

Enumerator
BAS_NORMAL 
BAS_BULKREAD 
BAS_BULKWRITE 
BAS_VACUUM 

Definition at line 27 of file bufmgr.h.

28 {
29  BAS_NORMAL, /* Normal random access */
30  BAS_BULKREAD, /* Large read-only scan (hint bit updates are
31  * ok) */
32  BAS_BULKWRITE, /* Large multi-block write (e.g. COPY IN) */
33  BAS_VACUUM /* VACUUM */
BufferAccessStrategyType
Definition: bufmgr.h:27

◆ ReadBufferMode

Enumerator
RBM_NORMAL 
RBM_ZERO_AND_LOCK 
RBM_ZERO_AND_CLEANUP_LOCK 
RBM_ZERO_ON_ERROR 
RBM_NORMAL_NO_LOG 

Definition at line 37 of file bufmgr.h.

38 {
39  RBM_NORMAL, /* Normal read */
40  RBM_ZERO_AND_LOCK, /* Don't read from disk, caller will
41  * initialize. Also locks the page. */
42  RBM_ZERO_AND_CLEANUP_LOCK, /* Like RBM_ZERO_AND_LOCK, but locks the page
43  * in "cleanup" mode */
44  RBM_ZERO_ON_ERROR, /* Read, but return an all-zeros page on error */
45  RBM_NORMAL_NO_LOG /* Don't log page as invalid during WAL
46  * replay; otherwise same as RBM_NORMAL */
ReadBufferMode
Definition: bufmgr.h:37

Function Documentation

◆ AbortBufferIO()

void AbortBufferIO ( void  )

Definition at line 4200 of file bufmgr.c.

References Assert, buftag::blockNum, BM_DIRTY, BM_IO_ERROR, BM_IO_IN_PROGRESS, BM_VALID, buf, BufferDescriptorGetIOLock, ereport, errcode(), errdetail(), errmsg(), buftag::forkNum, InProgressBuf, IsForInput, LockBufHdr(), LW_EXCLUSIVE, LWLockAcquire(), pfree(), relpathperm, buftag::rnode, BufferDesc::tag, TerminateBufferIO(), UnlockBufHdr, and WARNING.

Referenced by AbortSubTransaction(), AbortTransaction(), AtProcExit_Buffers(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), and WalWriterMain().

4201 {
4203 
4204  if (buf)
4205  {
4206  uint32 buf_state;
4207 
4208  /*
4209  * Since LWLockReleaseAll has already been called, we're not holding
4210  * the buffer's io_in_progress_lock. We have to re-acquire it so that
4211  * we can use TerminateBufferIO. Anyone who's executing WaitIO on the
4212  * buffer will be in a busy spin until we succeed in doing this.
4213  */
4215 
4216  buf_state = LockBufHdr(buf);
4217  Assert(buf_state & BM_IO_IN_PROGRESS);
4218  if (IsForInput)
4219  {
4220  Assert(!(buf_state & BM_DIRTY));
4221 
4222  /* We'd better not think buffer is valid yet */
4223  Assert(!(buf_state & BM_VALID));
4224  UnlockBufHdr(buf, buf_state);
4225  }
4226  else
4227  {
4228  Assert(buf_state & BM_DIRTY);
4229  UnlockBufHdr(buf, buf_state);
4230  /* Issue notice if this is not the first failure... */
4231  if (buf_state & BM_IO_ERROR)
4232  {
4233  /* Buffer is pinned, so we can read tag without spinlock */
4234  char *path;
4235 
4236  path = relpathperm(buf->tag.rnode, buf->tag.forkNum);
4237  ereport(WARNING,
4238  (errcode(ERRCODE_IO_ERROR),
4239  errmsg("could not write block %u of %s",
4240  buf->tag.blockNum, path),
4241  errdetail("Multiple failures --- write error might be permanent.")));
4242  pfree(path);
4243  }
4244  }
4245  TerminateBufferIO(buf, false, BM_IO_ERROR);
4246  }
4247 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:83
ForkNumber forkNum
Definition: buf_internals.h:93
int errcode(int sqlerrcode)
Definition: elog.c:610
#define BM_DIRTY
Definition: buf_internals.h:58
#define BufferDescriptorGetIOLock(bdesc)
static BufferDesc * InProgressBuf
Definition: bufmgr.c:153
void pfree(void *pointer)
Definition: mcxt.c:1057
static char * buf
Definition: pg_test_fsync.c:68
int errdetail(const char *fmt,...)
Definition: elog.c:957
unsigned int uint32
Definition: c.h:374
static bool IsForInput
Definition: bufmgr.c:154
#define WARNING
Definition: elog.h:40
static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits)
Definition: bufmgr.c:4168
#define BM_VALID
Definition: buf_internals.h:59
#define ereport(elevel,...)
Definition: elog.h:144
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4318
#define Assert(condition)
Definition: c.h:745
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1208
BlockNumber blockNum
Definition: buf_internals.h:94
RelFileNode rnode
Definition: buf_internals.h:92
#define BM_IO_ERROR
Definition: buf_internals.h:62
BufferTag tag
int errmsg(const char *fmt,...)
Definition: elog.c:824
#define UnlockBufHdr(desc, s)
#define BM_IO_IN_PROGRESS
Definition: buf_internals.h:61

◆ AtEOXact_Buffers()

void AtEOXact_Buffers ( bool  isCommit)

Definition at line 2478 of file bufmgr.c.

References Assert, AtEOXact_LocalBuffers(), CheckForBufferLeaks(), and PrivateRefCountOverflowed.

Referenced by AbortTransaction(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), CommitTransaction(), PrepareTransaction(), and WalWriterMain().

2479 {
2481 
2482  AtEOXact_LocalBuffers(isCommit);
2483 
2485 }
static int32 PrivateRefCountOverflowed
Definition: bufmgr.c:191
#define Assert(condition)
Definition: c.h:745
static void CheckForBufferLeaks(void)
Definition: bufmgr.c:2553
void AtEOXact_LocalBuffers(bool isCommit)
Definition: localbuf.c:578

◆ AtProcExit_LocalBuffers()

void AtProcExit_LocalBuffers ( void  )

Definition at line 589 of file localbuf.c.

References CheckForLocalBufferLeaks().

Referenced by AtProcExit_Buffers().

590 {
591  /*
592  * We shouldn't be holding any remaining pins; if we are, and assertions
593  * aren't enabled, we'll fail later in DropRelFileNodeBuffers while trying
594  * to drop the temp rels.
595  */
597 }
static void CheckForLocalBufferLeaks(void)
Definition: localbuf.c:549

◆ BgBufferSync()

bool BgBufferSync ( struct WritebackContext * wb_context)

Definition at line 2108 of file bufmgr.c.

References Assert, bgwriter_lru_maxpages, bgwriter_lru_multiplier, BgWriterDelay, BgWriterStats, BUF_REUSABLE, BUF_WRITTEN, CurrentResourceOwner, DEBUG1, DEBUG2, elog, PgStat_MsgBgWriter::m_buf_alloc, PgStat_MsgBgWriter::m_buf_written_clean, PgStat_MsgBgWriter::m_maxwritten_clean, NBuffers, ResourceOwnerEnlargeBuffers(), StrategySyncStart(), and SyncOneBuffer().

Referenced by BackgroundWriterMain().

2109 {
2110  /* info obtained from freelist.c */
2111  int strategy_buf_id;
2112  uint32 strategy_passes;
2113  uint32 recent_alloc;
2114 
2115  /*
2116  * Information saved between calls so we can determine the strategy
2117  * point's advance rate and avoid scanning already-cleaned buffers.
2118  */
2119  static bool saved_info_valid = false;
2120  static int prev_strategy_buf_id;
2121  static uint32 prev_strategy_passes;
2122  static int next_to_clean;
2123  static uint32 next_passes;
2124 
2125  /* Moving averages of allocation rate and clean-buffer density */
2126  static float smoothed_alloc = 0;
2127  static float smoothed_density = 10.0;
2128 
2129  /* Potentially these could be tunables, but for now, not */
2130  float smoothing_samples = 16;
2131  float scan_whole_pool_milliseconds = 120000.0;
2132 
2133  /* Used to compute how far we scan ahead */
2134  long strategy_delta;
2135  int bufs_to_lap;
2136  int bufs_ahead;
2137  float scans_per_alloc;
2138  int reusable_buffers_est;
2139  int upcoming_alloc_est;
2140  int min_scan_buffers;
2141 
2142  /* Variables for the scanning loop proper */
2143  int num_to_scan;
2144  int num_written;
2145  int reusable_buffers;
2146 
2147  /* Variables for final smoothed_density update */
2148  long new_strategy_delta;
2149  uint32 new_recent_alloc;
2150 
2151  /*
2152  * Find out where the freelist clock sweep currently is, and how many
2153  * buffer allocations have happened since our last call.
2154  */
2155  strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
2156 
2157  /* Report buffer alloc counts to pgstat */
2158  BgWriterStats.m_buf_alloc += recent_alloc;
2159 
2160  /*
2161  * If we're not running the LRU scan, just stop after doing the stats
2162  * stuff. We mark the saved state invalid so that we can recover sanely
2163  * if LRU scan is turned back on later.
2164  */
2165  if (bgwriter_lru_maxpages <= 0)
2166  {
2167  saved_info_valid = false;
2168  return true;
2169  }
2170 
2171  /*
2172  * Compute strategy_delta = how many buffers have been scanned by the
2173  * clock sweep since last time. If first time through, assume none. Then
2174  * see if we are still ahead of the clock sweep, and if so, how many
2175  * buffers we could scan before we'd catch up with it and "lap" it. Note:
2176  * weird-looking coding of xxx_passes comparisons are to avoid bogus
2177  * behavior when the passes counts wrap around.
2178  */
2179  if (saved_info_valid)
2180  {
2181  int32 passes_delta = strategy_passes - prev_strategy_passes;
2182 
2183  strategy_delta = strategy_buf_id - prev_strategy_buf_id;
2184  strategy_delta += (long) passes_delta * NBuffers;
2185 
2186  Assert(strategy_delta >= 0);
2187 
2188  if ((int32) (next_passes - strategy_passes) > 0)
2189  {
2190  /* we're one pass ahead of the strategy point */
2191  bufs_to_lap = strategy_buf_id - next_to_clean;
2192 #ifdef BGW_DEBUG
2193  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2194  next_passes, next_to_clean,
2195  strategy_passes, strategy_buf_id,
2196  strategy_delta, bufs_to_lap);
2197 #endif
2198  }
2199  else if (next_passes == strategy_passes &&
2200  next_to_clean >= strategy_buf_id)
2201  {
2202  /* on same pass, but ahead or at least not behind */
2203  bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
2204 #ifdef BGW_DEBUG
2205  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2206  next_passes, next_to_clean,
2207  strategy_passes, strategy_buf_id,
2208  strategy_delta, bufs_to_lap);
2209 #endif
2210  }
2211  else
2212  {
2213  /*
2214  * We're behind, so skip forward to the strategy point and start
2215  * cleaning from there.
2216  */
2217 #ifdef BGW_DEBUG
2218  elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
2219  next_passes, next_to_clean,
2220  strategy_passes, strategy_buf_id,
2221  strategy_delta);
2222 #endif
2223  next_to_clean = strategy_buf_id;
2224  next_passes = strategy_passes;
2225  bufs_to_lap = NBuffers;
2226  }
2227  }
2228  else
2229  {
2230  /*
2231  * Initializing at startup or after LRU scanning had been off. Always
2232  * start at the strategy point.
2233  */
2234 #ifdef BGW_DEBUG
2235  elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
2236  strategy_passes, strategy_buf_id);
2237 #endif
2238  strategy_delta = 0;
2239  next_to_clean = strategy_buf_id;
2240  next_passes = strategy_passes;
2241  bufs_to_lap = NBuffers;
2242  }
2243 
2244  /* Update saved info for next time */
2245  prev_strategy_buf_id = strategy_buf_id;
2246  prev_strategy_passes = strategy_passes;
2247  saved_info_valid = true;
2248 
2249  /*
2250  * Compute how many buffers had to be scanned for each new allocation, ie,
2251  * 1/density of reusable buffers, and track a moving average of that.
2252  *
2253  * If the strategy point didn't move, we don't update the density estimate
2254  */
2255  if (strategy_delta > 0 && recent_alloc > 0)
2256  {
2257  scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
2258  smoothed_density += (scans_per_alloc - smoothed_density) /
2259  smoothing_samples;
2260  }
2261 
2262  /*
2263  * Estimate how many reusable buffers there are between the current
2264  * strategy point and where we've scanned ahead to, based on the smoothed
2265  * density estimate.
2266  */
2267  bufs_ahead = NBuffers - bufs_to_lap;
2268  reusable_buffers_est = (float) bufs_ahead / smoothed_density;
2269 
2270  /*
2271  * Track a moving average of recent buffer allocations. Here, rather than
2272  * a true average we want a fast-attack, slow-decline behavior: we
2273  * immediately follow any increase.
2274  */
2275  if (smoothed_alloc <= (float) recent_alloc)
2276  smoothed_alloc = recent_alloc;
2277  else
2278  smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
2279  smoothing_samples;
2280 
2281  /* Scale the estimate by a GUC to allow more aggressive tuning. */
2282  upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
2283 
2284  /*
2285  * If recent_alloc remains at zero for many cycles, smoothed_alloc will
2286  * eventually underflow to zero, and the underflows produce annoying
2287  * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
2288  * zero, there's no point in tracking smaller and smaller values of
2289  * smoothed_alloc, so just reset it to exactly zero to avoid this
2290  * syndrome. It will pop back up as soon as recent_alloc increases.
2291  */
2292  if (upcoming_alloc_est == 0)
2293  smoothed_alloc = 0;
2294 
2295  /*
2296  * Even in cases where there's been little or no buffer allocation
2297  * activity, we want to make a small amount of progress through the buffer
2298  * cache so that as many reusable buffers as possible are clean after an
2299  * idle period.
2300  *
2301  * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
2302  * the BGW will be called during the scan_whole_pool time; slice the
2303  * buffer pool into that many sections.
2304  */
2305  min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
2306 
2307  if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
2308  {
2309 #ifdef BGW_DEBUG
2310  elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
2311  upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
2312 #endif
2313  upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
2314  }
2315 
2316  /*
2317  * Now write out dirty reusable buffers, working forward from the
2318  * next_to_clean point, until we have lapped the strategy scan, or cleaned
2319  * enough buffers to match our estimate of the next cycle's allocation
2320  * requirements, or hit the bgwriter_lru_maxpages limit.
2321  */
2322 
2323  /* Make sure we can handle the pin inside SyncOneBuffer */
2325 
2326  num_to_scan = bufs_to_lap;
2327  num_written = 0;
2328  reusable_buffers = reusable_buffers_est;
2329 
2330  /* Execute the LRU scan */
2331  while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
2332  {
2333  int sync_state = SyncOneBuffer(next_to_clean, true,
2334  wb_context);
2335 
2336  if (++next_to_clean >= NBuffers)
2337  {
2338  next_to_clean = 0;
2339  next_passes++;
2340  }
2341  num_to_scan--;
2342 
2343  if (sync_state & BUF_WRITTEN)
2344  {
2345  reusable_buffers++;
2346  if (++num_written >= bgwriter_lru_maxpages)
2347  {
2349  break;
2350  }
2351  }
2352  else if (sync_state & BUF_REUSABLE)
2353  reusable_buffers++;
2354  }
2355 
2356  BgWriterStats.m_buf_written_clean += num_written;
2357 
2358 #ifdef BGW_DEBUG
2359  elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
2360  recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
2361  smoothed_density, reusable_buffers_est, upcoming_alloc_est,
2362  bufs_to_lap - num_to_scan,
2363  num_written,
2364  reusable_buffers - reusable_buffers_est);
2365 #endif
2366 
2367  /*
2368  * Consider the above scan as being like a new allocation scan.
2369  * Characterize its density and update the smoothed one based on it. This
2370  * effectively halves the moving average period in cases where both the
2371  * strategy and the background writer are doing some useful scanning,
2372  * which is helpful because a long memory isn't as desirable on the
2373  * density estimates.
2374  */
2375  new_strategy_delta = bufs_to_lap - num_to_scan;
2376  new_recent_alloc = reusable_buffers - reusable_buffers_est;
2377  if (new_strategy_delta > 0 && new_recent_alloc > 0)
2378  {
2379  scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
2380  smoothed_density += (scans_per_alloc - smoothed_density) /
2381  smoothing_samples;
2382 
2383 #ifdef BGW_DEBUG
2384  elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
2385  new_recent_alloc, new_strategy_delta,
2386  scans_per_alloc, smoothed_density);
2387 #endif
2388  }
2389 
2390  /* Return true if OK to hibernate */
2391  return (bufs_to_lap == 0 && recent_alloc == 0);
2392 }
PgStat_Counter m_buf_alloc
Definition: pgstat.h:434
int StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
Definition: freelist.c:395
#define DEBUG1
Definition: elog.h:25
int BgWriterDelay
Definition: bgwriter.c:64
ResourceOwner CurrentResourceOwner
Definition: resowner.c:142
PgStat_Counter m_maxwritten_clean
Definition: pgstat.h:431
PgStat_Counter m_buf_written_clean
Definition: pgstat.h:430
PgStat_MsgBgWriter BgWriterStats
Definition: pgstat.c:142
double bgwriter_lru_multiplier
Definition: bufmgr.c:126
static int SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
Definition: bufmgr.c:2411
signed int int32
Definition: c.h:362
#define BUF_REUSABLE
Definition: bufmgr.c:69
int bgwriter_lru_maxpages
Definition: bufmgr.c:125
#define DEBUG2
Definition: elog.h:24
unsigned int uint32
Definition: c.h:374
#define BUF_WRITTEN
Definition: bufmgr.c:68
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:917
#define Assert(condition)
Definition: c.h:745
#define elog(elevel,...)
Definition: elog.h:214
int NBuffers
Definition: globals.c:132

◆ BufferGetBlockNumber()

BlockNumber BufferGetBlockNumber ( Buffer  buffer)

Definition at line 2661 of file bufmgr.c.

References Assert, buftag::blockNum, BufferIsLocal, BufferIsPinned, GetBufferDescriptor, GetLocalBufferDescriptor, and BufferDesc::tag.

Referenced by _bt_check_unique(), _bt_checkpage(), _bt_delitems_vacuum(), _bt_doinsert(), _bt_endpoint(), _bt_finish_split(), _bt_first(), _bt_getroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newroot(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_search(), _bt_split(), _bt_unlink_halfdead_page(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_first(), _hash_freeovflpage(), _hash_getnewbuf(), _hash_readnext(), _hash_readpage(), _hash_splitbucket(), allocNewBuffer(), blinsert(), BloomInitMetapage(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_xlog_insert_update(), brinbuild(), brinGetTupleForHeapBlock(), createPostingTree(), dataBeginPlaceToPageLeaf(), dataPrepareDownlink(), doPickSplit(), entryPrepareDownlink(), fill_seq_with_data(), ginEntryInsert(), ginFindParents(), ginFinishSplit(), ginPlaceToPage(), ginRedoDeleteListPages(), ginRedoUpdateMetapage(), ginScanToDelete(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistformdownlink(), gistinserttuples(), gistMemorizeAllDownlinks(), gistplacetopage(), gistRelocateBuildBuffersOnSplit(), gistScanPage(), hash_xlog_add_ovfl_page(), heap_delete(), heap_hot_search_buffer(), heap_insert(), heap_multi_insert(), heap_page_is_all_visible(), heap_prune_chain(), heap_update(), heap_xlog_confirm(), heap_xlog_lock(), makeSublist(), moveLeafs(), moveRightIfItNeeded(), pgstathashindex(), ReadBufferBI(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_get_buffer(), revmap_physical_extend(), spgAddNodeAction(), spgbuild(), spgdoinsert(), SpGistSetLastUsedPage(), spgSplitNodeAction(), spgWalk(), startScanEntry(), terminate_brin_buildstate(), vacuumLeafPage(), visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), visibilitymap_set(), and 
XLogReadBufferExtended().

/*
 * BufferGetBlockNumber — return the block number of the page a pinned
 * buffer holds.  Negative buffer IDs denote backend-local buffers; the
 * pin makes it safe to read the tag without the header spinlock.
 */
2662 {
2663  BufferDesc *bufHdr;
2664 
2665  Assert(BufferIsPinned(buffer));
2666 
2667  if (BufferIsLocal(buffer))
2668  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2669  else
2670  bufHdr = GetBufferDescriptor(buffer - 1);
2671 
2672  /* pinned, so OK to read tag without spinlock */
2673  return bufHdr->tag.blockNum;
2674 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
#define GetLocalBufferDescriptor(id)
#define GetBufferDescriptor(id)
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:94
BufferTag tag

◆ BufferGetLSNAtomic()

XLogRecPtr BufferGetLSNAtomic ( Buffer  buffer)

Definition at line 2924 of file bufmgr.c.

References Assert, BufferGetPage, BufferIsLocal, BufferIsPinned, BufferIsValid, GetBufferDescriptor, LockBufHdr(), PageGetLSN, UnlockBufHdr, and XLogHintBitIsNeeded.

Referenced by _bt_killitems(), _bt_readpage(), gistdoinsert(), gistFindPath(), gistkillitems(), gistScanPage(), SetHintBits(), and XLogSaveBufferForHint().

/*
 * BufferGetLSNAtomic — read the page LSN of a pinned shared buffer.
 * When hint-bit WAL-logging is off (or the buffer is local) a plain read
 * suffices; otherwise the buffer-header spinlock is taken so the 8-byte
 * LSN read is atomic with respect to concurrent updates.
 */
2925 {
2926  BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
2927  char *page = BufferGetPage(buffer);
2928  XLogRecPtr lsn;
2929  uint32 buf_state;
2930 
2931  /*
2932  * If we don't need locking for correctness, fastpath out.
2933  */
2934  if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
2935  return PageGetLSN(page);
2936 
2937  /* Make sure we've got a real buffer, and that we hold a pin on it. */
2938  Assert(BufferIsValid(buffer));
2939  Assert(BufferIsPinned(buffer));
2940 
2941  buf_state = LockBufHdr(bufHdr);
2942  lsn = PageGetLSN(page);
2943  UnlockBufHdr(bufHdr, buf_state);
2944 
2945  return lsn;
2946 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:374
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4318
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define PageGetLSN(page)
Definition: bufpage.h:366
#define UnlockBufHdr(desc, s)
#define XLogHintBitIsNeeded()
Definition: xlog.h:202

◆ BufferGetTag()

void BufferGetTag ( Buffer  buffer,
RelFileNode rnode,
ForkNumber forknum,
BlockNumber blknum 
)

Definition at line 2682 of file bufmgr.c.

References Assert, buftag::blockNum, BufferIsLocal, BufferIsPinned, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, buftag::rnode, and BufferDesc::tag.

Referenced by fsm_search_avail(), ginRedoInsertEntry(), log_newpage_buffer(), ResolveCminCmaxDuringDecoding(), XLogRegisterBuffer(), and XLogSaveBufferForHint().

/*
 * BufferGetTag — return the relfilenode, fork number and block number of
 * the page held in a pinned buffer, via the three output parameters.
 */
2684 {
2685  BufferDesc *bufHdr;
2686 
2687  /* Do the same checks as BufferGetBlockNumber. */
2688  Assert(BufferIsPinned(buffer));
2689 
2690  if (BufferIsLocal(buffer))
2691  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2692  else
2693  bufHdr = GetBufferDescriptor(buffer - 1);
2694 
2695  /* pinned, so OK to read tag without spinlock */
2696  *rnode = bufHdr->tag.rnode;
2697  *forknum = bufHdr->tag.forkNum;
2698  *blknum = bufHdr->tag.blockNum;
2699 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
ForkNumber forkNum
Definition: buf_internals.h:93
#define GetLocalBufferDescriptor(id)
#define GetBufferDescriptor(id)
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:94
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag

◆ BufferIsPermanent()

bool BufferIsPermanent ( Buffer  buffer)

Definition at line 2894 of file bufmgr.c.

References Assert, BM_PERMANENT, BufferIsLocal, BufferIsPinned, BufferIsValid, GetBufferDescriptor, pg_atomic_read_u32(), and BufferDesc::state.

Referenced by SetHintBits().

/*
 * BufferIsPermanent — true iff the pinned buffer belongs to a permanent
 * (WAL-logged) relation.  Local buffers are temp-relation-only, hence
 * never permanent; for shared buffers the BM_PERMANENT flag is read
 * atomically, no spinlock needed while the pin is held.
 */
2895 {
2896  BufferDesc *bufHdr;
2897 
2898  /* Local buffers are used only for temp relations. */
2899  if (BufferIsLocal(buffer))
2900  return false;
2901 
2902  /* Make sure we've got a real buffer, and that we hold a pin on it. */
2903  Assert(BufferIsValid(buffer));
2904  Assert(BufferIsPinned(buffer));
2905 
2906  /*
2907  * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
2908  * need not bother with the buffer header spinlock. Even if someone else
2909  * changes the buffer header state while we're doing this, the state is
2910  * changed atomically, so we'll read the old value or the new value, but
2911  * not random garbage.
2912  */
2913  bufHdr = GetBufferDescriptor(buffer - 1);
2914  return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
2915 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
#define BM_PERMANENT
Definition: buf_internals.h:66
#define GetBufferDescriptor(id)
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
pg_atomic_uint32 state
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ BufferShmemSize()

Size BufferShmemSize ( void  )

Definition at line 157 of file buf_init.c.

References add_size(), mul_size(), NBuffers, PG_CACHE_LINE_SIZE, and StrategyShmemSize().

Referenced by CreateSharedMemoryAndSemaphores().

/*
 * BufferShmemSize — compute the shared-memory footprint of the buffer
 * pool: descriptors, data pages, freelist state, the separate I/O lock
 * array, and the checkpoint sort array, plus alignment padding.
 */
158 {
159  Size size = 0;
160 
161  /* size of buffer descriptors */
162  size = add_size(size, mul_size(NBuffers, sizeof(BufferDescPadded)));
163  /* to allow aligning buffer descriptors */
164  size = add_size(size, PG_CACHE_LINE_SIZE);
165 
166  /* size of data pages */
167  size = add_size(size, mul_size(NBuffers, BLCKSZ));
168 
169  /* size of stuff controlled by freelist.c */
170  size = add_size(size, StrategyShmemSize());
171 
172  /*
173  * It would be nice to include the I/O locks in the BufferDesc, but that
174  * would increase the size of a BufferDesc to more than one cache line,
175  * and benchmarking has shown that keeping every BufferDesc aligned on a
176  * cache line boundary is important for performance. So, instead, the
177  * array of I/O locks is allocated in a separate tranche. Because those
178  * locks are not highly contended, we lay out the array with minimal
179  * padding.
180  */
181  size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded)));
182  /* to allow aligning the above */
183  size = add_size(size, PG_CACHE_LINE_SIZE);
184 
185  /* size of checkpoint sort array in bufmgr.c */
186  size = add_size(size, mul_size(NBuffers, sizeof(CkptSortItem)));
187 
188  return size;
189 }
#define PG_CACHE_LINE_SIZE
Size mul_size(Size s1, Size s2)
Definition: shmem.c:515
Size add_size(Size s1, Size s2)
Definition: shmem.c:498
size_t Size
Definition: c.h:473
int NBuffers
Definition: globals.c:132
Size StrategyShmemSize(void)
Definition: freelist.c:454

◆ BufmgrCommit()

void BufmgrCommit ( void  )

Definition at line 2647 of file bufmgr.c.

Referenced by PrepareTransaction(), and RecordTransactionCommit().

/*
 * BufmgrCommit — historical hook called at transaction commit/prepare;
 * now a no-op retained for its callers.
 */
2648 {
2649  /* Nothing to do in bufmgr anymore... */
2650 }

◆ CheckPointBuffers()

void CheckPointBuffers ( int  flags)

Definition at line 2637 of file bufmgr.c.

References BufferSync().

Referenced by CheckPointGuts().

/*
 * CheckPointBuffers — checkpoint entry point: flush all dirty shared
 * buffers by delegating to BufferSync with the checkpoint flags.
 */
2638 {
2639  BufferSync(flags);
2640 }
static void BufferSync(int flags)
Definition: bufmgr.c:1831

◆ ConditionalLockBuffer()

bool ConditionalLockBuffer ( Buffer  buffer)

Definition at line 3776 of file bufmgr.c.

References Assert, buf, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, GetBufferDescriptor, LW_EXCLUSIVE, and LWLockConditionalAcquire().

Referenced by _bt_conditionallockbuf(), BloomNewBuffer(), ConditionalLockBufferForCleanup(), GinNewBuffer(), gistNewBuffer(), RelationGetBufferForTuple(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), and SpGistUpdateMetaPage().

/*
 * ConditionalLockBuffer — try to take the exclusive content lock on a
 * pinned buffer without blocking; returns true on success.  Local buffers
 * never need the lock (only the owning backend touches them), so report
 * success immediately.
 *
 * NOTE(review): line 3786 was dropped from this listing during extraction
 * (it rendered as a hyperlink); restored below so the function has its
 * return statement.
 */
3777 {
3778  BufferDesc *buf;
3779 
3780  Assert(BufferIsPinned(buffer));
3781  if (BufferIsLocal(buffer))
3782  return true; /* act as though we got it */
3783 
3784  buf = GetBufferDescriptor(buffer - 1);
3785 
3786  return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
3787  LW_EXCLUSIVE);
3788 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1380
static char * buf
Definition: pg_test_fsync.c:68
#define GetBufferDescriptor(id)
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37

◆ ConditionalLockBufferForCleanup()

bool ConditionalLockBufferForCleanup ( Buffer  buffer)

Definition at line 3944 of file bufmgr.c.

References Assert, BUF_STATE_GET_REFCOUNT, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsValid, ConditionalLockBuffer(), GetBufferDescriptor, GetPrivateRefCount(), LocalRefCount, LockBuffer(), LockBufHdr(), PrivateRefCountEntry::refcount, and UnlockBufHdr.

Referenced by _hash_finish_split(), _hash_getbuf_with_condlock_cleanup(), heap_page_prune_opt(), lazy_scan_heap(), and lazy_vacuum_heap().

/*
 * ConditionalLockBufferForCleanup — non-blocking attempt to get a cleanup
 * lock: exclusive content lock plus the guarantee that we hold the only
 * pin.  Returns false (releasing any lock taken) if another pin exists or
 * the lock cannot be acquired immediately.
 */
3945 {
3946  BufferDesc *bufHdr;
3947  uint32 buf_state,
3948  refcount;
3949 
3950  Assert(BufferIsValid(buffer));
3951 
3952  if (BufferIsLocal(buffer))
3953  {
3954  refcount = LocalRefCount[-buffer - 1];
3955  /* There should be exactly one pin */
3956  Assert(refcount > 0);
3957  if (refcount != 1)
3958  return false;
3959  /* Nobody else to wait for */
3960  return true;
3961  }
3962 
3963  /* There should be exactly one local pin */
3964  refcount = GetPrivateRefCount(buffer);
3965  Assert(refcount);
3966  if (refcount != 1)
3967  return false;
3968 
3969  /* Try to acquire lock */
3970  if (!ConditionalLockBuffer(buffer))
3971  return false;
3972 
3973  bufHdr = GetBufferDescriptor(buffer - 1);
3974  buf_state = LockBufHdr(bufHdr);
3975  refcount = BUF_STATE_GET_REFCOUNT(buf_state);
3976 
3977  Assert(refcount > 0);
3978  if (refcount == 1)
3979  {
3980  /* Successfully acquired exclusive lock with pincount 1 */
3981  UnlockBufHdr(bufHdr, buf_state);
3982  return true;
3983  }
3984 
3985  /* Failed, so release the lock */
3986  UnlockBufHdr(bufHdr, buf_state);
3987  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3988  return false;
3989 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:379
unsigned int uint32
Definition: c.h:374
bool ConditionalLockBuffer(Buffer buffer)
Definition: bufmgr.c:3776
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3750
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4318
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define UnlockBufHdr(desc, s)
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48

◆ DropDatabaseBuffers()

void DropDatabaseBuffers ( Oid  dbid)

Definition at line 3147 of file bufmgr.c.

References buftag::blockNum, buf, BufferDescriptorGetBuffer, RelFileNode::dbNode, elog, buftag::forkNum, BufferDesc::freeNext, GetBufferDescriptor, GetPrivateRefCount(), i, InvalidateBuffer(), InvalidBackendId, LockBufHdr(), LOG, NBuffers, relpathbackend, relpathperm, buftag::rnode, BufferDesc::tag, and UnlockBufHdr.

Referenced by dbase_redo(), dropdb(), and movedb().

/*
 * DropDatabaseBuffers — invalidate every shared buffer belonging to the
 * given database.  The dbNode precheck is done unlocked (safe because the
 * tag can only change away from a value we care about) and rechecked
 * under the header spinlock before invalidating.
 */
3148 {
3149  int i;
3150 
3151  /*
3152  * We needn't consider local buffers, since by assumption the target
3153  * database isn't our own.
3154  */
3155 
3156  for (i = 0; i < NBuffers; i++)
3157  {
3158  BufferDesc *bufHdr = GetBufferDescriptor(i);
3159  uint32 buf_state;
3160 
3161  /*
3162  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3163  * and saves some cycles.
3164  */
3165  if (bufHdr->tag.rnode.dbNode != dbid)
3166  continue;
3167 
3168  buf_state = LockBufHdr(bufHdr);
3169  if (bufHdr->tag.rnode.dbNode == dbid)
3170  InvalidateBuffer(bufHdr); /* releases spinlock */
3171  else
3172  UnlockBufHdr(bufHdr, buf_state);
3173  }
3174 }
static void InvalidateBuffer(BufferDesc *buf)
Definition: bufmgr.c:1371
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:374
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4318
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:132

◆ DropRelFileNodeBuffers()

void DropRelFileNodeBuffers ( RelFileNodeBackend  rnode,
ForkNumber forkNum,
int  nforks,
BlockNumber firstDelBlock 
)

Definition at line 2975 of file bufmgr.c.

References RelFileNodeBackend::backend, buftag::blockNum, DropRelFileNodeLocalBuffers(), buftag::forkNum, GetBufferDescriptor, i, InvalidateBuffer(), LockBufHdr(), MyBackendId, NBuffers, RelFileNodeBackend::node, RelFileNodeBackendIsTemp, RelFileNodeEquals, buftag::rnode, BufferDesc::tag, and UnlockBufHdr.

Referenced by smgrtruncate().

/*
 * DropRelFileNodeBuffers — invalidate buffers for the given relation at or
 * beyond firstDelBlock[j] in each listed fork (used by smgrtruncate).
 * Temp relations are delegated to localbuf.c; shared buffers are scanned
 * with an unlocked tag precheck, rechecked under the header spinlock.
 */
2977 {
2978  int i;
2979  int j;
2980 
2981  /* If it's a local relation, it's localbuf.c's problem. */
2982  if (RelFileNodeBackendIsTemp(rnode))
2983  {
2984  if (rnode.backend == MyBackendId)
2985  {
2986  for (j = 0; j < nforks; j++)
2987  DropRelFileNodeLocalBuffers(rnode.node, forkNum[j],
2988  firstDelBlock[j]);
2989  }
2990  return;
2991  }
2992 
2993  for (i = 0; i < NBuffers; i++)
2994  {
2995  BufferDesc *bufHdr = GetBufferDescriptor(i);
2996  uint32 buf_state;
2997 
2998  /*
2999  * We can make this a tad faster by prechecking the buffer tag before
3000  * we attempt to lock the buffer; this saves a lot of lock
3001  * acquisitions in typical cases. It should be safe because the
3002  * caller must have AccessExclusiveLock on the relation, or some other
3003  * reason to be certain that no one is loading new pages of the rel
3004  * into the buffer pool. (Otherwise we might well miss such pages
3005  * entirely.) Therefore, while the tag might be changing while we
3006  * look at it, it can't be changing *to* a value we care about, only
3007  * *away* from such a value. So false negatives are impossible, and
3008  * false positives are safe because we'll recheck after getting the
3009  * buffer lock.
3010  *
3011  * We could check forkNum and blockNum as well as the rnode, but the
3012  * incremental win from doing so seems small.
3013  */
3014  if (!RelFileNodeEquals(bufHdr->tag.rnode, rnode.node))
3015  continue;
3016 
3017  buf_state = LockBufHdr(bufHdr);
3018 
3019  for (j = 0; j < nforks; j++)
3020  {
3021  if (RelFileNodeEquals(bufHdr->tag.rnode, rnode.node) &&
3022  bufHdr->tag.forkNum == forkNum[j] &&
3023  bufHdr->tag.blockNum >= firstDelBlock[j])
3024  {
3025  InvalidateBuffer(bufHdr); /* releases spinlock */
3026  break;
3027  }
3028  }
3029  if (j >= nforks)
3030  UnlockBufHdr(bufHdr, buf_state);
3031  }
3032 }
BackendId MyBackendId
Definition: globals.c:81
#define RelFileNodeBackendIsTemp(rnode)
Definition: relfilenode.h:78
ForkNumber forkNum
Definition: buf_internals.h:93
static void InvalidateBuffer(BufferDesc *buf)
Definition: bufmgr.c:1371
void DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum, BlockNumber firstDelBlock)
Definition: localbuf.c:326
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:374
RelFileNode node
Definition: relfilenode.h:74
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4318
BackendId backend
Definition: relfilenode.h:75
BlockNumber blockNum
Definition: buf_internals.h:94
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:132
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88

◆ DropRelFileNodesAllBuffers()

void DropRelFileNodesAllBuffers ( RelFileNodeBackend rnodes,
int  nnodes 
)

Definition at line 3044 of file bufmgr.c.

References DropRelFileNodeAllLocalBuffers(), GetBufferDescriptor, i, InvalidateBuffer(), LockBufHdr(), MyBackendId, NBuffers, RelFileNodeBackend::node, palloc(), pfree(), pg_qsort(), RelFileNodeBackendIsTemp, RelFileNodeEquals, RELS_BSEARCH_THRESHOLD, buftag::rnode, rnode_comparator(), BufferDesc::tag, and UnlockBufHdr.

Referenced by smgrdounlinkall().

/*
 * DropRelFileNodesAllBuffers — invalidate all buffers belonging to any of
 * the given relfilenodes (used by smgrdounlinkall).  Temp relations go to
 * localbuf.c; for shared buffers, membership is tested per buffer either
 * by linear scan or, above RELS_BSEARCH_THRESHOLD, by bsearch on a sorted
 * copy of the node list.
 *
 * NOTE(review): line 3118 (the comparator argument of bsearch) was lost
 * during extraction; restored below.
 */
3045 {
3046  int i,
3047  n = 0;
3048  RelFileNode *nodes;
3049  bool use_bsearch;
3050 
3051  if (nnodes == 0)
3052  return;
3053 
3054  nodes = palloc(sizeof(RelFileNode) * nnodes); /* non-local relations */
3055 
3056  /* If it's a local relation, it's localbuf.c's problem. */
3057  for (i = 0; i < nnodes; i++)
3058  {
3059  if (RelFileNodeBackendIsTemp(rnodes[i]))
3060  {
3061  if (rnodes[i].backend == MyBackendId)
3062  DropRelFileNodeAllLocalBuffers(rnodes[i].node);
3063  }
3064  else
3065  nodes[n++] = rnodes[i].node;
3066  }
3067 
3068  /*
3069  * If there are no non-local relations, then we're done. Release the
3070  * memory and return.
3071  */
3072  if (n == 0)
3073  {
3074  pfree(nodes);
3075  return;
3076  }
3077 
3078  /*
3079  * For low number of relations to drop just use a simple walk through, to
3080  * save the bsearch overhead. The threshold to use is rather a guess than
3081  * an exactly determined value, as it depends on many factors (CPU and RAM
3082  * speeds, amount of shared buffers etc.).
3083  */
3084  use_bsearch = n > RELS_BSEARCH_THRESHOLD;
3085 
3086  /* sort the list of rnodes if necessary */
3087  if (use_bsearch)
3088  pg_qsort(nodes, n, sizeof(RelFileNode), rnode_comparator);
3089 
3090  for (i = 0; i < NBuffers; i++)
3091  {
3092  RelFileNode *rnode = NULL;
3093  BufferDesc *bufHdr = GetBufferDescriptor(i);
3094  uint32 buf_state;
3095 
3096  /*
3097  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3098  * and saves some cycles.
3099  */
3100 
3101  if (!use_bsearch)
3102  {
3103  int j;
3104 
3105  for (j = 0; j < n; j++)
3106  {
3107  if (RelFileNodeEquals(bufHdr->tag.rnode, nodes[j]))
3108  {
3109  rnode = &nodes[j];
3110  break;
3111  }
3112  }
3113  }
3114  else
3115  {
3116  rnode = bsearch((const void *) &(bufHdr->tag.rnode),
3117  nodes, n, sizeof(RelFileNode),
3118  rnode_comparator);
3119  }
3120 
3121  /* buffer doesn't belong to any of the given relfilenodes; skip it */
3122  if (rnode == NULL)
3123  continue;
3124 
3125  buf_state = LockBufHdr(bufHdr);
3126  if (RelFileNodeEquals(bufHdr->tag.rnode, (*rnode)))
3127  InvalidateBuffer(bufHdr); /* releases spinlock */
3128  else
3129  UnlockBufHdr(bufHdr, buf_state);
3130  }
3131 
3132  pfree(nodes);
3133 }
BackendId MyBackendId
Definition: globals.c:81
#define RelFileNodeBackendIsTemp(rnode)
Definition: relfilenode.h:78
static void InvalidateBuffer(BufferDesc *buf)
Definition: bufmgr.c:1371
#define RELS_BSEARCH_THRESHOLD
Definition: bufmgr.c:71
void DropRelFileNodeAllLocalBuffers(RelFileNode rnode)
Definition: localbuf.c:373
void pfree(void *pointer)
Definition: mcxt.c:1057
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:374
static int rnode_comparator(const void *p1, const void *p2)
Definition: bufmgr.c:4291
RelFileNode node
Definition: relfilenode.h:74
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4318
void pg_qsort(void *base, size_t nel, size_t elsize, int(*cmp)(const void *, const void *))
Definition: qsort.c:113
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
void * palloc(Size size)
Definition: mcxt.c:950
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:132
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88

◆ FlushDatabaseBuffers()

void FlushDatabaseBuffers ( Oid  dbid)

Definition at line 3448 of file bufmgr.c.

References BM_DIRTY, BM_VALID, BufferDescriptorGetContentLock, CurrentResourceOwner, RelFileNode::dbNode, FlushBuffer(), GetBufferDescriptor, i, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, PinBuffer_Locked(), ReservePrivateRefCountEntry(), ResourceOwnerEnlargeBuffers(), buftag::rnode, BufferDesc::tag, UnlockBufHdr, and UnpinBuffer().

Referenced by dbase_redo().

/*
 * FlushDatabaseBuffers — write out all dirty, valid shared buffers of the
 * given database (used by dbase_redo).  Each qualifying buffer is pinned,
 * content-locked in shared mode, flushed, then unlocked and unpinned.
 *
 * NOTE(review): lines 3454, 3469, 3476 and 3478 were dropped from this
 * listing during extraction (they were hyperlinks); restored below — all
 * four callees appear in this page's References list.
 */
3449 {
3450  int i;
3451  BufferDesc *bufHdr;
3452 
3453  /* Make sure we can handle the pin inside the loop */
3454  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3455 
3456  for (i = 0; i < NBuffers; i++)
3457  {
3458  uint32 buf_state;
3459 
3460  bufHdr = GetBufferDescriptor(i);
3461 
3462  /*
3463  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3464  * and saves some cycles.
3465  */
3466  if (bufHdr->tag.rnode.dbNode != dbid)
3467  continue;
3468 
3469  ReservePrivateRefCountEntry();
3470 
3471  buf_state = LockBufHdr(bufHdr);
3472  if (bufHdr->tag.rnode.dbNode == dbid &&
3473  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3474  {
3475  PinBuffer_Locked(bufHdr);
3476  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3477  FlushBuffer(bufHdr, NULL);
3478  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3479  UnpinBuffer(bufHdr, true);
3480  }
3481  else
3482  UnlockBufHdr(bufHdr, buf_state);
3483  }
3484 }
ResourceOwner CurrentResourceOwner
Definition: resowner.c:142
#define BM_DIRTY
Definition: buf_internals.h:58
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln)
Definition: bufmgr.c:2721
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1812
#define GetBufferDescriptor(id)
unsigned int uint32
Definition: c.h:374
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1738
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:917
#define BM_VALID
Definition: buf_internals.h:59
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4318
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:1693
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1208
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:132
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:207

◆ FlushOneBuffer()

void FlushOneBuffer ( Buffer  buffer)

Definition at line 3491 of file bufmgr.c.

References Assert, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, FlushBuffer(), GetBufferDescriptor, and LWLockHeldByMe().

Referenced by hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), and XLogReadBufferForRedoExtended().

/*
 * FlushOneBuffer — write out a single pinned shared buffer whose content
 * lock the caller already holds (used during WAL redo).
 *
 * NOTE(review): line 3502 — the Assert that the caller holds the content
 * lock (LWLockHeldByMe is in this page's References list) — was dropped
 * during extraction; restored below.
 */
3492 {
3493  BufferDesc *bufHdr;
3494 
3495  /* currently not needed, but no fundamental reason not to support */
3496  Assert(!BufferIsLocal(buffer));
3497 
3498  Assert(BufferIsPinned(buffer));
3499 
3500  bufHdr = GetBufferDescriptor(buffer - 1);
3501 
3502  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
3503 
3504  FlushBuffer(bufHdr, NULL);
3505 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
bool LWLockHeldByMe(LWLock *l)
Definition: lwlock.c:1928
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln)
Definition: bufmgr.c:2721
#define GetBufferDescriptor(id)
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37

◆ FlushRelationBuffers()

void FlushRelationBuffers ( Relation  rel)

Definition at line 3252 of file bufmgr.c.

References ErrorContextCallback::arg, buftag::blockNum, BM_DIRTY, BM_JUST_DIRTIED, BM_VALID, BufferDescriptorGetContentLock, ErrorContextCallback::callback, CurrentResourceOwner, error_context_stack, FlushBuffer(), buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, i, local_buffer_write_error_callback(), LocalBufHdrGetBlock, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, NLocBuffer, PageSetChecksumInplace(), pg_atomic_read_u32(), pg_atomic_unlocked_write_u32(), PinBuffer_Locked(), ErrorContextCallback::previous, RelationData::rd_node, RelationData::rd_smgr, RelationOpenSmgr, RelationUsesLocalBuffers, RelFileNodeEquals, ReservePrivateRefCountEntry(), ResourceOwnerEnlargeBuffers(), buftag::rnode, smgrwrite(), BufferDesc::state, BufferDesc::tag, UnlockBufHdr, and UnpinBuffer().

Referenced by heapam_relation_copy_data(), and index_copy_data().

/*
 * FlushRelationBuffers — write out all dirty, valid buffers of a relation.
 * Temp relations are handled inline via smgrwrite on local buffers (with
 * an error-context callback and checksum update); shared buffers go
 * through pin + shared content lock + FlushBuffer.
 *
 * NOTE(review): lines 3277, 3302, 3317, 3324 and 3326 were dropped from
 * this listing during extraction (hyperlinks); restored below — each
 * restored callee appears in this page's References list.
 */
3253 {
3254  int i;
3255  BufferDesc *bufHdr;
3256 
3257  /* Open rel at the smgr level if not already done */
3258  RelationOpenSmgr(rel);
3259 
3260  if (RelationUsesLocalBuffers(rel))
3261  {
3262  for (i = 0; i < NLocBuffer; i++)
3263  {
3264  uint32 buf_state;
3265 
3266  bufHdr = GetLocalBufferDescriptor(i);
3267  if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
3268  ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
3269  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3270  {
3271  ErrorContextCallback errcallback;
3272  Page localpage;
3273 
3274  localpage = (char *) LocalBufHdrGetBlock(bufHdr);
3275 
3276  /* Setup error traceback support for ereport() */
3277  errcallback.callback = local_buffer_write_error_callback;
3278  errcallback.arg = (void *) bufHdr;
3279  errcallback.previous = error_context_stack;
3280  error_context_stack = &errcallback;
3281 
3282  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
3283 
3284  smgrwrite(rel->rd_smgr,
3285  bufHdr->tag.forkNum,
3286  bufHdr->tag.blockNum,
3287  localpage,
3288  false);
3289 
3290  buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
3291  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
3292 
3293  /* Pop the error context stack */
3294  error_context_stack = errcallback.previous;
3295  }
3296  }
3297 
3298  return;
3299  }
3300 
3301  /* Make sure we can handle the pin inside the loop */
3302  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3303 
3304  for (i = 0; i < NBuffers; i++)
3305  {
3306  uint32 buf_state;
3307 
3308  bufHdr = GetBufferDescriptor(i);
3309 
3310  /*
3311  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3312  * and saves some cycles.
3313  */
3314  if (!RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node))
3315  continue;
3316 
3317  ReservePrivateRefCountEntry();
3318 
3319  buf_state = LockBufHdr(bufHdr);
3320  if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
3321  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3322  {
3323  PinBuffer_Locked(bufHdr);
3324  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3325  FlushBuffer(bufHdr, rel->rd_smgr);
3326  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3327  UnpinBuffer(bufHdr, true);
3328  }
3329  else
3330  UnlockBufHdr(bufHdr, buf_state);
3331  }
3332 }
#define LocalBufHdrGetBlock(bufHdr)
Definition: bufmgr.c:64
ForkNumber forkNum
Definition: buf_internals.h:93
static void local_buffer_write_error_callback(void *arg)
Definition: bufmgr.c:4272
ResourceOwner CurrentResourceOwner
Definition: resowner.c:142
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
#define GetLocalBufferDescriptor(id)
#define BM_DIRTY
Definition: buf_internals.h:58
void(* callback)(void *arg)
Definition: elog.h:229
struct ErrorContextCallback * previous
Definition: elog.h:228
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln)
Definition: bufmgr.c:2721
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1812
ErrorContextCallback * error_context_stack
Definition: elog.c:92
#define RelationOpenSmgr(relation)
Definition: rel.h:513
int NLocBuffer
Definition: localbuf.c:41
void smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer, bool skipFsync)
Definition: smgr.c:524
#define GetBufferDescriptor(id)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:63
unsigned int uint32
Definition: c.h:374
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1738
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:917
#define BM_VALID
Definition: buf_internals.h:59
RelFileNode rd_node
Definition: rel.h:55
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4318
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:1693
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1414
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1208
BlockNumber blockNum
Definition: buf_internals.h:94
RelFileNode rnode
Definition: buf_internals.h:92
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:572
BufferTag tag
#define UnlockBufHdr(desc, s)
int i
int NBuffers
Definition: globals.c:132
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:277
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:207
pg_atomic_uint32 state
Pointer Page
Definition: bufpage.h:78
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ FlushRelationsAllBuffers()

void FlushRelationsAllBuffers ( struct SMgrRelationData **  smgrs,
int  nrels 
)

◆ FreeAccessStrategy()

void FreeAccessStrategy ( BufferAccessStrategy  strategy)

Definition at line 597 of file freelist.c.

References pfree().

Referenced by blgetbitmap(), FreeBulkInsertState(), heap_endscan(), and initscan().

/*
 * FreeAccessStrategy — release a strategy object from GetAccessStrategy;
 * NULL (the "default" strategy) is accepted and ignored.
 */
598 {
599  /* don't crash if called on a "default" strategy */
600  if (strategy != NULL)
601  pfree(strategy);
602 }
void pfree(void *pointer)
Definition: mcxt.c:1057

◆ GetAccessStrategy()

BufferAccessStrategy GetAccessStrategy ( BufferAccessStrategyType  btype)

Definition at line 542 of file freelist.c.

References BAS_BULKREAD, BAS_BULKWRITE, BAS_NORMAL, BAS_VACUUM, BufferAccessStrategyData::btype, BufferAccessStrategyData::buffers, elog, ERROR, Min, NBuffers, offsetof, palloc0(), and BufferAccessStrategyData::ring_size.

Referenced by blgetbitmap(), bt_check_every_level(), collect_corrupt_items(), collect_visibility_data(), do_autovacuum(), GetBulkInsertState(), initscan(), pgstat_index(), pgstathashindex(), pgstatindex_impl(), statapprox_heap(), and vacuum().

543 {
544  BufferAccessStrategy strategy;
545  int ring_size;
546 
547  /*
548  * Select ring size to use. See buffer/README for rationales.
549  *
550  * Note: if you change the ring size for BAS_BULKREAD, see also
551  * SYNC_SCAN_REPORT_INTERVAL in access/heap/syncscan.c.
552  */
553  switch (btype)
554  {
555  case BAS_NORMAL:
556  /* if someone asks for NORMAL, just give 'em a "default" object */
557  return NULL;
558 
559  case BAS_BULKREAD:
560  ring_size = 256 * 1024 / BLCKSZ;
561  break;
562  case BAS_BULKWRITE:
563  ring_size = 16 * 1024 * 1024 / BLCKSZ;
564  break;
565  case BAS_VACUUM:
566  ring_size = 256 * 1024 / BLCKSZ;
567  break;
568 
569  default:
570  elog(ERROR, "unrecognized buffer access strategy: %d",
571  (int) btype);
572  return NULL; /* keep compiler quiet */
573  }
574 
575  /* Make sure ring isn't an undue fraction of shared buffers */
576  ring_size = Min(NBuffers / 8, ring_size);
577 
578  /* Allocate the object and initialize all elements to zeroes */
579  strategy = (BufferAccessStrategy)
580  palloc0(offsetof(BufferAccessStrategyData, buffers) +
581  ring_size * sizeof(Buffer));
582 
583  /* Set fields that don't start out zero */
584  strategy->btype = btype;
585  strategy->ring_size = ring_size;
586 
587  return strategy;
588 }
struct BufferAccessStrategyData * BufferAccessStrategy
Definition: buf.h:44
#define Min(x, y)
Definition: c.h:927
#define ERROR
Definition: elog.h:43
BufferAccessStrategyType btype
Definition: freelist.c:74
void * palloc0(Size size)
Definition: mcxt.c:981
#define elog(elevel,...)
Definition: elog.h:214
int NBuffers
Definition: globals.c:132
int Buffer
Definition: buf.h:23
#define offsetof(type, field)
Definition: c.h:668

◆ HoldingBufferPinThatDelaysRecovery()

bool HoldingBufferPinThatDelaysRecovery ( void  )

Definition at line 3918 of file bufmgr.c.

References GetPrivateRefCount(), and GetStartupBufferPinWaitBufId().

Referenced by CheckRecoveryConflictDeadlock(), and RecoveryConflictInterrupt().

3919 {
3920  int bufid = GetStartupBufferPinWaitBufId();
3921 
3922  /*
3923  * If we get woken slowly then it's possible that the Startup process was
3924  * already woken by other backends before we got here. Also possible that
3925  * we get here by multiple interrupts or interrupts at inappropriate
3926  * times, so make sure we do nothing if the bufid is not set.
3927  */
3928  if (bufid < 0)
3929  return false;
3930 
3931  if (GetPrivateRefCount(bufid + 1) > 0)
3932  return true;
3933 
3934  return false;
3935 }
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:379
int GetStartupBufferPinWaitBufId(void)
Definition: proc.c:652

◆ IncrBufferRefCount()

void IncrBufferRefCount ( Buffer  buffer)

Definition at line 3549 of file bufmgr.c.

References Assert, BufferIsLocal, BufferIsPinned, CurrentResourceOwner, GetPrivateRefCountEntry(), LocalRefCount, PrivateRefCountEntry::refcount, ResourceOwnerEnlargeBuffers(), and ResourceOwnerRememberBuffer().

Referenced by _bt_steppage(), btrestrpos(), entryLoadMoreItems(), ReadBufferBI(), scanPostingTree(), startScanEntry(), and tts_buffer_heap_store_tuple().

3550 {
3551  Assert(BufferIsPinned(buffer));
3552  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3553  if (BufferIsLocal(buffer))
3554  LocalRefCount[-buffer - 1]++;
3555  else
3556  {
3557  PrivateRefCountEntry *ref;
3558 
3559  ref = GetPrivateRefCountEntry(buffer, true);
3560  Assert(ref != NULL);
3561  ref->refcount++;
3562  }
3563  ResourceOwnerRememberBuffer(CurrentResourceOwner, buffer);
3564 }
static PrivateRefCountEntry * GetPrivateRefCountEntry(Buffer buffer, bool do_move)
Definition: bufmgr.c:299
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
ResourceOwner CurrentResourceOwner
Definition: resowner.c:142
void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:930
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:917
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37
int32 * LocalRefCount
Definition: localbuf.c:45

◆ InitBufferPool()

void InitBufferPool ( void  )

Definition at line 67 of file buf_init.c.

References Assert, backend_flush_after, buf, BufferDesc::buf_id, BufferBlocks, BufferDescriptorGetContentLock, BufferDescriptorGetIOLock, CLEAR_BUFFERTAG, BufferDesc::freeNext, FREENEXT_END_OF_LIST, GetBufferDescriptor, i, LWLockInitialize(), LWTRANCHE_BUFFER_CONTENT, LWTRANCHE_BUFFER_IO, NBuffers, pg_atomic_init_u32(), ShmemInitStruct(), BufferDesc::state, StrategyInitialize(), BufferDesc::tag, BufferDesc::wait_backend_pid, and WritebackContextInit().

Referenced by CreateSharedMemoryAndSemaphores().

68 {
69  bool foundBufs,
70  foundDescs,
71  foundIOLocks,
72  foundBufCkpt;
73 
74  /* Align descriptors to a cacheline boundary. */
75  BufferDescriptors = (BufferDescPadded *)
76  ShmemInitStruct("Buffer Descriptors",
77  NBuffers * sizeof(BufferDescPadded),
78  &foundDescs);
79 
80  BufferBlocks = (char *)
81  ShmemInitStruct("Buffer Blocks",
82  NBuffers * (Size) BLCKSZ, &foundBufs);
83 
84  /* Align lwlocks to cacheline boundary */
85  BufferIOLWLockArray = (LWLockMinimallyPadded *)
86  ShmemInitStruct("Buffer IO Locks",
87  NBuffers * (Size) sizeof(LWLockMinimallyPadded),
88  &foundIOLocks);
89 
90  /*
91  * The array used to sort to-be-checkpointed buffer ids is located in
92  * shared memory, to avoid having to allocate significant amounts of
93  * memory at runtime. As that'd be in the middle of a checkpoint, or when
94  * the checkpointer is restarted, memory allocation failures would be
95  * painful.
96  */
97  CkptBufferIds = (CkptSortItem *)
98  ShmemInitStruct("Checkpoint BufferIds",
99  NBuffers * sizeof(CkptSortItem), &foundBufCkpt);
100 
101  if (foundDescs || foundBufs || foundIOLocks || foundBufCkpt)
102  {
103  /* should find all of these, or none of them */
104  Assert(foundDescs && foundBufs && foundIOLocks && foundBufCkpt);
105  /* note: this path is only taken in EXEC_BACKEND case */
106  }
107  else
108  {
109  int i;
110 
111  /*
112  * Initialize all the buffer headers.
113  */
114  for (i = 0; i < NBuffers; i++)
115  {
116  BufferDesc *buf = GetBufferDescriptor(i);
117 
118  CLEAR_BUFFERTAG(buf->tag);
119 
120  pg_atomic_init_u32(&buf->state, 0);
121  buf->wait_backend_pid = 0;
122 
123  buf->buf_id = i;
124 
125  /*
126  * Initially link all the buffers together as unused. Subsequent
127  * management of this list is done by freelist.c.
128  */
129  buf->freeNext = i + 1;
130 
131  LWLockInitialize(BufferDescriptorGetContentLock(buf),
132  LWTRANCHE_BUFFER_CONTENT);
133 
134  LWLockInitialize(BufferDescriptorGetIOLock(buf),
135  LWTRANCHE_BUFFER_IO);
136  }
137 
138  /* Correct last entry of linked list */
139  GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST;
140  }
141 
142  /* Init other shared buffer-management stuff */
143  StrategyInitialize(!foundDescs);
144 
145  /* Initialize per-backend file flush context */
146  WritebackContextInit(&BackendWritebackContext,
147  &backend_flush_after);
148 }
#define FREENEXT_END_OF_LIST
LWLockMinimallyPadded * BufferIOLWLockArray
Definition: buf_init.c:22
int wait_backend_pid
int backend_flush_after
Definition: bufmgr.c:150
#define BufferDescriptorGetIOLock(bdesc)
void StrategyInitialize(bool init)
Definition: freelist.c:475
void WritebackContextInit(WritebackContext *context, int *max_pending)
Definition: bufmgr.c:4458
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:392
WritebackContext BackendWritebackContext
Definition: buf_init.c:23
static char * buf
Definition: pg_test_fsync.c:68
BufferDescPadded * BufferDescriptors
Definition: buf_init.c:20
#define GetBufferDescriptor(id)
union LWLockMinimallyPadded LWLockMinimallyPadded
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:745
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:745
#define CLEAR_BUFFERTAG(a)
Definition: buf_internals.h:97
CkptSortItem * CkptBufferIds
Definition: buf_init.c:24
size_t Size
Definition: c.h:473
BufferTag tag
int i
int NBuffers
Definition: globals.c:132
pg_atomic_uint32 state
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:223
char * BufferBlocks
Definition: buf_init.c:21

◆ InitBufferPoolAccess()

void InitBufferPoolAccess ( void  )

Definition at line 2500 of file bufmgr.c.

References HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, MemSet, and PrivateRefCountArray.

Referenced by BaseInit().

2501 {
2502  HASHCTL hash_ctl;
2503 
2504  memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
2505 
2506  MemSet(&hash_ctl, 0, sizeof(hash_ctl));
2507  hash_ctl.keysize = sizeof(int32);
2508  hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
2509 
2510  PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
2511  HASH_ELEM | HASH_BLOBS);
2512 }
struct PrivateRefCountEntry PrivateRefCountEntry
#define HASH_ELEM
Definition: hsearch.h:85
Size entrysize
Definition: hsearch.h:72
#define MemSet(start, val, len)
Definition: c.h:949
signed int int32
Definition: c.h:362
static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES]
Definition: bufmgr.c:189
#define HASH_BLOBS
Definition: hsearch.h:86
HTAB * hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition: dynahash.c:326
Size keysize
Definition: hsearch.h:71
static HTAB * PrivateRefCountHash
Definition: bufmgr.c:190

◆ InitBufferPoolBackend()

void InitBufferPoolBackend ( void  )

Definition at line 2524 of file bufmgr.c.

References AtProcExit_Buffers(), and on_shmem_exit().

Referenced by AuxiliaryProcessMain(), and InitPostgres().

2525 {
2526  on_shmem_exit(AtProcExit_Buffers, 0);
2527 }
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:361
static void AtProcExit_Buffers(int code, Datum arg)
Definition: bufmgr.c:2534

◆ IsBufferCleanupOK()

bool IsBufferCleanupOK ( Buffer  buffer)

Definition at line 4000 of file bufmgr.c.

References Assert, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsValid, GetBufferDescriptor, GetPrivateRefCount(), LocalRefCount, LockBufHdr(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), and UnlockBufHdr.

Referenced by _hash_doinsert(), _hash_expandtable(), _hash_splitbucket(), hash_xlog_split_allocate_page(), and hashbucketcleanup().

4001 {
4002  BufferDesc *bufHdr;
4003  uint32 buf_state;
4004 
4005  Assert(BufferIsValid(buffer));
4006 
4007  if (BufferIsLocal(buffer))
4008  {
4009  /* There should be exactly one pin */
4010  if (LocalRefCount[-buffer - 1] != 1)
4011  return false;
4012  /* Nobody else to wait for */
4013  return true;
4014  }
4015 
4016  /* There should be exactly one local pin */
4017  if (GetPrivateRefCount(buffer) != 1)
4018  return false;
4019 
4020  bufHdr = GetBufferDescriptor(buffer - 1);
4021 
4022  /* caller must hold exclusive lock on buffer */
4023  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
4024  LW_EXCLUSIVE));
4025 
4026  buf_state = LockBufHdr(bufHdr);
4027 
4028  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4029  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4030  {
4031  /* pincount is OK. */
4032  UnlockBufHdr(bufHdr, buf_state);
4033  return true;
4034  }
4035 
4036  UnlockBufHdr(bufHdr, buf_state);
4037  return false;
4038 }
bool LWLockHeldByMeInMode(LWLock *l, LWLockMode mode)
Definition: lwlock.c:1946
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:379
unsigned int uint32
Definition: c.h:374
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4318
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define UnlockBufHdr(desc, s)
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48

◆ LockBuffer()

void LockBuffer ( Buffer  buffer,
int  mode 
)

Definition at line 3750 of file bufmgr.c.

References Assert, buf, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, elog, ERROR, GetBufferDescriptor, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), and LWLockRelease().

Referenced by _bt_lockbuf(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_getbuf(), _hash_getbuf_with_strategy(), _hash_getcachedmetap(), _hash_getnewbuf(), _hash_init(), _hash_kill_items(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), blbulkdelete(), blgetbitmap(), blinsert(), BloomNewBuffer(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_page_cleanup(), brinbuild(), brinbuildempty(), bringetbitmap(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinLockRevmapPageForUpdate(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinsummarize(), bt_metap(), bt_page_items(), bt_page_stats(), bt_recheck_sibling_links(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), ConditionalLockBufferForCleanup(), count_nondeletable_pages(), entryLoadMoreItems(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), get_raw_page_internal(), GetVisibilityMapPins(), ginbuildempty(), ginbulkdelete(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistbuildempty(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfinishsplit(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbucketcleanup(), hashbulkdelete(), 
heap_abort_speculative(), heap_compute_xid_horizon_for_tuples(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_get_latest_tid(), heap_inplace_update(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_page_prune_opt(), heap_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_for_cluster(), heapam_scan_analyze_next_block(), heapam_scan_bitmap_next_block(), heapam_scan_sample_next_tuple(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), initBloomState(), lazy_scan_heap(), LockBufferForCleanup(), log_newpage_range(), palloc_btree_page(), pg_visibility(), pgrowlocks(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationGetBufferForTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), shiftList(), spgdoinsert(), spgGetCache(), SpGistNewBuffer(), spgprocesspending(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), summarize_range(), UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), XLogReadBufferExtended(), XLogReadBufferForRedoExtended(), and XLogRecordPageWithFreeSpace().

3751 {
3752  BufferDesc *buf;
3753 
3754  Assert(BufferIsPinned(buffer));
3755  if (BufferIsLocal(buffer))
3756  return; /* local buffers need no lock */
3757 
3758  buf = GetBufferDescriptor(buffer - 1);
3759 
3760  if (mode == BUFFER_LOCK_UNLOCK)
3761  LWLockRelease(BufferDescriptorGetContentLock(buf));
3762  else if (mode == BUFFER_LOCK_SHARE)
3763  LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_SHARED);
3764  else if (mode == BUFFER_LOCK_EXCLUSIVE)
3765  LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_EXCLUSIVE);
3766  else
3767  elog(ERROR, "unrecognized buffer lock mode: %d", mode);
3768 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1812
#define ERROR
Definition: elog.h:43
static char * buf
Definition: pg_test_fsync.c:68
#define GetBufferDescriptor(id)
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1208
#define elog(elevel,...)
Definition: elog.h:214
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97

◆ LockBufferForCleanup()

void LockBufferForCleanup ( Buffer  buffer)

Definition at line 3807 of file bufmgr.c.

References Assert, BM_PIN_COUNT_WAITER, BUF_STATE_GET_REFCOUNT, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsPinned, elog, ERROR, get_ps_display(), GetBufferDescriptor, GetPrivateRefCount(), InHotStandby, LocalRefCount, LockBuffer(), LockBufHdr(), MyProcPid, palloc(), pfree(), PG_WAIT_BUFFER_PIN, ProcWaitForSignal(), ResolveRecoveryConflictWithBufferPin(), set_ps_display(), SetStartupBufferPinWaitBufId(), UnlockBufHdr, update_process_title, and BufferDesc::wait_backend_pid.

Referenced by _bt_upgradelockbufcleanup(), ginVacuumPostingTree(), hashbulkdelete(), heap_force_common(), lazy_scan_heap(), ReadBuffer_common(), and XLogReadBufferForRedoExtended().

3808 {
3809  BufferDesc *bufHdr;
3810  char *new_status = NULL;
3811 
3812  Assert(BufferIsPinned(buffer));
3813  Assert(PinCountWaitBuf == NULL);
3814 
3815  if (BufferIsLocal(buffer))
3816  {
3817  /* There should be exactly one pin */
3818  if (LocalRefCount[-buffer - 1] != 1)
3819  elog(ERROR, "incorrect local pin count: %d",
3820  LocalRefCount[-buffer - 1]);
3821  /* Nobody else to wait for */
3822  return;
3823  }
3824 
3825  /* There should be exactly one local pin */
3826  if (GetPrivateRefCount(buffer) != 1)
3827  elog(ERROR, "incorrect local pin count: %d",
3828  GetPrivateRefCount(buffer));
3829 
3830  bufHdr = GetBufferDescriptor(buffer - 1);
3831 
3832  for (;;)
3833  {
3834  uint32 buf_state;
3835 
3836  /* Try to acquire lock */
3837  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3838  buf_state = LockBufHdr(bufHdr);
3839 
3840  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
3841  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
3842  {
3843  /* Successfully acquired exclusive lock with pincount 1 */
3844  UnlockBufHdr(bufHdr, buf_state);
3845 
3846  /* Report change to non-waiting status */
3847  if (new_status)
3848  {
3849  set_ps_display(new_status);
3850  pfree(new_status);
3851  }
3852  return;
3853  }
3854  /* Failed, so mark myself as waiting for pincount 1 */
3855  if (buf_state & BM_PIN_COUNT_WAITER)
3856  {
3857  UnlockBufHdr(bufHdr, buf_state);
3858  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3859  elog(ERROR, "multiple backends attempting to wait for pincount 1");
3860  }
3861  bufHdr->wait_backend_pid = MyProcPid;
3862  PinCountWaitBuf = bufHdr;
3863  buf_state |= BM_PIN_COUNT_WAITER;
3864  UnlockBufHdr(bufHdr, buf_state);
3865  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3866 
3867  /* Wait to be signaled by UnpinBuffer() */
3868  if (InHotStandby)
3869  {
3870  /* Report change to waiting status */
3871  if (update_process_title && new_status == NULL)
3872  {
3873  const char *old_status;
3874  int len;
3875 
3876  old_status = get_ps_display(&len);
3877  new_status = (char *) palloc(len + 8 + 1);
3878  memcpy(new_status, old_status, len);
3879  strcpy(new_status + len, " waiting");
3880  set_ps_display(new_status);
3881  new_status[len] = '\0'; /* truncate off " waiting" */
3882  }
3883 
3884  /* Publish the bufid that Startup process waits on */
3885  SetStartupBufferPinWaitBufId(buffer - 1);
3886  /* Set alarm and then wait to be signaled by UnpinBuffer() */
3887  ResolveRecoveryConflictWithBufferPin();
3888  /* Reset the published bufid */
3889  SetStartupBufferPinWaitBufId(-1);
3890  }
3891  else
3892  ProcWaitForSignal(PG_WAIT_BUFFER_PIN);
3893 
3894  /*
3895  * Remove flag marking us as waiter. Normally this will not be set
3896  * anymore, but ProcWaitForSignal() can return for other signals as
3897  * well. We take care to only reset the flag if we're the waiter, as
3898  * theoretically another backend could have started waiting. That's
3899  * impossible with the current usages due to table level locking, but
3900  * better be safe.
3901  */
3902  buf_state = LockBufHdr(bufHdr);
3903  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
3904  bufHdr->wait_backend_pid == MyProcPid)
3905  buf_state &= ~BM_PIN_COUNT_WAITER;
3906  UnlockBufHdr(bufHdr, buf_state);
3907 
3908  PinCountWaitBuf = NULL;
3909  /* Loop back and try again */
3910  }
3911 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
int MyProcPid
Definition: globals.c:40
int wait_backend_pid
bool update_process_title
Definition: ps_status.c:36
#define InHotStandby
Definition: xlog.h:74
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
void set_ps_display(const char *activity)
Definition: ps_status.c:349
void pfree(void *pointer)
Definition: mcxt.c:1057
#define ERROR
Definition: elog.h:43
void ResolveRecoveryConflictWithBufferPin(void)
Definition: standby.c:483
const char * get_ps_display(int *displen)
Definition: ps_status.c:430
void SetStartupBufferPinWaitBufId(int bufid)
Definition: proc.c:640
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:379
unsigned int uint32
Definition: c.h:374
void ProcWaitForSignal(uint32 wait_event_info)
Definition: proc.c:1796
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3750
#define PG_WAIT_BUFFER_PIN
Definition: pgstat.h:786
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4318
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37
void * palloc(Size size)
Definition: mcxt.c:950
#define UnlockBufHdr(desc, s)
#define elog(elevel,...)
Definition: elog.h:214
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:157
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:64

◆ MarkBufferDirty()

void MarkBufferDirty ( Buffer  buffer)

Definition at line 1469 of file bufmgr.c.

References Assert, BM_DIRTY, BM_JUST_DIRTIED, BM_LOCKED, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferIsLocal, BufferIsPinned, BufferIsValid, elog, ERROR, GetBufferDescriptor, LW_EXCLUSIVE, LWLockHeldByMeInMode(), MarkLocalBufferDirty(), pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pgBufferUsage, BufferUsage::shared_blks_dirtied, BufferDesc::state, VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, VacuumPageDirty, and WaitBufHdrUnlocked().

Referenced by _bt_clear_incomplete_split(), _bt_dedup_one_page(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newroot(), _bt_restore_meta(), _bt_split(), _bt_unlink_halfdead_page(), _bt_update_meta_cleanup_info(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), brin_doinsert(), brin_doupdate(), brin_initialize_empty_new_buffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinRevmapDesummarizeRange(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), createPostingTree(), do_setval(), doPickSplit(), fill_seq_with_data(), FreeSpaceMapPrepareTruncateRel(), generic_redo(), GenericXLogFinish(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginDeletePage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistbuild(), gistbuildempty(), gistdeletepage(), gistplacetopage(), gistprunepage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistvacuumpage(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), 
hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_prune(), heap_update(), heap_xlog_clean(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), lazy_scan_heap(), lazy_vacuum_page(), log_newpage_range(), moveLeafs(), nextval_internal(), RelationGetBufferForTuple(), revmap_physical_extend(), saveNodeLink(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), SpGistUpdateMetaPage(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), writeListPage(), and XLogReadBufferForRedoExtended().

1470 {
1471  BufferDesc *bufHdr;
1472  uint32 buf_state;
1473  uint32 old_buf_state;
1474 
1475  if (!BufferIsValid(buffer))
1476  elog(ERROR, "bad buffer ID: %d", buffer);
1477 
1478  if (BufferIsLocal(buffer))
1479  {
1480  MarkLocalBufferDirty(buffer);
1481  return;
1482  }
1483 
1484  bufHdr = GetBufferDescriptor(buffer - 1);
1485 
1486  Assert(BufferIsPinned(buffer));
1487  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
1488  LW_EXCLUSIVE));
1489 
1490  old_buf_state = pg_atomic_read_u32(&bufHdr->state);
1491  for (;;)
1492  {
1493  if (old_buf_state & BM_LOCKED)
1494  old_buf_state = WaitBufHdrUnlocked(bufHdr);
1495 
1496  buf_state = old_buf_state;
1497 
1498  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1499  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
1500 
1501  if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
1502  buf_state))
1503  break;
1504  }
1505 
1506  /*
1507  * If the buffer was not dirty already, do vacuum accounting.
1508  */
1509  if (!(old_buf_state & BM_DIRTY))
1510  {
1511  VacuumPageDirty++;
1512  pgBufferUsage.shared_blks_dirtied++;
1513  if (VacuumCostActive)
1514  VacuumCostBalance += VacuumCostPageDirty;
1515  }
1516 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
bool LWLockHeldByMeInMode(LWLock *l, LWLockMode mode)
Definition: lwlock.c:1946
int VacuumCostBalance
Definition: globals.c:148
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:311
int64 VacuumPageDirty
Definition: globals.c:146
#define BM_DIRTY
Definition: buf_internals.h:58
int VacuumCostPageDirty
Definition: globals.c:140
#define ERROR
Definition: elog.h:43
#define GetBufferDescriptor(id)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:63
long shared_blks_dirtied
Definition: instrument.h:23
unsigned int uint32
Definition: c.h:374
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:286
#define BM_LOCKED
Definition: buf_internals.h:57
#define BufferDescriptorGetContentLock(bdesc)
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
static uint32 WaitBufHdrUnlocked(BufferDesc *buf)
Definition: bufmgr.c:4346
#define elog(elevel,...)
Definition: elog.h:214
pg_atomic_uint32 state
BufferUsage pgBufferUsage
Definition: instrument.c:20
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48
bool VacuumCostActive
Definition: globals.c:149
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ MarkBufferDirtyHint()

void MarkBufferDirtyHint ( Buffer  buffer,
bool  buffer_std 
)

Definition at line 3581 of file bufmgr.c.

References Assert, BM_DIRTY, BM_JUST_DIRTIED, BM_PERMANENT, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock, BufferGetPage, BufferIsLocal, BufferIsValid, PGPROC::delayChkpt, elog, ERROR, GetBufferDescriptor, GetPrivateRefCount(), InvalidXLogRecPtr, LockBufHdr(), LWLockHeldByMe(), MarkLocalBufferDirty(), MyProc, PageSetLSN, pg_atomic_read_u32(), pgBufferUsage, RecoveryInProgress(), RelFileNodeSkippingWAL(), buftag::rnode, BufferUsage::shared_blks_dirtied, BufferDesc::state, BufferDesc::tag, UnlockBufHdr, VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, VacuumPageDirty, XLogHintBitIsNeeded, XLogRecPtrIsInvalid, and XLogSaveBufferForHint().

Referenced by _bt_check_unique(), _bt_killitems(), _hash_kill_items(), brin_start_evacuating_page(), btvacuumpage(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), gistkillitems(), heap_page_prune(), read_seq_tuple(), SetHintBits(), and XLogRecordPageWithFreeSpace().

3582 {
3583  BufferDesc *bufHdr;
3584  Page page = BufferGetPage(buffer);
3585 
3586  if (!BufferIsValid(buffer))
3587  elog(ERROR, "bad buffer ID: %d", buffer);
3588 
3589  if (BufferIsLocal(buffer))
3590  {
3591  MarkLocalBufferDirty(buffer);
3592  return;
3593  }
3594 
3595  bufHdr = GetBufferDescriptor(buffer - 1);
3596 
3597  Assert(GetPrivateRefCount(buffer) > 0);
3598  /* here, either share or exclusive lock is OK */
3599  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
3600 
3601  /*
3602  * This routine might get called many times on the same page, if we are
3603  * making the first scan after commit of an xact that added/deleted many
3604  * tuples. So, be as quick as we can if the buffer is already dirty. We
3605  * do this by not acquiring spinlock if it looks like the status bits are
3606  * already set. Since we make this test unlocked, there's a chance we
3607  * might fail to notice that the flags have just been cleared, and failed
3608  * to reset them, due to memory-ordering issues. But since this function
3609  * is only intended to be used in cases where failing to write out the
3610  * data would be harmless anyway, it doesn't really matter.
3611  */
3612  if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
3613  (BM_DIRTY | BM_JUST_DIRTIED))
3614  {
3615  XLogRecPtr lsn = InvalidXLogRecPtr;
3616  bool dirtied = false;
3617  bool delayChkpt = false;
3618  uint32 buf_state;
3619 
3620  /*
3621  * If we need to protect hint bit updates from torn writes, WAL-log a
3622  * full page image of the page. This full page image is only necessary
3623  * if the hint bit update is the first change to the page since the
3624  * last checkpoint.
3625  *
3626  * We don't check full_page_writes here because that logic is included
3627  * when we call XLogInsert() since the value changes dynamically.
3628  */
3629  if (XLogHintBitIsNeeded() &&
3630  (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
3631  {
3632  /*
3633  * If we must not write WAL, due to a relfilenode-specific
3634  * condition or being in recovery, don't dirty the page. We can
3635  * set the hint, just not dirty the page as a result so the hint
3636  * is lost when we evict the page or shutdown.
3637  *
3638  * See src/backend/storage/page/README for longer discussion.
3639  */
3640  if (RecoveryInProgress() ||
3641  RelFileNodeSkippingWAL(bufHdr->tag.rnode))
3642  return;
3643 
3644  /*
3645  * If the block is already dirty because we either made a change
3646  * or set a hint already, then we don't need to write a full page
3647  * image. Note that aggressive cleaning of blocks dirtied by hint
3648  * bit setting would increase the call rate. Bulk setting of hint
3649  * bits would reduce the call rate...
3650  *
3651  * We must issue the WAL record before we mark the buffer dirty.
3652  * Otherwise we might write the page before we write the WAL. That
3653  * causes a race condition, since a checkpoint might occur between
3654  * writing the WAL record and marking the buffer dirty. We solve
3655  * that with a kluge, but one that is already in use during
3656  * transaction commit to prevent race conditions. Basically, we
3657  * simply prevent the checkpoint WAL record from being written
3658  * until we have marked the buffer dirty. We don't start the
3659  * checkpoint flush until we have marked dirty, so our checkpoint
3660  * must flush the change to disk successfully or the checkpoint
3661  * never gets written, so crash recovery will fix.
3662  *
3663  * It's possible we may enter here without an xid, so it is
3664  * essential that CreateCheckpoint waits for virtual transactions
3665  * rather than full transactionids.
3666  */
3667  MyProc->delayChkpt = delayChkpt = true;
3668  lsn = XLogSaveBufferForHint(buffer, buffer_std);
3669  }
3670 
3671  buf_state = LockBufHdr(bufHdr);
3672 
3673  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
3674 
3675  if (!(buf_state & BM_DIRTY))
3676  {
3677  dirtied = true; /* Means "will be dirtied by this action" */
3678 
3679  /*
3680  * Set the page LSN if we wrote a backup block. We aren't supposed
3681  * to set this when only holding a share lock but as long as we
3682  * serialise it somehow we're OK. We choose to set LSN while
3683  * holding the buffer header lock, which causes any reader of an
3684  * LSN who holds only a share lock to also obtain a buffer header
3685  * lock before using PageGetLSN(), which is enforced in
3686  * BufferGetLSNAtomic().
3687  *
3688  * If checksums are enabled, you might think we should reset the
3689  * checksum here. That will happen when the page is written
3690  * sometime later in this checkpoint cycle.
3691  */
3692  if (!XLogRecPtrIsInvalid(lsn))
3693  PageSetLSN(page, lsn);
3694  }
3695 
3696  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
3697  UnlockBufHdr(bufHdr, buf_state);
3698 
3699  if (delayChkpt)
3700  MyProc->delayChkpt = false;
3701 
3702  if (dirtied)
3703  {
3704  VacuumPageDirty++;
3705  pgBufferUsage.shared_blks_dirtied++;
3706  if (VacuumCostActive)
3707  VacuumCostBalance += VacuumCostPageDirty;
3708  }
3709  }
3710 }
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
#define BM_PERMANENT
Definition: buf_internals.h:66
int VacuumCostBalance
Definition: globals.c:148
bool LWLockHeldByMe(LWLock *l)
Definition: lwlock.c:1928
PGPROC * MyProc
Definition: proc.c:67
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
Definition: xloginsert.c:917
int64 VacuumPageDirty
Definition: globals.c:146
bool RecoveryInProgress(void)
Definition: xlog.c:8074
#define BM_DIRTY
Definition: buf_internals.h:58
int VacuumCostPageDirty
Definition: globals.c:140
#define ERROR
Definition: elog.h:43
bool delayChkpt
Definition: proc.h:176
#define GetBufferDescriptor(id)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:63
long shared_blks_dirtied
Definition: instrument.h:23
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:379
unsigned int uint32
Definition: c.h:374
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:286
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define BufferDescriptorGetContentLock(bdesc)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4318
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:92
bool RelFileNodeSkippingWAL(RelFileNode rnode)
Definition: storage.c:496
BufferTag tag
#define UnlockBufHdr(desc, s)
#define elog(elevel,...)
Definition: elog.h:214
pg_atomic_uint32 state
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
#define XLogHintBitIsNeeded()
Definition: xlog.h:202
Pointer Page
Definition: bufpage.h:78
BufferUsage pgBufferUsage
Definition: instrument.c:20
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48
bool VacuumCostActive
Definition: globals.c:149
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ PrefetchBuffer()

PrefetchBufferResult PrefetchBuffer ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

Definition at line 575 of file bufmgr.c.

References Assert, BlockNumberIsValid, ereport, errcode(), errmsg(), ERROR, PrefetchLocalBuffer(), PrefetchSharedBuffer(), RelationData::rd_smgr, RELATION_IS_OTHER_TEMP, RelationIsValid, RelationOpenSmgr, and RelationUsesLocalBuffers.

Referenced by BitmapPrefetch(), count_nondeletable_pages(), HeapTupleHeaderAdvanceLatestRemovedXid(), and pg_prewarm().

576 {
577  Assert(RelationIsValid(reln));
578  Assert(BlockNumberIsValid(blockNum));
579 
580  /* Open it at the smgr level if not already done */
581  RelationOpenSmgr(reln);
582 
583  if (RelationUsesLocalBuffers(reln))
584  {
585  /* see comments in ReadBufferExtended */
586  if (RELATION_IS_OTHER_TEMP(reln))
587  ereport(ERROR,
588  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
589  errmsg("cannot access temporary tables of other sessions")));
590 
591  /* pass it off to localbuf.c */
592  return PrefetchLocalBuffer(reln->rd_smgr, forkNum, blockNum);
593  }
594  else
595  {
596  /* pass it to the shared buffer version */
597  return PrefetchSharedBuffer(reln->rd_smgr, forkNum, blockNum);
598  }
599 }
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition: localbuf.c:64
int errcode(int sqlerrcode)
Definition: elog.c:610
#define RelationOpenSmgr(relation)
Definition: rel.h:513
#define ERROR
Definition: elog.h:43
#define RelationIsValid(relation)
Definition: rel.h:429
PrefetchBufferResult PrefetchSharedBuffer(SMgrRelation smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:488
#define ereport(elevel,...)
Definition: elog.h:144
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
#define Assert(condition)
Definition: c.h:745
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:593
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:572
int errmsg(const char *fmt,...)
Definition: elog.c:824

◆ PrefetchSharedBuffer()

PrefetchBufferResult PrefetchSharedBuffer ( struct SMgrRelationData smgr_reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

◆ PrintBufferLeakWarning()

void PrintBufferLeakWarning ( Buffer  buffer)

Definition at line 2594 of file bufmgr.c.

References Assert, buftag::blockNum, buf, BUF_FLAG_MASK, BUF_STATE_GET_REFCOUNT, BufferIsLocal, BufferIsValid, elog, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, GetPrivateRefCount(), InvalidBackendId, LocalRefCount, MyBackendId, pfree(), pg_atomic_read_u32(), relpathbackend, buftag::rnode, BufferDesc::state, BufferDesc::tag, and WARNING.

Referenced by CheckForBufferLeaks(), CheckForLocalBufferLeaks(), and ResourceOwnerReleaseInternal().

2595 {
2596  BufferDesc *buf;
2597  int32 loccount;
2598  char *path;
2599  BackendId backend;
2600  uint32 buf_state;
2601 
2602  Assert(BufferIsValid(buffer));
2603  if (BufferIsLocal(buffer))
2604  {
2605  buf = GetLocalBufferDescriptor(-buffer - 1);
2606  loccount = LocalRefCount[-buffer - 1];
2607  backend = MyBackendId;
2608  }
2609  else
2610  {
2611  buf = GetBufferDescriptor(buffer - 1);
2612  loccount = GetPrivateRefCount(buffer);
2613  backend = InvalidBackendId;
2614  }
2615 
2616  /* theoretically we should lock the bufhdr here */
2617  path = relpathbackend(buf->tag.rnode, backend, buf->tag.forkNum);
2618  buf_state = pg_atomic_read_u32(&buf->state);
2619  elog(WARNING,
2620  "buffer refcount leak: [%03d] "
2621  "(rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
2622  buffer, path,
2623  buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
2624  BUF_STATE_GET_REFCOUNT(buf_state), loccount);
2625  pfree(path);
2626 }
BackendId MyBackendId
Definition: globals.c:81
ForkNumber forkNum
Definition: buf_internals.h:93
#define GetLocalBufferDescriptor(id)
signed int int32
Definition: c.h:362
void pfree(void *pointer)
Definition: mcxt.c:1057
#define BUF_FLAG_MASK
Definition: buf_internals.h:45
static char * buf
Definition: pg_test_fsync.c:68
#define GetBufferDescriptor(id)
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:379
unsigned int uint32
Definition: c.h:374
#define WARNING
Definition: elog.h:40
#define InvalidBackendId
Definition: backendid.h:23
int BackendId
Definition: backendid.h:21
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:94
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
#define elog(elevel,...)
Definition: elog.h:214
pg_atomic_uint32 state
#define relpathbackend(rnode, backend, forknum)
Definition: relpath.h:78
int32 * LocalRefCount
Definition: localbuf.c:45
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:48
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:241

◆ ReadBuffer()

Buffer ReadBuffer ( Relation  reln,
BlockNumber  blockNum 
)

Definition at line 607 of file bufmgr.c.

References MAIN_FORKNUM, RBM_NORMAL, and ReadBufferExtended().

Referenced by _bt_getbuf(), _bt_search_insert(), _hash_getbuf(), _hash_getbuf_with_condlock_cleanup(), blbulkdelete(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brinbuild(), brinGetStats(), brinGetTupleForHeapBlock(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), bt_metap(), bt_page_items(), bt_page_stats(), fill_seq_with_data(), ginFindLeafPage(), ginFindParents(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), GinNewBuffer(), ginStepRight(), ginUpdateStats(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistGetMaxLevel(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), heap_abort_speculative(), heap_compute_xid_horizon_for_tuples(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_inplace_update(), heap_lock_tuple(), heap_update(), initBloomState(), pg_visibility(), pgstatginindex_internal(), read_seq_tuple(), RelationGetBufferForTuple(), ReleaseAndReadBuffer(), revmap_get_buffer(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), shiftList(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), and spgWalk().

608 {
609  return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
610 }
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:653

◆ ReadBufferExtended()

Buffer ReadBufferExtended ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy 
)

Definition at line 653 of file bufmgr.c.

References buf, ereport, errcode(), errmsg(), ERROR, pgstat_count_buffer_hit, pgstat_count_buffer_read, RelationData::rd_rel, RelationData::rd_smgr, ReadBuffer_common(), RELATION_IS_OTHER_TEMP, and RelationOpenSmgr.

Referenced by _hash_getbuf_with_strategy(), _hash_getinitbuf(), _hash_getnewbuf(), autoprewarm_database_main(), blbulkdelete(), blgetbitmap(), blvacuumcleanup(), brin_vacuum_scan(), brinbuildempty(), bt_recheck_sibling_links(), btvacuumpage(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), fsm_readbuf(), get_raw_page_internal(), ginbuildempty(), ginbulkdelete(), ginDeletePage(), ginScanToDelete(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistbuildempty(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbulkdelete(), heapam_scan_analyze_next_block(), heapgetpage(), lazy_scan_heap(), lazy_vacuum_heap(), log_newpage_range(), palloc_btree_page(), pg_prewarm(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstathashindex(), pgstatindex_impl(), ReadBuffer(), ReadBufferBI(), spgprocesspending(), spgvacuumpage(), statapprox_heap(), and vm_readbuf().

655 {
656  bool hit;
657  Buffer buf;
658 
659  /* Open it at the smgr level if not already done */
660  RelationOpenSmgr(reln);
661 
662  /*
663  * Reject attempts to read non-local temporary relations; we would be
664  * likely to get wrong data since we have no visibility into the owning
665  * session's local buffers.
666  */
667  if (RELATION_IS_OTHER_TEMP(reln))
668  ereport(ERROR,
669  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
670  errmsg("cannot access temporary tables of other sessions")));
671 
672  /*
673  * Read the buffer, and update pgstat counters to reflect a cache hit or
674  * miss.
675  */
676  pgstat_count_buffer_read(reln);
677  buf = ReadBuffer_common(reln->rd_smgr, reln->rd_rel->relpersistence,
678  forkNum, blockNum, mode, strategy, &hit);
679  if (hit)
680  pgstat_count_buffer_hit(reln);
681  return buf;
682 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
int errcode(int sqlerrcode)
Definition: elog.c:610
Form_pg_class rd_rel
Definition: rel.h:109
#define RelationOpenSmgr(relation)
Definition: rel.h:513
#define ERROR
Definition: elog.h:43
#define pgstat_count_buffer_read(rel)
Definition: pgstat.h:1427
static char * buf
Definition: pg_test_fsync.c:68
#define pgstat_count_buffer_hit(rel)
Definition: pgstat.h:1432
static Buffer ReadBuffer_common(SMgrRelation reln, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit)
Definition: bufmgr.c:716
#define ereport(elevel,...)
Definition: elog.h:144
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:593
int errmsg(const char *fmt,...)
Definition: elog.c:824
int Buffer
Definition: buf.h:23

◆ ReadBufferWithoutRelcache()

Buffer ReadBufferWithoutRelcache ( RelFileNode  rnode,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy 
)

Definition at line 695 of file bufmgr.c.

References Assert, InRecovery, InvalidBackendId, ReadBuffer_common(), and smgropen().

Referenced by XLogReadBufferExtended().

698 {
699  bool hit;
700 
701  SMgrRelation smgr = smgropen(rnode, InvalidBackendId);
702 
703  Assert(InRecovery);
704 
705  return ReadBuffer_common(smgr, RELPERSISTENCE_PERMANENT, forkNum, blockNum,
706  mode, strategy, &hit);
707 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
bool InRecovery
Definition: xlog.c:205
SMgrRelation smgropen(RelFileNode rnode, BackendId backend)
Definition: smgr.c:146
#define InvalidBackendId
Definition: backendid.h:23
static Buffer ReadBuffer_common(SMgrRelation reln, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit)
Definition: bufmgr.c:716
#define Assert(condition)
Definition: c.h:745

◆ RelationGetNumberOfBlocksInFork()

BlockNumber RelationGetNumberOfBlocksInFork ( Relation  relation,
ForkNumber  forkNum 
)

Definition at line 2848 of file bufmgr.c.

References Assert, RelationData::rd_rel, RelationData::rd_smgr, RelationOpenSmgr, smgrnblocks(), and table_relation_size().

Referenced by _hash_getnewbuf(), _hash_init(), autoprewarm_database_main(), get_raw_page_internal(), and pg_prewarm().

2849 {
2850  switch (relation->rd_rel->relkind)
2851  {
2852  case RELKIND_SEQUENCE:
2853  case RELKIND_INDEX:
2854  case RELKIND_PARTITIONED_INDEX:
2855  /* Open it at the smgr level if not already done */
2856  RelationOpenSmgr(relation);
2857 
2858  return smgrnblocks(relation->rd_smgr, forkNum);
2859 
2860  case RELKIND_RELATION:
2861  case RELKIND_TOASTVALUE:
2862  case RELKIND_MATVIEW:
2863  {
2864  /*
2865  * Not every table AM uses BLCKSZ wide fixed size blocks.
2866  * Therefore tableam returns the size in bytes - but for the
2867  * purpose of this routine, we want the number of blocks.
2868  * Therefore divide, rounding up.
2869  */
2870  uint64 szbytes;
2871 
2872  szbytes = table_relation_size(relation, forkNum);
2873 
2874  return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
2875  }
2876  case RELKIND_VIEW:
2877  case RELKIND_COMPOSITE_TYPE:
2878  case RELKIND_FOREIGN_TABLE:
2879  case RELKIND_PARTITIONED_TABLE:
2880  default:
2881  Assert(false);
2882  break;
2883  }
2884 
2885  return 0; /* keep compiler quiet */
2886 }
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
Form_pg_class rd_rel
Definition: rel.h:109
#define RelationOpenSmgr(relation)
Definition: rel.h:513
static uint64 table_relation_size(Relation rel, ForkNumber forkNumber)
Definition: tableam.h:1645
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:549
#define Assert(condition)
Definition: c.h:745

◆ ReleaseAndReadBuffer()

Buffer ReleaseAndReadBuffer ( Buffer  buffer,
Relation  relation,
BlockNumber  blockNum 
)

Definition at line 1532 of file bufmgr.c.

References Assert, buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufferIsValid, CurrentResourceOwner, buftag::forkNum, GetBufferDescriptor, GetLocalBufferDescriptor, LocalRefCount, MAIN_FORKNUM, RelationData::rd_node, ReadBuffer(), RelFileNodeEquals, ResourceOwnerForgetBuffer(), buftag::rnode, BufferDesc::tag, and UnpinBuffer().

Referenced by _bt_relandgetbuf(), ginFindLeafPage(), heapam_index_fetch_tuple(), and heapam_scan_bitmap_next_block().

1535 {
1536  ForkNumber forkNum = MAIN_FORKNUM;
1537  BufferDesc *bufHdr;
1538 
1539  if (BufferIsValid(buffer))
1540  {
1541  Assert(BufferIsPinned(buffer));
1542  if (BufferIsLocal(buffer))
1543  {
1544  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
1545  if (bufHdr->tag.blockNum == blockNum &&
1546  RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
1547  bufHdr->tag.forkNum == forkNum)
1548  return buffer;
1549  ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
1550  LocalRefCount[-buffer - 1]--;
1551  }
1552  else
1553  {
1554  bufHdr = GetBufferDescriptor(buffer - 1);
1555  /* we have pin, so it's ok to examine tag without spinlock */
1556  if (bufHdr->tag.blockNum == blockNum &&
1557  RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
1558  bufHdr->tag.forkNum == forkNum)
1559  return buffer;
1560  UnpinBuffer(bufHdr, true);
1561  }
1562  }
1563 
1564  return ReadBuffer(relation, blockNum);
1565 }
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:440
ForkNumber forkNum
Definition: buf_internals.h:93
ResourceOwner CurrentResourceOwner
Definition: resowner.c:142
#define GetLocalBufferDescriptor(id)
#define GetBufferDescriptor(id)
ForkNumber
Definition: relpath.h:40
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1738
RelFileNode rd_node
Definition: rel.h:55
#define Assert(condition)
Definition: c.h:745
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:607
#define BufferIsLocal(buffer)
Definition: buf.h:37
BlockNumber blockNum
Definition: buf_internals.h:94
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
RelFileNode rnode
Definition: buf_internals.h:92
BufferTag tag
int32 * LocalRefCount
Definition: localbuf.c:45
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88
void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:939

◆ ReleaseBuffer()

void ReleaseBuffer ( Buffer  buffer)

Definition at line 3511 of file bufmgr.c.

References Assert, BufferIsLocal, BufferIsValid, CurrentResourceOwner, elog, ERROR, GetBufferDescriptor, LocalRefCount, ResourceOwnerForgetBuffer(), and UnpinBuffer().

Referenced by _bt_drop_lock_and_maybe_pin(), _bt_getbuf(), _bt_pagedel(), _bt_relbuf(), _bt_search_insert(), _bt_unlink_halfdead_page(), _hash_dropbuf(), _hash_getbuf_with_condlock_cleanup(), autoprewarm_database_main(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brin_vacuum_scan(), bringetbitmap(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapTerminate(), brinsummarize(), collect_corrupt_items(), collect_visibility_data(), entryLoadMoreItems(), ExecEndBitmapHeapScan(), ExecEndIndexOnlyScan(), ExecReScanBitmapHeapScan(), FreeBulkInsertState(), freeGinBtreeStack(), fsm_vacuum_page(), get_actual_variable_endpoint(), get_raw_page_internal(), GetRecordedFreeSpace(), ginDeletePage(), ginFindParents(), ginFinishSplit(), ginFreeScanKeys(), ginInsertCleanup(), GinNewBuffer(), ginScanToDelete(), gistdoinsert(), gistFindCorrectParent(), gistNewBuffer(), gistvacuum_delete_empty_pages(), heap_abort_speculative(), heap_compute_xid_horizon_for_tuples(), heap_delete(), heap_endscan(), heap_fetch(), heap_force_common(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_rescan(), heap_update(), heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapgetpage(), heapgettup(), heapgettup_pagemode(), lazy_scan_heap(), lazy_vacuum_heap(), pg_prewarm(), pg_visibility(), pg_visibility_map(), pg_visibility_map_summary(), pgstatindex_impl(), ReadBufferBI(), RelationGetBufferForTuple(), ReleaseBulkInsertStatePin(), ResourceOwnerReleaseInternal(), revmap_get_buffer(), revmap_physical_extend(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), statapprox_heap(), summarize_range(), terminate_brin_buildstate(), tts_buffer_heap_clear(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), UnlockReleaseBuffer(), 
visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), and XLogReadBufferExtended().

3512 {
3513  if (!BufferIsValid(buffer))
3514  elog(ERROR, "bad buffer ID: %d", buffer);
3515 
3516  if (BufferIsLocal(buffer))
3517  {
3518  ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
3519 
3520  Assert(LocalRefCount[-buffer - 1] > 0);
3521  LocalRefCount[-buffer - 1]--;
3522  return;
3523  }
3524 
3525  UnpinBuffer(GetBufferDescriptor(buffer - 1), true);
3526 }
ResourceOwner CurrentResourceOwner
Definition: resowner.c:142
#define ERROR
Definition: elog.h:43
#define GetBufferDescriptor(id)
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1738
#define Assert(condition)
Definition: c.h:745
#define BufferIsLocal(buffer)
Definition: buf.h:37
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define elog(elevel,...)
Definition: elog.h:214
int32 * LocalRefCount
Definition: localbuf.c:45
void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:939

◆ TestForOldSnapshot()

static void TestForOldSnapshot ( Snapshot  snapshot,
Relation  relation,
Page  page 
)
inline static

Definition at line 277 of file bufmgr.h.

References Assert, old_snapshot_threshold, PageGetLSN, SNAPSHOT_MVCC, SNAPSHOT_TOAST, TestForOldSnapshot_impl(), and XLogRecPtrIsInvalid.

Referenced by _bt_get_endpoint(), _bt_moveright(), _bt_readnextpage(), _bt_walk_left(), _hash_first(), _hash_next(), _hash_readnext(), _hash_readprev(), blgetbitmap(), brinGetTupleForHeapBlock(), brinRevmapInitialize(), collectMatchBitmap(), collectMatchesForHeapRow(), ginFindLeafPage(), gistScanPage(), heap_fetch(), heap_get_latest_tid(), heapgetpage(), heapgettup(), heapgettup_pagemode(), scanGetCandidate(), scanPendingInsert(), and spgWalk().

278 {
279  Assert(relation != NULL);
280 
281  if (old_snapshot_threshold >= 0
282  && (snapshot) != NULL
283  && ((snapshot)->snapshot_type == SNAPSHOT_MVCC
284  || (snapshot)->snapshot_type == SNAPSHOT_TOAST)
285  && !XLogRecPtrIsInvalid((snapshot)->lsn)
286  && PageGetLSN(page) > (snapshot)->lsn)
287  TestForOldSnapshot_impl(snapshot, relation);
288 }
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define Assert(condition)
Definition: c.h:745
#define PageGetLSN(page)
Definition: bufpage.h:366
int old_snapshot_threshold
Definition: snapmgr.c:78
void TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
Definition: bufmgr.c:4578

◆ TestForOldSnapshot_impl()

void TestForOldSnapshot_impl ( Snapshot  snapshot,
Relation  relation 
)

Definition at line 4578 of file bufmgr.c.

References ereport, errcode(), errmsg(), ERROR, GetOldSnapshotThresholdTimestamp(), and RelationAllowsEarlyPruning.

Referenced by TestForOldSnapshot().

4579 {
4580  if (RelationAllowsEarlyPruning(relation)
4581  && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
4582  ereport(ERROR,
4583  (errcode(ERRCODE_SNAPSHOT_TOO_OLD),
4584  errmsg("snapshot too old")));
4585 }
TimestampTz GetOldSnapshotThresholdTimestamp(void)
Definition: snapmgr.c:1660
#define RelationAllowsEarlyPruning(rel)
Definition: snapmgr.h:38
int errcode(int sqlerrcode)
Definition: elog.c:610
#define ERROR
Definition: elog.h:43
#define ereport(elevel,...)
Definition: elog.h:144
int errmsg(const char *fmt,...)
Definition: elog.c:824

◆ UnlockBuffers()

void UnlockBuffers ( void  )

Definition at line 3722 of file bufmgr.c.

References BM_PIN_COUNT_WAITER, buf, LockBufHdr(), MyProcPid, PinCountWaitBuf, UnlockBufHdr, and BufferDesc::wait_backend_pid.

Referenced by AbortSubTransaction(), AbortTransaction(), AtProcExit_Buffers(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), and WalWriterMain().

3723 {
3724  BufferDesc *buf = PinCountWaitBuf;
3725 
3726  if (buf)
3727  {
3728  uint32 buf_state;
3729 
3730  buf_state = LockBufHdr(buf);
3731 
3732  /*
3733  * Don't complain if flag bit not set; it could have been reset but we
3734  * got a cancel/die interrupt before getting the signal.
3735  */
3736  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
3737  buf->wait_backend_pid == MyProcPid)
3738  buf_state &= ~BM_PIN_COUNT_WAITER;
3739 
3740  UnlockBufHdr(buf, buf_state);
3741 
3742  PinCountWaitBuf = NULL;
3743  }
3744 }
int MyProcPid
Definition: globals.c:40
int wait_backend_pid
static char * buf
Definition: pg_test_fsync.c:68
unsigned int uint32
Definition: c.h:374
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4318
#define UnlockBufHdr(desc, s)
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:157
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:64

◆ UnlockReleaseBuffer()

void UnlockReleaseBuffer ( Buffer  buffer)

Definition at line 3534 of file bufmgr.c.

References BUFFER_LOCK_UNLOCK, LockBuffer(), and ReleaseBuffer().

Referenced by _bt_clear_incomplete_split(), _bt_restore_meta(), _hash_relbuf(), allocNewBuffer(), AlterSequence(), blbulkdelete(), blgetbitmap(), blinsert(), BloomInitMetapage(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinGetStats(), brinRevmapDesummarizeRange(), bt_metap(), bt_page_items(), bt_page_stats(), bt_recheck_sibling_links(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), checkXLogConsistency(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), createPostingTree(), do_setval(), doPickSplit(), entryLoadMoreItems(), fill_seq_with_data(), flushCachedPage(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_set_and_search(), generic_redo(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginFindParents(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), ginScanToDelete(), ginStepRight(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistbufferinginserttuples(), gistbuild(), gistbuildempty(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), 
hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_inplace_update(), heap_insert(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), heap_xlog_clean(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_scan_analyze_next_tuple(), initBloomState(), lazy_scan_heap(), lazy_vacuum_heap(), log_newpage_range(), moveLeafs(), nextval_internal(), palloc_btree_page(), pg_sequence_last_value(), pg_visibility(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), RelationAddExtraBlocks(), RelationGetBufferForTuple(), ResetSequence(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistUpdateMetaPage(), spgMatchNodeAction(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), statapprox_heap(), visibilitymap_prepare_truncate(), writeListPage(), xlog_redo(), and XLogRecordPageWithFreeSpace().

3535 {
3536  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3537  ReleaseBuffer(buffer);
3538 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3511
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3750

Variable Documentation

◆ backend_flush_after

int backend_flush_after

Definition at line 150 of file bufmgr.c.

Referenced by InitBufferPool().

◆ bgwriter_flush_after

int bgwriter_flush_after

Definition at line 149 of file bufmgr.c.

Referenced by BackgroundWriterMain().

◆ bgwriter_lru_maxpages

int bgwriter_lru_maxpages

Definition at line 125 of file bufmgr.c.

Referenced by BgBufferSync().

◆ bgwriter_lru_multiplier

double bgwriter_lru_multiplier

Definition at line 126 of file bufmgr.c.

Referenced by BgBufferSync().

◆ BufferBlocks

PGDLLIMPORT char* BufferBlocks

Definition at line 21 of file buf_init.c.

Referenced by InitBufferPool().

◆ checkpoint_flush_after

int checkpoint_flush_after

Definition at line 148 of file bufmgr.c.

Referenced by BufferSync().

◆ effective_io_concurrency

int effective_io_concurrency

Definition at line 135 of file bufmgr.c.

Referenced by get_tablespace_io_concurrency(), and tablespace_reloptions().

◆ LocalBufferBlockPointers

PGDLLIMPORT Block* LocalBufferBlockPointers

Definition at line 44 of file localbuf.c.

Referenced by InitLocalBuffers().

◆ LocalRefCount

◆ maintenance_io_concurrency

int maintenance_io_concurrency

◆ NBuffers

◆ NLocBuffer

◆ track_io_timing

bool track_io_timing

Definition at line 127 of file bufmgr.c.

Referenced by FlushBuffer(), ReadBuffer_common(), and show_buffer_usage().

◆ zero_damaged_pages

bool zero_damaged_pages

Definition at line 124 of file bufmgr.c.

Referenced by mdread(), and ReadBuffer_common().