PostgreSQL Source Code  git master
bufmgr.h File Reference
#include "port/pg_iovec.h"
#include "storage/block.h"
#include "storage/buf.h"
#include "storage/bufpage.h"
#include "storage/relfilelocator.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
Include dependency graph for bufmgr.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  PrefetchBufferResult
 
struct  BufferManagerRelation
 
struct  ReadBuffersOperation
 

Macros

#define BMR_REL(p_rel)   ((BufferManagerRelation){.rel = p_rel})
 
#define BMR_SMGR(p_smgr, p_relpersistence)   ((BufferManagerRelation){.smgr = p_smgr, .relpersistence = p_relpersistence})
 
#define READ_BUFFERS_ZERO_ON_ERROR   (1 << 0)
 
#define READ_BUFFERS_ISSUE_ADVICE   (1 << 1)
 
#define DEFAULT_EFFECTIVE_IO_CONCURRENCY   0
 
#define DEFAULT_MAINTENANCE_IO_CONCURRENCY   0
 
#define MAX_IO_COMBINE_LIMIT   PG_IOV_MAX
 
#define DEFAULT_IO_COMBINE_LIMIT   Min(MAX_IO_COMBINE_LIMIT, (128 * 1024) / BLCKSZ)
 
#define MAX_IO_CONCURRENCY   1000
 
#define P_NEW   InvalidBlockNumber /* grow the file to get a new page */
 
#define BUFFER_LOCK_UNLOCK   0
 
#define BUFFER_LOCK_SHARE   1
 
#define BUFFER_LOCK_EXCLUSIVE   2
 
#define RelationGetNumberOfBlocks(reln)    RelationGetNumberOfBlocksInFork(reln, MAIN_FORKNUM)
 

Typedefs

typedef void * Block
 
typedef enum BufferAccessStrategyType BufferAccessStrategyType
 
typedef struct PrefetchBufferResult PrefetchBufferResult
 
typedef enum ExtendBufferedFlags ExtendBufferedFlags
 
typedef struct BufferManagerRelation BufferManagerRelation
 
typedef struct ReadBuffersOperation ReadBuffersOperation
 

Enumerations

enum  BufferAccessStrategyType { BAS_NORMAL , BAS_BULKREAD , BAS_BULKWRITE , BAS_VACUUM }
 
enum  ReadBufferMode {
  RBM_NORMAL , RBM_ZERO_AND_LOCK , RBM_ZERO_AND_CLEANUP_LOCK , RBM_ZERO_ON_ERROR ,
  RBM_NORMAL_NO_LOG
}
 
enum  ExtendBufferedFlags {
  EB_SKIP_EXTENSION_LOCK = (1 << 0) , EB_PERFORMING_RECOVERY = (1 << 1) , EB_CREATE_FORK_IF_NEEDED = (1 << 2) , EB_LOCK_FIRST = (1 << 3) ,
  EB_CLEAR_SIZE_CACHE = (1 << 4) , EB_LOCK_TARGET = (1 << 5)
}
 

Functions

PrefetchBufferResult PrefetchSharedBuffer (struct SMgrRelationData *smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
 
PrefetchBufferResult PrefetchBuffer (Relation reln, ForkNumber forkNum, BlockNumber blockNum)
 
bool ReadRecentBuffer (RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum, Buffer recent_buffer)
 
Buffer ReadBuffer (Relation reln, BlockNumber blockNum)
 
Buffer ReadBufferExtended (Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
 
Buffer ReadBufferWithoutRelcache (RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool permanent)
 
bool StartReadBuffer (ReadBuffersOperation *operation, Buffer *buffer, BlockNumber blocknum, int flags)
 
bool StartReadBuffers (ReadBuffersOperation *operation, Buffer *buffers, BlockNumber blockNum, int *nblocks, int flags)
 
void WaitReadBuffers (ReadBuffersOperation *operation)
 
void ReleaseBuffer (Buffer buffer)
 
void UnlockReleaseBuffer (Buffer buffer)
 
bool BufferIsExclusiveLocked (Buffer buffer)
 
bool BufferIsDirty (Buffer buffer)
 
void MarkBufferDirty (Buffer buffer)
 
void IncrBufferRefCount (Buffer buffer)
 
void CheckBufferIsPinnedOnce (Buffer buffer)
 
Buffer ReleaseAndReadBuffer (Buffer buffer, Relation relation, BlockNumber blockNum)
 
Buffer ExtendBufferedRel (BufferManagerRelation bmr, ForkNumber forkNum, BufferAccessStrategy strategy, uint32 flags)
 
BlockNumber ExtendBufferedRelBy (BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, uint32 extend_by, Buffer *buffers, uint32 *extended_by)
 
Buffer ExtendBufferedRelTo (BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, BlockNumber extend_to, ReadBufferMode mode)
 
void InitBufferManagerAccess (void)
 
void AtEOXact_Buffers (bool isCommit)
 
char * DebugPrintBufferRefcount (Buffer buffer)
 
void CheckPointBuffers (int flags)
 
BlockNumber BufferGetBlockNumber (Buffer buffer)
 
BlockNumber RelationGetNumberOfBlocksInFork (Relation relation, ForkNumber forkNum)
 
void FlushOneBuffer (Buffer buffer)
 
void FlushRelationBuffers (Relation rel)
 
void FlushRelationsAllBuffers (struct SMgrRelationData **smgrs, int nrels)
 
void CreateAndCopyRelationData (RelFileLocator src_rlocator, RelFileLocator dst_rlocator, bool permanent)
 
void FlushDatabaseBuffers (Oid dbid)
 
void DropRelationBuffers (struct SMgrRelationData *smgr_reln, ForkNumber *forkNum, int nforks, BlockNumber *firstDelBlock)
 
void DropRelationsAllBuffers (struct SMgrRelationData **smgr_reln, int nlocators)
 
void DropDatabaseBuffers (Oid dbid)
 
bool BufferIsPermanent (Buffer buffer)
 
XLogRecPtr BufferGetLSNAtomic (Buffer buffer)
 
void BufferGetTag (Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
 
void MarkBufferDirtyHint (Buffer buffer, bool buffer_std)
 
void UnlockBuffers (void)
 
void LockBuffer (Buffer buffer, int mode)
 
bool ConditionalLockBuffer (Buffer buffer)
 
void LockBufferForCleanup (Buffer buffer)
 
bool ConditionalLockBufferForCleanup (Buffer buffer)
 
bool IsBufferCleanupOK (Buffer buffer)
 
bool HoldingBufferPinThatDelaysRecovery (void)
 
bool BgBufferSync (struct WritebackContext *wb_context)
 
void LimitAdditionalPins (uint32 *additional_pins)
 
void LimitAdditionalLocalPins (uint32 *additional_pins)
 
bool EvictUnpinnedBuffer (Buffer buf)
 
void BufferManagerShmemInit (void)
 
Size BufferManagerShmemSize (void)
 
void AtProcExit_LocalBuffers (void)
 
BufferAccessStrategy GetAccessStrategy (BufferAccessStrategyType btype)
 
BufferAccessStrategy GetAccessStrategyWithSize (BufferAccessStrategyType btype, int ring_size_kb)
 
int GetAccessStrategyBufferCount (BufferAccessStrategy strategy)
 
int GetAccessStrategyPinLimit (BufferAccessStrategy strategy)
 
void FreeAccessStrategy (BufferAccessStrategy strategy)
 
static bool BufferIsValid (Buffer bufnum)
 
static Block BufferGetBlock (Buffer buffer)
 
static Size BufferGetPageSize (Buffer buffer)
 
static Page BufferGetPage (Buffer buffer)
 

Variables

PGDLLIMPORT int NBuffers
 
PGDLLIMPORT bool zero_damaged_pages
 
PGDLLIMPORT int bgwriter_lru_maxpages
 
PGDLLIMPORT double bgwriter_lru_multiplier
 
PGDLLIMPORT bool track_io_timing
 
PGDLLIMPORT int effective_io_concurrency
 
PGDLLIMPORT int maintenance_io_concurrency
 
PGDLLIMPORT int io_combine_limit
 
PGDLLIMPORT int checkpoint_flush_after
 
PGDLLIMPORT int backend_flush_after
 
PGDLLIMPORT int bgwriter_flush_after
 
PGDLLIMPORT char * BufferBlocks
 
PGDLLIMPORT int NLocBuffer
 
PGDLLIMPORT Block * LocalBufferBlockPointers
 
PGDLLIMPORT int32 * LocalRefCount
 

Macro Definition Documentation

◆ BMR_REL

#define BMR_REL (   p_rel)    ((BufferManagerRelation){.rel = p_rel})

Definition at line 107 of file bufmgr.h.

◆ BMR_SMGR

#define BMR_SMGR (   p_smgr,
  p_relpersistence 
)    ((BufferManagerRelation){.smgr = p_smgr, .relpersistence = p_relpersistence})

Definition at line 108 of file bufmgr.h.

◆ BUFFER_LOCK_EXCLUSIVE

#define BUFFER_LOCK_EXCLUSIVE   2

Definition at line 191 of file bufmgr.h.

◆ BUFFER_LOCK_SHARE

#define BUFFER_LOCK_SHARE   1

Definition at line 190 of file bufmgr.h.

◆ BUFFER_LOCK_UNLOCK

#define BUFFER_LOCK_UNLOCK   0

Definition at line 189 of file bufmgr.h.

◆ DEFAULT_EFFECTIVE_IO_CONCURRENCY

#define DEFAULT_EFFECTIVE_IO_CONCURRENCY   0

Definition at line 158 of file bufmgr.h.

◆ DEFAULT_IO_COMBINE_LIMIT

#define DEFAULT_IO_COMBINE_LIMIT   Min(MAX_IO_COMBINE_LIMIT, (128 * 1024) / BLCKSZ)

Definition at line 165 of file bufmgr.h.

◆ DEFAULT_MAINTENANCE_IO_CONCURRENCY

#define DEFAULT_MAINTENANCE_IO_CONCURRENCY   0

Definition at line 159 of file bufmgr.h.

◆ MAX_IO_COMBINE_LIMIT

#define MAX_IO_COMBINE_LIMIT   PG_IOV_MAX

Definition at line 164 of file bufmgr.h.

◆ MAX_IO_CONCURRENCY

#define MAX_IO_CONCURRENCY   1000

Definition at line 181 of file bufmgr.h.

◆ P_NEW

#define P_NEW   InvalidBlockNumber /* grow the file to get a new page */

Definition at line 184 of file bufmgr.h.

◆ READ_BUFFERS_ISSUE_ADVICE

#define READ_BUFFERS_ISSUE_ADVICE   (1 << 1)

Definition at line 113 of file bufmgr.h.

◆ READ_BUFFERS_ZERO_ON_ERROR

#define READ_BUFFERS_ZERO_ON_ERROR   (1 << 0)

Definition at line 111 of file bufmgr.h.

◆ RelationGetNumberOfBlocks

#define RelationGetNumberOfBlocks (   reln)     RelationGetNumberOfBlocksInFork(reln, MAIN_FORKNUM)

Definition at line 273 of file bufmgr.h.

Typedef Documentation

◆ Block

typedef void* Block

Definition at line 25 of file bufmgr.h.

◆ BufferAccessStrategyType

◆ BufferManagerRelation

◆ ExtendBufferedFlags

◆ PrefetchBufferResult

◆ ReadBuffersOperation

Definition at line 25 of file bufmgr.h.

Enumeration Type Documentation

◆ BufferAccessStrategyType

Enumerator
BAS_NORMAL 
BAS_BULKREAD 
BAS_BULKWRITE 
BAS_VACUUM 

Definition at line 33 of file bufmgr.h.

34 {
35  BAS_NORMAL, /* Normal random access */
36  BAS_BULKREAD, /* Large read-only scan (hint bit updates are
37  * ok) */
38  BAS_BULKWRITE, /* Large multi-block write (e.g. COPY IN) */
39  BAS_VACUUM, /* VACUUM */
BufferAccessStrategyType
Definition: bufmgr.h:34
@ BAS_BULKREAD
Definition: bufmgr.h:36
@ BAS_NORMAL
Definition: bufmgr.h:35
@ BAS_VACUUM
Definition: bufmgr.h:39
@ BAS_BULKWRITE
Definition: bufmgr.h:38

◆ ExtendBufferedFlags

Enumerator
EB_SKIP_EXTENSION_LOCK 
EB_PERFORMING_RECOVERY 
EB_CREATE_FORK_IF_NEEDED 
EB_LOCK_FIRST 
EB_CLEAR_SIZE_CACHE 
EB_LOCK_TARGET 

Definition at line 67 of file bufmgr.h.

68 {
69  /*
70  * Don't acquire extension lock. This is safe only if the relation isn't
71  * shared, an access exclusive lock is held or if this is the startup
72  * process.
73  */
74  EB_SKIP_EXTENSION_LOCK = (1 << 0),
75 
76  /* Is this extension part of recovery? */
77  EB_PERFORMING_RECOVERY = (1 << 1),
78 
79  /*
80  * Should the fork be created if it does not currently exist? This likely
81  * only ever makes sense for relation forks.
82  */
83  EB_CREATE_FORK_IF_NEEDED = (1 << 2),
84 
85  /* Should the first (possibly only) return buffer be returned locked? */
86  EB_LOCK_FIRST = (1 << 3),
87 
88  /* Should the smgr size cache be cleared? */
89  EB_CLEAR_SIZE_CACHE = (1 << 4),
90 
91  /* internal flags follow */
92  EB_LOCK_TARGET = (1 << 5),
ExtendBufferedFlags
Definition: bufmgr.h:68
@ EB_LOCK_TARGET
Definition: bufmgr.h:92
@ EB_CLEAR_SIZE_CACHE
Definition: bufmgr.h:89
@ EB_PERFORMING_RECOVERY
Definition: bufmgr.h:77
@ EB_CREATE_FORK_IF_NEEDED
Definition: bufmgr.h:83
@ EB_SKIP_EXTENSION_LOCK
Definition: bufmgr.h:74
@ EB_LOCK_FIRST
Definition: bufmgr.h:86

◆ ReadBufferMode

Enumerator
RBM_NORMAL 
RBM_ZERO_AND_LOCK 
RBM_ZERO_AND_CLEANUP_LOCK 
RBM_ZERO_ON_ERROR 
RBM_NORMAL_NO_LOG 

Definition at line 43 of file bufmgr.h.

44 {
45  RBM_NORMAL, /* Normal read */
46  RBM_ZERO_AND_LOCK, /* Don't read from disk, caller will
47  * initialize. Also locks the page. */
48  RBM_ZERO_AND_CLEANUP_LOCK, /* Like RBM_ZERO_AND_LOCK, but locks the page
49  * in "cleanup" mode */
50  RBM_ZERO_ON_ERROR, /* Read, but return an all-zeros page on error */
51  RBM_NORMAL_NO_LOG, /* Don't log page as invalid during WAL
52  * replay; otherwise same as RBM_NORMAL */
ReadBufferMode
Definition: bufmgr.h:44
@ RBM_ZERO_ON_ERROR
Definition: bufmgr.h:50
@ RBM_ZERO_AND_CLEANUP_LOCK
Definition: bufmgr.h:48
@ RBM_ZERO_AND_LOCK
Definition: bufmgr.h:46
@ RBM_NORMAL
Definition: bufmgr.h:45
@ RBM_NORMAL_NO_LOG
Definition: bufmgr.h:51

Function Documentation

◆ AtEOXact_Buffers()

void AtEOXact_Buffers ( bool  isCommit)

Definition at line 3541 of file bufmgr.c.

3542 {
3544 
3545  AtEOXact_LocalBuffers(isCommit);
3546 
3548 }
static void CheckForBufferLeaks(void)
Definition: bufmgr.c:3601
static int32 PrivateRefCountOverflowed
Definition: bufmgr.c:210
#define Assert(condition)
Definition: c.h:858
void AtEOXact_LocalBuffers(bool isCommit)
Definition: localbuf.c:819

References Assert, AtEOXact_LocalBuffers(), CheckForBufferLeaks(), and PrivateRefCountOverflowed.

Referenced by AbortTransaction(), BackgroundWriterMain(), CheckpointerMain(), CommitTransaction(), PrepareTransaction(), and WalWriterMain().

◆ AtProcExit_LocalBuffers()

void AtProcExit_LocalBuffers ( void  )

Definition at line 830 of file localbuf.c.

831 {
832  /*
833  * We shouldn't be holding any remaining pins; if we are, and assertions
834  * aren't enabled, we'll fail later in DropRelationBuffers while trying to
835  * drop the temp rels.
836  */
838 }
static void CheckForLocalBufferLeaks(void)
Definition: localbuf.c:786

References CheckForLocalBufferLeaks().

Referenced by AtProcExit_Buffers().

◆ BgBufferSync()

bool BgBufferSync ( struct WritebackContext * wb_context)

Definition at line 3170 of file bufmgr.c.

3171 {
3172  /* info obtained from freelist.c */
3173  int strategy_buf_id;
3174  uint32 strategy_passes;
3175  uint32 recent_alloc;
3176 
3177  /*
3178  * Information saved between calls so we can determine the strategy
3179  * point's advance rate and avoid scanning already-cleaned buffers.
3180  */
3181  static bool saved_info_valid = false;
3182  static int prev_strategy_buf_id;
3183  static uint32 prev_strategy_passes;
3184  static int next_to_clean;
3185  static uint32 next_passes;
3186 
3187  /* Moving averages of allocation rate and clean-buffer density */
3188  static float smoothed_alloc = 0;
3189  static float smoothed_density = 10.0;
3190 
3191  /* Potentially these could be tunables, but for now, not */
3192  float smoothing_samples = 16;
3193  float scan_whole_pool_milliseconds = 120000.0;
3194 
3195  /* Used to compute how far we scan ahead */
3196  long strategy_delta;
3197  int bufs_to_lap;
3198  int bufs_ahead;
3199  float scans_per_alloc;
3200  int reusable_buffers_est;
3201  int upcoming_alloc_est;
3202  int min_scan_buffers;
3203 
3204  /* Variables for the scanning loop proper */
3205  int num_to_scan;
3206  int num_written;
3207  int reusable_buffers;
3208 
3209  /* Variables for final smoothed_density update */
3210  long new_strategy_delta;
3211  uint32 new_recent_alloc;
3212 
3213  /*
3214  * Find out where the freelist clock sweep currently is, and how many
3215  * buffer allocations have happened since our last call.
3216  */
3217  strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
3218 
3219  /* Report buffer alloc counts to pgstat */
3220  PendingBgWriterStats.buf_alloc += recent_alloc;
3221 
3222  /*
3223  * If we're not running the LRU scan, just stop after doing the stats
3224  * stuff. We mark the saved state invalid so that we can recover sanely
3225  * if LRU scan is turned back on later.
3226  */
3227  if (bgwriter_lru_maxpages <= 0)
3228  {
3229  saved_info_valid = false;
3230  return true;
3231  }
3232 
3233  /*
3234  * Compute strategy_delta = how many buffers have been scanned by the
3235  * clock sweep since last time. If first time through, assume none. Then
3236  * see if we are still ahead of the clock sweep, and if so, how many
3237  * buffers we could scan before we'd catch up with it and "lap" it. Note:
3238  * weird-looking coding of xxx_passes comparisons are to avoid bogus
3239  * behavior when the passes counts wrap around.
3240  */
3241  if (saved_info_valid)
3242  {
3243  int32 passes_delta = strategy_passes - prev_strategy_passes;
3244 
3245  strategy_delta = strategy_buf_id - prev_strategy_buf_id;
3246  strategy_delta += (long) passes_delta * NBuffers;
3247 
3248  Assert(strategy_delta >= 0);
3249 
3250  if ((int32) (next_passes - strategy_passes) > 0)
3251  {
3252  /* we're one pass ahead of the strategy point */
3253  bufs_to_lap = strategy_buf_id - next_to_clean;
3254 #ifdef BGW_DEBUG
3255  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
3256  next_passes, next_to_clean,
3257  strategy_passes, strategy_buf_id,
3258  strategy_delta, bufs_to_lap);
3259 #endif
3260  }
3261  else if (next_passes == strategy_passes &&
3262  next_to_clean >= strategy_buf_id)
3263  {
3264  /* on same pass, but ahead or at least not behind */
3265  bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
3266 #ifdef BGW_DEBUG
3267  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
3268  next_passes, next_to_clean,
3269  strategy_passes, strategy_buf_id,
3270  strategy_delta, bufs_to_lap);
3271 #endif
3272  }
3273  else
3274  {
3275  /*
3276  * We're behind, so skip forward to the strategy point and start
3277  * cleaning from there.
3278  */
3279 #ifdef BGW_DEBUG
3280  elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
3281  next_passes, next_to_clean,
3282  strategy_passes, strategy_buf_id,
3283  strategy_delta);
3284 #endif
3285  next_to_clean = strategy_buf_id;
3286  next_passes = strategy_passes;
3287  bufs_to_lap = NBuffers;
3288  }
3289  }
3290  else
3291  {
3292  /*
3293  * Initializing at startup or after LRU scanning had been off. Always
3294  * start at the strategy point.
3295  */
3296 #ifdef BGW_DEBUG
3297  elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
3298  strategy_passes, strategy_buf_id);
3299 #endif
3300  strategy_delta = 0;
3301  next_to_clean = strategy_buf_id;
3302  next_passes = strategy_passes;
3303  bufs_to_lap = NBuffers;
3304  }
3305 
3306  /* Update saved info for next time */
3307  prev_strategy_buf_id = strategy_buf_id;
3308  prev_strategy_passes = strategy_passes;
3309  saved_info_valid = true;
3310 
3311  /*
3312  * Compute how many buffers had to be scanned for each new allocation, ie,
3313  * 1/density of reusable buffers, and track a moving average of that.
3314  *
3315  * If the strategy point didn't move, we don't update the density estimate
3316  */
3317  if (strategy_delta > 0 && recent_alloc > 0)
3318  {
3319  scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
3320  smoothed_density += (scans_per_alloc - smoothed_density) /
3321  smoothing_samples;
3322  }
3323 
3324  /*
3325  * Estimate how many reusable buffers there are between the current
3326  * strategy point and where we've scanned ahead to, based on the smoothed
3327  * density estimate.
3328  */
3329  bufs_ahead = NBuffers - bufs_to_lap;
3330  reusable_buffers_est = (float) bufs_ahead / smoothed_density;
3331 
3332  /*
3333  * Track a moving average of recent buffer allocations. Here, rather than
3334  * a true average we want a fast-attack, slow-decline behavior: we
3335  * immediately follow any increase.
3336  */
3337  if (smoothed_alloc <= (float) recent_alloc)
3338  smoothed_alloc = recent_alloc;
3339  else
3340  smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
3341  smoothing_samples;
3342 
3343  /* Scale the estimate by a GUC to allow more aggressive tuning. */
3344  upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
3345 
3346  /*
3347  * If recent_alloc remains at zero for many cycles, smoothed_alloc will
3348  * eventually underflow to zero, and the underflows produce annoying
3349  * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
3350  * zero, there's no point in tracking smaller and smaller values of
3351  * smoothed_alloc, so just reset it to exactly zero to avoid this
3352  * syndrome. It will pop back up as soon as recent_alloc increases.
3353  */
3354  if (upcoming_alloc_est == 0)
3355  smoothed_alloc = 0;
3356 
3357  /*
3358  * Even in cases where there's been little or no buffer allocation
3359  * activity, we want to make a small amount of progress through the buffer
3360  * cache so that as many reusable buffers as possible are clean after an
3361  * idle period.
3362  *
3363  * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
3364  * the BGW will be called during the scan_whole_pool time; slice the
3365  * buffer pool into that many sections.
3366  */
3367  min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
3368 
3369  if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
3370  {
3371 #ifdef BGW_DEBUG
3372  elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
3373  upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
3374 #endif
3375  upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
3376  }
3377 
3378  /*
3379  * Now write out dirty reusable buffers, working forward from the
3380  * next_to_clean point, until we have lapped the strategy scan, or cleaned
3381  * enough buffers to match our estimate of the next cycle's allocation
3382  * requirements, or hit the bgwriter_lru_maxpages limit.
3383  */
3384 
3385  num_to_scan = bufs_to_lap;
3386  num_written = 0;
3387  reusable_buffers = reusable_buffers_est;
3388 
3389  /* Execute the LRU scan */
3390  while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
3391  {
3392  int sync_state = SyncOneBuffer(next_to_clean, true,
3393  wb_context);
3394 
3395  if (++next_to_clean >= NBuffers)
3396  {
3397  next_to_clean = 0;
3398  next_passes++;
3399  }
3400  num_to_scan--;
3401 
3402  if (sync_state & BUF_WRITTEN)
3403  {
3404  reusable_buffers++;
3405  if (++num_written >= bgwriter_lru_maxpages)
3406  {
3408  break;
3409  }
3410  }
3411  else if (sync_state & BUF_REUSABLE)
3412  reusable_buffers++;
3413  }
3414 
3415  PendingBgWriterStats.buf_written_clean += num_written;
3416 
3417 #ifdef BGW_DEBUG
3418  elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
3419  recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
3420  smoothed_density, reusable_buffers_est, upcoming_alloc_est,
3421  bufs_to_lap - num_to_scan,
3422  num_written,
3423  reusable_buffers - reusable_buffers_est);
3424 #endif
3425 
3426  /*
3427  * Consider the above scan as being like a new allocation scan.
3428  * Characterize its density and update the smoothed one based on it. This
3429  * effectively halves the moving average period in cases where both the
3430  * strategy and the background writer are doing some useful scanning,
3431  * which is helpful because a long memory isn't as desirable on the
3432  * density estimates.
3433  */
3434  new_strategy_delta = bufs_to_lap - num_to_scan;
3435  new_recent_alloc = reusable_buffers - reusable_buffers_est;
3436  if (new_strategy_delta > 0 && new_recent_alloc > 0)
3437  {
3438  scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
3439  smoothed_density += (scans_per_alloc - smoothed_density) /
3440  smoothing_samples;
3441 
3442 #ifdef BGW_DEBUG
3443  elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
3444  new_recent_alloc, new_strategy_delta,
3445  scans_per_alloc, smoothed_density);
3446 #endif
3447  }
3448 
3449  /* Return true if OK to hibernate */
3450  return (bufs_to_lap == 0 && recent_alloc == 0);
3451 }
int BgWriterDelay
Definition: bgwriter.c:57
#define BUF_REUSABLE
Definition: bufmgr.c:77
double bgwriter_lru_multiplier
Definition: bufmgr.c:142
static int SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
Definition: bufmgr.c:3468
int bgwriter_lru_maxpages
Definition: bufmgr.c:141
#define BUF_WRITTEN
Definition: bufmgr.c:76
unsigned int uint32
Definition: c.h:506
signed int int32
Definition: c.h:494
#define DEBUG2
Definition: elog.h:29
#define DEBUG1
Definition: elog.h:30
#define elog(elevel,...)
Definition: elog.h:225
int StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
Definition: freelist.c:394
int NBuffers
Definition: globals.c:141
PgStat_BgWriterStats PendingBgWriterStats
PgStat_Counter buf_written_clean
Definition: pgstat.h:287
PgStat_Counter maxwritten_clean
Definition: pgstat.h:288
PgStat_Counter buf_alloc
Definition: pgstat.h:289

References Assert, bgwriter_lru_maxpages, bgwriter_lru_multiplier, BgWriterDelay, PgStat_BgWriterStats::buf_alloc, BUF_REUSABLE, BUF_WRITTEN, PgStat_BgWriterStats::buf_written_clean, DEBUG1, DEBUG2, elog, PgStat_BgWriterStats::maxwritten_clean, NBuffers, PendingBgWriterStats, StrategySyncStart(), and SyncOneBuffer().

Referenced by BackgroundWriterMain().

◆ BufferGetBlock()

static Block BufferGetBlock ( Buffer  buffer)
inline static

Definition at line 367 of file bufmgr.h.

368 {
369  Assert(BufferIsValid(buffer));
370 
371  if (BufferIsLocal(buffer))
372  return LocalBufferBlockPointers[-buffer - 1];
373  else
374  return (Block) (BufferBlocks + ((Size) (buffer - 1)) * BLCKSZ);
375 }
#define BufferIsLocal(buffer)
Definition: buf.h:37
PGDLLIMPORT Block * LocalBufferBlockPointers
Definition: localbuf.c:45
void * Block
Definition: bufmgr.h:25
PGDLLIMPORT char * BufferBlocks
Definition: buf_init.c:22
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:351
size_t Size
Definition: c.h:605

References Assert, BufferBlocks, BufferIsLocal, BufferIsValid(), and LocalBufferBlockPointers.

Referenced by BufferGetPage(), WaitReadBuffers(), and XLogSaveBufferForHint().

◆ BufferGetBlockNumber()

BlockNumber BufferGetBlockNumber ( Buffer  buffer)

Definition at line 3706 of file bufmgr.c.

3707 {
3708  BufferDesc *bufHdr;
3709 
3710  Assert(BufferIsPinned(buffer));
3711 
3712  if (BufferIsLocal(buffer))
3713  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
3714  else
3715  bufHdr = GetBufferDescriptor(buffer - 1);
3716 
3717  /* pinned, so OK to read tag without spinlock */
3718  return bufHdr->tag.blockNum;
3719 }
static BufferDesc * GetLocalBufferDescriptor(uint32 id)
static BufferDesc * GetBufferDescriptor(uint32 id)
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:474
BufferTag tag
BlockNumber blockNum
Definition: buf_internals.h:98

References Assert, buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), GetLocalBufferDescriptor(), and BufferDesc::tag.

Referenced by _bt_binsrch_insert(), _bt_bottomupdel_pass(), _bt_check_unique(), _bt_checkpage(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_doinsert(), _bt_endpoint(), _bt_finish_split(), _bt_first(), _bt_getroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newlevel(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_search(), _bt_simpledel_pass(), _bt_split(), _bt_unlink_halfdead_page(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_first(), _hash_freeovflpage(), _hash_getnewbuf(), _hash_readnext(), _hash_readpage(), _hash_splitbucket(), allocNewBuffer(), blinsert(), BloomInitMetapage(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_xlog_insert_update(), brinbuild(), brinGetTupleForHeapBlock(), collect_corrupt_items(), collectMatchBitmap(), createPostingTree(), dataBeginPlaceToPageLeaf(), dataPrepareDownlink(), doPickSplit(), entryPrepareDownlink(), fill_seq_fork_with_data(), ginEntryInsert(), ginFindParents(), ginFinishSplit(), ginPlaceToPage(), ginRedoDeleteListPages(), ginRedoUpdateMetapage(), ginScanToDelete(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistformdownlink(), gistinserttuples(), gistMemorizeAllDownlinks(), gistplacetopage(), gistRelocateBuildBuffersOnSplit(), gistScanPage(), hash_xlog_add_ovfl_page(), heap_delete(), heap_fetch_next_buffer(), heap_hot_search_buffer(), heap_insert(), heap_multi_insert(), heap_page_is_all_visible(), heap_page_prune_and_freeze(), heap_prepare_pagescan(), heap_update(), heap_xlog_confirm(), heap_xlog_lock(), heapam_scan_analyze_next_block(), heapgettup(), heapgettup_pagemode(), index_compute_xid_horizon_for_tuples(), lazy_scan_noprune(), lazy_scan_prune(), makeSublist(), moveLeafs(), moveRightIfItNeeded(), pgstathashindex(), ReadBufferBI(), RelationAddBlocks(), RelationCopyStorageUsingBuffer(), 
RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_get_buffer(), revmap_physical_extend(), ScanSourceDatabasePgClassPage(), spgAddNodeAction(), spgbuild(), spgdoinsert(), SpGistSetLastUsedPage(), spgSplitNodeAction(), spgWalk(), startScanEntry(), terminate_brin_buildstate(), vacuumLeafPage(), visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), visibilitymap_set(), and WaitReadBuffers().

◆ BufferGetLSNAtomic()

XLogRecPtr BufferGetLSNAtomic ( Buffer  buffer)

Definition at line 3967 of file bufmgr.c.

3968 {
3969  BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
3970  char *page = BufferGetPage(buffer);
3971  XLogRecPtr lsn;
3972  uint32 buf_state;
3973 
3974  /*
3975  * If we don't need locking for correctness, fastpath out.
3976  */
3977  if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
3978  return PageGetLSN(page);
3979 
3980  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3981  Assert(BufferIsValid(buffer));
3982  Assert(BufferIsPinned(buffer));
3983 
3984  buf_state = LockBufHdr(bufHdr);
3985  lsn = PageGetLSN(page);
3986  UnlockBufHdr(bufHdr, buf_state);
3987 
3988  return lsn;
3989 }
static void UnlockBufHdr(BufferDesc *desc, uint32 buf_state)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:5743
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:400
static XLogRecPtr PageGetLSN(const char *page)
Definition: bufpage.h:386
#define XLogHintBitIsNeeded()
Definition: xlog.h:120
uint64 XLogRecPtr
Definition: xlogdefs.h:21

References Assert, PrivateRefCountEntry::buffer, BufferGetPage(), BufferIsLocal, BufferIsPinned, BufferIsValid(), GetBufferDescriptor(), LockBufHdr(), PageGetLSN(), UnlockBufHdr(), and XLogHintBitIsNeeded.

Referenced by _bt_killitems(), _bt_readpage(), gistdoinsert(), gistFindPath(), gistkillitems(), gistScanPage(), SetHintBits(), and XLogSaveBufferForHint().

◆ BufferGetPage()

static Page BufferGetPage ( Buffer  buffer)
inline static

Definition at line 400 of file bufmgr.h.

401 {
402  return (Page) BufferGetBlock(buffer);
403 }
static Block BufferGetBlock(Buffer buffer)
Definition: bufmgr.h:367
Pointer Page
Definition: bufpage.h:81

References BufferGetBlock().

Referenced by _bt_allocbuf(), _bt_binsrch(), _bt_binsrch_insert(), _bt_bottomupdel_pass(), _bt_check_unique(), _bt_checkpage(), _bt_clear_incomplete_split(), _bt_conditionallockbuf(), _bt_dedup_pass(), _bt_delete_or_dedup_one_page(), _bt_delitems_delete(), _bt_delitems_delete_check(), _bt_delitems_vacuum(), _bt_endpoint(), _bt_findinsertloc(), _bt_finish_split(), _bt_get_endpoint(), _bt_getmeta(), _bt_getroot(), _bt_getstackbuf(), _bt_gettrueroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_killitems(), _bt_leftsib_splitflag(), _bt_lock_subtree_parent(), _bt_lockbuf(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newlevel(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_rightsib_halfdeadflag(), _bt_search(), _bt_search_insert(), _bt_set_cleanup_info(), _bt_simpledel_pass(), _bt_split(), _bt_stepright(), _bt_unlink_halfdead_page(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _bt_vacuum_needs_cleanup(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_get_newblock_from_oldbucket(), _hash_get_oldblock_from_newbucket(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_getinitbuf(), _hash_getnewbuf(), _hash_init(), _hash_init_metabuffer(), _hash_initbitmapbuffer(), _hash_initbuf(), _hash_kill_items(), _hash_pgaddmultitup(), _hash_pgaddtup(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), allocNewBuffer(), blgetbitmap(), blinsert(), BloomNewBuffer(), blvacuumcleanup(), brin_can_do_samepage_update(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_start_evacuating_page(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), 
brinbuildempty(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinSetHeapBlockItemptr(), bt_metap(), bt_page_items_internal(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), btvacuumpage(), BufferGetLSNAtomic(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), collectMatchesForHeapRow(), count_nondeletable_pages(), createPostingTree(), dataBeginPlaceToPage(), dataBeginPlaceToPageInternal(), dataBeginPlaceToPageLeaf(), dataExecPlaceToPage(), dataExecPlaceToPageInternal(), dataLocateItem(), dataPlaceToPageLeafRecompress(), dataPrepareDownlink(), dataSplitPageInternal(), do_setval(), doPickSplit(), entryExecPlaceToPage(), entryIsEnoughSpace(), entryLoadMoreItems(), entryLocateEntry(), entryLocateLeafEntry(), entryPrepareDownlink(), entrySplitPage(), fill_seq_fork_with_data(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), generic_redo(), GenericXLogFinish(), GenericXLogRegisterBuffer(), get_raw_page_internal(), GetBTPageStatistics(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginbulkdelete(), ginDeletePage(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishOldSplit(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), GinInitBuffer(), GinInitMetabuffer(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertData(), ginRedoInsertEntry(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumEntryPage(), 
ginVacuumPostingTreeLeaf(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), GISTInitBuffer(), gistkillitems(), gistMemorizeAllDownlinks(), gistNewBuffer(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hash_bitmap_info(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_freeze_prepared_tuples(), heap_get_latest_tid(), heap_hot_search_buffer(), heap_index_delete_tuples(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_is_all_visible(), heap_page_prune_and_freeze(), heap_page_prune_execute(), heap_page_prune_opt(), heap_pre_freeze_checks(), heap_prepare_pagescan(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune_freeze(), heap_xlog_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_validate_scan(), heapam_scan_analyze_next_tuple(), heapam_scan_bitmap_next_block(), heapam_scan_bitmap_next_tuple(), heapam_scan_sample_next_tuple(), heapgettup_continue_page(), heapgettup_pagemode(), heapgettup_start_page(), 
index_compute_xid_horizon_for_tuples(), initBloomState(), lazy_scan_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), log_heap_prune_and_freeze(), log_heap_update(), log_newpage_buffer(), log_newpage_range(), log_split_page(), MarkBufferDirtyHint(), moveLeafs(), moveRightIfItNeeded(), nextval_internal(), palloc_btree_page(), pg_visibility(), pgstat_btree_page(), pgstat_gist_page(), pgstat_hash_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationAddBlocks(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), ScanSourceDatabasePgClass(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistInitBuffer(), SpGistNewBuffer(), SpGistSetLastUsedPage(), SpGistUpdateMetaPage(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), terminate_brin_buildstate(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), verify_heapam(), verifyBackupPageConsistency(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), writeListPage(), XLogCheckBufferNeedsBackup(), XLogReadBufferExtended(), XLogReadBufferForRedoExtended(), XLogRecordPageWithFreeSpace(), XLogRegisterBuffer(), XLogSaveBufferForHint(), xlogVacuumPage(), and ZeroAndLockBuffer().

◆ BufferGetPageSize()

◆ BufferGetTag()

void BufferGetTag ( Buffer  buffer,
RelFileLocator rlocator,
ForkNumber forknum,
BlockNumber blknum 
)

Definition at line 3727 of file bufmgr.c.

3729 {
3730  BufferDesc *bufHdr;
3731 
3732  /* Do the same checks as BufferGetBlockNumber. */
3733  Assert(BufferIsPinned(buffer));
3734 
3735  if (BufferIsLocal(buffer))
3736  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
3737  else
3738  bufHdr = GetBufferDescriptor(buffer - 1);
3739 
3740  /* pinned, so OK to read tag without spinlock */
3741  *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
3742  *forknum = BufTagGetForkNum(&bufHdr->tag);
3743  *blknum = bufHdr->tag.blockNum;
3744 }
static ForkNumber BufTagGetForkNum(const BufferTag *tag)
static RelFileLocator BufTagGetRelFileLocator(const BufferTag *tag)

References Assert, buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufTagGetForkNum(), BufTagGetRelFileLocator(), GetBufferDescriptor(), GetLocalBufferDescriptor(), and BufferDesc::tag.

Referenced by fsm_search_avail(), ginRedoInsertEntry(), log_newpage_buffer(), ResolveCminCmaxDuringDecoding(), XLogRegisterBuffer(), and XLogSaveBufferForHint().

◆ BufferIsDirty()

bool BufferIsDirty ( Buffer  buffer)

Definition at line 2482 of file bufmgr.c.

2483 {
2484  BufferDesc *bufHdr;
2485 
2486  if (BufferIsLocal(buffer))
2487  {
2488  int bufid = -buffer - 1;
2489 
2490  bufHdr = GetLocalBufferDescriptor(bufid);
2491  }
2492  else
2493  {
2494  bufHdr = GetBufferDescriptor(buffer - 1);
2495  }
2496 
 2497  Assert(BufferIsPinned(buffer));
 2498  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
 2499  LW_EXCLUSIVE));
2500 
2501  return pg_atomic_read_u32(&bufHdr->state) & BM_DIRTY;
2502 }
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:239
#define BM_DIRTY
Definition: buf_internals.h:61
static LWLock * BufferDescriptorGetContentLock(const BufferDesc *bdesc)
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1937
@ LW_EXCLUSIVE
Definition: lwlock.h:114
pg_atomic_uint32 state

References Assert, BM_DIRTY, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), GetLocalBufferDescriptor(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), pg_atomic_read_u32(), and BufferDesc::state.

Referenced by XLogRegisterBuffer().

◆ BufferIsExclusiveLocked()

bool BufferIsExclusiveLocked ( Buffer  buffer)

Definition at line 2453 of file bufmgr.c.

2454 {
2455  BufferDesc *bufHdr;
2456 
2457  if (BufferIsLocal(buffer))
2458  {
2459  int bufid = -buffer - 1;
2460 
2461  bufHdr = GetLocalBufferDescriptor(bufid);
2462  }
2463  else
2464  {
2465  bufHdr = GetBufferDescriptor(buffer - 1);
2466  }
2467 
 2468  Assert(BufferIsPinned(buffer));
 2469  return LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
 2470  LW_EXCLUSIVE);
2471 }

References Assert, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), GetLocalBufferDescriptor(), LW_EXCLUSIVE, and LWLockHeldByMeInMode().

Referenced by XLogRegisterBuffer().

◆ BufferIsPermanent()

bool BufferIsPermanent ( Buffer  buffer)

Definition at line 3937 of file bufmgr.c.

3938 {
3939  BufferDesc *bufHdr;
3940 
3941  /* Local buffers are used only for temp relations. */
3942  if (BufferIsLocal(buffer))
3943  return false;
3944 
3945  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3946  Assert(BufferIsValid(buffer));
3947  Assert(BufferIsPinned(buffer));
3948 
3949  /*
3950  * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
3951  * need not bother with the buffer header spinlock. Even if someone else
3952  * changes the buffer header state while we're doing this, the state is
3953  * changed atomically, so we'll read the old value or the new value, but
3954  * not random garbage.
3955  */
3956  bufHdr = GetBufferDescriptor(buffer - 1);
3957  return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
3958 }
#define BM_PERMANENT
Definition: buf_internals.h:69

References Assert, BM_PERMANENT, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufferIsValid(), GetBufferDescriptor(), pg_atomic_read_u32(), and BufferDesc::state.

Referenced by SetHintBits().

◆ BufferIsValid()

static bool BufferIsValid ( Buffer  bufnum)
inlinestatic

Definition at line 351 of file bufmgr.h.

352 {
353  Assert(bufnum <= NBuffers);
354  Assert(bufnum >= -NLocBuffer);
355 
356  return bufnum != InvalidBuffer;
357 }
#define InvalidBuffer
Definition: buf.h:25
PGDLLIMPORT int NBuffers
Definition: globals.c:141
PGDLLIMPORT int NLocBuffer
Definition: localbuf.c:42

References Assert, InvalidBuffer, NBuffers, and NLocBuffer.

Referenced by _bt_clear_incomplete_split(), _bt_endpoint(), _bt_first(), _bt_get_endpoint(), _bt_insertonpg(), _bt_readpage(), _bt_relandgetbuf(), _bt_search(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_dropscanbuf(), _hash_freeovflpage(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_readnext(), _hash_readpage(), _hash_readprev(), autoprewarm_database_main(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinGetTupleForHeapBlock(), brininsert(), brinsummarize(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), BufferGetBlock(), BufferGetLSNAtomic(), BufferGetPageSize(), BufferIsPermanent(), ConditionalLockBufferForCleanup(), DebugPrintBufferRefcount(), doPickSplit(), entryGetItem(), entryLoadMoreItems(), ExecStoreBufferHeapTuple(), ExecStorePinnedBufferHeapTuple(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_vacuum_page(), generic_redo(), GetPrivateRefCount(), GetPrivateRefCountEntry(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageUpdateRecord(), gistXLogSplit(), gistXLogUpdate(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), 
heap_endscan(), heap_fetch_next_buffer(), heap_index_delete_tuples(), heap_lock_tuple(), heap_rescan(), heap_update(), heap_vac_scan_next_block(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune_freeze(), heap_xlog_update(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_analyze_next_block(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapam_tuple_satisfies_snapshot(), heapgettup(), heapgettup_continue_page(), heapgettup_pagemode(), heapgettup_start_page(), IsBufferCleanupOK(), lazy_scan_heap(), lazy_vacuum_heap_rel(), log_heap_visible(), MarkBufferDirty(), MarkBufferDirtyHint(), read_stream_next_buffer(), ReadRecentBuffer(), ReleaseAndReadBuffer(), ReleaseBuffer(), ResOwnerReleaseBufferPin(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), statapprox_heap(), tts_buffer_heap_clear(), tts_buffer_heap_copyslot(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), verifyBackupPageConsistency(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), visibilitymap_prepare_truncate(), visibilitymap_set(), XLogPrefetcherNextBlock(), XLogReadBufferExtended(), and XLogReadBufferForRedoExtended().

◆ BufferManagerShmemInit()

void BufferManagerShmemInit ( void  )

Definition at line 68 of file buf_init.c.

69 {
70  bool foundBufs,
71  foundDescs,
72  foundIOCV,
73  foundBufCkpt;
74 
 75  /* Align descriptors to a cacheline boundary. */
 76  BufferDescriptors = (BufferDescPadded *)
 77  ShmemInitStruct("Buffer Descriptors",
 78  NBuffers * sizeof(BufferDescPadded),
 79  &foundDescs);
80 
 81  /* Align buffer pool on IO page size boundary. */
 82  BufferBlocks = (char *)
 83  TYPEALIGN(PG_IO_ALIGN_SIZE,
 84  ShmemInitStruct("Buffer Blocks",
 85  NBuffers * (Size) BLCKSZ + PG_IO_ALIGN_SIZE,
 86  &foundBufs));
87 
 88  /* Align condition variables to cacheline boundary. */
 89  BufferIOCVArray = (ConditionVariableMinimallyPadded *)
 90  ShmemInitStruct("Buffer IO Condition Variables",
 91  NBuffers * sizeof(ConditionVariableMinimallyPadded),
 92  &foundIOCV);
93 
94  /*
95  * The array used to sort to-be-checkpointed buffer ids is located in
96  * shared memory, to avoid having to allocate significant amounts of
97  * memory at runtime. As that'd be in the middle of a checkpoint, or when
98  * the checkpointer is restarted, memory allocation failures would be
99  * painful.
100  */
 101  CkptBufferIds = (CkptSortItem *)
 102  ShmemInitStruct("Checkpoint BufferIds",
 103  NBuffers * sizeof(CkptSortItem), &foundBufCkpt);
104 
105  if (foundDescs || foundBufs || foundIOCV || foundBufCkpt)
106  {
107  /* should find all of these, or none of them */
108  Assert(foundDescs && foundBufs && foundIOCV && foundBufCkpt);
109  /* note: this path is only taken in EXEC_BACKEND case */
110  }
111  else
112  {
113  int i;
114 
115  /*
116  * Initialize all the buffer headers.
117  */
118  for (i = 0; i < NBuffers; i++)
 119  {
 120  BufferDesc *buf = GetBufferDescriptor(i);
 121 
122  ClearBufferTag(&buf->tag);
123 
124  pg_atomic_init_u32(&buf->state, 0);
125  buf->wait_backend_pgprocno = INVALID_PROC_NUMBER;
126 
127  buf->buf_id = i;
128 
129  /*
130  * Initially link all the buffers together as unused. Subsequent
131  * management of this list is done by freelist.c.
132  */
133  buf->freeNext = i + 1;
134 
137 
139  }
140 
 141  /* Correct last entry of linked list */
 142  GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST;
 143  }
144 
145  /* Init other shared buffer-management stuff */
146  StrategyInitialize(!foundDescs);
147 
 148  /* Initialize per-backend file flush context */
 149  WritebackContextInit(&BackendWritebackContext,
 150  &backend_flush_after);
 151 }
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:221
CkptSortItem * CkptBufferIds
Definition: buf_init.c:25
char * BufferBlocks
Definition: buf_init.c:22
WritebackContext BackendWritebackContext
Definition: buf_init.c:24
ConditionVariableMinimallyPadded * BufferIOCVArray
Definition: buf_init.c:23
BufferDescPadded * BufferDescriptors
Definition: buf_init.c:21
#define FREENEXT_END_OF_LIST
static void ClearBufferTag(BufferTag *tag)
static ConditionVariable * BufferDescriptorGetIOCV(const BufferDesc *bdesc)
void WritebackContextInit(WritebackContext *context, int *max_pending)
Definition: bufmgr.c:5885
int backend_flush_after
Definition: bufmgr.c:173
#define TYPEALIGN(ALIGNVAL, LEN)
Definition: c.h:804
void ConditionVariableInit(ConditionVariable *cv)
void StrategyInitialize(bool init)
Definition: freelist.c:474
int i
Definition: isn.c:73
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:707
@ LWTRANCHE_BUFFER_CONTENT
Definition: lwlock.h:187
#define PG_IO_ALIGN_SIZE
static char * buf
Definition: pg_test_fsync.c:73
#define INVALID_PROC_NUMBER
Definition: procnumber.h:26
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:387

References Assert, backend_flush_after, BackendWritebackContext, buf, BufferBlocks, BufferDescriptorGetContentLock(), BufferDescriptorGetIOCV(), BufferDescriptors, BufferIOCVArray, CkptBufferIds, ClearBufferTag(), ConditionVariableInit(), BufferDesc::freeNext, FREENEXT_END_OF_LIST, GetBufferDescriptor(), i, INVALID_PROC_NUMBER, LWLockInitialize(), LWTRANCHE_BUFFER_CONTENT, NBuffers, pg_atomic_init_u32(), PG_IO_ALIGN_SIZE, ShmemInitStruct(), StrategyInitialize(), TYPEALIGN, and WritebackContextInit().

Referenced by CreateOrAttachShmemStructs().

◆ BufferManagerShmemSize()

Size BufferManagerShmemSize ( void  )

Definition at line 160 of file buf_init.c.

161 {
162  Size size = 0;
163 
 164  /* size of buffer descriptors */
 165  size = add_size(size, mul_size(NBuffers, sizeof(BufferDescPadded)));
 166  /* to allow aligning buffer descriptors */
 167  size = add_size(size, PG_CACHE_LINE_SIZE);
 168 
 169  /* size of data pages, plus alignment padding */
 170  size = add_size(size, PG_IO_ALIGN_SIZE);
 171  size = add_size(size, mul_size(NBuffers, BLCKSZ));
172 
 173  /* size of stuff controlled by freelist.c */
 174  size = add_size(size, StrategyShmemSize());
 175 
 176  /* size of I/O condition variables */
 177  size = add_size(size, mul_size(NBuffers,
 178  sizeof(ConditionVariableMinimallyPadded)));
 179  /* to allow aligning the above */
 180  size = add_size(size, PG_CACHE_LINE_SIZE);
 181 
 182  /* size of checkpoint sort array in bufmgr.c */
 183  size = add_size(size, mul_size(NBuffers, sizeof(CkptSortItem)));
 184 
185  return size;
186 }
Size StrategyShmemSize(void)
Definition: freelist.c:453
#define PG_CACHE_LINE_SIZE
Size add_size(Size s1, Size s2)
Definition: shmem.c:493
Size mul_size(Size s1, Size s2)
Definition: shmem.c:510
static pg_noinline void Size size
Definition: slab.c:607

References add_size(), mul_size(), NBuffers, PG_CACHE_LINE_SIZE, PG_IO_ALIGN_SIZE, size, and StrategyShmemSize().

Referenced by CalculateShmemSize().

◆ CheckBufferIsPinnedOnce()

void CheckBufferIsPinnedOnce ( Buffer  buffer)

Definition at line 5187 of file bufmgr.c.

5188 {
5189  if (BufferIsLocal(buffer))
5190  {
5191  if (LocalRefCount[-buffer - 1] != 1)
5192  elog(ERROR, "incorrect local pin count: %d",
5193  LocalRefCount[-buffer - 1]);
5194  }
5195  else
5196  {
5197  if (GetPrivateRefCount(buffer) != 1)
5198  elog(ERROR, "incorrect local pin count: %d",
5199  GetPrivateRefCount(buffer));
5200  }
5201 }
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:416
#define ERROR
Definition: elog.h:39
int32 * LocalRefCount
Definition: localbuf.c:46

References PrivateRefCountEntry::buffer, BufferIsLocal, elog, ERROR, GetPrivateRefCount(), and LocalRefCount.

Referenced by GetVictimBuffer(), and LockBufferForCleanup().

◆ CheckPointBuffers()

void CheckPointBuffers ( int  flags)

Definition at line 3692 of file bufmgr.c.

3693 {
3694  BufferSync(flags);
3695 }
static void BufferSync(int flags)
Definition: bufmgr.c:2894

References BufferSync().

Referenced by CheckPointGuts().

◆ ConditionalLockBuffer()

bool ConditionalLockBuffer ( Buffer  buffer)

Definition at line 5166 of file bufmgr.c.

5167 {
5168  BufferDesc *buf;
5169 
5170  Assert(BufferIsPinned(buffer));
5171  if (BufferIsLocal(buffer))
5172  return true; /* act as though we got it */
5173 
 5174  buf = GetBufferDescriptor(buffer - 1);
 5175 
 5176  return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
 5177  LW_EXCLUSIVE);
5178 }
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1339

References Assert, buf, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), LW_EXCLUSIVE, and LWLockConditionalAcquire().

Referenced by _bt_conditionallockbuf(), BloomNewBuffer(), ConditionalLockBufferForCleanup(), GinNewBuffer(), gistNewBuffer(), RelationGetBufferForTuple(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), and SpGistUpdateMetaPage().

◆ ConditionalLockBufferForCleanup()

bool ConditionalLockBufferForCleanup ( Buffer  buffer)

Definition at line 5381 of file bufmgr.c.

5382 {
5383  BufferDesc *bufHdr;
5384  uint32 buf_state,
5385  refcount;
5386 
5387  Assert(BufferIsValid(buffer));
5388 
5389  if (BufferIsLocal(buffer))
5390  {
5391  refcount = LocalRefCount[-buffer - 1];
5392  /* There should be exactly one pin */
5393  Assert(refcount > 0);
5394  if (refcount != 1)
5395  return false;
5396  /* Nobody else to wait for */
5397  return true;
5398  }
5399 
5400  /* There should be exactly one local pin */
5401  refcount = GetPrivateRefCount(buffer);
5402  Assert(refcount);
5403  if (refcount != 1)
5404  return false;
5405 
5406  /* Try to acquire lock */
5407  if (!ConditionalLockBuffer(buffer))
5408  return false;
5409 
5410  bufHdr = GetBufferDescriptor(buffer - 1);
5411  buf_state = LockBufHdr(bufHdr);
5412  refcount = BUF_STATE_GET_REFCOUNT(buf_state);
5413 
5414  Assert(refcount > 0);
5415  if (refcount == 1)
5416  {
5417  /* Successfully acquired exclusive lock with pincount 1 */
5418  UnlockBufHdr(bufHdr, buf_state);
5419  return true;
5420  }
5421 
5422  /* Failed, so release the lock */
5423  UnlockBufHdr(bufHdr, buf_state);
5424  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5425  return false;
5426 }
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:51
bool ConditionalLockBuffer(Buffer buffer)
Definition: bufmgr.c:5166
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:5140
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:189

References Assert, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsValid(), ConditionalLockBuffer(), GetBufferDescriptor(), GetPrivateRefCount(), LocalRefCount, LockBuffer(), LockBufHdr(), PrivateRefCountEntry::refcount, and UnlockBufHdr().

Referenced by _hash_finish_split(), _hash_getbuf_with_condlock_cleanup(), heap_page_prune_opt(), and lazy_scan_heap().

◆ CreateAndCopyRelationData()

void CreateAndCopyRelationData ( RelFileLocator  src_rlocator,
RelFileLocator  dst_rlocator,
bool  permanent 
)

Definition at line 4780 of file bufmgr.c.

4782 {
4783  char relpersistence;
4784  SMgrRelation src_rel;
4785  SMgrRelation dst_rel;
4786 
4787  /* Set the relpersistence. */
4788  relpersistence = permanent ?
4789  RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED;
4790 
4791  src_rel = smgropen(src_rlocator, INVALID_PROC_NUMBER);
4792  dst_rel = smgropen(dst_rlocator, INVALID_PROC_NUMBER);
4793 
4794  /*
4795  * Create and copy all forks of the relation. During create database we
4796  * have a separate cleanup mechanism which deletes complete database
4797  * directory. Therefore, each individual relation doesn't need to be
4798  * registered for cleanup.
4799  */
4800  RelationCreateStorage(dst_rlocator, relpersistence, false);
4801 
4802  /* copy main fork. */
4803  RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, MAIN_FORKNUM,
4804  permanent);
4805 
4806  /* copy those extra forks that exist */
4807  for (ForkNumber forkNum = MAIN_FORKNUM + 1;
4808  forkNum <= MAX_FORKNUM; forkNum++)
4809  {
4810  if (smgrexists(src_rel, forkNum))
4811  {
4812  smgrcreate(dst_rel, forkNum, false);
4813 
4814  /*
4815  * WAL log creation if the relation is persistent, or this is the
4816  * init fork of an unlogged relation.
4817  */
4818  if (permanent || forkNum == INIT_FORKNUM)
4819  log_smgrcreate(&dst_rlocator, forkNum);
4820 
4821  /* Copy a fork's data, block by block. */
4822  RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, forkNum,
4823  permanent);
4824  }
4825  }
4826 }
static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator, RelFileLocator dstlocator, ForkNumber forkNum, bool permanent)
Definition: bufmgr.c:4672
ForkNumber
Definition: relpath.h:56
@ MAIN_FORKNUM
Definition: relpath.h:58
@ INIT_FORKNUM
Definition: relpath.h:61
#define MAX_FORKNUM
Definition: relpath.h:70
SMgrRelation smgropen(RelFileLocator rlocator, ProcNumber backend)
Definition: smgr.c:198
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:411
bool smgrexists(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:398
SMgrRelation RelationCreateStorage(RelFileLocator rlocator, char relpersistence, bool register_delete)
Definition: storage.c:121
void log_smgrcreate(const RelFileLocator *rlocator, ForkNumber forkNum)
Definition: storage.c:186

References INIT_FORKNUM, INVALID_PROC_NUMBER, log_smgrcreate(), MAIN_FORKNUM, MAX_FORKNUM, RelationCopyStorageUsingBuffer(), RelationCreateStorage(), smgrcreate(), smgrexists(), and smgropen().

Referenced by CreateDatabaseUsingWalLog().

◆ DebugPrintBufferRefcount()

char* DebugPrintBufferRefcount ( Buffer  buffer)

Definition at line 3647 of file bufmgr.c.

3648 {
3649  BufferDesc *buf;
3650  int32 loccount;
3651  char *path;
3652  char *result;
3653  ProcNumber backend;
3654  uint32 buf_state;
3655 
3656  Assert(BufferIsValid(buffer));
3657  if (BufferIsLocal(buffer))
3658  {
3659  buf = GetLocalBufferDescriptor(-buffer - 1);
3660  loccount = LocalRefCount[-buffer - 1];
3661  backend = MyProcNumber;
3662  }
3663  else
3664  {
3665  buf = GetBufferDescriptor(buffer - 1);
3666  loccount = GetPrivateRefCount(buffer);
3667  backend = INVALID_PROC_NUMBER;
3668  }
3669 
3670  /* theoretically we should lock the bufhdr here */
3671  path = relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
3672  BufTagGetForkNum(&buf->tag));
3673  buf_state = pg_atomic_read_u32(&buf->state);
3674 
3675  result = psprintf("[%03d] (rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
3676  buffer, path,
3677  buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
3678  BUF_STATE_GET_REFCOUNT(buf_state), loccount);
3679  pfree(path);
3680  return result;
3681 }
#define BUF_FLAG_MASK
Definition: buf_internals.h:48
ProcNumber MyProcNumber
Definition: globals.c:89
void pfree(void *pointer)
Definition: mcxt.c:1521
int ProcNumber
Definition: procnumber.h:24
char * psprintf(const char *fmt,...)
Definition: psprintf.c:46
#define relpathbackend(rlocator, backend, forknum)
Definition: relpath.h:93

References Assert, buf, BUF_FLAG_MASK, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsValid(), BufTagGetForkNum(), BufTagGetRelFileLocator(), GetBufferDescriptor(), GetLocalBufferDescriptor(), GetPrivateRefCount(), INVALID_PROC_NUMBER, LocalRefCount, MyProcNumber, pfree(), pg_atomic_read_u32(), psprintf(), and relpathbackend.

Referenced by CheckForBufferLeaks(), CheckForLocalBufferLeaks(), and ResOwnerPrintBufferPin().

◆ DropDatabaseBuffers()

void DropDatabaseBuffers ( Oid  dbid)

Definition at line 4368 of file bufmgr.c.

4369 {
4370  int i;
4371 
4372  /*
4373  * We needn't consider local buffers, since by assumption the target
4374  * database isn't our own.
4375  */
4376 
4377  for (i = 0; i < NBuffers; i++)
4378  {
4379  BufferDesc *bufHdr = GetBufferDescriptor(i);
4380  uint32 buf_state;
4381 
4382  /*
4383  * As in DropRelationBuffers, an unlocked precheck should be safe and
4384  * saves some cycles.
4385  */
4386  if (bufHdr->tag.dbOid != dbid)
4387  continue;
4388 
4389  buf_state = LockBufHdr(bufHdr);
4390  if (bufHdr->tag.dbOid == dbid)
4391  InvalidateBuffer(bufHdr); /* releases spinlock */
4392  else
4393  UnlockBufHdr(bufHdr, buf_state);
4394  }
4395 }
static void InvalidateBuffer(BufferDesc *buf)
Definition: bufmgr.c:1766
Oid dbOid
Definition: buf_internals.h:95

References buftag::dbOid, GetBufferDescriptor(), i, InvalidateBuffer(), LockBufHdr(), NBuffers, BufferDesc::tag, and UnlockBufHdr().

Referenced by createdb_failure_callback(), dbase_redo(), dropdb(), and movedb().

◆ DropRelationBuffers()

void DropRelationBuffers ( struct SMgrRelationData smgr_reln,
ForkNumber forkNum,
int  nforks,
BlockNumber firstDelBlock 
)

◆ DropRelationsAllBuffers()

void DropRelationsAllBuffers ( struct SMgrRelationData **  smgr_reln,
int  nlocators 
)

◆ EvictUnpinnedBuffer()

bool EvictUnpinnedBuffer ( Buffer  buf)

Definition at line 6078 of file bufmgr.c.

6079 {
6080  BufferDesc *desc;
6081  uint32 buf_state;
6082  bool result;
6083 
 6084  /* Make sure we can pin the buffer. */
 6085  ReservePrivateRefCountEntry();
 6086  ResourceOwnerEnlarge(CurrentResourceOwner);
 6087 
 6088  Assert(!BufferIsLocal(buf));
 6089  desc = GetBufferDescriptor(buf - 1);
6090 
6091  /* Lock the header and check if it's valid. */
6092  buf_state = LockBufHdr(desc);
6093  if ((buf_state & BM_VALID) == 0)
6094  {
6095  UnlockBufHdr(desc, buf_state);
6096  return false;
6097  }
6098 
6099  /* Check that it's not pinned already. */
6100  if (BUF_STATE_GET_REFCOUNT(buf_state) > 0)
6101  {
6102  UnlockBufHdr(desc, buf_state);
6103  return false;
6104  }
6105 
6106  PinBuffer_Locked(desc); /* releases spinlock */
6107 
6108  /* If it was dirty, try to clean it once. */
 6109  if (buf_state & BM_DIRTY)
 6110  {
 6111  LWLockAcquire(BufferDescriptorGetContentLock(desc), LW_SHARED);
 6112  FlushBuffer(desc, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
 6113  LWLockRelease(BufferDescriptorGetContentLock(desc));
 6114  }
6115 
6116  /* This will return false if it becomes dirty or someone else pins it. */
6117  result = InvalidateVictimBuffer(desc);
6118 
6119  UnpinBuffer(desc);
6120 
6121  return result;
6122 }
#define BM_VALID
Definition: buf_internals.h:62
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln, IOObject io_object, IOContext io_context)
Definition: bufmgr.c:3766
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:2745
static bool InvalidateVictimBuffer(BufferDesc *buf_hdr)
Definition: bufmgr.c:1864
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:250
static void UnpinBuffer(BufferDesc *buf)
Definition: bufmgr.c:2788
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1168
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1781
@ LW_SHARED
Definition: lwlock.h:115
@ IOOBJECT_RELATION
Definition: pgstat.h:312
@ IOCONTEXT_NORMAL
Definition: pgstat.h:322
ResourceOwner CurrentResourceOwner
Definition: resowner.c:165
void ResourceOwnerEnlarge(ResourceOwner owner)
Definition: resowner.c:442

References Assert, BM_DIRTY, BM_VALID, buf, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock(), BufferIsLocal, CurrentResourceOwner, FlushBuffer(), GetBufferDescriptor(), InvalidateVictimBuffer(), IOCONTEXT_NORMAL, IOOBJECT_RELATION, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), PinBuffer_Locked(), ReservePrivateRefCountEntry(), ResourceOwnerEnlarge(), UnlockBufHdr(), and UnpinBuffer().

Referenced by pg_buffercache_evict().

◆ ExtendBufferedRel()

Buffer ExtendBufferedRel ( BufferManagerRelation  bmr,
ForkNumber  forkNum,
BufferAccessStrategy  strategy,
uint32  flags 
)

Definition at line 846 of file bufmgr.c.

850 {
851  Buffer buf;
852  uint32 extend_by = 1;
853 
854  ExtendBufferedRelBy(bmr, forkNum, strategy, flags, extend_by,
855  &buf, &extend_by);
856 
857  return buf;
858 }
int Buffer
Definition: buf.h:23
BlockNumber ExtendBufferedRelBy(BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, uint32 extend_by, Buffer *buffers, uint32 *extended_by)
Definition: bufmgr.c:878

References buf, and ExtendBufferedRelBy().

Referenced by _bt_allocbuf(), _hash_getnewbuf(), BloomNewBuffer(), brinbuild(), brinbuildempty(), fill_seq_fork_with_data(), ginbuildempty(), GinNewBuffer(), gistbuildempty(), gistNewBuffer(), ReadBuffer_common(), revmap_physical_extend(), and SpGistNewBuffer().

◆ ExtendBufferedRelBy()

BlockNumber ExtendBufferedRelBy ( BufferManagerRelation  bmr,
ForkNumber  fork,
BufferAccessStrategy  strategy,
uint32  flags,
uint32  extend_by,
Buffer buffers,
uint32 extended_by 
)

Definition at line 878 of file bufmgr.c.

885 {
886  Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
887  Assert(bmr.smgr == NULL || bmr.relpersistence != 0);
888  Assert(extend_by > 0);
889 
890  if (bmr.smgr == NULL)
891  {
892  bmr.smgr = RelationGetSmgr(bmr.rel);
893  bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
894  }
895 
896  return ExtendBufferedRelCommon(bmr, fork, strategy, flags,
897  extend_by, InvalidBlockNumber,
898  buffers, extended_by);
899 }
#define InvalidBlockNumber
Definition: block.h:33
static BlockNumber ExtendBufferedRelCommon(BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, uint32 extend_by, BlockNumber extend_upto, Buffer *buffers, uint32 *extended_by)
Definition: bufmgr.c:2129
static SMgrRelation RelationGetSmgr(Relation rel)
Definition: rel.h:567
struct SMgrRelationData * smgr
Definition: bufmgr.h:103
Form_pg_class rd_rel
Definition: rel.h:111

References Assert, ExtendBufferedRelCommon(), InvalidBlockNumber, RelationData::rd_rel, BufferManagerRelation::rel, RelationGetSmgr(), BufferManagerRelation::relpersistence, and BufferManagerRelation::smgr.

Referenced by ExtendBufferedRel(), and RelationAddBlocks().

◆ ExtendBufferedRelTo()

Buffer ExtendBufferedRelTo ( BufferManagerRelation  bmr,
ForkNumber  fork,
BufferAccessStrategy  strategy,
uint32  flags,
BlockNumber  extend_to,
ReadBufferMode  mode 
)

Definition at line 910 of file bufmgr.c.

916 {
918  uint32 extended_by = 0;
919  Buffer buffer = InvalidBuffer;
920  Buffer buffers[64];
921 
922  Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
923  Assert(bmr.smgr == NULL || bmr.relpersistence != 0);
924  Assert(extend_to != InvalidBlockNumber && extend_to > 0);
925 
926  if (bmr.smgr == NULL)
927  {
928  bmr.smgr = RelationGetSmgr(bmr.rel);
929  bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
930  }
931 
932  /*
933  * If desired, create the file if it doesn't exist. If
934  * smgr_cached_nblocks[fork] is positive then it must exist, no need for
935  * an smgrexists call.
936  */
937  if ((flags & EB_CREATE_FORK_IF_NEEDED) &&
938  (bmr.smgr->smgr_cached_nblocks[fork] == 0 ||
940  !smgrexists(bmr.smgr, fork))
941  {
943 
944  /* recheck, fork might have been created concurrently */
945  if (!smgrexists(bmr.smgr, fork))
946  smgrcreate(bmr.smgr, fork, flags & EB_PERFORMING_RECOVERY);
947 
949  }
950 
951  /*
952  * If requested, invalidate size cache, so that smgrnblocks asks the
953  * kernel.
954  */
955  if (flags & EB_CLEAR_SIZE_CACHE)
957 
958  /*
959  * Estimate how many pages we'll need to extend by. This avoids acquiring
960  * unnecessarily many victim buffers.
961  */
962  current_size = smgrnblocks(bmr.smgr, fork);
963 
964  /*
965  * Since no-one else can be looking at the page contents yet, there is no
966  * difference between an exclusive lock and a cleanup-strength lock. Note
967  * that we pass the original mode to ReadBuffer_common() below, when
968  * falling back to reading the buffer to a concurrent relation extension.
969  */
971  flags |= EB_LOCK_TARGET;
972 
973  while (current_size < extend_to)
974  {
975  uint32 num_pages = lengthof(buffers);
976  BlockNumber first_block;
977 
978  if ((uint64) current_size + num_pages > extend_to)
979  num_pages = extend_to - current_size;
980 
981  first_block = ExtendBufferedRelCommon(bmr, fork, strategy, flags,
982  num_pages, extend_to,
983  buffers, &extended_by);
984 
985  current_size = first_block + extended_by;
986  Assert(num_pages != 0 || current_size >= extend_to);
987 
988  for (uint32 i = 0; i < extended_by; i++)
989  {
990  if (first_block + i != extend_to - 1)
991  ReleaseBuffer(buffers[i]);
992  else
993  buffer = buffers[i];
994  }
995  }
996 
997  /*
998  * It's possible that another backend concurrently extended the relation.
999  * In that case read the buffer.
1000  *
1001  * XXX: Should we control this via a flag?
1002  */
1003  if (buffer == InvalidBuffer)
1004  {
1005  Assert(extended_by == 0);
1006  buffer = ReadBuffer_common(bmr.rel, bmr.smgr, bmr.relpersistence,
1007  fork, extend_to - 1, mode, strategy);
1008  }
1009 
1010  return buffer;
1011 }
uint32 BlockNumber
Definition: block.h:31
static Buffer ReadBuffer_common(Relation rel, SMgrRelation smgr, char smgr_persistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:1189
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4906
#define lengthof(array)
Definition: c.h:788
void LockRelationForExtension(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:420
void UnlockRelationForExtension(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:470
#define ExclusiveLock
Definition: lockdefs.h:42
static PgChecksumMode mode
Definition: pg_checksums.c:56
static int64 current_size
Definition: pg_checksums.c:64
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:655
BlockNumber smgr_cached_nblocks[MAX_FORKNUM+1]
Definition: smgr.h:46

References Assert, PrivateRefCountEntry::buffer, current_size, EB_CLEAR_SIZE_CACHE, EB_CREATE_FORK_IF_NEEDED, EB_LOCK_TARGET, EB_PERFORMING_RECOVERY, ExclusiveLock, ExtendBufferedRelCommon(), i, InvalidBlockNumber, InvalidBuffer, lengthof, LockRelationForExtension(), mode, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, RelationData::rd_rel, ReadBuffer_common(), BufferManagerRelation::rel, RelationGetSmgr(), ReleaseBuffer(), BufferManagerRelation::relpersistence, BufferManagerRelation::smgr, SMgrRelationData::smgr_cached_nblocks, smgrcreate(), smgrexists(), smgrnblocks(), and UnlockRelationForExtension().

Referenced by fsm_extend(), vm_extend(), and XLogReadBufferExtended().

◆ FlushDatabaseBuffers()

void FlushDatabaseBuffers ( Oid  dbid)

Definition at line 4844 of file bufmgr.c.

4845 {
4846  int i;
4847  BufferDesc *bufHdr;
4848 
4849  for (i = 0; i < NBuffers; i++)
4850  {
4851  uint32 buf_state;
4852 
4853  bufHdr = GetBufferDescriptor(i);
4854 
4855  /*
4856  * As in DropRelationBuffers, an unlocked precheck should be safe and
4857  * saves some cycles.
4858  */
4859  if (bufHdr->tag.dbOid != dbid)
4860  continue;
4861 
4862  /* Make sure we can handle the pin */
4865 
4866  buf_state = LockBufHdr(bufHdr);
4867  if (bufHdr->tag.dbOid == dbid &&
4868  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4869  {
4870  PinBuffer_Locked(bufHdr);
4874  UnpinBuffer(bufHdr);
4875  }
4876  else
4877  UnlockBufHdr(bufHdr, buf_state);
4878  }
4879 }

References BM_DIRTY, BM_VALID, BufferDescriptorGetContentLock(), CurrentResourceOwner, buftag::dbOid, FlushBuffer(), GetBufferDescriptor(), i, IOCONTEXT_NORMAL, IOOBJECT_RELATION, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, PinBuffer_Locked(), ReservePrivateRefCountEntry(), ResourceOwnerEnlarge(), BufferDesc::tag, UnlockBufHdr(), and UnpinBuffer().

Referenced by dbase_redo().

◆ FlushOneBuffer()

void FlushOneBuffer ( Buffer  buffer)

Definition at line 4886 of file bufmgr.c.

4887 {
4888  BufferDesc *bufHdr;
4889 
4890  /* currently not needed, but no fundamental reason not to support */
4891  Assert(!BufferIsLocal(buffer));
4892 
4893  Assert(BufferIsPinned(buffer));
4894 
4895  bufHdr = GetBufferDescriptor(buffer - 1);
4896 
4898 
4900 }
bool LWLockHeldByMe(LWLock *lock)
Definition: lwlock.c:1893

References Assert, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, FlushBuffer(), GetBufferDescriptor(), IOCONTEXT_NORMAL, IOOBJECT_RELATION, and LWLockHeldByMe().

Referenced by hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), and XLogReadBufferForRedoExtended().

◆ FlushRelationBuffers()

void FlushRelationBuffers ( Relation  rel)

Definition at line 4474 of file bufmgr.c.

4475 {
4476  int i;
4477  BufferDesc *bufHdr;
4478  SMgrRelation srel = RelationGetSmgr(rel);
4479 
4480  if (RelationUsesLocalBuffers(rel))
4481  {
4482  for (i = 0; i < NLocBuffer; i++)
4483  {
4484  uint32 buf_state;
4485  instr_time io_start;
4486 
4487  bufHdr = GetLocalBufferDescriptor(i);
4488  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
4489  ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
4490  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4491  {
4492  ErrorContextCallback errcallback;
4493  Page localpage;
4494 
4495  localpage = (char *) LocalBufHdrGetBlock(bufHdr);
4496 
4497  /* Setup error traceback support for ereport() */
4499  errcallback.arg = (void *) bufHdr;
4500  errcallback.previous = error_context_stack;
4501  error_context_stack = &errcallback;
4502 
4503  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
4504 
4506 
4507  smgrwrite(srel,
4508  BufTagGetForkNum(&bufHdr->tag),
4509  bufHdr->tag.blockNum,
4510  localpage,
4511  false);
4512 
4515  io_start, 1);
4516 
4517  buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
4518  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
4519 
4521 
4522  /* Pop the error context stack */
4523  error_context_stack = errcallback.previous;
4524  }
4525  }
4526 
4527  return;
4528  }
4529 
4530  for (i = 0; i < NBuffers; i++)
4531  {
4532  uint32 buf_state;
4533 
4534  bufHdr = GetBufferDescriptor(i);
4535 
4536  /*
4537  * As in DropRelationBuffers, an unlocked precheck should be safe and
4538  * saves some cycles.
4539  */
4540  if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
4541  continue;
4542 
4543  /* Make sure we can handle the pin */
4546 
4547  buf_state = LockBufHdr(bufHdr);
4548  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
4549  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4550  {
4551  PinBuffer_Locked(bufHdr);
4555  UnpinBuffer(bufHdr);
4556  }
4557  else
4558  UnlockBufHdr(bufHdr, buf_state);
4559  }
4560 }
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:295
static bool BufTagMatchesRelFileLocator(const BufferTag *tag, const RelFileLocator *rlocator)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:66
bool track_io_timing
Definition: bufmgr.c:143
#define LocalBufHdrGetBlock(bufHdr)
Definition: bufmgr.c:72
static void local_buffer_write_error_callback(void *arg)
Definition: bufmgr.c:5696
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1542
ErrorContextCallback * error_context_stack
Definition: elog.c:94
BufferUsage pgBufferUsage
Definition: instrument.c:20
int NLocBuffer
Definition: localbuf.c:42
@ IOOBJECT_TEMP_RELATION
Definition: pgstat.h:313
@ IOOP_WRITE
Definition: pgstat.h:336
instr_time pgstat_prepare_io_time(bool track_io_guc)
Definition: pgstat_io.c:100
void pgstat_count_io_op_time(IOObject io_object, IOContext io_context, IOOp io_op, instr_time start_time, uint32 cnt)
Definition: pgstat_io.c:122
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:637
static void smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const void *buffer, bool skipFsync)
Definition: smgr.h:121
int64 local_blks_written
Definition: instrument.h:33
struct ErrorContextCallback * previous
Definition: elog.h:296
void(* callback)(void *arg)
Definition: elog.h:297
RelFileLocator rd_locator
Definition: rel.h:57

References ErrorContextCallback::arg, buftag::blockNum, BM_DIRTY, BM_JUST_DIRTIED, BM_VALID, BufferDescriptorGetContentLock(), BufTagGetForkNum(), BufTagMatchesRelFileLocator(), ErrorContextCallback::callback, CurrentResourceOwner, error_context_stack, FlushBuffer(), GetBufferDescriptor(), GetLocalBufferDescriptor(), i, IOCONTEXT_NORMAL, IOOBJECT_RELATION, IOOBJECT_TEMP_RELATION, IOOP_WRITE, BufferUsage::local_blks_written, local_buffer_write_error_callback(), LocalBufHdrGetBlock, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, NLocBuffer, PageSetChecksumInplace(), pg_atomic_read_u32(), pg_atomic_unlocked_write_u32(), pgBufferUsage, pgstat_count_io_op_time(), pgstat_prepare_io_time(), PinBuffer_Locked(), ErrorContextCallback::previous, RelationData::rd_locator, RelationGetSmgr(), RelationUsesLocalBuffers, ReservePrivateRefCountEntry(), ResourceOwnerEnlarge(), smgrwrite(), BufferDesc::state, BufferDesc::tag, track_io_timing, UnlockBufHdr(), and UnpinBuffer().

Referenced by fill_seq_with_data(), heapam_relation_copy_data(), and index_copy_data().

◆ FlushRelationsAllBuffers()

void FlushRelationsAllBuffers ( struct SMgrRelationData **  smgrs,
int  nrels 
)

◆ FreeAccessStrategy()

void FreeAccessStrategy ( BufferAccessStrategy  strategy)

Definition at line 681 of file freelist.c.

682 {
683  /* don't crash if called on a "default" strategy */
684  if (strategy != NULL)
685  pfree(strategy);
686 }

References pfree().

Referenced by blgetbitmap(), FreeBulkInsertState(), heap_endscan(), initscan(), parallel_vacuum_main(), and RelationCopyStorageUsingBuffer().

◆ GetAccessStrategy()

BufferAccessStrategy GetAccessStrategy ( BufferAccessStrategyType  btype)

Definition at line 541 of file freelist.c.

542 {
543  int ring_size_kb;
544 
545  /*
546  * Select ring size to use. See buffer/README for rationales.
547  *
548  * Note: if you change the ring size for BAS_BULKREAD, see also
549  * SYNC_SCAN_REPORT_INTERVAL in access/heap/syncscan.c.
550  */
551  switch (btype)
552  {
553  case BAS_NORMAL:
554  /* if someone asks for NORMAL, just give 'em a "default" object */
555  return NULL;
556 
557  case BAS_BULKREAD:
558  ring_size_kb = 256;
559  break;
560  case BAS_BULKWRITE:
561  ring_size_kb = 16 * 1024;
562  break;
563  case BAS_VACUUM:
564  ring_size_kb = 2048;
565  break;
566 
567  default:
568  elog(ERROR, "unrecognized buffer access strategy: %d",
569  (int) btype);
570  return NULL; /* keep compiler quiet */
571  }
572 
573  return GetAccessStrategyWithSize(btype, ring_size_kb);
574 }
BufferAccessStrategy GetAccessStrategyWithSize(BufferAccessStrategyType btype, int ring_size_kb)
Definition: freelist.c:584

References BAS_BULKREAD, BAS_BULKWRITE, BAS_NORMAL, BAS_VACUUM, elog, ERROR, and GetAccessStrategyWithSize().

Referenced by blgetbitmap(), bt_check_every_level(), collect_corrupt_items(), collect_visibility_data(), GetBulkInsertState(), initscan(), pgstat_index(), pgstathashindex(), pgstatindex_impl(), RelationCopyStorageUsingBuffer(), ScanSourceDatabasePgClass(), statapprox_heap(), and verify_heapam().

◆ GetAccessStrategyBufferCount()

int GetAccessStrategyBufferCount ( BufferAccessStrategy  strategy)

Definition at line 624 of file freelist.c.

625 {
626  if (strategy == NULL)
627  return 0;
628 
629  return strategy->nbuffers;
630 }

References BufferAccessStrategyData::nbuffers.

Referenced by parallel_vacuum_init().

◆ GetAccessStrategyPinLimit()

int GetAccessStrategyPinLimit ( BufferAccessStrategy  strategy)

Definition at line 647 of file freelist.c.

648 {
649  if (strategy == NULL)
650  return NBuffers;
651 
652  switch (strategy->btype)
653  {
654  case BAS_BULKREAD:
655 
656  /*
657  * Since BAS_BULKREAD uses StrategyRejectBuffer(), dirty buffers
658  * shouldn't be a problem and the caller is free to pin up to the
659  * entire ring at once.
660  */
661  return strategy->nbuffers;
662 
663  default:
664 
665  /*
666  * Tell caller not to pin more than half the buffers in the ring.
667  * This is a trade-off between look ahead distance and deferring
668  * writeback and associated WAL traffic.
669  */
670  return strategy->nbuffers / 2;
671  }
672 }
BufferAccessStrategyType btype
Definition: freelist.c:75

References BAS_BULKREAD, BufferAccessStrategyData::btype, BufferAccessStrategyData::nbuffers, and NBuffers.

Referenced by read_stream_begin_impl().

◆ GetAccessStrategyWithSize()

BufferAccessStrategy GetAccessStrategyWithSize ( BufferAccessStrategyType  btype,
int  ring_size_kb 
)

Definition at line 584 of file freelist.c.

585 {
586  int ring_buffers;
587  BufferAccessStrategy strategy;
588 
589  Assert(ring_size_kb >= 0);
590 
591  /* Figure out how many buffers ring_size_kb is */
592  ring_buffers = ring_size_kb / (BLCKSZ / 1024);
593 
594  /* 0 means unlimited, so no BufferAccessStrategy required */
595  if (ring_buffers == 0)
596  return NULL;
597 
598  /* Cap to 1/8th of shared_buffers */
599  ring_buffers = Min(NBuffers / 8, ring_buffers);
600 
601  /* NBuffers should never be less than 16, so this shouldn't happen */
602  Assert(ring_buffers > 0);
603 
604  /* Allocate the object and initialize all elements to zeroes */
605  strategy = (BufferAccessStrategy)
606  palloc0(offsetof(BufferAccessStrategyData, buffers) +
607  ring_buffers * sizeof(Buffer));
608 
609  /* Set fields that don't start out zero */
610  strategy->btype = btype;
611  strategy->nbuffers = ring_buffers;
612 
613  return strategy;
614 }
struct BufferAccessStrategyData * BufferAccessStrategy
Definition: buf.h:44
#define Min(x, y)
Definition: c.h:1004
void * palloc0(Size size)
Definition: mcxt.c:1347

References Assert, BufferAccessStrategyData::btype, Min, BufferAccessStrategyData::nbuffers, NBuffers, and palloc0().

Referenced by do_autovacuum(), ExecVacuum(), GetAccessStrategy(), and parallel_vacuum_main().

◆ HoldingBufferPinThatDelaysRecovery()

bool HoldingBufferPinThatDelaysRecovery ( void  )

Definition at line 5355 of file bufmgr.c.

5356 {
5357  int bufid = GetStartupBufferPinWaitBufId();
5358 
5359  /*
5360  * If we get woken slowly then it's possible that the Startup process was
5361  * already woken by other backends before we got here. Also possible that
5362  * we get here by multiple interrupts or interrupts at inappropriate
5363  * times, so make sure we do nothing if the bufid is not set.
5364  */
5365  if (bufid < 0)
5366  return false;
5367 
5368  if (GetPrivateRefCount(bufid + 1) > 0)
5369  return true;
5370 
5371  return false;
5372 }
int GetStartupBufferPinWaitBufId(void)
Definition: proc.c:672

References GetPrivateRefCount(), and GetStartupBufferPinWaitBufId().

Referenced by CheckRecoveryConflictDeadlock(), and ProcessRecoveryConflictInterrupt().

◆ IncrBufferRefCount()

void IncrBufferRefCount ( Buffer  buffer)

Definition at line 4938 of file bufmgr.c.

4939 {
4940  Assert(BufferIsPinned(buffer));
4942  if (BufferIsLocal(buffer))
4943  LocalRefCount[-buffer - 1]++;
4944  else
4945  {
4946  PrivateRefCountEntry *ref;
4947 
4948  ref = GetPrivateRefCountEntry(buffer, true);
4949  Assert(ref != NULL);
4950  ref->refcount++;
4951  }
4953 }
static void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
static PrivateRefCountEntry * GetPrivateRefCountEntry(Buffer buffer, bool do_move)
Definition: bufmgr.c:342

References Assert, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, CurrentResourceOwner, GetPrivateRefCountEntry(), LocalRefCount, PrivateRefCountEntry::refcount, ResourceOwnerEnlarge(), and ResourceOwnerRememberBuffer().

Referenced by _bt_steppage(), btrestrpos(), entryLoadMoreItems(), ReadBufferBI(), RelationAddBlocks(), scanPostingTree(), startScanEntry(), and tts_buffer_heap_store_tuple().

◆ InitBufferManagerAccess()

void InitBufferManagerAccess ( void  )

Definition at line 3558 of file bufmgr.c.

3559 {
3560  HASHCTL hash_ctl;
3561 
3562  memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
3563 
3564  hash_ctl.keysize = sizeof(int32);
3565  hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
3566 
3567  PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
3568  HASH_ELEM | HASH_BLOBS);
3569 
3570  /*
3571  * AtProcExit_Buffers needs LWLock access, and thereby has to be called at
3572  * the corresponding phase of backend shutdown.
3573  */
3574  Assert(MyProc != NULL);
3576 }
static void AtProcExit_Buffers(int code, Datum arg)
Definition: bufmgr.c:3583
struct PrivateRefCountEntry PrivateRefCountEntry
static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES]
Definition: bufmgr.c:208
static HTAB * PrivateRefCountHash
Definition: bufmgr.c:209
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:352
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:365
PGPROC * MyProc
Definition: proc.c:67
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76

References Assert, AtProcExit_Buffers(), HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, MyProc, on_shmem_exit(), PrivateRefCountArray, and PrivateRefCountHash.

Referenced by BaseInit().

◆ IsBufferCleanupOK()

bool IsBufferCleanupOK ( Buffer  buffer)

Definition at line 5437 of file bufmgr.c.

5438 {
5439  BufferDesc *bufHdr;
5440  uint32 buf_state;
5441 
5442  Assert(BufferIsValid(buffer));
5443 
5444  if (BufferIsLocal(buffer))
5445  {
5446  /* There should be exactly one pin */
5447  if (LocalRefCount[-buffer - 1] != 1)
5448  return false;
5449  /* Nobody else to wait for */
5450  return true;
5451  }
5452 
5453  /* There should be exactly one local pin */
5454  if (GetPrivateRefCount(buffer) != 1)
5455  return false;
5456 
5457  bufHdr = GetBufferDescriptor(buffer - 1);
5458 
5459  /* caller must hold exclusive lock on buffer */
5461  LW_EXCLUSIVE));
5462 
5463  buf_state = LockBufHdr(bufHdr);
5464 
5465  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5466  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
5467  {
5468  /* pincount is OK. */
5469  UnlockBufHdr(bufHdr, buf_state);
5470  return true;
5471  }
5472 
5473  UnlockBufHdr(bufHdr, buf_state);
5474  return false;
5475 }

References Assert, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsValid(), GetBufferDescriptor(), GetPrivateRefCount(), LocalRefCount, LockBufHdr(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), and UnlockBufHdr().

Referenced by _hash_doinsert(), _hash_expandtable(), _hash_splitbucket(), and hashbucketcleanup().

◆ LimitAdditionalLocalPins()

void LimitAdditionalLocalPins ( uint32 additional_pins)

Definition at line 290 of file localbuf.c.

291 {
292  uint32 max_pins;
293 
294  if (*additional_pins <= 1)
295  return;
296 
297  /*
298  * In contrast to LimitAdditionalPins() other backends don't play a role
299  * here. We can allow up to NLocBuffer pins in total, but it might not be
300  * initialized yet so read num_temp_buffers.
301  */
302  max_pins = (num_temp_buffers - NLocalPinnedBuffers);
303 
304  if (*additional_pins >= max_pins)
305  *additional_pins = max_pins;
306 }
int num_temp_buffers
Definition: guc_tables.c:535
static int NLocalPinnedBuffers
Definition: localbuf.c:53

References NLocalPinnedBuffers, and num_temp_buffers.

Referenced by ExtendBufferedRelLocal(), and read_stream_begin_impl().

◆ LimitAdditionalPins()

void LimitAdditionalPins ( uint32 additional_pins)

Definition at line 2098 of file bufmgr.c.

2099 {
2100  uint32 max_backends;
2101  int max_proportional_pins;
2102 
2103  if (*additional_pins <= 1)
2104  return;
2105 
2106  max_backends = MaxBackends + NUM_AUXILIARY_PROCS;
2107  max_proportional_pins = NBuffers / max_backends;
2108 
2109  /*
2110  * Subtract the approximate number of buffers already pinned by this
2111  * backend. We get the number of "overflowed" pins for free, but don't
2112  * know the number of pins in PrivateRefCountArray. The cost of
2113  * calculating that exactly doesn't seem worth it, so just assume the max.
2114  */
2115  max_proportional_pins -= PrivateRefCountOverflowed + REFCOUNT_ARRAY_ENTRIES;
2116 
2117  if (max_proportional_pins <= 0)
2118  max_proportional_pins = 1;
2119 
2120  if (*additional_pins > max_proportional_pins)
2121  *additional_pins = max_proportional_pins;
2122 }
#define REFCOUNT_ARRAY_ENTRIES
Definition: bufmgr.c:96
int MaxBackends
Definition: globals.c:145
#define NUM_AUXILIARY_PROCS
Definition: proc.h:439

References MaxBackends, NBuffers, NUM_AUXILIARY_PROCS, PrivateRefCountOverflowed, and REFCOUNT_ARRAY_ENTRIES.

Referenced by ExtendBufferedRelShared(), and read_stream_begin_impl().

◆ LockBuffer()

void LockBuffer ( Buffer  buffer,
int  mode 
)

Definition at line 5140 of file bufmgr.c.

5141 {
5142  BufferDesc *buf;
5143 
5144  Assert(BufferIsPinned(buffer));
5145  if (BufferIsLocal(buffer))
5146  return; /* local buffers need no lock */
5147 
5148  buf = GetBufferDescriptor(buffer - 1);
5149 
5150  if (mode == BUFFER_LOCK_UNLOCK)
5152  else if (mode == BUFFER_LOCK_SHARE)
5154  else if (mode == BUFFER_LOCK_EXCLUSIVE)
5156  else
5157  elog(ERROR, "unrecognized buffer lock mode: %d", mode);
5158 }
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:190
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:191

References Assert, buf, PrivateRefCountEntry::buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, elog, ERROR, GetBufferDescriptor(), LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), and mode.

Referenced by _bt_lockbuf(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_getbuf(), _hash_getbuf_with_strategy(), _hash_getcachedmetap(), _hash_init(), _hash_kill_items(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), blbulkdelete(), blgetbitmap(), blinsert(), BloomInitMetapage(), BloomNewBuffer(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_page_cleanup(), bringetbitmap(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinLockRevmapPageForUpdate(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinsummarize(), bt_metap(), bt_multi_page_stats(), bt_page_items_internal(), bt_page_stats_internal(), bt_recheck_sibling_links(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), ConditionalLockBufferForCleanup(), count_nondeletable_pages(), entryLoadMoreItems(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), get_raw_page_internal(), GetVisibilityMapPins(), ginbulkdelete(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishOldSplit(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfinishsplit(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), 
heap_fetch(), heap_finish_speculative(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_page_prune_opt(), heap_prepare_pagescan(), heap_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_for_cluster(), heapam_scan_analyze_next_block(), heapam_scan_bitmap_next_block(), heapam_scan_sample_next_tuple(), heapam_tuple_satisfies_snapshot(), heapgettup(), initBloomState(), lazy_scan_heap(), lazy_scan_new_or_empty(), lazy_vacuum_heap_rel(), LockBufferForCleanup(), log_newpage_range(), palloc_btree_page(), pg_visibility(), pgrowlocks(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationAddBlocks(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), ScanSourceDatabasePgClass(), shiftList(), spgdoinsert(), spgGetCache(), SpGistNewBuffer(), spgprocesspending(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), summarize_range(), UnlockReleaseBuffer(), verify_heapam(), verifyBackupPageConsistency(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), XLogReadBufferForRedoExtended(), XLogRecordPageWithFreeSpace(), and ZeroAndLockBuffer().

◆ LockBufferForCleanup()

void LockBufferForCleanup ( Buffer  buffer)

Definition at line 5220 of file bufmgr.c.

5221 {
5222  BufferDesc *bufHdr;
5223  TimestampTz waitStart = 0;
5224  bool waiting = false;
5225  bool logged_recovery_conflict = false;
5226 
5227  Assert(BufferIsPinned(buffer));
5228  Assert(PinCountWaitBuf == NULL);
5229 
5230  CheckBufferIsPinnedOnce(buffer);
5231 
5232  /* Nobody else to wait for */
5233  if (BufferIsLocal(buffer))
5234  return;
5235 
5236  bufHdr = GetBufferDescriptor(buffer - 1);
5237 
5238  for (;;)
5239  {
5240  uint32 buf_state;
5241 
5242  /* Try to acquire lock */
5243  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5244  buf_state = LockBufHdr(bufHdr);
5245 
5246  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5247  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
5248  {
5249  /* Successfully acquired exclusive lock with pincount 1 */
5250  UnlockBufHdr(bufHdr, buf_state);
5251 
5252  /*
5253  * Emit the log message if recovery conflict on buffer pin was
5254  * resolved but the startup process waited longer than
5255  * deadlock_timeout for it.
5256  */
5257  if (logged_recovery_conflict)
5258  LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
5259  waitStart, GetCurrentTimestamp(),
5260  NULL, false);
5261 
5262  if (waiting)
5263  {
5264  /* reset ps display to remove the suffix if we added one */
5265  set_ps_display_remove_suffix();
5266  waiting = false;
5267  }
5268  return;
5269  }
5270  /* Failed, so mark myself as waiting for pincount 1 */
5271  if (buf_state & BM_PIN_COUNT_WAITER)
5272  {
5273  UnlockBufHdr(bufHdr, buf_state);
5274  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5275  elog(ERROR, "multiple backends attempting to wait for pincount 1");
5276  }
5277  bufHdr->wait_backend_pgprocno = MyProcNumber;
5278  PinCountWaitBuf = bufHdr;
5279  buf_state |= BM_PIN_COUNT_WAITER;
5280  UnlockBufHdr(bufHdr, buf_state);
5281  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5282 
5283  /* Wait to be signaled by UnpinBuffer() */
5284  if (InHotStandby)
5285  {
5286  if (!waiting)
5287  {
5288  /* adjust the process title to indicate that it's waiting */
5289  set_ps_display_suffix("waiting");
5290  waiting = true;
5291  }
5292 
5293  /*
5294  * Emit the log message if the startup process is waiting longer
5295  * than deadlock_timeout for recovery conflict on buffer pin.
5296  *
5297  * Skip this if first time through because the startup process has
5298  * not started waiting yet in this case. So, the wait start
5299  * timestamp is set after this logic.
5300  */
5301  if (waitStart != 0 && !logged_recovery_conflict)
5302  {
5303  TimestampTz now = GetCurrentTimestamp();
5304 
5305  if (TimestampDifferenceExceeds(waitStart, now,
5306  DeadlockTimeout))
5307  {
5308  LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
5309  waitStart, now, NULL, true);
5310  logged_recovery_conflict = true;
5311  }
5312  }
5313 
5314  /*
5315  * Set the wait start timestamp if logging is enabled and first
5316  * time through.
5317  */
5318  if (log_recovery_conflict_waits && waitStart == 0)
5319  waitStart = GetCurrentTimestamp();
5320 
5321  /* Publish the bufid that Startup process waits on */
5322  SetStartupBufferPinWaitBufId(buffer - 1);
5323  /* Set alarm and then wait to be signaled by UnpinBuffer() */
5325  /* Reset the published bufid */
5327  }
5328  else
5329  ProcWaitForSignal(WAIT_EVENT_BUFFER_PIN);
5330 
5331  /*
5332  * Remove flag marking us as waiter. Normally this will not be set
5333  * anymore, but ProcWaitForSignal() can return for other signals as
5334  * well. We take care to only reset the flag if we're the waiter, as
5335  * theoretically another backend could have started waiting. That's
5336  * impossible with the current usages due to table level locking, but
5337  * better be safe.
5338  */
5339  buf_state = LockBufHdr(bufHdr);
5340  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
5341  bufHdr->wait_backend_pgprocno == MyProcNumber)
5342  buf_state &= ~BM_PIN_COUNT_WAITER;
5343  UnlockBufHdr(bufHdr, buf_state);
5344 
5345  PinCountWaitBuf = NULL;
5346  /* Loop back and try again */
5347  }
5348 }
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1780
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1644
Datum now(PG_FUNCTION_ARGS)
Definition: timestamp.c:1608
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:67
void CheckBufferIsPinnedOnce(Buffer buffer)
Definition: bufmgr.c:5187
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:176
int64 TimestampTz
Definition: timestamp.h:39
static volatile sig_atomic_t waiting
Definition: latch.c:162
@ PROCSIG_RECOVERY_CONFLICT_BUFFERPIN
Definition: procsignal.h:47
void set_ps_display_remove_suffix(void)
Definition: ps_status.c:421
void set_ps_display_suffix(const char *suffix)
Definition: ps_status.c:369
int DeadlockTimeout
Definition: proc.c:58
void SetStartupBufferPinWaitBufId(int bufid)
Definition: proc.c:660
void ProcWaitForSignal(uint32 wait_event_info)
Definition: proc.c:1872
void ResolveRecoveryConflictWithBufferPin(void)
Definition: standby.c:792
bool log_recovery_conflict_waits
Definition: standby.c:41
void LogRecoveryConflict(ProcSignalReason reason, TimestampTz wait_start, TimestampTz now, VirtualTransactionId *wait_list, bool still_waiting)
Definition: standby.c:273
int wait_backend_pgprocno
#define InHotStandby
Definition: xlogutils.h:60

References Assert, BM_PIN_COUNT_WAITER, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsPinned, CheckBufferIsPinnedOnce(), DeadlockTimeout, elog, ERROR, GetBufferDescriptor(), GetCurrentTimestamp(), InHotStandby, LockBuffer(), LockBufHdr(), log_recovery_conflict_waits, LogRecoveryConflict(), MyProcNumber, now(), PinCountWaitBuf, PROCSIG_RECOVERY_CONFLICT_BUFFERPIN, ProcWaitForSignal(), ResolveRecoveryConflictWithBufferPin(), set_ps_display_remove_suffix(), set_ps_display_suffix(), SetStartupBufferPinWaitBufId(), TimestampDifferenceExceeds(), UnlockBufHdr(), BufferDesc::wait_backend_pgprocno, and waiting.

Referenced by _bt_upgradelockbufcleanup(), ginVacuumPostingTree(), hashbulkdelete(), heap_force_common(), lazy_scan_heap(), XLogReadBufferForRedoExtended(), and ZeroAndLockBuffer().

◆ MarkBufferDirty()

void MarkBufferDirty ( Buffer  buffer)

Definition at line 2514 of file bufmgr.c.

2515 {
2516  BufferDesc *bufHdr;
2517  uint32 buf_state;
2518  uint32 old_buf_state;
2519 
2520  if (!BufferIsValid(buffer))
2521  elog(ERROR, "bad buffer ID: %d", buffer);
2522 
2523  if (BufferIsLocal(buffer))
2524  {
2525  MarkLocalBufferDirty(buffer);
2526  return;
2527  }
2528 
2529  bufHdr = GetBufferDescriptor(buffer - 1);
2530 
2531  Assert(BufferIsPinned(buffer));
2532  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2533  LW_EXCLUSIVE));
2534 
2535  old_buf_state = pg_atomic_read_u32(&bufHdr->state);
2536  for (;;)
2537  {
2538  if (old_buf_state & BM_LOCKED)
2539  old_buf_state = WaitBufHdrUnlocked(bufHdr);
2540 
2541  buf_state = old_buf_state;
2542 
2543  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2544  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
2545 
2546  if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
2547  buf_state))
2548  break;
2549  }
2550 
2551  /*
2552  * If the buffer was not dirty already, do vacuum accounting.
2553  */
2554  if (!(old_buf_state & BM_DIRTY))
2555  {
2556  pgBufferUsage.shared_blks_dirtied++;
2557  if (VacuumCostActive)
2558  VacuumCostBalance += VacuumCostPageDirty;
2559  }
2560 }
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:349
#define BM_LOCKED
Definition: buf_internals.h:60
static uint32 WaitBufHdrUnlocked(BufferDesc *buf)
Definition: bufmgr.c:5773
bool VacuumCostActive
Definition: globals.c:157
int VacuumCostBalance
Definition: globals.c:156
int VacuumCostPageDirty
Definition: globals.c:152
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:449
int64 shared_blks_dirtied
Definition: instrument.h:28

References Assert, BM_DIRTY, BM_JUST_DIRTIED, BM_LOCKED, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, BufferIsValid(), elog, ERROR, GetBufferDescriptor(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), MarkLocalBufferDirty(), pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pgBufferUsage, BufferUsage::shared_blks_dirtied, BufferDesc::state, VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, and WaitBufHdrUnlocked().

Referenced by _bt_clear_incomplete_split(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_restore_meta(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), brin_doinsert(), brin_doupdate(), brin_initialize_empty_new_buffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinRevmapDesummarizeRange(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), createPostingTree(), dataExecPlaceToPageInternal(), dataExecPlaceToPageLeaf(), do_setval(), doPickSplit(), entryExecPlaceToPage(), fill_seq_fork_with_data(), FreeSpaceMapPrepareTruncateRel(), generic_redo(), GenericXLogFinish(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginDeletePage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistbuild(), gistbuildempty(), gistdeletepage(), gistplacetopage(), gistprunepage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistvacuumpage(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), 
hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_prune_and_freeze(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune_freeze(), heap_xlog_update(), heap_xlog_visible(), lazy_scan_new_or_empty(), lazy_scan_prune(), lazy_vacuum_heap_page(), log_newpage_range(), moveLeafs(), nextval_internal(), RelationAddBlocks(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), revmap_physical_extend(), saveNodeLink(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), SpGistUpdateMetaPage(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), writeListPage(), and XLogReadBufferForRedoExtended().

◆ MarkBufferDirtyHint()

void MarkBufferDirtyHint ( Buffer  buffer,
bool  buffer_std 
)

Definition at line 4970 of file bufmgr.c.

4971 {
4972  BufferDesc *bufHdr;
4973  Page page = BufferGetPage(buffer);
4974 
4975  if (!BufferIsValid(buffer))
4976  elog(ERROR, "bad buffer ID: %d", buffer);
4977 
4978  if (BufferIsLocal(buffer))
4979  {
4980  MarkLocalBufferDirty(buffer);
4981  return;
4982  }
4983 
4984  bufHdr = GetBufferDescriptor(buffer - 1);
4985 
4986  Assert(GetPrivateRefCount(buffer) > 0);
4987  /* here, either share or exclusive lock is OK */
4988  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
4989 
4990  /*
4991  * This routine might get called many times on the same page, if we are
4992  * making the first scan after commit of an xact that added/deleted many
4993  * tuples. So, be as quick as we can if the buffer is already dirty. We
4994  * do this by not acquiring spinlock if it looks like the status bits are
4995  * already set. Since we make this test unlocked, there's a chance we
4996  * might fail to notice that the flags have just been cleared, and failed
4997  * to reset them, due to memory-ordering issues. But since this function
4998  * is only intended to be used in cases where failing to write out the
4999  * data would be harmless anyway, it doesn't really matter.
5000  */
5001  if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
5002  (BM_DIRTY | BM_JUST_DIRTIED))
5003  {
5004  XLogRecPtr lsn = InvalidXLogRecPtr;
5005  bool dirtied = false;
5006  bool delayChkptFlags = false;
5007  uint32 buf_state;
5008 
5009  /*
5010  * If we need to protect hint bit updates from torn writes, WAL-log a
5011  * full page image of the page. This full page image is only necessary
5012  * if the hint bit update is the first change to the page since the
5013  * last checkpoint.
5014  *
5015  * We don't check full_page_writes here because that logic is included
5016  * when we call XLogInsert() since the value changes dynamically.
5017  */
5018  if (XLogHintBitIsNeeded() &&
5019  (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
5020  {
5021  /*
5022  * If we must not write WAL, due to a relfilelocator-specific
5023  * condition or being in recovery, don't dirty the page. We can
5024  * set the hint, just not dirty the page as a result so the hint
5025  * is lost when we evict the page or shutdown.
5026  *
5027  * See src/backend/storage/page/README for longer discussion.
5028  */
5029  if (RecoveryInProgress() ||
5030  RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
5031  return;
5032 
5033  /*
5034  * If the block is already dirty because we either made a change
5035  * or set a hint already, then we don't need to write a full page
5036  * image. Note that aggressive cleaning of blocks dirtied by hint
5037  * bit setting would increase the call rate. Bulk setting of hint
5038  * bits would reduce the call rate...
5039  *
5040  * We must issue the WAL record before we mark the buffer dirty.
5041  * Otherwise we might write the page before we write the WAL. That
5042  * causes a race condition, since a checkpoint might occur between
5043  * writing the WAL record and marking the buffer dirty. We solve
5044  * that with a kluge, but one that is already in use during
5045  * transaction commit to prevent race conditions. Basically, we
5046  * simply prevent the checkpoint WAL record from being written
5047  * until we have marked the buffer dirty. We don't start the
5048  * checkpoint flush until we have marked dirty, so our checkpoint
5049  * must flush the change to disk successfully or the checkpoint
5050  * never gets written, so crash recovery will fix.
5051  *
5052  * It's possible we may enter here without an xid, so it is
5053  * essential that CreateCheckPoint waits for virtual transactions
5054  * rather than full transactionids.
5055  */
5056  Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
5057  MyProc->delayChkptFlags |= DELAY_CHKPT_START;
5058  delayChkptFlags = true;
5059  lsn = XLogSaveBufferForHint(buffer, buffer_std);
5060  }
5061 
5062  buf_state = LockBufHdr(bufHdr);
5063 
5064  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5065 
5066  if (!(buf_state & BM_DIRTY))
5067  {
5068  dirtied = true; /* Means "will be dirtied by this action" */
5069 
5070  /*
5071  * Set the page LSN if we wrote a backup block. We aren't supposed
5072  * to set this when only holding a share lock but as long as we
5073  * serialise it somehow we're OK. We choose to set LSN while
5074  * holding the buffer header lock, which causes any reader of an
5075  * LSN who holds only a share lock to also obtain a buffer header
5076  * lock before using PageGetLSN(), which is enforced in
5077  * BufferGetLSNAtomic().
5078  *
5079  * If checksums are enabled, you might think we should reset the
5080  * checksum here. That will happen when the page is written
5081  * sometime later in this checkpoint cycle.
5082  */
5083  if (!XLogRecPtrIsInvalid(lsn))
5084  PageSetLSN(page, lsn);
5085  }
5086 
5087  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
5088  UnlockBufHdr(bufHdr, buf_state);
5089 
5090  if (delayChkptFlags)
5091  MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
5092 
5093  if (dirtied)
5094  {
5095  pgBufferUsage.shared_blks_dirtied++;
5096  if (VacuumCostActive)
5097  VacuumCostBalance += VacuumCostPageDirty;
5098  }
5099  }
5100 }
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:391
#define DELAY_CHKPT_START
Definition: proc.h:114
bool RelFileLocatorSkippingWAL(RelFileLocator rlocator)
Definition: storage.c:532
int delayChkptFlags
Definition: proc.h:235
bool RecoveryInProgress(void)
Definition: xlog.c:6333
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
Definition: xloginsert.c:1065

References Assert, BM_DIRTY, BM_JUST_DIRTIED, BM_PERMANENT, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferGetPage(), BufferIsLocal, BufferIsValid(), BufTagGetRelFileLocator(), DELAY_CHKPT_START, PGPROC::delayChkptFlags, elog, ERROR, GetBufferDescriptor(), GetPrivateRefCount(), InvalidXLogRecPtr, LockBufHdr(), LWLockHeldByMe(), MarkLocalBufferDirty(), MyProc, PageSetLSN(), pg_atomic_read_u32(), pgBufferUsage, RecoveryInProgress(), RelFileLocatorSkippingWAL(), BufferUsage::shared_blks_dirtied, BufferDesc::state, BufferDesc::tag, UnlockBufHdr(), VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, XLogHintBitIsNeeded, XLogRecPtrIsInvalid, and XLogSaveBufferForHint().

Referenced by _bt_check_unique(), _bt_killitems(), _hash_kill_items(), brin_start_evacuating_page(), btvacuumpage(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), gistkillitems(), heap_page_prune_and_freeze(), read_seq_tuple(), SetHintBits(), and XLogRecordPageWithFreeSpace().

◆ PrefetchBuffer()

PrefetchBufferResult PrefetchBuffer ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

Definition at line 639 of file bufmgr.c.

640 {
641  Assert(RelationIsValid(reln));
642  Assert(BlockNumberIsValid(blockNum));
643 
644  if (RelationUsesLocalBuffers(reln))
645  {
646  /* see comments in ReadBufferExtended */
647  if (RELATION_IS_OTHER_TEMP(reln))
648  ereport(ERROR,
649  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
650  errmsg("cannot access temporary tables of other sessions")));
651 
652  /* pass it off to localbuf.c */
653  return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
654  }
655  else
656  {
657  /* pass it to the shared buffer version */
658  return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
659  }
660 }
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
PrefetchBufferResult PrefetchSharedBuffer(SMgrRelation smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:549
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define ereport(elevel,...)
Definition: elog.h:149
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition: localbuf.c:69
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:658
#define RelationIsValid(relation)
Definition: rel.h:478

References Assert, BlockNumberIsValid(), ereport, errcode(), errmsg(), ERROR, PrefetchLocalBuffer(), PrefetchSharedBuffer(), RELATION_IS_OTHER_TEMP, RelationGetSmgr(), RelationIsValid, and RelationUsesLocalBuffers.

Referenced by BitmapPrefetch(), count_nondeletable_pages(), and pg_prewarm().

◆ PrefetchSharedBuffer()

PrefetchBufferResult PrefetchSharedBuffer ( struct SMgrRelationData smgr_reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

◆ ReadBuffer()

Buffer ReadBuffer ( Relation  reln,
BlockNumber  blockNum 
)

Definition at line 746 of file bufmgr.c.

747 {
748  return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
749 }
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:793

References MAIN_FORKNUM, RBM_NORMAL, and ReadBufferExtended().

Referenced by _bt_allocbuf(), _bt_getbuf(), _bt_search_insert(), _hash_getbuf(), _hash_getbuf_with_condlock_cleanup(), blbulkdelete(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brinGetStats(), brinGetTupleForHeapBlock(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), bt_metap(), bt_multi_page_stats(), bt_page_items_internal(), bt_page_stats_internal(), ginFindLeafPage(), ginFindParents(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), GinNewBuffer(), ginStepRight(), ginUpdateStats(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistGetMaxLevel(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_lock_tuple(), heap_update(), initBloomState(), pg_visibility(), pgstatginindex_internal(), read_seq_tuple(), RelationGetBufferForTuple(), ReleaseAndReadBuffer(), revmap_get_buffer(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), shiftList(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), and spgWalk().

◆ ReadBufferExtended()

Buffer ReadBufferExtended ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy 
)
inline

Definition at line 793 of file bufmgr.c.

795 {
796  Buffer buf;
797 
798  /*
799  * Reject attempts to read non-local temporary relations; we would be
800  * likely to get wrong data since we have no visibility into the owning
801  * session's local buffers.
802  */
803  if (RELATION_IS_OTHER_TEMP(reln))
804  ereport(ERROR,
805  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
806  errmsg("cannot access temporary tables of other sessions")));
807 
808  /*
809  * Read the buffer, and update pgstat counters to reflect a cache hit or
810  * miss.
811  */
812  buf = ReadBuffer_common(reln, RelationGetSmgr(reln), 0,
813  forkNum, blockNum, mode, strategy);
814 
815  return buf;
816 }

References buf, ereport, errcode(), errmsg(), ERROR, mode, ReadBuffer_common(), RELATION_IS_OTHER_TEMP, and RelationGetSmgr().

Referenced by _hash_getbuf_with_strategy(), _hash_getinitbuf(), _hash_getnewbuf(), autoprewarm_database_main(), blbulkdelete(), blgetbitmap(), BloomInitMetapage(), blvacuumcleanup(), brin_vacuum_scan(), bt_recheck_sibling_links(), btvacuumpage(), count_nondeletable_pages(), fsm_readbuf(), get_raw_page_internal(), ginbulkdelete(), ginDeletePage(), ginScanToDelete(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbulkdelete(), heapam_scan_sample_next_block(), lazy_scan_heap(), lazy_vacuum_heap_rel(), log_newpage_range(), palloc_btree_page(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstathashindex(), pgstatindex_impl(), ReadBuffer(), ReadBufferBI(), spgprocesspending(), spgvacuumpage(), statapprox_heap(), verify_heapam(), and vm_readbuf().

◆ ReadBufferWithoutRelcache()

Buffer ReadBufferWithoutRelcache ( RelFileLocator  rlocator,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy,
bool  permanent 
)

Definition at line 830 of file bufmgr.c.

833 {
834  SMgrRelation smgr = smgropen(rlocator, INVALID_PROC_NUMBER);
835 
836  return ReadBuffer_common(NULL, smgr,
837  permanent ? RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED,
838  forkNum, blockNum,
839  mode, strategy);
840 }

References INVALID_PROC_NUMBER, mode, ReadBuffer_common(), and smgropen().

Referenced by RelationCopyStorageUsingBuffer(), ScanSourceDatabasePgClass(), and XLogReadBufferExtended().

◆ ReadRecentBuffer()

bool ReadRecentBuffer ( RelFileLocator  rlocator,
ForkNumber  forkNum,
BlockNumber  blockNum,
Buffer  recent_buffer 
)

Definition at line 670 of file bufmgr.c.

672 {
673  BufferDesc *bufHdr;
674  BufferTag tag;
675  uint32 buf_state;
676  bool have_private_ref;
677 
678  Assert(BufferIsValid(recent_buffer));
679 
680  ResourceOwnerEnlarge(CurrentResourceOwner);
681  ReservePrivateRefCountEntry();
682  InitBufferTag(&tag, &rlocator, forkNum, blockNum);
683 
684  if (BufferIsLocal(recent_buffer))
685  {
686  int b = -recent_buffer - 1;
687 
688  bufHdr = GetLocalBufferDescriptor(b);
689  buf_state = pg_atomic_read_u32(&bufHdr->state);
690 
691  /* Is it still valid and holding the right tag? */
692  if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
693  {
694  PinLocalBuffer(bufHdr, true);
695 
696  pgBufferUsage.local_blks_hit++;
697 
698  return true;
699  }
700  }
701  else
702  {
703  bufHdr = GetBufferDescriptor(recent_buffer - 1);
704  have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
705 
706  /*
707  * Do we already have this buffer pinned with a private reference? If
708  * so, it must be valid and it is safe to check the tag without
709  * locking. If not, we have to lock the header first and then check.
710  */
711  if (have_private_ref)
712  buf_state = pg_atomic_read_u32(&bufHdr->state);
713  else
714  buf_state = LockBufHdr(bufHdr);
715 
716  if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
717  {
718  /*
719  * It's now safe to pin the buffer. We can't pin first and ask
720  * questions later, because it might confuse code paths like
721  * InvalidateBuffer() if we pinned a random non-matching buffer.
722  */
723  if (have_private_ref)
724  PinBuffer(bufHdr, NULL); /* bump pin count */
725  else
726  PinBuffer_Locked(bufHdr); /* pin for first time */
727 
728  pgBufferUsage.shared_blks_hit++;
729 
730  return true;
731  }
732 
733  /* If we locked the header above, now unlock. */
734  if (!have_private_ref)
735  UnlockBufHdr(bufHdr, buf_state);
736  }
737 
738  return false;
739 }
static void InitBufferTag(BufferTag *tag, const RelFileLocator *rlocator, ForkNumber forkNum, BlockNumber blockNum)
static bool BufferTagsEqual(const BufferTag *tag1, const BufferTag *tag2)
static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
Definition: bufmgr.c:2634
int b
Definition: isn.c:70
bool PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount)
Definition: localbuf.c:655
int64 local_blks_hit
Definition: instrument.h:30
int64 shared_blks_hit
Definition: instrument.h:26

References Assert, b, BM_VALID, BufferIsLocal, BufferIsValid(), BufferTagsEqual(), CurrentResourceOwner, GetBufferDescriptor(), GetLocalBufferDescriptor(), GetPrivateRefCount(), InitBufferTag(), BufferUsage::local_blks_hit, LockBufHdr(), pg_atomic_read_u32(), pgBufferUsage, PinBuffer(), PinBuffer_Locked(), PinLocalBuffer(), ReservePrivateRefCountEntry(), ResourceOwnerEnlarge(), BufferUsage::shared_blks_hit, BufferDesc::state, BufferDesc::tag, and UnlockBufHdr().

Referenced by XLogReadBufferExtended().

◆ RelationGetNumberOfBlocksInFork()

BlockNumber RelationGetNumberOfBlocksInFork ( Relation  relation,
ForkNumber  forkNum 
)

Definition at line 3905 of file bufmgr.c.

3906 {
3907  if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
3908  {
3909  /*
3910  * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore
3911  * tableam returns the size in bytes - but for the purpose of this
3912  * routine, we want the number of blocks. Therefore divide, rounding
3913  * up.
3914  */
3915  uint64 szbytes;
3916 
3917  szbytes = table_relation_size(relation, forkNum);
3918 
3919  return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
3920  }
3921  else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
3922  {
3923  return smgrnblocks(RelationGetSmgr(relation), forkNum);
3924  }
3925  else
3926  Assert(false);
3927 
3928  return 0; /* keep compiler quiet */
3929 }
static uint64 table_relation_size(Relation rel, ForkNumber forkNumber)
Definition: tableam.h:1868

References Assert, RelationData::rd_rel, RelationGetSmgr(), smgrnblocks(), and table_relation_size().

Referenced by _hash_getnewbuf(), _hash_init(), autoprewarm_database_main(), get_raw_page_internal(), and pg_prewarm().

◆ ReleaseAndReadBuffer()

Buffer ReleaseAndReadBuffer ( Buffer  buffer,
Relation  relation,
BlockNumber  blockNum 
)

Definition at line 2576 of file bufmgr.c.

2579 {
2580  ForkNumber forkNum = MAIN_FORKNUM;
2581  BufferDesc *bufHdr;
2582 
2583  if (BufferIsValid(buffer))
2584  {
2585  Assert(BufferIsPinned(buffer));
2586  if (BufferIsLocal(buffer))
2587  {
2588  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2589  if (bufHdr->tag.blockNum == blockNum &&
2590  BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
2591  BufTagGetForkNum(&bufHdr->tag) == forkNum)
2592  return buffer;
2593  UnpinLocalBuffer(buffer);
2594  }
2595  else
2596  {
2597  bufHdr = GetBufferDescriptor(buffer - 1);
2598  /* we have pin, so it's ok to examine tag without spinlock */
2599  if (bufHdr->tag.blockNum == blockNum &&
2600  BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
2601  BufTagGetForkNum(&bufHdr->tag) == forkNum)
2602  return buffer;
2603  UnpinBuffer(bufHdr);
2604  }
2605  }
2606 
2607  return ReadBuffer(relation, blockNum);
2608 }
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:746
void UnpinLocalBuffer(Buffer buffer)
Definition: localbuf.c:681

References Assert, buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufferIsValid(), BufTagGetForkNum(), BufTagMatchesRelFileLocator(), GetBufferDescriptor(), GetLocalBufferDescriptor(), MAIN_FORKNUM, RelationData::rd_locator, ReadBuffer(), BufferDesc::tag, UnpinBuffer(), and UnpinLocalBuffer().

Referenced by _bt_relandgetbuf(), ginFindLeafPage(), heapam_index_fetch_tuple(), and heapam_scan_bitmap_next_block().

◆ ReleaseBuffer()

void ReleaseBuffer ( Buffer  buffer)

Definition at line 4906 of file bufmgr.c.

4907 {
4908  if (!BufferIsValid(buffer))
4909  elog(ERROR, "bad buffer ID: %d", buffer);
4910 
4911  if (BufferIsLocal(buffer))
4912  UnpinLocalBuffer(buffer);
4913  else
4914  UnpinBuffer(GetBufferDescriptor(buffer - 1));
4915 }

References PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsValid(), elog, ERROR, GetBufferDescriptor(), UnpinBuffer(), and UnpinLocalBuffer().

Referenced by _bt_allocbuf(), _bt_drop_lock_and_maybe_pin(), _bt_pagedel(), _bt_relbuf(), _bt_search_insert(), _bt_unlink_halfdead_page(), _hash_dropbuf(), _hash_getbuf_with_condlock_cleanup(), autoprewarm_database_main(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brin_vacuum_scan(), bringetbitmap(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapTerminate(), brinsummarize(), collect_corrupt_items(), collect_visibility_data(), entryLoadMoreItems(), ExecEndBitmapHeapScan(), ExecEndIndexOnlyScan(), ExecReScanBitmapHeapScan(), ExtendBufferedRelTo(), FreeBulkInsertState(), freeGinBtreeStack(), fsm_search(), fsm_vacuum_page(), get_actual_variable_endpoint(), get_raw_page_internal(), GetRecordedFreeSpace(), ginDeletePage(), ginFindParents(), ginFinishSplit(), ginFreeScanKeys(), ginInsertCleanup(), GinNewBuffer(), ginScanToDelete(), gistdoinsert(), gistFindCorrectParent(), gistNewBuffer(), gistvacuum_delete_empty_pages(), heap_abort_speculative(), heap_delete(), heap_endscan(), heap_fetch(), heap_fetch_next_buffer(), heap_force_common(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_rescan(), heap_update(), heap_vac_scan_next_block(), heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapgettup(), heapgettup_pagemode(), lazy_scan_heap(), lazy_vacuum_heap_rel(), pg_prewarm(), pg_visibility(), pg_visibility_map(), pg_visibility_map_summary(), pgstatindex_impl(), read_stream_reset(), ReadBufferBI(), RelationAddBlocks(), RelationGetBufferForTuple(), ReleaseBulkInsertStatePin(), revmap_get_buffer(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), statapprox_heap(), summarize_range(), terminate_brin_buildstate(), tts_buffer_heap_clear(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), 
UnlockReleaseBuffer(), verify_heapam(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), and XLogReadBufferExtended().

◆ StartReadBuffer()

bool StartReadBuffer ( ReadBuffersOperation operation,
Buffer buffer,
BlockNumber  blocknum,
int  flags 
)

Definition at line 1364 of file bufmgr.c.

1368 {
1369  int nblocks = 1;
1370  bool result;
1371 
1372  result = StartReadBuffersImpl(operation, buffer, blocknum, &nblocks, flags);
1373  Assert(nblocks == 1); /* single block can't be short */
1374 
1375  return result;
1376 }
static pg_attribute_always_inline bool StartReadBuffersImpl(ReadBuffersOperation *operation, Buffer *buffers, BlockNumber blockNum, int *nblocks, int flags)
Definition: bufmgr.c:1254

References Assert, PrivateRefCountEntry::buffer, and StartReadBuffersImpl().

Referenced by read_stream_next_buffer(), and ReadBuffer_common().

◆ StartReadBuffers()

bool StartReadBuffers ( ReadBuffersOperation operation,
Buffer buffers,
BlockNumber  blockNum,
int *  nblocks,
int  flags 
)

Definition at line 1349 of file bufmgr.c.

1354 {
1355  return StartReadBuffersImpl(operation, buffers, blockNum, nblocks, flags);
1356 }

References StartReadBuffersImpl().

Referenced by read_stream_start_pending_read().

◆ UnlockBuffers()

void UnlockBuffers ( void  )

Definition at line 5112 of file bufmgr.c.

5113 {
5114  BufferDesc *buf = PinCountWaitBuf;
5115 
5116  if (buf)
5117  {
5118  uint32 buf_state;
5119 
5120  buf_state = LockBufHdr(buf);
5121 
5122  /*
5123  * Don't complain if flag bit not set; it could have been reset but we
5124  * got a cancel/die interrupt before getting the signal.
5125  */
5126  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
5127  buf->wait_backend_pgprocno == MyProcNumber)
5128  buf_state &= ~BM_PIN_COUNT_WAITER;
5129 
5130  UnlockBufHdr(buf, buf_state);
5131 
5132  PinCountWaitBuf = NULL;
5133  }
5134 }

References BM_PIN_COUNT_WAITER, buf, LockBufHdr(), MyProcNumber, PinCountWaitBuf, and UnlockBufHdr().

Referenced by AbortSubTransaction(), AbortTransaction(), AtProcExit_Buffers(), BackgroundWriterMain(), CheckpointerMain(), and WalWriterMain().

◆ UnlockReleaseBuffer()

void UnlockReleaseBuffer ( Buffer  buffer)

Definition at line 4923 of file bufmgr.c.

4924 {
4925  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4926  ReleaseBuffer(buffer);
4927 }

References PrivateRefCountEntry::buffer, BUFFER_LOCK_UNLOCK, LockBuffer(), and ReleaseBuffer().

Referenced by _bt_clear_incomplete_split(), _bt_restore_meta(), _hash_relbuf(), allocNewBuffer(), AlterSequence(), blbulkdelete(), blgetbitmap(), blinsert(), BloomInitMetapage(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinGetStats(), brinRevmapDesummarizeRange(), bt_metap(), bt_multi_page_stats(), bt_page_items_internal(), bt_page_stats_internal(), bt_recheck_sibling_links(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), createPostingTree(), do_setval(), doPickSplit(), entryLoadMoreItems(), fill_seq_fork_with_data(), flushCachedPage(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_set_and_search(), generic_redo(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), ginScanToDelete(), ginStepRight(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistbufferinginserttuples(), gistbuild(), gistbuildempty(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), 
hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_insert(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune_freeze(), heap_xlog_update(), heap_xlog_visible(), heapam_scan_analyze_next_tuple(), initBloomState(), lazy_scan_heap(), lazy_scan_new_or_empty(), lazy_vacuum_heap_rel(), log_newpage_range(), moveLeafs(), nextval_internal(), palloc_btree_page(), pg_get_sequence_data(), pg_sequence_last_value(), pg_visibility(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), ResetSequence(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), ScanSourceDatabasePgClass(), seq_redo(), SequenceChangePersistence(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistUpdateMetaPage(), spgMatchNodeAction(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), statapprox_heap(), verify_heapam(), verifyBackupPageConsistency(), visibilitymap_prepare_truncate(), writeListPage(), xlog_redo(), and XLogRecordPageWithFreeSpace().

◆ WaitReadBuffers()

void WaitReadBuffers ( ReadBuffersOperation operation)

Definition at line 1392 of file bufmgr.c.

1393 {
1394  Buffer *buffers;
1395  int nblocks;
1396  BlockNumber blocknum;
1397  ForkNumber forknum;
1398  IOContext io_context;
1399  IOObject io_object;
1400  char persistence;
1401 
1402  /*
1403  * Currently operations are only allowed to include a read of some range,
1404  * with an optional extra buffer that is already pinned at the end. So
1405  * nblocks can be at most one more than io_buffers_len.
1406  */
1407  Assert((operation->nblocks == operation->io_buffers_len) ||
1408  (operation->nblocks == operation->io_buffers_len + 1));
1409 
1410  /* Find the range of the physical read we need to perform. */
1411  nblocks = operation->io_buffers_len;
1412  if (nblocks == 0)
1413  return; /* nothing to do */
1414 
1415  buffers = &operation->buffers[0];
1416  blocknum = operation->blocknum;
1417  forknum = operation->forknum;
1418  persistence = operation->persistence;
1419 
1420  if (persistence == RELPERSISTENCE_TEMP)
1421  {
1422  io_context = IOCONTEXT_NORMAL;
1423  io_object = IOOBJECT_TEMP_RELATION;
1424  }
1425  else
1426  {
1427  io_context = IOContextForStrategy(operation->strategy);
1428  io_object = IOOBJECT_RELATION;
1429  }
1430 
1431  /*
1432  * We count all these blocks as read by this backend. This is traditional
1433  * behavior, but might turn out to be not true if we find that someone
1434  * else has beaten us and completed the read of some of these blocks. In
1435  * that case the system globally double-counts, but we traditionally don't
1436  * count this as a "hit", and we don't have a separate counter for "miss,
1437  * but another backend completed the read".
1438  */
1439  if (persistence == RELPERSISTENCE_TEMP)
1440  pgBufferUsage.local_blks_read += nblocks;
1441  else
1442  pgBufferUsage.shared_blks_read += nblocks;
1443 
1444  for (int i = 0; i < nblocks; ++i)
1445  {
1446  int io_buffers_len;
1447  Buffer io_buffers[MAX_IO_COMBINE_LIMIT];
1448  void *io_pages[MAX_IO_COMBINE_LIMIT];
1449  instr_time io_start;
1450  BlockNumber io_first_block;
1451 
1452  /*
1453  * Skip this block if someone else has already completed it. If an
1454  * I/O is already in progress in another backend, this will wait for
1455  * the outcome: either done, or something went wrong and we will
1456  * retry.
1457  */
1458  if (!WaitReadBuffersCanStartIO(buffers[i], false))
1459  {
1460  /*
1461  * Report this as a 'hit' for this backend, even though it must
1462  * have started out as a miss in PinBufferForBlock().
1463  */
1464  TRACE_POSTGRESQL_BUFFER_READ_DONE(forknum, blocknum + i,
1465  operation->smgr->smgr_rlocator.locator.spcOid,
1466  operation->smgr->smgr_rlocator.locator.dbOid,
1467  operation->smgr->smgr_rlocator.locator.relNumber,
1468  operation->smgr->smgr_rlocator.backend,
1469  true);
1470  continue;
1471  }
1472 
1473  /* We found a buffer that we need to read in. */
1474  io_buffers[0] = buffers[i];
1475  io_pages[0] = BufferGetBlock(buffers[i]);
1476  io_first_block = blocknum + i;
1477  io_buffers_len = 1;
1478 
1479  /*
1480  * How many neighboring-on-disk blocks can we scatter-read into
1481  * other buffers at the same time? In this case we don't wait if we
1482  * see an I/O already in progress. We already hold BM_IO_IN_PROGRESS
1483  * for the head block, so we should get on with that I/O as soon as
1484  * possible. We'll come back to this block again, above.
1485  */
1486  while ((i + 1) < nblocks &&
1487  WaitReadBuffersCanStartIO(buffers[i + 1], true))
1488  {
1489  /* Must be consecutive block numbers. */
1490  Assert(BufferGetBlockNumber(buffers[i + 1]) ==
1491  BufferGetBlockNumber(buffers[i]) + 1);
1492 
1493  io_buffers[io_buffers_len] = buffers[++i];
1494  io_pages[io_buffers_len++] = BufferGetBlock(buffers[i]);
1495  }
1496 
1497  io_start = pgstat_prepare_io_time(track_io_timing);
1498  smgrreadv(operation->smgr, forknum, io_first_block, io_pages, io_buffers_len);
1499  pgstat_count_io_op_time(io_object, io_context, IOOP_READ, io_start,
1500  io_buffers_len);
1501 
1502  /* Verify each block we read, and terminate the I/O. */
1503  for (int j = 0; j < io_buffers_len; ++j)
1504  {
1505  BufferDesc *bufHdr;
1506  Block bufBlock;
1507 
1508  if (persistence == RELPERSISTENCE_TEMP)
1509  {
1510  bufHdr = GetLocalBufferDescriptor(-io_buffers[j] - 1);
1511  bufBlock = LocalBufHdrGetBlock(bufHdr);
1512  }
1513  else
1514  {
1515  bufHdr = GetBufferDescriptor(io_buffers[j] - 1);
1516  bufBlock = BufHdrGetBlock(bufHdr);
1517  }
1518 
1519  /* check for garbage data */
1520  if (!PageIsVerifiedExtended((Page) bufBlock, io_first_block + j,
1521  PIV_LOG_WARNING | PIV_REPORT_STAT))
1522  {
1523  if ((operation->flags & READ_BUFFERS_ZERO_ON_ERROR) || zero_damaged_pages)
1524  {
1525  ereport(WARNING,
1526  (errcode(ERRCODE_DATA_CORRUPTED),
1527  errmsg("invalid page in block %u of relation %s; zeroing out page",
1528  io_first_block + j,
1529  relpath(operation->smgr->smgr_rlocator, forknum))));
1530  memset(bufBlock, 0, BLCKSZ);
1531  }
1532  else
1533  ereport(ERROR,
1534  (errcode(ERRCODE_DATA_CORRUPTED),
1535  errmsg("invalid page in block %u of relation %s",
1536  io_first_block + j,
1537  relpath(operation->smgr->smgr_rlocator, forknum))));
1538  }
1539 
1540  /* Terminate I/O and set BM_VALID. */
1541  if (persistence == RELPERSISTENCE_TEMP)
1542  {
1543  uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
1544 
1545  buf_state |= BM_VALID;
1546  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
1547  }
1548  else
1549  {
1550  /* Set BM_VALID, terminate IO, and wake up any waiters */
1551  TerminateBufferIO(bufHdr, false, BM_VALID, true);
1552  }
1553 
1554  /* Report I/Os as completing individually. */
1555  TRACE_POSTGRESQL_BUFFER_READ_DONE(forknum, io_first_block + j,
1556  operation->smgr->smgr_rlocator.locator.spcOid,
1557  operation->smgr->smgr_rlocator.locator.dbOid,
1558  operation->smgr->smgr_rlocator.locator.relNumber,
1559  operation->smgr->smgr_rlocator.backend,
1560  false);
1561  }
1562 
1563  if (VacuumCostActive)
1564  VacuumCostBalance += VacuumCostPageMiss * io_buffers_len;
1565  }
1566 }
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:3706
static bool WaitReadBuffersCanStartIO(Buffer buffer, bool nowait)
Definition: bufmgr.c:1379
bool zero_damaged_pages
Definition: bufmgr.c:140
static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits, bool forget_owner)
Definition: bufmgr.c:5597
#define BufHdrGetBlock(bufHdr)
Definition: bufmgr.c:68
#define READ_BUFFERS_ZERO_ON_ERROR
Definition: bufmgr.h:111
#define MAX_IO_COMBINE_LIMIT
Definition: bufmgr.h:164
bool PageIsVerifiedExtended(Page page, BlockNumber blkno, int flags)
Definition: bufpage.c:88
#define PIV_LOG_WARNING
Definition: bufpage.h:468
#define PIV_REPORT_STAT
Definition: bufpage.h:469
#define WARNING
Definition: elog.h:36
IOContext IOContextForStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:758
int VacuumCostPageMiss
Definition: globals.c:151
int j
Definition: isn.c:74
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:41
IOObject
Definition: pgstat.h:311
IOContext
Definition: pgstat.h:319
@ IOOP_READ
Definition: pgstat.h:334
#define relpath(rlocator, forknum)
Definition: relpath.h:102
void smgrreadv(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, void **buffers, BlockNumber nblocks)
Definition: smgr.c:600
int64 shared_blks_read
Definition: instrument.h:27
int64 local_blks_read
Definition: instrument.h:31
ForkNumber forknum
Definition: bufmgr.h:121
int16 io_buffers_len
Definition: bufmgr.h:133
Buffer * buffers
Definition: bufmgr.h:129
BufferAccessStrategy strategy
Definition: bufmgr.h:122
BlockNumber blocknum
Definition: bufmgr.h:130
struct SMgrRelationData * smgr
Definition: bufmgr.h:119
RelFileLocator locator
RelFileNumber relNumber
RelFileLocatorBackend smgr_rlocator
Definition: smgr.h:37

References Assert, RelFileLocatorBackend::backend, ReadBuffersOperation::blocknum, BM_VALID, BufferGetBlock(), BufferGetBlockNumber(), ReadBuffersOperation::buffers, BufHdrGetBlock, RelFileLocator::dbOid, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), ERROR, ReadBuffersOperation::flags, ReadBuffersOperation::forknum, GetBufferDescriptor(), GetLocalBufferDescriptor(), i, ReadBuffersOperation::io_buffers_len, IOCONTEXT_NORMAL, IOContextForStrategy(), IOOBJECT_RELATION, IOOBJECT_TEMP_RELATION, IOOP_READ, j, BufferUsage::local_blks_read, LocalBufHdrGetBlock, RelFileLocatorBackend::locator, MAX_IO_COMBINE_LIMIT, ReadBuffersOperation::nblocks, PageIsVerifiedExtended(), ReadBuffersOperation::persistence, pg_atomic_read_u32(), pg_atomic_unlocked_write_u32(), pgBufferUsage, pgstat_count_io_op_time(), pgstat_prepare_io_time(), PIV_LOG_WARNING, PIV_REPORT_STAT, READ_BUFFERS_ZERO_ON_ERROR, RelFileLocator::relNumber, relpath, BufferUsage::shared_blks_read, ReadBuffersOperation::smgr, SMgrRelationData::smgr_rlocator, smgrreadv(), RelFileLocator::spcOid, BufferDesc::state, ReadBuffersOperation::strategy, TerminateBufferIO(), track_io_timing, VacuumCostActive, VacuumCostBalance, VacuumCostPageMiss, WaitReadBuffersCanStartIO(), WARNING, and zero_damaged_pages.

Referenced by read_stream_next_buffer(), and ReadBuffer_common().

Variable Documentation

◆ backend_flush_after

PGDLLIMPORT int backend_flush_after
extern

Definition at line 173 of file bufmgr.c.

Referenced by BufferManagerShmemInit().

◆ bgwriter_flush_after

PGDLLIMPORT int bgwriter_flush_after
extern

Definition at line 172 of file bufmgr.c.

Referenced by BackgroundWriterMain().

◆ bgwriter_lru_maxpages

PGDLLIMPORT int bgwriter_lru_maxpages
extern

Definition at line 141 of file bufmgr.c.

Referenced by BgBufferSync().

◆ bgwriter_lru_multiplier

PGDLLIMPORT double bgwriter_lru_multiplier
extern

Definition at line 142 of file bufmgr.c.

Referenced by BgBufferSync().

◆ BufferBlocks

PGDLLIMPORT char* BufferBlocks
extern

Definition at line 22 of file buf_init.c.

Referenced by BufferGetBlock(), and BufferManagerShmemInit().

◆ checkpoint_flush_after

PGDLLIMPORT int checkpoint_flush_after
extern

Definition at line 171 of file bufmgr.c.

Referenced by BufferSync().

◆ effective_io_concurrency

PGDLLIMPORT int effective_io_concurrency
extern

◆ io_combine_limit

PGDLLIMPORT int io_combine_limit
extern

◆ LocalBufferBlockPointers

PGDLLIMPORT Block* LocalBufferBlockPointers
extern

Definition at line 45 of file localbuf.c.

Referenced by BufferGetBlock(), and InitLocalBuffers().

◆ LocalRefCount

◆ maintenance_io_concurrency

◆ NBuffers

◆ NLocBuffer

◆ track_io_timing

◆ zero_damaged_pages

PGDLLIMPORT bool zero_damaged_pages
extern

Definition at line 140 of file bufmgr.c.

Referenced by mdreadv(), and WaitReadBuffers().