PostgreSQL Source Code git master
bufmgr.h File Reference
#include "port/pg_iovec.h"
#include "storage/block.h"
#include "storage/buf.h"
#include "storage/bufpage.h"
#include "storage/relfilelocator.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
Include dependency graph for bufmgr.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  PrefetchBufferResult
 
struct  BufferManagerRelation
 
struct  ReadBuffersOperation
 

Macros

#define BMR_REL(p_rel)   ((BufferManagerRelation){.rel = p_rel})
 
#define BMR_SMGR(p_smgr, p_relpersistence)   ((BufferManagerRelation){.smgr = p_smgr, .relpersistence = p_relpersistence})
 
#define READ_BUFFERS_ZERO_ON_ERROR   (1 << 0)
 
#define READ_BUFFERS_ISSUE_ADVICE   (1 << 1)
 
#define DEFAULT_EFFECTIVE_IO_CONCURRENCY   0
 
#define DEFAULT_MAINTENANCE_IO_CONCURRENCY   0
 
#define MAX_IO_COMBINE_LIMIT   PG_IOV_MAX
 
#define DEFAULT_IO_COMBINE_LIMIT   Min(MAX_IO_COMBINE_LIMIT, (128 * 1024) / BLCKSZ)
 
#define MAX_IO_CONCURRENCY   1000
 
#define P_NEW   InvalidBlockNumber /* grow the file to get a new page */
 
#define BUFFER_LOCK_UNLOCK   0
 
#define BUFFER_LOCK_SHARE   1
 
#define BUFFER_LOCK_EXCLUSIVE   2
 
#define RelationGetNumberOfBlocks(reln)    RelationGetNumberOfBlocksInFork(reln, MAIN_FORKNUM)
 

Typedefs

typedef void * Block
 
typedef enum BufferAccessStrategyType BufferAccessStrategyType
 
typedef struct PrefetchBufferResult PrefetchBufferResult
 
typedef enum ExtendBufferedFlags ExtendBufferedFlags
 
typedef struct BufferManagerRelation BufferManagerRelation
 
typedef struct ReadBuffersOperation ReadBuffersOperation
 

Enumerations

enum  BufferAccessStrategyType { BAS_NORMAL , BAS_BULKREAD , BAS_BULKWRITE , BAS_VACUUM }
 
enum  ReadBufferMode {
  RBM_NORMAL , RBM_ZERO_AND_LOCK , RBM_ZERO_AND_CLEANUP_LOCK , RBM_ZERO_ON_ERROR ,
  RBM_NORMAL_NO_LOG
}
 
enum  ExtendBufferedFlags {
  EB_SKIP_EXTENSION_LOCK = (1 << 0) , EB_PERFORMING_RECOVERY = (1 << 1) , EB_CREATE_FORK_IF_NEEDED = (1 << 2) , EB_LOCK_FIRST = (1 << 3) ,
  EB_CLEAR_SIZE_CACHE = (1 << 4) , EB_LOCK_TARGET = (1 << 5)
}
 

Functions

PrefetchBufferResult PrefetchSharedBuffer (struct SMgrRelationData *smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
 
PrefetchBufferResult PrefetchBuffer (Relation reln, ForkNumber forkNum, BlockNumber blockNum)
 
bool ReadRecentBuffer (RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum, Buffer recent_buffer)
 
Buffer ReadBuffer (Relation reln, BlockNumber blockNum)
 
Buffer ReadBufferExtended (Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
 
Buffer ReadBufferWithoutRelcache (RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool permanent)
 
bool StartReadBuffer (ReadBuffersOperation *operation, Buffer *buffer, BlockNumber blocknum, int flags)
 
bool StartReadBuffers (ReadBuffersOperation *operation, Buffer *buffers, BlockNumber blockNum, int *nblocks, int flags)
 
void WaitReadBuffers (ReadBuffersOperation *operation)
 
void ReleaseBuffer (Buffer buffer)
 
void UnlockReleaseBuffer (Buffer buffer)
 
bool BufferIsExclusiveLocked (Buffer buffer)
 
bool BufferIsDirty (Buffer buffer)
 
void MarkBufferDirty (Buffer buffer)
 
void IncrBufferRefCount (Buffer buffer)
 
void CheckBufferIsPinnedOnce (Buffer buffer)
 
Buffer ReleaseAndReadBuffer (Buffer buffer, Relation relation, BlockNumber blockNum)
 
Buffer ExtendBufferedRel (BufferManagerRelation bmr, ForkNumber forkNum, BufferAccessStrategy strategy, uint32 flags)
 
BlockNumber ExtendBufferedRelBy (BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, uint32 extend_by, Buffer *buffers, uint32 *extended_by)
 
Buffer ExtendBufferedRelTo (BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, BlockNumber extend_to, ReadBufferMode mode)
 
void InitBufferManagerAccess (void)
 
void AtEOXact_Buffers (bool isCommit)
 
char * DebugPrintBufferRefcount (Buffer buffer)
 
void CheckPointBuffers (int flags)
 
BlockNumber BufferGetBlockNumber (Buffer buffer)
 
BlockNumber RelationGetNumberOfBlocksInFork (Relation relation, ForkNumber forkNum)
 
void FlushOneBuffer (Buffer buffer)
 
void FlushRelationBuffers (Relation rel)
 
void FlushRelationsAllBuffers (struct SMgrRelationData **smgrs, int nrels)
 
void CreateAndCopyRelationData (RelFileLocator src_rlocator, RelFileLocator dst_rlocator, bool permanent)
 
void FlushDatabaseBuffers (Oid dbid)
 
void DropRelationBuffers (struct SMgrRelationData *smgr_reln, ForkNumber *forkNum, int nforks, BlockNumber *firstDelBlock)
 
void DropRelationsAllBuffers (struct SMgrRelationData **smgr_reln, int nlocators)
 
void DropDatabaseBuffers (Oid dbid)
 
bool BufferIsPermanent (Buffer buffer)
 
XLogRecPtr BufferGetLSNAtomic (Buffer buffer)
 
void BufferGetTag (Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
 
void MarkBufferDirtyHint (Buffer buffer, bool buffer_std)
 
void UnlockBuffers (void)
 
void LockBuffer (Buffer buffer, int mode)
 
bool ConditionalLockBuffer (Buffer buffer)
 
void LockBufferForCleanup (Buffer buffer)
 
bool ConditionalLockBufferForCleanup (Buffer buffer)
 
bool IsBufferCleanupOK (Buffer buffer)
 
bool HoldingBufferPinThatDelaysRecovery (void)
 
bool BgBufferSync (struct WritebackContext *wb_context)
 
void LimitAdditionalPins (uint32 *additional_pins)
 
void LimitAdditionalLocalPins (uint32 *additional_pins)
 
bool EvictUnpinnedBuffer (Buffer buf)
 
void BufferManagerShmemInit (void)
 
Size BufferManagerShmemSize (void)
 
void AtProcExit_LocalBuffers (void)
 
BufferAccessStrategy GetAccessStrategy (BufferAccessStrategyType btype)
 
BufferAccessStrategy GetAccessStrategyWithSize (BufferAccessStrategyType btype, int ring_size_kb)
 
int GetAccessStrategyBufferCount (BufferAccessStrategy strategy)
 
int GetAccessStrategyPinLimit (BufferAccessStrategy strategy)
 
void FreeAccessStrategy (BufferAccessStrategy strategy)
 
static bool BufferIsValid (Buffer bufnum)
 
static Block BufferGetBlock (Buffer buffer)
 
static Size BufferGetPageSize (Buffer buffer)
 
static Page BufferGetPage (Buffer buffer)
 

Variables

PGDLLIMPORT int NBuffers
 
PGDLLIMPORT bool zero_damaged_pages
 
PGDLLIMPORT int bgwriter_lru_maxpages
 
PGDLLIMPORT double bgwriter_lru_multiplier
 
PGDLLIMPORT bool track_io_timing
 
PGDLLIMPORT int effective_io_concurrency
 
PGDLLIMPORT int maintenance_io_concurrency
 
PGDLLIMPORT int io_combine_limit
 
PGDLLIMPORT int checkpoint_flush_after
 
PGDLLIMPORT int backend_flush_after
 
PGDLLIMPORT int bgwriter_flush_after
 
PGDLLIMPORT char * BufferBlocks
 
PGDLLIMPORT int NLocBuffer
 
PGDLLIMPORT Block * LocalBufferBlockPointers
 
PGDLLIMPORT int32 * LocalRefCount
 

Macro Definition Documentation

◆ BMR_REL

#define BMR_REL (   p_rel)    ((BufferManagerRelation){.rel = p_rel})

Definition at line 107 of file bufmgr.h.

◆ BMR_SMGR

#define BMR_SMGR (   p_smgr,
  p_relpersistence 
)    ((BufferManagerRelation){.smgr = p_smgr, .relpersistence = p_relpersistence})

Definition at line 108 of file bufmgr.h.

◆ BUFFER_LOCK_EXCLUSIVE

#define BUFFER_LOCK_EXCLUSIVE   2

Definition at line 191 of file bufmgr.h.

◆ BUFFER_LOCK_SHARE

#define BUFFER_LOCK_SHARE   1

Definition at line 190 of file bufmgr.h.

◆ BUFFER_LOCK_UNLOCK

#define BUFFER_LOCK_UNLOCK   0

Definition at line 189 of file bufmgr.h.

◆ DEFAULT_EFFECTIVE_IO_CONCURRENCY

#define DEFAULT_EFFECTIVE_IO_CONCURRENCY   0

Definition at line 158 of file bufmgr.h.

◆ DEFAULT_IO_COMBINE_LIMIT

#define DEFAULT_IO_COMBINE_LIMIT   Min(MAX_IO_COMBINE_LIMIT, (128 * 1024) / BLCKSZ)

Definition at line 165 of file bufmgr.h.

◆ DEFAULT_MAINTENANCE_IO_CONCURRENCY

#define DEFAULT_MAINTENANCE_IO_CONCURRENCY   0

Definition at line 159 of file bufmgr.h.

◆ MAX_IO_COMBINE_LIMIT

#define MAX_IO_COMBINE_LIMIT   PG_IOV_MAX

Definition at line 164 of file bufmgr.h.

◆ MAX_IO_CONCURRENCY

#define MAX_IO_CONCURRENCY   1000

Definition at line 181 of file bufmgr.h.

◆ P_NEW

#define P_NEW   InvalidBlockNumber /* grow the file to get a new page */

Definition at line 184 of file bufmgr.h.

◆ READ_BUFFERS_ISSUE_ADVICE

#define READ_BUFFERS_ISSUE_ADVICE   (1 << 1)

Definition at line 113 of file bufmgr.h.

◆ READ_BUFFERS_ZERO_ON_ERROR

#define READ_BUFFERS_ZERO_ON_ERROR   (1 << 0)

Definition at line 111 of file bufmgr.h.

◆ RelationGetNumberOfBlocks

#define RelationGetNumberOfBlocks (   reln)     RelationGetNumberOfBlocksInFork(reln, MAIN_FORKNUM)

Definition at line 273 of file bufmgr.h.

Typedef Documentation

◆ Block

typedef void* Block

Definition at line 25 of file bufmgr.h.

◆ BufferAccessStrategyType

◆ BufferManagerRelation

◆ ExtendBufferedFlags

◆ PrefetchBufferResult

◆ ReadBuffersOperation

Definition at line 136 of file bufmgr.h.

Enumeration Type Documentation

◆ BufferAccessStrategyType

Enumerator
BAS_NORMAL 
BAS_BULKREAD 
BAS_BULKWRITE 
BAS_VACUUM 

Definition at line 33 of file bufmgr.h.

34{
35 BAS_NORMAL, /* Normal random access */
36 BAS_BULKREAD, /* Large read-only scan (hint bit updates are
37 * ok) */
38 BAS_BULKWRITE, /* Large multi-block write (e.g. COPY IN) */
39 BAS_VACUUM, /* VACUUM */
BufferAccessStrategyType
Definition: bufmgr.h:34
@ BAS_BULKREAD
Definition: bufmgr.h:36
@ BAS_NORMAL
Definition: bufmgr.h:35
@ BAS_VACUUM
Definition: bufmgr.h:39
@ BAS_BULKWRITE
Definition: bufmgr.h:38

◆ ExtendBufferedFlags

Enumerator
EB_SKIP_EXTENSION_LOCK 
EB_PERFORMING_RECOVERY 
EB_CREATE_FORK_IF_NEEDED 
EB_LOCK_FIRST 
EB_CLEAR_SIZE_CACHE 
EB_LOCK_TARGET 

Definition at line 67 of file bufmgr.h.

68{
69 /*
70 * Don't acquire extension lock. This is safe only if the relation isn't
71 * shared, an access exclusive lock is held or if this is the startup
72 * process.
73 */
74 EB_SKIP_EXTENSION_LOCK = (1 << 0),
75
76 /* Is this extension part of recovery? */
77 EB_PERFORMING_RECOVERY = (1 << 1),
78
79 /*
80 * Should the fork be created if it does not currently exist? This likely
81 * only ever makes sense for relation forks.
82 */
83 EB_CREATE_FORK_IF_NEEDED = (1 << 2),
84
85 /* Should the first (possibly only) return buffer be returned locked? */
86 EB_LOCK_FIRST = (1 << 3),
87
88 /* Should the smgr size cache be cleared? */
89 EB_CLEAR_SIZE_CACHE = (1 << 4),
90
91 /* internal flags follow */
92 EB_LOCK_TARGET = (1 << 5),
ExtendBufferedFlags
Definition: bufmgr.h:68
@ EB_LOCK_TARGET
Definition: bufmgr.h:92
@ EB_CLEAR_SIZE_CACHE
Definition: bufmgr.h:89
@ EB_PERFORMING_RECOVERY
Definition: bufmgr.h:77
@ EB_CREATE_FORK_IF_NEEDED
Definition: bufmgr.h:83
@ EB_SKIP_EXTENSION_LOCK
Definition: bufmgr.h:74
@ EB_LOCK_FIRST
Definition: bufmgr.h:86

◆ ReadBufferMode

Enumerator
RBM_NORMAL 
RBM_ZERO_AND_LOCK 
RBM_ZERO_AND_CLEANUP_LOCK 
RBM_ZERO_ON_ERROR 
RBM_NORMAL_NO_LOG 

Definition at line 43 of file bufmgr.h.

44{
45 RBM_NORMAL, /* Normal read */
46 RBM_ZERO_AND_LOCK, /* Don't read from disk, caller will
47 * initialize. Also locks the page. */
48 RBM_ZERO_AND_CLEANUP_LOCK, /* Like RBM_ZERO_AND_LOCK, but locks the page
49 * in "cleanup" mode */
50 RBM_ZERO_ON_ERROR, /* Read, but return an all-zeros page on error */
51 RBM_NORMAL_NO_LOG, /* Don't log page as invalid during WAL
52 * replay; otherwise same as RBM_NORMAL */
ReadBufferMode
Definition: bufmgr.h:44
@ RBM_ZERO_ON_ERROR
Definition: bufmgr.h:50
@ RBM_ZERO_AND_CLEANUP_LOCK
Definition: bufmgr.h:48
@ RBM_ZERO_AND_LOCK
Definition: bufmgr.h:46
@ RBM_NORMAL
Definition: bufmgr.h:45
@ RBM_NORMAL_NO_LOG
Definition: bufmgr.h:51

Function Documentation

◆ AtEOXact_Buffers()

void AtEOXact_Buffers ( bool  isCommit)

Definition at line 3559 of file bufmgr.c.

3560{
3561 CheckForBufferLeaks();
3562
3563 AtEOXact_LocalBuffers(isCommit);
3564
3565 Assert(PrivateRefCountOverflowed == 0);
3566}
static void CheckForBufferLeaks(void)
Definition: bufmgr.c:3619
static int32 PrivateRefCountOverflowed
Definition: bufmgr.c:210
#define Assert(condition)
Definition: c.h:815
void AtEOXact_LocalBuffers(bool isCommit)
Definition: localbuf.c:820

References Assert, AtEOXact_LocalBuffers(), CheckForBufferLeaks(), and PrivateRefCountOverflowed.

Referenced by AbortTransaction(), BackgroundWriterMain(), CheckpointerMain(), CommitTransaction(), PrepareTransaction(), and WalWriterMain().

◆ AtProcExit_LocalBuffers()

void AtProcExit_LocalBuffers ( void  )

Definition at line 831 of file localbuf.c.

832{
833 /*
834 * We shouldn't be holding any remaining pins; if we are, and assertions
835 * aren't enabled, we'll fail later in DropRelationBuffers while trying to
836 * drop the temp rels.
837 */
838 CheckForLocalBufferLeaks();
839}
static void CheckForLocalBufferLeaks(void)
Definition: localbuf.c:787

References CheckForLocalBufferLeaks().

Referenced by AtProcExit_Buffers().

◆ BgBufferSync()

bool BgBufferSync ( struct WritebackContext wb_context)

Definition at line 3188 of file bufmgr.c.

3189{
3190 /* info obtained from freelist.c */
3191 int strategy_buf_id;
3192 uint32 strategy_passes;
3193 uint32 recent_alloc;
3194
3195 /*
3196 * Information saved between calls so we can determine the strategy
3197 * point's advance rate and avoid scanning already-cleaned buffers.
3198 */
3199 static bool saved_info_valid = false;
3200 static int prev_strategy_buf_id;
3201 static uint32 prev_strategy_passes;
3202 static int next_to_clean;
3203 static uint32 next_passes;
3204
3205 /* Moving averages of allocation rate and clean-buffer density */
3206 static float smoothed_alloc = 0;
3207 static float smoothed_density = 10.0;
3208
3209 /* Potentially these could be tunables, but for now, not */
3210 float smoothing_samples = 16;
3211 float scan_whole_pool_milliseconds = 120000.0;
3212
3213 /* Used to compute how far we scan ahead */
3214 long strategy_delta;
3215 int bufs_to_lap;
3216 int bufs_ahead;
3217 float scans_per_alloc;
3218 int reusable_buffers_est;
3219 int upcoming_alloc_est;
3220 int min_scan_buffers;
3221
3222 /* Variables for the scanning loop proper */
3223 int num_to_scan;
3224 int num_written;
3225 int reusable_buffers;
3226
3227 /* Variables for final smoothed_density update */
3228 long new_strategy_delta;
3229 uint32 new_recent_alloc;
3230
3231 /*
3232 * Find out where the freelist clock sweep currently is, and how many
3233 * buffer allocations have happened since our last call.
3234 */
3235 strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
3236
3237 /* Report buffer alloc counts to pgstat */
3238 PendingBgWriterStats.buf_alloc += recent_alloc;
3239
3240 /*
3241 * If we're not running the LRU scan, just stop after doing the stats
3242 * stuff. We mark the saved state invalid so that we can recover sanely
3243 * if LRU scan is turned back on later.
3244 */
3245 if (bgwriter_lru_maxpages <= 0)
3246 {
3247 saved_info_valid = false;
3248 return true;
3249 }
3250
3251 /*
3252 * Compute strategy_delta = how many buffers have been scanned by the
3253 * clock sweep since last time. If first time through, assume none. Then
3254 * see if we are still ahead of the clock sweep, and if so, how many
3255 * buffers we could scan before we'd catch up with it and "lap" it. Note:
3256 * weird-looking coding of xxx_passes comparisons are to avoid bogus
3257 * behavior when the passes counts wrap around.
3258 */
3259 if (saved_info_valid)
3260 {
3261 int32 passes_delta = strategy_passes - prev_strategy_passes;
3262
3263 strategy_delta = strategy_buf_id - prev_strategy_buf_id;
3264 strategy_delta += (long) passes_delta * NBuffers;
3265
3266 Assert(strategy_delta >= 0);
3267
3268 if ((int32) (next_passes - strategy_passes) > 0)
3269 {
3270 /* we're one pass ahead of the strategy point */
3271 bufs_to_lap = strategy_buf_id - next_to_clean;
3272#ifdef BGW_DEBUG
3273 elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
3274 next_passes, next_to_clean,
3275 strategy_passes, strategy_buf_id,
3276 strategy_delta, bufs_to_lap);
3277#endif
3278 }
3279 else if (next_passes == strategy_passes &&
3280 next_to_clean >= strategy_buf_id)
3281 {
3282 /* on same pass, but ahead or at least not behind */
3283 bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
3284#ifdef BGW_DEBUG
3285 elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
3286 next_passes, next_to_clean,
3287 strategy_passes, strategy_buf_id,
3288 strategy_delta, bufs_to_lap);
3289#endif
3290 }
3291 else
3292 {
3293 /*
3294 * We're behind, so skip forward to the strategy point and start
3295 * cleaning from there.
3296 */
3297#ifdef BGW_DEBUG
3298 elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
3299 next_passes, next_to_clean,
3300 strategy_passes, strategy_buf_id,
3301 strategy_delta);
3302#endif
3303 next_to_clean = strategy_buf_id;
3304 next_passes = strategy_passes;
3305 bufs_to_lap = NBuffers;
3306 }
3307 }
3308 else
3309 {
3310 /*
3311 * Initializing at startup or after LRU scanning had been off. Always
3312 * start at the strategy point.
3313 */
3314#ifdef BGW_DEBUG
3315 elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
3316 strategy_passes, strategy_buf_id);
3317#endif
3318 strategy_delta = 0;
3319 next_to_clean = strategy_buf_id;
3320 next_passes = strategy_passes;
3321 bufs_to_lap = NBuffers;
3322 }
3323
3324 /* Update saved info for next time */
3325 prev_strategy_buf_id = strategy_buf_id;
3326 prev_strategy_passes = strategy_passes;
3327 saved_info_valid = true;
3328
3329 /*
3330 * Compute how many buffers had to be scanned for each new allocation, ie,
3331 * 1/density of reusable buffers, and track a moving average of that.
3332 *
3333 * If the strategy point didn't move, we don't update the density estimate
3334 */
3335 if (strategy_delta > 0 && recent_alloc > 0)
3336 {
3337 scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
3338 smoothed_density += (scans_per_alloc - smoothed_density) /
3339 smoothing_samples;
3340 }
3341
3342 /*
3343 * Estimate how many reusable buffers there are between the current
3344 * strategy point and where we've scanned ahead to, based on the smoothed
3345 * density estimate.
3346 */
3347 bufs_ahead = NBuffers - bufs_to_lap;
3348 reusable_buffers_est = (float) bufs_ahead / smoothed_density;
3349
3350 /*
3351 * Track a moving average of recent buffer allocations. Here, rather than
3352 * a true average we want a fast-attack, slow-decline behavior: we
3353 * immediately follow any increase.
3354 */
3355 if (smoothed_alloc <= (float) recent_alloc)
3356 smoothed_alloc = recent_alloc;
3357 else
3358 smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
3359 smoothing_samples;
3360
3361 /* Scale the estimate by a GUC to allow more aggressive tuning. */
3362 upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
3363
3364 /*
3365 * If recent_alloc remains at zero for many cycles, smoothed_alloc will
3366 * eventually underflow to zero, and the underflows produce annoying
3367 * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
3368 * zero, there's no point in tracking smaller and smaller values of
3369 * smoothed_alloc, so just reset it to exactly zero to avoid this
3370 * syndrome. It will pop back up as soon as recent_alloc increases.
3371 */
3372 if (upcoming_alloc_est == 0)
3373 smoothed_alloc = 0;
3374
3375 /*
3376 * Even in cases where there's been little or no buffer allocation
3377 * activity, we want to make a small amount of progress through the buffer
3378 * cache so that as many reusable buffers as possible are clean after an
3379 * idle period.
3380 *
3381 * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
3382 * the BGW will be called during the scan_whole_pool time; slice the
3383 * buffer pool into that many sections.
3384 */
3385 min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
3386
3387 if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
3388 {
3389#ifdef BGW_DEBUG
3390 elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
3391 upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
3392#endif
3393 upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
3394 }
3395
3396 /*
3397 * Now write out dirty reusable buffers, working forward from the
3398 * next_to_clean point, until we have lapped the strategy scan, or cleaned
3399 * enough buffers to match our estimate of the next cycle's allocation
3400 * requirements, or hit the bgwriter_lru_maxpages limit.
3401 */
3402
3403 num_to_scan = bufs_to_lap;
3404 num_written = 0;
3405 reusable_buffers = reusable_buffers_est;
3406
3407 /* Execute the LRU scan */
3408 while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
3409 {
3410 int sync_state = SyncOneBuffer(next_to_clean, true,
3411 wb_context);
3412
3413 if (++next_to_clean >= NBuffers)
3414 {
3415 next_to_clean = 0;
3416 next_passes++;
3417 }
3418 num_to_scan--;
3419
3420 if (sync_state & BUF_WRITTEN)
3421 {
3422 reusable_buffers++;
3423 if (++num_written >= bgwriter_lru_maxpages)
3424 {
3425 PendingBgWriterStats.maxwritten_clean++;
3426 break;
3427 }
3428 }
3429 else if (sync_state & BUF_REUSABLE)
3430 reusable_buffers++;
3431 }
3432
3433 PendingBgWriterStats.buf_written_clean += num_written;
3434
3435#ifdef BGW_DEBUG
3436 elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
3437 recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
3438 smoothed_density, reusable_buffers_est, upcoming_alloc_est,
3439 bufs_to_lap - num_to_scan,
3440 num_written,
3441 reusable_buffers - reusable_buffers_est);
3442#endif
3443
3444 /*
3445 * Consider the above scan as being like a new allocation scan.
3446 * Characterize its density and update the smoothed one based on it. This
3447 * effectively halves the moving average period in cases where both the
3448 * strategy and the background writer are doing some useful scanning,
3449 * which is helpful because a long memory isn't as desirable on the
3450 * density estimates.
3451 */
3452 new_strategy_delta = bufs_to_lap - num_to_scan;
3453 new_recent_alloc = reusable_buffers - reusable_buffers_est;
3454 if (new_strategy_delta > 0 && new_recent_alloc > 0)
3455 {
3456 scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
3457 smoothed_density += (scans_per_alloc - smoothed_density) /
3458 smoothing_samples;
3459
3460#ifdef BGW_DEBUG
3461 elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
3462 new_recent_alloc, new_strategy_delta,
3463 scans_per_alloc, smoothed_density);
3464#endif
3465 }
3466
3467 /* Return true if OK to hibernate */
3468 return (bufs_to_lap == 0 && recent_alloc == 0);
3469}
int BgWriterDelay
Definition: bgwriter.c:57
#define BUF_REUSABLE
Definition: bufmgr.c:77
double bgwriter_lru_multiplier
Definition: bufmgr.c:142
static int SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
Definition: bufmgr.c:3486
int bgwriter_lru_maxpages
Definition: bufmgr.c:141
#define BUF_WRITTEN
Definition: bufmgr.c:76
int32_t int32
Definition: c.h:484
uint32_t uint32
Definition: c.h:488
#define DEBUG2
Definition: elog.h:29
#define DEBUG1
Definition: elog.h:30
#define elog(elevel,...)
Definition: elog.h:225
int StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
Definition: freelist.c:394
int NBuffers
Definition: globals.c:141
PgStat_BgWriterStats PendingBgWriterStats
PgStat_Counter buf_written_clean
Definition: pgstat.h:240
PgStat_Counter maxwritten_clean
Definition: pgstat.h:241
PgStat_Counter buf_alloc
Definition: pgstat.h:242

References Assert, bgwriter_lru_maxpages, bgwriter_lru_multiplier, BgWriterDelay, PgStat_BgWriterStats::buf_alloc, BUF_REUSABLE, BUF_WRITTEN, PgStat_BgWriterStats::buf_written_clean, DEBUG1, DEBUG2, elog, PgStat_BgWriterStats::maxwritten_clean, NBuffers, PendingBgWriterStats, StrategySyncStart(), and SyncOneBuffer().

Referenced by BackgroundWriterMain().

◆ BufferGetBlock()

static Block BufferGetBlock ( Buffer  buffer)
inlinestatic

Definition at line 367 of file bufmgr.h.

368{
369 Assert(BufferIsValid(buffer));
370
371 if (BufferIsLocal(buffer))
372 return LocalBufferBlockPointers[-buffer - 1];
373 else
374 return (Block) (BufferBlocks + ((Size) (buffer - 1)) * BLCKSZ);
375}
#define BufferIsLocal(buffer)
Definition: buf.h:37
PGDLLIMPORT Block * LocalBufferBlockPointers
Definition: localbuf.c:45
void * Block
Definition: bufmgr.h:25
PGDLLIMPORT char * BufferBlocks
Definition: buf_init.c:21
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:351
size_t Size
Definition: c.h:562

References Assert, BufferBlocks, BufferIsLocal, BufferIsValid(), and LocalBufferBlockPointers.

Referenced by BufferGetPage(), heap_inplace_update_and_unlock(), WaitReadBuffers(), and XLogSaveBufferForHint().

◆ BufferGetBlockNumber()

BlockNumber BufferGetBlockNumber ( Buffer  buffer)

Definition at line 3724 of file bufmgr.c.

3725{
3726 BufferDesc *bufHdr;
3727
3728 Assert(BufferIsPinned(buffer));
3729
3730 if (BufferIsLocal(buffer))
3731 bufHdr = GetLocalBufferDescriptor(-buffer - 1);
3732 else
3733 bufHdr = GetBufferDescriptor(buffer - 1);
3734
3735 /* pinned, so OK to read tag without spinlock */
3736 return bufHdr->tag.blockNum;
3737}
static BufferDesc * GetLocalBufferDescriptor(uint32 id)
static BufferDesc * GetBufferDescriptor(uint32 id)
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:474
BufferTag tag
BlockNumber blockNum
Definition: buf_internals.h:97

References Assert, buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), GetLocalBufferDescriptor(), and BufferDesc::tag.

Referenced by _bt_binsrch_insert(), _bt_bottomupdel_pass(), _bt_check_unique(), _bt_checkpage(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_doinsert(), _bt_finish_split(), _bt_getroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newlevel(), _bt_pagedel(), _bt_readpage(), _bt_restore_meta(), _bt_search(), _bt_simpledel_pass(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_first(), _hash_freeovflpage(), _hash_getnewbuf(), _hash_readnext(), _hash_readpage(), _hash_splitbucket(), allocNewBuffer(), blinsert(), BloomInitMetapage(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_xlog_insert_update(), brinbuild(), brinGetTupleForHeapBlock(), collect_corrupt_items(), collectMatchBitmap(), createPostingTree(), dataBeginPlaceToPageLeaf(), dataPrepareDownlink(), doPickSplit(), entryPrepareDownlink(), fill_seq_fork_with_data(), ginEntryInsert(), ginFindParents(), ginFinishSplit(), ginPlaceToPage(), ginRedoDeleteListPages(), ginRedoUpdateMetapage(), ginScanToDelete(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistformdownlink(), gistinserttuples(), gistMemorizeAllDownlinks(), gistplacetopage(), gistRelocateBuildBuffersOnSplit(), gistScanPage(), hash_xlog_add_ovfl_page(), heap_delete(), heap_fetch_next_buffer(), heap_hot_search_buffer(), heap_insert(), heap_multi_insert(), heap_page_is_all_visible(), heap_page_prune_and_freeze(), heap_prepare_pagescan(), heap_update(), heap_xlog_confirm(), heap_xlog_lock(), heapam_scan_analyze_next_block(), heapgettup(), heapgettup_pagemode(), index_compute_xid_horizon_for_tuples(), lazy_scan_noprune(), lazy_scan_prune(), makeSublist(), moveLeafs(), moveRightIfItNeeded(), pgstathashindex(), ReadBufferBI(), RelationAddBlocks(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_get_buffer(), 
revmap_physical_extend(), ScanSourceDatabasePgClassPage(), spgAddNodeAction(), spgbuild(), spgdoinsert(), SpGistSetLastUsedPage(), spgSplitNodeAction(), spgWalk(), startScanEntry(), terminate_brin_buildstate(), vacuumLeafPage(), visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), visibilitymap_set(), and WaitReadBuffers().

◆ BufferGetLSNAtomic()

XLogRecPtr BufferGetLSNAtomic ( Buffer  buffer)

Definition at line 3985 of file bufmgr.c.

3986{
3987 BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
3988 char *page = BufferGetPage(buffer);
3989 XLogRecPtr lsn;
3990 uint32 buf_state;
3991
3992 /*
3993 * If we don't need locking for correctness, fastpath out.
3994 */
3995 if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
3996 return PageGetLSN(page);
3997
3998 /* Make sure we've got a real buffer, and that we hold a pin on it. */
3999 Assert(BufferIsValid(buffer));
4000 Assert(BufferIsPinned(buffer));
4001
4002 buf_state = LockBufHdr(bufHdr);
4003 lsn = PageGetLSN(page);
4004 UnlockBufHdr(bufHdr, buf_state);
4005
4006 return lsn;
4007}
static void UnlockBufHdr(BufferDesc *desc, uint32 buf_state)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:5761
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:400
static XLogRecPtr PageGetLSN(const char *page)
Definition: bufpage.h:386
#define XLogHintBitIsNeeded()
Definition: xlog.h:120
uint64 XLogRecPtr
Definition: xlogdefs.h:21

References Assert, PrivateRefCountEntry::buffer, BufferGetPage(), BufferIsLocal, BufferIsPinned, BufferIsValid(), GetBufferDescriptor(), LockBufHdr(), PageGetLSN(), UnlockBufHdr(), and XLogHintBitIsNeeded.

Referenced by _bt_killitems(), _bt_readpage(), gistdoinsert(), gistFindPath(), gistkillitems(), gistScanPage(), SetHintBits(), and XLogSaveBufferForHint().

◆ BufferGetPage()

static Page BufferGetPage ( Buffer  buffer)
inlinestatic

Definition at line 400 of file bufmgr.h.

401{
402 return (Page) BufferGetBlock(buffer);
403}
static Block BufferGetBlock(Buffer buffer)
Definition: bufmgr.h:367
Pointer Page
Definition: bufpage.h:81

References BufferGetBlock().

Referenced by _bt_allocbuf(), _bt_binsrch(), _bt_binsrch_insert(), _bt_bottomupdel_pass(), _bt_check_unique(), _bt_checkpage(), _bt_clear_incomplete_split(), _bt_conditionallockbuf(), _bt_dedup_pass(), _bt_delete_or_dedup_one_page(), _bt_delitems_delete(), _bt_delitems_delete_check(), _bt_delitems_vacuum(), _bt_endpoint(), _bt_findinsertloc(), _bt_finish_split(), _bt_get_endpoint(), _bt_getmeta(), _bt_getroot(), _bt_getstackbuf(), _bt_gettrueroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_killitems(), _bt_leftsib_splitflag(), _bt_lock_and_validate_left(), _bt_lock_subtree_parent(), _bt_lockbuf(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newlevel(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_rightsib_halfdeadflag(), _bt_search(), _bt_search_insert(), _bt_set_cleanup_info(), _bt_simpledel_pass(), _bt_split(), _bt_stepright(), _bt_unlink_halfdead_page(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _bt_vacuum_needs_cleanup(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_get_newblock_from_oldbucket(), _hash_get_oldblock_from_newbucket(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_getinitbuf(), _hash_getnewbuf(), _hash_init(), _hash_init_metabuffer(), _hash_initbitmapbuffer(), _hash_initbuf(), _hash_kill_items(), _hash_pgaddmultitup(), _hash_pgaddtup(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), allocNewBuffer(), blgetbitmap(), blinsert(), BloomNewBuffer(), blvacuumcleanup(), brin_can_do_samepage_update(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_start_evacuating_page(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), 
brinbuild(), brinbuildempty(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinSetHeapBlockItemptr(), bt_metap(), bt_page_items_internal(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), btvacuumpage(), BufferGetLSNAtomic(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), collectMatchesForHeapRow(), count_nondeletable_pages(), createPostingTree(), dataBeginPlaceToPage(), dataBeginPlaceToPageInternal(), dataBeginPlaceToPageLeaf(), dataExecPlaceToPage(), dataExecPlaceToPageInternal(), dataLocateItem(), dataPlaceToPageLeafRecompress(), dataPrepareDownlink(), dataSplitPageInternal(), do_setval(), doPickSplit(), entryExecPlaceToPage(), entryIsEnoughSpace(), entryLoadMoreItems(), entryLocateEntry(), entryLocateLeafEntry(), entryPrepareDownlink(), entrySplitPage(), fill_seq_fork_with_data(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), generic_redo(), GenericXLogFinish(), GenericXLogRegisterBuffer(), get_raw_page_internal(), GetBTPageStatistics(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginbulkdelete(), ginDeletePage(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishOldSplit(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), GinInitBuffer(), GinInitMetabuffer(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertData(), ginRedoInsertEntry(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumEntryPage(), 
ginVacuumPostingTreeLeaf(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), GISTInitBuffer(), gistkillitems(), gistMemorizeAllDownlinks(), gistNewBuffer(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hash_bitmap_info(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_freeze_prepared_tuples(), heap_get_latest_tid(), heap_hot_search_buffer(), heap_index_delete_tuples(), heap_inplace_update_and_unlock(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_is_all_visible(), heap_page_prune_and_freeze(), heap_page_prune_execute(), heap_page_prune_opt(), heap_pre_freeze_checks(), heap_prepare_pagescan(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune_freeze(), heap_xlog_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_validate_scan(), heapam_scan_analyze_next_tuple(), heapam_scan_bitmap_next_block(), heapam_scan_bitmap_next_tuple(), heapam_scan_sample_next_tuple(), heapgettup_continue_page(), heapgettup_pagemode(), heapgettup_start_page(), 
index_compute_xid_horizon_for_tuples(), initBloomState(), lazy_scan_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), log_heap_prune_and_freeze(), log_heap_update(), log_newpage_buffer(), log_newpage_range(), log_split_page(), MarkBufferDirtyHint(), moveLeafs(), moveRightIfItNeeded(), nextval_internal(), palloc_btree_page(), pg_visibility(), pgstat_btree_page(), pgstat_gist_page(), pgstat_hash_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationAddBlocks(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), ScanSourceDatabasePgClass(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistInitBuffer(), SpGistNewBuffer(), SpGistSetLastUsedPage(), SpGistUpdateMetaPage(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), terminate_brin_buildstate(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), verify_heapam(), verifyBackupPageConsistency(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), writeListPage(), XLogCheckBufferNeedsBackup(), XLogReadBufferExtended(), XLogReadBufferForRedoExtended(), XLogRecordPageWithFreeSpace(), XLogRegisterBuffer(), XLogSaveBufferForHint(), xlogVacuumPage(), and ZeroAndLockBuffer().

◆ BufferGetPageSize()

◆ BufferGetTag()

void BufferGetTag ( Buffer  buffer,
RelFileLocator rlocator,
ForkNumber forknum,
BlockNumber blknum 
)

Definition at line 3745 of file bufmgr.c.

3747{
3748 BufferDesc *bufHdr;
3749
3750 /* Do the same checks as BufferGetBlockNumber. */
3751 Assert(BufferIsPinned(buffer));
3752
3753 if (BufferIsLocal(buffer))
3754 bufHdr = GetLocalBufferDescriptor(-buffer - 1);
3755 else
3756 bufHdr = GetBufferDescriptor(buffer - 1);
3757
3758 /* pinned, so OK to read tag without spinlock */
3759 *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
3760 *forknum = BufTagGetForkNum(&bufHdr->tag);
3761 *blknum = bufHdr->tag.blockNum;
3762}
static ForkNumber BufTagGetForkNum(const BufferTag *tag)
static RelFileLocator BufTagGetRelFileLocator(const BufferTag *tag)

References Assert, buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufTagGetForkNum(), BufTagGetRelFileLocator(), GetBufferDescriptor(), GetLocalBufferDescriptor(), and BufferDesc::tag.

Referenced by fsm_search_avail(), ginRedoInsertEntry(), heap_inplace_update_and_unlock(), log_newpage_buffer(), ResolveCminCmaxDuringDecoding(), XLogRegisterBuffer(), and XLogSaveBufferForHint().

◆ BufferIsDirty()

bool BufferIsDirty ( Buffer  buffer)

Definition at line 2500 of file bufmgr.c.

2501{
2502 BufferDesc *bufHdr;
2503
2504 if (BufferIsLocal(buffer))
2505 {
2506 int bufid = -buffer - 1;
2507
2508 bufHdr = GetLocalBufferDescriptor(bufid);
2509 }
2510 else
2511 {
2512 bufHdr = GetBufferDescriptor(buffer - 1);
2513 }
2514
2515 Assert(BufferIsPinned(buffer));
2516 Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2517 LW_EXCLUSIVE));
2518
2519 return pg_atomic_read_u32(&bufHdr->state) & BM_DIRTY;
2520}
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:239
static LWLock * BufferDescriptorGetContentLock(const BufferDesc *bdesc)
#define BM_DIRTY
Definition: buf_internals.h:60
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1937
@ LW_EXCLUSIVE
Definition: lwlock.h:114
pg_atomic_uint32 state

References Assert, BM_DIRTY, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), GetLocalBufferDescriptor(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), pg_atomic_read_u32(), and BufferDesc::state.

Referenced by XLogRegisterBuffer().

◆ BufferIsExclusiveLocked()

bool BufferIsExclusiveLocked ( Buffer  buffer)

Definition at line 2471 of file bufmgr.c.

2472{
2473 BufferDesc *bufHdr;
2474
2475 if (BufferIsLocal(buffer))
2476 {
2477 int bufid = -buffer - 1;
2478
2479 bufHdr = GetLocalBufferDescriptor(bufid);
2480 }
2481 else
2482 {
2483 bufHdr = GetBufferDescriptor(buffer - 1);
2484 }
2485
2486 Assert(BufferIsPinned(buffer));
2487 return LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2488 LW_EXCLUSIVE);
2489}

References Assert, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), GetLocalBufferDescriptor(), LW_EXCLUSIVE, and LWLockHeldByMeInMode().

Referenced by XLogRegisterBuffer().

◆ BufferIsPermanent()

bool BufferIsPermanent ( Buffer  buffer)

Definition at line 3955 of file bufmgr.c.

3956{
3957 BufferDesc *bufHdr;
3958
3959 /* Local buffers are used only for temp relations. */
3960 if (BufferIsLocal(buffer))
3961 return false;
3962
3963 /* Make sure we've got a real buffer, and that we hold a pin on it. */
3964 Assert(BufferIsValid(buffer));
3965 Assert(BufferIsPinned(buffer));
3966
3967 /*
3968 * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
3969 * need not bother with the buffer header spinlock. Even if someone else
3970 * changes the buffer header state while we're doing this, the state is
3971 * changed atomically, so we'll read the old value or the new value, but
3972 * not random garbage.
3973 */
3974 bufHdr = GetBufferDescriptor(buffer - 1);
3975 return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
3976}
#define BM_PERMANENT
Definition: buf_internals.h:68

References Assert, BM_PERMANENT, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufferIsValid(), GetBufferDescriptor(), pg_atomic_read_u32(), and BufferDesc::state.

Referenced by SetHintBits().

◆ BufferIsValid()

static bool BufferIsValid ( Buffer  bufnum)
inlinestatic

Definition at line 351 of file bufmgr.h.

352{
353 Assert(bufnum <= NBuffers);
354 Assert(bufnum >= -NLocBuffer);
355
356 return bufnum != InvalidBuffer;
357}
#define InvalidBuffer
Definition: buf.h:25
PGDLLIMPORT int NBuffers
Definition: globals.c:141
PGDLLIMPORT int NLocBuffer
Definition: localbuf.c:42

References Assert, InvalidBuffer, NBuffers, and NLocBuffer.

Referenced by _bt_clear_incomplete_split(), _bt_endpoint(), _bt_first(), _bt_get_endpoint(), _bt_insertonpg(), _bt_relandgetbuf(), _bt_search(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_dropscanbuf(), _hash_freeovflpage(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_readnext(), _hash_readpage(), _hash_readprev(), autoprewarm_database_main(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinGetTupleForHeapBlock(), brininsert(), brinsummarize(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), BufferGetBlock(), BufferGetLSNAtomic(), BufferGetPageSize(), BufferIsPermanent(), ConditionalLockBufferForCleanup(), DebugPrintBufferRefcount(), doPickSplit(), entryGetItem(), entryLoadMoreItems(), ExecStoreBufferHeapTuple(), ExecStorePinnedBufferHeapTuple(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_vacuum_page(), generic_redo(), GetPrivateRefCount(), GetPrivateRefCountEntry(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageUpdateRecord(), gistXLogSplit(), gistXLogUpdate(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_endscan(), 
heap_fetch_next_buffer(), heap_index_delete_tuples(), heap_inplace_lock(), heap_lock_tuple(), heap_rescan(), heap_update(), heap_vac_scan_next_block(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune_freeze(), heap_xlog_update(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_analyze_next_block(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapam_tuple_satisfies_snapshot(), heapgettup(), heapgettup_continue_page(), heapgettup_pagemode(), heapgettup_start_page(), IsBufferCleanupOK(), lazy_scan_heap(), lazy_vacuum_heap_rel(), log_heap_visible(), MarkBufferDirty(), MarkBufferDirtyHint(), read_stream_next_buffer(), ReadRecentBuffer(), ReleaseAndReadBuffer(), ReleaseBuffer(), ResOwnerReleaseBufferPin(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), statapprox_heap(), tts_buffer_heap_clear(), tts_buffer_heap_copyslot(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), verifyBackupPageConsistency(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), visibilitymap_prepare_truncate(), visibilitymap_set(), XLogPrefetcherNextBlock(), XLogReadBufferExtended(), and XLogReadBufferForRedoExtended().

◆ BufferManagerShmemInit()

void BufferManagerShmemInit ( void  )

Definition at line 67 of file buf_init.c.

68{
69 bool foundBufs,
70 foundDescs,
71 foundIOCV,
72 foundBufCkpt;
73
74 /* Align descriptors to a cacheline boundary. */
75 BufferDescriptors = (BufferDescPadded *)
76 ShmemInitStruct("Buffer Descriptors",
77 NBuffers * sizeof(BufferDescPadded),
78 &foundDescs);
79
80 /* Align buffer pool on IO page size boundary. */
81 BufferBlocks = (char *)
82 TYPEALIGN(PG_IO_ALIGN_SIZE,
83 ShmemInitStruct("Buffer Blocks",
84 NBuffers * (Size) BLCKSZ + PG_IO_ALIGN_SIZE,
85 &foundBufs));
86
87 /* Align condition variables to cacheline boundary. */
88 BufferIOCVArray = (ConditionVariableMinimallyPadded *)
89 ShmemInitStruct("Buffer IO Condition Variables",
90 NBuffers * sizeof(ConditionVariableMinimallyPadded),
91 &foundIOCV);
92
93 /*
94 * The array used to sort to-be-checkpointed buffer ids is located in
95 * shared memory, to avoid having to allocate significant amounts of
96 * memory at runtime. As that'd be in the middle of a checkpoint, or when
97 * the checkpointer is restarted, memory allocation failures would be
98 * painful.
99 */
100 CkptBufferIds = (CkptSortItem *)
101 ShmemInitStruct("Checkpoint BufferIds",
102 NBuffers * sizeof(CkptSortItem), &foundBufCkpt);
103
104 if (foundDescs || foundBufs || foundIOCV || foundBufCkpt)
105 {
106 /* should find all of these, or none of them */
107 Assert(foundDescs && foundBufs && foundIOCV && foundBufCkpt);
108 /* note: this path is only taken in EXEC_BACKEND case */
109 }
110 else
111 {
112 int i;
113
114 /*
115 * Initialize all the buffer headers.
116 */
117 for (i = 0; i < NBuffers; i++)
118 {
119 BufferDesc *buf = GetBufferDescriptor(i);
120
121 ClearBufferTag(&buf->tag);
122
123 pg_atomic_init_u32(&buf->state, 0);
124 buf->wait_backend_pgprocno = INVALID_PROC_NUMBER;
125
126 buf->buf_id = i;
127
128 /*
129 * Initially link all the buffers together as unused. Subsequent
130 * management of this list is done by freelist.c.
131 */
132 buf->freeNext = i + 1;
133
136
138 }
139
140 /* Correct last entry of linked list */
141 GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST;
142 }
143
144 /* Init other shared buffer-management stuff */
145 StrategyInitialize(!foundDescs);
146
147 /* Initialize per-backend file flush context */
148 WritebackContextInit(&BackendWritebackContext,
149 &backend_flush_after);
150}
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:221
CkptSortItem * CkptBufferIds
Definition: buf_init.c:24
char * BufferBlocks
Definition: buf_init.c:21
WritebackContext BackendWritebackContext
Definition: buf_init.c:23
ConditionVariableMinimallyPadded * BufferIOCVArray
Definition: buf_init.c:22
BufferDescPadded * BufferDescriptors
Definition: buf_init.c:20
static ConditionVariable * BufferDescriptorGetIOCV(const BufferDesc *bdesc)
#define FREENEXT_END_OF_LIST
static void ClearBufferTag(BufferTag *tag)
void WritebackContextInit(WritebackContext *context, int *max_pending)
Definition: bufmgr.c:5903
int backend_flush_after
Definition: bufmgr.c:173
#define TYPEALIGN(ALIGNVAL, LEN)
Definition: c.h:761
void ConditionVariableInit(ConditionVariable *cv)
void StrategyInitialize(bool init)
Definition: freelist.c:474
int i
Definition: isn.c:72
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:707
@ LWTRANCHE_BUFFER_CONTENT
Definition: lwlock.h:187
#define PG_IO_ALIGN_SIZE
static char * buf
Definition: pg_test_fsync.c:72
#define INVALID_PROC_NUMBER
Definition: procnumber.h:26
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:382

References Assert, backend_flush_after, BackendWritebackContext, buf, BufferBlocks, BufferDescriptorGetContentLock(), BufferDescriptorGetIOCV(), BufferDescriptors, BufferIOCVArray, CkptBufferIds, ClearBufferTag(), ConditionVariableInit(), BufferDesc::freeNext, FREENEXT_END_OF_LIST, GetBufferDescriptor(), i, INVALID_PROC_NUMBER, LWLockInitialize(), LWTRANCHE_BUFFER_CONTENT, NBuffers, pg_atomic_init_u32(), PG_IO_ALIGN_SIZE, ShmemInitStruct(), StrategyInitialize(), TYPEALIGN, and WritebackContextInit().

Referenced by CreateOrAttachShmemStructs().

◆ BufferManagerShmemSize()

Size BufferManagerShmemSize ( void  )

Definition at line 159 of file buf_init.c.

160{
161 Size size = 0;
162
163 /* size of buffer descriptors */
164 size = add_size(size, mul_size(NBuffers, sizeof(BufferDescPadded)));
165 /* to allow aligning buffer descriptors */
166 size = add_size(size, PG_CACHE_LINE_SIZE);
167
168 /* size of data pages, plus alignment padding */
169 size = add_size(size, PG_IO_ALIGN_SIZE);
170 size = add_size(size, mul_size(NBuffers, BLCKSZ));
171
172 /* size of stuff controlled by freelist.c */
173 size = add_size(size, StrategyShmemSize());
174
175 /* size of I/O condition variables */
176 size = add_size(size, mul_size(NBuffers,
177 sizeof(ConditionVariableMinimallyPadded)));
178 /* to allow aligning the above */
179 size = add_size(size, PG_CACHE_LINE_SIZE);
180
181 /* size of checkpoint sort array in bufmgr.c */
182 size = add_size(size, mul_size(NBuffers, sizeof(CkptSortItem)));
183
184 return size;
185}
Size StrategyShmemSize(void)
Definition: freelist.c:453
#define PG_CACHE_LINE_SIZE
Size add_size(Size s1, Size s2)
Definition: shmem.c:488
Size mul_size(Size s1, Size s2)
Definition: shmem.c:505
static pg_noinline void Size size
Definition: slab.c:607

References add_size(), mul_size(), NBuffers, PG_CACHE_LINE_SIZE, PG_IO_ALIGN_SIZE, size, and StrategyShmemSize().

Referenced by CalculateShmemSize().

◆ CheckBufferIsPinnedOnce()

void CheckBufferIsPinnedOnce ( Buffer  buffer)

Definition at line 5205 of file bufmgr.c.

5206{
5207 if (BufferIsLocal(buffer))
5208 {
5209 if (LocalRefCount[-buffer - 1] != 1)
5210 elog(ERROR, "incorrect local pin count: %d",
5211 LocalRefCount[-buffer - 1]);
5212 }
5213 else
5214 {
5215 if (GetPrivateRefCount(buffer) != 1)
5216 elog(ERROR, "incorrect local pin count: %d",
5217 GetPrivateRefCount(buffer));
5218 }
5219}
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:416
#define ERROR
Definition: elog.h:39
int32 * LocalRefCount
Definition: localbuf.c:46

References PrivateRefCountEntry::buffer, BufferIsLocal, elog, ERROR, GetPrivateRefCount(), and LocalRefCount.

Referenced by GetVictimBuffer(), and LockBufferForCleanup().

◆ CheckPointBuffers()

void CheckPointBuffers ( int  flags)

Definition at line 3710 of file bufmgr.c.

3711{
3712 BufferSync(flags);
3713}
static void BufferSync(int flags)
Definition: bufmgr.c:2912

References BufferSync().

Referenced by CheckPointGuts().

◆ ConditionalLockBuffer()

bool ConditionalLockBuffer ( Buffer  buffer)

◆ ConditionalLockBufferForCleanup()

bool ConditionalLockBufferForCleanup ( Buffer  buffer)

Definition at line 5399 of file bufmgr.c.

5400{
5401 BufferDesc *bufHdr;
5402 uint32 buf_state,
5403 refcount;
5404
5405 Assert(BufferIsValid(buffer));
5406
5407 if (BufferIsLocal(buffer))
5408 {
5409 refcount = LocalRefCount[-buffer - 1];
5410 /* There should be exactly one pin */
5411 Assert(refcount > 0);
5412 if (refcount != 1)
5413 return false;
5414 /* Nobody else to wait for */
5415 return true;
5416 }
5417
5418 /* There should be exactly one local pin */
5419 refcount = GetPrivateRefCount(buffer);
5420 Assert(refcount);
5421 if (refcount != 1)
5422 return false;
5423
5424 /* Try to acquire lock */
5425 if (!ConditionalLockBuffer(buffer))
5426 return false;
5427
5428 bufHdr = GetBufferDescriptor(buffer - 1);
5429 buf_state = LockBufHdr(bufHdr);
5430 refcount = BUF_STATE_GET_REFCOUNT(buf_state);
5431
5432 Assert(refcount > 0);
5433 if (refcount == 1)
5434 {
5435 /* Successfully acquired exclusive lock with pincount 1 */
5436 UnlockBufHdr(bufHdr, buf_state);
5437 return true;
5438 }
5439
5440 /* Failed, so release the lock */
5441 UnlockBufHdr(bufHdr, buf_state);
5442 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5443 return false;
5444}
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:50
bool ConditionalLockBuffer(Buffer buffer)
Definition: bufmgr.c:5184
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:5158
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:189

References Assert, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsValid(), ConditionalLockBuffer(), GetBufferDescriptor(), GetPrivateRefCount(), LocalRefCount, LockBuffer(), LockBufHdr(), PrivateRefCountEntry::refcount, and UnlockBufHdr().

Referenced by _hash_finish_split(), _hash_getbuf_with_condlock_cleanup(), heap_page_prune_opt(), and lazy_scan_heap().

◆ CreateAndCopyRelationData()

void CreateAndCopyRelationData ( RelFileLocator  src_rlocator,
RelFileLocator  dst_rlocator,
bool  permanent 
)

Definition at line 4798 of file bufmgr.c.

4800{
4801 char relpersistence;
4802 SMgrRelation src_rel;
4803 SMgrRelation dst_rel;
4804
4805 /* Set the relpersistence. */
4806 relpersistence = permanent ?
4807 RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED;
4808
4809 src_rel = smgropen(src_rlocator, INVALID_PROC_NUMBER);
4810 dst_rel = smgropen(dst_rlocator, INVALID_PROC_NUMBER);
4811
4812 /*
4813 * Create and copy all forks of the relation. During create database we
4814 * have a separate cleanup mechanism which deletes complete database
4815 * directory. Therefore, each individual relation doesn't need to be
4816 * registered for cleanup.
4817 */
4818 RelationCreateStorage(dst_rlocator, relpersistence, false);
4819
4820 /* copy main fork. */
4821 RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, MAIN_FORKNUM,
4822 permanent);
4823
4824 /* copy those extra forks that exist */
4825 for (ForkNumber forkNum = MAIN_FORKNUM + 1;
4826 forkNum <= MAX_FORKNUM; forkNum++)
4827 {
4828 if (smgrexists(src_rel, forkNum))
4829 {
4830 smgrcreate(dst_rel, forkNum, false);
4831
4832 /*
4833 * WAL log creation if the relation is persistent, or this is the
4834 * init fork of an unlogged relation.
4835 */
4836 if (permanent || forkNum == INIT_FORKNUM)
4837 log_smgrcreate(&dst_rlocator, forkNum);
4838
4839 /* Copy a fork's data, block by block. */
4840 RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, forkNum,
4841 permanent);
4842 }
4843 }
4844}
static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator, RelFileLocator dstlocator, ForkNumber forkNum, bool permanent)
Definition: bufmgr.c:4690
ForkNumber
Definition: relpath.h:56
@ MAIN_FORKNUM
Definition: relpath.h:58
@ INIT_FORKNUM
Definition: relpath.h:61
#define MAX_FORKNUM
Definition: relpath.h:70
SMgrRelation smgropen(RelFileLocator rlocator, ProcNumber backend)
Definition: smgr.c:201
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:414
bool smgrexists(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:401
SMgrRelation RelationCreateStorage(RelFileLocator rlocator, char relpersistence, bool register_delete)
Definition: storage.c:121
void log_smgrcreate(const RelFileLocator *rlocator, ForkNumber forkNum)
Definition: storage.c:186

References INIT_FORKNUM, INVALID_PROC_NUMBER, log_smgrcreate(), MAIN_FORKNUM, MAX_FORKNUM, RelationCopyStorageUsingBuffer(), RelationCreateStorage(), smgrcreate(), smgrexists(), and smgropen().

Referenced by CreateDatabaseUsingWalLog().

◆ DebugPrintBufferRefcount()

char * DebugPrintBufferRefcount ( Buffer  buffer)

Definition at line 3665 of file bufmgr.c.

3666{
3667 BufferDesc *buf;
3668 int32 loccount;
3669 char *path;
3670 char *result;
3671 ProcNumber backend;
3672 uint32 buf_state;
3673
3674 Assert(BufferIsValid(buffer));
3675 if (BufferIsLocal(buffer))
3676 {
3677 buf = GetLocalBufferDescriptor(-buffer - 1);
3678 loccount = LocalRefCount[-buffer - 1];
3679 backend = MyProcNumber;
3680 }
3681 else
3682 {
3683 buf = GetBufferDescriptor(buffer - 1);
3684 loccount = GetPrivateRefCount(buffer);
3685 backend = INVALID_PROC_NUMBER;
3686 }
3687
3688 /* theoretically we should lock the bufhdr here */
3689 path = relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
3690 BufTagGetForkNum(&buf->tag));
3691 buf_state = pg_atomic_read_u32(&buf->state);
3692
3693 result = psprintf("[%03d] (rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
3694 buffer, path,
3695 buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
3696 BUF_STATE_GET_REFCOUNT(buf_state), loccount);
3697 pfree(path);
3698 return result;
3699}
#define BUF_FLAG_MASK
Definition: buf_internals.h:47
ProcNumber MyProcNumber
Definition: globals.c:89
void pfree(void *pointer)
Definition: mcxt.c:1521
int ProcNumber
Definition: procnumber.h:24
char * psprintf(const char *fmt,...)
Definition: psprintf.c:43
#define relpathbackend(rlocator, backend, forknum)
Definition: relpath.h:93

References Assert, buf, BUF_FLAG_MASK, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsValid(), BufTagGetForkNum(), BufTagGetRelFileLocator(), GetBufferDescriptor(), GetLocalBufferDescriptor(), GetPrivateRefCount(), INVALID_PROC_NUMBER, LocalRefCount, MyProcNumber, pfree(), pg_atomic_read_u32(), psprintf(), and relpathbackend.

Referenced by CheckForBufferLeaks(), CheckForLocalBufferLeaks(), and ResOwnerPrintBufferPin().

◆ DropDatabaseBuffers()

void DropDatabaseBuffers ( Oid  dbid)

Definition at line 4386 of file bufmgr.c.

4387{
4388 int i;
4389
4390 /*
4391 * We needn't consider local buffers, since by assumption the target
4392 * database isn't our own.
4393 */
4394
4395 for (i = 0; i < NBuffers; i++)
4396 {
4397 BufferDesc *bufHdr = GetBufferDescriptor(i);
4398 uint32 buf_state;
4399
4400 /*
4401 * As in DropRelationBuffers, an unlocked precheck should be safe and
4402 * saves some cycles.
4403 */
4404 if (bufHdr->tag.dbOid != dbid)
4405 continue;
4406
4407 buf_state = LockBufHdr(bufHdr);
4408 if (bufHdr->tag.dbOid == dbid)
4409 InvalidateBuffer(bufHdr); /* releases spinlock */
4410 else
4411 UnlockBufHdr(bufHdr, buf_state);
4412 }
4413}
static void InvalidateBuffer(BufferDesc *buf)
Definition: bufmgr.c:1784
Oid dbOid
Definition: buf_internals.h:94

References buftag::dbOid, GetBufferDescriptor(), i, InvalidateBuffer(), LockBufHdr(), NBuffers, BufferDesc::tag, and UnlockBufHdr().

Referenced by createdb_failure_callback(), dbase_redo(), dropdb(), and movedb().

◆ DropRelationBuffers()

void DropRelationBuffers ( struct SMgrRelationData smgr_reln,
ForkNumber forkNum,
int  nforks,
BlockNumber firstDelBlock 
)

◆ DropRelationsAllBuffers()

void DropRelationsAllBuffers ( struct SMgrRelationData **  smgr_reln,
int  nlocators 
)

◆ EvictUnpinnedBuffer()

bool EvictUnpinnedBuffer ( Buffer  buf)

Definition at line 6101 of file bufmgr.c.

6102{
6103 BufferDesc *desc;
6104 uint32 buf_state;
6105 bool result;
6106
6107 /* Make sure we can pin the buffer. */
6108 ResourceOwnerEnlarge(CurrentResourceOwner);
6109 ReservePrivateRefCountEntry();
6110
6111 Assert(!BufferIsLocal(buf));
6112 desc = GetBufferDescriptor(buf - 1);
6113
6114 /* Lock the header and check if it's valid. */
6115 buf_state = LockBufHdr(desc);
6116 if ((buf_state & BM_VALID) == 0)
6117 {
6118 UnlockBufHdr(desc, buf_state);
6119 return false;
6120 }
6121
6122 /* Check that it's not pinned already. */
6123 if (BUF_STATE_GET_REFCOUNT(buf_state) > 0)
6124 {
6125 UnlockBufHdr(desc, buf_state);
6126 return false;
6127 }
6128
6129 PinBuffer_Locked(desc); /* releases spinlock */
6130
6131 /* If it was dirty, try to clean it once. */
6132 if (buf_state & BM_DIRTY)
6133 {
6134 LWLockAcquire(BufferDescriptorGetContentLock(desc), LW_SHARED);
6135 FlushBuffer(desc, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
6136 LWLockRelease(BufferDescriptorGetContentLock(desc));
6137 }
6138
6139 /* This will return false if it becomes dirty or someone else pins it. */
6140 result = InvalidateVictimBuffer(desc);
6141
6142 UnpinBuffer(desc);
6143
6144 return result;
6145}
#define BM_VALID
Definition: buf_internals.h:61
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln, IOObject io_object, IOContext io_context)
Definition: bufmgr.c:3784
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:2763
static bool InvalidateVictimBuffer(BufferDesc *buf_hdr)
Definition: bufmgr.c:1882
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:250
static void UnpinBuffer(BufferDesc *buf)
Definition: bufmgr.c:2806
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1168
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1781
@ LW_SHARED
Definition: lwlock.h:115
@ IOOBJECT_RELATION
Definition: pgstat.h:275
@ IOCONTEXT_NORMAL
Definition: pgstat.h:285
ResourceOwner CurrentResourceOwner
Definition: resowner.c:165
void ResourceOwnerEnlarge(ResourceOwner owner)
Definition: resowner.c:442

References Assert, BM_DIRTY, BM_VALID, buf, BUF_STATE_GET_REFCOUNT, BufferDescriptorGetContentLock(), BufferIsLocal, CurrentResourceOwner, FlushBuffer(), GetBufferDescriptor(), InvalidateVictimBuffer(), IOCONTEXT_NORMAL, IOOBJECT_RELATION, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), PinBuffer_Locked(), ReservePrivateRefCountEntry(), ResourceOwnerEnlarge(), UnlockBufHdr(), and UnpinBuffer().

Referenced by pg_buffercache_evict().

◆ ExtendBufferedRel()

Buffer ExtendBufferedRel ( BufferManagerRelation  bmr,
ForkNumber  forkNum,
BufferAccessStrategy  strategy,
uint32  flags 
)

Definition at line 846 of file bufmgr.c.

850{
851 Buffer buf;
852 uint32 extend_by = 1;
853
854 ExtendBufferedRelBy(bmr, forkNum, strategy, flags, extend_by,
855 &buf, &extend_by);
856
857 return buf;
858}
int Buffer
Definition: buf.h:23
BlockNumber ExtendBufferedRelBy(BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, uint32 extend_by, Buffer *buffers, uint32 *extended_by)
Definition: bufmgr.c:878

References buf, and ExtendBufferedRelBy().

Referenced by _bt_allocbuf(), _hash_getnewbuf(), BloomNewBuffer(), brinbuild(), brinbuildempty(), fill_seq_fork_with_data(), ginbuildempty(), GinNewBuffer(), gistbuildempty(), gistNewBuffer(), ReadBuffer_common(), revmap_physical_extend(), and SpGistNewBuffer().

◆ ExtendBufferedRelBy()

BlockNumber ExtendBufferedRelBy ( BufferManagerRelation  bmr,
ForkNumber  fork,
BufferAccessStrategy  strategy,
uint32  flags,
uint32  extend_by,
Buffer buffers,
uint32 extended_by 
)

Definition at line 878 of file bufmgr.c.

885{
886 Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
887 Assert(bmr.smgr == NULL || bmr.relpersistence != 0);
888 Assert(extend_by > 0);
889
890 if (bmr.smgr == NULL)
891 {
892 bmr.smgr = RelationGetSmgr(bmr.rel);
893 bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
894 }
895
896 return ExtendBufferedRelCommon(bmr, fork, strategy, flags,
897 extend_by, InvalidBlockNumber,
898 buffers, extended_by);
899}
#define InvalidBlockNumber
Definition: block.h:33
static BlockNumber ExtendBufferedRelCommon(BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, uint32 extend_by, BlockNumber extend_upto, Buffer *buffers, uint32 *extended_by)
Definition: bufmgr.c:2147
static SMgrRelation RelationGetSmgr(Relation rel)
Definition: rel.h:567
struct SMgrRelationData * smgr
Definition: bufmgr.h:103
Form_pg_class rd_rel
Definition: rel.h:111

References Assert, ExtendBufferedRelCommon(), InvalidBlockNumber, RelationData::rd_rel, BufferManagerRelation::rel, RelationGetSmgr(), BufferManagerRelation::relpersistence, and BufferManagerRelation::smgr.

Referenced by ExtendBufferedRel(), and RelationAddBlocks().

◆ ExtendBufferedRelTo()

Buffer ExtendBufferedRelTo ( BufferManagerRelation  bmr,
ForkNumber  fork,
BufferAccessStrategy  strategy,
uint32  flags,
BlockNumber  extend_to,
ReadBufferMode  mode 
)

Definition at line 910 of file bufmgr.c.

916{
917 BlockNumber current_size;
918 uint32 extended_by = 0;
919 Buffer buffer = InvalidBuffer;
920 Buffer buffers[64];
921
922 Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
923 Assert(bmr.smgr == NULL || bmr.relpersistence != 0);
924 Assert(extend_to != InvalidBlockNumber && extend_to > 0);
925
926 if (bmr.smgr == NULL)
927 {
928 bmr.smgr = RelationGetSmgr(bmr.rel);
929 bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
930 }
931
932 /*
933 * If desired, create the file if it doesn't exist. If
934 * smgr_cached_nblocks[fork] is positive then it must exist, no need for
935 * an smgrexists call.
936 */
937 if ((flags & EB_CREATE_FORK_IF_NEEDED) &&
938 (bmr.smgr->smgr_cached_nblocks[fork] == 0 ||
939 bmr.smgr->smgr_cached_nblocks[fork] == InvalidBlockNumber) &&
940 !smgrexists(bmr.smgr, fork))
941 {
942 LockRelationForExtension(bmr.rel, ExclusiveLock);
943
944 /* recheck, fork might have been created concurrently */
945 if (!smgrexists(bmr.smgr, fork))
946 smgrcreate(bmr.smgr, fork, flags & EB_PERFORMING_RECOVERY);
947
948 UnlockRelationForExtension(bmr.rel, ExclusiveLock);
949 }
950
951 /*
952 * If requested, invalidate size cache, so that smgrnblocks asks the
953 * kernel.
954 */
955 if (flags & EB_CLEAR_SIZE_CACHE)
956 bmr.smgr->smgr_cached_nblocks[fork] = InvalidBlockNumber;
957
958 /*
959 * Estimate how many pages we'll need to extend by. This avoids acquiring
960 * unnecessarily many victim buffers.
961 */
962 current_size = smgrnblocks(bmr.smgr, fork);
963
964 /*
965 * Since no-one else can be looking at the page contents yet, there is no
966 * difference between an exclusive lock and a cleanup-strength lock. Note
967 * that we pass the original mode to ReadBuffer_common() below, when
968 * falling back to reading the buffer to a concurrent relation extension.
969 */
970 if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
971 flags |= EB_LOCK_TARGET;
972
973 while (current_size < extend_to)
974 {
975 uint32 num_pages = lengthof(buffers);
976 BlockNumber first_block;
977
978 if ((uint64) current_size + num_pages > extend_to)
979 num_pages = extend_to - current_size;
980
981 first_block = ExtendBufferedRelCommon(bmr, fork, strategy, flags,
982 num_pages, extend_to,
983 buffers, &extended_by);
984
985 current_size = first_block + extended_by;
986 Assert(num_pages != 0 || current_size >= extend_to);
987
988 for (uint32 i = 0; i < extended_by; i++)
989 {
990 if (first_block + i != extend_to - 1)
991 ReleaseBuffer(buffers[i]);
992 else
993 buffer = buffers[i];
994 }
995 }
996
997 /*
998 * It's possible that another backend concurrently extended the relation.
999 * In that case read the buffer.
1000 *
1001 * XXX: Should we control this via a flag?
1002 */
1003 if (buffer == InvalidBuffer)
1004 {
1005 Assert(extended_by == 0);
1006 buffer = ReadBuffer_common(bmr.rel, bmr.smgr, bmr.relpersistence,
1007 fork, extend_to - 1, mode, strategy);
1008 }
1009
1010 return buffer;
1011}
uint32 BlockNumber
Definition: block.h:31
static Buffer ReadBuffer_common(Relation rel, SMgrRelation smgr, char smgr_persistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:1189
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4924
uint64_t uint64
Definition: c.h:489
#define lengthof(array)
Definition: c.h:745
void LockRelationForExtension(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:419
void UnlockRelationForExtension(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:469
#define ExclusiveLock
Definition: lockdefs.h:42
static PgChecksumMode mode
Definition: pg_checksums.c:55
static int64 current_size
Definition: pg_checksums.c:63
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:677
BlockNumber smgr_cached_nblocks[MAX_FORKNUM+1]
Definition: smgr.h:46

References Assert, PrivateRefCountEntry::buffer, current_size, EB_CLEAR_SIZE_CACHE, EB_CREATE_FORK_IF_NEEDED, EB_LOCK_TARGET, EB_PERFORMING_RECOVERY, ExclusiveLock, ExtendBufferedRelCommon(), i, InvalidBlockNumber, InvalidBuffer, lengthof, LockRelationForExtension(), mode, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, RelationData::rd_rel, ReadBuffer_common(), BufferManagerRelation::rel, RelationGetSmgr(), ReleaseBuffer(), BufferManagerRelation::relpersistence, BufferManagerRelation::smgr, SMgrRelationData::smgr_cached_nblocks, smgrcreate(), smgrexists(), smgrnblocks(), and UnlockRelationForExtension().

Referenced by fsm_extend(), vm_extend(), and XLogReadBufferExtended().

◆ FlushDatabaseBuffers()

void FlushDatabaseBuffers ( Oid  dbid)

Definition at line 4862 of file bufmgr.c.

4863{
4864 int i;
4865 BufferDesc *bufHdr;
4866
4867 for (i = 0; i < NBuffers; i++)
4868 {
4869 uint32 buf_state;
4870
4871 bufHdr = GetBufferDescriptor(i);
4872
4873 /*
4874 * As in DropRelationBuffers, an unlocked precheck should be safe and
4875 * saves some cycles.
4876 */
4877 if (bufHdr->tag.dbOid != dbid)
4878 continue;
4879
4880 /* Make sure we can handle the pin */
4883
4884 buf_state = LockBufHdr(bufHdr);
4885 if (bufHdr->tag.dbOid == dbid &&
4886 (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4887 {
4888 PinBuffer_Locked(bufHdr);
4892 UnpinBuffer(bufHdr);
4893 }
4894 else
4895 UnlockBufHdr(bufHdr, buf_state);
4896 }
4897}

References BM_DIRTY, BM_VALID, BufferDescriptorGetContentLock(), CurrentResourceOwner, buftag::dbOid, FlushBuffer(), GetBufferDescriptor(), i, IOCONTEXT_NORMAL, IOOBJECT_RELATION, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, PinBuffer_Locked(), ReservePrivateRefCountEntry(), ResourceOwnerEnlarge(), BufferDesc::tag, UnlockBufHdr(), and UnpinBuffer().

Referenced by dbase_redo().

◆ FlushOneBuffer()

void FlushOneBuffer ( Buffer  buffer)

Definition at line 4904 of file bufmgr.c.

4905{
4906 BufferDesc *bufHdr;
4907
4908 /* currently not needed, but no fundamental reason not to support */
4909 Assert(!BufferIsLocal(buffer));
4910
4911 Assert(BufferIsPinned(buffer));
4912
4913 bufHdr = GetBufferDescriptor(buffer - 1);
4914
4916
4918}
bool LWLockHeldByMe(LWLock *lock)
Definition: lwlock.c:1893

References Assert, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, FlushBuffer(), GetBufferDescriptor(), IOCONTEXT_NORMAL, IOOBJECT_RELATION, and LWLockHeldByMe().

Referenced by hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), and XLogReadBufferForRedoExtended().

◆ FlushRelationBuffers()

void FlushRelationBuffers ( Relation  rel)

Definition at line 4492 of file bufmgr.c.

4493{
4494 int i;
4495 BufferDesc *bufHdr;
4496 SMgrRelation srel = RelationGetSmgr(rel);
4497
4498 if (RelationUsesLocalBuffers(rel))
4499 {
4500 for (i = 0; i < NLocBuffer; i++)
4501 {
4502 uint32 buf_state;
4503 instr_time io_start;
4504
4505 bufHdr = GetLocalBufferDescriptor(i);
4506 if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
4507 ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
4508 (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4509 {
4510 ErrorContextCallback errcallback;
4511 Page localpage;
4512
4513 localpage = (char *) LocalBufHdrGetBlock(bufHdr);
4514
4515 /* Setup error traceback support for ereport() */
4517 errcallback.arg = bufHdr;
4518 errcallback.previous = error_context_stack;
4519 error_context_stack = &errcallback;
4520
4521 PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
4522
4524
4525 smgrwrite(srel,
4526 BufTagGetForkNum(&bufHdr->tag),
4527 bufHdr->tag.blockNum,
4528 localpage,
4529 false);
4530
4533 io_start, 1, BLCKSZ);
4534
4535 buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
4536 pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
4537
4539
4540 /* Pop the error context stack */
4541 error_context_stack = errcallback.previous;
4542 }
4543 }
4544
4545 return;
4546 }
4547
4548 for (i = 0; i < NBuffers; i++)
4549 {
4550 uint32 buf_state;
4551
4552 bufHdr = GetBufferDescriptor(i);
4553
4554 /*
4555 * As in DropRelationBuffers, an unlocked precheck should be safe and
4556 * saves some cycles.
4557 */
4558 if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
4559 continue;
4560
4561 /* Make sure we can handle the pin */
4564
4565 buf_state = LockBufHdr(bufHdr);
4566 if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
4567 (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4568 {
4569 PinBuffer_Locked(bufHdr);
4573 UnpinBuffer(bufHdr);
4574 }
4575 else
4576 UnlockBufHdr(bufHdr, buf_state);
4577 }
4578}
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:295
static bool BufTagMatchesRelFileLocator(const BufferTag *tag, const RelFileLocator *rlocator)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:65
bool track_io_timing
Definition: bufmgr.c:143
#define LocalBufHdrGetBlock(bufHdr)
Definition: bufmgr.c:72
static void local_buffer_write_error_callback(void *arg)
Definition: bufmgr.c:5714
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1531
ErrorContextCallback * error_context_stack
Definition: elog.c:94
BufferUsage pgBufferUsage
Definition: instrument.c:20
int NLocBuffer
Definition: localbuf.c:42
@ IOOBJECT_TEMP_RELATION
Definition: pgstat.h:276
@ IOOP_WRITE
Definition: pgstat.h:312
instr_time pgstat_prepare_io_time(bool track_io_guc)
Definition: pgstat_io.c:104
void pgstat_count_io_op_time(IOObject io_object, IOContext io_context, IOOp io_op, instr_time start_time, uint32 cnt, uint64 bytes)
Definition: pgstat_io.c:126
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:637
static void smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const void *buffer, bool skipFsync)
Definition: smgr.h:124
int64 local_blks_written
Definition: instrument.h:33
struct ErrorContextCallback * previous
Definition: elog.h:296
void(* callback)(void *arg)
Definition: elog.h:297
RelFileLocator rd_locator
Definition: rel.h:57

References ErrorContextCallback::arg, buftag::blockNum, BM_DIRTY, BM_JUST_DIRTIED, BM_VALID, BufferDescriptorGetContentLock(), BufTagGetForkNum(), BufTagMatchesRelFileLocator(), ErrorContextCallback::callback, CurrentResourceOwner, error_context_stack, FlushBuffer(), GetBufferDescriptor(), GetLocalBufferDescriptor(), i, IOCONTEXT_NORMAL, IOOBJECT_RELATION, IOOBJECT_TEMP_RELATION, IOOP_WRITE, BufferUsage::local_blks_written, local_buffer_write_error_callback(), LocalBufHdrGetBlock, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, NLocBuffer, PageSetChecksumInplace(), pg_atomic_read_u32(), pg_atomic_unlocked_write_u32(), pgBufferUsage, pgstat_count_io_op_time(), pgstat_prepare_io_time(), PinBuffer_Locked(), ErrorContextCallback::previous, RelationData::rd_locator, RelationGetSmgr(), RelationUsesLocalBuffers, ReservePrivateRefCountEntry(), ResourceOwnerEnlarge(), smgrwrite(), BufferDesc::state, BufferDesc::tag, track_io_timing, UnlockBufHdr(), and UnpinBuffer().

Referenced by fill_seq_with_data(), heapam_relation_copy_data(), and index_copy_data().

◆ FlushRelationsAllBuffers()

void FlushRelationsAllBuffers ( struct SMgrRelationData **  smgrs,
int  nrels 
)

◆ FreeAccessStrategy()

void FreeAccessStrategy ( BufferAccessStrategy  strategy)

Definition at line 681 of file freelist.c.

682{
683 /* don't crash if called on a "default" strategy */
684 if (strategy != NULL)
685 pfree(strategy);
686}

References pfree().

Referenced by blgetbitmap(), FreeBulkInsertState(), heap_endscan(), initscan(), parallel_vacuum_main(), and RelationCopyStorageUsingBuffer().

◆ GetAccessStrategy()

BufferAccessStrategy GetAccessStrategy ( BufferAccessStrategyType  btype)

Definition at line 541 of file freelist.c.

542{
543 int ring_size_kb;
544
545 /*
546 * Select ring size to use. See buffer/README for rationales.
547 *
548 * Note: if you change the ring size for BAS_BULKREAD, see also
549 * SYNC_SCAN_REPORT_INTERVAL in access/heap/syncscan.c.
550 */
551 switch (btype)
552 {
553 case BAS_NORMAL:
554 /* if someone asks for NORMAL, just give 'em a "default" object */
555 return NULL;
556
557 case BAS_BULKREAD:
558 ring_size_kb = 256;
559 break;
560 case BAS_BULKWRITE:
561 ring_size_kb = 16 * 1024;
562 break;
563 case BAS_VACUUM:
564 ring_size_kb = 2048;
565 break;
566
567 default:
568 elog(ERROR, "unrecognized buffer access strategy: %d",
569 (int) btype);
570 return NULL; /* keep compiler quiet */
571 }
572
573 return GetAccessStrategyWithSize(btype, ring_size_kb);
574}
BufferAccessStrategy GetAccessStrategyWithSize(BufferAccessStrategyType btype, int ring_size_kb)
Definition: freelist.c:584

References BAS_BULKREAD, BAS_BULKWRITE, BAS_NORMAL, BAS_VACUUM, elog, ERROR, and GetAccessStrategyWithSize().

Referenced by blgetbitmap(), bt_check_every_level(), collect_corrupt_items(), collect_visibility_data(), GetBulkInsertState(), initscan(), pgstat_index(), pgstathashindex(), pgstatindex_impl(), RelationCopyStorageUsingBuffer(), ScanSourceDatabasePgClass(), statapprox_heap(), and verify_heapam().

◆ GetAccessStrategyBufferCount()

int GetAccessStrategyBufferCount ( BufferAccessStrategy  strategy)

Definition at line 624 of file freelist.c.

625{
626 if (strategy == NULL)
627 return 0;
628
629 return strategy->nbuffers;
630}

References BufferAccessStrategyData::nbuffers.

Referenced by parallel_vacuum_init().

◆ GetAccessStrategyPinLimit()

int GetAccessStrategyPinLimit ( BufferAccessStrategy  strategy)

Definition at line 647 of file freelist.c.

648{
649 if (strategy == NULL)
650 return NBuffers;
651
652 switch (strategy->btype)
653 {
654 case BAS_BULKREAD:
655
656 /*
657 * Since BAS_BULKREAD uses StrategyRejectBuffer(), dirty buffers
658 * shouldn't be a problem and the caller is free to pin up to the
659 * entire ring at once.
660 */
661 return strategy->nbuffers;
662
663 default:
664
665 /*
666 * Tell caller not to pin more than half the buffers in the ring.
667 * This is a trade-off between look ahead distance and deferring
668 * writeback and associated WAL traffic.
669 */
670 return strategy->nbuffers / 2;
671 }
672}
BufferAccessStrategyType btype
Definition: freelist.c:75

References BAS_BULKREAD, BufferAccessStrategyData::btype, BufferAccessStrategyData::nbuffers, and NBuffers.

Referenced by read_stream_begin_impl().

◆ GetAccessStrategyWithSize()

BufferAccessStrategy GetAccessStrategyWithSize ( BufferAccessStrategyType  btype,
int  ring_size_kb 
)

Definition at line 584 of file freelist.c.

585{
586 int ring_buffers;
587 BufferAccessStrategy strategy;
588
589 Assert(ring_size_kb >= 0);
590
591 /* Figure out how many buffers ring_size_kb is */
592 ring_buffers = ring_size_kb / (BLCKSZ / 1024);
593
594 /* 0 means unlimited, so no BufferAccessStrategy required */
595 if (ring_buffers == 0)
596 return NULL;
597
598 /* Cap to 1/8th of shared_buffers */
599 ring_buffers = Min(NBuffers / 8, ring_buffers);
600
601 /* NBuffers should never be less than 16, so this shouldn't happen */
602 Assert(ring_buffers > 0);
603
604 /* Allocate the object and initialize all elements to zeroes */
605 strategy = (BufferAccessStrategy)
606 palloc0(offsetof(BufferAccessStrategyData, buffers) +
607 ring_buffers * sizeof(Buffer));
608
609 /* Set fields that don't start out zero */
610 strategy->btype = btype;
611 strategy->nbuffers = ring_buffers;
612
613 return strategy;
614}
struct BufferAccessStrategyData * BufferAccessStrategy
Definition: buf.h:44
#define Min(x, y)
Definition: c.h:961
void * palloc0(Size size)
Definition: mcxt.c:1347

References Assert, BufferAccessStrategyData::btype, Min, BufferAccessStrategyData::nbuffers, NBuffers, and palloc0().

Referenced by do_autovacuum(), ExecVacuum(), GetAccessStrategy(), and parallel_vacuum_main().

◆ HoldingBufferPinThatDelaysRecovery()

bool HoldingBufferPinThatDelaysRecovery ( void  )

Definition at line 5373 of file bufmgr.c.

5374{
5375 int bufid = GetStartupBufferPinWaitBufId();
5376
5377 /*
5378 * If we get woken slowly then it's possible that the Startup process was
5379 * already woken by other backends before we got here. Also possible that
5380 * we get here by multiple interrupts or interrupts at inappropriate
5381 * times, so make sure we do nothing if the bufid is not set.
5382 */
5383 if (bufid < 0)
5384 return false;
5385
5386 if (GetPrivateRefCount(bufid + 1) > 0)
5387 return true;
5388
5389 return false;
5390}
int GetStartupBufferPinWaitBufId(void)
Definition: proc.c:717

References GetPrivateRefCount(), and GetStartupBufferPinWaitBufId().

Referenced by CheckRecoveryConflictDeadlock(), and ProcessRecoveryConflictInterrupt().

◆ IncrBufferRefCount()

void IncrBufferRefCount ( Buffer  buffer)

Definition at line 4956 of file bufmgr.c.

4957{
4958 Assert(BufferIsPinned(buffer));
4960 if (BufferIsLocal(buffer))
4961 LocalRefCount[-buffer - 1]++;
4962 else
4963 {
4965
4966 ref = GetPrivateRefCountEntry(buffer, true);
4967 Assert(ref != NULL);
4968 ref->refcount++;
4969 }
4971}
static void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
static PrivateRefCountEntry * GetPrivateRefCountEntry(Buffer buffer, bool do_move)
Definition: bufmgr.c:342

References Assert, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, CurrentResourceOwner, GetPrivateRefCountEntry(), LocalRefCount, PrivateRefCountEntry::refcount, ResourceOwnerEnlarge(), and ResourceOwnerRememberBuffer().

Referenced by _bt_steppage(), btrestrpos(), entryLoadMoreItems(), ReadBufferBI(), RelationAddBlocks(), scanPostingTree(), startScanEntry(), and tts_buffer_heap_store_tuple().

◆ InitBufferManagerAccess()

void InitBufferManagerAccess ( void  )

Definition at line 3576 of file bufmgr.c.

3577{
3578 HASHCTL hash_ctl;
3579
3580 memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
3581
3582 hash_ctl.keysize = sizeof(int32);
3583 hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
3584
3585 PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
3587
3588 /*
3589 * AtProcExit_Buffers needs LWLock access, and thereby has to be called at
3590 * the corresponding phase of backend shutdown.
3591 */
3592 Assert(MyProc != NULL);
3594}
static void AtProcExit_Buffers(int code, Datum arg)
Definition: bufmgr.c:3601
struct PrivateRefCountEntry PrivateRefCountEntry
static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES]
Definition: bufmgr.c:208
static HTAB * PrivateRefCountHash
Definition: bufmgr.c:209
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:352
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:365
PGPROC * MyProc
Definition: proc.c:66
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76

References Assert, AtProcExit_Buffers(), HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, MyProc, on_shmem_exit(), PrivateRefCountArray, and PrivateRefCountHash.

Referenced by BaseInit().

◆ IsBufferCleanupOK()

bool IsBufferCleanupOK ( Buffer  buffer)

Definition at line 5455 of file bufmgr.c.

5456{
5457 BufferDesc *bufHdr;
5458 uint32 buf_state;
5459
5460 Assert(BufferIsValid(buffer));
5461
5462 if (BufferIsLocal(buffer))
5463 {
5464 /* There should be exactly one pin */
5465 if (LocalRefCount[-buffer - 1] != 1)
5466 return false;
5467 /* Nobody else to wait for */
5468 return true;
5469 }
5470
5471 /* There should be exactly one local pin */
5472 if (GetPrivateRefCount(buffer) != 1)
5473 return false;
5474
5475 bufHdr = GetBufferDescriptor(buffer - 1);
5476
5477 /* caller must hold exclusive lock on buffer */
5479 LW_EXCLUSIVE));
5480
5481 buf_state = LockBufHdr(bufHdr);
5482
5483 Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5484 if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
5485 {
5486 /* pincount is OK. */
5487 UnlockBufHdr(bufHdr, buf_state);
5488 return true;
5489 }
5490
5491 UnlockBufHdr(bufHdr, buf_state);
5492 return false;
5493}

References Assert, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsValid(), GetBufferDescriptor(), GetPrivateRefCount(), LocalRefCount, LockBufHdr(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), and UnlockBufHdr().

Referenced by _hash_doinsert(), _hash_expandtable(), _hash_splitbucket(), and hashbucketcleanup().

◆ LimitAdditionalLocalPins()

void LimitAdditionalLocalPins ( uint32 additional_pins)

Definition at line 291 of file localbuf.c.

292{
293 uint32 max_pins;
294
295 if (*additional_pins <= 1)
296 return;
297
298 /*
299 * In contrast to LimitAdditionalPins() other backends don't play a role
300 * here. We can allow up to NLocBuffer pins in total, but it might not be
301 * initialized yet so read num_temp_buffers.
302 */
304
305 if (*additional_pins >= max_pins)
306 *additional_pins = max_pins;
307}
int num_temp_buffers
Definition: guc_tables.c:535
static int NLocalPinnedBuffers
Definition: localbuf.c:53

References NLocalPinnedBuffers, and num_temp_buffers.

Referenced by ExtendBufferedRelLocal(), and read_stream_begin_impl().

◆ LimitAdditionalPins()

void LimitAdditionalPins ( uint32 additional_pins)

Definition at line 2116 of file bufmgr.c.

2117{
2118 uint32 max_backends;
2119 int max_proportional_pins;
2120
2121 if (*additional_pins <= 1)
2122 return;
2123
2124 max_backends = MaxBackends + NUM_AUXILIARY_PROCS;
2125 max_proportional_pins = NBuffers / max_backends;
2126
2127 /*
2128 * Subtract the approximate number of buffers already pinned by this
2129 * backend. We get the number of "overflowed" pins for free, but don't
2130 * know the number of pins in PrivateRefCountArray. The cost of
2131 * calculating that exactly doesn't seem worth it, so just assume the max.
2132 */
2133 max_proportional_pins -= PrivateRefCountOverflowed + REFCOUNT_ARRAY_ENTRIES;
2134
2135 if (max_proportional_pins <= 0)
2136 max_proportional_pins = 1;
2137
2138 if (*additional_pins > max_proportional_pins)
2139 *additional_pins = max_proportional_pins;
2140}
#define REFCOUNT_ARRAY_ENTRIES
Definition: bufmgr.c:96
int MaxBackends
Definition: globals.c:145
#define NUM_AUXILIARY_PROCS
Definition: proc.h:445

References MaxBackends, NBuffers, NUM_AUXILIARY_PROCS, PrivateRefCountOverflowed, and REFCOUNT_ARRAY_ENTRIES.

Referenced by ExtendBufferedRelShared(), and read_stream_begin_impl().

◆ LockBuffer()

void LockBuffer ( Buffer  buffer,
int  mode 
)

Definition at line 5158 of file bufmgr.c.

5159{
5160 BufferDesc *buf;
5161
5162 Assert(BufferIsPinned(buffer));
5163 if (BufferIsLocal(buffer))
5164 return; /* local buffers need no lock */
5165
5166 buf = GetBufferDescriptor(buffer - 1);
5167
5168 if (mode == BUFFER_LOCK_UNLOCK)
5170 else if (mode == BUFFER_LOCK_SHARE)
5172 else if (mode == BUFFER_LOCK_EXCLUSIVE)
5174 else
5175 elog(ERROR, "unrecognized buffer lock mode: %d", mode);
5176}
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:190
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:191

References Assert, buf, PrivateRefCountEntry::buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, elog, ERROR, GetBufferDescriptor(), LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), and mode.

Referenced by _bt_lockbuf(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_getbuf(), _hash_getbuf_with_strategy(), _hash_getcachedmetap(), _hash_init(), _hash_kill_items(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), blbulkdelete(), blgetbitmap(), blinsert(), BloomInitMetapage(), BloomNewBuffer(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_page_cleanup(), bringetbitmap(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinLockRevmapPageForUpdate(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinsummarize(), bt_metap(), bt_multi_page_stats(), bt_page_items_internal(), bt_page_stats_internal(), bt_recheck_sibling_links(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), ConditionalLockBufferForCleanup(), count_nondeletable_pages(), entryLoadMoreItems(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), get_raw_page_internal(), GetVisibilityMapPins(), ginbulkdelete(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishOldSplit(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfinishsplit(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), 
heap_fetch(), heap_finish_speculative(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_lock(), heap_inplace_unlock(), heap_inplace_update_and_unlock(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_page_prune_opt(), heap_prepare_pagescan(), heap_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_for_cluster(), heapam_scan_analyze_next_block(), heapam_scan_bitmap_next_block(), heapam_scan_sample_next_tuple(), heapam_tuple_satisfies_snapshot(), heapgettup(), initBloomState(), lazy_scan_heap(), lazy_scan_new_or_empty(), lazy_vacuum_heap_rel(), LockBufferForCleanup(), log_newpage_range(), palloc_btree_page(), pg_visibility(), pgrowlocks(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationAddBlocks(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), ScanSourceDatabasePgClass(), shiftList(), spgdoinsert(), spgGetCache(), SpGistNewBuffer(), spgprocesspending(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), summarize_range(), UnlockReleaseBuffer(), verify_heapam(), verifyBackupPageConsistency(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), XLogReadBufferForRedoExtended(), XLogRecordPageWithFreeSpace(), and ZeroAndLockBuffer().

◆ LockBufferForCleanup()

void LockBufferForCleanup ( Buffer  buffer)

Definition at line 5238 of file bufmgr.c.

5239{
5240 BufferDesc *bufHdr;
5241 TimestampTz waitStart = 0;
5242 bool waiting = false;
5243 bool logged_recovery_conflict = false;
5244
5245 Assert(BufferIsPinned(buffer));
5246 Assert(PinCountWaitBuf == NULL);
5247
5249
5250 /* Nobody else to wait for */
5251 if (BufferIsLocal(buffer))
5252 return;
5253
5254 bufHdr = GetBufferDescriptor(buffer - 1);
5255
5256 for (;;)
5257 {
5258 uint32 buf_state;
5259
5260 /* Try to acquire lock */
5262 buf_state = LockBufHdr(bufHdr);
5263
5264 Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5265 if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
5266 {
5267 /* Successfully acquired exclusive lock with pincount 1 */
5268 UnlockBufHdr(bufHdr, buf_state);
5269
5270 /*
5271 * Emit the log message if recovery conflict on buffer pin was
5272 * resolved but the startup process waited longer than
5273 * deadlock_timeout for it.
5274 */
5275 if (logged_recovery_conflict)
5277 waitStart, GetCurrentTimestamp(),
5278 NULL, false);
5279
5280 if (waiting)
5281 {
5282 /* reset ps display to remove the suffix if we added one */
5284 waiting = false;
5285 }
5286 return;
5287 }
5288 /* Failed, so mark myself as waiting for pincount 1 */
5289 if (buf_state & BM_PIN_COUNT_WAITER)
5290 {
5291 UnlockBufHdr(bufHdr, buf_state);
5293 elog(ERROR, "multiple backends attempting to wait for pincount 1");
5294 }
5296 PinCountWaitBuf = bufHdr;
5297 buf_state |= BM_PIN_COUNT_WAITER;
5298 UnlockBufHdr(bufHdr, buf_state);
5300
5301 /* Wait to be signaled by UnpinBuffer() */
5302 if (InHotStandby)
5303 {
5304 if (!waiting)
5305 {
5306 /* adjust the process title to indicate that it's waiting */
5307 set_ps_display_suffix("waiting");
5308 waiting = true;
5309 }
5310
5311 /*
5312 * Emit the log message if the startup process is waiting longer
5313 * than deadlock_timeout for recovery conflict on buffer pin.
5314 *
5315 * Skip this if first time through because the startup process has
5316 * not started waiting yet in this case. So, the wait start
5317 * timestamp is set after this logic.
5318 */
5319 if (waitStart != 0 && !logged_recovery_conflict)
5320 {
5322
5323 if (TimestampDifferenceExceeds(waitStart, now,
5325 {
5327 waitStart, now, NULL, true);
5328 logged_recovery_conflict = true;
5329 }
5330 }
5331
5332 /*
5333 * Set the wait start timestamp if logging is enabled and first
5334 * time through.
5335 */
5336 if (log_recovery_conflict_waits && waitStart == 0)
5337 waitStart = GetCurrentTimestamp();
5338
5339 /* Publish the bufid that Startup process waits on */
5340 SetStartupBufferPinWaitBufId(buffer - 1);
5341 /* Set alarm and then wait to be signaled by UnpinBuffer() */
5343 /* Reset the published bufid */
5345 }
5346 else
5347 ProcWaitForSignal(WAIT_EVENT_BUFFER_PIN);
5348
5349 /*
5350 * Remove flag marking us as waiter. Normally this will not be set
5351 * anymore, but ProcWaitForSignal() can return for other signals as
5352 * well. We take care to only reset the flag if we're the waiter, as
5353 * theoretically another backend could have started waiting. That's
5354 * impossible with the current usages due to table level locking, but
5355 * better be safe.
5356 */
5357 buf_state = LockBufHdr(bufHdr);
5358 if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
5360 buf_state &= ~BM_PIN_COUNT_WAITER;
5361 UnlockBufHdr(bufHdr, buf_state);
5362
5363 PinCountWaitBuf = NULL;
5364 /* Loop back and try again */
5365 }
5366}
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1780
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1644
Datum now(PG_FUNCTION_ARGS)
Definition: timestamp.c:1608
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:66
void CheckBufferIsPinnedOnce(Buffer buffer)
Definition: bufmgr.c:5205
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:176
int64 TimestampTz
Definition: timestamp.h:39
static volatile sig_atomic_t waiting
Definition: latch.c:162
@ PROCSIG_RECOVERY_CONFLICT_BUFFERPIN
Definition: procsignal.h:47
void set_ps_display_remove_suffix(void)
Definition: ps_status.c:423
void set_ps_display_suffix(const char *suffix)
Definition: ps_status.c:371
int DeadlockTimeout
Definition: proc.c:57
void SetStartupBufferPinWaitBufId(int bufid)
Definition: proc.c:705
void ProcWaitForSignal(uint32 wait_event_info)
Definition: proc.c:1896
void ResolveRecoveryConflictWithBufferPin(void)
Definition: standby.c:792
bool log_recovery_conflict_waits
Definition: standby.c:41
void LogRecoveryConflict(ProcSignalReason reason, TimestampTz wait_start, TimestampTz now, VirtualTransactionId *wait_list, bool still_waiting)
Definition: standby.c:273
int wait_backend_pgprocno
#define InHotStandby
Definition: xlogutils.h:60

References Assert, BM_PIN_COUNT_WAITER, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsPinned, CheckBufferIsPinnedOnce(), DeadlockTimeout, elog, ERROR, GetBufferDescriptor(), GetCurrentTimestamp(), InHotStandby, LockBuffer(), LockBufHdr(), log_recovery_conflict_waits, LogRecoveryConflict(), MyProcNumber, now(), PinCountWaitBuf, PROCSIG_RECOVERY_CONFLICT_BUFFERPIN, ProcWaitForSignal(), ResolveRecoveryConflictWithBufferPin(), set_ps_display_remove_suffix(), set_ps_display_suffix(), SetStartupBufferPinWaitBufId(), TimestampDifferenceExceeds(), UnlockBufHdr(), BufferDesc::wait_backend_pgprocno, and waiting.

Referenced by _bt_upgradelockbufcleanup(), ginVacuumPostingTree(), hashbulkdelete(), heap_force_common(), lazy_scan_heap(), XLogReadBufferForRedoExtended(), and ZeroAndLockBuffer().

◆ MarkBufferDirty()

void MarkBufferDirty ( Buffer  buffer)

Definition at line 2532 of file bufmgr.c.

2533{
2534 BufferDesc *bufHdr;
2535 uint32 buf_state;
2536 uint32 old_buf_state;
2537
2538 if (!BufferIsValid(buffer))
2539 elog(ERROR, "bad buffer ID: %d", buffer);
2540
2541 if (BufferIsLocal(buffer))
2542 {
2543 MarkLocalBufferDirty(buffer);
2544 return;
2545 }
2546
2547 bufHdr = GetBufferDescriptor(buffer - 1);
2548
2549 Assert(BufferIsPinned(buffer));
2551 LW_EXCLUSIVE));
2552
2553 old_buf_state = pg_atomic_read_u32(&bufHdr->state);
2554 for (;;)
2555 {
2556 if (old_buf_state & BM_LOCKED)
2557 old_buf_state = WaitBufHdrUnlocked(bufHdr);
2558
2559 buf_state = old_buf_state;
2560
2561 Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2562 buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
2563
2564 if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
2565 buf_state))
2566 break;
2567 }
2568
2569 /*
2570 * If the buffer was not dirty already, do vacuum accounting.
2571 */
2572 if (!(old_buf_state & BM_DIRTY))
2573 {
2575 if (VacuumCostActive)
2577 }
2578}
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:349
#define BM_LOCKED
Definition: buf_internals.h:59
static uint32 WaitBufHdrUnlocked(BufferDesc *buf)
Definition: bufmgr.c:5791
bool VacuumCostActive
Definition: globals.c:157
int VacuumCostBalance
Definition: globals.c:156
int VacuumCostPageDirty
Definition: globals.c:152
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:450
int64 shared_blks_dirtied
Definition: instrument.h:28

References Assert, BM_DIRTY, BM_JUST_DIRTIED, BM_LOCKED, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, BufferIsValid(), elog, ERROR, GetBufferDescriptor(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), MarkLocalBufferDirty(), pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pgBufferUsage, BufferUsage::shared_blks_dirtied, BufferDesc::state, VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, and WaitBufHdrUnlocked().

Referenced by _bt_clear_incomplete_split(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_restore_meta(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), brin_doinsert(), brin_doupdate(), brin_initialize_empty_new_buffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinRevmapDesummarizeRange(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), createPostingTree(), dataExecPlaceToPageInternal(), dataExecPlaceToPageLeaf(), do_setval(), doPickSplit(), entryExecPlaceToPage(), fill_seq_fork_with_data(), FreeSpaceMapPrepareTruncateRel(), generic_redo(), GenericXLogFinish(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginDeletePage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistbuild(), gistbuildempty(), gistdeletepage(), gistplacetopage(), gistprunepage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistvacuumpage(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), 
hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_inplace_update_and_unlock(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_prune_and_freeze(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune_freeze(), heap_xlog_update(), heap_xlog_visible(), lazy_scan_new_or_empty(), lazy_scan_prune(), lazy_vacuum_heap_page(), log_newpage_range(), moveLeafs(), nextval_internal(), RelationAddBlocks(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), revmap_physical_extend(), saveNodeLink(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), SpGistUpdateMetaPage(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), writeListPage(), and XLogReadBufferForRedoExtended().

◆ MarkBufferDirtyHint()

void MarkBufferDirtyHint ( Buffer  buffer,
bool  buffer_std 
)

Definition at line 4988 of file bufmgr.c.

4989{
4990 BufferDesc *bufHdr;
4991 Page page = BufferGetPage(buffer);
4992
4993 if (!BufferIsValid(buffer))
4994 elog(ERROR, "bad buffer ID: %d", buffer);
4995
4996 if (BufferIsLocal(buffer))
4997 {
4998 MarkLocalBufferDirty(buffer);
4999 return;
5000 }
5001
5002 bufHdr = GetBufferDescriptor(buffer - 1);
5003
5004 Assert(GetPrivateRefCount(buffer) > 0);
5005 /* here, either share or exclusive lock is OK */
 5006 Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
 5007
5008 /*
5009 * This routine might get called many times on the same page, if we are
5010 * making the first scan after commit of an xact that added/deleted many
5011 * tuples. So, be as quick as we can if the buffer is already dirty. We
5012 * do this by not acquiring spinlock if it looks like the status bits are
5013 * already set. Since we make this test unlocked, there's a chance we
5014 * might fail to notice that the flags have just been cleared, and failed
5015 * to reset them, due to memory-ordering issues. But since this function
5016 * is only intended to be used in cases where failing to write out the
5017 * data would be harmless anyway, it doesn't really matter.
5018 */
5019 if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
 5020 (BM_DIRTY | BM_JUST_DIRTIED))
 5021 {
 5022 XLogRecPtr lsn = InvalidXLogRecPtr;
 5023 bool dirtied = false;
5024 bool delayChkptFlags = false;
5025 uint32 buf_state;
5026
5027 /*
5028 * If we need to protect hint bit updates from torn writes, WAL-log a
5029 * full page image of the page. This full page image is only necessary
5030 * if the hint bit update is the first change to the page since the
5031 * last checkpoint.
5032 *
5033 * We don't check full_page_writes here because that logic is included
5034 * when we call XLogInsert() since the value changes dynamically.
5035 */
5036 if (XLogHintBitIsNeeded() &&
 5037 (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
 5038 {
5039 /*
5040 * If we must not write WAL, due to a relfilelocator-specific
5041 * condition or being in recovery, don't dirty the page. We can
5042 * set the hint, just not dirty the page as a result so the hint
5043 * is lost when we evict the page or shutdown.
5044 *
5045 * See src/backend/storage/page/README for longer discussion.
5046 */
5047 if (RecoveryInProgress() ||
 5048 RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
 5049 return;
5050
5051 /*
5052 * If the block is already dirty because we either made a change
5053 * or set a hint already, then we don't need to write a full page
5054 * image. Note that aggressive cleaning of blocks dirtied by hint
5055 * bit setting would increase the call rate. Bulk setting of hint
5056 * bits would reduce the call rate...
5057 *
5058 * We must issue the WAL record before we mark the buffer dirty.
5059 * Otherwise we might write the page before we write the WAL. That
5060 * causes a race condition, since a checkpoint might occur between
5061 * writing the WAL record and marking the buffer dirty. We solve
5062 * that with a kluge, but one that is already in use during
5063 * transaction commit to prevent race conditions. Basically, we
5064 * simply prevent the checkpoint WAL record from being written
5065 * until we have marked the buffer dirty. We don't start the
5066 * checkpoint flush until we have marked dirty, so our checkpoint
5067 * must flush the change to disk successfully or the checkpoint
5068 * never gets written, so crash recovery will fix.
5069 *
5070 * It's possible we may enter here without an xid, so it is
5071 * essential that CreateCheckPoint waits for virtual transactions
5072 * rather than full transactionids.
5073 */
 5074 Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
 5075 MyProc->delayChkptFlags |= DELAY_CHKPT_START;
 5076 delayChkptFlags = true;
5077 lsn = XLogSaveBufferForHint(buffer, buffer_std);
5078 }
5079
5080 buf_state = LockBufHdr(bufHdr);
5081
5082 Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5083
5084 if (!(buf_state & BM_DIRTY))
5085 {
5086 dirtied = true; /* Means "will be dirtied by this action" */
5087
5088 /*
5089 * Set the page LSN if we wrote a backup block. We aren't supposed
5090 * to set this when only holding a share lock but as long as we
5091 * serialise it somehow we're OK. We choose to set LSN while
5092 * holding the buffer header lock, which causes any reader of an
5093 * LSN who holds only a share lock to also obtain a buffer header
5094 * lock before using PageGetLSN(), which is enforced in
5095 * BufferGetLSNAtomic().
5096 *
5097 * If checksums are enabled, you might think we should reset the
5098 * checksum here. That will happen when the page is written
5099 * sometime later in this checkpoint cycle.
5100 */
5101 if (!XLogRecPtrIsInvalid(lsn))
5102 PageSetLSN(page, lsn);
5103 }
5104
5105 buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
5106 UnlockBufHdr(bufHdr, buf_state);
5107
5108 if (delayChkptFlags)
5109 MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
5110
5111 if (dirtied)
5112 {
 5113 pgBufferUsage.shared_blks_dirtied++;
 5114 if (VacuumCostActive)
 5115 VacuumCostBalance += VacuumCostPageDirty;
5116 }
5117 }
5118}
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:391
#define DELAY_CHKPT_START
Definition: proc.h:119
bool RelFileLocatorSkippingWAL(RelFileLocator rlocator)
Definition: storage.c:557
int delayChkptFlags
Definition: proc.h:240
bool RecoveryInProgress(void)
Definition: xlog.c:6334
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
Definition: xloginsert.c:1065

References Assert, BM_DIRTY, BM_JUST_DIRTIED, BM_PERMANENT, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferGetPage(), BufferIsLocal, BufferIsValid(), BufTagGetRelFileLocator(), DELAY_CHKPT_START, PGPROC::delayChkptFlags, elog, ERROR, GetBufferDescriptor(), GetPrivateRefCount(), InvalidXLogRecPtr, LockBufHdr(), LWLockHeldByMe(), MarkLocalBufferDirty(), MyProc, PageSetLSN(), pg_atomic_read_u32(), pgBufferUsage, RecoveryInProgress(), RelFileLocatorSkippingWAL(), BufferUsage::shared_blks_dirtied, BufferDesc::state, BufferDesc::tag, UnlockBufHdr(), VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, XLogHintBitIsNeeded, XLogRecPtrIsInvalid, and XLogSaveBufferForHint().

Referenced by _bt_check_unique(), _bt_killitems(), _hash_kill_items(), brin_start_evacuating_page(), btvacuumpage(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), gistkillitems(), heap_page_prune_and_freeze(), read_seq_tuple(), SetHintBits(), and XLogRecordPageWithFreeSpace().

◆ PrefetchBuffer()

PrefetchBufferResult PrefetchBuffer ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

Definition at line 639 of file bufmgr.c.

640{
641 Assert(RelationIsValid(reln));
642 Assert(BlockNumberIsValid(blockNum));
643
644 if (RelationUsesLocalBuffers(reln))
645 {
646 /* see comments in ReadBufferExtended */
647 if (RELATION_IS_OTHER_TEMP(reln))
 648 ereport(ERROR,
 649 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
650 errmsg("cannot access temporary tables of other sessions")));
651
652 /* pass it off to localbuf.c */
653 return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
654 }
655 else
656 {
657 /* pass it to the shared buffer version */
658 return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
659 }
660}
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
PrefetchBufferResult PrefetchSharedBuffer(SMgrRelation smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:549
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define ereport(elevel,...)
Definition: elog.h:149
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition: localbuf.c:69
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:658
#define RelationIsValid(relation)
Definition: rel.h:478

References Assert, BlockNumberIsValid(), ereport, errcode(), errmsg(), ERROR, PrefetchLocalBuffer(), PrefetchSharedBuffer(), RELATION_IS_OTHER_TEMP, RelationGetSmgr(), RelationIsValid, and RelationUsesLocalBuffers.

Referenced by BitmapPrefetch(), count_nondeletable_pages(), and pg_prewarm().

◆ PrefetchSharedBuffer()

PrefetchBufferResult PrefetchSharedBuffer ( struct SMgrRelationData smgr_reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

◆ ReadBuffer()

Buffer ReadBuffer ( Relation  reln,
BlockNumber  blockNum 
)

Definition at line 746 of file bufmgr.c.

747{
748 return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
749}
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:793

References MAIN_FORKNUM, RBM_NORMAL, and ReadBufferExtended().

Referenced by _bt_allocbuf(), _bt_getbuf(), _bt_search_insert(), _hash_getbuf(), _hash_getbuf_with_condlock_cleanup(), blbulkdelete(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brinGetStats(), brinGetTupleForHeapBlock(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), bt_metap(), bt_multi_page_stats(), bt_page_items_internal(), bt_page_stats_internal(), ginFindLeafPage(), ginFindParents(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), GinNewBuffer(), ginStepRight(), ginUpdateStats(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistGetMaxLevel(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_lock_tuple(), heap_update(), initBloomState(), pg_visibility(), pgstatginindex_internal(), read_seq_tuple(), RelationGetBufferForTuple(), ReleaseAndReadBuffer(), revmap_get_buffer(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), shiftList(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), and spgWalk().

◆ ReadBufferExtended()

Buffer ReadBufferExtended ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy 
)
inline

Definition at line 793 of file bufmgr.c.

795{
796 Buffer buf;
797
798 /*
799 * Reject attempts to read non-local temporary relations; we would be
800 * likely to get wrong data since we have no visibility into the owning
801 * session's local buffers.
802 */
803 if (RELATION_IS_OTHER_TEMP(reln))
 804 ereport(ERROR,
 805 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
806 errmsg("cannot access temporary tables of other sessions")));
807
808 /*
809 * Read the buffer, and update pgstat counters to reflect a cache hit or
810 * miss.
811 */
812 buf = ReadBuffer_common(reln, RelationGetSmgr(reln), 0,
813 forkNum, blockNum, mode, strategy);
814
815 return buf;
816}

References buf, ereport, errcode(), errmsg(), ERROR, mode, ReadBuffer_common(), RELATION_IS_OTHER_TEMP, and RelationGetSmgr().

Referenced by _hash_getbuf_with_strategy(), _hash_getinitbuf(), _hash_getnewbuf(), autoprewarm_database_main(), blbulkdelete(), blgetbitmap(), BloomInitMetapage(), blvacuumcleanup(), brin_vacuum_scan(), bt_recheck_sibling_links(), btvacuumpage(), count_nondeletable_pages(), fsm_readbuf(), get_raw_page_internal(), ginbulkdelete(), ginDeletePage(), ginScanToDelete(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbulkdelete(), heapam_scan_sample_next_block(), lazy_scan_heap(), lazy_vacuum_heap_rel(), log_newpage_range(), palloc_btree_page(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstathashindex(), pgstatindex_impl(), ReadBuffer(), ReadBufferBI(), spgprocesspending(), spgvacuumpage(), statapprox_heap(), verify_heapam(), and vm_readbuf().

◆ ReadBufferWithoutRelcache()

Buffer ReadBufferWithoutRelcache ( RelFileLocator  rlocator,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy,
bool  permanent 
)

Definition at line 830 of file bufmgr.c.

833{
834 SMgrRelation smgr = smgropen(rlocator, INVALID_PROC_NUMBER);
835
836 return ReadBuffer_common(NULL, smgr,
837 permanent ? RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED,
838 forkNum, blockNum,
839 mode, strategy);
840}

References INVALID_PROC_NUMBER, mode, ReadBuffer_common(), and smgropen().

Referenced by RelationCopyStorageUsingBuffer(), ScanSourceDatabasePgClass(), and XLogReadBufferExtended().

◆ ReadRecentBuffer()

bool ReadRecentBuffer ( RelFileLocator  rlocator,
ForkNumber  forkNum,
BlockNumber  blockNum,
Buffer  recent_buffer 
)

Definition at line 670 of file bufmgr.c.

672{
673 BufferDesc *bufHdr;
674 BufferTag tag;
675 uint32 buf_state;
676 bool have_private_ref;
677
678 Assert(BufferIsValid(recent_buffer));
679
 680 ResourceOwnerEnlarge(CurrentResourceOwner);
 681 ReservePrivateRefCountEntry();
 682 InitBufferTag(&tag, &rlocator, forkNum, blockNum);
683
684 if (BufferIsLocal(recent_buffer))
685 {
686 int b = -recent_buffer - 1;
687
688 bufHdr = GetLocalBufferDescriptor(b);
689 buf_state = pg_atomic_read_u32(&bufHdr->state);
690
691 /* Is it still valid and holding the right tag? */
692 if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
693 {
694 PinLocalBuffer(bufHdr, true);
695
 696 pgBufferUsage.local_blks_hit++;
 697
698 return true;
699 }
700 }
701 else
702 {
703 bufHdr = GetBufferDescriptor(recent_buffer - 1);
704 have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
705
706 /*
707 * Do we already have this buffer pinned with a private reference? If
708 * so, it must be valid and it is safe to check the tag without
709 * locking. If not, we have to lock the header first and then check.
710 */
711 if (have_private_ref)
712 buf_state = pg_atomic_read_u32(&bufHdr->state);
713 else
714 buf_state = LockBufHdr(bufHdr);
715
716 if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
717 {
718 /*
719 * It's now safe to pin the buffer. We can't pin first and ask
720 * questions later, because it might confuse code paths like
721 * InvalidateBuffer() if we pinned a random non-matching buffer.
722 */
723 if (have_private_ref)
724 PinBuffer(bufHdr, NULL); /* bump pin count */
725 else
726 PinBuffer_Locked(bufHdr); /* pin for first time */
727
 728 pgBufferUsage.shared_blks_hit++;
 729
730 return true;
731 }
732
733 /* If we locked the header above, now unlock. */
734 if (!have_private_ref)
735 UnlockBufHdr(bufHdr, buf_state);
736 }
737
738 return false;
739}
static void InitBufferTag(BufferTag *tag, const RelFileLocator *rlocator, ForkNumber forkNum, BlockNumber blockNum)
static bool BufferTagsEqual(const BufferTag *tag1, const BufferTag *tag2)
static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
Definition: bufmgr.c:2652
int b
Definition: isn.c:69
bool PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount)
Definition: localbuf.c:656
int64 local_blks_hit
Definition: instrument.h:30
int64 shared_blks_hit
Definition: instrument.h:26

References Assert, b, BM_VALID, BufferIsLocal, BufferIsValid(), BufferTagsEqual(), CurrentResourceOwner, GetBufferDescriptor(), GetLocalBufferDescriptor(), GetPrivateRefCount(), InitBufferTag(), BufferUsage::local_blks_hit, LockBufHdr(), pg_atomic_read_u32(), pgBufferUsage, PinBuffer(), PinBuffer_Locked(), PinLocalBuffer(), ReservePrivateRefCountEntry(), ResourceOwnerEnlarge(), BufferUsage::shared_blks_hit, BufferDesc::state, BufferDesc::tag, and UnlockBufHdr().

Referenced by XLogReadBufferExtended().

◆ RelationGetNumberOfBlocksInFork()

BlockNumber RelationGetNumberOfBlocksInFork ( Relation  relation,
ForkNumber  forkNum 
)

Definition at line 3923 of file bufmgr.c.

3924{
3925 if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
3926 {
3927 /*
3928 * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore
3929 * tableam returns the size in bytes - but for the purpose of this
3930 * routine, we want the number of blocks. Therefore divide, rounding
3931 * up.
3932 */
3933 uint64 szbytes;
3934
3935 szbytes = table_relation_size(relation, forkNum);
3936
3937 return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
3938 }
3939 else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
3940 {
3941 return smgrnblocks(RelationGetSmgr(relation), forkNum);
3942 }
3943 else
3944 Assert(false);
3945
3946 return 0; /* keep compiler quiet */
3947}
static uint64 table_relation_size(Relation rel, ForkNumber forkNumber)
Definition: tableam.h:1873

References Assert, RelationData::rd_rel, RelationGetSmgr(), smgrnblocks(), and table_relation_size().

Referenced by _hash_getnewbuf(), _hash_init(), autoprewarm_database_main(), get_raw_page_internal(), and pg_prewarm().

◆ ReleaseAndReadBuffer()

Buffer ReleaseAndReadBuffer ( Buffer  buffer,
Relation  relation,
BlockNumber  blockNum 
)

Definition at line 2594 of file bufmgr.c.

2597{
2598 ForkNumber forkNum = MAIN_FORKNUM;
2599 BufferDesc *bufHdr;
2600
2601 if (BufferIsValid(buffer))
2602 {
2603 Assert(BufferIsPinned(buffer));
2604 if (BufferIsLocal(buffer))
2605 {
2606 bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2607 if (bufHdr->tag.blockNum == blockNum &&
2608 BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
2609 BufTagGetForkNum(&bufHdr->tag) == forkNum)
2610 return buffer;
2611 UnpinLocalBuffer(buffer);
2612 }
2613 else
2614 {
2615 bufHdr = GetBufferDescriptor(buffer - 1);
2616 /* we have pin, so it's ok to examine tag without spinlock */
2617 if (bufHdr->tag.blockNum == blockNum &&
2618 BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
2619 BufTagGetForkNum(&bufHdr->tag) == forkNum)
2620 return buffer;
2621 UnpinBuffer(bufHdr);
2622 }
2623 }
2624
2625 return ReadBuffer(relation, blockNum);
2626}
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:746
void UnpinLocalBuffer(Buffer buffer)
Definition: localbuf.c:682

References Assert, buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufferIsValid(), BufTagGetForkNum(), BufTagMatchesRelFileLocator(), GetBufferDescriptor(), GetLocalBufferDescriptor(), MAIN_FORKNUM, RelationData::rd_locator, ReadBuffer(), BufferDesc::tag, UnpinBuffer(), and UnpinLocalBuffer().

Referenced by _bt_relandgetbuf(), ginFindLeafPage(), heapam_index_fetch_tuple(), and heapam_scan_bitmap_next_block().

◆ ReleaseBuffer()

void ReleaseBuffer ( Buffer  buffer)

Definition at line 4924 of file bufmgr.c.

4925{
4926 if (!BufferIsValid(buffer))
4927 elog(ERROR, "bad buffer ID: %d", buffer);
4928
4929 if (BufferIsLocal(buffer))
4930 UnpinLocalBuffer(buffer);
4931 else
4932 UnpinBuffer(GetBufferDescriptor(buffer - 1));
4933}

References PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsValid(), elog, ERROR, GetBufferDescriptor(), UnpinBuffer(), and UnpinLocalBuffer().

Referenced by _bt_allocbuf(), _bt_drop_lock_and_maybe_pin(), _bt_pagedel(), _bt_relbuf(), _bt_search_insert(), _bt_unlink_halfdead_page(), _hash_dropbuf(), _hash_getbuf_with_condlock_cleanup(), autoprewarm_database_main(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brin_vacuum_scan(), bringetbitmap(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapTerminate(), brinsummarize(), collect_corrupt_items(), collect_visibility_data(), entryLoadMoreItems(), ExecEndBitmapHeapScan(), ExecEndIndexOnlyScan(), ExecReScanBitmapHeapScan(), ExtendBufferedRelTo(), FreeBulkInsertState(), freeGinBtreeStack(), fsm_search(), fsm_vacuum_page(), get_actual_variable_endpoint(), get_raw_page_internal(), GetRecordedFreeSpace(), ginDeletePage(), ginFindParents(), ginFinishSplit(), ginFreeScanKeys(), ginInsertCleanup(), GinNewBuffer(), ginScanToDelete(), gistdoinsert(), gistFindCorrectParent(), gistNewBuffer(), gistvacuum_delete_empty_pages(), heap_abort_speculative(), heap_delete(), heap_endscan(), heap_fetch(), heap_fetch_next_buffer(), heap_force_common(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_rescan(), heap_update(), heap_vac_scan_next_block(), heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapgettup(), heapgettup_pagemode(), lazy_scan_heap(), lazy_vacuum_heap_rel(), pg_prewarm(), pg_visibility(), pg_visibility_map(), pg_visibility_map_summary(), pgstatindex_impl(), read_stream_reset(), ReadBufferBI(), RelationAddBlocks(), RelationGetBufferForTuple(), ReleaseBulkInsertStatePin(), revmap_get_buffer(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), statapprox_heap(), summarize_range(), terminate_brin_buildstate(), tts_buffer_heap_clear(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), 
UnlockReleaseBuffer(), verify_heapam(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), and XLogReadBufferExtended().

◆ StartReadBuffer()

bool StartReadBuffer ( ReadBuffersOperation operation,
Buffer buffer,
BlockNumber  blocknum,
int  flags 
)

Definition at line 1382 of file bufmgr.c.

1386{
1387 int nblocks = 1;
1388 bool result;
1389
1390 result = StartReadBuffersImpl(operation, buffer, blocknum, &nblocks, flags);
1391 Assert(nblocks == 1); /* single block can't be short */
1392
1393 return result;
1394}
static pg_attribute_always_inline bool StartReadBuffersImpl(ReadBuffersOperation *operation, Buffer *buffers, BlockNumber blockNum, int *nblocks, int flags)
Definition: bufmgr.c:1254

References Assert, PrivateRefCountEntry::buffer, and StartReadBuffersImpl().

Referenced by read_stream_next_buffer(), and ReadBuffer_common().

◆ StartReadBuffers()

bool StartReadBuffers ( ReadBuffersOperation operation,
Buffer buffers,
BlockNumber  blockNum,
int *  nblocks,
int  flags 
)

Definition at line 1367 of file bufmgr.c.

1372{
1373 return StartReadBuffersImpl(operation, buffers, blockNum, nblocks, flags);
1374}

References StartReadBuffersImpl().

Referenced by read_stream_start_pending_read().

◆ UnlockBuffers()

void UnlockBuffers ( void  )

Definition at line 5130 of file bufmgr.c.

5131{
 5132 BufferDesc *buf = PinCountWaitBuf;
 5133
5134 if (buf)
5135 {
5136 uint32 buf_state;
5137
5138 buf_state = LockBufHdr(buf);
5139
5140 /*
5141 * Don't complain if flag bit not set; it could have been reset but we
5142 * got a cancel/die interrupt before getting the signal.
5143 */
5144 if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
5145 buf->wait_backend_pgprocno == MyProcNumber)
5146 buf_state &= ~BM_PIN_COUNT_WAITER;
5147
5148 UnlockBufHdr(buf, buf_state);
5149
5150 PinCountWaitBuf = NULL;
5151 }
5152}

References BM_PIN_COUNT_WAITER, buf, LockBufHdr(), MyProcNumber, PinCountWaitBuf, and UnlockBufHdr().

Referenced by AbortSubTransaction(), AbortTransaction(), AtProcExit_Buffers(), BackgroundWriterMain(), CheckpointerMain(), and WalWriterMain().

◆ UnlockReleaseBuffer()

void UnlockReleaseBuffer ( Buffer  buffer)

Definition at line 4941 of file bufmgr.c.

4942{
 4943 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 4944 ReleaseBuffer(buffer);
4945}

References PrivateRefCountEntry::buffer, BUFFER_LOCK_UNLOCK, LockBuffer(), and ReleaseBuffer().

Referenced by _bt_clear_incomplete_split(), _bt_restore_meta(), _hash_relbuf(), allocNewBuffer(), AlterSequence(), blbulkdelete(), blgetbitmap(), blinsert(), BloomInitMetapage(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinGetStats(), brinRevmapDesummarizeRange(), bt_metap(), bt_multi_page_stats(), bt_page_items_internal(), bt_page_stats_internal(), bt_recheck_sibling_links(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), createPostingTree(), do_setval(), doPickSplit(), entryLoadMoreItems(), fill_seq_fork_with_data(), flushCachedPage(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_set_and_search(), generic_redo(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), ginScanToDelete(), ginStepRight(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistbufferinginserttuples(), gistbuild(), gistbuildempty(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), 
hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_insert(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune_freeze(), heap_xlog_update(), heap_xlog_visible(), heapam_scan_analyze_next_tuple(), initBloomState(), lazy_scan_heap(), lazy_scan_new_or_empty(), lazy_vacuum_heap_rel(), log_newpage_range(), moveLeafs(), nextval_internal(), palloc_btree_page(), pg_get_sequence_data(), pg_sequence_last_value(), pg_visibility(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), ResetSequence(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), ScanSourceDatabasePgClass(), seq_redo(), SequenceChangePersistence(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistUpdateMetaPage(), spgMatchNodeAction(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), statapprox_heap(), verify_heapam(), verifyBackupPageConsistency(), visibilitymap_prepare_truncate(), writeListPage(), xlog_redo(), and XLogRecordPageWithFreeSpace().

◆ WaitReadBuffers()

void WaitReadBuffers ( ReadBuffersOperation operation)

Definition at line 1410 of file bufmgr.c.

1411{
1412 Buffer *buffers;
1413 int nblocks;
1414 BlockNumber blocknum;
1415 ForkNumber forknum;
1416 IOContext io_context;
1417 IOObject io_object;
1418 char persistence;
1419
1420 /*
1421 * Currently operations are only allowed to include a read of some range,
1422 * with an optional extra buffer that is already pinned at the end. So
1423 * nblocks can be at most one more than io_buffers_len.
1424 */
1425 Assert((operation->nblocks == operation->io_buffers_len) ||
1426 (operation->nblocks == operation->io_buffers_len + 1));
1427
1428 /* Find the range of the physical read we need to perform. */
1429 nblocks = operation->io_buffers_len;
1430 if (nblocks == 0)
1431 return; /* nothing to do */
1432
1433 buffers = &operation->buffers[0];
1434 blocknum = operation->blocknum;
1435 forknum = operation->forknum;
1436 persistence = operation->persistence;
1437
1438 if (persistence == RELPERSISTENCE_TEMP)
1439 {
1440 io_context = IOCONTEXT_NORMAL;
1441 io_object = IOOBJECT_TEMP_RELATION;
1442 }
1443 else
1444 {
1445 io_context = IOContextForStrategy(operation->strategy);
1446 io_object = IOOBJECT_RELATION;
1447 }
1448
1449 /*
1450 * We count all these blocks as read by this backend. This is traditional
1451 * behavior, but might turn out to be not true if we find that someone
1452 * else has beaten us and completed the read of some of these blocks. In
1453 * that case the system globally double-counts, but we traditionally don't
1454 * count this as a "hit", and we don't have a separate counter for "miss,
1455 * but another backend completed the read".
1456 */
1457 if (persistence == RELPERSISTENCE_TEMP)
1458 pgBufferUsage.local_blks_read += nblocks;
1459 else
 1460 pgBufferUsage.shared_blks_read += nblocks;
 1461
1462 for (int i = 0; i < nblocks; ++i)
1463 {
1464 int io_buffers_len;
1465 Buffer io_buffers[MAX_IO_COMBINE_LIMIT];
1466 void *io_pages[MAX_IO_COMBINE_LIMIT];
1467 instr_time io_start;
1468 BlockNumber io_first_block;
1469
1470 /*
1471 * Skip this block if someone else has already completed it. If an
1472 * I/O is already in progress in another backend, this will wait for
1473 * the outcome: either done, or something went wrong and we will
1474 * retry.
1475 */
1476 if (!WaitReadBuffersCanStartIO(buffers[i], false))
1477 {
1478 /*
1479 * Report this as a 'hit' for this backend, even though it must
1480 * have started out as a miss in PinBufferForBlock().
1481 */
1482 TRACE_POSTGRESQL_BUFFER_READ_DONE(forknum, blocknum + i,
1483 operation->smgr->smgr_rlocator.locator.spcOid,
1484 operation->smgr->smgr_rlocator.locator.dbOid,
1485 operation->smgr->smgr_rlocator.locator.relNumber,
1486 operation->smgr->smgr_rlocator.backend,
1487 true);
1488 continue;
1489 }
1490
1491 /* We found a buffer that we need to read in. */
1492 io_buffers[0] = buffers[i];
1493 io_pages[0] = BufferGetBlock(buffers[i]);
1494 io_first_block = blocknum + i;
1495 io_buffers_len = 1;
1496
1497 /*
1498 * How many neighboring-on-disk blocks can we scatter-read into other
1499 * buffers at the same time? In this case we don't wait if we see an
1500 * I/O already in progress. We already hold BM_IO_IN_PROGRESS for the
1501 * head block, so we should get on with that I/O as soon as possible.
1502 * We'll come back to this block again, above.
1503 */
1504 while ((i + 1) < nblocks &&
1505 WaitReadBuffersCanStartIO(buffers[i + 1], true))
1506 {
1507 /* Must be consecutive block numbers. */
1508 Assert(BufferGetBlockNumber(buffers[i + 1]) ==
1509 BufferGetBlockNumber(buffers[i]) + 1);
1510
1511 io_buffers[io_buffers_len] = buffers[++i];
1512 io_pages[io_buffers_len++] = BufferGetBlock(buffers[i]);
1513 }
1514
 1515 io_start = pgstat_prepare_io_time(track_io_timing);
 1516 smgrreadv(operation->smgr, forknum, io_first_block, io_pages, io_buffers_len);
1517 pgstat_count_io_op_time(io_object, io_context, IOOP_READ, io_start,
1518 1, io_buffers_len * BLCKSZ);
1519
1520 /* Verify each block we read, and terminate the I/O. */
1521 for (int j = 0; j < io_buffers_len; ++j)
1522 {
1523 BufferDesc *bufHdr;
1524 Block bufBlock;
1525
1526 if (persistence == RELPERSISTENCE_TEMP)
1527 {
1528 bufHdr = GetLocalBufferDescriptor(-io_buffers[j] - 1);
1529 bufBlock = LocalBufHdrGetBlock(bufHdr);
1530 }
1531 else
1532 {
1533 bufHdr = GetBufferDescriptor(io_buffers[j] - 1);
1534 bufBlock = BufHdrGetBlock(bufHdr);
1535 }
1536
1537 /* check for garbage data */
1538 if (!PageIsVerifiedExtended((Page) bufBlock, io_first_block + j,
 1539 PIV_LOG_WARNING | PIV_REPORT_STAT))
 1540 {
 1541 if ((operation->flags & READ_BUFFERS_ZERO_ON_ERROR) || zero_damaged_pages)
 1542 {
 1543 ereport(WARNING,
 1544 (errcode(ERRCODE_DATA_CORRUPTED),
 1545 errmsg("invalid page in block %u of relation %s; zeroing out page",
1546 io_first_block + j,
1547 relpath(operation->smgr->smgr_rlocator, forknum))));
1548 memset(bufBlock, 0, BLCKSZ);
1549 }
1550 else
1551 ereport(ERROR,
 1552 (errcode(ERRCODE_DATA_CORRUPTED),
 1553 errmsg("invalid page in block %u of relation %s",
1554 io_first_block + j,
1555 relpath(operation->smgr->smgr_rlocator, forknum))));
1556 }
1557
1558 /* Terminate I/O and set BM_VALID. */
1559 if (persistence == RELPERSISTENCE_TEMP)
1560 {
1561 uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
1562
1563 buf_state |= BM_VALID;
1564 pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
1565 }
1566 else
1567 {
1568 /* Set BM_VALID, terminate IO, and wake up any waiters */
1569 TerminateBufferIO(bufHdr, false, BM_VALID, true);
1570 }
1571
1572 /* Report I/Os as completing individually. */
1573 TRACE_POSTGRESQL_BUFFER_READ_DONE(forknum, io_first_block + j,
1574 operation->smgr->smgr_rlocator.locator.spcOid,
1575 operation->smgr->smgr_rlocator.locator.dbOid,
1576 operation->smgr->smgr_rlocator.locator.relNumber,
1577 operation->smgr->smgr_rlocator.backend,
1578 false);
1579 }
1580
1581 if (VacuumCostActive)
1582 VacuumCostBalance += VacuumCostPageMiss * io_buffers_len;
1583 }
1584}
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:3724
static bool WaitReadBuffersCanStartIO(Buffer buffer, bool nowait)
Definition: bufmgr.c:1397
bool zero_damaged_pages
Definition: bufmgr.c:140
static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits, bool forget_owner)
Definition: bufmgr.c:5615
#define BufHdrGetBlock(bufHdr)
Definition: bufmgr.c:68
#define READ_BUFFERS_ZERO_ON_ERROR
Definition: bufmgr.h:111
#define MAX_IO_COMBINE_LIMIT
Definition: bufmgr.h:164
bool PageIsVerifiedExtended(Page page, BlockNumber blkno, int flags)
Definition: bufpage.c:88
#define PIV_LOG_WARNING
Definition: bufpage.h:468
#define PIV_REPORT_STAT
Definition: bufpage.h:469
#define WARNING
Definition: elog.h:36
IOContext IOContextForStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:758
int VacuumCostPageMiss
Definition: globals.c:151
int j
Definition: isn.c:73
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:41
IOObject
Definition: pgstat.h:274
IOContext
Definition: pgstat.h:282
@ IOOP_READ
Definition: pgstat.h:311
#define relpath(rlocator, forknum)
Definition: relpath.h:102
void smgrreadv(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, void **buffers, BlockNumber nblocks)
Definition: smgr.c:619
int64 shared_blks_read
Definition: instrument.h:27
int64 local_blks_read
Definition: instrument.h:31
ForkNumber forknum
Definition: bufmgr.h:121
int16 io_buffers_len
Definition: bufmgr.h:133
Buffer * buffers
Definition: bufmgr.h:129
BufferAccessStrategy strategy
Definition: bufmgr.h:122
BlockNumber blocknum
Definition: bufmgr.h:130
struct SMgrRelationData * smgr
Definition: bufmgr.h:119
RelFileLocator locator
RelFileNumber relNumber
RelFileLocatorBackend smgr_rlocator
Definition: smgr.h:37

References Assert, RelFileLocatorBackend::backend, ReadBuffersOperation::blocknum, BM_VALID, BufferGetBlock(), BufferGetBlockNumber(), ReadBuffersOperation::buffers, BufHdrGetBlock, RelFileLocator::dbOid, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg(), ERROR, ReadBuffersOperation::flags, ReadBuffersOperation::forknum, GetBufferDescriptor(), GetLocalBufferDescriptor(), i, ReadBuffersOperation::io_buffers_len, IOCONTEXT_NORMAL, IOContextForStrategy(), IOOBJECT_RELATION, IOOBJECT_TEMP_RELATION, IOOP_READ, j, BufferUsage::local_blks_read, LocalBufHdrGetBlock, RelFileLocatorBackend::locator, MAX_IO_COMBINE_LIMIT, ReadBuffersOperation::nblocks, PageIsVerifiedExtended(), ReadBuffersOperation::persistence, pg_atomic_read_u32(), pg_atomic_unlocked_write_u32(), pgBufferUsage, pgstat_count_io_op_time(), pgstat_prepare_io_time(), PIV_LOG_WARNING, PIV_REPORT_STAT, READ_BUFFERS_ZERO_ON_ERROR, RelFileLocator::relNumber, relpath, BufferUsage::shared_blks_read, ReadBuffersOperation::smgr, SMgrRelationData::smgr_rlocator, smgrreadv(), RelFileLocator::spcOid, BufferDesc::state, ReadBuffersOperation::strategy, TerminateBufferIO(), track_io_timing, VacuumCostActive, VacuumCostBalance, VacuumCostPageMiss, WaitReadBuffersCanStartIO(), WARNING, and zero_damaged_pages.

Referenced by read_stream_next_buffer(), and ReadBuffer_common().

Variable Documentation

◆ backend_flush_after

PGDLLIMPORT int backend_flush_after
extern

Definition at line 173 of file bufmgr.c.

Referenced by BufferManagerShmemInit().

◆ bgwriter_flush_after

PGDLLIMPORT int bgwriter_flush_after
extern

Definition at line 172 of file bufmgr.c.

Referenced by BackgroundWriterMain().

◆ bgwriter_lru_maxpages

PGDLLIMPORT int bgwriter_lru_maxpages
extern

Definition at line 141 of file bufmgr.c.

Referenced by BgBufferSync().

◆ bgwriter_lru_multiplier

PGDLLIMPORT double bgwriter_lru_multiplier
extern

Definition at line 142 of file bufmgr.c.

Referenced by BgBufferSync().

◆ BufferBlocks

PGDLLIMPORT char* BufferBlocks
extern

Definition at line 21 of file buf_init.c.

Referenced by BufferGetBlock(), and BufferManagerShmemInit().

◆ checkpoint_flush_after

PGDLLIMPORT int checkpoint_flush_after
extern

Definition at line 171 of file bufmgr.c.

Referenced by BufferSync().

◆ effective_io_concurrency

PGDLLIMPORT int effective_io_concurrency
extern

◆ io_combine_limit

PGDLLIMPORT int io_combine_limit
extern

◆ LocalBufferBlockPointers

PGDLLIMPORT Block* LocalBufferBlockPointers
extern

Definition at line 45 of file localbuf.c.

Referenced by BufferGetBlock(), and InitLocalBuffers().

◆ LocalRefCount

◆ maintenance_io_concurrency

◆ NBuffers

◆ NLocBuffer

◆ track_io_timing

◆ zero_damaged_pages

PGDLLIMPORT bool zero_damaged_pages
extern

Definition at line 140 of file bufmgr.c.

Referenced by mdreadv(), and WaitReadBuffers().