PostgreSQL Source Code  git master
bufmgr.h File Reference
#include "storage/block.h"
#include "storage/buf.h"
#include "storage/bufpage.h"
#include "storage/relfilelocator.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
Include dependency graph for bufmgr.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  PrefetchBufferResult
 
struct  BufferManagerRelation
 

Macros

#define BMR_REL(p_rel)   ((BufferManagerRelation){.rel = p_rel})
 
#define BMR_SMGR(p_smgr, p_relpersistence)   ((BufferManagerRelation){.smgr = p_smgr, .relpersistence = p_relpersistence})
 
#define DEFAULT_EFFECTIVE_IO_CONCURRENCY   0
 
#define DEFAULT_MAINTENANCE_IO_CONCURRENCY   0
 
#define MAX_IO_CONCURRENCY   1000
 
#define P_NEW   InvalidBlockNumber /* grow the file to get a new page */
 
#define BUFFER_LOCK_UNLOCK   0
 
#define BUFFER_LOCK_SHARE   1
 
#define BUFFER_LOCK_EXCLUSIVE   2
 
#define RelationGetNumberOfBlocks(reln)    RelationGetNumberOfBlocksInFork(reln, MAIN_FORKNUM)
 

Typedefs

typedef void * Block
 
typedef enum BufferAccessStrategyType BufferAccessStrategyType
 
typedef struct PrefetchBufferResult PrefetchBufferResult
 
typedef enum ExtendBufferedFlags ExtendBufferedFlags
 
typedef struct BufferManagerRelation BufferManagerRelation
 

Enumerations

enum  BufferAccessStrategyType { BAS_NORMAL , BAS_BULKREAD , BAS_BULKWRITE , BAS_VACUUM }
 
enum  ReadBufferMode {
  RBM_NORMAL , RBM_ZERO_AND_LOCK , RBM_ZERO_AND_CLEANUP_LOCK , RBM_ZERO_ON_ERROR ,
  RBM_NORMAL_NO_LOG
}
 
enum  ExtendBufferedFlags {
  EB_SKIP_EXTENSION_LOCK = (1 << 0) , EB_PERFORMING_RECOVERY = (1 << 1) , EB_CREATE_FORK_IF_NEEDED = (1 << 2) , EB_LOCK_FIRST = (1 << 3) ,
  EB_CLEAR_SIZE_CACHE = (1 << 4) , EB_LOCK_TARGET = (1 << 5)
}
 

Functions

PrefetchBufferResult PrefetchSharedBuffer (struct SMgrRelationData *smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
 
PrefetchBufferResult PrefetchBuffer (Relation reln, ForkNumber forkNum, BlockNumber blockNum)
 
bool ReadRecentBuffer (RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum, Buffer recent_buffer)
 
Buffer ReadBuffer (Relation reln, BlockNumber blockNum)
 
Buffer ReadBufferExtended (Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
 
Buffer ReadBufferWithoutRelcache (RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool permanent)
 
void ReleaseBuffer (Buffer buffer)
 
void UnlockReleaseBuffer (Buffer buffer)
 
bool BufferIsExclusiveLocked (Buffer buffer)
 
bool BufferIsDirty (Buffer buffer)
 
void MarkBufferDirty (Buffer buffer)
 
void IncrBufferRefCount (Buffer buffer)
 
void CheckBufferIsPinnedOnce (Buffer buffer)
 
Buffer ReleaseAndReadBuffer (Buffer buffer, Relation relation, BlockNumber blockNum)
 
Buffer ExtendBufferedRel (BufferManagerRelation bmr, ForkNumber forkNum, BufferAccessStrategy strategy, uint32 flags)
 
BlockNumber ExtendBufferedRelBy (BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, uint32 extend_by, Buffer *buffers, uint32 *extended_by)
 
Buffer ExtendBufferedRelTo (BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, BlockNumber extend_to, ReadBufferMode mode)
 
void InitBufferPoolAccess (void)
 
void AtEOXact_Buffers (bool isCommit)
 
char * DebugPrintBufferRefcount (Buffer buffer)
 
void CheckPointBuffers (int flags)
 
BlockNumber BufferGetBlockNumber (Buffer buffer)
 
BlockNumber RelationGetNumberOfBlocksInFork (Relation relation, ForkNumber forkNum)
 
void FlushOneBuffer (Buffer buffer)
 
void FlushRelationBuffers (Relation rel)
 
void FlushRelationsAllBuffers (struct SMgrRelationData **smgrs, int nrels)
 
void CreateAndCopyRelationData (RelFileLocator src_rlocator, RelFileLocator dst_rlocator, bool permanent)
 
void FlushDatabaseBuffers (Oid dbid)
 
void DropRelationBuffers (struct SMgrRelationData *smgr_reln, ForkNumber *forkNum, int nforks, BlockNumber *firstDelBlock)
 
void DropRelationsAllBuffers (struct SMgrRelationData **smgr_reln, int nlocators)
 
void DropDatabaseBuffers (Oid dbid)
 
bool BufferIsPermanent (Buffer buffer)
 
XLogRecPtr BufferGetLSNAtomic (Buffer buffer)
 
void BufferGetTag (Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
 
void MarkBufferDirtyHint (Buffer buffer, bool buffer_std)
 
void UnlockBuffers (void)
 
void LockBuffer (Buffer buffer, int mode)
 
bool ConditionalLockBuffer (Buffer buffer)
 
void LockBufferForCleanup (Buffer buffer)
 
bool ConditionalLockBufferForCleanup (Buffer buffer)
 
bool IsBufferCleanupOK (Buffer buffer)
 
bool HoldingBufferPinThatDelaysRecovery (void)
 
bool BgBufferSync (struct WritebackContext *wb_context)
 
void InitBufferPool (void)
 
Size BufferShmemSize (void)
 
void AtProcExit_LocalBuffers (void)
 
BufferAccessStrategy GetAccessStrategy (BufferAccessStrategyType btype)
 
BufferAccessStrategy GetAccessStrategyWithSize (BufferAccessStrategyType btype, int ring_size_kb)
 
int GetAccessStrategyBufferCount (BufferAccessStrategy strategy)
 
void FreeAccessStrategy (BufferAccessStrategy strategy)
 
static bool BufferIsValid (Buffer bufnum)
 
static Block BufferGetBlock (Buffer buffer)
 
static Size BufferGetPageSize (Buffer buffer)
 
static Page BufferGetPage (Buffer buffer)
 

Variables

PGDLLIMPORT int NBuffers
 
PGDLLIMPORT bool zero_damaged_pages
 
PGDLLIMPORT int bgwriter_lru_maxpages
 
PGDLLIMPORT double bgwriter_lru_multiplier
 
PGDLLIMPORT bool track_io_timing
 
PGDLLIMPORT int effective_io_concurrency
 
PGDLLIMPORT int maintenance_io_concurrency
 
PGDLLIMPORT int checkpoint_flush_after
 
PGDLLIMPORT int backend_flush_after
 
PGDLLIMPORT int bgwriter_flush_after
 
PGDLLIMPORT char * BufferBlocks
 
PGDLLIMPORT int NLocBuffer
 
PGDLLIMPORT Block * LocalBufferBlockPointers
 
PGDLLIMPORT int32 * LocalRefCount
 

Macro Definition Documentation

◆ BMR_REL

#define BMR_REL (   p_rel)    ((BufferManagerRelation){.rel = p_rel})

Definition at line 106 of file bufmgr.h.

◆ BMR_SMGR

#define BMR_SMGR (   p_smgr,
  p_relpersistence 
)    ((BufferManagerRelation){.smgr = p_smgr, .relpersistence = p_relpersistence})

Definition at line 107 of file bufmgr.h.

◆ BUFFER_LOCK_EXCLUSIVE

#define BUFFER_LOCK_EXCLUSIVE   2

Definition at line 159 of file bufmgr.h.

◆ BUFFER_LOCK_SHARE

#define BUFFER_LOCK_SHARE   1

Definition at line 158 of file bufmgr.h.

◆ BUFFER_LOCK_UNLOCK

#define BUFFER_LOCK_UNLOCK   0

Definition at line 157 of file bufmgr.h.

◆ DEFAULT_EFFECTIVE_IO_CONCURRENCY

#define DEFAULT_EFFECTIVE_IO_CONCURRENCY   0

Definition at line 130 of file bufmgr.h.

◆ DEFAULT_MAINTENANCE_IO_CONCURRENCY

#define DEFAULT_MAINTENANCE_IO_CONCURRENCY   0

Definition at line 131 of file bufmgr.h.

◆ MAX_IO_CONCURRENCY

#define MAX_IO_CONCURRENCY   1000

Definition at line 149 of file bufmgr.h.

◆ P_NEW

#define P_NEW   InvalidBlockNumber /* grow the file to get a new page */

Definition at line 152 of file bufmgr.h.

◆ RelationGetNumberOfBlocks

#define RelationGetNumberOfBlocks (   reln)     RelationGetNumberOfBlocksInFork(reln, MAIN_FORKNUM)

Definition at line 229 of file bufmgr.h.

Typedef Documentation

◆ Block

typedef void* Block

Definition at line 24 of file bufmgr.h.

◆ BufferAccessStrategyType

◆ BufferManagerRelation

◆ ExtendBufferedFlags

◆ PrefetchBufferResult

Enumeration Type Documentation

◆ BufferAccessStrategyType

Enumerator
BAS_NORMAL 
BAS_BULKREAD 
BAS_BULKWRITE 
BAS_VACUUM 

Definition at line 32 of file bufmgr.h.

33 {
34  BAS_NORMAL, /* Normal random access */
35  BAS_BULKREAD, /* Large read-only scan (hint bit updates are
36  * ok) */
37  BAS_BULKWRITE, /* Large multi-block write (e.g. COPY IN) */
38  BAS_VACUUM, /* VACUUM */
BufferAccessStrategyType
Definition: bufmgr.h:33
@ BAS_BULKREAD
Definition: bufmgr.h:35
@ BAS_NORMAL
Definition: bufmgr.h:34
@ BAS_VACUUM
Definition: bufmgr.h:38
@ BAS_BULKWRITE
Definition: bufmgr.h:37

◆ ExtendBufferedFlags

Enumerator
EB_SKIP_EXTENSION_LOCK 
EB_PERFORMING_RECOVERY 
EB_CREATE_FORK_IF_NEEDED 
EB_LOCK_FIRST 
EB_CLEAR_SIZE_CACHE 
EB_LOCK_TARGET 

Definition at line 66 of file bufmgr.h.

67 {
68  /*
69  * Don't acquire extension lock. This is safe only if the relation isn't
70  * shared, an access exclusive lock is held or if this is the startup
71  * process.
72  */
73  EB_SKIP_EXTENSION_LOCK = (1 << 0),
74 
75  /* Is this extension part of recovery? */
76  EB_PERFORMING_RECOVERY = (1 << 1),
77 
78  /*
79  * Should the fork be created if it does not currently exist? This likely
80  * only ever makes sense for relation forks.
81  */
82  EB_CREATE_FORK_IF_NEEDED = (1 << 2),
83 
84  /* Should the first (possibly only) return buffer be returned locked? */
85  EB_LOCK_FIRST = (1 << 3),
86 
87  /* Should the smgr size cache be cleared? */
88  EB_CLEAR_SIZE_CACHE = (1 << 4),
89 
90  /* internal flags follow */
91  EB_LOCK_TARGET = (1 << 5),
ExtendBufferedFlags
Definition: bufmgr.h:67
@ EB_LOCK_TARGET
Definition: bufmgr.h:91
@ EB_CLEAR_SIZE_CACHE
Definition: bufmgr.h:88
@ EB_PERFORMING_RECOVERY
Definition: bufmgr.h:76
@ EB_CREATE_FORK_IF_NEEDED
Definition: bufmgr.h:82
@ EB_SKIP_EXTENSION_LOCK
Definition: bufmgr.h:73
@ EB_LOCK_FIRST
Definition: bufmgr.h:85

◆ ReadBufferMode

Enumerator
RBM_NORMAL 
RBM_ZERO_AND_LOCK 
RBM_ZERO_AND_CLEANUP_LOCK 
RBM_ZERO_ON_ERROR 
RBM_NORMAL_NO_LOG 

Definition at line 42 of file bufmgr.h.

43 {
44  RBM_NORMAL, /* Normal read */
45  RBM_ZERO_AND_LOCK, /* Don't read from disk, caller will
46  * initialize. Also locks the page. */
47  RBM_ZERO_AND_CLEANUP_LOCK, /* Like RBM_ZERO_AND_LOCK, but locks the page
48  * in "cleanup" mode */
49  RBM_ZERO_ON_ERROR, /* Read, but return an all-zeros page on error */
50  RBM_NORMAL_NO_LOG, /* Don't log page as invalid during WAL
51  * replay; otherwise same as RBM_NORMAL */
ReadBufferMode
Definition: bufmgr.h:43
@ RBM_ZERO_ON_ERROR
Definition: bufmgr.h:49
@ RBM_ZERO_AND_CLEANUP_LOCK
Definition: bufmgr.h:47
@ RBM_ZERO_AND_LOCK
Definition: bufmgr.h:45
@ RBM_NORMAL
Definition: bufmgr.h:44
@ RBM_NORMAL_NO_LOG
Definition: bufmgr.h:50

Function Documentation

◆ AtEOXact_Buffers()

void AtEOXact_Buffers ( bool  isCommit)

Definition at line 3213 of file bufmgr.c.

3214 {
3216 
3217  AtEOXact_LocalBuffers(isCommit);
3218 
3220 }
static void CheckForBufferLeaks(void)
Definition: bufmgr.c:3273
static int32 PrivateRefCountOverflowed
Definition: bufmgr.c:199
Assert(fmt[strlen(fmt) - 1] != '\n')
void AtEOXact_LocalBuffers(bool isCommit)
Definition: localbuf.c:820

References Assert(), AtEOXact_LocalBuffers(), CheckForBufferLeaks(), and PrivateRefCountOverflowed.

Referenced by AbortTransaction(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), CommitTransaction(), PrepareTransaction(), and WalWriterMain().

◆ AtProcExit_LocalBuffers()

void AtProcExit_LocalBuffers ( void  )

Definition at line 831 of file localbuf.c.

832 {
833  /*
834  * We shouldn't be holding any remaining pins; if we are, and assertions
835  * aren't enabled, we'll fail later in DropRelationBuffers while trying to
836  * drop the temp rels.
837  */
839 }
static void CheckForLocalBufferLeaks(void)
Definition: localbuf.c:787

References CheckForLocalBufferLeaks().

Referenced by AtProcExit_Buffers().

◆ BgBufferSync()

bool BgBufferSync ( struct WritebackContext * wb_context)

Definition at line 2842 of file bufmgr.c.

2843 {
2844  /* info obtained from freelist.c */
2845  int strategy_buf_id;
2846  uint32 strategy_passes;
2847  uint32 recent_alloc;
2848 
2849  /*
2850  * Information saved between calls so we can determine the strategy
2851  * point's advance rate and avoid scanning already-cleaned buffers.
2852  */
2853  static bool saved_info_valid = false;
2854  static int prev_strategy_buf_id;
2855  static uint32 prev_strategy_passes;
2856  static int next_to_clean;
2857  static uint32 next_passes;
2858 
2859  /* Moving averages of allocation rate and clean-buffer density */
2860  static float smoothed_alloc = 0;
2861  static float smoothed_density = 10.0;
2862 
2863  /* Potentially these could be tunables, but for now, not */
2864  float smoothing_samples = 16;
2865  float scan_whole_pool_milliseconds = 120000.0;
2866 
2867  /* Used to compute how far we scan ahead */
2868  long strategy_delta;
2869  int bufs_to_lap;
2870  int bufs_ahead;
2871  float scans_per_alloc;
2872  int reusable_buffers_est;
2873  int upcoming_alloc_est;
2874  int min_scan_buffers;
2875 
2876  /* Variables for the scanning loop proper */
2877  int num_to_scan;
2878  int num_written;
2879  int reusable_buffers;
2880 
2881  /* Variables for final smoothed_density update */
2882  long new_strategy_delta;
2883  uint32 new_recent_alloc;
2884 
2885  /*
2886  * Find out where the freelist clock sweep currently is, and how many
2887  * buffer allocations have happened since our last call.
2888  */
2889  strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
2890 
2891  /* Report buffer alloc counts to pgstat */
2892  PendingBgWriterStats.buf_alloc += recent_alloc;
2893 
2894  /*
2895  * If we're not running the LRU scan, just stop after doing the stats
2896  * stuff. We mark the saved state invalid so that we can recover sanely
2897  * if LRU scan is turned back on later.
2898  */
2899  if (bgwriter_lru_maxpages <= 0)
2900  {
2901  saved_info_valid = false;
2902  return true;
2903  }
2904 
2905  /*
2906  * Compute strategy_delta = how many buffers have been scanned by the
2907  * clock sweep since last time. If first time through, assume none. Then
2908  * see if we are still ahead of the clock sweep, and if so, how many
2909  * buffers we could scan before we'd catch up with it and "lap" it. Note:
2910  * weird-looking coding of xxx_passes comparisons are to avoid bogus
2911  * behavior when the passes counts wrap around.
2912  */
2913  if (saved_info_valid)
2914  {
2915  int32 passes_delta = strategy_passes - prev_strategy_passes;
2916 
2917  strategy_delta = strategy_buf_id - prev_strategy_buf_id;
2918  strategy_delta += (long) passes_delta * NBuffers;
2919 
2920  Assert(strategy_delta >= 0);
2921 
2922  if ((int32) (next_passes - strategy_passes) > 0)
2923  {
2924  /* we're one pass ahead of the strategy point */
2925  bufs_to_lap = strategy_buf_id - next_to_clean;
2926 #ifdef BGW_DEBUG
2927  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2928  next_passes, next_to_clean,
2929  strategy_passes, strategy_buf_id,
2930  strategy_delta, bufs_to_lap);
2931 #endif
2932  }
2933  else if (next_passes == strategy_passes &&
2934  next_to_clean >= strategy_buf_id)
2935  {
2936  /* on same pass, but ahead or at least not behind */
2937  bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
2938 #ifdef BGW_DEBUG
2939  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2940  next_passes, next_to_clean,
2941  strategy_passes, strategy_buf_id,
2942  strategy_delta, bufs_to_lap);
2943 #endif
2944  }
2945  else
2946  {
2947  /*
2948  * We're behind, so skip forward to the strategy point and start
2949  * cleaning from there.
2950  */
2951 #ifdef BGW_DEBUG
2952  elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
2953  next_passes, next_to_clean,
2954  strategy_passes, strategy_buf_id,
2955  strategy_delta);
2956 #endif
2957  next_to_clean = strategy_buf_id;
2958  next_passes = strategy_passes;
2959  bufs_to_lap = NBuffers;
2960  }
2961  }
2962  else
2963  {
2964  /*
2965  * Initializing at startup or after LRU scanning had been off. Always
2966  * start at the strategy point.
2967  */
2968 #ifdef BGW_DEBUG
2969  elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
2970  strategy_passes, strategy_buf_id);
2971 #endif
2972  strategy_delta = 0;
2973  next_to_clean = strategy_buf_id;
2974  next_passes = strategy_passes;
2975  bufs_to_lap = NBuffers;
2976  }
2977 
2978  /* Update saved info for next time */
2979  prev_strategy_buf_id = strategy_buf_id;
2980  prev_strategy_passes = strategy_passes;
2981  saved_info_valid = true;
2982 
2983  /*
2984  * Compute how many buffers had to be scanned for each new allocation, ie,
2985  * 1/density of reusable buffers, and track a moving average of that.
2986  *
2987  * If the strategy point didn't move, we don't update the density estimate
2988  */
2989  if (strategy_delta > 0 && recent_alloc > 0)
2990  {
2991  scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
2992  smoothed_density += (scans_per_alloc - smoothed_density) /
2993  smoothing_samples;
2994  }
2995 
2996  /*
2997  * Estimate how many reusable buffers there are between the current
2998  * strategy point and where we've scanned ahead to, based on the smoothed
2999  * density estimate.
3000  */
3001  bufs_ahead = NBuffers - bufs_to_lap;
3002  reusable_buffers_est = (float) bufs_ahead / smoothed_density;
3003 
3004  /*
3005  * Track a moving average of recent buffer allocations. Here, rather than
3006  * a true average we want a fast-attack, slow-decline behavior: we
3007  * immediately follow any increase.
3008  */
3009  if (smoothed_alloc <= (float) recent_alloc)
3010  smoothed_alloc = recent_alloc;
3011  else
3012  smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
3013  smoothing_samples;
3014 
3015  /* Scale the estimate by a GUC to allow more aggressive tuning. */
3016  upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
3017 
3018  /*
3019  * If recent_alloc remains at zero for many cycles, smoothed_alloc will
3020  * eventually underflow to zero, and the underflows produce annoying
3021  * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
3022  * zero, there's no point in tracking smaller and smaller values of
3023  * smoothed_alloc, so just reset it to exactly zero to avoid this
3024  * syndrome. It will pop back up as soon as recent_alloc increases.
3025  */
3026  if (upcoming_alloc_est == 0)
3027  smoothed_alloc = 0;
3028 
3029  /*
3030  * Even in cases where there's been little or no buffer allocation
3031  * activity, we want to make a small amount of progress through the buffer
3032  * cache so that as many reusable buffers as possible are clean after an
3033  * idle period.
3034  *
3035  * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
3036  * the BGW will be called during the scan_whole_pool time; slice the
3037  * buffer pool into that many sections.
3038  */
3039  min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
3040 
3041  if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
3042  {
3043 #ifdef BGW_DEBUG
3044  elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
3045  upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
3046 #endif
3047  upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
3048  }
3049 
3050  /*
3051  * Now write out dirty reusable buffers, working forward from the
3052  * next_to_clean point, until we have lapped the strategy scan, or cleaned
3053  * enough buffers to match our estimate of the next cycle's allocation
3054  * requirements, or hit the bgwriter_lru_maxpages limit.
3055  */
3056 
3057  num_to_scan = bufs_to_lap;
3058  num_written = 0;
3059  reusable_buffers = reusable_buffers_est;
3060 
3061  /* Execute the LRU scan */
3062  while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
3063  {
3064  int sync_state = SyncOneBuffer(next_to_clean, true,
3065  wb_context);
3066 
3067  if (++next_to_clean >= NBuffers)
3068  {
3069  next_to_clean = 0;
3070  next_passes++;
3071  }
3072  num_to_scan--;
3073 
3074  if (sync_state & BUF_WRITTEN)
3075  {
3076  reusable_buffers++;
3077  if (++num_written >= bgwriter_lru_maxpages)
3078  {
3080  break;
3081  }
3082  }
3083  else if (sync_state & BUF_REUSABLE)
3084  reusable_buffers++;
3085  }
3086 
3087  PendingBgWriterStats.buf_written_clean += num_written;
3088 
3089 #ifdef BGW_DEBUG
3090  elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
3091  recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
3092  smoothed_density, reusable_buffers_est, upcoming_alloc_est,
3093  bufs_to_lap - num_to_scan,
3094  num_written,
3095  reusable_buffers - reusable_buffers_est);
3096 #endif
3097 
3098  /*
3099  * Consider the above scan as being like a new allocation scan.
3100  * Characterize its density and update the smoothed one based on it. This
3101  * effectively halves the moving average period in cases where both the
3102  * strategy and the background writer are doing some useful scanning,
3103  * which is helpful because a long memory isn't as desirable on the
3104  * density estimates.
3105  */
3106  new_strategy_delta = bufs_to_lap - num_to_scan;
3107  new_recent_alloc = reusable_buffers - reusable_buffers_est;
3108  if (new_strategy_delta > 0 && new_recent_alloc > 0)
3109  {
3110  scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
3111  smoothed_density += (scans_per_alloc - smoothed_density) /
3112  smoothing_samples;
3113 
3114 #ifdef BGW_DEBUG
3115  elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
3116  new_recent_alloc, new_strategy_delta,
3117  scans_per_alloc, smoothed_density);
3118 #endif
3119  }
3120 
3121  /* Return true if OK to hibernate */
3122  return (bufs_to_lap == 0 && recent_alloc == 0);
3123 }
int BgWriterDelay
Definition: bgwriter.c:61
#define BUF_REUSABLE
Definition: bufmgr.c:73
double bgwriter_lru_multiplier
Definition: bufmgr.c:138
static int SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
Definition: bufmgr.c:3140
int bgwriter_lru_maxpages
Definition: bufmgr.c:137
#define BUF_WRITTEN
Definition: bufmgr.c:72
unsigned int uint32
Definition: c.h:495
signed int int32
Definition: c.h:483
#define DEBUG2
Definition: elog.h:29
#define DEBUG1
Definition: elog.h:30
int StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
Definition: freelist.c:394
int NBuffers
Definition: globals.c:139
PgStat_BgWriterStats PendingBgWriterStats
PgStat_Counter buf_written_clean
Definition: pgstat.h:255
PgStat_Counter maxwritten_clean
Definition: pgstat.h:256
PgStat_Counter buf_alloc
Definition: pgstat.h:257

References Assert(), bgwriter_lru_maxpages, bgwriter_lru_multiplier, BgWriterDelay, PgStat_BgWriterStats::buf_alloc, BUF_REUSABLE, BUF_WRITTEN, PgStat_BgWriterStats::buf_written_clean, DEBUG1, DEBUG2, elog(), PgStat_BgWriterStats::maxwritten_clean, NBuffers, PendingBgWriterStats, StrategySyncStart(), and SyncOneBuffer().

Referenced by BackgroundWriterMain().

◆ BufferGetBlock()

static Block BufferGetBlock ( Buffer  buffer)
inlinestatic

Definition at line 317 of file bufmgr.h.

318 {
319  Assert(BufferIsValid(buffer));
320 
321  if (BufferIsLocal(buffer))
322  return LocalBufferBlockPointers[-buffer - 1];
323  else
324  return (Block) (BufferBlocks + ((Size) (buffer - 1)) * BLCKSZ);
325 }
#define BufferIsLocal(buffer)
Definition: buf.h:37
PGDLLIMPORT Block * LocalBufferBlockPointers
Definition: localbuf.c:46
void * Block
Definition: bufmgr.h:24
PGDLLIMPORT char * BufferBlocks
Definition: buf_init.c:22
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:301
size_t Size
Definition: c.h:594

References Assert(), BufferBlocks, BufferIsLocal, BufferIsValid(), and LocalBufferBlockPointers.

Referenced by BufferGetPage(), and XLogSaveBufferForHint().

◆ BufferGetBlockNumber()

BlockNumber BufferGetBlockNumber ( Buffer  buffer)

Definition at line 3378 of file bufmgr.c.

3379 {
3380  BufferDesc *bufHdr;
3381 
3382  Assert(BufferIsPinned(buffer));
3383 
3384  if (BufferIsLocal(buffer))
3385  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
3386  else
3387  bufHdr = GetBufferDescriptor(buffer - 1);
3388 
3389  /* pinned, so OK to read tag without spinlock */
3390  return bufHdr->tag.blockNum;
3391 }
static BufferDesc * GetLocalBufferDescriptor(uint32 id)
static BufferDesc * GetBufferDescriptor(uint32 id)
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:463
BufferTag tag
BlockNumber blockNum
Definition: buf_internals.h:98

References Assert(), buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), GetLocalBufferDescriptor(), and BufferDesc::tag.

Referenced by _bt_binsrch_insert(), _bt_bottomupdel_pass(), _bt_check_unique(), _bt_checkpage(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_doinsert(), _bt_endpoint(), _bt_finish_split(), _bt_first(), _bt_getroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newlevel(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_search(), _bt_simpledel_pass(), _bt_split(), _bt_unlink_halfdead_page(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_first(), _hash_freeovflpage(), _hash_getnewbuf(), _hash_readnext(), _hash_readpage(), _hash_splitbucket(), allocNewBuffer(), blinsert(), BloomInitMetapage(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_xlog_insert_update(), brinbuild(), brinGetTupleForHeapBlock(), collectMatchBitmap(), createPostingTree(), dataBeginPlaceToPageLeaf(), dataPrepareDownlink(), doPickSplit(), entryPrepareDownlink(), fill_seq_fork_with_data(), ginEntryInsert(), ginFindParents(), ginFinishSplit(), ginPlaceToPage(), ginRedoDeleteListPages(), ginRedoUpdateMetapage(), ginScanToDelete(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistformdownlink(), gistinserttuples(), gistMemorizeAllDownlinks(), gistplacetopage(), gistRelocateBuildBuffersOnSplit(), gistScanPage(), hash_xlog_add_ovfl_page(), heap_delete(), heap_hot_search_buffer(), heap_insert(), heap_multi_insert(), heap_page_is_all_visible(), heap_page_prune(), heap_prune_chain(), heap_update(), heap_xlog_confirm(), heap_xlog_lock(), index_compute_xid_horizon_for_tuples(), lazy_scan_noprune(), lazy_scan_prune(), makeSublist(), moveLeafs(), moveRightIfItNeeded(), pgstathashindex(), ReadBufferBI(), RelationAddBlocks(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_get_buffer(), revmap_physical_extend(), ScanSourceDatabasePgClassPage(), spgAddNodeAction(), spgbuild(), spgdoinsert(), 
SpGistSetLastUsedPage(), spgSplitNodeAction(), spgWalk(), startScanEntry(), terminate_brin_buildstate(), vacuumLeafPage(), visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), and visibilitymap_set().

◆ BufferGetLSNAtomic()

XLogRecPtr BufferGetLSNAtomic ( Buffer  buffer)

Definition at line 3639 of file bufmgr.c.

3640 {
3641  BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
3642  char *page = BufferGetPage(buffer);
3643  XLogRecPtr lsn;
3644  uint32 buf_state;
3645 
3646  /*
3647  * If we don't need locking for correctness, fastpath out.
3648  */
3649  if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
3650  return PageGetLSN(page);
3651 
3652  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3653  Assert(BufferIsValid(buffer));
3654  Assert(BufferIsPinned(buffer));
3655 
3656  buf_state = LockBufHdr(bufHdr);
3657  lsn = PageGetLSN(page);
3658  UnlockBufHdr(bufHdr, buf_state);
3659 
3660  return lsn;
3661 }
static void UnlockBufHdr(BufferDesc *desc, uint32 buf_state)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:5391
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:350
static XLogRecPtr PageGetLSN(Page page)
Definition: bufpage.h:383
#define XLogHintBitIsNeeded()
Definition: xlog.h:118
uint64 XLogRecPtr
Definition: xlogdefs.h:21

References Assert(), PrivateRefCountEntry::buffer, BufferGetPage(), BufferIsLocal, BufferIsPinned, BufferIsValid(), GetBufferDescriptor(), LockBufHdr(), PageGetLSN(), UnlockBufHdr(), and XLogHintBitIsNeeded.

Referenced by _bt_killitems(), _bt_readpage(), gistdoinsert(), gistFindPath(), gistkillitems(), gistScanPage(), SetHintBits(), and XLogSaveBufferForHint().

◆ BufferGetPage()

static Page BufferGetPage ( Buffer  buffer)
inlinestatic

Definition at line 350 of file bufmgr.h.

351 {
352  return (Page) BufferGetBlock(buffer);
353 }
static Block BufferGetBlock(Buffer buffer)
Definition: bufmgr.h:317
Pointer Page
Definition: bufpage.h:78

References BufferGetBlock().

Referenced by _bt_allocbuf(), _bt_binsrch(), _bt_binsrch_insert(), _bt_bottomupdel_pass(), _bt_check_unique(), _bt_checkpage(), _bt_clear_incomplete_split(), _bt_conditionallockbuf(), _bt_dedup_pass(), _bt_delete_or_dedup_one_page(), _bt_delitems_delete(), _bt_delitems_delete_check(), _bt_delitems_vacuum(), _bt_endpoint(), _bt_findinsertloc(), _bt_finish_split(), _bt_get_endpoint(), _bt_getmeta(), _bt_getroot(), _bt_getstackbuf(), _bt_gettrueroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_killitems(), _bt_leftsib_splitflag(), _bt_lock_subtree_parent(), _bt_lockbuf(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newlevel(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_rightsib_halfdeadflag(), _bt_search(), _bt_search_insert(), _bt_set_cleanup_info(), _bt_simpledel_pass(), _bt_split(), _bt_stepright(), _bt_unlink_halfdead_page(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _bt_vacuum_needs_cleanup(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_get_newblock_from_oldbucket(), _hash_get_oldblock_from_newbucket(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_getinitbuf(), _hash_getnewbuf(), _hash_init(), _hash_init_metabuffer(), _hash_initbitmapbuffer(), _hash_initbuf(), _hash_kill_items(), _hash_pgaddmultitup(), _hash_pgaddtup(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), allocNewBuffer(), blgetbitmap(), blinsert(), BloomNewBuffer(), blvacuumcleanup(), brin_can_do_samepage_update(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_start_evacuating_page(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), 
brinbuildempty(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinSetHeapBlockItemptr(), bt_metap(), bt_page_items_internal(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), btvacuumpage(), BufferGetLSNAtomic(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), collectMatchesForHeapRow(), count_nondeletable_pages(), createPostingTree(), dataBeginPlaceToPage(), dataBeginPlaceToPageInternal(), dataBeginPlaceToPageLeaf(), dataExecPlaceToPage(), dataExecPlaceToPageInternal(), dataLocateItem(), dataPlaceToPageLeafRecompress(), dataPrepareDownlink(), dataSplitPageInternal(), do_setval(), doPickSplit(), entryExecPlaceToPage(), entryIsEnoughSpace(), entryLoadMoreItems(), entryLocateEntry(), entryLocateLeafEntry(), entryPrepareDownlink(), entrySplitPage(), fill_seq_fork_with_data(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), generic_redo(), GenericXLogFinish(), GenericXLogRegisterBuffer(), get_raw_page_internal(), GetBTPageStatistics(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginbulkdelete(), ginDeletePage(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishOldSplit(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), GinInitBuffer(), GinInitMetabuffer(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertData(), ginRedoInsertEntry(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumEntryPage(), 
ginVacuumPostingTreeLeaf(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), GISTInitBuffer(), gistkillitems(), gistMemorizeAllDownlinks(), gistNewBuffer(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hash_bitmap_info(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_freeze_execute_prepared(), heap_get_latest_tid(), heap_hot_search_buffer(), heap_index_delete_tuples(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_is_all_visible(), heap_page_prune(), heap_page_prune_execute(), heap_page_prune_opt(), heap_prune_chain(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_validate_scan(), heapam_scan_analyze_next_tuple(), heapam_scan_bitmap_next_block(), heapam_scan_bitmap_next_tuple(), heapam_scan_sample_next_tuple(), heapgetpage(), heapgettup_continue_page(), heapgettup_pagemode(), heapgettup_start_page(), 
index_compute_xid_horizon_for_tuples(), initBloomState(), lazy_scan_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), log_heap_update(), log_newpage_buffer(), log_newpage_range(), log_split_page(), MarkBufferDirtyHint(), moveLeafs(), moveRightIfItNeeded(), nextval_internal(), palloc_btree_page(), pg_visibility(), pgstat_btree_page(), pgstat_gist_page(), pgstat_hash_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationAddBlocks(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), ScanSourceDatabasePgClass(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistInitBuffer(), SpGistNewBuffer(), SpGistSetLastUsedPage(), SpGistUpdateMetaPage(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), terminate_brin_buildstate(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), verify_heapam(), verifyBackupPageConsistency(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), writeListPage(), XLogCheckBufferNeedsBackup(), XLogReadBufferExtended(), XLogReadBufferForRedoExtended(), XLogRecordPageWithFreeSpace(), XLogRegisterBuffer(), XLogSaveBufferForHint(), and xlogVacuumPage().

◆ BufferGetPageSize()

◆ BufferGetTag()

void BufferGetTag ( Buffer  buffer,
RelFileLocator rlocator,
ForkNumber forknum,
BlockNumber blknum 
)

Definition at line 3399 of file bufmgr.c.

3401 {
3402  BufferDesc *bufHdr;
3403 
3404  /* Do the same checks as BufferGetBlockNumber. */
3405  Assert(BufferIsPinned(buffer));
3406 
3407  if (BufferIsLocal(buffer))
3408  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
3409  else
3410  bufHdr = GetBufferDescriptor(buffer - 1);
3411 
3412  /* pinned, so OK to read tag without spinlock */
3413  *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
3414  *forknum = BufTagGetForkNum(&bufHdr->tag);
3415  *blknum = bufHdr->tag.blockNum;
3416 }
static ForkNumber BufTagGetForkNum(const BufferTag *tag)
static RelFileLocator BufTagGetRelFileLocator(const BufferTag *tag)

References Assert(), buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufTagGetForkNum(), BufTagGetRelFileLocator(), GetBufferDescriptor(), GetLocalBufferDescriptor(), and BufferDesc::tag.

Referenced by fsm_search_avail(), ginRedoInsertEntry(), log_newpage_buffer(), ResolveCminCmaxDuringDecoding(), XLogRegisterBuffer(), and XLogSaveBufferForHint().

◆ BufferIsDirty()

bool BufferIsDirty ( Buffer  buffer)

Definition at line 2158 of file bufmgr.c.

2159 {
2160  BufferDesc *bufHdr;
2161 
2162  if (BufferIsLocal(buffer))
2163  {
2164  int bufid = -buffer - 1;
2165 
2166  bufHdr = GetLocalBufferDescriptor(bufid);
2167  }
2168  else
2169  {
2170  bufHdr = GetBufferDescriptor(buffer - 1);
2171  }
2172 
2173  Assert(BufferIsPinned(buffer));
2174  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2175  LW_EXCLUSIVE));
2176 
2177  return pg_atomic_read_u32(&bufHdr->state) & BM_DIRTY;
2178 }
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:236
#define BM_DIRTY
Definition: buf_internals.h:61
static LWLock * BufferDescriptorGetContentLock(const BufferDesc *bdesc)
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1937
@ LW_EXCLUSIVE
Definition: lwlock.h:116
pg_atomic_uint32 state

References Assert(), BM_DIRTY, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), GetLocalBufferDescriptor(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), pg_atomic_read_u32(), and BufferDesc::state.

Referenced by XLogRegisterBuffer().

◆ BufferIsExclusiveLocked()

bool BufferIsExclusiveLocked ( Buffer  buffer)

Definition at line 2129 of file bufmgr.c.

2130 {
2131  BufferDesc *bufHdr;
2132 
2133  if (BufferIsLocal(buffer))
2134  {
2135  int bufid = -buffer - 1;
2136 
2137  bufHdr = GetLocalBufferDescriptor(bufid);
2138  }
2139  else
2140  {
2141  bufHdr = GetBufferDescriptor(buffer - 1);
2142  }
2143 
2144  Assert(BufferIsPinned(buffer));
2145  return LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2146  LW_EXCLUSIVE);
2147 }

References Assert(), PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), GetLocalBufferDescriptor(), LW_EXCLUSIVE, and LWLockHeldByMeInMode().

Referenced by XLogRegisterBuffer().

◆ BufferIsPermanent()

bool BufferIsPermanent ( Buffer  buffer)

Definition at line 3609 of file bufmgr.c.

3610 {
3611  BufferDesc *bufHdr;
3612 
3613  /* Local buffers are used only for temp relations. */
3614  if (BufferIsLocal(buffer))
3615  return false;
3616 
3617  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3618  Assert(BufferIsValid(buffer));
3619  Assert(BufferIsPinned(buffer));
3620 
3621  /*
3622  * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
3623  * need not bother with the buffer header spinlock. Even if someone else
3624  * changes the buffer header state while we're doing this, the state is
3625  * changed atomically, so we'll read the old value or the new value, but
3626  * not random garbage.
3627  */
3628  bufHdr = GetBufferDescriptor(buffer - 1);
3629  return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
3630 }
#define BM_PERMANENT
Definition: buf_internals.h:69

References Assert(), BM_PERMANENT, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufferIsValid(), GetBufferDescriptor(), pg_atomic_read_u32(), and BufferDesc::state.

Referenced by SetHintBits().

◆ BufferIsValid()

static bool BufferIsValid ( Buffer  bufnum)
inlinestatic

Definition at line 301 of file bufmgr.h.

302 {
303  Assert(bufnum <= NBuffers);
304  Assert(bufnum >= -NLocBuffer);
305 
306  return bufnum != InvalidBuffer;
307 }
#define InvalidBuffer
Definition: buf.h:25
PGDLLIMPORT int NBuffers
Definition: globals.c:139
PGDLLIMPORT int NLocBuffer
Definition: localbuf.c:43

References Assert(), InvalidBuffer, NBuffers, and NLocBuffer.

Referenced by _bt_clear_incomplete_split(), _bt_endpoint(), _bt_first(), _bt_get_endpoint(), _bt_insertonpg(), _bt_readpage(), _bt_relandgetbuf(), _bt_search(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_dropscanbuf(), _hash_freeovflpage(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_readnext(), _hash_readpage(), _hash_readprev(), autoprewarm_database_main(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinGetTupleForHeapBlock(), brininsert(), brinsummarize(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), BufferGetBlock(), BufferGetLSNAtomic(), BufferGetPageSize(), BufferIsPermanent(), ConditionalLockBufferForCleanup(), DebugPrintBufferRefcount(), doPickSplit(), entryGetItem(), entryLoadMoreItems(), ExecStoreBufferHeapTuple(), ExecStorePinnedBufferHeapTuple(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_vacuum_page(), generic_redo(), GetPrivateRefCount(), GetPrivateRefCountEntry(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageUpdateRecord(), gistXLogSplit(), gistXLogUpdate(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), 
heap_endscan(), heap_index_delete_tuples(), heap_lock_tuple(), heap_rescan(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), heapgettup_continue_page(), heapgettup_pagemode(), heapgettup_start_page(), IsBufferCleanupOK(), lazy_scan_heap(), lazy_vacuum_heap_rel(), log_heap_visible(), MarkBufferDirty(), MarkBufferDirtyHint(), ReadRecentBuffer(), ReleaseAndReadBuffer(), ReleaseBuffer(), ResOwnerReleaseBufferPin(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), statapprox_heap(), tts_buffer_heap_clear(), tts_buffer_heap_copyslot(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), verifyBackupPageConsistency(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), visibilitymap_prepare_truncate(), visibilitymap_set(), XLogPrefetcherNextBlock(), XLogReadBufferExtended(), and XLogReadBufferForRedoExtended().

◆ BufferShmemSize()

Size BufferShmemSize ( void  )

Definition at line 160 of file buf_init.c.

161 {
162  Size size = 0;
163 
164  /* size of buffer descriptors */
165  size = add_size(size, mul_size(NBuffers, sizeof(BufferDescPadded)));
166  /* to allow aligning buffer descriptors */
167  size = add_size(size, PG_CACHE_LINE_SIZE);
168 
169  /* size of data pages, plus alignment padding */
170  size = add_size(size, PG_IO_ALIGN_SIZE);
171  size = add_size(size, mul_size(NBuffers, BLCKSZ));
172 
173  /* size of stuff controlled by freelist.c */
174  size = add_size(size, StrategyShmemSize());
175 
176  /* size of I/O condition variables */
177  size = add_size(size, mul_size(NBuffers,
178  sizeof(ConditionVariableMinimallyPadded)));
179  /* to allow aligning the above */
180  size = add_size(size, PG_CACHE_LINE_SIZE);
181 
182  /* size of checkpoint sort array in bufmgr.c */
183  size = add_size(size, mul_size(NBuffers, sizeof(CkptSortItem)));
184 
185  return size;
186 }
Size StrategyShmemSize(void)
Definition: freelist.c:453
#define PG_IO_ALIGN_SIZE
#define PG_CACHE_LINE_SIZE
Size add_size(Size s1, Size s2)
Definition: shmem.c:494
Size mul_size(Size s1, Size s2)
Definition: shmem.c:511

References add_size(), mul_size(), NBuffers, PG_CACHE_LINE_SIZE, PG_IO_ALIGN_SIZE, and StrategyShmemSize().

Referenced by CalculateShmemSize().

◆ CheckBufferIsPinnedOnce()

void CheckBufferIsPinnedOnce ( Buffer  buffer)

Definition at line 4843 of file bufmgr.c.

4844 {
4845  if (BufferIsLocal(buffer))
4846  {
4847  if (LocalRefCount[-buffer - 1] != 1)
4848  elog(ERROR, "incorrect local pin count: %d",
4849  LocalRefCount[-buffer - 1]);
4850  }
4851  else
4852  {
4853  if (GetPrivateRefCount(buffer) != 1)
4854  elog(ERROR, "incorrect local pin count: %d",
4855  GetPrivateRefCount(buffer));
4856  }
4857 }
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:405
#define ERROR
Definition: elog.h:39
int32 * LocalRefCount
Definition: localbuf.c:47

References PrivateRefCountEntry::buffer, BufferIsLocal, elog(), ERROR, GetPrivateRefCount(), and LocalRefCount.

Referenced by GetVictimBuffer(), and LockBufferForCleanup().

◆ CheckPointBuffers()

void CheckPointBuffers ( int  flags)

Definition at line 3364 of file bufmgr.c.

3365 {
3366  BufferSync(flags);
3367 }
static void BufferSync(int flags)
Definition: bufmgr.c:2566

References BufferSync().

Referenced by CheckPointGuts().

◆ ConditionalLockBuffer()

bool ConditionalLockBuffer ( Buffer  buffer)

Definition at line 4822 of file bufmgr.c.

4823 {
4824  BufferDesc *buf;
4825 
4826  Assert(BufferIsPinned(buffer));
4827  if (BufferIsLocal(buffer))
4828  return true; /* act as though we got it */
4829 
4830  buf = GetBufferDescriptor(buffer - 1);
4831 
4832  return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
4833  LW_EXCLUSIVE);
4834 }
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1339
static char * buf
Definition: pg_test_fsync.c:73

References Assert(), buf, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), LW_EXCLUSIVE, and LWLockConditionalAcquire().

Referenced by _bt_conditionallockbuf(), BloomNewBuffer(), ConditionalLockBufferForCleanup(), GinNewBuffer(), gistNewBuffer(), RelationGetBufferForTuple(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), and SpGistUpdateMetaPage().

◆ ConditionalLockBufferForCleanup()

bool ConditionalLockBufferForCleanup ( Buffer  buffer)

Definition at line 5037 of file bufmgr.c.

5038 {
5039  BufferDesc *bufHdr;
5040  uint32 buf_state,
5041  refcount;
5042 
5043  Assert(BufferIsValid(buffer));
5044 
5045  if (BufferIsLocal(buffer))
5046  {
5047  refcount = LocalRefCount[-buffer - 1];
5048  /* There should be exactly one pin */
5049  Assert(refcount > 0);
5050  if (refcount != 1)
5051  return false;
5052  /* Nobody else to wait for */
5053  return true;
5054  }
5055 
5056  /* There should be exactly one local pin */
5057  refcount = GetPrivateRefCount(buffer);
5058  Assert(refcount);
5059  if (refcount != 1)
5060  return false;
5061 
5062  /* Try to acquire lock */
5063  if (!ConditionalLockBuffer(buffer))
5064  return false;
5065 
5066  bufHdr = GetBufferDescriptor(buffer - 1);
5067  buf_state = LockBufHdr(bufHdr);
5068  refcount = BUF_STATE_GET_REFCOUNT(buf_state);
5069 
5070  Assert(refcount > 0);
5071  if (refcount == 1)
5072  {
5073  /* Successfully acquired exclusive lock with pincount 1 */
5074  UnlockBufHdr(bufHdr, buf_state);
5075  return true;
5076  }
5077 
5078  /* Failed, so release the lock */
5079  UnlockBufHdr(bufHdr, buf_state);
5080  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5081  return false;
5082 }
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:51
bool ConditionalLockBuffer(Buffer buffer)
Definition: bufmgr.c:4822
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4796
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:157

References Assert(), BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsValid(), ConditionalLockBuffer(), GetBufferDescriptor(), GetPrivateRefCount(), LocalRefCount, LockBuffer(), LockBufHdr(), PrivateRefCountEntry::refcount, and UnlockBufHdr().

Referenced by _hash_finish_split(), _hash_getbuf_with_condlock_cleanup(), heap_page_prune_opt(), and lazy_scan_heap().

◆ CreateAndCopyRelationData()

void CreateAndCopyRelationData ( RelFileLocator  src_rlocator,
RelFileLocator  dst_rlocator,
bool  permanent 
)

Definition at line 4435 of file bufmgr.c.

4437 {
4438  char relpersistence;
4439  SMgrRelation src_rel;
4440  SMgrRelation dst_rel;
4441 
4442  /* Set the relpersistence. */
4443  relpersistence = permanent ?
4444  RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED;
4445 
4446  src_rel = smgropen(src_rlocator, InvalidBackendId);
4447  dst_rel = smgropen(dst_rlocator, InvalidBackendId);
4448 
4449  /*
4450  * Create and copy all forks of the relation. During create database we
4451  * have a separate cleanup mechanism which deletes complete database
4452  * directory. Therefore, each individual relation doesn't need to be
4453  * registered for cleanup.
4454  */
4455  RelationCreateStorage(dst_rlocator, relpersistence, false);
4456 
4457  /* copy main fork. */
4458  RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, MAIN_FORKNUM,
4459  permanent);
4460 
4461  /* copy those extra forks that exist */
4462  for (ForkNumber forkNum = MAIN_FORKNUM + 1;
4463  forkNum <= MAX_FORKNUM; forkNum++)
4464  {
4465  if (smgrexists(src_rel, forkNum))
4466  {
4467  smgrcreate(dst_rel, forkNum, false);
4468 
4469  /*
4470  * WAL log creation if the relation is persistent, or this is the
4471  * init fork of an unlogged relation.
4472  */
4473  if (permanent || forkNum == INIT_FORKNUM)
4474  log_smgrcreate(&dst_rlocator, forkNum);
4475 
4476  /* Copy a fork's data, block by block. */
4477  RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, forkNum,
4478  permanent);
4479  }
4480  }
4481 }
#define InvalidBackendId
Definition: backendid.h:23
static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator, RelFileLocator dstlocator, ForkNumber forkNum, bool permanent)
Definition: bufmgr.c:4344
ForkNumber
Definition: relpath.h:48
@ MAIN_FORKNUM
Definition: relpath.h:50
@ INIT_FORKNUM
Definition: relpath.h:53
#define MAX_FORKNUM
Definition: relpath.h:62
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:414
SMgrRelation smgropen(RelFileLocator rlocator, BackendId backend)
Definition: smgr.c:199
bool smgrexists(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:401
SMgrRelation RelationCreateStorage(RelFileLocator rlocator, char relpersistence, bool register_delete)
Definition: storage.c:121
void log_smgrcreate(const RelFileLocator *rlocator, ForkNumber forkNum)
Definition: storage.c:186

References INIT_FORKNUM, InvalidBackendId, log_smgrcreate(), MAIN_FORKNUM, MAX_FORKNUM, RelationCopyStorageUsingBuffer(), RelationCreateStorage(), smgrcreate(), smgrexists(), and smgropen().

Referenced by CreateDatabaseUsingWalLog().

◆ DebugPrintBufferRefcount()

char* DebugPrintBufferRefcount ( Buffer  buffer)

Definition at line 3319 of file bufmgr.c.

3320 {
3321  BufferDesc *buf;
3322  int32 loccount;
3323  char *path;
3324  char *result;
3325  BackendId backend;
3326  uint32 buf_state;
3327 
3328  Assert(BufferIsValid(buffer));
3329  if (BufferIsLocal(buffer))
3330  {
3331  buf = GetLocalBufferDescriptor(-buffer - 1);
3332  loccount = LocalRefCount[-buffer - 1];
3333  backend = MyBackendId;
3334  }
3335  else
3336  {
3337  buf = GetBufferDescriptor(buffer - 1);
3338  loccount = GetPrivateRefCount(buffer);
3339  backend = InvalidBackendId;
3340  }
3341 
3342  /* theoretically we should lock the bufhdr here */
3343  path = relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
3344  BufTagGetForkNum(&buf->tag));
3345  buf_state = pg_atomic_read_u32(&buf->state);
3346 
3347  result = psprintf("[%03d] (rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
3348  buffer, path,
3349  buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
3350  BUF_STATE_GET_REFCOUNT(buf_state), loccount);
3351  pfree(path);
3352  return result;
3353 }
int BackendId
Definition: backendid.h:21
#define BUF_FLAG_MASK
Definition: buf_internals.h:48
BackendId MyBackendId
Definition: globals.c:86
void pfree(void *pointer)
Definition: mcxt.c:1431
char * psprintf(const char *fmt,...)
Definition: psprintf.c:46
#define relpathbackend(rlocator, backend, forknum)
Definition: relpath.h:85

References Assert(), buf, BUF_FLAG_MASK, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsValid(), BufTagGetForkNum(), BufTagGetRelFileLocator(), GetBufferDescriptor(), GetLocalBufferDescriptor(), GetPrivateRefCount(), InvalidBackendId, LocalRefCount, MyBackendId, pfree(), pg_atomic_read_u32(), psprintf(), and relpathbackend.

Referenced by CheckForBufferLeaks(), CheckForLocalBufferLeaks(), and ResOwnerPrintBufferPin().

◆ DropDatabaseBuffers()

void DropDatabaseBuffers ( Oid  dbid)

Definition at line 4040 of file bufmgr.c.

4041 {
4042  int i;
4043 
4044  /*
4045  * We needn't consider local buffers, since by assumption the target
4046  * database isn't our own.
4047  */
4048 
4049  for (i = 0; i < NBuffers; i++)
4050  {
4051  BufferDesc *bufHdr = GetBufferDescriptor(i);
4052  uint32 buf_state;
4053 
4054  /*
4055  * As in DropRelationBuffers, an unlocked precheck should be safe and
4056  * saves some cycles.
4057  */
4058  if (bufHdr->tag.dbOid != dbid)
4059  continue;
4060 
4061  buf_state = LockBufHdr(bufHdr);
4062  if (bufHdr->tag.dbOid == dbid)
4063  InvalidateBuffer(bufHdr); /* releases spinlock */
4064  else
4065  UnlockBufHdr(bufHdr, buf_state);
4066  }
4067 }
static void InvalidateBuffer(BufferDesc *buf)
Definition: bufmgr.c:1442
int i
Definition: isn.c:73
Oid dbOid
Definition: buf_internals.h:95

References buftag::dbOid, GetBufferDescriptor(), i, InvalidateBuffer(), LockBufHdr(), NBuffers, BufferDesc::tag, and UnlockBufHdr().

Referenced by createdb_failure_callback(), dbase_redo(), dropdb(), and movedb().

◆ DropRelationBuffers()

void DropRelationBuffers ( struct SMgrRelationData smgr_reln,
ForkNumber forkNum,
int  nforks,
BlockNumber firstDelBlock 
)

◆ DropRelationsAllBuffers()

void DropRelationsAllBuffers ( struct SMgrRelationData **  smgr_reln,
int  nlocators 
)

◆ ExtendBufferedRel()

Buffer ExtendBufferedRel ( BufferManagerRelation  bmr,
ForkNumber  forkNum,
BufferAccessStrategy  strategy,
uint32  flags 
)

Definition at line 839 of file bufmgr.c.

843 {
844  Buffer buf;
845  uint32 extend_by = 1;
846 
847  ExtendBufferedRelBy(bmr, forkNum, strategy, flags, extend_by,
848  &buf, &extend_by);
849 
850  return buf;
851 }
int Buffer
Definition: buf.h:23
BlockNumber ExtendBufferedRelBy(BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, uint32 extend_by, Buffer *buffers, uint32 *extended_by)
Definition: bufmgr.c:871

References buf, and ExtendBufferedRelBy().

Referenced by _bt_allocbuf(), _hash_getnewbuf(), BloomNewBuffer(), brinbuild(), brinbuildempty(), fill_seq_fork_with_data(), ginbuildempty(), GinNewBuffer(), gistbuildempty(), gistNewBuffer(), ReadBuffer_common(), revmap_physical_extend(), and SpGistNewBuffer().

◆ ExtendBufferedRelBy()

BlockNumber ExtendBufferedRelBy ( BufferManagerRelation  bmr,
ForkNumber  fork,
BufferAccessStrategy  strategy,
uint32  flags,
uint32  extend_by,
Buffer buffers,
uint32 extended_by 
)

Definition at line 871 of file bufmgr.c.

878 {
879  Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
880  Assert(bmr.smgr == NULL || bmr.relpersistence != 0);
881  Assert(extend_by > 0);
882 
883  if (bmr.smgr == NULL)
884  {
885  bmr.smgr = RelationGetSmgr(bmr.rel);
886  bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
887  }
888 
889  return ExtendBufferedRelCommon(bmr, fork, strategy, flags,
890  extend_by, InvalidBlockNumber,
891  buffers, extended_by);
892 }
#define InvalidBlockNumber
Definition: block.h:33
static BlockNumber ExtendBufferedRelCommon(BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, uint32 extend_by, BlockNumber extend_upto, Buffer *buffers, uint32 *extended_by)
Definition: bufmgr.c:1805
static SMgrRelation RelationGetSmgr(Relation rel)
Definition: rel.h:566
struct SMgrRelationData * smgr
Definition: bufmgr.h:102
Form_pg_class rd_rel
Definition: rel.h:111

References Assert(), ExtendBufferedRelCommon(), InvalidBlockNumber, RelationData::rd_rel, BufferManagerRelation::rel, RelationGetSmgr(), BufferManagerRelation::relpersistence, and BufferManagerRelation::smgr.

Referenced by ExtendBufferedRel(), and RelationAddBlocks().

◆ ExtendBufferedRelTo()

Buffer ExtendBufferedRelTo ( BufferManagerRelation  bmr,
ForkNumber  fork,
BufferAccessStrategy  strategy,
uint32  flags,
BlockNumber  extend_to,
ReadBufferMode  mode 
)

Definition at line 903 of file bufmgr.c.

909 {
910  BlockNumber current_size;
911  uint32 extended_by = 0;
912  Buffer buffer = InvalidBuffer;
913  Buffer buffers[64];
914 
915  Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
916  Assert(bmr.smgr == NULL || bmr.relpersistence != 0);
917  Assert(extend_to != InvalidBlockNumber && extend_to > 0);
918 
919  if (bmr.smgr == NULL)
920  {
921  bmr.smgr = RelationGetSmgr(bmr.rel);
922  bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
923  }
924 
925  /*
926  * If desired, create the file if it doesn't exist. If
927  * smgr_cached_nblocks[fork] is positive then it must exist, no need for
928  * an smgrexists call.
929  */
930  if ((flags & EB_CREATE_FORK_IF_NEEDED) &&
931  (bmr.smgr->smgr_cached_nblocks[fork] == 0 ||
932  bmr.smgr->smgr_cached_nblocks[fork] == InvalidBlockNumber) &&
933  !smgrexists(bmr.smgr, fork))
934  {
935  LockRelationForExtension(bmr.rel, ExclusiveLock);
936 
937  /* recheck, fork might have been created concurrently */
938  if (!smgrexists(bmr.smgr, fork))
939  smgrcreate(bmr.smgr, fork, flags & EB_PERFORMING_RECOVERY);
940 
941  UnlockRelationForExtension(bmr.rel, ExclusiveLock);
942  }
943 
944  /*
945  * If requested, invalidate size cache, so that smgrnblocks asks the
946  * kernel.
947  */
948  if (flags & EB_CLEAR_SIZE_CACHE)
949  bmr.smgr->smgr_cached_nblocks[fork] = InvalidBlockNumber;
950 
951  /*
952  * Estimate how many pages we'll need to extend by. This avoids acquiring
953  * unnecessarily many victim buffers.
954  */
955  current_size = smgrnblocks(bmr.smgr, fork);
956 
957  /*
958  * Since no-one else can be looking at the page contents yet, there is no
959  * difference between an exclusive lock and a cleanup-strength lock. Note
960  * that we pass the original mode to ReadBuffer_common() below, when
961  * falling back to reading the buffer to a concurrent relation extension.
962  */
963  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
964  flags |= EB_LOCK_TARGET;
965 
966  while (current_size < extend_to)
967  {
968  uint32 num_pages = lengthof(buffers);
969  BlockNumber first_block;
970 
971  if ((uint64) current_size + num_pages > extend_to)
972  num_pages = extend_to - current_size;
973 
974  first_block = ExtendBufferedRelCommon(bmr, fork, strategy, flags,
975  num_pages, extend_to,
976  buffers, &extended_by);
977 
978  current_size = first_block + extended_by;
979  Assert(num_pages != 0 || current_size >= extend_to);
980 
981  for (uint32 i = 0; i < extended_by; i++)
982  {
983  if (first_block + i != extend_to - 1)
984  ReleaseBuffer(buffers[i]);
985  else
986  buffer = buffers[i];
987  }
988  }
989 
990  /*
991  * It's possible that another backend concurrently extended the relation.
992  * In that case read the buffer.
993  *
994  * XXX: Should we control this via a flag?
995  */
996  if (buffer == InvalidBuffer)
997  {
998  bool hit;
999 
1000  Assert(extended_by == 0);
1001  buffer = ReadBuffer_common(bmr.smgr, bmr.relpersistence,
1002  fork, extend_to - 1, mode, strategy,
1003  &hit);
1004  }
1005 
1006  return buffer;
1007 }
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4561
static Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit)
Definition: bufmgr.c:1015
#define lengthof(array)
Definition: c.h:777
void LockRelationForExtension(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:431
void UnlockRelationForExtension(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:481
#define ExclusiveLock
Definition: lockdefs.h:42
static PgChecksumMode mode
Definition: pg_checksums.c:56
int64 current_size
Definition: pg_checksums.c:64
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:658
BlockNumber smgr_cached_nblocks[MAX_FORKNUM+1]
Definition: smgr.h:46

References Assert(), PrivateRefCountEntry::buffer, current_size, EB_CLEAR_SIZE_CACHE, EB_CREATE_FORK_IF_NEEDED, EB_LOCK_TARGET, EB_PERFORMING_RECOVERY, ExclusiveLock, ExtendBufferedRelCommon(), i, InvalidBlockNumber, InvalidBuffer, lengthof, LockRelationForExtension(), mode, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, RelationData::rd_rel, ReadBuffer_common(), BufferManagerRelation::rel, RelationGetSmgr(), ReleaseBuffer(), BufferManagerRelation::relpersistence, BufferManagerRelation::smgr, SMgrRelationData::smgr_cached_nblocks, smgrcreate(), smgrexists(), smgrnblocks(), and UnlockRelationForExtension().

Referenced by fsm_extend(), vm_extend(), and XLogReadBufferExtended().

◆ FlushDatabaseBuffers()

void FlushDatabaseBuffers ( Oid  dbid)

Definition at line 4499 of file bufmgr.c.

4500 {
4501  int i;
4502  BufferDesc *bufHdr;
4503 
4504  for (i = 0; i < NBuffers; i++)
4505  {
4506  uint32 buf_state;
4507 
4508  bufHdr = GetBufferDescriptor(i);
4509 
4510  /*
4511  * As in DropRelationBuffers, an unlocked precheck should be safe and
4512  * saves some cycles.
4513  */
4514  if (bufHdr->tag.dbOid != dbid)
4515  continue;
4516 
4517  /* Make sure we can handle the pin */
4518  ReservePrivateRefCountEntry();
4519  ResourceOwnerEnlarge(CurrentResourceOwner);
4520 
4521  buf_state = LockBufHdr(bufHdr);
4522  if (bufHdr->tag.dbOid == dbid &&
4523  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4524  {
4525  PinBuffer_Locked(bufHdr);
4526  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
4527  FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
4528  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
4529  UnpinBuffer(bufHdr);
4530  }
4531  else
4532  UnlockBufHdr(bufHdr, buf_state);
4533  }
4534 }
#define BM_VALID
Definition: buf_internals.h:62
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln, IOObject io_object, IOContext io_context)
Definition: bufmgr.c:3438
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:2417
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:239
static void UnpinBuffer(BufferDesc *buf)
Definition: bufmgr.c:2460
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1168
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1781
@ LW_SHARED
Definition: lwlock.h:117
@ IOOBJECT_RELATION
Definition: pgstat.h:280
@ IOCONTEXT_NORMAL
Definition: pgstat.h:290
ResourceOwner CurrentResourceOwner
Definition: resowner.c:165
void ResourceOwnerEnlarge(ResourceOwner owner)
Definition: resowner.c:442

References BM_DIRTY, BM_VALID, BufferDescriptorGetContentLock(), CurrentResourceOwner, buftag::dbOid, FlushBuffer(), GetBufferDescriptor(), i, IOCONTEXT_NORMAL, IOOBJECT_RELATION, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, PinBuffer_Locked(), ReservePrivateRefCountEntry(), ResourceOwnerEnlarge(), BufferDesc::tag, UnlockBufHdr(), and UnpinBuffer().

Referenced by dbase_redo().

◆ FlushOneBuffer()

void FlushOneBuffer ( Buffer  buffer)

Definition at line 4541 of file bufmgr.c.

4542 {
4543  BufferDesc *bufHdr;
4544 
4545  /* currently not needed, but no fundamental reason not to support */
4546  Assert(!BufferIsLocal(buffer));
4547 
4548  Assert(BufferIsPinned(buffer));
4549 
4550  bufHdr = GetBufferDescriptor(buffer - 1);
4551 
4552  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
4553 
4554  FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
4555 }
bool LWLockHeldByMe(LWLock *lock)
Definition: lwlock.c:1893

References Assert(), PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, FlushBuffer(), GetBufferDescriptor(), IOCONTEXT_NORMAL, IOOBJECT_RELATION, and LWLockHeldByMe().

Referenced by hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), and XLogReadBufferForRedoExtended().

◆ FlushRelationBuffers()

void FlushRelationBuffers ( Relation  rel)

Definition at line 4146 of file bufmgr.c.

4147 {
4148  int i;
4149  BufferDesc *bufHdr;
4150  SMgrRelation srel = RelationGetSmgr(rel);
4151 
4152  if (RelationUsesLocalBuffers(rel))
4153  {
4154  for (i = 0; i < NLocBuffer; i++)
4155  {
4156  uint32 buf_state;
4157  instr_time io_start;
4158 
4159  bufHdr = GetLocalBufferDescriptor(i);
4160  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
4161  ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
4162  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4163  {
4164  ErrorContextCallback errcallback;
4165  Page localpage;
4166 
4167  localpage = (char *) LocalBufHdrGetBlock(bufHdr);
4168 
4169  /* Setup error traceback support for ereport() */
4171  errcallback.arg = (void *) bufHdr;
4172  errcallback.previous = error_context_stack;
4173  error_context_stack = &errcallback;
4174 
4175  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
4176 
4178 
4179  smgrwrite(srel,
4180  BufTagGetForkNum(&bufHdr->tag),
4181  bufHdr->tag.blockNum,
4182  localpage,
4183  false);
4184 
4187  io_start, 1);
4188 
4189  buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
4190  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
4191 
4193 
4194  /* Pop the error context stack */
4195  error_context_stack = errcallback.previous;
4196  }
4197  }
4198 
4199  return;
4200  }
4201 
4202  for (i = 0; i < NBuffers; i++)
4203  {
4204  uint32 buf_state;
4205 
4206  bufHdr = GetBufferDescriptor(i);
4207 
4208  /*
4209  * As in DropRelationBuffers, an unlocked precheck should be safe and
4210  * saves some cycles.
4211  */
4212  if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
4213  continue;
4214 
4215  /* Make sure we can handle the pin */
4218 
4219  buf_state = LockBufHdr(bufHdr);
4220  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
4221  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4222  {
4223  PinBuffer_Locked(bufHdr);
4227  UnpinBuffer(bufHdr);
4228  }
4229  else
4230  UnlockBufHdr(bufHdr, buf_state);
4231  }
4232 }
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:272
static bool BufTagMatchesRelFileLocator(const BufferTag *tag, const RelFileLocator *rlocator)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:66
bool track_io_timing
Definition: bufmgr.c:139
#define LocalBufHdrGetBlock(bufHdr)
Definition: bufmgr.c:68
static void local_buffer_write_error_callback(void *arg)
Definition: bufmgr.c:5344
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1542
ErrorContextCallback * error_context_stack
Definition: elog.c:95
BufferUsage pgBufferUsage
Definition: instrument.c:20
int NLocBuffer
Definition: localbuf.c:43
@ IOOBJECT_TEMP_RELATION
Definition: pgstat.h:281
@ IOOP_WRITE
Definition: pgstat.h:304
instr_time pgstat_prepare_io_time(bool track_io_guc)
Definition: pgstat_io.c:100
void pgstat_count_io_op_time(IOObject io_object, IOContext io_context, IOOp io_op, instr_time start_time, uint32 cnt)
Definition: pgstat_io.c:122
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:636
static void smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const void *buffer, bool skipFsync)
Definition: smgr.h:121
int64 local_blks_written
Definition: instrument.h:33
struct ErrorContextCallback * previous
Definition: elog.h:295
void(* callback)(void *arg)
Definition: elog.h:296
RelFileLocator rd_locator
Definition: rel.h:57

References ErrorContextCallback::arg, buftag::blockNum, BM_DIRTY, BM_JUST_DIRTIED, BM_VALID, BufferDescriptorGetContentLock(), BufTagGetForkNum(), BufTagMatchesRelFileLocator(), ErrorContextCallback::callback, CurrentResourceOwner, error_context_stack, FlushBuffer(), GetBufferDescriptor(), GetLocalBufferDescriptor(), i, IOCONTEXT_NORMAL, IOOBJECT_RELATION, IOOBJECT_TEMP_RELATION, IOOP_WRITE, BufferUsage::local_blks_written, local_buffer_write_error_callback(), LocalBufHdrGetBlock, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, NLocBuffer, PageSetChecksumInplace(), pg_atomic_read_u32(), pg_atomic_unlocked_write_u32(), pgBufferUsage, pgstat_count_io_op_time(), pgstat_prepare_io_time(), PinBuffer_Locked(), ErrorContextCallback::previous, RelationData::rd_locator, RelationGetSmgr(), RelationUsesLocalBuffers, ReservePrivateRefCountEntry(), ResourceOwnerEnlarge(), smgrwrite(), BufferDesc::state, BufferDesc::tag, track_io_timing, UnlockBufHdr(), and UnpinBuffer().

Referenced by fill_seq_with_data(), heapam_relation_copy_data(), and index_copy_data().

◆ FlushRelationsAllBuffers()

void FlushRelationsAllBuffers ( struct SMgrRelationData **  smgrs,
int  nrels 
)

◆ FreeAccessStrategy()

void FreeAccessStrategy ( BufferAccessStrategy  strategy)

Definition at line 639 of file freelist.c.

640 {
641  /* don't crash if called on a "default" strategy */
642  if (strategy != NULL)
643  pfree(strategy);
644 }

References pfree().

Referenced by blgetbitmap(), FreeBulkInsertState(), heap_endscan(), initscan(), parallel_vacuum_main(), and RelationCopyStorageUsingBuffer().

◆ GetAccessStrategy()

BufferAccessStrategy GetAccessStrategy ( BufferAccessStrategyType  btype)

Definition at line 541 of file freelist.c.

542 {
543  int ring_size_kb;
544 
545  /*
546  * Select ring size to use. See buffer/README for rationales.
547  *
548  * Note: if you change the ring size for BAS_BULKREAD, see also
549  * SYNC_SCAN_REPORT_INTERVAL in access/heap/syncscan.c.
550  */
551  switch (btype)
552  {
553  case BAS_NORMAL:
554  /* if someone asks for NORMAL, just give 'em a "default" object */
555  return NULL;
556 
557  case BAS_BULKREAD:
558  ring_size_kb = 256;
559  break;
560  case BAS_BULKWRITE:
561  ring_size_kb = 16 * 1024;
562  break;
563  case BAS_VACUUM:
564  ring_size_kb = 256;
565  break;
566 
567  default:
568  elog(ERROR, "unrecognized buffer access strategy: %d",
569  (int) btype);
570  return NULL; /* keep compiler quiet */
571  }
572 
573  return GetAccessStrategyWithSize(btype, ring_size_kb);
574 }
BufferAccessStrategy GetAccessStrategyWithSize(BufferAccessStrategyType btype, int ring_size_kb)
Definition: freelist.c:584

References BAS_BULKREAD, BAS_BULKWRITE, BAS_NORMAL, BAS_VACUUM, elog(), ERROR, and GetAccessStrategyWithSize().

Referenced by blgetbitmap(), bt_check_every_level(), collect_corrupt_items(), collect_visibility_data(), GetBulkInsertState(), initscan(), pgstat_index(), pgstathashindex(), pgstatindex_impl(), RelationCopyStorageUsingBuffer(), ScanSourceDatabasePgClass(), statapprox_heap(), and verify_heapam().

◆ GetAccessStrategyBufferCount()

int GetAccessStrategyBufferCount ( BufferAccessStrategy  strategy)

Definition at line 624 of file freelist.c.

625 {
626  if (strategy == NULL)
627  return 0;
628 
629  return strategy->nbuffers;
630 }

References BufferAccessStrategyData::nbuffers.

Referenced by parallel_vacuum_init().

◆ GetAccessStrategyWithSize()

BufferAccessStrategy GetAccessStrategyWithSize ( BufferAccessStrategyType  btype,
int  ring_size_kb 
)

Definition at line 584 of file freelist.c.

585 {
586  int ring_buffers;
587  BufferAccessStrategy strategy;
588 
589  Assert(ring_size_kb >= 0);
590 
591  /* Figure out how many buffers ring_size_kb is */
592  ring_buffers = ring_size_kb / (BLCKSZ / 1024);
593 
594  /* 0 means unlimited, so no BufferAccessStrategy required */
595  if (ring_buffers == 0)
596  return NULL;
597 
598  /* Cap to 1/8th of shared_buffers */
599  ring_buffers = Min(NBuffers / 8, ring_buffers);
600 
601  /* NBuffers should never be less than 16, so this shouldn't happen */
602  Assert(ring_buffers > 0);
603 
604  /* Allocate the object and initialize all elements to zeroes */
605  strategy = (BufferAccessStrategy)
606  palloc0(offsetof(BufferAccessStrategyData, buffers) +
607  ring_buffers * sizeof(Buffer));
608 
609  /* Set fields that don't start out zero */
610  strategy->btype = btype;
611  strategy->nbuffers = ring_buffers;
612 
613  return strategy;
614 }
struct BufferAccessStrategyData * BufferAccessStrategy
Definition: buf.h:44
#define Min(x, y)
Definition: c.h:993
void * palloc0(Size size)
Definition: mcxt.c:1232
BufferAccessStrategyType btype
Definition: freelist.c:75

References Assert(), BufferAccessStrategyData::btype, Min, BufferAccessStrategyData::nbuffers, NBuffers, and palloc0().

Referenced by do_autovacuum(), ExecVacuum(), GetAccessStrategy(), and parallel_vacuum_main().

◆ HoldingBufferPinThatDelaysRecovery()

bool HoldingBufferPinThatDelaysRecovery ( void  )

Definition at line 5011 of file bufmgr.c.

5012 {
5013  int bufid = GetStartupBufferPinWaitBufId();
5014 
5015  /*
5016  * If we get woken slowly then it's possible that the Startup process was
5017  * already woken by other backends before we got here. Also possible that
5018  * we get here by multiple interrupts or interrupts at inappropriate
5019  * times, so make sure we do nothing if the bufid is not set.
5020  */
5021  if (bufid < 0)
5022  return false;
5023 
5024  if (GetPrivateRefCount(bufid + 1) > 0)
5025  return true;
5026 
5027  return false;
5028 }
int GetStartupBufferPinWaitBufId(void)
Definition: proc.c:675

References GetPrivateRefCount(), and GetStartupBufferPinWaitBufId().

Referenced by CheckRecoveryConflictDeadlock(), and ProcessRecoveryConflictInterrupt().

◆ IncrBufferRefCount()

void IncrBufferRefCount ( Buffer  buffer)

Definition at line 4593 of file bufmgr.c.

4594 {
4595  Assert(BufferIsPinned(buffer));
4597  if (BufferIsLocal(buffer))
4598  LocalRefCount[-buffer - 1]++;
4599  else
4600  {
4601  PrivateRefCountEntry *ref;
4602 
4603  ref = GetPrivateRefCountEntry(buffer, true);
4604  Assert(ref != NULL);
4605  ref->refcount++;
4606  }
4608 }
static PrivateRefCountEntry * GetPrivateRefCountEntry(Buffer buffer, bool do_move)
Definition: bufmgr.c:331
void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)

References Assert(), PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, CurrentResourceOwner, GetPrivateRefCountEntry(), LocalRefCount, PrivateRefCountEntry::refcount, ResourceOwnerEnlarge(), and ResourceOwnerRememberBuffer().

Referenced by _bt_steppage(), btrestrpos(), entryLoadMoreItems(), ReadBufferBI(), RelationAddBlocks(), scanPostingTree(), startScanEntry(), and tts_buffer_heap_store_tuple().

◆ InitBufferPool()

void InitBufferPool ( void  )

Definition at line 68 of file buf_init.c.

69 {
70  bool foundBufs,
71  foundDescs,
72  foundIOCV,
73  foundBufCkpt;
74 
75  /* Align descriptors to a cacheline boundary. */
77  ShmemInitStruct("Buffer Descriptors",
78  NBuffers * sizeof(BufferDescPadded),
79  &foundDescs);
80 
81  /* Align buffer pool on IO page size boundary. */
82  BufferBlocks = (char *)
84  ShmemInitStruct("Buffer Blocks",
85  NBuffers * (Size) BLCKSZ + PG_IO_ALIGN_SIZE,
86  &foundBufs));
87 
88  /* Align condition variables to cacheline boundary. */
90  ShmemInitStruct("Buffer IO Condition Variables",
92  &foundIOCV);
93 
94  /*
95  * The array used to sort to-be-checkpointed buffer ids is located in
96  * shared memory, to avoid having to allocate significant amounts of
97  * memory at runtime. As that'd be in the middle of a checkpoint, or when
98  * the checkpointer is restarted, memory allocation failures would be
99  * painful.
100  */
102  ShmemInitStruct("Checkpoint BufferIds",
103  NBuffers * sizeof(CkptSortItem), &foundBufCkpt);
104 
105  if (foundDescs || foundBufs || foundIOCV || foundBufCkpt)
106  {
107  /* should find all of these, or none of them */
108  Assert(foundDescs && foundBufs && foundIOCV && foundBufCkpt);
109  /* note: this path is only taken in EXEC_BACKEND case */
110  }
111  else
112  {
113  int i;
114 
115  /*
116  * Initialize all the buffer headers.
117  */
118  for (i = 0; i < NBuffers; i++)
119  {
121 
122  ClearBufferTag(&buf->tag);
123 
124  pg_atomic_init_u32(&buf->state, 0);
125  buf->wait_backend_pgprocno = INVALID_PGPROCNO;
126 
127  buf->buf_id = i;
128 
129  /*
130  * Initially link all the buffers together as unused. Subsequent
131  * management of this list is done by freelist.c.
132  */
133  buf->freeNext = i + 1;
134 
137 
139  }
140 
141  /* Correct last entry of linked list */
143  }
144 
145  /* Init other shared buffer-management stuff */
146  StrategyInitialize(!foundDescs);
147 
148  /* Initialize per-backend file flush context */
151 }
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:218
CkptSortItem * CkptBufferIds
Definition: buf_init.c:25
char * BufferBlocks
Definition: buf_init.c:22
WritebackContext BackendWritebackContext
Definition: buf_init.c:24
ConditionVariableMinimallyPadded * BufferIOCVArray
Definition: buf_init.c:23
BufferDescPadded * BufferDescriptors
Definition: buf_init.c:21
#define FREENEXT_END_OF_LIST
static void ClearBufferTag(BufferTag *tag)
static ConditionVariable * BufferDescriptorGetIOCV(const BufferDesc *bdesc)
void WritebackContextInit(WritebackContext *context, int *max_pending)
Definition: bufmgr.c:5533
int backend_flush_after
Definition: bufmgr.c:162
#define TYPEALIGN(ALIGNVAL, LEN)
Definition: c.h:793
void ConditionVariableInit(ConditionVariable *cv)
void StrategyInitialize(bool init)
Definition: freelist.c:474
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:703
@ LWTRANCHE_BUFFER_CONTENT
Definition: lwlock.h:189
#define INVALID_PGPROCNO
Definition: proc.h:85
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:388

References Assert(), backend_flush_after, BackendWritebackContext, buf, BufferBlocks, BufferDescriptorGetContentLock(), BufferDescriptorGetIOCV(), BufferDescriptors, BufferIOCVArray, CkptBufferIds, ClearBufferTag(), ConditionVariableInit(), BufferDesc::freeNext, FREENEXT_END_OF_LIST, GetBufferDescriptor(), i, INVALID_PGPROCNO, LWLockInitialize(), LWTRANCHE_BUFFER_CONTENT, NBuffers, pg_atomic_init_u32(), PG_IO_ALIGN_SIZE, ShmemInitStruct(), StrategyInitialize(), TYPEALIGN, and WritebackContextInit().

Referenced by CreateOrAttachShmemStructs().

◆ InitBufferPoolAccess()

void InitBufferPoolAccess ( void  )

Definition at line 3230 of file bufmgr.c.

3231 {
3232  HASHCTL hash_ctl;
3233 
3234  memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
3235 
3236  hash_ctl.keysize = sizeof(int32);
3237  hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
3238 
3239  PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
3240  HASH_ELEM | HASH_BLOBS);
3241 
3242  /*
3243  * AtProcExit_Buffers needs LWLock access, and thereby has to be called at
3244  * the corresponding phase of backend shutdown.
3245  */
3246  Assert(MyProc != NULL);
3248 }
static void AtProcExit_Buffers(int code, Datum arg)
Definition: bufmgr.c:3255
struct PrivateRefCountEntry PrivateRefCountEntry
static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES]
Definition: bufmgr.c:197
static HTAB * PrivateRefCountHash
Definition: bufmgr.c:198
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:350
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:365
PGPROC * MyProc
Definition: proc.c:68
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76

References Assert(), AtProcExit_Buffers(), HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, MyProc, on_shmem_exit(), PrivateRefCountArray, and PrivateRefCountHash.

Referenced by BaseInit().

◆ IsBufferCleanupOK()

bool IsBufferCleanupOK ( Buffer  buffer)

Definition at line 5093 of file bufmgr.c.

5094 {
5095  BufferDesc *bufHdr;
5096  uint32 buf_state;
5097 
5098  Assert(BufferIsValid(buffer));
5099 
5100  if (BufferIsLocal(buffer))
5101  {
5102  /* There should be exactly one pin */
5103  if (LocalRefCount[-buffer - 1] != 1)
5104  return false;
5105  /* Nobody else to wait for */
5106  return true;
5107  }
5108 
5109  /* There should be exactly one local pin */
5110  if (GetPrivateRefCount(buffer) != 1)
5111  return false;
5112 
5113  bufHdr = GetBufferDescriptor(buffer - 1);
5114 
5115  /* caller must hold exclusive lock on buffer */
5117  LW_EXCLUSIVE));
5118 
5119  buf_state = LockBufHdr(bufHdr);
5120 
5121  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5122  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
5123  {
5124  /* pincount is OK. */
5125  UnlockBufHdr(bufHdr, buf_state);
5126  return true;
5127  }
5128 
5129  UnlockBufHdr(bufHdr, buf_state);
5130  return false;
5131 }

References Assert(), BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsValid(), GetBufferDescriptor(), GetPrivateRefCount(), LocalRefCount, LockBufHdr(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), and UnlockBufHdr().

Referenced by _hash_doinsert(), _hash_expandtable(), _hash_splitbucket(), and hashbucketcleanup().

◆ LockBuffer()

void LockBuffer ( Buffer  buffer,
int  mode 
)

Definition at line 4796 of file bufmgr.c.

4797 {
4798  BufferDesc *buf;
4799 
4800  Assert(BufferIsPinned(buffer));
4801  if (BufferIsLocal(buffer))
4802  return; /* local buffers need no lock */
4803 
4804  buf = GetBufferDescriptor(buffer - 1);
4805 
4806  if (mode == BUFFER_LOCK_UNLOCK)
4808  else if (mode == BUFFER_LOCK_SHARE)
4810  else if (mode == BUFFER_LOCK_EXCLUSIVE)
4812  else
4813  elog(ERROR, "unrecognized buffer lock mode: %d", mode);
4814 }
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:158
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:159

References Assert(), buf, PrivateRefCountEntry::buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, elog(), ERROR, GetBufferDescriptor(), LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), and mode.

Referenced by _bt_lockbuf(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_getbuf(), _hash_getbuf_with_strategy(), _hash_getcachedmetap(), _hash_init(), _hash_kill_items(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), blbulkdelete(), blgetbitmap(), blinsert(), BloomInitMetapage(), BloomNewBuffer(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_page_cleanup(), bringetbitmap(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinLockRevmapPageForUpdate(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinsummarize(), bt_metap(), bt_multi_page_stats(), bt_page_items_internal(), bt_page_stats_internal(), bt_recheck_sibling_links(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), ConditionalLockBufferForCleanup(), count_nondeletable_pages(), entryLoadMoreItems(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), get_raw_page_internal(), GetVisibilityMapPins(), ginbulkdelete(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishOldSplit(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfinishsplit(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), 
heap_fetch(), heap_finish_speculative(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_page_prune_opt(), heap_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_for_cluster(), heapam_scan_analyze_next_block(), heapam_scan_bitmap_next_block(), heapam_scan_sample_next_tuple(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), initBloomState(), lazy_scan_heap(), lazy_scan_new_or_empty(), lazy_vacuum_heap_rel(), LockBufferForCleanup(), log_newpage_range(), palloc_btree_page(), pg_visibility(), pgrowlocks(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationAddBlocks(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), ScanSourceDatabasePgClass(), shiftList(), spgdoinsert(), spgGetCache(), SpGistNewBuffer(), spgprocesspending(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), summarize_range(), UnlockReleaseBuffer(), verify_heapam(), verifyBackupPageConsistency(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), XLogReadBufferForRedoExtended(), and XLogRecordPageWithFreeSpace().

◆ LockBufferForCleanup()

void LockBufferForCleanup ( Buffer  buffer)

Definition at line 4876 of file bufmgr.c.

4877 {
4878  BufferDesc *bufHdr;
4879  TimestampTz waitStart = 0;
4880  bool waiting = false;
4881  bool logged_recovery_conflict = false;
4882 
4883  Assert(BufferIsPinned(buffer));
4884  Assert(PinCountWaitBuf == NULL);
4885 
4886  CheckBufferIsPinnedOnce(buffer);
4887 
4888  /* Nobody else to wait for */
4889  if (BufferIsLocal(buffer))
4890  return;
4891 
4892  bufHdr = GetBufferDescriptor(buffer - 1);
4893 
4894  for (;;)
4895  {
4896  uint32 buf_state;
4897 
4898  /* Try to acquire lock */
4900  buf_state = LockBufHdr(bufHdr);
4901 
4902  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4903  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4904  {
4905  /* Successfully acquired exclusive lock with pincount 1 */
4906  UnlockBufHdr(bufHdr, buf_state);
4907 
4908  /*
4909  * Emit the log message if recovery conflict on buffer pin was
4910  * resolved but the startup process waited longer than
4911  * deadlock_timeout for it.
4912  */
4913  if (logged_recovery_conflict)
4915  waitStart, GetCurrentTimestamp(),
4916  NULL, false);
4917 
4918  if (waiting)
4919  {
4920  /* reset ps display to remove the suffix if we added one */
4922  waiting = false;
4923  }
4924  return;
4925  }
4926  /* Failed, so mark myself as waiting for pincount 1 */
4927  if (buf_state & BM_PIN_COUNT_WAITER)
4928  {
4929  UnlockBufHdr(bufHdr, buf_state);
4930  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4931  elog(ERROR, "multiple backends attempting to wait for pincount 1");
4932  }
4934  PinCountWaitBuf = bufHdr;
4935  buf_state |= BM_PIN_COUNT_WAITER;
4936  UnlockBufHdr(bufHdr, buf_state);
4937  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4938 
4939  /* Wait to be signaled by UnpinBuffer() */
4940  if (InHotStandby)
4941  {
4942  if (!waiting)
4943  {
4944  /* adjust the process title to indicate that it's waiting */
4945  set_ps_display_suffix("waiting");
4946  waiting = true;
4947  }
4948 
4949  /*
4950  * Emit the log message if the startup process is waiting longer
4951  * than deadlock_timeout for recovery conflict on buffer pin.
4952  *
4953  * Skip this if first time through because the startup process has
4954  * not started waiting yet in this case. So, the wait start
4955  * timestamp is set after this logic.
4956  */
4957  if (waitStart != 0 && !logged_recovery_conflict)
4958  {
4960 
4961  if (TimestampDifferenceExceeds(waitStart, now,
4962  DeadlockTimeout))
4963  {
4965  waitStart, now, NULL, true);
4966  logged_recovery_conflict = true;
4967  }
4968  }
4969 
4970  /*
4971  * Set the wait start timestamp if logging is enabled and first
4972  * time through.
4973  */
4974  if (log_recovery_conflict_waits && waitStart == 0)
4975  waitStart = GetCurrentTimestamp();
4976 
4977  /* Publish the bufid that Startup process waits on */
4978  SetStartupBufferPinWaitBufId(buffer - 1);
4979  /* Set alarm and then wait to be signaled by UnpinBuffer() */
4981  /* Reset the published bufid */
4983  }
4984  else
4985  ProcWaitForSignal(WAIT_EVENT_BUFFER_PIN);
4986 
4987  /*
4988  * Remove flag marking us as waiter. Normally this will not be set
4989  * anymore, but ProcWaitForSignal() can return for other signals as
4990  * well. We take care to only reset the flag if we're the waiter, as
4991  * theoretically another backend could have started waiting. That's
4992  * impossible with the current usages due to table level locking, but
4993  * better be safe.
4994  */
4995  buf_state = LockBufHdr(bufHdr);
4996  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4998  buf_state &= ~BM_PIN_COUNT_WAITER;
4999  UnlockBufHdr(bufHdr, buf_state);
5000 
5001  PinCountWaitBuf = NULL;
5002  /* Loop back and try again */
5003  }
5004 }
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1791
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1655
Datum now(PG_FUNCTION_ARGS)
Definition: timestamp.c:1619
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:67
void CheckBufferIsPinnedOnce(Buffer buffer)
Definition: bufmgr.c:4843
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:165
int64 TimestampTz
Definition: timestamp.h:39
static volatile sig_atomic_t waiting
Definition: latch.c:163
@ PROCSIG_RECOVERY_CONFLICT_BUFFERPIN
Definition: procsignal.h:47
void set_ps_display_remove_suffix(void)
Definition: ps_status.c:396
void set_ps_display_suffix(const char *suffix)
Definition: ps_status.c:344
int MyProcNumber
Definition: proc.c:69
int DeadlockTimeout
Definition: proc.c:59
void SetStartupBufferPinWaitBufId(int bufid)
Definition: proc.c:663
void ProcWaitForSignal(uint32 wait_event_info)
Definition: proc.c:1847
void ResolveRecoveryConflictWithBufferPin(void)
Definition: standby.c:793
bool log_recovery_conflict_waits
Definition: standby.c:43
void LogRecoveryConflict(ProcSignalReason reason, TimestampTz wait_start, TimestampTz now, VirtualTransactionId *wait_list, bool still_waiting)
Definition: standby.c:274
int wait_backend_pgprocno
#define InHotStandby
Definition: xlogutils.h:57

References Assert(), BM_PIN_COUNT_WAITER, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsPinned, CheckBufferIsPinnedOnce(), DeadlockTimeout, elog(), ERROR, GetBufferDescriptor(), GetCurrentTimestamp(), InHotStandby, LockBuffer(), LockBufHdr(), log_recovery_conflict_waits, LogRecoveryConflict(), MyProcNumber, now(), PinCountWaitBuf, PROCSIG_RECOVERY_CONFLICT_BUFFERPIN, ProcWaitForSignal(), ResolveRecoveryConflictWithBufferPin(), set_ps_display_remove_suffix(), set_ps_display_suffix(), SetStartupBufferPinWaitBufId(), TimestampDifferenceExceeds(), UnlockBufHdr(), BufferDesc::wait_backend_pgprocno, and waiting.

Referenced by _bt_upgradelockbufcleanup(), ginVacuumPostingTree(), hashbulkdelete(), heap_force_common(), lazy_scan_heap(), ReadBuffer_common(), and XLogReadBufferForRedoExtended().

◆ MarkBufferDirty()

void MarkBufferDirty ( Buffer  buffer)

Definition at line 2190 of file bufmgr.c.

2191 {
2192  BufferDesc *bufHdr;
2193  uint32 buf_state;
2194  uint32 old_buf_state;
2195 
2196  if (!BufferIsValid(buffer))
2197  elog(ERROR, "bad buffer ID: %d", buffer);
2198 
2199  if (BufferIsLocal(buffer))
2200  {
2201  MarkLocalBufferDirty(buffer);
2202  return;
2203  }
2204 
2205  bufHdr = GetBufferDescriptor(buffer - 1);
2206 
2207  Assert(BufferIsPinned(buffer));
2209  LW_EXCLUSIVE));
2210 
2211  old_buf_state = pg_atomic_read_u32(&bufHdr->state);
2212  for (;;)
2213  {
2214  if (old_buf_state & BM_LOCKED)
2215  old_buf_state = WaitBufHdrUnlocked(bufHdr);
2216 
2217  buf_state = old_buf_state;
2218 
2219  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2220  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
2221 
2222  if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
2223  buf_state))
2224  break;
2225  }
2226 
2227  /*
2228  * If the buffer was not dirty already, do vacuum accounting.
2229  */
2230  if (!(old_buf_state & BM_DIRTY))
2231  {
2232  VacuumPageDirty++;
2234  if (VacuumCostActive)
2236  }
2237 }
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:306
#define BM_LOCKED
Definition: buf_internals.h:60
static uint32 WaitBufHdrUnlocked(BufferDesc *buf)
Definition: bufmgr.c:5421
bool VacuumCostActive
Definition: globals.c:159
int64 VacuumPageDirty
Definition: globals.c:156
int VacuumCostBalance
Definition: globals.c:158
int VacuumCostPageDirty
Definition: globals.c:150
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:450
int64 shared_blks_dirtied
Definition: instrument.h:28

References Assert(), BM_DIRTY, BM_JUST_DIRTIED, BM_LOCKED, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, BufferIsValid(), elog(), ERROR, GetBufferDescriptor(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), MarkLocalBufferDirty(), pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pgBufferUsage, BufferUsage::shared_blks_dirtied, BufferDesc::state, VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, VacuumPageDirty, and WaitBufHdrUnlocked().

Referenced by _bt_clear_incomplete_split(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_restore_meta(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), brin_doinsert(), brin_doupdate(), brin_initialize_empty_new_buffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinRevmapDesummarizeRange(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), createPostingTree(), dataExecPlaceToPageInternal(), dataExecPlaceToPageLeaf(), do_setval(), doPickSplit(), entryExecPlaceToPage(), fill_seq_fork_with_data(), FreeSpaceMapPrepareTruncateRel(), generic_redo(), GenericXLogFinish(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginDeletePage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistbuild(), gistbuildempty(), gistdeletepage(), gistplacetopage(), gistprunepage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistvacuumpage(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), 
hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_freeze_execute_prepared(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_prune(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), lazy_scan_new_or_empty(), lazy_scan_prune(), lazy_vacuum_heap_page(), log_newpage_range(), moveLeafs(), nextval_internal(), RelationAddBlocks(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), revmap_physical_extend(), saveNodeLink(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), SpGistUpdateMetaPage(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), writeListPage(), and XLogReadBufferForRedoExtended().

◆ MarkBufferDirtyHint()

void MarkBufferDirtyHint ( Buffer  buffer,
bool  buffer_std 
)

Definition at line 4625 of file bufmgr.c.

4626 {
4627  BufferDesc *bufHdr;
4628  Page page = BufferGetPage(buffer);
4629 
4630  if (!BufferIsValid(buffer))
4631  elog(ERROR, "bad buffer ID: %d", buffer);
4632 
4633  if (BufferIsLocal(buffer))
4634  {
4635  MarkLocalBufferDirty(buffer);
4636  return;
4637  }
4638 
4639  bufHdr = GetBufferDescriptor(buffer - 1);
4640 
4641  Assert(GetPrivateRefCount(buffer) > 0);
4642  /* here, either share or exclusive lock is OK */
4643  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
4644 
4645  /*
4646  * This routine might get called many times on the same page, if we are
4647  * making the first scan after commit of an xact that added/deleted many
4648  * tuples. So, be as quick as we can if the buffer is already dirty. We
4649  * do this by not acquiring spinlock if it looks like the status bits are
4650  * already set. Since we make this test unlocked, there's a chance we
4651  * might fail to notice that the flags have just been cleared, and failed
4652  * to reset them, due to memory-ordering issues. But since this function
4653  * is only intended to be used in cases where failing to write out the
4654  * data would be harmless anyway, it doesn't really matter.
4655  */
4656  if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
4657  (BM_DIRTY | BM_JUST_DIRTIED))
4658  {
4659  XLogRecPtr lsn = InvalidXLogRecPtr;
4660  bool dirtied = false;
4661  bool delayChkptFlags = false;
4662  uint32 buf_state;
4663 
4664  /*
4665  * If we need to protect hint bit updates from torn writes, WAL-log a
4666  * full page image of the page. This full page image is only necessary
4667  * if the hint bit update is the first change to the page since the
4668  * last checkpoint.
4669  *
4670  * We don't check full_page_writes here because that logic is included
4671  * when we call XLogInsert() since the value changes dynamically.
4672  */
4673  if (XLogHintBitIsNeeded() &&
4674  (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
4675  {
4676  /*
4677  * If we must not write WAL, due to a relfilelocator-specific
4678  * condition or being in recovery, don't dirty the page. We can
4679  * set the hint, just not dirty the page as a result so the hint
4680  * is lost when we evict the page or shutdown.
4681  *
4682  * See src/backend/storage/page/README for longer discussion.
4683  */
4684  if (RecoveryInProgress() ||
4685  RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
4686  return;
4687 
4688  /*
4689  * If the block is already dirty because we either made a change
4690  * or set a hint already, then we don't need to write a full page
4691  * image. Note that aggressive cleaning of blocks dirtied by hint
4692  * bit setting would increase the call rate. Bulk setting of hint
4693  * bits would reduce the call rate...
4694  *
4695  * We must issue the WAL record before we mark the buffer dirty.
4696  * Otherwise we might write the page before we write the WAL. That
4697  * causes a race condition, since a checkpoint might occur between
4698  * writing the WAL record and marking the buffer dirty. We solve
4699  * that with a kluge, but one that is already in use during
4700  * transaction commit to prevent race conditions. Basically, we
4701  * simply prevent the checkpoint WAL record from being written
4702  * until we have marked the buffer dirty. We don't start the
4703  * checkpoint flush until we have marked dirty, so our checkpoint
4704  * must flush the change to disk successfully or the checkpoint
4705  * never gets written, so crash recovery will fix.
4706  *
4707  * It's possible we may enter here without an xid, so it is
4708  * essential that CreateCheckPoint waits for virtual transactions
4709  * rather than full transactionids.
4710  */
4711  MyProc->delayChkptFlags |= DELAY_CHKPT_START;
4712 
4713  delayChkptFlags = true;
4714  lsn = XLogSaveBufferForHint(buffer, buffer_std);
4715  }
4716 
4717  buf_state = LockBufHdr(bufHdr);
4718 
4719  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4720 
4721  if (!(buf_state & BM_DIRTY))
4722  {
4723  dirtied = true; /* Means "will be dirtied by this action" */
4724 
4725  /*
4726  * Set the page LSN if we wrote a backup block. We aren't supposed
4727  * to set this when only holding a share lock but as long as we
4728  * serialise it somehow we're OK. We choose to set LSN while
4729  * holding the buffer header lock, which causes any reader of an
4730  * LSN who holds only a share lock to also obtain a buffer header
4731  * lock before using PageGetLSN(), which is enforced in
4732  * BufferGetLSNAtomic().
4733  *
4734  * If checksums are enabled, you might think we should reset the
4735  * checksum here. That will happen when the page is written
4736  * sometime later in this checkpoint cycle.
4737  */
4738  if (!XLogRecPtrIsInvalid(lsn))
4739  PageSetLSN(page, lsn);
4740  }
4741 
4742  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
4743  UnlockBufHdr(bufHdr, buf_state);
4744 
4745  if (delayChkptFlags)
4746  MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
4747 
4748  if (dirtied)
4749  {
4750  VacuumPageDirty++;
4751  pgBufferUsage.shared_blks_dirtied++;
4752  if (VacuumCostActive)
4753  VacuumCostBalance += VacuumCostPageDirty;
4755  }
4756 }
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:388
#define DELAY_CHKPT_START
Definition: proc.h:119
bool RelFileLocatorSkippingWAL(RelFileLocator rlocator)
Definition: storage.c:532
int delayChkptFlags
Definition: proc.h:226
bool RecoveryInProgress(void)
Definition: xlog.c:6211
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
Definition: xloginsert.c:1066

References Assert(), BM_DIRTY, BM_JUST_DIRTIED, BM_PERMANENT, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferGetPage(), BufferIsLocal, BufferIsValid(), BufTagGetRelFileLocator(), DELAY_CHKPT_START, PGPROC::delayChkptFlags, elog(), ERROR, GetBufferDescriptor(), GetPrivateRefCount(), InvalidXLogRecPtr, LockBufHdr(), LWLockHeldByMe(), MarkLocalBufferDirty(), MyProc, PageSetLSN(), pg_atomic_read_u32(), pgBufferUsage, RecoveryInProgress(), RelFileLocatorSkippingWAL(), BufferUsage::shared_blks_dirtied, BufferDesc::state, BufferDesc::tag, UnlockBufHdr(), VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, VacuumPageDirty, XLogHintBitIsNeeded, XLogRecPtrIsInvalid, and XLogSaveBufferForHint().

Referenced by _bt_check_unique(), _bt_killitems(), _hash_kill_items(), brin_start_evacuating_page(), btvacuumpage(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), gistkillitems(), heap_page_prune(), read_seq_tuple(), SetHintBits(), and XLogRecordPageWithFreeSpace().

◆ PrefetchBuffer()

PrefetchBufferResult PrefetchBuffer ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

Definition at line 628 of file bufmgr.c.

629 {
630  Assert(RelationIsValid(reln));
631  Assert(BlockNumberIsValid(blockNum));
632 
633  if (RelationUsesLocalBuffers(reln))
634  {
635  /* see comments in ReadBufferExtended */
636  if (RELATION_IS_OTHER_TEMP(reln))
637  ereport(ERROR,
638  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
639  errmsg("cannot access temporary tables of other sessions")));
640 
641  /* pass it off to localbuf.c */
642  return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
643  }
644  else
645  {
646  /* pass it to the shared buffer version */
647  return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
648  }
649 }
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
PrefetchBufferResult PrefetchSharedBuffer(SMgrRelation smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:538
int errcode(int sqlerrcode)
Definition: elog.c:860
int errmsg(const char *fmt,...)
Definition: elog.c:1075
#define ereport(elevel,...)
Definition: elog.h:149
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition: localbuf.c:70
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:657
#define RelationIsValid(relation)
Definition: rel.h:477

References Assert(), BlockNumberIsValid(), ereport, errcode(), errmsg(), ERROR, PrefetchLocalBuffer(), PrefetchSharedBuffer(), RELATION_IS_OTHER_TEMP, RelationGetSmgr(), RelationIsValid, and RelationUsesLocalBuffers.

Referenced by acquire_sample_rows(), BitmapPrefetch(), count_nondeletable_pages(), and pg_prewarm().

◆ PrefetchSharedBuffer()

PrefetchBufferResult PrefetchSharedBuffer ( struct SMgrRelationData smgr_reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

◆ ReadBuffer()

Buffer ReadBuffer ( Relation  reln,
BlockNumber  blockNum 
)

Definition at line 735 of file bufmgr.c.

736 {
737  return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
738 }
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:782

References MAIN_FORKNUM, RBM_NORMAL, and ReadBufferExtended().

Referenced by _bt_allocbuf(), _bt_getbuf(), _bt_search_insert(), _hash_getbuf(), _hash_getbuf_with_condlock_cleanup(), blbulkdelete(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brinGetStats(), brinGetTupleForHeapBlock(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), bt_metap(), bt_multi_page_stats(), bt_page_items_internal(), bt_page_stats_internal(), ginFindLeafPage(), ginFindParents(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), GinNewBuffer(), ginStepRight(), ginUpdateStats(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistGetMaxLevel(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_lock_tuple(), heap_update(), initBloomState(), pg_visibility(), pgstatginindex_internal(), read_seq_tuple(), RelationGetBufferForTuple(), ReleaseAndReadBuffer(), revmap_get_buffer(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), shiftList(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), and spgWalk().

◆ ReadBufferExtended()

Buffer ReadBufferExtended ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy 
)

Definition at line 782 of file bufmgr.c.

784 {
785  bool hit;
786  Buffer buf;
787 
788  /*
789  * Reject attempts to read non-local temporary relations; we would be
790  * likely to get wrong data since we have no visibility into the owning
791  * session's local buffers.
792  */
793  if (RELATION_IS_OTHER_TEMP(reln))
794  ereport(ERROR,
795  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
796  errmsg("cannot access temporary tables of other sessions")));
797 
798  /*
799  * Read the buffer, and update pgstat counters to reflect a cache hit or
800  * miss.
801  */
802  pgstat_count_buffer_read(reln);
803  buf = ReadBuffer_common(RelationGetSmgr(reln), reln->rd_rel->relpersistence,
804  forkNum, blockNum, mode, strategy, &hit);
805  if (hit)
806  pgstat_count_buffer_hit(reln);
807  return buf;
808 }
#define pgstat_count_buffer_read(rel)
Definition: pgstat.h:635
#define pgstat_count_buffer_hit(rel)
Definition: pgstat.h:640

References buf, ereport, errcode(), errmsg(), ERROR, mode, pgstat_count_buffer_hit, pgstat_count_buffer_read, RelationData::rd_rel, ReadBuffer_common(), RELATION_IS_OTHER_TEMP, and RelationGetSmgr().

Referenced by _hash_getbuf_with_strategy(), _hash_getinitbuf(), _hash_getnewbuf(), autoprewarm_database_main(), blbulkdelete(), blgetbitmap(), BloomInitMetapage(), blvacuumcleanup(), brin_vacuum_scan(), bt_recheck_sibling_links(), btvacuumpage(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), fsm_readbuf(), get_raw_page_internal(), ginbulkdelete(), ginDeletePage(), ginScanToDelete(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbulkdelete(), heapam_scan_analyze_next_block(), heapgetpage(), lazy_scan_heap(), lazy_vacuum_heap_rel(), log_newpage_range(), palloc_btree_page(), pg_prewarm(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstathashindex(), pgstatindex_impl(), ReadBuffer(), ReadBufferBI(), spgprocesspending(), spgvacuumpage(), statapprox_heap(), verify_heapam(), and vm_readbuf().

◆ ReadBufferWithoutRelcache()

Buffer ReadBufferWithoutRelcache ( RelFileLocator  rlocator,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy,
bool  permanent 
)

Definition at line 822 of file bufmgr.c.

825 {
826  bool hit;
827 
828  SMgrRelation smgr = smgropen(rlocator, InvalidBackendId);
829 
830  return ReadBuffer_common(smgr, permanent ? RELPERSISTENCE_PERMANENT :
831  RELPERSISTENCE_UNLOGGED, forkNum, blockNum,
832  mode, strategy, &hit);
833 }

References InvalidBackendId, mode, ReadBuffer_common(), and smgropen().

Referenced by RelationCopyStorageUsingBuffer(), ScanSourceDatabasePgClass(), and XLogReadBufferExtended().

◆ ReadRecentBuffer()

bool ReadRecentBuffer ( RelFileLocator  rlocator,
ForkNumber  forkNum,
BlockNumber  blockNum,
Buffer  recent_buffer 
)

Definition at line 659 of file bufmgr.c.

661 {
662  BufferDesc *bufHdr;
663  BufferTag tag;
664  uint32 buf_state;
665  bool have_private_ref;
666 
667  Assert(BufferIsValid(recent_buffer));
668 
669  ResourceOwnerEnlarge(CurrentResourceOwner);
670  ReservePrivateRefCountEntry();
671  InitBufferTag(&tag, &rlocator, forkNum, blockNum);
672 
673  if (BufferIsLocal(recent_buffer))
674  {
675  int b = -recent_buffer - 1;
676 
677  bufHdr = GetLocalBufferDescriptor(b);
678  buf_state = pg_atomic_read_u32(&bufHdr->state);
679 
680  /* Is it still valid and holding the right tag? */
681  if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
682  {
683  PinLocalBuffer(bufHdr, true);
684 
685  pgBufferUsage.local_blks_hit++;
686 
687  return true;
688  }
689  }
690  else
691  {
692  bufHdr = GetBufferDescriptor(recent_buffer - 1);
693  have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
694 
695  /*
696  * Do we already have this buffer pinned with a private reference? If
697  * so, it must be valid and it is safe to check the tag without
698  * locking. If not, we have to lock the header first and then check.
699  */
700  if (have_private_ref)
701  buf_state = pg_atomic_read_u32(&bufHdr->state);
702  else
703  buf_state = LockBufHdr(bufHdr);
704 
705  if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
706  {
707  /*
708  * It's now safe to pin the buffer. We can't pin first and ask
709  * questions later, because it might confuse code paths like
710  * InvalidateBuffer() if we pinned a random non-matching buffer.
711  */
712  if (have_private_ref)
713  PinBuffer(bufHdr, NULL); /* bump pin count */
714  else
715  PinBuffer_Locked(bufHdr); /* pin for first time */
716 
717  pgBufferUsage.shared_blks_hit++;
718 
719  return true;
720  }
721 
722  /* If we locked the header above, now unlock. */
723  if (!have_private_ref)
724  UnlockBufHdr(bufHdr, buf_state);
725  }
726 
727  return false;
728 }
static void InitBufferTag(BufferTag *tag, const RelFileLocator *rlocator, ForkNumber forkNum, BlockNumber blockNum)
static bool BufferTagsEqual(const BufferTag *tag1, const BufferTag *tag2)
static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
Definition: bufmgr.c:2311
int b
Definition: isn.c:70
bool PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount)
Definition: localbuf.c:656
int64 local_blks_hit
Definition: instrument.h:30
int64 shared_blks_hit
Definition: instrument.h:26

References Assert(), b, BM_VALID, BufferIsLocal, BufferIsValid(), BufferTagsEqual(), CurrentResourceOwner, GetBufferDescriptor(), GetLocalBufferDescriptor(), GetPrivateRefCount(), InitBufferTag(), BufferUsage::local_blks_hit, LockBufHdr(), pg_atomic_read_u32(), pgBufferUsage, PinBuffer(), PinBuffer_Locked(), PinLocalBuffer(), ReservePrivateRefCountEntry(), ResourceOwnerEnlarge(), BufferUsage::shared_blks_hit, BufferDesc::state, BufferDesc::tag, and UnlockBufHdr().

Referenced by XLogReadBufferExtended().

◆ RelationGetNumberOfBlocksInFork()

BlockNumber RelationGetNumberOfBlocksInFork ( Relation  relation,
ForkNumber  forkNum 
)

Definition at line 3577 of file bufmgr.c.

3578 {
3579  if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
3580  {
3581  /*
3582  * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore
3583  * tableam returns the size in bytes - but for the purpose of this
3584  * routine, we want the number of blocks. Therefore divide, rounding
3585  * up.
3586  */
3587  uint64 szbytes;
3588 
3589  szbytes = table_relation_size(relation, forkNum);
3590 
3591  return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
3592  }
3593  else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
3594  {
3595  return smgrnblocks(RelationGetSmgr(relation), forkNum);
3596  }
3597  else
3598  Assert(false);
3599 
3600  return 0; /* keep compiler quiet */
3601 }
static uint64 table_relation_size(Relation rel, ForkNumber forkNumber)
Definition: tableam.h:1865

References Assert(), RelationData::rd_rel, RelationGetSmgr(), smgrnblocks(), and table_relation_size().

Referenced by _hash_getnewbuf(), _hash_init(), autoprewarm_database_main(), get_raw_page_internal(), and pg_prewarm().

◆ ReleaseAndReadBuffer()

Buffer ReleaseAndReadBuffer ( Buffer  buffer,
Relation  relation,
BlockNumber  blockNum 
)

Definition at line 2253 of file bufmgr.c.

2256 {
2257  ForkNumber forkNum = MAIN_FORKNUM;
2258  BufferDesc *bufHdr;
2259 
2260  if (BufferIsValid(buffer))
2261  {
2262  Assert(BufferIsPinned(buffer));
2263  if (BufferIsLocal(buffer))
2264  {
2265  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2266  if (bufHdr->tag.blockNum == blockNum &&
2267  BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
2268  BufTagGetForkNum(&bufHdr->tag) == forkNum)
2269  return buffer;
2270  UnpinLocalBuffer(buffer);
2271  }
2272  else
2273  {
2274  bufHdr = GetBufferDescriptor(buffer - 1);
2275  /* we have pin, so it's ok to examine tag without spinlock */
2276  if (bufHdr->tag.blockNum == blockNum &&
2277  BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
2278  BufTagGetForkNum(&bufHdr->tag) == forkNum)
2279  return buffer;
2280  UnpinBuffer(bufHdr);
2281  }
2282  }
2283 
2284  return ReadBuffer(relation, blockNum);
2285 }
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:735
void UnpinLocalBuffer(Buffer buffer)
Definition: localbuf.c:682

References Assert(), buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufferIsValid(), BufTagGetForkNum(), BufTagMatchesRelFileLocator(), GetBufferDescriptor(), GetLocalBufferDescriptor(), MAIN_FORKNUM, RelationData::rd_locator, ReadBuffer(), BufferDesc::tag, UnpinBuffer(), and UnpinLocalBuffer().

Referenced by _bt_relandgetbuf(), ginFindLeafPage(), heapam_index_fetch_tuple(), and heapam_scan_bitmap_next_block().

◆ ReleaseBuffer()

void ReleaseBuffer ( Buffer  buffer)

Definition at line 4561 of file bufmgr.c.

4562 {
4563  if (!BufferIsValid(buffer))
4564  elog(ERROR, "bad buffer ID: %d", buffer);
4565 
4566  if (BufferIsLocal(buffer))
4567  UnpinLocalBuffer(buffer);
4568  else
4569  UnpinBuffer(GetBufferDescriptor(buffer - 1));
4570 }

References PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsValid(), elog(), ERROR, GetBufferDescriptor(), UnpinBuffer(), and UnpinLocalBuffer().

Referenced by _bt_allocbuf(), _bt_drop_lock_and_maybe_pin(), _bt_pagedel(), _bt_relbuf(), _bt_search_insert(), _bt_unlink_halfdead_page(), _hash_dropbuf(), _hash_getbuf_with_condlock_cleanup(), autoprewarm_database_main(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brin_vacuum_scan(), bringetbitmap(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapTerminate(), brinsummarize(), collect_corrupt_items(), collect_visibility_data(), entryLoadMoreItems(), ExecEndBitmapHeapScan(), ExecEndIndexOnlyScan(), ExecReScanBitmapHeapScan(), ExtendBufferedRelTo(), FreeBulkInsertState(), freeGinBtreeStack(), fsm_vacuum_page(), get_actual_variable_endpoint(), get_raw_page_internal(), GetRecordedFreeSpace(), ginDeletePage(), ginFindParents(), ginFinishSplit(), ginFreeScanKeys(), ginInsertCleanup(), GinNewBuffer(), ginScanToDelete(), gistdoinsert(), gistFindCorrectParent(), gistNewBuffer(), gistvacuum_delete_empty_pages(), heap_abort_speculative(), heap_delete(), heap_endscan(), heap_fetch(), heap_force_common(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_rescan(), heap_update(), heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapgetpage(), heapgettup(), heapgettup_pagemode(), lazy_scan_heap(), lazy_vacuum_heap_rel(), pg_prewarm(), pg_visibility(), pg_visibility_map(), pg_visibility_map_summary(), pgstatindex_impl(), ReadBufferBI(), RelationAddBlocks(), RelationGetBufferForTuple(), ReleaseBulkInsertStatePin(), revmap_get_buffer(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), statapprox_heap(), summarize_range(), terminate_brin_buildstate(), tts_buffer_heap_clear(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), UnlockReleaseBuffer(), verify_heapam(), visibilitymap_count(), 
visibilitymap_get_status(), visibilitymap_pin(), and XLogReadBufferExtended().

◆ UnlockBuffers()

void UnlockBuffers ( void  )

Definition at line 4768 of file bufmgr.c.

4769 {
4770  BufferDesc *buf = PinCountWaitBuf;
4771 
4772  if (buf)
4773  {
4774  uint32 buf_state;
4775 
4776  buf_state = LockBufHdr(buf);
4777 
4778  /*
4779  * Don't complain if flag bit not set; it could have been reset but we
4780  * got a cancel/die interrupt before getting the signal.
4781  */
4782  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4783  buf->wait_backend_pgprocno == MyProcNumber)
4784  buf_state &= ~BM_PIN_COUNT_WAITER;
4785 
4786  UnlockBufHdr(buf, buf_state);
4787 
4788  PinCountWaitBuf = NULL;
4789  }
4790 }

References BM_PIN_COUNT_WAITER, buf, LockBufHdr(), MyProcNumber, PinCountWaitBuf, and UnlockBufHdr().

Referenced by AbortSubTransaction(), AbortTransaction(), AtProcExit_Buffers(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), and WalWriterMain().

◆ UnlockReleaseBuffer()

void UnlockReleaseBuffer ( Buffer  buffer)

Definition at line 4578 of file bufmgr.c.

4579 {
4580  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4581  ReleaseBuffer(buffer);
4582 }

References PrivateRefCountEntry::buffer, BUFFER_LOCK_UNLOCK, LockBuffer(), and ReleaseBuffer().

Referenced by _bt_clear_incomplete_split(), _bt_restore_meta(), _hash_relbuf(), allocNewBuffer(), AlterSequence(), blbulkdelete(), blgetbitmap(), blinsert(), BloomInitMetapage(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinGetStats(), brinRevmapDesummarizeRange(), bt_metap(), bt_multi_page_stats(), bt_page_items_internal(), bt_page_stats_internal(), bt_recheck_sibling_links(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), createPostingTree(), do_setval(), doPickSplit(), entryLoadMoreItems(), fill_seq_fork_with_data(), flushCachedPage(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_set_and_search(), generic_redo(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), ginScanToDelete(), ginStepRight(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistbufferinginserttuples(), gistbuild(), gistbuildempty(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), 
hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_insert(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), heapam_scan_analyze_next_tuple(), initBloomState(), lazy_scan_heap(), lazy_scan_new_or_empty(), lazy_vacuum_heap_rel(), log_newpage_range(), moveLeafs(), nextval_internal(), palloc_btree_page(), pg_sequence_last_value(), pg_visibility(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), ResetSequence(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), ScanSourceDatabasePgClass(), seq_redo(), SequenceChangePersistence(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistUpdateMetaPage(), spgMatchNodeAction(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), statapprox_heap(), verify_heapam(), verifyBackupPageConsistency(), visibilitymap_prepare_truncate(), writeListPage(), xlog_redo(), and XLogRecordPageWithFreeSpace().

Variable Documentation

◆ backend_flush_after

PGDLLIMPORT int backend_flush_after
extern

Definition at line 162 of file bufmgr.c.

Referenced by InitBufferPool().

◆ bgwriter_flush_after

PGDLLIMPORT int bgwriter_flush_after
extern

Definition at line 161 of file bufmgr.c.

Referenced by BackgroundWriterMain().

◆ bgwriter_lru_maxpages

PGDLLIMPORT int bgwriter_lru_maxpages
extern

Definition at line 137 of file bufmgr.c.

Referenced by BgBufferSync().

◆ bgwriter_lru_multiplier

PGDLLIMPORT double bgwriter_lru_multiplier
extern

Definition at line 138 of file bufmgr.c.

Referenced by BgBufferSync().

◆ BufferBlocks

PGDLLIMPORT char* BufferBlocks
extern

Definition at line 22 of file buf_init.c.

Referenced by BufferGetBlock(), and InitBufferPool().

◆ checkpoint_flush_after

PGDLLIMPORT int checkpoint_flush_after
extern

Definition at line 160 of file bufmgr.c.

Referenced by BufferSync().

◆ effective_io_concurrency

PGDLLIMPORT int effective_io_concurrency
extern

Definition at line 147 of file bufmgr.c.

Referenced by get_tablespace_io_concurrency(), and tablespace_reloptions().

◆ LocalBufferBlockPointers

PGDLLIMPORT Block* LocalBufferBlockPointers
extern

Definition at line 46 of file localbuf.c.

Referenced by BufferGetBlock(), and InitLocalBuffers().

◆ LocalRefCount

◆ maintenance_io_concurrency

◆ NBuffers

◆ NLocBuffer

◆ track_io_timing

◆ zero_damaged_pages

PGDLLIMPORT bool zero_damaged_pages
extern

Definition at line 136 of file bufmgr.c.

Referenced by mdreadv(), and ReadBuffer_common().