PostgreSQL Source Code  git master
bufmgr.h File Reference
#include "storage/block.h"
#include "storage/buf.h"
#include "storage/bufpage.h"
#include "storage/relfilelocator.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
Include dependency graph for bufmgr.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  PrefetchBufferResult
 
struct  BufferManagerRelation
 

Macros

#define BMR_REL(p_rel)   ((BufferManagerRelation){.rel = p_rel})
 
#define BMR_SMGR(p_smgr, p_relpersistence)   ((BufferManagerRelation){.smgr = p_smgr, .relpersistence = p_relpersistence})
 
#define DEFAULT_EFFECTIVE_IO_CONCURRENCY   0
 
#define DEFAULT_MAINTENANCE_IO_CONCURRENCY   0
 
#define MAX_IO_CONCURRENCY   1000
 
#define P_NEW   InvalidBlockNumber /* grow the file to get a new page */
 
#define BUFFER_LOCK_UNLOCK   0
 
#define BUFFER_LOCK_SHARE   1
 
#define BUFFER_LOCK_EXCLUSIVE   2
 
#define RelationGetNumberOfBlocks(reln)    RelationGetNumberOfBlocksInFork(reln, MAIN_FORKNUM)
 

Typedefs

typedef void * Block
 
typedef enum BufferAccessStrategyType BufferAccessStrategyType
 
typedef struct PrefetchBufferResult PrefetchBufferResult
 
typedef enum ExtendBufferedFlags ExtendBufferedFlags
 
typedef struct BufferManagerRelation BufferManagerRelation
 

Enumerations

enum  BufferAccessStrategyType { BAS_NORMAL , BAS_BULKREAD , BAS_BULKWRITE , BAS_VACUUM }
 
enum  ReadBufferMode {
  RBM_NORMAL , RBM_ZERO_AND_LOCK , RBM_ZERO_AND_CLEANUP_LOCK , RBM_ZERO_ON_ERROR ,
  RBM_NORMAL_NO_LOG
}
 
enum  ExtendBufferedFlags {
  EB_SKIP_EXTENSION_LOCK = (1 << 0) , EB_PERFORMING_RECOVERY = (1 << 1) , EB_CREATE_FORK_IF_NEEDED = (1 << 2) , EB_LOCK_FIRST = (1 << 3) ,
  EB_CLEAR_SIZE_CACHE = (1 << 4) , EB_LOCK_TARGET = (1 << 5)
}
 

Functions

PrefetchBufferResult PrefetchSharedBuffer (struct SMgrRelationData *smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
 
PrefetchBufferResult PrefetchBuffer (Relation reln, ForkNumber forkNum, BlockNumber blockNum)
 
bool ReadRecentBuffer (RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum, Buffer recent_buffer)
 
Buffer ReadBuffer (Relation reln, BlockNumber blockNum)
 
Buffer ReadBufferExtended (Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
 
Buffer ReadBufferWithoutRelcache (RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool permanent)
 
void ReleaseBuffer (Buffer buffer)
 
void UnlockReleaseBuffer (Buffer buffer)
 
bool BufferIsExclusiveLocked (Buffer buffer)
 
bool BufferIsDirty (Buffer buffer)
 
void MarkBufferDirty (Buffer buffer)
 
void IncrBufferRefCount (Buffer buffer)
 
void CheckBufferIsPinnedOnce (Buffer buffer)
 
Buffer ReleaseAndReadBuffer (Buffer buffer, Relation relation, BlockNumber blockNum)
 
Buffer ExtendBufferedRel (BufferManagerRelation bmr, ForkNumber forkNum, BufferAccessStrategy strategy, uint32 flags)
 
BlockNumber ExtendBufferedRelBy (BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, uint32 extend_by, Buffer *buffers, uint32 *extended_by)
 
Buffer ExtendBufferedRelTo (BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, BlockNumber extend_to, ReadBufferMode mode)
 
void InitBufferPoolAccess (void)
 
void AtEOXact_Buffers (bool isCommit)
 
char * DebugPrintBufferRefcount (Buffer buffer)
 
void CheckPointBuffers (int flags)
 
BlockNumber BufferGetBlockNumber (Buffer buffer)
 
BlockNumber RelationGetNumberOfBlocksInFork (Relation relation, ForkNumber forkNum)
 
void FlushOneBuffer (Buffer buffer)
 
void FlushRelationBuffers (Relation rel)
 
void FlushRelationsAllBuffers (struct SMgrRelationData **smgrs, int nrels)
 
void CreateAndCopyRelationData (RelFileLocator src_rlocator, RelFileLocator dst_rlocator, bool permanent)
 
void FlushDatabaseBuffers (Oid dbid)
 
void DropRelationBuffers (struct SMgrRelationData *smgr_reln, ForkNumber *forkNum, int nforks, BlockNumber *firstDelBlock)
 
void DropRelationsAllBuffers (struct SMgrRelationData **smgr_reln, int nlocators)
 
void DropDatabaseBuffers (Oid dbid)
 
bool BufferIsPermanent (Buffer buffer)
 
XLogRecPtr BufferGetLSNAtomic (Buffer buffer)
 
void BufferGetTag (Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
 
void MarkBufferDirtyHint (Buffer buffer, bool buffer_std)
 
void UnlockBuffers (void)
 
void LockBuffer (Buffer buffer, int mode)
 
bool ConditionalLockBuffer (Buffer buffer)
 
void LockBufferForCleanup (Buffer buffer)
 
bool ConditionalLockBufferForCleanup (Buffer buffer)
 
bool IsBufferCleanupOK (Buffer buffer)
 
bool HoldingBufferPinThatDelaysRecovery (void)
 
bool BgBufferSync (struct WritebackContext *wb_context)
 
void InitBufferPool (void)
 
Size BufferShmemSize (void)
 
void AtProcExit_LocalBuffers (void)
 
BufferAccessStrategy GetAccessStrategy (BufferAccessStrategyType btype)
 
BufferAccessStrategy GetAccessStrategyWithSize (BufferAccessStrategyType btype, int ring_size_kb)
 
int GetAccessStrategyBufferCount (BufferAccessStrategy strategy)
 
void FreeAccessStrategy (BufferAccessStrategy strategy)
 
static bool BufferIsValid (Buffer bufnum)
 
static Block BufferGetBlock (Buffer buffer)
 
static Size BufferGetPageSize (Buffer buffer)
 
static Page BufferGetPage (Buffer buffer)
 

Variables

PGDLLIMPORT int NBuffers
 
PGDLLIMPORT bool zero_damaged_pages
 
PGDLLIMPORT int bgwriter_lru_maxpages
 
PGDLLIMPORT double bgwriter_lru_multiplier
 
PGDLLIMPORT bool track_io_timing
 
PGDLLIMPORT int effective_io_concurrency
 
PGDLLIMPORT int maintenance_io_concurrency
 
PGDLLIMPORT int checkpoint_flush_after
 
PGDLLIMPORT int backend_flush_after
 
PGDLLIMPORT int bgwriter_flush_after
 
PGDLLIMPORT char * BufferBlocks
 
PGDLLIMPORT int NLocBuffer
 
PGDLLIMPORT Block * LocalBufferBlockPointers
 
PGDLLIMPORT int32 * LocalRefCount
 

Macro Definition Documentation

◆ BMR_REL

#define BMR_REL (   p_rel)    ((BufferManagerRelation){.rel = p_rel})

Definition at line 106 of file bufmgr.h.

◆ BMR_SMGR

#define BMR_SMGR (   p_smgr,
  p_relpersistence 
)    ((BufferManagerRelation){.smgr = p_smgr, .relpersistence = p_relpersistence})

Definition at line 107 of file bufmgr.h.

◆ BUFFER_LOCK_EXCLUSIVE

#define BUFFER_LOCK_EXCLUSIVE   2

Definition at line 159 of file bufmgr.h.

◆ BUFFER_LOCK_SHARE

#define BUFFER_LOCK_SHARE   1

Definition at line 158 of file bufmgr.h.

◆ BUFFER_LOCK_UNLOCK

#define BUFFER_LOCK_UNLOCK   0

Definition at line 157 of file bufmgr.h.

◆ DEFAULT_EFFECTIVE_IO_CONCURRENCY

#define DEFAULT_EFFECTIVE_IO_CONCURRENCY   0

Definition at line 130 of file bufmgr.h.

◆ DEFAULT_MAINTENANCE_IO_CONCURRENCY

#define DEFAULT_MAINTENANCE_IO_CONCURRENCY   0

Definition at line 131 of file bufmgr.h.

◆ MAX_IO_CONCURRENCY

#define MAX_IO_CONCURRENCY   1000

Definition at line 149 of file bufmgr.h.

◆ P_NEW

#define P_NEW   InvalidBlockNumber /* grow the file to get a new page */

Definition at line 152 of file bufmgr.h.

◆ RelationGetNumberOfBlocks

#define RelationGetNumberOfBlocks (   reln)     RelationGetNumberOfBlocksInFork(reln, MAIN_FORKNUM)

Definition at line 229 of file bufmgr.h.

Typedef Documentation

◆ Block

typedef void* Block

Definition at line 24 of file bufmgr.h.

◆ BufferAccessStrategyType

◆ BufferManagerRelation

◆ ExtendBufferedFlags

◆ PrefetchBufferResult

Enumeration Type Documentation

◆ BufferAccessStrategyType

Enumerator
BAS_NORMAL 
BAS_BULKREAD 
BAS_BULKWRITE 
BAS_VACUUM 

Definition at line 32 of file bufmgr.h.

33 {
34  BAS_NORMAL, /* Normal random access */
35  BAS_BULKREAD, /* Large read-only scan (hint bit updates are
36  * ok) */
37  BAS_BULKWRITE, /* Large multi-block write (e.g. COPY IN) */
38  BAS_VACUUM, /* VACUUM */
BufferAccessStrategyType
Definition: bufmgr.h:33
@ BAS_BULKREAD
Definition: bufmgr.h:35
@ BAS_NORMAL
Definition: bufmgr.h:34
@ BAS_VACUUM
Definition: bufmgr.h:38
@ BAS_BULKWRITE
Definition: bufmgr.h:37

◆ ExtendBufferedFlags

Enumerator
EB_SKIP_EXTENSION_LOCK 
EB_PERFORMING_RECOVERY 
EB_CREATE_FORK_IF_NEEDED 
EB_LOCK_FIRST 
EB_CLEAR_SIZE_CACHE 
EB_LOCK_TARGET 

Definition at line 66 of file bufmgr.h.

67 {
68  /*
69  * Don't acquire extension lock. This is safe only if the relation isn't
70  * shared, an access exclusive lock is held or if this is the startup
71  * process.
72  */
73  EB_SKIP_EXTENSION_LOCK = (1 << 0),
74 
75  /* Is this extension part of recovery? */
76  EB_PERFORMING_RECOVERY = (1 << 1),
77 
78  /*
79  * Should the fork be created if it does not currently exist? This likely
80  * only ever makes sense for relation forks.
81  */
82  EB_CREATE_FORK_IF_NEEDED = (1 << 2),
83 
84  /* Should the first (possibly only) return buffer be returned locked? */
85  EB_LOCK_FIRST = (1 << 3),
86 
87  /* Should the smgr size cache be cleared? */
88  EB_CLEAR_SIZE_CACHE = (1 << 4),
89 
90  /* internal flags follow */
91  EB_LOCK_TARGET = (1 << 5),
ExtendBufferedFlags
Definition: bufmgr.h:67
@ EB_LOCK_TARGET
Definition: bufmgr.h:91
@ EB_CLEAR_SIZE_CACHE
Definition: bufmgr.h:88
@ EB_PERFORMING_RECOVERY
Definition: bufmgr.h:76
@ EB_CREATE_FORK_IF_NEEDED
Definition: bufmgr.h:82
@ EB_SKIP_EXTENSION_LOCK
Definition: bufmgr.h:73
@ EB_LOCK_FIRST
Definition: bufmgr.h:85

◆ ReadBufferMode

Enumerator
RBM_NORMAL 
RBM_ZERO_AND_LOCK 
RBM_ZERO_AND_CLEANUP_LOCK 
RBM_ZERO_ON_ERROR 
RBM_NORMAL_NO_LOG 

Definition at line 42 of file bufmgr.h.

43 {
44  RBM_NORMAL, /* Normal read */
45  RBM_ZERO_AND_LOCK, /* Don't read from disk, caller will
46  * initialize. Also locks the page. */
47  RBM_ZERO_AND_CLEANUP_LOCK, /* Like RBM_ZERO_AND_LOCK, but locks the page
48  * in "cleanup" mode */
49  RBM_ZERO_ON_ERROR, /* Read, but return an all-zeros page on error */
50  RBM_NORMAL_NO_LOG, /* Don't log page as invalid during WAL
51  * replay; otherwise same as RBM_NORMAL */
ReadBufferMode
Definition: bufmgr.h:43
@ RBM_ZERO_ON_ERROR
Definition: bufmgr.h:49
@ RBM_ZERO_AND_CLEANUP_LOCK
Definition: bufmgr.h:47
@ RBM_ZERO_AND_LOCK
Definition: bufmgr.h:45
@ RBM_NORMAL
Definition: bufmgr.h:44
@ RBM_NORMAL_NO_LOG
Definition: bufmgr.h:50

Function Documentation

◆ AtEOXact_Buffers()

void AtEOXact_Buffers ( bool  isCommit)

Definition at line 3212 of file bufmgr.c.

3213 {
3214  CheckForBufferLeaks();
3215 
3216  AtEOXact_LocalBuffers(isCommit);
3217 
3218  Assert(PrivateRefCountOverflowed == 0);
3219 }
static void CheckForBufferLeaks(void)
Definition: bufmgr.c:3272
static int32 PrivateRefCountOverflowed
Definition: bufmgr.c:198
Assert(fmt[strlen(fmt) - 1] !='\n')
void AtEOXact_LocalBuffers(bool isCommit)
Definition: localbuf.c:819

References Assert(), AtEOXact_LocalBuffers(), CheckForBufferLeaks(), and PrivateRefCountOverflowed.

Referenced by AbortTransaction(), BackgroundWriterMain(), CheckpointerMain(), CommitTransaction(), PrepareTransaction(), and WalWriterMain().

◆ AtProcExit_LocalBuffers()

void AtProcExit_LocalBuffers ( void  )

Definition at line 830 of file localbuf.c.

831 {
832  /*
833  * We shouldn't be holding any remaining pins; if we are, and assertions
834  * aren't enabled, we'll fail later in DropRelationBuffers while trying to
835  * drop the temp rels.
836  */
837  CheckForLocalBufferLeaks();
838 }
static void CheckForLocalBufferLeaks(void)
Definition: localbuf.c:786

References CheckForLocalBufferLeaks().

Referenced by AtProcExit_Buffers().

◆ BgBufferSync()

bool BgBufferSync ( struct WritebackContext *  wb_context)

Definition at line 2841 of file bufmgr.c.

2842 {
2843  /* info obtained from freelist.c */
2844  int strategy_buf_id;
2845  uint32 strategy_passes;
2846  uint32 recent_alloc;
2847 
2848  /*
2849  * Information saved between calls so we can determine the strategy
2850  * point's advance rate and avoid scanning already-cleaned buffers.
2851  */
2852  static bool saved_info_valid = false;
2853  static int prev_strategy_buf_id;
2854  static uint32 prev_strategy_passes;
2855  static int next_to_clean;
2856  static uint32 next_passes;
2857 
2858  /* Moving averages of allocation rate and clean-buffer density */
2859  static float smoothed_alloc = 0;
2860  static float smoothed_density = 10.0;
2861 
2862  /* Potentially these could be tunables, but for now, not */
2863  float smoothing_samples = 16;
2864  float scan_whole_pool_milliseconds = 120000.0;
2865 
2866  /* Used to compute how far we scan ahead */
2867  long strategy_delta;
2868  int bufs_to_lap;
2869  int bufs_ahead;
2870  float scans_per_alloc;
2871  int reusable_buffers_est;
2872  int upcoming_alloc_est;
2873  int min_scan_buffers;
2874 
2875  /* Variables for the scanning loop proper */
2876  int num_to_scan;
2877  int num_written;
2878  int reusable_buffers;
2879 
2880  /* Variables for final smoothed_density update */
2881  long new_strategy_delta;
2882  uint32 new_recent_alloc;
2883 
2884  /*
2885  * Find out where the freelist clock sweep currently is, and how many
2886  * buffer allocations have happened since our last call.
2887  */
2888  strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
2889 
2890  /* Report buffer alloc counts to pgstat */
2891  PendingBgWriterStats.buf_alloc += recent_alloc;
2892 
2893  /*
2894  * If we're not running the LRU scan, just stop after doing the stats
2895  * stuff. We mark the saved state invalid so that we can recover sanely
2896  * if LRU scan is turned back on later.
2897  */
2898  if (bgwriter_lru_maxpages <= 0)
2899  {
2900  saved_info_valid = false;
2901  return true;
2902  }
2903 
2904  /*
2905  * Compute strategy_delta = how many buffers have been scanned by the
2906  * clock sweep since last time. If first time through, assume none. Then
2907  * see if we are still ahead of the clock sweep, and if so, how many
2908  * buffers we could scan before we'd catch up with it and "lap" it. Note:
2909  * weird-looking coding of xxx_passes comparisons are to avoid bogus
2910  * behavior when the passes counts wrap around.
2911  */
2912  if (saved_info_valid)
2913  {
2914  int32 passes_delta = strategy_passes - prev_strategy_passes;
2915 
2916  strategy_delta = strategy_buf_id - prev_strategy_buf_id;
2917  strategy_delta += (long) passes_delta * NBuffers;
2918 
2919  Assert(strategy_delta >= 0);
2920 
2921  if ((int32) (next_passes - strategy_passes) > 0)
2922  {
2923  /* we're one pass ahead of the strategy point */
2924  bufs_to_lap = strategy_buf_id - next_to_clean;
2925 #ifdef BGW_DEBUG
2926  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2927  next_passes, next_to_clean,
2928  strategy_passes, strategy_buf_id,
2929  strategy_delta, bufs_to_lap);
2930 #endif
2931  }
2932  else if (next_passes == strategy_passes &&
2933  next_to_clean >= strategy_buf_id)
2934  {
2935  /* on same pass, but ahead or at least not behind */
2936  bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
2937 #ifdef BGW_DEBUG
2938  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2939  next_passes, next_to_clean,
2940  strategy_passes, strategy_buf_id,
2941  strategy_delta, bufs_to_lap);
2942 #endif
2943  }
2944  else
2945  {
2946  /*
2947  * We're behind, so skip forward to the strategy point and start
2948  * cleaning from there.
2949  */
2950 #ifdef BGW_DEBUG
2951  elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
2952  next_passes, next_to_clean,
2953  strategy_passes, strategy_buf_id,
2954  strategy_delta);
2955 #endif
2956  next_to_clean = strategy_buf_id;
2957  next_passes = strategy_passes;
2958  bufs_to_lap = NBuffers;
2959  }
2960  }
2961  else
2962  {
2963  /*
2964  * Initializing at startup or after LRU scanning had been off. Always
2965  * start at the strategy point.
2966  */
2967 #ifdef BGW_DEBUG
2968  elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
2969  strategy_passes, strategy_buf_id);
2970 #endif
2971  strategy_delta = 0;
2972  next_to_clean = strategy_buf_id;
2973  next_passes = strategy_passes;
2974  bufs_to_lap = NBuffers;
2975  }
2976 
2977  /* Update saved info for next time */
2978  prev_strategy_buf_id = strategy_buf_id;
2979  prev_strategy_passes = strategy_passes;
2980  saved_info_valid = true;
2981 
2982  /*
2983  * Compute how many buffers had to be scanned for each new allocation, ie,
2984  * 1/density of reusable buffers, and track a moving average of that.
2985  *
2986  * If the strategy point didn't move, we don't update the density estimate
2987  */
2988  if (strategy_delta > 0 && recent_alloc > 0)
2989  {
2990  scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
2991  smoothed_density += (scans_per_alloc - smoothed_density) /
2992  smoothing_samples;
2993  }
2994 
2995  /*
2996  * Estimate how many reusable buffers there are between the current
2997  * strategy point and where we've scanned ahead to, based on the smoothed
2998  * density estimate.
2999  */
3000  bufs_ahead = NBuffers - bufs_to_lap;
3001  reusable_buffers_est = (float) bufs_ahead / smoothed_density;
3002 
3003  /*
3004  * Track a moving average of recent buffer allocations. Here, rather than
3005  * a true average we want a fast-attack, slow-decline behavior: we
3006  * immediately follow any increase.
3007  */
3008  if (smoothed_alloc <= (float) recent_alloc)
3009  smoothed_alloc = recent_alloc;
3010  else
3011  smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
3012  smoothing_samples;
3013 
3014  /* Scale the estimate by a GUC to allow more aggressive tuning. */
3015  upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
3016 
3017  /*
3018  * If recent_alloc remains at zero for many cycles, smoothed_alloc will
3019  * eventually underflow to zero, and the underflows produce annoying
3020  * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
3021  * zero, there's no point in tracking smaller and smaller values of
3022  * smoothed_alloc, so just reset it to exactly zero to avoid this
3023  * syndrome. It will pop back up as soon as recent_alloc increases.
3024  */
3025  if (upcoming_alloc_est == 0)
3026  smoothed_alloc = 0;
3027 
3028  /*
3029  * Even in cases where there's been little or no buffer allocation
3030  * activity, we want to make a small amount of progress through the buffer
3031  * cache so that as many reusable buffers as possible are clean after an
3032  * idle period.
3033  *
3034  * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
3035  * the BGW will be called during the scan_whole_pool time; slice the
3036  * buffer pool into that many sections.
3037  */
3038  min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
3039 
3040  if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
3041  {
3042 #ifdef BGW_DEBUG
3043  elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
3044  upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
3045 #endif
3046  upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
3047  }
3048 
3049  /*
3050  * Now write out dirty reusable buffers, working forward from the
3051  * next_to_clean point, until we have lapped the strategy scan, or cleaned
3052  * enough buffers to match our estimate of the next cycle's allocation
3053  * requirements, or hit the bgwriter_lru_maxpages limit.
3054  */
3055 
3056  num_to_scan = bufs_to_lap;
3057  num_written = 0;
3058  reusable_buffers = reusable_buffers_est;
3059 
3060  /* Execute the LRU scan */
3061  while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
3062  {
3063  int sync_state = SyncOneBuffer(next_to_clean, true,
3064  wb_context);
3065 
3066  if (++next_to_clean >= NBuffers)
3067  {
3068  next_to_clean = 0;
3069  next_passes++;
3070  }
3071  num_to_scan--;
3072 
3073  if (sync_state & BUF_WRITTEN)
3074  {
3075  reusable_buffers++;
3076  if (++num_written >= bgwriter_lru_maxpages)
3077  {
3078  PendingBgWriterStats.maxwritten_clean++;
3079  break;
3080  }
3081  }
3082  else if (sync_state & BUF_REUSABLE)
3083  reusable_buffers++;
3084  }
3085 
3086  PendingBgWriterStats.buf_written_clean += num_written;
3087 
3088 #ifdef BGW_DEBUG
3089  elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
3090  recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
3091  smoothed_density, reusable_buffers_est, upcoming_alloc_est,
3092  bufs_to_lap - num_to_scan,
3093  num_written,
3094  reusable_buffers - reusable_buffers_est);
3095 #endif
3096 
3097  /*
3098  * Consider the above scan as being like a new allocation scan.
3099  * Characterize its density and update the smoothed one based on it. This
3100  * effectively halves the moving average period in cases where both the
3101  * strategy and the background writer are doing some useful scanning,
3102  * which is helpful because a long memory isn't as desirable on the
3103  * density estimates.
3104  */
3105  new_strategy_delta = bufs_to_lap - num_to_scan;
3106  new_recent_alloc = reusable_buffers - reusable_buffers_est;
3107  if (new_strategy_delta > 0 && new_recent_alloc > 0)
3108  {
3109  scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
3110  smoothed_density += (scans_per_alloc - smoothed_density) /
3111  smoothing_samples;
3112 
3113 #ifdef BGW_DEBUG
3114  elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
3115  new_recent_alloc, new_strategy_delta,
3116  scans_per_alloc, smoothed_density);
3117 #endif
3118  }
3119 
3120  /* Return true if OK to hibernate */
3121  return (bufs_to_lap == 0 && recent_alloc == 0);
3122 }
int BgWriterDelay
Definition: bgwriter.c:57
#define BUF_REUSABLE
Definition: bufmgr.c:72
double bgwriter_lru_multiplier
Definition: bufmgr.c:137
static int SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
Definition: bufmgr.c:3139
int bgwriter_lru_maxpages
Definition: bufmgr.c:136
#define BUF_WRITTEN
Definition: bufmgr.c:71
unsigned int uint32
Definition: c.h:493
signed int int32
Definition: c.h:481
#define DEBUG2
Definition: elog.h:29
#define DEBUG1
Definition: elog.h:30
#define elog(elevel,...)
Definition: elog.h:224
int StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
Definition: freelist.c:394
int NBuffers
Definition: globals.c:139
PgStat_BgWriterStats PendingBgWriterStats
PgStat_Counter buf_written_clean
Definition: pgstat.h:255
PgStat_Counter maxwritten_clean
Definition: pgstat.h:256
PgStat_Counter buf_alloc
Definition: pgstat.h:257

References Assert(), bgwriter_lru_maxpages, bgwriter_lru_multiplier, BgWriterDelay, PgStat_BgWriterStats::buf_alloc, BUF_REUSABLE, BUF_WRITTEN, PgStat_BgWriterStats::buf_written_clean, DEBUG1, DEBUG2, elog, PgStat_BgWriterStats::maxwritten_clean, NBuffers, PendingBgWriterStats, StrategySyncStart(), and SyncOneBuffer().

Referenced by BackgroundWriterMain().

◆ BufferGetBlock()

static Block BufferGetBlock ( Buffer  buffer)
inlinestatic

Definition at line 317 of file bufmgr.h.

318 {
319  Assert(BufferIsValid(buffer));
320 
321  if (BufferIsLocal(buffer))
322  return LocalBufferBlockPointers[-buffer - 1];
323  else
324  return (Block) (BufferBlocks + ((Size) (buffer - 1)) * BLCKSZ);
325 }
#define BufferIsLocal(buffer)
Definition: buf.h:37
PGDLLIMPORT Block * LocalBufferBlockPointers
Definition: localbuf.c:45
void * Block
Definition: bufmgr.h:24
PGDLLIMPORT char * BufferBlocks
Definition: buf_init.c:22
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:301
size_t Size
Definition: c.h:592

References Assert(), BufferBlocks, BufferIsLocal, BufferIsValid(), and LocalBufferBlockPointers.

Referenced by BufferGetPage(), and XLogSaveBufferForHint().

◆ BufferGetBlockNumber()

BlockNumber BufferGetBlockNumber ( Buffer  buffer)

Definition at line 3377 of file bufmgr.c.

3378 {
3379  BufferDesc *bufHdr;
3380 
3381  Assert(BufferIsPinned(buffer));
3382 
3383  if (BufferIsLocal(buffer))
3384  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
3385  else
3386  bufHdr = GetBufferDescriptor(buffer - 1);
3387 
3388  /* pinned, so OK to read tag without spinlock */
3389  return bufHdr->tag.blockNum;
3390 }
static BufferDesc * GetLocalBufferDescriptor(uint32 id)
static BufferDesc * GetBufferDescriptor(uint32 id)
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:462
BufferTag tag
BlockNumber blockNum
Definition: buf_internals.h:98

References Assert(), buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), GetLocalBufferDescriptor(), and BufferDesc::tag.

Referenced by _bt_binsrch_insert(), _bt_bottomupdel_pass(), _bt_check_unique(), _bt_checkpage(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_doinsert(), _bt_endpoint(), _bt_finish_split(), _bt_first(), _bt_getroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newlevel(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_search(), _bt_simpledel_pass(), _bt_split(), _bt_unlink_halfdead_page(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_first(), _hash_freeovflpage(), _hash_getnewbuf(), _hash_readnext(), _hash_readpage(), _hash_splitbucket(), allocNewBuffer(), blinsert(), BloomInitMetapage(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_xlog_insert_update(), brinbuild(), brinGetTupleForHeapBlock(), collectMatchBitmap(), createPostingTree(), dataBeginPlaceToPageLeaf(), dataPrepareDownlink(), doPickSplit(), entryPrepareDownlink(), fill_seq_fork_with_data(), ginEntryInsert(), ginFindParents(), ginFinishSplit(), ginPlaceToPage(), ginRedoDeleteListPages(), ginRedoUpdateMetapage(), ginScanToDelete(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistformdownlink(), gistinserttuples(), gistMemorizeAllDownlinks(), gistplacetopage(), gistRelocateBuildBuffersOnSplit(), gistScanPage(), hash_xlog_add_ovfl_page(), heap_delete(), heap_hot_search_buffer(), heap_insert(), heap_multi_insert(), heap_page_is_all_visible(), heap_page_prune(), heap_prune_chain(), heap_update(), heap_xlog_confirm(), heap_xlog_lock(), index_compute_xid_horizon_for_tuples(), lazy_scan_noprune(), lazy_scan_prune(), makeSublist(), moveLeafs(), moveRightIfItNeeded(), pgstathashindex(), ReadBufferBI(), RelationAddBlocks(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_get_buffer(), revmap_physical_extend(), ScanSourceDatabasePgClassPage(), spgAddNodeAction(), spgbuild(), spgdoinsert(), 
SpGistSetLastUsedPage(), spgSplitNodeAction(), spgWalk(), startScanEntry(), terminate_brin_buildstate(), vacuumLeafPage(), visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), and visibilitymap_set().

◆ BufferGetLSNAtomic()

XLogRecPtr BufferGetLSNAtomic ( Buffer  buffer)

Definition at line 3638 of file bufmgr.c.

3639 {
3640  BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
3641  char *page = BufferGetPage(buffer);
3642  XLogRecPtr lsn;
3643  uint32 buf_state;
3644 
3645  /*
3646  * If we don't need locking for correctness, fastpath out.
3647  */
3648  if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
3649  return PageGetLSN(page);
3650 
3651  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3652  Assert(BufferIsValid(buffer));
3653  Assert(BufferIsPinned(buffer));
3654 
3655  buf_state = LockBufHdr(bufHdr);
3656  lsn = PageGetLSN(page);
3657  UnlockBufHdr(bufHdr, buf_state);
3658 
3659  return lsn;
3660 }
static void UnlockBufHdr(BufferDesc *desc, uint32 buf_state)
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:5390
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:350
static XLogRecPtr PageGetLSN(Page page)
Definition: bufpage.h:383
#define XLogHintBitIsNeeded()
Definition: xlog.h:118
uint64 XLogRecPtr
Definition: xlogdefs.h:21

References Assert(), PrivateRefCountEntry::buffer, BufferGetPage(), BufferIsLocal, BufferIsPinned, BufferIsValid(), GetBufferDescriptor(), LockBufHdr(), PageGetLSN(), UnlockBufHdr(), and XLogHintBitIsNeeded.

Referenced by _bt_killitems(), _bt_readpage(), gistdoinsert(), gistFindPath(), gistkillitems(), gistScanPage(), SetHintBits(), and XLogSaveBufferForHint().

◆ BufferGetPage()

static Page BufferGetPage ( Buffer  buffer)
inlinestatic

Definition at line 350 of file bufmgr.h.

351 {
352  return (Page) BufferGetBlock(buffer);
353 }
static Block BufferGetBlock(Buffer buffer)
Definition: bufmgr.h:317
Pointer Page
Definition: bufpage.h:78

References BufferGetBlock().

Referenced by _bt_allocbuf(), _bt_binsrch(), _bt_binsrch_insert(), _bt_bottomupdel_pass(), _bt_check_unique(), _bt_checkpage(), _bt_clear_incomplete_split(), _bt_conditionallockbuf(), _bt_dedup_pass(), _bt_delete_or_dedup_one_page(), _bt_delitems_delete(), _bt_delitems_delete_check(), _bt_delitems_vacuum(), _bt_endpoint(), _bt_findinsertloc(), _bt_finish_split(), _bt_get_endpoint(), _bt_getmeta(), _bt_getroot(), _bt_getstackbuf(), _bt_gettrueroot(), _bt_insert_parent(), _bt_insertonpg(), _bt_killitems(), _bt_leftsib_splitflag(), _bt_lock_subtree_parent(), _bt_lockbuf(), _bt_mark_page_halfdead(), _bt_moveright(), _bt_newlevel(), _bt_pagedel(), _bt_readnextpage(), _bt_readpage(), _bt_restore_meta(), _bt_rightsib_halfdeadflag(), _bt_search(), _bt_search_insert(), _bt_set_cleanup_info(), _bt_simpledel_pass(), _bt_split(), _bt_stepright(), _bt_unlink_halfdead_page(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _bt_vacuum_needs_cleanup(), _bt_walk_left(), _hash_addovflpage(), _hash_checkpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_get_newblock_from_oldbucket(), _hash_get_oldblock_from_newbucket(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_getinitbuf(), _hash_getnewbuf(), _hash_init(), _hash_init_metabuffer(), _hash_initbitmapbuffer(), _hash_initbuf(), _hash_kill_items(), _hash_pgaddmultitup(), _hash_pgaddtup(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), allocNewBuffer(), blgetbitmap(), blinsert(), BloomNewBuffer(), blvacuumcleanup(), brin_can_do_samepage_update(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_initialize_empty_new_buffer(), brin_page_cleanup(), brin_start_evacuating_page(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), 
brinbuildempty(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinSetHeapBlockItemptr(), bt_metap(), bt_page_items_internal(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), btvacuumpage(), BufferGetLSNAtomic(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), collectMatchesForHeapRow(), count_nondeletable_pages(), createPostingTree(), dataBeginPlaceToPage(), dataBeginPlaceToPageInternal(), dataBeginPlaceToPageLeaf(), dataExecPlaceToPage(), dataExecPlaceToPageInternal(), dataLocateItem(), dataPlaceToPageLeafRecompress(), dataPrepareDownlink(), dataSplitPageInternal(), do_setval(), doPickSplit(), entryExecPlaceToPage(), entryIsEnoughSpace(), entryLoadMoreItems(), entryLocateEntry(), entryLocateLeafEntry(), entryPrepareDownlink(), entrySplitPage(), fill_seq_fork_with_data(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), generic_redo(), GenericXLogFinish(), GenericXLogRegisterBuffer(), get_raw_page_internal(), GetBTPageStatistics(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginbulkdelete(), ginDeletePage(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishOldSplit(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), GinInitBuffer(), GinInitMetabuffer(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertData(), ginRedoInsertEntry(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumEntryPage(), 
ginVacuumPostingTreeLeaf(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistbuild(), gistcheckpage(), gistdeletepage(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), GISTInitBuffer(), gistkillitems(), gistMemorizeAllDownlinks(), gistNewBuffer(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hash_bitmap_info(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_freeze_execute_prepared(), heap_get_latest_tid(), heap_hot_search_buffer(), heap_index_delete_tuples(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_is_all_visible(), heap_page_prune(), heap_page_prune_execute(), heap_page_prune_opt(), heap_prune_chain(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_validate_scan(), heapam_scan_analyze_next_tuple(), heapam_scan_bitmap_next_block(), heapam_scan_bitmap_next_tuple(), heapam_scan_sample_next_tuple(), heapgetpage(), heapgettup_continue_page(), heapgettup_pagemode(), heapgettup_start_page(), 
index_compute_xid_horizon_for_tuples(), initBloomState(), lazy_scan_heap(), lazy_vacuum_heap_page(), lazy_vacuum_heap_rel(), log_heap_update(), log_newpage_buffer(), log_newpage_range(), log_split_page(), MarkBufferDirtyHint(), moveLeafs(), moveRightIfItNeeded(), nextval_internal(), palloc_btree_page(), pg_visibility(), pgstat_btree_page(), pgstat_gist_page(), pgstat_hash_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationAddBlocks(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), RelationPutHeapTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), ScanSourceDatabasePgClass(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistInitBuffer(), SpGistNewBuffer(), SpGistSetLastUsedPage(), SpGistUpdateMetaPage(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), terminate_brin_buildstate(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), verify_heapam(), verifyBackupPageConsistency(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), writeListPage(), XLogCheckBufferNeedsBackup(), XLogReadBufferExtended(), XLogReadBufferForRedoExtended(), XLogRecordPageWithFreeSpace(), XLogRegisterBuffer(), XLogSaveBufferForHint(), and xlogVacuumPage().

◆ BufferGetPageSize()

◆ BufferGetTag()

void BufferGetTag ( Buffer  buffer,
RelFileLocator rlocator,
ForkNumber forknum,
BlockNumber blknum 
)

Definition at line 3398 of file bufmgr.c.

3400 {
3401  BufferDesc *bufHdr;
3402 
3403  /* Do the same checks as BufferGetBlockNumber. */
3404  Assert(BufferIsPinned(buffer));
3405 
3406  if (BufferIsLocal(buffer))
3407  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
3408  else
3409  bufHdr = GetBufferDescriptor(buffer - 1);
3410 
3411  /* pinned, so OK to read tag without spinlock */
3412  *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
3413  *forknum = BufTagGetForkNum(&bufHdr->tag);
3414  *blknum = bufHdr->tag.blockNum;
3415 }
static ForkNumber BufTagGetForkNum(const BufferTag *tag)
static RelFileLocator BufTagGetRelFileLocator(const BufferTag *tag)

References Assert(), buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufTagGetForkNum(), BufTagGetRelFileLocator(), GetBufferDescriptor(), GetLocalBufferDescriptor(), and BufferDesc::tag.

Referenced by fsm_search_avail(), ginRedoInsertEntry(), log_newpage_buffer(), ResolveCminCmaxDuringDecoding(), XLogRegisterBuffer(), and XLogSaveBufferForHint().

◆ BufferIsDirty()

bool BufferIsDirty ( Buffer  buffer)

Definition at line 2157 of file bufmgr.c.

2158 {
2159  BufferDesc *bufHdr;
2160 
2161  if (BufferIsLocal(buffer))
2162  {
2163  int bufid = -buffer - 1;
2164 
2165  bufHdr = GetLocalBufferDescriptor(bufid);
2166  }
2167  else
2168  {
2169  bufHdr = GetBufferDescriptor(buffer - 1);
2170  }
2171 
2172  Assert(BufferIsPinned(buffer));
2173  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2174  LW_EXCLUSIVE));
2175 
2176  return pg_atomic_read_u32(&bufHdr->state) & BM_DIRTY;
2177 }
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:234
#define BM_DIRTY
Definition: buf_internals.h:61
static LWLock * BufferDescriptorGetContentLock(const BufferDesc *bdesc)
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1941
@ LW_EXCLUSIVE
Definition: lwlock.h:116
pg_atomic_uint32 state

References Assert(), BM_DIRTY, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), GetLocalBufferDescriptor(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), pg_atomic_read_u32(), and BufferDesc::state.

Referenced by XLogRegisterBuffer().

◆ BufferIsExclusiveLocked()

bool BufferIsExclusiveLocked ( Buffer  buffer)

Definition at line 2128 of file bufmgr.c.

2129 {
2130  BufferDesc *bufHdr;
2131 
2132  if (BufferIsLocal(buffer))
2133  {
2134  int bufid = -buffer - 1;
2135 
2136  bufHdr = GetLocalBufferDescriptor(bufid);
2137  }
2138  else
2139  {
2140  bufHdr = GetBufferDescriptor(buffer - 1);
2141  }
2142 
2143  Assert(BufferIsPinned(buffer));
2144  return LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2145  LW_EXCLUSIVE);
2146 }

References Assert(), PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), GetLocalBufferDescriptor(), LW_EXCLUSIVE, and LWLockHeldByMeInMode().

Referenced by XLogRegisterBuffer().

◆ BufferIsPermanent()

bool BufferIsPermanent ( Buffer  buffer)

Definition at line 3608 of file bufmgr.c.

3609 {
3610  BufferDesc *bufHdr;
3611 
3612  /* Local buffers are used only for temp relations. */
3613  if (BufferIsLocal(buffer))
3614  return false;
3615 
3616  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3617  Assert(BufferIsValid(buffer));
3618  Assert(BufferIsPinned(buffer));
3619 
3620  /*
3621  * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
3622  * need not bother with the buffer header spinlock. Even if someone else
3623  * changes the buffer header state while we're doing this, the state is
3624  * changed atomically, so we'll read the old value or the new value, but
3625  * not random garbage.
3626  */
3627  bufHdr = GetBufferDescriptor(buffer - 1);
3628  return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
3629 }
#define BM_PERMANENT
Definition: buf_internals.h:69

References Assert(), BM_PERMANENT, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufferIsValid(), GetBufferDescriptor(), pg_atomic_read_u32(), and BufferDesc::state.

Referenced by SetHintBits().

◆ BufferIsValid()

static bool BufferIsValid ( Buffer  bufnum)
inlinestatic

Definition at line 301 of file bufmgr.h.

302 {
303  Assert(bufnum <= NBuffers);
304  Assert(bufnum >= -NLocBuffer);
305 
306  return bufnum != InvalidBuffer;
307 }
#define InvalidBuffer
Definition: buf.h:25
PGDLLIMPORT int NBuffers
Definition: globals.c:139
PGDLLIMPORT int NLocBuffer
Definition: localbuf.c:42

References Assert(), InvalidBuffer, NBuffers, and NLocBuffer.

Referenced by _bt_clear_incomplete_split(), _bt_endpoint(), _bt_first(), _bt_get_endpoint(), _bt_insertonpg(), _bt_readpage(), _bt_relandgetbuf(), _bt_search(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_dropscanbuf(), _hash_freeovflpage(), _hash_getbucketbuf_from_hashkey(), _hash_getcachedmetap(), _hash_readnext(), _hash_readpage(), _hash_readprev(), autoprewarm_database_main(), brin_doinsert(), brin_doupdate(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinGetTupleForHeapBlock(), brininsert(), brinsummarize(), bt_recheck_sibling_links(), bt_rootdescend(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), BufferGetBlock(), BufferGetLSNAtomic(), BufferGetPageSize(), BufferIsPermanent(), ConditionalLockBufferForCleanup(), DebugPrintBufferRefcount(), doPickSplit(), entryGetItem(), entryLoadMoreItems(), ExecStoreBufferHeapTuple(), ExecStorePinnedBufferHeapTuple(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_vacuum_page(), generic_redo(), GetPrivateRefCount(), GetPrivateRefCountEntry(), GetRecordedFreeSpace(), GetVisibilityMapPins(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginScanToDelete(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageUpdateRecord(), gistXLogSplit(), gistXLogUpdate(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), 
heap_endscan(), heap_index_delete_tuples(), heap_lock_tuple(), heap_rescan(), heap_update(), heap_vac_scan_next_block(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), heapgettup_continue_page(), heapgettup_pagemode(), heapgettup_start_page(), IsBufferCleanupOK(), lazy_scan_heap(), lazy_vacuum_heap_rel(), log_heap_visible(), MarkBufferDirty(), MarkBufferDirtyHint(), ReadRecentBuffer(), ReleaseAndReadBuffer(), ReleaseBuffer(), ResOwnerReleaseBufferPin(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), statapprox_heap(), tts_buffer_heap_clear(), tts_buffer_heap_copyslot(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), verifyBackupPageConsistency(), visibilitymap_clear(), visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), visibilitymap_pin_ok(), visibilitymap_prepare_truncate(), visibilitymap_set(), XLogPrefetcherNextBlock(), XLogReadBufferExtended(), and XLogReadBufferForRedoExtended().

◆ BufferShmemSize()

Size BufferShmemSize ( void  )

Definition at line 160 of file buf_init.c.

161 {
162  Size size = 0;
163 
164  /* size of buffer descriptors */
165  size = add_size(size, mul_size(NBuffers, sizeof(BufferDescPadded)));
166  /* to allow aligning buffer descriptors */
167  size = add_size(size, PG_CACHE_LINE_SIZE);
168 
169  /* size of data pages, plus alignment padding */
170  size = add_size(size, PG_IO_ALIGN_SIZE);
171  size = add_size(size, mul_size(NBuffers, BLCKSZ));
172 
173  /* size of stuff controlled by freelist.c */
174  size = add_size(size, StrategyShmemSize());
175 
176  /* size of I/O condition variables */
177  size = add_size(size, mul_size(NBuffers,
178  sizeof(ConditionVariableMinimallyPadded)));
179  /* to allow aligning the above */
180  size = add_size(size, PG_CACHE_LINE_SIZE);
181 
182  /* size of checkpoint sort array in bufmgr.c */
183  size = add_size(size, mul_size(NBuffers, sizeof(CkptSortItem)));
184 
185  return size;
186 }
Size StrategyShmemSize(void)
Definition: freelist.c:453
#define PG_IO_ALIGN_SIZE
#define PG_CACHE_LINE_SIZE
Size add_size(Size s1, Size s2)
Definition: shmem.c:493
Size mul_size(Size s1, Size s2)
Definition: shmem.c:510
static pg_noinline void Size size
Definition: slab.c:607

References add_size(), mul_size(), NBuffers, PG_CACHE_LINE_SIZE, PG_IO_ALIGN_SIZE, size, and StrategyShmemSize().

Referenced by CalculateShmemSize().

◆ CheckBufferIsPinnedOnce()

void CheckBufferIsPinnedOnce ( Buffer  buffer)

Definition at line 4842 of file bufmgr.c.

4843 {
4844  if (BufferIsLocal(buffer))
4845  {
4846  if (LocalRefCount[-buffer - 1] != 1)
4847  elog(ERROR, "incorrect local pin count: %d",
4848  LocalRefCount[-buffer - 1]);
4849  }
4850  else
4851  {
4852  if (GetPrivateRefCount(buffer) != 1)
4853  elog(ERROR, "incorrect local pin count: %d",
4854  GetPrivateRefCount(buffer));
4855  }
4856 }
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:404
#define ERROR
Definition: elog.h:39
int32 * LocalRefCount
Definition: localbuf.c:46

References PrivateRefCountEntry::buffer, BufferIsLocal, elog, ERROR, GetPrivateRefCount(), and LocalRefCount.

Referenced by GetVictimBuffer(), and LockBufferForCleanup().

◆ CheckPointBuffers()

void CheckPointBuffers ( int  flags)

Definition at line 3363 of file bufmgr.c.

3364 {
3365  BufferSync(flags);
3366 }
static void BufferSync(int flags)
Definition: bufmgr.c:2565

References BufferSync().

Referenced by CheckPointGuts().

◆ ConditionalLockBuffer()

bool ConditionalLockBuffer ( Buffer  buffer)

Definition at line 4821 of file bufmgr.c.

4822 {
4823  BufferDesc *buf;
4824 
4825  Assert(BufferIsPinned(buffer));
4826  if (BufferIsLocal(buffer))
4827  return true; /* act as though we got it */
4828 
4829  buf = GetBufferDescriptor(buffer - 1);
4830 
4831  return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
4832  LW_EXCLUSIVE);
4833 }
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1343
static char * buf
Definition: pg_test_fsync.c:73

References Assert(), buf, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, GetBufferDescriptor(), LW_EXCLUSIVE, and LWLockConditionalAcquire().

Referenced by _bt_conditionallockbuf(), BloomNewBuffer(), ConditionalLockBufferForCleanup(), GinNewBuffer(), gistNewBuffer(), RelationGetBufferForTuple(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), and SpGistUpdateMetaPage().

◆ ConditionalLockBufferForCleanup()

bool ConditionalLockBufferForCleanup ( Buffer  buffer)

Definition at line 5036 of file bufmgr.c.

5037 {
5038  BufferDesc *bufHdr;
5039  uint32 buf_state,
5040  refcount;
5041 
5042  Assert(BufferIsValid(buffer));
5043 
5044  if (BufferIsLocal(buffer))
5045  {
5046  refcount = LocalRefCount[-buffer - 1];
5047  /* There should be exactly one pin */
5048  Assert(refcount > 0);
5049  if (refcount != 1)
5050  return false;
5051  /* Nobody else to wait for */
5052  return true;
5053  }
5054 
5055  /* There should be exactly one local pin */
5056  refcount = GetPrivateRefCount(buffer);
5057  Assert(refcount);
5058  if (refcount != 1)
5059  return false;
5060 
5061  /* Try to acquire lock */
5062  if (!ConditionalLockBuffer(buffer))
5063  return false;
5064 
5065  bufHdr = GetBufferDescriptor(buffer - 1);
5066  buf_state = LockBufHdr(bufHdr);
5067  refcount = BUF_STATE_GET_REFCOUNT(buf_state);
5068 
5069  Assert(refcount > 0);
5070  if (refcount == 1)
5071  {
5072  /* Successfully acquired exclusive lock with pincount 1 */
5073  UnlockBufHdr(bufHdr, buf_state);
5074  return true;
5075  }
5076 
5077  /* Failed, so release the lock */
5078  UnlockBufHdr(bufHdr, buf_state);
5079  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5080  return false;
5081 }
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:51
bool ConditionalLockBuffer(Buffer buffer)
Definition: bufmgr.c:4821
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4795
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:157

References Assert(), BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsValid(), ConditionalLockBuffer(), GetBufferDescriptor(), GetPrivateRefCount(), LocalRefCount, LockBuffer(), LockBufHdr(), PrivateRefCountEntry::refcount, and UnlockBufHdr().

Referenced by _hash_finish_split(), _hash_getbuf_with_condlock_cleanup(), heap_page_prune_opt(), and lazy_scan_heap().

◆ CreateAndCopyRelationData()

void CreateAndCopyRelationData ( RelFileLocator  src_rlocator,
RelFileLocator  dst_rlocator,
bool  permanent 
)

Definition at line 4434 of file bufmgr.c.

4436 {
4437  char relpersistence;
4438  SMgrRelation src_rel;
4439  SMgrRelation dst_rel;
4440 
4441  /* Set the relpersistence. */
4442  relpersistence = permanent ?
4443  RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED;
4444 
4445  src_rel = smgropen(src_rlocator, INVALID_PROC_NUMBER);
4446  dst_rel = smgropen(dst_rlocator, INVALID_PROC_NUMBER);
4447 
4448  /*
4449  * Create and copy all forks of the relation. During create database we
4450  * have a separate cleanup mechanism which deletes complete database
4451  * directory. Therefore, each individual relation doesn't need to be
4452  * registered for cleanup.
4453  */
4454  RelationCreateStorage(dst_rlocator, relpersistence, false);
4455 
4456  /* copy main fork. */
4457  RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, MAIN_FORKNUM,
4458  permanent);
4459 
4460  /* copy those extra forks that exist */
4461  for (ForkNumber forkNum = MAIN_FORKNUM + 1;
4462  forkNum <= MAX_FORKNUM; forkNum++)
4463  {
4464  if (smgrexists(src_rel, forkNum))
4465  {
4466  smgrcreate(dst_rel, forkNum, false);
4467 
4468  /*
4469  * WAL log creation if the relation is persistent, or this is the
4470  * init fork of an unlogged relation.
4471  */
4472  if (permanent || forkNum == INIT_FORKNUM)
4473  log_smgrcreate(&dst_rlocator, forkNum);
4474 
4475  /* Copy a fork's data, block by block. */
4476  RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, forkNum,
4477  permanent);
4478  }
4479  }
4480 }
static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator, RelFileLocator dstlocator, ForkNumber forkNum, bool permanent)
Definition: bufmgr.c:4343
#define INVALID_PROC_NUMBER
Definition: procnumber.h:26
ForkNumber
Definition: relpath.h:48
@ MAIN_FORKNUM
Definition: relpath.h:50
@ INIT_FORKNUM
Definition: relpath.h:53
#define MAX_FORKNUM
Definition: relpath.h:62
SMgrRelation smgropen(RelFileLocator rlocator, ProcNumber backend)
Definition: smgr.c:198
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:411
bool smgrexists(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:398
SMgrRelation RelationCreateStorage(RelFileLocator rlocator, char relpersistence, bool register_delete)
Definition: storage.c:121
void log_smgrcreate(const RelFileLocator *rlocator, ForkNumber forkNum)
Definition: storage.c:186

References INIT_FORKNUM, INVALID_PROC_NUMBER, log_smgrcreate(), MAIN_FORKNUM, MAX_FORKNUM, RelationCopyStorageUsingBuffer(), RelationCreateStorage(), smgrcreate(), smgrexists(), and smgropen().

Referenced by CreateDatabaseUsingWalLog().

◆ DebugPrintBufferRefcount()

char* DebugPrintBufferRefcount ( Buffer  buffer)

Definition at line 3318 of file bufmgr.c.

3319 {
3320  BufferDesc *buf;
3321  int32 loccount;
3322  char *path;
3323  char *result;
3324  ProcNumber backend;
3325  uint32 buf_state;
3326 
3327  Assert(BufferIsValid(buffer));
3328  if (BufferIsLocal(buffer))
3329  {
3330  buf = GetLocalBufferDescriptor(-buffer - 1);
3331  loccount = LocalRefCount[-buffer - 1];
3332  backend = MyProcNumber;
3333  }
3334  else
3335  {
3336  buf = GetBufferDescriptor(buffer - 1);
3337  loccount = GetPrivateRefCount(buffer);
3338  backend = INVALID_PROC_NUMBER;
3339  }
3340 
3341  /* theoretically we should lock the bufhdr here */
3342  path = relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
3343  BufTagGetForkNum(&buf->tag));
3344  buf_state = pg_atomic_read_u32(&buf->state);
3345 
3346  result = psprintf("[%03d] (rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
3347  buffer, path,
3348  buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
3349  BUF_STATE_GET_REFCOUNT(buf_state), loccount);
3350  pfree(path);
3351  return result;
3352 }
#define BUF_FLAG_MASK
Definition: buf_internals.h:48
ProcNumber MyProcNumber
Definition: globals.c:87
void pfree(void *pointer)
Definition: mcxt.c:1508
int ProcNumber
Definition: procnumber.h:24
char * psprintf(const char *fmt,...)
Definition: psprintf.c:46
#define relpathbackend(rlocator, backend, forknum)
Definition: relpath.h:85

References Assert(), buf, BUF_FLAG_MASK, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsValid(), BufTagGetForkNum(), BufTagGetRelFileLocator(), GetBufferDescriptor(), GetLocalBufferDescriptor(), GetPrivateRefCount(), INVALID_PROC_NUMBER, LocalRefCount, MyProcNumber, pfree(), pg_atomic_read_u32(), psprintf(), and relpathbackend.

Referenced by CheckForBufferLeaks(), CheckForLocalBufferLeaks(), and ResOwnerPrintBufferPin().

◆ DropDatabaseBuffers()

void DropDatabaseBuffers ( Oid  dbid)

Definition at line 4039 of file bufmgr.c.

4040 {
4041  int i;
4042 
4043  /*
4044  * We needn't consider local buffers, since by assumption the target
4045  * database isn't our own.
4046  */
4047 
4048  for (i = 0; i < NBuffers; i++)
4049  {
4050  BufferDesc *bufHdr = GetBufferDescriptor(i);
4051  uint32 buf_state;
4052 
4053  /*
4054  * As in DropRelationBuffers, an unlocked precheck should be safe and
4055  * saves some cycles.
4056  */
4057  if (bufHdr->tag.dbOid != dbid)
4058  continue;
4059 
4060  buf_state = LockBufHdr(bufHdr);
4061  if (bufHdr->tag.dbOid == dbid)
4062  InvalidateBuffer(bufHdr); /* releases spinlock */
4063  else
4064  UnlockBufHdr(bufHdr, buf_state);
4065  }
4066 }
static void InvalidateBuffer(BufferDesc *buf)
Definition: bufmgr.c:1441
int i
Definition: isn.c:73
Oid dbOid
Definition: buf_internals.h:95

References buftag::dbOid, GetBufferDescriptor(), i, InvalidateBuffer(), LockBufHdr(), NBuffers, BufferDesc::tag, and UnlockBufHdr().

Referenced by createdb_failure_callback(), dbase_redo(), dropdb(), and movedb().

◆ DropRelationBuffers()

void DropRelationBuffers ( struct SMgrRelationData smgr_reln,
ForkNumber forkNum,
int  nforks,
BlockNumber firstDelBlock 
)

◆ DropRelationsAllBuffers()

void DropRelationsAllBuffers ( struct SMgrRelationData **  smgr_reln,
int  nlocators 
)

◆ ExtendBufferedRel()

Buffer ExtendBufferedRel ( BufferManagerRelation  bmr,
ForkNumber  forkNum,
BufferAccessStrategy  strategy,
uint32  flags 
)

Definition at line 838 of file bufmgr.c.

842 {
843  Buffer buf;
844  uint32 extend_by = 1;
845 
846  ExtendBufferedRelBy(bmr, forkNum, strategy, flags, extend_by,
847  &buf, &extend_by);
848 
849  return buf;
850 }
int Buffer
Definition: buf.h:23
BlockNumber ExtendBufferedRelBy(BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, uint32 extend_by, Buffer *buffers, uint32 *extended_by)
Definition: bufmgr.c:870

References buf, and ExtendBufferedRelBy().

Referenced by _bt_allocbuf(), _hash_getnewbuf(), BloomNewBuffer(), brinbuild(), brinbuildempty(), fill_seq_fork_with_data(), ginbuildempty(), GinNewBuffer(), gistbuildempty(), gistNewBuffer(), ReadBuffer_common(), revmap_physical_extend(), and SpGistNewBuffer().

◆ ExtendBufferedRelBy()

BlockNumber ExtendBufferedRelBy ( BufferManagerRelation  bmr,
ForkNumber  fork,
BufferAccessStrategy  strategy,
uint32  flags,
uint32  extend_by,
Buffer buffers,
uint32 extended_by 
)

Definition at line 870 of file bufmgr.c.

877 {
878  Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
879  Assert(bmr.smgr == NULL || bmr.relpersistence != 0);
880  Assert(extend_by > 0);
881 
882  if (bmr.smgr == NULL)
883  {
884  bmr.smgr = RelationGetSmgr(bmr.rel);
885  bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
886  }
887 
888  return ExtendBufferedRelCommon(bmr, fork, strategy, flags,
889  extend_by, InvalidBlockNumber,
890  buffers, extended_by);
891 }
#define InvalidBlockNumber
Definition: block.h:33
static BlockNumber ExtendBufferedRelCommon(BufferManagerRelation bmr, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, uint32 extend_by, BlockNumber extend_upto, Buffer *buffers, uint32 *extended_by)
Definition: bufmgr.c:1804
static SMgrRelation RelationGetSmgr(Relation rel)
Definition: rel.h:567
struct SMgrRelationData * smgr
Definition: bufmgr.h:102
Form_pg_class rd_rel
Definition: rel.h:111

References Assert(), ExtendBufferedRelCommon(), InvalidBlockNumber, RelationData::rd_rel, BufferManagerRelation::rel, RelationGetSmgr(), BufferManagerRelation::relpersistence, and BufferManagerRelation::smgr.

Referenced by ExtendBufferedRel(), and RelationAddBlocks().

◆ ExtendBufferedRelTo()

Buffer ExtendBufferedRelTo ( BufferManagerRelation  bmr,
ForkNumber  fork,
BufferAccessStrategy  strategy,
uint32  flags,
BlockNumber  extend_to,
ReadBufferMode  mode 
)

Definition at line 902 of file bufmgr.c.

908 {
909  BlockNumber current_size;
910  uint32 extended_by = 0;
911  Buffer buffer = InvalidBuffer;
912  Buffer buffers[64];
913 
914  Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
915  Assert(bmr.smgr == NULL || bmr.relpersistence != 0);
916  Assert(extend_to != InvalidBlockNumber && extend_to > 0);
917 
918  if (bmr.smgr == NULL)
919  {
920  bmr.smgr = RelationGetSmgr(bmr.rel);
921  bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
922  }
923 
924  /*
925  * If desired, create the file if it doesn't exist. If
926  * smgr_cached_nblocks[fork] is positive then it must exist, no need for
927  * an smgrexists call.
928  */
929  if ((flags & EB_CREATE_FORK_IF_NEEDED) &&
930  (bmr.smgr->smgr_cached_nblocks[fork] == 0 ||
931  bmr.smgr->smgr_cached_nblocks[fork] == InvalidBlockNumber) &&
932  !smgrexists(bmr.smgr, fork))
933  {
934  LockRelationForExtension(bmr.rel, ExclusiveLock);
935 
936  /* recheck, fork might have been created concurrently */
937  if (!smgrexists(bmr.smgr, fork))
938  smgrcreate(bmr.smgr, fork, flags & EB_PERFORMING_RECOVERY);
939 
940  UnlockRelationForExtension(bmr.rel, ExclusiveLock);
941  }
942 
943  /*
944  * If requested, invalidate size cache, so that smgrnblocks asks the
945  * kernel.
946  */
947  if (flags & EB_CLEAR_SIZE_CACHE)
948  bmr.smgr->smgr_cached_nblocks[fork] = InvalidBlockNumber;
949 
950  /*
951  * Estimate how many pages we'll need to extend by. This avoids acquiring
952  * unnecessarily many victim buffers.
953  */
954  current_size = smgrnblocks(bmr.smgr, fork);
955 
956  /*
957  * Since no-one else can be looking at the page contents yet, there is no
958  * difference between an exclusive lock and a cleanup-strength lock. Note
959  * that we pass the original mode to ReadBuffer_common() below, when
960  * falling back to reading the buffer to a concurrent relation extension.
961  */
962  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
963  flags |= EB_LOCK_TARGET;
964 
965  while (current_size < extend_to)
966  {
967  uint32 num_pages = lengthof(buffers);
968  BlockNumber first_block;
969 
970  if ((uint64) current_size + num_pages > extend_to)
971  num_pages = extend_to - current_size;
972 
973  first_block = ExtendBufferedRelCommon(bmr, fork, strategy, flags,
974  num_pages, extend_to,
975  buffers, &extended_by);
976 
977  current_size = first_block + extended_by;
978  Assert(num_pages != 0 || current_size >= extend_to);
979 
980  for (uint32 i = 0; i < extended_by; i++)
981  {
982  if (first_block + i != extend_to - 1)
983  ReleaseBuffer(buffers[i]);
984  else
985  buffer = buffers[i];
986  }
987  }
988 
989  /*
990  * It's possible that another backend concurrently extended the relation.
991  * In that case read the buffer.
992  *
993  * XXX: Should we control this via a flag?
994  */
995  if (buffer == InvalidBuffer)
996  {
997  bool hit;
998 
999  Assert(extended_by == 0);
1000  buffer = ReadBuffer_common(bmr.smgr, bmr.relpersistence,
1001  fork, extend_to - 1, mode, strategy,
1002  &hit);
1003  }
1004 
1005  return buffer;
1006 }
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:4560
static Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit)
Definition: bufmgr.c:1014
#define lengthof(array)
Definition: c.h:775
void LockRelationForExtension(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:430
void UnlockRelationForExtension(Relation relation, LOCKMODE lockmode)
Definition: lmgr.c:480
#define ExclusiveLock
Definition: lockdefs.h:42
static PgChecksumMode mode
Definition: pg_checksums.c:56
int64 current_size
Definition: pg_checksums.c:64
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:655
BlockNumber smgr_cached_nblocks[MAX_FORKNUM+1]
Definition: smgr.h:46

References Assert(), PrivateRefCountEntry::buffer, current_size, EB_CLEAR_SIZE_CACHE, EB_CREATE_FORK_IF_NEEDED, EB_LOCK_TARGET, EB_PERFORMING_RECOVERY, ExclusiveLock, ExtendBufferedRelCommon(), i, InvalidBlockNumber, InvalidBuffer, lengthof, LockRelationForExtension(), mode, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, RelationData::rd_rel, ReadBuffer_common(), BufferManagerRelation::rel, RelationGetSmgr(), ReleaseBuffer(), BufferManagerRelation::relpersistence, BufferManagerRelation::smgr, SMgrRelationData::smgr_cached_nblocks, smgrcreate(), smgrexists(), smgrnblocks(), and UnlockRelationForExtension().

Referenced by fsm_extend(), vm_extend(), and XLogReadBufferExtended().

◆ FlushDatabaseBuffers()

void FlushDatabaseBuffers ( Oid  dbid)

Definition at line 4498 of file bufmgr.c.

4499 {
4500  int i;
4501  BufferDesc *bufHdr;
4502 
4503  for (i = 0; i < NBuffers; i++)
4504  {
4505  uint32 buf_state;
4506 
4507  bufHdr = GetBufferDescriptor(i);
4508 
4509  /*
4510  * As in DropRelationBuffers, an unlocked precheck should be safe and
4511  * saves some cycles.
4512  */
4513  if (bufHdr->tag.dbOid != dbid)
4514  continue;
4515 
4516  /* Make sure we can handle the pin */
4517  ReservePrivateRefCountEntry();
4518  ResourceOwnerEnlarge(CurrentResourceOwner);
4519 
4520  buf_state = LockBufHdr(bufHdr);
4521  if (bufHdr->tag.dbOid == dbid &&
4522  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4523  {
4524  PinBuffer_Locked(bufHdr);
4525  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
4526  FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
4527  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
4528  UnpinBuffer(bufHdr);
4529  }
4530  else
4531  UnlockBufHdr(bufHdr, buf_state);
4532  }
4533 }
#define BM_VALID
Definition: buf_internals.h:62
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln, IOObject io_object, IOContext io_context)
Definition: bufmgr.c:3437
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:2416
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:238
static void UnpinBuffer(BufferDesc *buf)
Definition: bufmgr.c:2459
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1172
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1785
@ LW_SHARED
Definition: lwlock.h:117
@ IOOBJECT_RELATION
Definition: pgstat.h:280
@ IOCONTEXT_NORMAL
Definition: pgstat.h:290
ResourceOwner CurrentResourceOwner
Definition: resowner.c:165
void ResourceOwnerEnlarge(ResourceOwner owner)
Definition: resowner.c:442

References BM_DIRTY, BM_VALID, BufferDescriptorGetContentLock(), CurrentResourceOwner, buftag::dbOid, FlushBuffer(), GetBufferDescriptor(), i, IOCONTEXT_NORMAL, IOOBJECT_RELATION, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, PinBuffer_Locked(), ReservePrivateRefCountEntry(), ResourceOwnerEnlarge(), BufferDesc::tag, UnlockBufHdr(), and UnpinBuffer().

Referenced by dbase_redo().

◆ FlushOneBuffer()

void FlushOneBuffer ( Buffer  buffer)

Definition at line 4540 of file bufmgr.c.

4541 {
4542  BufferDesc *bufHdr;
4543 
4544  /* currently not needed, but no fundamental reason not to support */
4545  Assert(!BufferIsLocal(buffer));
4546 
4547  Assert(BufferIsPinned(buffer));
4548 
4549  bufHdr = GetBufferDescriptor(buffer - 1);
4550 
4551  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
4552 
4553  FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
4554 }
bool LWLockHeldByMe(LWLock *lock)
Definition: lwlock.c:1897

References Assert(), PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, FlushBuffer(), GetBufferDescriptor(), IOCONTEXT_NORMAL, IOOBJECT_RELATION, and LWLockHeldByMe().

Referenced by hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), and XLogReadBufferForRedoExtended().

◆ FlushRelationBuffers()

void FlushRelationBuffers ( Relation  rel)

Definition at line 4145 of file bufmgr.c.

4146 {
4147  int i;
4148  BufferDesc *bufHdr;
4149  SMgrRelation srel = RelationGetSmgr(rel);
4150 
4151  if (RelationUsesLocalBuffers(rel))
4152  {
4153  for (i = 0; i < NLocBuffer; i++)
4154  {
4155  uint32 buf_state;
4156  instr_time io_start;
4157 
4158  bufHdr = GetLocalBufferDescriptor(i);
4159  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
4160  ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
4161  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4162  {
4163  ErrorContextCallback errcallback;
4164  Page localpage;
4165 
4166  localpage = (char *) LocalBufHdrGetBlock(bufHdr);
4167 
4168  /* Setup error traceback support for ereport() */
4170  errcallback.arg = (void *) bufHdr;
4171  errcallback.previous = error_context_stack;
4172  error_context_stack = &errcallback;
4173 
4174  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
4175 
4177 
4178  smgrwrite(srel,
4179  BufTagGetForkNum(&bufHdr->tag),
4180  bufHdr->tag.blockNum,
4181  localpage,
4182  false);
4183 
4186  io_start, 1);
4187 
4188  buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
4189  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
4190 
4192 
4193  /* Pop the error context stack */
4194  error_context_stack = errcallback.previous;
4195  }
4196  }
4197 
4198  return;
4199  }
4200 
4201  for (i = 0; i < NBuffers; i++)
4202  {
4203  uint32 buf_state;
4204 
4205  bufHdr = GetBufferDescriptor(i);
4206 
4207  /*
4208  * As in DropRelationBuffers, an unlocked precheck should be safe and
4209  * saves some cycles.
4210  */
4211  if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
4212  continue;
4213 
4214  /* Make sure we can handle the pin */
4217 
4218  buf_state = LockBufHdr(bufHdr);
4219  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
4220  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4221  {
4222  PinBuffer_Locked(bufHdr);
4226  UnpinBuffer(bufHdr);
4227  }
4228  else
4229  UnlockBufHdr(bufHdr, buf_state);
4230  }
4231 }
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:290
static bool BufTagMatchesRelFileLocator(const BufferTag *tag, const RelFileLocator *rlocator)
#define BM_JUST_DIRTIED
Definition: buf_internals.h:66
bool track_io_timing
Definition: bufmgr.c:138
#define LocalBufHdrGetBlock(bufHdr)
Definition: bufmgr.c:67
static void local_buffer_write_error_callback(void *arg)
Definition: bufmgr.c:5343
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1542
ErrorContextCallback * error_context_stack
Definition: elog.c:94
BufferUsage pgBufferUsage
Definition: instrument.c:20
int NLocBuffer
Definition: localbuf.c:42
@ IOOBJECT_TEMP_RELATION
Definition: pgstat.h:281
@ IOOP_WRITE
Definition: pgstat.h:304
instr_time pgstat_prepare_io_time(bool track_io_guc)
Definition: pgstat_io.c:100
void pgstat_count_io_op_time(IOObject io_object, IOContext io_context, IOOp io_op, instr_time start_time, uint32 cnt)
Definition: pgstat_io.c:122
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:637
static void smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const void *buffer, bool skipFsync)
Definition: smgr.h:121
int64 local_blks_written
Definition: instrument.h:33
struct ErrorContextCallback * previous
Definition: elog.h:295
void(* callback)(void *arg)
Definition: elog.h:296
RelFileLocator rd_locator
Definition: rel.h:57

References ErrorContextCallback::arg, buftag::blockNum, BM_DIRTY, BM_JUST_DIRTIED, BM_VALID, BufferDescriptorGetContentLock(), BufTagGetForkNum(), BufTagMatchesRelFileLocator(), ErrorContextCallback::callback, CurrentResourceOwner, error_context_stack, FlushBuffer(), GetBufferDescriptor(), GetLocalBufferDescriptor(), i, IOCONTEXT_NORMAL, IOOBJECT_RELATION, IOOBJECT_TEMP_RELATION, IOOP_WRITE, BufferUsage::local_blks_written, local_buffer_write_error_callback(), LocalBufHdrGetBlock, LockBufHdr(), LW_SHARED, LWLockAcquire(), LWLockRelease(), NBuffers, NLocBuffer, PageSetChecksumInplace(), pg_atomic_read_u32(), pg_atomic_unlocked_write_u32(), pgBufferUsage, pgstat_count_io_op_time(), pgstat_prepare_io_time(), PinBuffer_Locked(), ErrorContextCallback::previous, RelationData::rd_locator, RelationGetSmgr(), RelationUsesLocalBuffers, ReservePrivateRefCountEntry(), ResourceOwnerEnlarge(), smgrwrite(), BufferDesc::state, BufferDesc::tag, track_io_timing, UnlockBufHdr(), and UnpinBuffer().

Referenced by fill_seq_with_data(), heapam_relation_copy_data(), and index_copy_data().

◆ FlushRelationsAllBuffers()

void FlushRelationsAllBuffers ( struct SMgrRelationData **  smgrs,
int  nrels 
)

◆ FreeAccessStrategy()

void FreeAccessStrategy ( BufferAccessStrategy  strategy)

Definition at line 639 of file freelist.c.

640 {
641  /* don't crash if called on a "default" strategy: GetAccessStrategy returns NULL for BAS_NORMAL */
642  if (strategy != NULL)
643  pfree(strategy); /* the strategy is a single palloc0 chunk, so one pfree suffices */
644 }

References pfree().

Referenced by blgetbitmap(), FreeBulkInsertState(), heap_endscan(), initscan(), parallel_vacuum_main(), and RelationCopyStorageUsingBuffer().

◆ GetAccessStrategy()

BufferAccessStrategy GetAccessStrategy ( BufferAccessStrategyType  btype)

Definition at line 541 of file freelist.c.

542 {
543  int ring_size_kb;
544 
545  /*
546  * Select ring size to use. See buffer/README for rationales.
547  *
548  * Note: if you change the ring size for BAS_BULKREAD, see also
549  * SYNC_SCAN_REPORT_INTERVAL in access/heap/syncscan.c.
550  */
551  switch (btype)
552  {
553  case BAS_NORMAL:
554  /* if someone asks for NORMAL, just give 'em a "default" object */
555  return NULL;
556 
557  case BAS_BULKREAD:
558  ring_size_kb = 256; /* small ring for sequential bulk reads */
559  break;
560  case BAS_BULKWRITE:
561  ring_size_kb = 16 * 1024; /* 16 MB */
562  break;
563  case BAS_VACUUM:
564  ring_size_kb = 256; /* same ring size as BAS_BULKREAD */
565  break;
566 
567  default:
568  elog(ERROR, "unrecognized buffer access strategy: %d",
569  (int) btype);
570  return NULL; /* keep compiler quiet */
571  }
572 
573  return GetAccessStrategyWithSize(btype, ring_size_kb); /* build the actual ring object */
574 }
BufferAccessStrategy GetAccessStrategyWithSize(BufferAccessStrategyType btype, int ring_size_kb)
Definition: freelist.c:584

References BAS_BULKREAD, BAS_BULKWRITE, BAS_NORMAL, BAS_VACUUM, elog, ERROR, and GetAccessStrategyWithSize().

Referenced by blgetbitmap(), bt_check_every_level(), collect_corrupt_items(), collect_visibility_data(), GetBulkInsertState(), initscan(), pgstat_index(), pgstathashindex(), pgstatindex_impl(), RelationCopyStorageUsingBuffer(), ScanSourceDatabasePgClass(), statapprox_heap(), and verify_heapam().

◆ GetAccessStrategyBufferCount()

int GetAccessStrategyBufferCount ( BufferAccessStrategy  strategy)

Definition at line 624 of file freelist.c.

625 {
626  if (strategy == NULL)
627  return 0; /* default (NULL) strategy has no ring */
628 
629  return strategy->nbuffers; /* number of buffer slots in the ring */
630 }

References BufferAccessStrategyData::nbuffers.

Referenced by parallel_vacuum_init().

◆ GetAccessStrategyWithSize()

BufferAccessStrategy GetAccessStrategyWithSize ( BufferAccessStrategyType  btype,
int  ring_size_kb 
)

Definition at line 584 of file freelist.c.

585 {
586  int ring_buffers;
587  BufferAccessStrategy strategy;
588 
589  Assert(ring_size_kb >= 0);
590 
591  /* Figure out how many buffers ring_size_kb is */
592  ring_buffers = ring_size_kb / (BLCKSZ / 1024);
593 
594  /* 0 means unlimited, so no BufferAccessStrategy required */
595  if (ring_buffers == 0)
596  return NULL; /* callers treat NULL as the "default" strategy */
597 
598  /* Cap to 1/8th of shared_buffers */
599  ring_buffers = Min(NBuffers / 8, ring_buffers);
600 
601  /* NBuffers should never be less than 16, so this shouldn't happen */
602  Assert(ring_buffers > 0);
603 
604  /* Allocate the object and initialize all elements to zeroes */
605  strategy = (BufferAccessStrategy)
606  palloc0(offsetof(BufferAccessStrategyData, buffers) +
607  ring_buffers * sizeof(Buffer)); /* trailing flexible array of ring slots */
608 
609  /* Set fields that don't start out zero */
610  strategy->btype = btype;
611  strategy->nbuffers = ring_buffers;
612 
613  return strategy;
614 }
struct BufferAccessStrategyData * BufferAccessStrategy
Definition: buf.h:44
#define Min(x, y)
Definition: c.h:991
void * palloc0(Size size)
Definition: mcxt.c:1334
BufferAccessStrategyType btype
Definition: freelist.c:75

References Assert(), BufferAccessStrategyData::btype, Min, BufferAccessStrategyData::nbuffers, NBuffers, and palloc0().

Referenced by do_autovacuum(), ExecVacuum(), GetAccessStrategy(), and parallel_vacuum_main().

◆ HoldingBufferPinThatDelaysRecovery()

bool HoldingBufferPinThatDelaysRecovery ( void  )

Definition at line 5010 of file bufmgr.c.

5011 {
5012  int bufid = GetStartupBufferPinWaitBufId();
5013 
5014  /*
5015  * If we get woken slowly then it's possible that the Startup process was
5016  * already woken by other backends before we got here. Also possible that
5017  * we get here by multiple interrupts or interrupts at inappropriate
5018  * times, so make sure we do nothing if the bufid is not set.
5019  */
5020  if (bufid < 0)
5021  return false;
5022 
5023  if (GetPrivateRefCount(bufid + 1) > 0) /* bufid + 1 converts the 0-based id to a 1-based Buffer */
5024  return true;
5025 
5026  return false;
5027 }
int GetStartupBufferPinWaitBufId(void)
Definition: proc.c:671

References GetPrivateRefCount(), and GetStartupBufferPinWaitBufId().

Referenced by CheckRecoveryConflictDeadlock(), and ProcessRecoveryConflictInterrupt().

◆ IncrBufferRefCount()

void IncrBufferRefCount ( Buffer  buffer)

Definition at line 4592 of file bufmgr.c.

4593 {
4594  Assert(BufferIsPinned(buffer));
4596  if (BufferIsLocal(buffer))
4597  LocalRefCount[-buffer - 1]++;
4598  else
4599  {
4600  PrivateRefCountEntry *ref;
4601 
4602  ref = GetPrivateRefCountEntry(buffer, true);
4603  Assert(ref != NULL);
4604  ref->refcount++;
4605  }
4607 }
static PrivateRefCountEntry * GetPrivateRefCountEntry(Buffer buffer, bool do_move)
Definition: bufmgr.c:330
void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)

References Assert(), PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, CurrentResourceOwner, GetPrivateRefCountEntry(), LocalRefCount, PrivateRefCountEntry::refcount, ResourceOwnerEnlarge(), and ResourceOwnerRememberBuffer().

Referenced by _bt_steppage(), btrestrpos(), entryLoadMoreItems(), ReadBufferBI(), RelationAddBlocks(), scanPostingTree(), startScanEntry(), and tts_buffer_heap_store_tuple().

◆ InitBufferPool()

void InitBufferPool ( void  )

Definition at line 68 of file buf_init.c.

69 {
70  bool foundBufs,
71  foundDescs,
72  foundIOCV,
73  foundBufCkpt;
74 
75  /* Align descriptors to a cacheline boundary. */
77  ShmemInitStruct("Buffer Descriptors",
78  NBuffers * sizeof(BufferDescPadded),
79  &foundDescs);
80 
81  /* Align buffer pool on IO page size boundary. */
82  BufferBlocks = (char *)
84  ShmemInitStruct("Buffer Blocks",
85  NBuffers * (Size) BLCKSZ + PG_IO_ALIGN_SIZE,
86  &foundBufs));
87 
88  /* Align condition variables to cacheline boundary. */
90  ShmemInitStruct("Buffer IO Condition Variables",
92  &foundIOCV);
93 
94  /*
95  * The array used to sort to-be-checkpointed buffer ids is located in
96  * shared memory, to avoid having to allocate significant amounts of
97  * memory at runtime. As that'd be in the middle of a checkpoint, or when
98  * the checkpointer is restarted, memory allocation failures would be
99  * painful.
100  */
102  ShmemInitStruct("Checkpoint BufferIds",
103  NBuffers * sizeof(CkptSortItem), &foundBufCkpt);
104 
105  if (foundDescs || foundBufs || foundIOCV || foundBufCkpt)
106  {
107  /* should find all of these, or none of them */
108  Assert(foundDescs && foundBufs && foundIOCV && foundBufCkpt);
109  /* note: this path is only taken in EXEC_BACKEND case */
110  }
111  else
112  {
113  int i;
114 
115  /*
116  * Initialize all the buffer headers.
117  */
118  for (i = 0; i < NBuffers; i++)
119  {
121 
122  ClearBufferTag(&buf->tag);
123 
124  pg_atomic_init_u32(&buf->state, 0);
125  buf->wait_backend_pgprocno = INVALID_PROC_NUMBER;
126 
127  buf->buf_id = i;
128 
129  /*
130  * Initially link all the buffers together as unused. Subsequent
131  * management of this list is done by freelist.c.
132  */
133  buf->freeNext = i + 1;
134 
137 
139  }
140 
141  /* Correct last entry of linked list */
143  }
144 
145  /* Init other shared buffer-management stuff */
146  StrategyInitialize(!foundDescs);
147 
148  /* Initialize per-backend file flush context */
151 }
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:216
CkptSortItem * CkptBufferIds
Definition: buf_init.c:25
char * BufferBlocks
Definition: buf_init.c:22
WritebackContext BackendWritebackContext
Definition: buf_init.c:24
ConditionVariableMinimallyPadded * BufferIOCVArray
Definition: buf_init.c:23
BufferDescPadded * BufferDescriptors
Definition: buf_init.c:21
#define FREENEXT_END_OF_LIST
static void ClearBufferTag(BufferTag *tag)
static ConditionVariable * BufferDescriptorGetIOCV(const BufferDesc *bdesc)
void WritebackContextInit(WritebackContext *context, int *max_pending)
Definition: bufmgr.c:5532
int backend_flush_after
Definition: bufmgr.c:161
#define TYPEALIGN(ALIGNVAL, LEN)
Definition: c.h:791
void ConditionVariableInit(ConditionVariable *cv)
void StrategyInitialize(bool init)
Definition: freelist.c:474
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:707
@ LWTRANCHE_BUFFER_CONTENT
Definition: lwlock.h:189
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:387

References Assert(), backend_flush_after, BackendWritebackContext, buf, BufferBlocks, BufferDescriptorGetContentLock(), BufferDescriptorGetIOCV(), BufferDescriptors, BufferIOCVArray, CkptBufferIds, ClearBufferTag(), ConditionVariableInit(), BufferDesc::freeNext, FREENEXT_END_OF_LIST, GetBufferDescriptor(), i, INVALID_PROC_NUMBER, LWLockInitialize(), LWTRANCHE_BUFFER_CONTENT, NBuffers, pg_atomic_init_u32(), PG_IO_ALIGN_SIZE, ShmemInitStruct(), StrategyInitialize(), TYPEALIGN, and WritebackContextInit().

Referenced by CreateOrAttachShmemStructs().

◆ InitBufferPoolAccess()

void InitBufferPoolAccess ( void  )

Definition at line 3229 of file bufmgr.c.

3230 {
3231  HASHCTL hash_ctl;
3232 
3233  memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
3234 
3235  hash_ctl.keysize = sizeof(int32);
3236  hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
3237 
3238  PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
3239  HASH_ELEM | HASH_BLOBS);
3240 
3241  /*
3242  * AtProcExit_Buffers needs LWLock access, and thereby has to be called at
3243  * the corresponding phase of backend shutdown.
3244  */
3245  Assert(MyProc != NULL);
3247 }
static void AtProcExit_Buffers(int code, Datum arg)
Definition: bufmgr.c:3254
struct PrivateRefCountEntry PrivateRefCountEntry
static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES]
Definition: bufmgr.c:196
static HTAB * PrivateRefCountHash
Definition: bufmgr.c:197
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:352
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:365
PGPROC * MyProc
Definition: proc.c:66
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76

References Assert(), AtProcExit_Buffers(), HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, MyProc, on_shmem_exit(), PrivateRefCountArray, and PrivateRefCountHash.

Referenced by BaseInit().

◆ IsBufferCleanupOK()

bool IsBufferCleanupOK ( Buffer  buffer)

Definition at line 5092 of file bufmgr.c.

5093 {
5094  BufferDesc *bufHdr;
5095  uint32 buf_state;
5096 
5097  Assert(BufferIsValid(buffer));
5098 
5099  if (BufferIsLocal(buffer))
5100  {
5101  /* There should be exactly one pin */
5102  if (LocalRefCount[-buffer - 1] != 1)
5103  return false;
5104  /* Nobody else to wait for */
5105  return true;
5106  }
5107 
5108  /* There should be exactly one local pin */
5109  if (GetPrivateRefCount(buffer) != 1)
5110  return false;
5111 
5112  bufHdr = GetBufferDescriptor(buffer - 1);
5113 
5114  /* caller must hold exclusive lock on buffer */
5116  LW_EXCLUSIVE));
5117 
5118  buf_state = LockBufHdr(bufHdr);
5119 
5120  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5121  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
5122  {
5123  /* pincount is OK. */
5124  UnlockBufHdr(bufHdr, buf_state);
5125  return true;
5126  }
5127 
5128  UnlockBufHdr(bufHdr, buf_state);
5129  return false;
5130 }

References Assert(), BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsValid(), GetBufferDescriptor(), GetPrivateRefCount(), LocalRefCount, LockBufHdr(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), and UnlockBufHdr().

Referenced by _hash_doinsert(), _hash_expandtable(), _hash_splitbucket(), and hashbucketcleanup().

◆ LockBuffer()

void LockBuffer ( Buffer  buffer,
int  mode 
)

Definition at line 4795 of file bufmgr.c.

4796 {
4797  BufferDesc *buf;
4798 
4799  Assert(BufferIsPinned(buffer));
4800  if (BufferIsLocal(buffer))
4801  return; /* local buffers need no lock */
4802 
4803  buf = GetBufferDescriptor(buffer - 1);
4804 
4805  if (mode == BUFFER_LOCK_UNLOCK)
4807  else if (mode == BUFFER_LOCK_SHARE)
4809  else if (mode == BUFFER_LOCK_EXCLUSIVE)
4811  else
4812  elog(ERROR, "unrecognized buffer lock mode: %d", mode);
4813 }
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:158
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:159

References Assert(), buf, PrivateRefCountEntry::buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, elog, ERROR, GetBufferDescriptor(), LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), and mode.

Referenced by _bt_lockbuf(), _bt_unlockbuf(), _bt_upgradelockbufcleanup(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_finish_split(), _hash_first(), _hash_freeovflpage(), _hash_getbuf(), _hash_getbuf_with_strategy(), _hash_getcachedmetap(), _hash_init(), _hash_kill_items(), _hash_readnext(), _hash_readpage(), _hash_readprev(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), blbulkdelete(), blgetbitmap(), blinsert(), BloomInitMetapage(), BloomNewBuffer(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_page_cleanup(), bringetbitmap(), brinGetStats(), brinGetTupleForHeapBlock(), brininsert(), brinLockRevmapPageForUpdate(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), brinsummarize(), bt_metap(), bt_multi_page_stats(), bt_page_items_internal(), bt_page_stats_internal(), bt_recheck_sibling_links(), collect_corrupt_items(), collect_visibility_data(), collectMatchBitmap(), ConditionalLockBufferForCleanup(), count_nondeletable_pages(), entryLoadMoreItems(), FreeSpaceMapPrepareTruncateRel(), fsm_readbuf(), fsm_search(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), get_raw_page_internal(), GetVisibilityMapPins(), ginbulkdelete(), ginEntryInsert(), ginFindLeafPage(), ginFindParents(), ginFinishOldSplit(), ginFinishSplit(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginInsertValue(), GinNewBuffer(), ginScanToDelete(), ginStepRight(), ginTraverseLock(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTreeLeaves(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfinishsplit(), gistfixsplit(), gistformdownlink(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), 
heap_fetch(), heap_finish_speculative(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_page_prune_opt(), heap_update(), heap_xlog_visible(), heapam_index_build_range_scan(), heapam_index_fetch_tuple(), heapam_index_validate_scan(), heapam_relation_copy_for_cluster(), heapam_scan_analyze_next_block(), heapam_scan_bitmap_next_block(), heapam_scan_sample_next_tuple(), heapam_tuple_satisfies_snapshot(), heapgetpage(), heapgettup(), initBloomState(), lazy_scan_heap(), lazy_scan_new_or_empty(), lazy_vacuum_heap_rel(), LockBufferForCleanup(), log_newpage_range(), palloc_btree_page(), pg_visibility(), pgrowlocks(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), pgstatindex_impl(), read_seq_tuple(), RelationAddBlocks(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), ScanSourceDatabasePgClass(), shiftList(), spgdoinsert(), spgGetCache(), SpGistNewBuffer(), spgprocesspending(), spgvacuumpage(), spgWalk(), startScanEntry(), statapprox_heap(), summarize_range(), UnlockReleaseBuffer(), verify_heapam(), verifyBackupPageConsistency(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), vm_readbuf(), XLogReadBufferForRedoExtended(), and XLogRecordPageWithFreeSpace().

◆ LockBufferForCleanup()

void LockBufferForCleanup ( Buffer  buffer)

Definition at line 4875 of file bufmgr.c.

4876 {
4877  BufferDesc *bufHdr;
4878  TimestampTz waitStart = 0;
4879  bool waiting = false;
4880  bool logged_recovery_conflict = false;
4881 
4882  Assert(BufferIsPinned(buffer));
4883  Assert(PinCountWaitBuf == NULL);
4884 
4885  CheckBufferIsPinnedOnce(buffer);
4886 
4887  /* Nobody else to wait for */
4888  if (BufferIsLocal(buffer))
4889  return;
4890 
4891  bufHdr = GetBufferDescriptor(buffer - 1);
4892 
4893  for (;;)
4894  {
4895  uint32 buf_state;
4896 
4897  /* Try to acquire lock */
4899  buf_state = LockBufHdr(bufHdr);
4900 
4901  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4902  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4903  {
4904  /* Successfully acquired exclusive lock with pincount 1 */
4905  UnlockBufHdr(bufHdr, buf_state);
4906 
4907  /*
4908  * Emit the log message if recovery conflict on buffer pin was
4909  * resolved but the startup process waited longer than
4910  * deadlock_timeout for it.
4911  */
4912  if (logged_recovery_conflict)
4914  waitStart, GetCurrentTimestamp(),
4915  NULL, false);
4916 
4917  if (waiting)
4918  {
4919  /* reset ps display to remove the suffix if we added one */
4921  waiting = false;
4922  }
4923  return;
4924  }
4925  /* Failed, so mark myself as waiting for pincount 1 */
4926  if (buf_state & BM_PIN_COUNT_WAITER)
4927  {
4928  UnlockBufHdr(bufHdr, buf_state);
4929  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4930  elog(ERROR, "multiple backends attempting to wait for pincount 1");
4931  }
4933  PinCountWaitBuf = bufHdr;
4934  buf_state |= BM_PIN_COUNT_WAITER;
4935  UnlockBufHdr(bufHdr, buf_state);
4936  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4937 
4938  /* Wait to be signaled by UnpinBuffer() */
4939  if (InHotStandby)
4940  {
4941  if (!waiting)
4942  {
4943  /* adjust the process title to indicate that it's waiting */
4944  set_ps_display_suffix("waiting");
4945  waiting = true;
4946  }
4947 
4948  /*
4949  * Emit the log message if the startup process is waiting longer
4950  * than deadlock_timeout for recovery conflict on buffer pin.
4951  *
4952  * Skip this if first time through because the startup process has
4953  * not started waiting yet in this case. So, the wait start
4954  * timestamp is set after this logic.
4955  */
4956  if (waitStart != 0 && !logged_recovery_conflict)
4957  {
4959 
4960  if (TimestampDifferenceExceeds(waitStart, now,
4961  DeadlockTimeout))
4962  {
4964  waitStart, now, NULL, true);
4965  logged_recovery_conflict = true;
4966  }
4967  }
4968 
4969  /*
4970  * Set the wait start timestamp if logging is enabled and first
4971  * time through.
4972  */
4973  if (log_recovery_conflict_waits && waitStart == 0)
4974  waitStart = GetCurrentTimestamp();
4975 
4976  /* Publish the bufid that Startup process waits on */
4977  SetStartupBufferPinWaitBufId(buffer - 1);
4978  /* Set alarm and then wait to be signaled by UnpinBuffer() */
4980  /* Reset the published bufid */
4982  }
4983  else
4984  ProcWaitForSignal(WAIT_EVENT_BUFFER_PIN);
4985 
4986  /*
4987  * Remove flag marking us as waiter. Normally this will not be set
4988  * anymore, but ProcWaitForSignal() can return for other signals as
4989  * well. We take care to only reset the flag if we're the waiter, as
4990  * theoretically another backend could have started waiting. That's
4991  * impossible with the current usages due to table level locking, but
4992  * better be safe.
4993  */
4994  buf_state = LockBufHdr(bufHdr);
4995  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4997  buf_state &= ~BM_PIN_COUNT_WAITER;
4998  UnlockBufHdr(bufHdr, buf_state);
4999 
5000  PinCountWaitBuf = NULL;
5001  /* Loop back and try again */
5002  }
5003 }
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1790
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1654
Datum now(PG_FUNCTION_ARGS)
Definition: timestamp.c:1618
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:67
void CheckBufferIsPinnedOnce(Buffer buffer)
Definition: bufmgr.c:4842
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:164
int64 TimestampTz
Definition: timestamp.h:39
static volatile sig_atomic_t waiting
Definition: latch.c:162
@ PROCSIG_RECOVERY_CONFLICT_BUFFERPIN
Definition: procsignal.h:47
void set_ps_display_remove_suffix(void)
Definition: ps_status.c:394
void set_ps_display_suffix(const char *suffix)
Definition: ps_status.c:342
int DeadlockTimeout
Definition: proc.c:57
void SetStartupBufferPinWaitBufId(int bufid)
Definition: proc.c:659
void ProcWaitForSignal(uint32 wait_event_info)
Definition: proc.c:1866
void ResolveRecoveryConflictWithBufferPin(void)
Definition: standby.c:792
bool log_recovery_conflict_waits
Definition: standby.c:41
void LogRecoveryConflict(ProcSignalReason reason, TimestampTz wait_start, TimestampTz now, VirtualTransactionId *wait_list, bool still_waiting)
Definition: standby.c:273
int wait_backend_pgprocno
#define InHotStandby
Definition: xlogutils.h:57

References Assert(), BM_PIN_COUNT_WAITER, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferIsLocal, BufferIsPinned, CheckBufferIsPinnedOnce(), DeadlockTimeout, elog, ERROR, GetBufferDescriptor(), GetCurrentTimestamp(), InHotStandby, LockBuffer(), LockBufHdr(), log_recovery_conflict_waits, LogRecoveryConflict(), MyProcNumber, now(), PinCountWaitBuf, PROCSIG_RECOVERY_CONFLICT_BUFFERPIN, ProcWaitForSignal(), ResolveRecoveryConflictWithBufferPin(), set_ps_display_remove_suffix(), set_ps_display_suffix(), SetStartupBufferPinWaitBufId(), TimestampDifferenceExceeds(), UnlockBufHdr(), BufferDesc::wait_backend_pgprocno, and waiting.

Referenced by _bt_upgradelockbufcleanup(), ginVacuumPostingTree(), hashbulkdelete(), heap_force_common(), lazy_scan_heap(), ReadBuffer_common(), and XLogReadBufferForRedoExtended().

◆ MarkBufferDirty()

void MarkBufferDirty ( Buffer  buffer)

Definition at line 2189 of file bufmgr.c.

2190 {
2191  BufferDesc *bufHdr;
2192  uint32 buf_state;
2193  uint32 old_buf_state;
2194 
2195  if (!BufferIsValid(buffer))
2196  elog(ERROR, "bad buffer ID: %d", buffer);
2197 
2198  if (BufferIsLocal(buffer))
2199  {
2200  MarkLocalBufferDirty(buffer);
2201  return;
2202  }
2203 
2204  bufHdr = GetBufferDescriptor(buffer - 1);
2205 
2206  Assert(BufferIsPinned(buffer));
2208  LW_EXCLUSIVE));
2209 
2210  old_buf_state = pg_atomic_read_u32(&bufHdr->state);
2211  for (;;)
2212  {
2213  if (old_buf_state & BM_LOCKED)
2214  old_buf_state = WaitBufHdrUnlocked(bufHdr);
2215 
2216  buf_state = old_buf_state;
2217 
2218  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2219  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
2220 
2221  if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
2222  buf_state))
2223  break;
2224  }
2225 
2226  /*
2227  * If the buffer was not dirty already, do vacuum accounting.
2228  */
2229  if (!(old_buf_state & BM_DIRTY))
2230  {
2231  VacuumPageDirty++;
2233  if (VacuumCostActive)
2235  }
2236 }
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:344
#define BM_LOCKED
Definition: buf_internals.h:60
static uint32 WaitBufHdrUnlocked(BufferDesc *buf)
Definition: bufmgr.c:5420
bool VacuumCostActive
Definition: globals.c:159
int64 VacuumPageDirty
Definition: globals.c:156
int VacuumCostBalance
Definition: globals.c:158
int VacuumCostPageDirty
Definition: globals.c:150
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:449
int64 shared_blks_dirtied
Definition: instrument.h:28

References Assert(), BM_DIRTY, BM_JUST_DIRTIED, BM_LOCKED, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferIsLocal, BufferIsPinned, BufferIsValid(), elog, ERROR, GetBufferDescriptor(), LW_EXCLUSIVE, LWLockHeldByMeInMode(), MarkLocalBufferDirty(), pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pgBufferUsage, BufferUsage::shared_blks_dirtied, BufferDesc::state, VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, VacuumPageDirty, and WaitBufHdrUnlocked().

Referenced by _bt_clear_incomplete_split(), _bt_dedup_pass(), _bt_delitems_delete(), _bt_delitems_vacuum(), _bt_getroot(), _bt_insertonpg(), _bt_mark_page_halfdead(), _bt_newlevel(), _bt_restore_meta(), _bt_set_cleanup_info(), _bt_split(), _bt_unlink_halfdead_page(), _hash_addovflpage(), _hash_doinsert(), _hash_expandtable(), _hash_freeovflpage(), _hash_init(), _hash_splitbucket(), _hash_squeezebucket(), _hash_vacuum_one_page(), addLeafTuple(), brin_doinsert(), brin_doupdate(), brin_initialize_empty_new_buffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinRevmapDesummarizeRange(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), createPostingTree(), dataExecPlaceToPageInternal(), dataExecPlaceToPageLeaf(), do_setval(), doPickSplit(), entryExecPlaceToPage(), fill_seq_fork_with_data(), FreeSpaceMapPrepareTruncateRel(), generic_redo(), GenericXLogFinish(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginDeletePage(), ginHeapTupleFastInsert(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginUpdateStats(), ginVacuumPostingTreeLeaf(), gistbuild(), gistbuildempty(), gistdeletepage(), gistplacetopage(), gistprunepage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistvacuumpage(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), 
hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), hashbucketcleanup(), hashbulkdelete(), heap_abort_speculative(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_freeze_execute_prepared(), heap_inplace_update(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_page_prune(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), lazy_scan_new_or_empty(), lazy_scan_prune(), lazy_vacuum_heap_page(), log_newpage_range(), moveLeafs(), nextval_internal(), RelationAddBlocks(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), revmap_physical_extend(), saveNodeLink(), seq_redo(), shiftList(), spgAddNodeAction(), spgbuild(), SpGistUpdateMetaPage(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), vacuumLeafPage(), vacuumLeafRoot(), vacuumRedirectAndPlaceholder(), visibilitymap_clear(), visibilitymap_prepare_truncate(), visibilitymap_set(), writeListPage(), and XLogReadBufferForRedoExtended().

◆ MarkBufferDirtyHint()

void MarkBufferDirtyHint ( Buffer  buffer,
bool  buffer_std 
)

Definition at line 4624 of file bufmgr.c.

4625 {
4626  BufferDesc *bufHdr;
4627  Page page = BufferGetPage(buffer);
4628 
4629  if (!BufferIsValid(buffer))
4630  elog(ERROR, "bad buffer ID: %d", buffer);
4631 
4632  if (BufferIsLocal(buffer))
4633  {
4634  MarkLocalBufferDirty(buffer);
4635  return;
4636  }
4637 
4638  bufHdr = GetBufferDescriptor(buffer - 1);
4639 
4640  Assert(GetPrivateRefCount(buffer) > 0);
4641  /* here, either share or exclusive lock is OK */
4642  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
4643 
4644  /*
4645  * This routine might get called many times on the same page, if we are
4646  * making the first scan after commit of an xact that added/deleted many
4647  * tuples. So, be as quick as we can if the buffer is already dirty. We
4648  * do this by not acquiring spinlock if it looks like the status bits are
4649  * already set. Since we make this test unlocked, there's a chance we
4650  * might fail to notice that the flags have just been cleared, and failed
4651  * to reset them, due to memory-ordering issues. But since this function
4652  * is only intended to be used in cases where failing to write out the
4653  * data would be harmless anyway, it doesn't really matter.
4654  */
4655  if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
4656  (BM_DIRTY | BM_JUST_DIRTIED))
4657  {
4658  XLogRecPtr lsn = InvalidXLogRecPtr;
4659  bool dirtied = false;
4660  bool delayChkptFlags = false;
4661  uint32 buf_state;
4662 
4663  /*
4664  * If we need to protect hint bit updates from torn writes, WAL-log a
4665  * full page image of the page. This full page image is only necessary
4666  * if the hint bit update is the first change to the page since the
4667  * last checkpoint.
4668  *
4669  * We don't check full_page_writes here because that logic is included
4670  * when we call XLogInsert() since the value changes dynamically.
4671  */
4672  if (XLogHintBitIsNeeded() &&
4673  (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
4674  {
4675  /*
4676  * If we must not write WAL, due to a relfilelocator-specific
4677  * condition or being in recovery, don't dirty the page. We can
4678  * set the hint, just not dirty the page as a result so the hint
4679  * is lost when we evict the page or shutdown.
4680  *
4681  * See src/backend/storage/page/README for longer discussion.
4682  */
4683  if (RecoveryInProgress() ||
4684  RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
4685  return;
4686 
4687  /*
4688  * If the block is already dirty because we either made a change
4689  * or set a hint already, then we don't need to write a full page
4690  * image. Note that aggressive cleaning of blocks dirtied by hint
4691  * bit setting would increase the call rate. Bulk setting of hint
4692  * bits would reduce the call rate...
4693  *
4694  * We must issue the WAL record before we mark the buffer dirty.
4695  * Otherwise we might write the page before we write the WAL. That
4696  * causes a race condition, since a checkpoint might occur between
4697  * writing the WAL record and marking the buffer dirty. We solve
4698  * that with a kluge, but one that is already in use during
4699  * transaction commit to prevent race conditions. Basically, we
4700  * simply prevent the checkpoint WAL record from being written
4701  * until we have marked the buffer dirty. We don't start the
4702  * checkpoint flush until we have marked dirty, so our checkpoint
4703  * must flush the change to disk successfully or the checkpoint
4704  * never gets written, so crash recovery will fix.
4705  *
4706  * It's possible we may enter here without an xid, so it is
4707  * essential that CreateCheckPoint waits for virtual transactions
4708  * rather than full transactionids.
4709  */
4710  Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
4711  MyProc->delayChkptFlags |= DELAY_CHKPT_START;
4712  delayChkptFlags = true;
4713  lsn = XLogSaveBufferForHint(buffer, buffer_std);
4714  }
4715 
4716  buf_state = LockBufHdr(bufHdr);
4717 
4718  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4719 
4720  if (!(buf_state & BM_DIRTY))
4721  {
4722  dirtied = true; /* Means "will be dirtied by this action" */
4723 
4724  /*
4725  * Set the page LSN if we wrote a backup block. We aren't supposed
4726  * to set this when only holding a share lock but as long as we
4727  * serialise it somehow we're OK. We choose to set LSN while
4728  * holding the buffer header lock, which causes any reader of an
4729  * LSN who holds only a share lock to also obtain a buffer header
4730  * lock before using PageGetLSN(), which is enforced in
4731  * BufferGetLSNAtomic().
4732  *
4733  * If checksums are enabled, you might think we should reset the
4734  * checksum here. That will happen when the page is written
4735  * sometime later in this checkpoint cycle.
4736  */
4737  if (!XLogRecPtrIsInvalid(lsn))
4738  PageSetLSN(page, lsn);
4739  }
4740 
4741  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
4742  UnlockBufHdr(bufHdr, buf_state);
4743 
4744  if (delayChkptFlags)
4745  MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
4746 
4747  if (dirtied)
4748  {
4749  VacuumPageDirty++;
4750  pgBufferUsage.shared_blks_dirtied++;
4751  if (VacuumCostActive)
4752  VacuumCostBalance += VacuumCostPageDirty;
4753  }
4754  }
4755 }
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:388
#define DELAY_CHKPT_START
Definition: proc.h:114
bool RelFileLocatorSkippingWAL(RelFileLocator rlocator)
Definition: storage.c:532
int delayChkptFlags
Definition: proc.h:236
bool RecoveryInProgress(void)
Definition: xlog.c:6201
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
Definition: xloginsert.c:1065

References Assert(), BM_DIRTY, BM_JUST_DIRTIED, BM_PERMANENT, BUF_STATE_GET_REFCOUNT, PrivateRefCountEntry::buffer, BufferDescriptorGetContentLock(), BufferGetPage(), BufferIsLocal, BufferIsValid(), BufTagGetRelFileLocator(), DELAY_CHKPT_START, PGPROC::delayChkptFlags, elog, ERROR, GetBufferDescriptor(), GetPrivateRefCount(), InvalidXLogRecPtr, LockBufHdr(), LWLockHeldByMe(), MarkLocalBufferDirty(), MyProc, PageSetLSN(), pg_atomic_read_u32(), pgBufferUsage, RecoveryInProgress(), RelFileLocatorSkippingWAL(), BufferUsage::shared_blks_dirtied, BufferDesc::state, BufferDesc::tag, UnlockBufHdr(), VacuumCostActive, VacuumCostBalance, VacuumCostPageDirty, VacuumPageDirty, XLogHintBitIsNeeded, XLogRecPtrIsInvalid, and XLogSaveBufferForHint().

Referenced by _bt_check_unique(), _bt_killitems(), _hash_kill_items(), brin_start_evacuating_page(), btvacuumpage(), fsm_search_avail(), fsm_set_and_search(), fsm_vacuum_page(), gistkillitems(), heap_page_prune(), read_seq_tuple(), SetHintBits(), and XLogRecordPageWithFreeSpace().

◆ PrefetchBuffer()

PrefetchBufferResult PrefetchBuffer ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

Definition at line 627 of file bufmgr.c.

628 {
629  Assert(RelationIsValid(reln));
630  Assert(BlockNumberIsValid(blockNum));
631 
632  if (RelationUsesLocalBuffers(reln))
633  {
634  /* see comments in ReadBufferExtended */
635  if (RELATION_IS_OTHER_TEMP(reln))
636  ereport(ERROR,
637  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
638  errmsg("cannot access temporary tables of other sessions")));
639 
640  /* pass it off to localbuf.c */
641  return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
642  }
643  else
644  {
645  /* pass it to the shared buffer version */
646  return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
647  }
648 }
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
PrefetchBufferResult PrefetchSharedBuffer(SMgrRelation smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:537
int errcode(int sqlerrcode)
Definition: elog.c:859
int errmsg(const char *fmt,...)
Definition: elog.c:1072
#define ereport(elevel,...)
Definition: elog.h:149
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition: localbuf.c:69
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:658
#define RelationIsValid(relation)
Definition: rel.h:478

References Assert(), BlockNumberIsValid(), ereport, errcode(), errmsg(), ERROR, PrefetchLocalBuffer(), PrefetchSharedBuffer(), RELATION_IS_OTHER_TEMP, RelationGetSmgr(), RelationIsValid, and RelationUsesLocalBuffers.

Referenced by acquire_sample_rows(), BitmapPrefetch(), count_nondeletable_pages(), and pg_prewarm().

◆ PrefetchSharedBuffer()

PrefetchBufferResult PrefetchSharedBuffer ( struct SMgrRelationData smgr_reln,
ForkNumber  forkNum,
BlockNumber  blockNum 
)

◆ ReadBuffer()

Buffer ReadBuffer ( Relation  reln,
BlockNumber  blockNum 
)

Definition at line 734 of file bufmgr.c.

735 {
736  return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
737 }
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:781

References MAIN_FORKNUM, RBM_NORMAL, and ReadBufferExtended().

Referenced by _bt_allocbuf(), _bt_getbuf(), _bt_search_insert(), _hash_getbuf(), _hash_getbuf_with_condlock_cleanup(), blbulkdelete(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brinGetStats(), brinGetTupleForHeapBlock(), brinRevmapDesummarizeRange(), brinRevmapInitialize(), bt_metap(), bt_multi_page_stats(), bt_page_items_internal(), bt_page_stats_internal(), ginFindLeafPage(), ginFindParents(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), GinNewBuffer(), ginStepRight(), ginUpdateStats(), gistBufferingFindCorrectParent(), gistbufferinginserttuples(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistfixsplit(), gistGetMaxLevel(), gistkillitems(), gistNewBuffer(), gistProcessItup(), gistScanPage(), heap_abort_speculative(), heap_delete(), heap_fetch(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_lock_tuple(), heap_update(), initBloomState(), pg_visibility(), pgstatginindex_internal(), read_seq_tuple(), RelationGetBufferForTuple(), ReleaseAndReadBuffer(), revmap_get_buffer(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), shiftList(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), and spgWalk().

◆ ReadBufferExtended()

Buffer ReadBufferExtended ( Relation  reln,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy 
)

Definition at line 781 of file bufmgr.c.

783 {
784  bool hit;
785  Buffer buf;
786 
787  /*
788  * Reject attempts to read non-local temporary relations; we would be
789  * likely to get wrong data since we have no visibility into the owning
790  * session's local buffers.
791  */
792  if (RELATION_IS_OTHER_TEMP(reln))
793  ereport(ERROR,
794  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
795  errmsg("cannot access temporary tables of other sessions")));
796 
797  /*
798  * Read the buffer, and update pgstat counters to reflect a cache hit or
799  * miss.
800  */
801  pgstat_count_buffer_read(reln);
802  buf = ReadBuffer_common(RelationGetSmgr(reln), reln->rd_rel->relpersistence,
803  forkNum, blockNum, mode, strategy, &hit);
804  if (hit)
805  pgstat_count_buffer_hit(reln);
806  return buf;
807 }
#define pgstat_count_buffer_read(rel)
Definition: pgstat.h:635
#define pgstat_count_buffer_hit(rel)
Definition: pgstat.h:640

References buf, ereport, errcode(), errmsg(), ERROR, mode, pgstat_count_buffer_hit, pgstat_count_buffer_read, RelationData::rd_rel, ReadBuffer_common(), RELATION_IS_OTHER_TEMP, and RelationGetSmgr().

Referenced by _hash_getbuf_with_strategy(), _hash_getinitbuf(), _hash_getnewbuf(), autoprewarm_database_main(), blbulkdelete(), blgetbitmap(), BloomInitMetapage(), blvacuumcleanup(), brin_vacuum_scan(), bt_recheck_sibling_links(), btvacuumpage(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), fsm_readbuf(), get_raw_page_internal(), ginbulkdelete(), ginDeletePage(), ginScanToDelete(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistvacuum_delete_empty_pages(), gistvacuumpage(), hashbulkdelete(), heapam_scan_analyze_next_block(), heapgetpage(), lazy_scan_heap(), lazy_vacuum_heap_rel(), log_newpage_range(), palloc_btree_page(), pg_prewarm(), pgstat_btree_page(), pgstat_gist_page(), pgstat_heap(), pgstathashindex(), pgstatindex_impl(), ReadBuffer(), ReadBufferBI(), spgprocesspending(), spgvacuumpage(), statapprox_heap(), verify_heapam(), and vm_readbuf().

◆ ReadBufferWithoutRelcache()

Buffer ReadBufferWithoutRelcache ( RelFileLocator  rlocator,
ForkNumber  forkNum,
BlockNumber  blockNum,
ReadBufferMode  mode,
BufferAccessStrategy  strategy,
bool  permanent 
)

Definition at line 821 of file bufmgr.c.

824 {
825  bool hit;
826 
827  SMgrRelation smgr = smgropen(rlocator, INVALID_PROC_NUMBER);
828 
829  return ReadBuffer_common(smgr, permanent ? RELPERSISTENCE_PERMANENT :
830  RELPERSISTENCE_UNLOGGED, forkNum, blockNum,
831  mode, strategy, &hit);
832 }

References INVALID_PROC_NUMBER, mode, ReadBuffer_common(), and smgropen().

Referenced by RelationCopyStorageUsingBuffer(), ScanSourceDatabasePgClass(), and XLogReadBufferExtended().

◆ ReadRecentBuffer()

bool ReadRecentBuffer ( RelFileLocator  rlocator,
ForkNumber  forkNum,
BlockNumber  blockNum,
Buffer  recent_buffer 
)

Definition at line 658 of file bufmgr.c.

660 {
661  BufferDesc *bufHdr;
662  BufferTag tag;
663  uint32 buf_state;
664  bool have_private_ref;
665 
666  Assert(BufferIsValid(recent_buffer));
667 
668  ResourceOwnerEnlarge(CurrentResourceOwner);
669  ReservePrivateRefCountEntry();
670  InitBufferTag(&tag, &rlocator, forkNum, blockNum);
671 
672  if (BufferIsLocal(recent_buffer))
673  {
674  int b = -recent_buffer - 1;
675 
676  bufHdr = GetLocalBufferDescriptor(b);
677  buf_state = pg_atomic_read_u32(&bufHdr->state);
678 
679  /* Is it still valid and holding the right tag? */
680  if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
681  {
682  PinLocalBuffer(bufHdr, true);
683 
684  pgBufferUsage.local_blks_hit++;
685 
686  return true;
687  }
688  }
689  else
690  {
691  bufHdr = GetBufferDescriptor(recent_buffer - 1);
692  have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
693 
694  /*
695  * Do we already have this buffer pinned with a private reference? If
696  * so, it must be valid and it is safe to check the tag without
697  * locking. If not, we have to lock the header first and then check.
698  */
699  if (have_private_ref)
700  buf_state = pg_atomic_read_u32(&bufHdr->state);
701  else
702  buf_state = LockBufHdr(bufHdr);
703 
704  if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
705  {
706  /*
707  * It's now safe to pin the buffer. We can't pin first and ask
708  * questions later, because it might confuse code paths like
709  * InvalidateBuffer() if we pinned a random non-matching buffer.
710  */
711  if (have_private_ref)
712  PinBuffer(bufHdr, NULL); /* bump pin count */
713  else
714  PinBuffer_Locked(bufHdr); /* pin for first time */
715 
716  pgBufferUsage.shared_blks_hit++;
717 
718  return true;
719  }
720 
721  /* If we locked the header above, now unlock. */
722  if (!have_private_ref)
723  UnlockBufHdr(bufHdr, buf_state);
724  }
725 
726  return false;
727 }
static void InitBufferTag(BufferTag *tag, const RelFileLocator *rlocator, ForkNumber forkNum, BlockNumber blockNum)
static bool BufferTagsEqual(const BufferTag *tag1, const BufferTag *tag2)
static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
Definition: bufmgr.c:2310
int b
Definition: isn.c:70
bool PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount)
Definition: localbuf.c:655
int64 local_blks_hit
Definition: instrument.h:30
int64 shared_blks_hit
Definition: instrument.h:26

References Assert(), b, BM_VALID, BufferIsLocal, BufferIsValid(), BufferTagsEqual(), CurrentResourceOwner, GetBufferDescriptor(), GetLocalBufferDescriptor(), GetPrivateRefCount(), InitBufferTag(), BufferUsage::local_blks_hit, LockBufHdr(), pg_atomic_read_u32(), pgBufferUsage, PinBuffer(), PinBuffer_Locked(), PinLocalBuffer(), ReservePrivateRefCountEntry(), ResourceOwnerEnlarge(), BufferUsage::shared_blks_hit, BufferDesc::state, BufferDesc::tag, and UnlockBufHdr().

Referenced by XLogReadBufferExtended().

◆ RelationGetNumberOfBlocksInFork()

BlockNumber RelationGetNumberOfBlocksInFork ( Relation  relation,
ForkNumber  forkNum 
)

Definition at line 3576 of file bufmgr.c.

3577 {
3578  if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
3579  {
3580  /*
3581  * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore
3582  * tableam returns the size in bytes - but for the purpose of this
3583  * routine, we want the number of blocks. Therefore divide, rounding
3584  * up.
3585  */
3586  uint64 szbytes;
3587 
3588  szbytes = table_relation_size(relation, forkNum);
3589 
3590  return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
3591  }
3592  else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
3593  {
3594  return smgrnblocks(RelationGetSmgr(relation), forkNum);
3595  }
3596  else
3597  Assert(false);
3598 
3599  return 0; /* keep compiler quiet */
3600 }
static uint64 table_relation_size(Relation rel, ForkNumber forkNumber)
Definition: tableam.h:1860

References Assert(), RelationData::rd_rel, RelationGetSmgr(), smgrnblocks(), and table_relation_size().

Referenced by _hash_getnewbuf(), _hash_init(), autoprewarm_database_main(), get_raw_page_internal(), and pg_prewarm().

◆ ReleaseAndReadBuffer()

Buffer ReleaseAndReadBuffer ( Buffer  buffer,
Relation  relation,
BlockNumber  blockNum 
)

Definition at line 2252 of file bufmgr.c.

2255 {
2256  ForkNumber forkNum = MAIN_FORKNUM;
2257  BufferDesc *bufHdr;
2258 
2259  if (BufferIsValid(buffer))
2260  {
2261  Assert(BufferIsPinned(buffer));
2262  if (BufferIsLocal(buffer))
2263  {
2264  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2265  if (bufHdr->tag.blockNum == blockNum &&
2266  BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
2267  BufTagGetForkNum(&bufHdr->tag) == forkNum)
2268  return buffer;
2269  UnpinLocalBuffer(buffer);
2270  }
2271  else
2272  {
2273  bufHdr = GetBufferDescriptor(buffer - 1);
2274  /* we have pin, so it's ok to examine tag without spinlock */
2275  if (bufHdr->tag.blockNum == blockNum &&
2276  BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
2277  BufTagGetForkNum(&bufHdr->tag) == forkNum)
2278  return buffer;
2279  UnpinBuffer(bufHdr);
2280  }
2281  }
2282 
2283  return ReadBuffer(relation, blockNum);
2284 }
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:734
void UnpinLocalBuffer(Buffer buffer)
Definition: localbuf.c:681

References Assert(), buftag::blockNum, PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsPinned, BufferIsValid(), BufTagGetForkNum(), BufTagMatchesRelFileLocator(), GetBufferDescriptor(), GetLocalBufferDescriptor(), MAIN_FORKNUM, RelationData::rd_locator, ReadBuffer(), BufferDesc::tag, UnpinBuffer(), and UnpinLocalBuffer().

Referenced by _bt_relandgetbuf(), ginFindLeafPage(), heapam_index_fetch_tuple(), and heapam_scan_bitmap_next_block().

◆ ReleaseBuffer()

void ReleaseBuffer ( Buffer  buffer)

Definition at line 4560 of file bufmgr.c.

4561 {
4562  if (!BufferIsValid(buffer))
4563  elog(ERROR, "bad buffer ID: %d", buffer);
4564 
4565  if (BufferIsLocal(buffer))
4566  UnpinLocalBuffer(buffer);
4567  else
4568  UnpinBuffer(GetBufferDescriptor(buffer - 1));
4569 }

References PrivateRefCountEntry::buffer, BufferIsLocal, BufferIsValid(), elog, ERROR, GetBufferDescriptor(), UnpinBuffer(), and UnpinLocalBuffer().

Referenced by _bt_allocbuf(), _bt_drop_lock_and_maybe_pin(), _bt_pagedel(), _bt_relbuf(), _bt_search_insert(), _bt_unlink_halfdead_page(), _hash_dropbuf(), _hash_getbuf_with_condlock_cleanup(), autoprewarm_database_main(), blinsert(), BloomNewBuffer(), brin_getinsertbuffer(), brin_vacuum_scan(), bringetbitmap(), brinGetTupleForHeapBlock(), brininsert(), brinRevmapTerminate(), brinsummarize(), collect_corrupt_items(), collect_visibility_data(), entryLoadMoreItems(), ExecEndBitmapHeapScan(), ExecEndIndexOnlyScan(), ExecReScanBitmapHeapScan(), ExtendBufferedRelTo(), FreeBulkInsertState(), freeGinBtreeStack(), fsm_vacuum_page(), get_actual_variable_endpoint(), get_raw_page_internal(), GetRecordedFreeSpace(), ginDeletePage(), ginFindParents(), ginFinishSplit(), ginFreeScanKeys(), ginInsertCleanup(), GinNewBuffer(), ginScanToDelete(), gistdoinsert(), gistFindCorrectParent(), gistNewBuffer(), gistvacuum_delete_empty_pages(), heap_abort_speculative(), heap_delete(), heap_endscan(), heap_fetch(), heap_force_common(), heap_insert(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_rescan(), heap_update(), heap_vac_scan_next_block(), heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), heapam_index_fetch_reset(), heapam_scan_sample_next_block(), heapam_tuple_lock(), heapgetpage(), heapgettup(), heapgettup_pagemode(), lazy_scan_heap(), lazy_vacuum_heap_rel(), pg_prewarm(), pg_visibility(), pg_visibility_map(), pg_visibility_map_summary(), pgstatindex_impl(), ReadBufferBI(), RelationAddBlocks(), RelationGetBufferForTuple(), ReleaseBulkInsertStatePin(), revmap_get_buffer(), spgdoinsert(), SpGistGetBuffer(), SpGistNewBuffer(), SpGistUpdateMetaPage(), statapprox_heap(), summarize_range(), terminate_brin_buildstate(), tts_buffer_heap_clear(), tts_buffer_heap_materialize(), tts_buffer_heap_store_tuple(), UnlockReleaseBuffer(), verify_heapam(), 
visibilitymap_count(), visibilitymap_get_status(), visibilitymap_pin(), and XLogReadBufferExtended().

◆ UnlockBuffers()

void UnlockBuffers ( void  )

Definition at line 4767 of file bufmgr.c.

4768 {
4769  BufferDesc *buf = PinCountWaitBuf;
4770 
4771  if (buf)
4772  {
4773  uint32 buf_state;
4774 
4775  buf_state = LockBufHdr(buf);
4776 
4777  /*
4778  * Don't complain if flag bit not set; it could have been reset but we
4779  * got a cancel/die interrupt before getting the signal.
4780  */
4781  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4782  buf->wait_backend_pgprocno == MyProcNumber)
4783  buf_state &= ~BM_PIN_COUNT_WAITER;
4784 
4785  UnlockBufHdr(buf, buf_state);
4786 
4787  PinCountWaitBuf = NULL;
4788  }
4789 }

References BM_PIN_COUNT_WAITER, buf, LockBufHdr(), MyProcNumber, PinCountWaitBuf, and UnlockBufHdr().

Referenced by AbortSubTransaction(), AbortTransaction(), AtProcExit_Buffers(), BackgroundWriterMain(), CheckpointerMain(), and WalWriterMain().

◆ UnlockReleaseBuffer()

void UnlockReleaseBuffer ( Buffer  buffer)

Definition at line 4577 of file bufmgr.c.

4578 {
4579  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4580  ReleaseBuffer(buffer);
4581 }

References PrivateRefCountEntry::buffer, BUFFER_LOCK_UNLOCK, LockBuffer(), and ReleaseBuffer().

Referenced by _bt_clear_incomplete_split(), _bt_restore_meta(), _hash_relbuf(), allocNewBuffer(), AlterSequence(), blbulkdelete(), blgetbitmap(), blinsert(), BloomInitMetapage(), blvacuumcleanup(), brin_doinsert(), brin_doupdate(), brin_evacuate_page(), brin_getinsertbuffer(), brin_xlog_createidx(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), brinbuild(), brinbuildempty(), brinGetStats(), brinRevmapDesummarizeRange(), bt_metap(), bt_multi_page_stats(), bt_page_items_internal(), bt_page_stats_internal(), bt_recheck_sibling_links(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_newroot(), btree_xlog_split(), btree_xlog_unlink_page(), btree_xlog_vacuum(), collect_corrupt_items(), collect_visibility_data(), count_nondeletable_pages(), createPostingTree(), do_setval(), doPickSplit(), entryLoadMoreItems(), fill_seq_fork_with_data(), flushCachedPage(), FreeSpaceMapPrepareTruncateRel(), fsm_search(), fsm_set_and_search(), generic_redo(), ginbuild(), ginbuildempty(), ginbulkdelete(), ginGetStats(), ginHeapTupleFastInsert(), ginInsertCleanup(), ginPlaceToPage(), ginRedoClearIncompleteSplit(), ginRedoCreatePTree(), ginRedoDeleteListPages(), ginRedoDeletePage(), ginRedoInsert(), ginRedoInsertListPage(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), ginScanToDelete(), ginStepRight(), ginUpdateStats(), ginvacuumcleanup(), ginVacuumPostingTree(), ginVacuumPostingTreeLeaves(), gistbufferinginserttuples(), gistbuild(), gistbuildempty(), gistdoinsert(), gistFindCorrectParent(), gistFindPath(), gistGetMaxLevel(), gistinserttuples(), gistkillitems(), gistplacetopage(), gistProcessItup(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageSplitRecord(), gistRedoPageUpdateRecord(), gistScanPage(), gistvacuum_delete_empty_pages(), gistvacuumpage(), 
hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_init_meta_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_delete(), heap_finish_speculative(), heap_force_common(), heap_get_latest_tid(), heap_index_delete_tuples(), heap_inplace_update(), heap_insert(), heap_lock_updated_tuple_rec(), heap_multi_insert(), heap_update(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_update(), heap_xlog_vacuum(), heap_xlog_visible(), heapam_scan_analyze_next_tuple(), initBloomState(), lazy_scan_heap(), lazy_scan_new_or_empty(), lazy_vacuum_heap_rel(), log_newpage_range(), moveLeafs(), nextval_internal(), palloc_btree_page(), pg_sequence_last_value(), pg_visibility(), pgstat_gist_page(), pgstat_heap(), pgstatginindex_internal(), pgstathashindex(), RelationCopyStorageUsingBuffer(), RelationGetBufferForTuple(), ResetSequence(), revmap_physical_extend(), scanGetCandidate(), scanPendingInsert(), scanPostingTree(), ScanSourceDatabasePgClass(), seq_redo(), SequenceChangePersistence(), shiftList(), spgAddNodeAction(), spgbuild(), spgdoinsert(), spgGetCache(), SpGistGetBuffer(), SpGistUpdateMetaPage(), spgMatchNodeAction(), spgprocesspending(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), spgSplitNodeAction(), spgvacuumpage(), spgWalk(), statapprox_heap(), verify_heapam(), verifyBackupPageConsistency(), visibilitymap_prepare_truncate(), writeListPage(), xlog_redo(), and XLogRecordPageWithFreeSpace().

Variable Documentation

◆ backend_flush_after

PGDLLIMPORT int backend_flush_after
extern

Definition at line 161 of file bufmgr.c.

Referenced by InitBufferPool().

◆ bgwriter_flush_after

PGDLLIMPORT int bgwriter_flush_after
extern

Definition at line 160 of file bufmgr.c.

Referenced by BackgroundWriterMain().

◆ bgwriter_lru_maxpages

PGDLLIMPORT int bgwriter_lru_maxpages
extern

Definition at line 136 of file bufmgr.c.

Referenced by BgBufferSync().

◆ bgwriter_lru_multiplier

PGDLLIMPORT double bgwriter_lru_multiplier
extern

Definition at line 137 of file bufmgr.c.

Referenced by BgBufferSync().

◆ BufferBlocks

PGDLLIMPORT char* BufferBlocks
extern

Definition at line 22 of file buf_init.c.

Referenced by BufferGetBlock(), and InitBufferPool().

◆ checkpoint_flush_after

PGDLLIMPORT int checkpoint_flush_after
extern

Definition at line 159 of file bufmgr.c.

Referenced by BufferSync().

◆ effective_io_concurrency

PGDLLIMPORT int effective_io_concurrency
extern

Definition at line 146 of file bufmgr.c.

Referenced by get_tablespace_io_concurrency(), and tablespace_reloptions().

◆ LocalBufferBlockPointers

PGDLLIMPORT Block* LocalBufferBlockPointers
extern

Definition at line 45 of file localbuf.c.

Referenced by BufferGetBlock(), and InitLocalBuffers().

◆ LocalRefCount

◆ maintenance_io_concurrency

◆ NBuffers

◆ NLocBuffer

◆ track_io_timing

◆ zero_damaged_pages

PGDLLIMPORT bool zero_damaged_pages
extern

Definition at line 135 of file bufmgr.c.

Referenced by mdreadv(), and ReadBuffer_common().