PostgreSQL Source Code git master
Loading...
Searching...
No Matches
buf_internals.h
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * buf_internals.h
4 * Internal definitions for buffer manager and the buffer replacement
5 * strategy.
6 *
7 *
8 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
9 * Portions Copyright (c) 1994, Regents of the University of California
10 *
11 * src/include/storage/buf_internals.h
12 *
13 *-------------------------------------------------------------------------
14 */
15#ifndef BUFMGR_INTERNALS_H
16#define BUFMGR_INTERNALS_H
17
#include "pgstat.h"
#include "port/atomics.h"
#include "storage/aio_types.h"
#include "storage/buf.h"
#include "storage/bufmgr.h"
#include "storage/condition_variable.h"
#include "storage/lwlock.h"
#include "storage/proclist_types.h"
#include "storage/procnumber.h"
#include "storage/shmem.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "utils/relcache.h"
#include "utils/resowner.h"
32
33/*
34 * Buffer state is a single 64-bit variable where following data is combined.
35 *
36 * State of the buffer itself (in order):
37 * - 18 bits refcount
38 * - 4 bits usage count
39 * - 12 bits of flags
40 * - 18 bits share-lock count
41 * - 1 bit share-exclusive locked
42 * - 1 bit exclusive locked
43 *
44 * Combining these values allows to perform some operations without locking
45 * the buffer header, by modifying them together with a CAS loop.
46 *
47 * The definition of buffer state components is below.
48 */
49#define BUF_REFCOUNT_BITS 18
50#define BUF_USAGECOUNT_BITS 4
51#define BUF_FLAG_BITS 12
52#define BUF_LOCK_BITS (18+2)
53
55 "parts of buffer state space need to be <= 64");
56
57/* refcount related definitions */
58#define BUF_REFCOUNT_ONE 1
59#define BUF_REFCOUNT_MASK \
60 ((UINT64CONST(1) << BUF_REFCOUNT_BITS) - 1)
61
62/* usage count related definitions */
63#define BUF_USAGECOUNT_SHIFT \
64 BUF_REFCOUNT_BITS
65#define BUF_USAGECOUNT_MASK \
66 (((UINT64CONST(1) << BUF_USAGECOUNT_BITS) - 1) << (BUF_USAGECOUNT_SHIFT))
67#define BUF_USAGECOUNT_ONE \
68 (UINT64CONST(1) << BUF_REFCOUNT_BITS)
69
70/* flags related definitions */
71#define BUF_FLAG_SHIFT \
72 (BUF_REFCOUNT_BITS + BUF_USAGECOUNT_BITS)
73#define BUF_FLAG_MASK \
74 (((UINT64CONST(1) << BUF_FLAG_BITS) - 1) << BUF_FLAG_SHIFT)
75
76/* lock state related definitions */
77#define BM_LOCK_SHIFT \
78 (BUF_FLAG_SHIFT + BUF_FLAG_BITS)
79#define BM_LOCK_VAL_SHARED \
80 (UINT64CONST(1) << (BM_LOCK_SHIFT))
81#define BM_LOCK_VAL_SHARE_EXCLUSIVE \
82 (UINT64CONST(1) << (BM_LOCK_SHIFT + MAX_BACKENDS_BITS))
83#define BM_LOCK_VAL_EXCLUSIVE \
84 (UINT64CONST(1) << (BM_LOCK_SHIFT + MAX_BACKENDS_BITS + 1))
85#define BM_LOCK_MASK \
86 ((((uint64) MAX_BACKENDS) << BM_LOCK_SHIFT) | BM_LOCK_VAL_SHARE_EXCLUSIVE | BM_LOCK_VAL_EXCLUSIVE)
87
88
89/* Get refcount and usagecount from buffer state */
90#define BUF_STATE_GET_REFCOUNT(state) \
91 ((uint32)((state) & BUF_REFCOUNT_MASK))
92#define BUF_STATE_GET_USAGECOUNT(state) \
93 ((uint32)(((state) & BUF_USAGECOUNT_MASK) >> BUF_USAGECOUNT_SHIFT))
94
95/*
96 * Flags for buffer descriptors
97 *
98 * Note: BM_TAG_VALID essentially means that there is a buffer hashtable
99 * entry associated with the buffer's tag.
100 */
101
102#define BUF_DEFINE_FLAG(flagno) \
103 (UINT64CONST(1) << (BUF_FLAG_SHIFT + (flagno)))
104
105/* buffer header is locked */
106#define BM_LOCKED BUF_DEFINE_FLAG( 0)
107/* data needs writing */
108#define BM_DIRTY BUF_DEFINE_FLAG( 1)
109/* data is valid */
110#define BM_VALID BUF_DEFINE_FLAG( 2)
111/* tag is assigned */
112#define BM_TAG_VALID BUF_DEFINE_FLAG( 3)
113/* read or write in progress */
114#define BM_IO_IN_PROGRESS BUF_DEFINE_FLAG( 4)
115/* previous I/O failed */
116#define BM_IO_ERROR BUF_DEFINE_FLAG( 5)
117/* dirtied since write started */
118#define BM_JUST_DIRTIED BUF_DEFINE_FLAG( 6)
119/* have waiter for sole pin */
120#define BM_PIN_COUNT_WAITER BUF_DEFINE_FLAG( 7)
121/* must write for checkpoint */
122#define BM_CHECKPOINT_NEEDED BUF_DEFINE_FLAG( 8)
123/* permanent buffer (not unlogged, or init fork) */
124#define BM_PERMANENT BUF_DEFINE_FLAG( 9)
125/* content lock has waiters */
126#define BM_LOCK_HAS_WAITERS BUF_DEFINE_FLAG(10)
127/* waiter for content lock has been signalled but not yet run */
128#define BM_LOCK_WAKE_IN_PROGRESS BUF_DEFINE_FLAG(11)
129
130
132 "MAX_BACKENDS_BITS needs to be <= BUF_REFCOUNT_BITS");
134 "MAX_BACKENDS_BITS needs to be <= BUF_LOCK_BITS - 2");
135
136
137/*
138 * The maximum allowed value of usage_count represents a tradeoff between
139 * accuracy and speed of the clock-sweep buffer management algorithm. A
140 * large value (comparable to NBuffers) would approximate LRU semantics.
141 * But it can take as many as BM_MAX_USAGE_COUNT+1 complete cycles of the
142 * clock-sweep hand to find a free buffer, so in practice we don't want the
143 * value to be very large.
144 */
145#define BM_MAX_USAGE_COUNT 5
146
148 "BM_MAX_USAGE_COUNT doesn't fit in BUF_USAGECOUNT_BITS bits");
149
150/*
151 * Buffer tag identifies which disk block the buffer contains.
152 *
153 * Note: the BufferTag data must be sufficient to determine where to write the
154 * block, without reference to pg_class or pg_tablespace entries. It's
155 * possible that the backend flushing the buffer doesn't even believe the
156 * relation is visible yet (its xact may have started before the xact that
157 * created the rel). The storage manager must be able to cope anyway.
158 *
159 * Note: if there's any pad bytes in the struct, InitBufferTag will have
160 * to be fixed to zero them, since this struct is used as a hash key.
161 */
162typedef struct buftag
163{
164 Oid spcOid; /* tablespace oid */
165 Oid dbOid; /* database oid */
166 RelFileNumber relNumber; /* relation file number */
167 ForkNumber forkNum; /* fork number */
168 BlockNumber blockNum; /* blknum relative to begin of reln */
170
171static inline RelFileNumber
173{
174 return tag->relNumber;
175}
176
177static inline ForkNumber
179{
180 return tag->forkNum;
181}
182
183static inline void
185 ForkNumber forknum)
186{
187 tag->relNumber = relnumber;
188 tag->forkNum = forknum;
189}
190
191static inline RelFileLocator
193{
194 RelFileLocator rlocator;
195
196 rlocator.spcOid = tag->spcOid;
197 rlocator.dbOid = tag->dbOid;
198 rlocator.relNumber = BufTagGetRelNumber(tag);
199
200 return rlocator;
201}
202
203static inline void
211
212static inline void
214 ForkNumber forkNum, BlockNumber blockNum)
215{
216 tag->spcOid = rlocator->spcOid;
217 tag->dbOid = rlocator->dbOid;
218 BufTagSetRelForkDetails(tag, rlocator->relNumber, forkNum);
219 tag->blockNum = blockNum;
220}
221
222static inline bool
224{
225 return (tag1->spcOid == tag2->spcOid) &&
226 (tag1->dbOid == tag2->dbOid) &&
227 (tag1->relNumber == tag2->relNumber) &&
228 (tag1->blockNum == tag2->blockNum) &&
229 (tag1->forkNum == tag2->forkNum);
230}
231
232static inline bool
234 const RelFileLocator *rlocator)
235{
236 return (tag->spcOid == rlocator->spcOid) &&
237 (tag->dbOid == rlocator->dbOid) &&
238 (BufTagGetRelNumber(tag) == rlocator->relNumber);
239}
240
241
242/*
243 * The shared buffer mapping table is partitioned to reduce contention.
244 * To determine which partition lock a given tag requires, compute the tag's
245 * hash code with BufTableHashCode(), then apply BufMappingPartitionLock().
246 * NB: NUM_BUFFER_PARTITIONS must be a power of 2!
247 */
248static inline uint32
250{
251 return hashcode % NUM_BUFFER_PARTITIONS;
252}
253
254static inline LWLock *
260
261static inline LWLock *
266
267/*
268 * BufferDesc -- shared descriptor/state data for a single shared buffer.
269 *
270 * The state of the buffer is controlled by the, drumroll, state variable. It
271 * only may be modified using atomic operations. The state variable combines
272 * various flags, the buffer's refcount and usage count. See comment above
273 * BUF_REFCOUNT_BITS for details about the division. This layout allow us to
274 * do some operations in a single atomic operation, without actually acquiring
275 * and releasing the spinlock; for instance, increasing or decreasing the
276 * refcount.
277 *
278 * One of the aforementioned flags is BM_LOCKED, used to implement the buffer
279 * header lock. See the following paragraphs, as well as the documentation for
280 * individual fields, for more details.
281 *
282 * The identity of the buffer (BufferDesc.tag) can only be changed by the
283 * backend holding the buffer header lock.
284 *
285 * If the lock is held by another backend, neither additional buffer pins may
286 * be established (we would like to relax this eventually), nor can flags be
287 * set/cleared. These operations either need to acquire the buffer header
288 * spinlock, or need to use a CAS loop, waiting for the lock to be released if
289 * it is held. However, existing buffer pins may be released while the buffer
290 * header spinlock is held, using an atomic subtraction.
291 *
292 * If we have the buffer pinned, its tag can't change underneath us, so we can
293 * examine the tag without locking the buffer header. Also, in places we do
294 * one-time reads of the flags without bothering to lock the buffer header;
295 * this is generally for situations where we don't expect the flag bit being
296 * tested to be changing.
297 *
298 * We can't physically remove items from a disk page if another backend has
299 * the buffer pinned. Hence, a backend may need to wait for all other pins
300 * to go away. This is signaled by storing its own pgprocno into
301 * wait_backend_pgprocno and setting flag bit BM_PIN_COUNT_WAITER. At present,
302 * there can be only one such waiter per buffer.
303 *
304 * The content of buffers is protected via the buffer content lock,
305 * implemented as part of the buffer state. Note that the buffer header lock
306 * is *not* used to control access to the data in the buffer! We used to use
307 * an LWLock to implement the content lock, but having a dedicated
308 * implementation of content locks allows us to implement some otherwise hard
309 * things (e.g. race-freely checking if AIO is in progress before locking a
310 * buffer exclusively) and enables otherwise impossible optimizations
311 * (e.g. unlocking and unpinning a buffer in one atomic operation).
312 *
313 * We use this same struct for local buffer headers, but the locks are not
314 * used and not all of the flag bits are useful either. To avoid unnecessary
315 * overhead, manipulations of the state field should be done without actual
316 * atomic operations (i.e. only pg_atomic_read_u64() and
317 * pg_atomic_unlocked_write_u64()).
318 *
319 * Be careful to avoid increasing the size of the struct when adding or
320 * reordering members. Keeping it below 64 bytes (the most common CPU
321 * cache line size) is fairly important for performance.
322 *
323 * Per-buffer I/O condition variables are currently kept outside this struct in
324 * a separate array. They could be moved in here and still fit within that
325 * limit on common systems, but for now that is not done.
326 */
327typedef struct BufferDesc
328{
329 /*
330 * ID of page contained in buffer. The buffer header spinlock needs to be
331 * held to modify this field.
332 */
334
335 /*
336 * Buffer's index number (from 0). The field never changes after
337 * initialization, so does not need locking.
338 */
340
341 /*
342 * State of the buffer, containing flags, refcount and usagecount. See
343 * BUF_* and BM_* defines at the top of this file.
344 */
346
347 /*
348 * Backend of pin-count waiter. The buffer header spinlock needs to be
349 * held to modify this field.
350 */
352
353 PgAioWaitRef io_wref; /* set iff AIO is in progress */
354
355 /*
356 * List of PGPROCs waiting for the buffer content lock. Protected by the
357 * buffer header spinlock.
358 */
361
362/*
363 * Concurrent access to buffer headers has proven to be more efficient if
364 * they're cache line aligned. So we force the start of the BufferDescriptors
365 * array to be on a cache line boundary and force the elements to be cache
366 * line sized.
367 *
368 * XXX: As this is primarily matters in highly concurrent workloads which
369 * probably all are 64bit these days, and the space wastage would be a bit
370 * more noticeable on 32bit systems, we don't force the stride to be cache
371 * line sized on those. If somebody does actual performance testing, we can
372 * reevaluate.
373 *
374 * Note that local buffer descriptors aren't forced to be aligned - as there's
375 * no concurrent access to those it's unlikely to be beneficial.
376 *
377 * We use a 64-byte cache line size here, because that's the most common
378 * size. Making it bigger would be a waste of memory. Even if running on a
379 * platform with either 32 or 128 byte line sizes, it's good to align to
380 * boundaries and avoid false sharing.
381 */
382#define BUFFERDESC_PAD_TO_SIZE (SIZEOF_VOID_P == 8 ? 64 : 1)
383
389
390/*
391 * The PendingWriteback & WritebackContext structure are used to keep
392 * information about pending flush requests to be issued to the OS.
393 */
394typedef struct PendingWriteback
395{
396 /* could store different types of pending flushes here */
399
400/* struct forward declared in bufmgr.h */
401typedef struct WritebackContext
402{
403 /* pointer to the max number of writeback requests to coalesce */
405
406 /* current number of pending writeback requests */
408
409 /* pending requests */
412
413/* in buf_init.c */
417
418/* in localbuf.c */
420
421
422static inline BufferDesc *
424{
425 return &(BufferDescriptors[id]).bufferdesc;
426}
427
428static inline BufferDesc *
433
434static inline Buffer
436{
437 return (Buffer) (bdesc->buf_id + 1);
438}
439
440static inline ConditionVariable *
442{
443 return &(BufferIOCVArray[bdesc->buf_id]).cv;
444}
445
446/*
447 * Functions for acquiring/releasing a shared buffer header's spinlock. Do
448 * not apply these to local buffers!
449 */
450extern uint64 LockBufHdr(BufferDesc *desc);
451
452/*
453 * Unlock the buffer header.
454 *
455 * This can only be used if the caller did not modify BufferDesc.state. To
456 * set/unset flag bits or change the refcount use UnlockBufHdrExt().
457 */
458static inline void
465
466/*
467 * Unlock the buffer header, while atomically adding the flags in set_bits,
468 * unsetting the ones in unset_bits and changing the refcount by
469 * refcount_change.
470 *
471 * Note that this approach would not work for usagecount, since we need to cap
472 * the usagecount at BM_MAX_USAGE_COUNT.
473 */
474static inline uint64
499
501
502/* in bufmgr.c */
503
504/*
505 * Structure to sort buffers per file on checkpoints.
506 *
507 * This structure is allocated per buffer in shared memory, so it should be
508 * kept as small as possible.
509 */
518
520
521/* ResourceOwner callbacks to hold buffer I/Os and pins */
524
525/* Convenience wrappers over ResourceOwnerRemember/Forget */
526static inline void
531static inline void
536static inline void
541static inline void
546
547/*
548 * Internal buffer management routines
549 */
550/* bufmgr.c */
551extern void WritebackContextInit(WritebackContext *context, int *max_pending);
555
556extern void TrackNewBufferPin(Buffer buf);
557
558/* solely to make it easier to write tests */
559extern bool StartBufferIO(BufferDesc *buf, bool forInput, bool nowait);
561 bool forget_owner, bool release_aio);
562
563
564/* freelist.c */
567 uint64 *buf_state, bool *from_ring);
568extern bool StrategyRejectBuffer(BufferAccessStrategy strategy,
569 BufferDesc *buf, bool from_ring);
570
572extern void StrategyNotifyBgWriter(int bgwprocno);
573
574extern Size StrategyShmemSize(void);
575extern void StrategyInitialize(bool init);
576
577/* buf_table.c */
578extern Size BufTableShmemSize(int size);
579extern void InitBufTable(int size);
581extern int BufTableLookup(BufferTag *tagPtr, uint32 hashcode);
582extern int BufTableInsert(BufferTag *tagPtr, uint32 hashcode, int buf_id);
583extern void BufTableDelete(BufferTag *tagPtr, uint32 hashcode);
584
585/* localbuf.c */
587extern void UnpinLocalBuffer(Buffer buffer);
588extern void UnpinLocalBufferNoOwner(Buffer buffer);
590 ForkNumber forkNum,
591 BlockNumber blockNum);
593 BlockNumber blockNum, bool *foundPtr);
596 uint32 flags,
599 Buffer *buffers,
601extern void MarkLocalBufferDirty(Buffer buffer);
604extern bool StartLocalBufferIO(BufferDesc *bufHdr, bool forInput, bool nowait);
607extern void DropRelationLocalBuffers(RelFileLocator rlocator,
608 ForkNumber *forkNum, int nforks,
610extern void DropRelationAllLocalBuffers(RelFileLocator rlocator);
611extern void AtEOXact_LocalBuffers(bool isCommit);
612
613#endif /* BUFMGR_INTERNALS_H */
static bool pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 *expected, uint64 newval)
Definition atomics.h:522
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition atomics.h:467
static uint64 pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
Definition atomics.h:541
uint32 BlockNumber
Definition block.h:31
#define InvalidBlockNumber
Definition block.h:33
int Buffer
Definition buf.h:23
#define BM_MAX_USAGE_COUNT
void FlushLocalBuffer(BufferDesc *bufHdr, SMgrRelation reln)
Definition localbuf.c:183
PGDLLIMPORT const ResourceOwnerDesc buffer_resowner_desc
Definition bufmgr.c:278
static void InitBufferTag(BufferTag *tag, const RelFileLocator *rlocator, ForkNumber forkNum, BlockNumber blockNum)
static uint32 BufTableHashPartition(uint32 hashcode)
static LWLock * BufMappingPartitionLockByIndex(uint32 index)
void BufTableDelete(BufferTag *tagPtr, uint32 hashcode)
Definition buf_table.c:148
void UnpinLocalBuffer(Buffer buffer)
Definition localbuf.c:841
int StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
Definition freelist.c:321
static ForkNumber BufTagGetForkNum(const BufferTag *tag)
#define BUF_REFCOUNT_ONE
static ConditionVariable * BufferDescriptorGetIOCV(const BufferDesc *bdesc)
bool StartLocalBufferIO(BufferDesc *bufHdr, bool forInput, bool nowait)
Definition localbuf.c:523
#define BUF_FLAG_BITS
static uint64 UnlockBufHdrExt(BufferDesc *desc, uint64 old_buf_state, uint64 set_bits, uint64 unset_bits, int refcount_change)
void AtEOXact_LocalBuffers(bool isCommit)
Definition localbuf.c:1003
int BufTableLookup(BufferTag *tagPtr, uint32 hashcode)
Definition buf_table.c:90
BufferDesc * StrategyGetBuffer(BufferAccessStrategy strategy, uint64 *buf_state, bool *from_ring)
Definition freelist.c:174
static bool BufferTagsEqual(const BufferTag *tag1, const BufferTag *tag2)
static RelFileNumber BufTagGetRelNumber(const BufferTag *tag)
static void UnlockBufHdr(BufferDesc *desc)
static bool BufTagMatchesRelFileLocator(const BufferTag *tag, const RelFileLocator *rlocator)
#define BUF_REFCOUNT_BITS
bool PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount)
Definition localbuf.c:805
static void BufTagSetRelForkDetails(BufferTag *tag, RelFileNumber relnumber, ForkNumber forknum)
void InitBufTable(int size)
Definition buf_table.c:51
PGDLLIMPORT const ResourceOwnerDesc buffer_io_resowner_desc
Definition bufmgr.c:269
void StrategyInitialize(bool init)
Definition freelist.c:401
#define BUF_USAGECOUNT_BITS
static void ResourceOwnerRememberBufferIO(ResourceOwner owner, Buffer buffer)
uint64 LockBufHdr(BufferDesc *desc)
Definition bufmgr.c:7097
#define BM_LOCKED
void MarkLocalBufferDirty(Buffer buffer)
Definition localbuf.c:491
#define BUFFERDESC_PAD_TO_SIZE
PGDLLIMPORT WritebackContext BackendWritebackContext
Definition buf_init.c:25
static void ResourceOwnerForgetBufferIO(ResourceOwner owner, Buffer buffer)
Size BufTableShmemSize(int size)
Definition buf_table.c:41
uint32 BufTableHashCode(BufferTag *tagPtr)
Definition buf_table.c:78
#define BUF_LOCK_BITS
void DropRelationAllLocalBuffers(RelFileLocator rlocator)
Definition localbuf.c:702
void ScheduleBufferTagForWriteback(WritebackContext *wb_context, IOContext io_context, BufferTag *tag)
Definition bufmgr.c:7269
void InvalidateLocalBuffer(BufferDesc *bufHdr, bool check_unreferenced)
Definition localbuf.c:605
int BufTableInsert(BufferTag *tagPtr, uint32 hashcode, int buf_id)
Definition buf_table.c:118
static void ClearBufferTag(BufferTag *tag)
static void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
void WritebackContextInit(WritebackContext *context, int *max_pending)
Definition bufmgr.c:7257
void StrategyNotifyBgWriter(int bgwprocno)
Definition freelist.c:358
struct buftag BufferTag
static void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
void TerminateLocalBufferIO(BufferDesc *bufHdr, bool clear_dirty, uint64 set_flag_bits, bool release_aio)
Definition localbuf.c:562
PGDLLIMPORT BufferDescPadded * BufferDescriptors
Definition buf_init.c:22
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition localbuf.c:72
PGDLLIMPORT ConditionVariableMinimallyPadded * BufferIOCVArray
Definition buf_init.c:24
StaticAssertDecl(BM_MAX_USAGE_COUNT<(UINT64CONST(1)<< BUF_USAGECOUNT_BITS), "BM_MAX_USAGE_COUNT doesn't fit in BUF_USAGECOUNT_BITS bits")
BlockNumber ExtendBufferedRelLocal(BufferManagerRelation bmr, ForkNumber fork, uint32 flags, uint32 extend_by, BlockNumber extend_upto, Buffer *buffers, uint32 *extended_by)
Definition localbuf.c:346
void IssuePendingWritebacks(WritebackContext *wb_context, IOContext io_context)
Definition bufmgr.c:7319
PGDLLIMPORT CkptSortItem * CkptBufferIds
Definition buf_init.c:26
IOContext IOContextForStrategy(BufferAccessStrategy strategy)
Definition freelist.c:747
void UnpinLocalBufferNoOwner(Buffer buffer)
Definition localbuf.c:848
void DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber *forkNum, int nforks, BlockNumber *firstDelBlock)
Definition localbuf.c:665
static LWLock * BufMappingPartitionLock(uint32 hashcode)
void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint64 set_flag_bits, bool forget_owner, bool release_aio)
Definition bufmgr.c:6937
bool StartBufferIO(BufferDesc *buf, bool forInput, bool nowait)
Definition bufmgr.c:6879
static RelFileLocator BufTagGetRelFileLocator(const BufferTag *tag)
Size StrategyShmemSize(void)
Definition freelist.c:380
uint64 WaitBufHdrUnlocked(BufferDesc *buf)
Definition bufmgr.c:7145
PGDLLIMPORT BufferDesc * LocalBufferDescriptors
Definition localbuf.c:47
static BufferDesc * GetLocalBufferDescriptor(uint32 id)
static BufferDesc * GetBufferDescriptor(uint32 id)
void TrackNewBufferPin(Buffer buf)
Definition bufmgr.c:3416
bool StrategyRejectBuffer(BufferAccessStrategy strategy, BufferDesc *buf, bool from_ring)
Definition freelist.c:787
static Buffer BufferDescriptorGetBuffer(const BufferDesc *bdesc)
BufferDesc * LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum, bool *foundPtr)
Definition localbuf.c:119
#define PGDLLIMPORT
Definition c.h:1328
#define Assert(condition)
Definition c.h:873
uint64_t uint64
Definition c.h:547
uint32_t uint32
Definition c.h:546
#define UINT64CONST(x)
Definition c.h:561
size_t Size
Definition c.h:619
LWLockPadded * MainLWLockArray
Definition lwlock.c:161
#define BUFFER_MAPPING_LWLOCK_OFFSET
Definition lwlock.h:102
#define NUM_BUFFER_PARTITIONS
Definition lwlock.h:91
#define WRITEBACK_MAX_PENDING_FLUSHES
static char buf[DEFAULT_XLOG_SEG_SIZE]
IOContext
Definition pgstat.h:285
static Datum Int32GetDatum(int32 X)
Definition postgres.h:222
#define InvalidOid
unsigned int Oid
static int fb(int x)
#define MAX_BACKENDS_BITS
Definition procnumber.h:38
Oid RelFileNumber
Definition relpath.h:25
ForkNumber
Definition relpath.h:56
@ InvalidForkNumber
Definition relpath.h:57
#define InvalidRelFileNumber
Definition relpath.h:26
void ResourceOwnerForget(ResourceOwner owner, Datum value, const ResourceOwnerDesc *kind)
Definition resowner.c:561
void ResourceOwnerRemember(ResourceOwner owner, Datum value, const ResourceOwnerDesc *kind)
Definition resowner.c:521
#define init()
int wait_backend_pgprocno
BufferTag tag
pg_atomic_uint64 state
proclist_head lock_waiters
PgAioWaitRef io_wref
ForkNumber forkNum
RelFileNumber relNumber
BlockNumber blockNum
RelFileNumber relNumber
PendingWriteback pending_writebacks[WRITEBACK_MAX_PENDING_FLUSHES]
BlockNumber blockNum
RelFileNumber relNumber
ForkNumber forkNum
Definition type.h:96
char pad[BUFFERDESC_PAD_TO_SIZE]
BufferDesc bufferdesc
LWLock lock
Definition lwlock.h:70