PostgreSQL Source Code git master
freelist.c File Reference
#include "postgres.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "storage/buf_internals.h"
#include "storage/bufmgr.h"
#include "storage/proc.h"
Include dependency graph for freelist.c:

Go to the source code of this file.

Data Structures

struct  BufferStrategyControl
 
struct  BufferAccessStrategyData
 

Macros

#define INT_ACCESS_ONCE(var)   ((int)(*((volatile int *)&(var))))
 

Typedefs

typedef struct BufferAccessStrategyData BufferAccessStrategyData
 

Functions

static BufferDesc * GetBufferFromRing (BufferAccessStrategy strategy, uint32 *buf_state)
 
static void AddBufferToRing (BufferAccessStrategy strategy, BufferDesc *buf)
 
static uint32 ClockSweepTick (void)
 
BufferDesc * StrategyGetBuffer (BufferAccessStrategy strategy, uint32 *buf_state, bool *from_ring)
 
int StrategySyncStart (uint32 *complete_passes, uint32 *num_buf_alloc)
 
void StrategyNotifyBgWriter (int bgwprocno)
 
Size StrategyShmemSize (void)
 
void StrategyInitialize (bool init)
 
BufferAccessStrategy GetAccessStrategy (BufferAccessStrategyType btype)
 
BufferAccessStrategy GetAccessStrategyWithSize (BufferAccessStrategyType btype, int ring_size_kb)
 
int GetAccessStrategyBufferCount (BufferAccessStrategy strategy)
 
int GetAccessStrategyPinLimit (BufferAccessStrategy strategy)
 
void FreeAccessStrategy (BufferAccessStrategy strategy)
 
IOContext IOContextForStrategy (BufferAccessStrategy strategy)
 
bool StrategyRejectBuffer (BufferAccessStrategy strategy, BufferDesc *buf, bool from_ring)
 

Variables

static BufferStrategyControl * StrategyControl = NULL
 

Macro Definition Documentation

◆ INT_ACCESS_ONCE

#define INT_ACCESS_ONCE (   var)    ((int)(*((volatile int *)&(var))))

Definition at line 24 of file freelist.c.

Typedef Documentation

◆ BufferAccessStrategyData

Function Documentation

◆ AddBufferToRing()

static void AddBufferToRing ( BufferAccessStrategy  strategy,
BufferDesc * buf 
)
static

Definition at line 737 of file freelist.c.

738{
739 strategy->buffers[strategy->current] = BufferDescriptorGetBuffer(buf);
740}
static Buffer BufferDescriptorGetBuffer(const BufferDesc *bdesc)
static char * buf
Definition: pg_test_fsync.c:72
Buffer buffers[FLEXIBLE_ARRAY_MEMBER]
Definition: freelist.c:83

References buf, BufferDescriptorGetBuffer(), BufferAccessStrategyData::buffers, and BufferAccessStrategyData::current.

Referenced by StrategyGetBuffer().

◆ ClockSweepTick()

static uint32 ClockSweepTick ( void  )
inlinestatic

Definition at line 100 of file freelist.c.

101{
102 uint32 victim;
103
104 /*
105 * Atomically move hand ahead one buffer - if there's several processes
106 * doing this, this can lead to buffers being returned slightly out of
107 * apparent order.
108 */
109 victim =
110 pg_atomic_fetch_add_u32(&StrategyControl->nextVictimBuffer, 1);
111
112 if (victim >= NBuffers)
113 {
114 uint32 originalVictim = victim;
115
116 /* always wrap what we look up in BufferDescriptors */
117 victim = victim % NBuffers;
118
119 /*
120 * If we're the one that just caused a wraparound, force
121 * completePasses to be incremented while holding the spinlock. We
122 * need the spinlock so StrategySyncStart() can return a consistent
123 * value consisting of nextVictimBuffer and completePasses.
124 */
125 if (victim == 0)
126 {
127 uint32 expected;
128 uint32 wrapped;
129 bool success = false;
130
131 expected = originalVictim + 1;
132
133 while (!success)
134 {
135 /*
136 * Acquire the spinlock while increasing completePasses. That
137 * allows other readers to read nextVictimBuffer and
138 * completePasses in a consistent manner which is required for
139 * StrategySyncStart(). In theory delaying the increment
140 * could lead to an overflow of nextVictimBuffers, but that's
141 * highly unlikely and wouldn't be particularly harmful.
142 */
143 SpinLockAcquire(&StrategyControl->buffer_strategy_lock);
144
145 wrapped = expected % NBuffers;
146
147 success = pg_atomic_compare_exchange_u32(&StrategyControl->nextVictimBuffer,
148 &expected, wrapped);
149 if (success)
150 StrategyControl->completePasses++;
151 SpinLockRelease(&StrategyControl->buffer_strategy_lock);
152 }
153 }
154 }
155 return victim;
156}
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:347
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:364
uint32_t uint32
Definition: c.h:541
static BufferStrategyControl * StrategyControl
Definition: freelist.c:57
int NBuffers
Definition: globals.c:142
static bool success
Definition: initdb.c:187
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59
pg_atomic_uint32 nextVictimBuffer
Definition: freelist.c:40
slock_t buffer_strategy_lock
Definition: freelist.c:33

References BufferStrategyControl::buffer_strategy_lock, BufferStrategyControl::completePasses, NBuffers, BufferStrategyControl::nextVictimBuffer, pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32(), SpinLockAcquire, SpinLockRelease, StrategyControl, and success.

Referenced by StrategyGetBuffer().

◆ FreeAccessStrategy()

void FreeAccessStrategy ( BufferAccessStrategy  strategy)

Definition at line 643 of file freelist.c.

644{
645 /* don't crash if called on a "default" strategy */
646 if (strategy != NULL)
647 pfree(strategy);
648}
void pfree(void *pointer)
Definition: mcxt.c:1594

References pfree().

Referenced by blgetbitmap(), FreeBulkInsertState(), heap_endscan(), initscan(), parallel_vacuum_main(), and RelationCopyStorageUsingBuffer().

◆ GetAccessStrategy()

BufferAccessStrategy GetAccessStrategy ( BufferAccessStrategyType  btype)

Definition at line 461 of file freelist.c.

462{
463 int ring_size_kb;
464
465 /*
466 * Select ring size to use. See buffer/README for rationales.
467 *
468 * Note: if you change the ring size for BAS_BULKREAD, see also
469 * SYNC_SCAN_REPORT_INTERVAL in access/heap/syncscan.c.
470 */
471 switch (btype)
472 {
473 case BAS_NORMAL:
474 /* if someone asks for NORMAL, just give 'em a "default" object */
475 return NULL;
476
477 case BAS_BULKREAD:
478 {
479 int ring_max_kb;
480
481 /*
482 * The ring always needs to be large enough to allow some
483 * separation in time between providing a buffer to the user
484 * of the strategy and that buffer being reused. Otherwise the
485 * user's pin will prevent reuse of the buffer, even without
486 * concurrent activity.
487 *
488 * We also need to ensure the ring always is large enough for
489 * SYNC_SCAN_REPORT_INTERVAL, as noted above.
490 *
491 * Thus we start out a minimal size and increase the size
492 * further if appropriate.
493 */
494 ring_size_kb = 256;
495
496 /*
497 * There's no point in a larger ring if we won't be allowed to
498 * pin sufficiently many buffers. But we never limit to less
499 * than the minimal size above.
500 */
501 ring_max_kb = GetPinLimit() * (BLCKSZ / 1024);
502 ring_max_kb = Max(ring_size_kb, ring_max_kb);
503
504 /*
505 * We would like the ring to additionally have space for the
506 * configured degree of IO concurrency. While being read in,
507 * buffers can obviously not yet be reused.
508 *
509 * Each IO can be up to io_combine_limit blocks large, and we
510 * want to start up to effective_io_concurrency IOs.
511 *
512 * Note that effective_io_concurrency may be 0, which disables
513 * AIO.
514 */
515 ring_size_kb += (BLCKSZ / 1024) *
516 io_combine_limit * effective_io_concurrency;
517
518 if (ring_size_kb > ring_max_kb)
519 ring_size_kb = ring_max_kb;
520 break;
521 }
522 case BAS_BULKWRITE:
523 ring_size_kb = 16 * 1024;
524 break;
525 case BAS_VACUUM:
526 ring_size_kb = 2048;
527 break;
528
529 default:
530 elog(ERROR, "unrecognized buffer access strategy: %d",
531 (int) btype);
532 return NULL; /* keep compiler quiet */
533 }
534
535 return GetAccessStrategyWithSize(btype, ring_size_kb);
536}
int effective_io_concurrency
Definition: bufmgr.c:155
int io_combine_limit
Definition: bufmgr.c:170
uint32 GetPinLimit(void)
Definition: bufmgr.c:2475
@ BAS_BULKREAD
Definition: bufmgr.h:37
@ BAS_NORMAL
Definition: bufmgr.h:36
@ BAS_VACUUM
Definition: bufmgr.h:40
@ BAS_BULKWRITE
Definition: bufmgr.h:39
#define Max(x, y)
Definition: c.h:1000
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
BufferAccessStrategy GetAccessStrategyWithSize(BufferAccessStrategyType btype, int ring_size_kb)
Definition: freelist.c:546

References BAS_BULKREAD, BAS_BULKWRITE, BAS_NORMAL, BAS_VACUUM, effective_io_concurrency, elog, ERROR, GetAccessStrategyWithSize(), GetPinLimit(), io_combine_limit, and Max.

Referenced by blgetbitmap(), bt_check_every_level(), collect_corrupt_items(), collect_visibility_data(), GetBulkInsertState(), gin_check_parent_keys_consistency(), gin_check_posting_tree_parent_keys_consistency(), initscan(), pgstat_index(), pgstathashindex(), pgstatindex_impl(), RelationCopyStorageUsingBuffer(), ScanSourceDatabasePgClass(), statapprox_heap(), and verify_heapam().

◆ GetAccessStrategyBufferCount()

int GetAccessStrategyBufferCount ( BufferAccessStrategy  strategy)

Definition at line 586 of file freelist.c.

587{
588 if (strategy == NULL)
589 return 0;
590
591 return strategy->nbuffers;
592}

References BufferAccessStrategyData::nbuffers.

Referenced by parallel_vacuum_init().

◆ GetAccessStrategyPinLimit()

int GetAccessStrategyPinLimit ( BufferAccessStrategy  strategy)

Definition at line 609 of file freelist.c.

610{
611 if (strategy == NULL)
612 return NBuffers;
613
614 switch (strategy->btype)
615 {
616 case BAS_BULKREAD:
617
618 /*
619 * Since BAS_BULKREAD uses StrategyRejectBuffer(), dirty buffers
620 * shouldn't be a problem and the caller is free to pin up to the
621 * entire ring at once.
622 */
623 return strategy->nbuffers;
624
625 default:
626
627 /*
628 * Tell caller not to pin more than half the buffers in the ring.
629 * This is a trade-off between look ahead distance and deferring
630 * writeback and associated WAL traffic.
631 */
632 return strategy->nbuffers / 2;
633 }
634}
BufferAccessStrategyType btype
Definition: freelist.c:67

References BAS_BULKREAD, BufferAccessStrategyData::btype, BufferAccessStrategyData::nbuffers, and NBuffers.

Referenced by read_stream_begin_impl().

◆ GetAccessStrategyWithSize()

BufferAccessStrategy GetAccessStrategyWithSize ( BufferAccessStrategyType  btype,
int  ring_size_kb 
)

Definition at line 546 of file freelist.c.

547{
548 int ring_buffers;
549 BufferAccessStrategy strategy;
550
551 Assert(ring_size_kb >= 0);
552
553 /* Figure out how many buffers ring_size_kb is */
554 ring_buffers = ring_size_kb / (BLCKSZ / 1024);
555
556 /* 0 means unlimited, so no BufferAccessStrategy required */
557 if (ring_buffers == 0)
558 return NULL;
559
560 /* Cap to 1/8th of shared_buffers */
561 ring_buffers = Min(NBuffers / 8, ring_buffers);
562
563 /* NBuffers should never be less than 16, so this shouldn't happen */
564 Assert(ring_buffers > 0);
565
566 /* Allocate the object and initialize all elements to zeroes */
567 strategy = (BufferAccessStrategy)
568 palloc0(offsetof(BufferAccessStrategyData, buffers) +
569 ring_buffers * sizeof(Buffer));
570
571 /* Set fields that don't start out zero */
572 strategy->btype = btype;
573 strategy->nbuffers = ring_buffers;
574
575 return strategy;
576}
int Buffer
Definition: buf.h:23
struct BufferAccessStrategyData * BufferAccessStrategy
Definition: buf.h:44
#define Min(x, y)
Definition: c.h:1006
Assert(PointerIsAligned(start, uint64))
void * palloc0(Size size)
Definition: mcxt.c:1395

References Assert(), BufferAccessStrategyData::btype, Min, BufferAccessStrategyData::nbuffers, NBuffers, and palloc0().

Referenced by do_autovacuum(), ExecVacuum(), GetAccessStrategy(), and parallel_vacuum_main().

◆ GetBufferFromRing()

static BufferDesc * GetBufferFromRing ( BufferAccessStrategy  strategy,
uint32 * buf_state 
)
static

Definition at line 658 of file freelist.c.

659{
660 BufferDesc *buf;
661 Buffer bufnum;
662 uint32 old_buf_state;
663 uint32 local_buf_state; /* to avoid repeated (de-)referencing */
664
665
666 /* Advance to next ring slot */
667 if (++strategy->current >= strategy->nbuffers)
668 strategy->current = 0;
669
670 /*
671 * If the slot hasn't been filled yet, tell the caller to allocate a new
672 * buffer with the normal allocation strategy. He will then fill this
673 * slot by calling AddBufferToRing with the new buffer.
674 */
675 bufnum = strategy->buffers[strategy->current];
676 if (bufnum == InvalidBuffer)
677 return NULL;
678
679 buf = GetBufferDescriptor(bufnum - 1);
680
681 /*
682 * Check whether the buffer can be used and pin it if so. Do this using a
683 * CAS loop, to avoid having to lock the buffer header.
684 */
685 old_buf_state = pg_atomic_read_u32(&buf->state);
686 for (;;)
687 {
688 local_buf_state = old_buf_state;
689
690 /*
691 * If the buffer is pinned we cannot use it under any circumstances.
692 *
693 * If usage_count is 0 or 1 then the buffer is fair game (we expect 1,
694 * since our own previous usage of the ring element would have left it
695 * there, but it might've been decremented by clock-sweep since then).
696 * A higher usage_count indicates someone else has touched the buffer,
697 * so we shouldn't re-use it.
698 */
699 if (BUF_STATE_GET_REFCOUNT(local_buf_state) != 0
700 || BUF_STATE_GET_USAGECOUNT(local_buf_state) > 1)
701 break;
702
703 /* See equivalent code in PinBuffer() */
704 if (unlikely(local_buf_state & BM_LOCKED))
705 {
706 old_buf_state = WaitBufHdrUnlocked(buf);
707 continue;
708 }
709
710 /* pin the buffer if the CAS succeeds */
711 local_buf_state += BUF_REFCOUNT_ONE;
712
713 if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
714 local_buf_state))
715 {
716 *buf_state = local_buf_state;
717
718 TrackNewBufferPin(BufferDescriptorGetBuffer(buf));
719 return buf;
720 }
721 }
722
723 /*
724 * Tell caller to allocate a new buffer with the normal allocation
725 * strategy. He'll then replace this ring element via AddBufferToRing.
726 */
727 return NULL;
728}
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:237
#define InvalidBuffer
Definition: buf.h:25
#define BUF_REFCOUNT_ONE
Definition: buf_internals.h:51
#define BM_LOCKED
Definition: buf_internals.h:68
#define BUF_STATE_GET_USAGECOUNT(state)
Definition: buf_internals.h:60
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:59
static BufferDesc * GetBufferDescriptor(uint32 id)
pg_noinline uint32 WaitBufHdrUnlocked(BufferDesc *buf)
Definition: bufmgr.c:6294
void TrackNewBufferPin(Buffer buf)
Definition: bufmgr.c:3303
#define unlikely(x)
Definition: c.h:407

References BM_LOCKED, buf, BUF_REFCOUNT_ONE, BUF_STATE_GET_REFCOUNT, BUF_STATE_GET_USAGECOUNT, BufferDescriptorGetBuffer(), BufferAccessStrategyData::buffers, BufferAccessStrategyData::current, GetBufferDescriptor(), InvalidBuffer, BufferAccessStrategyData::nbuffers, pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), TrackNewBufferPin(), unlikely, and WaitBufHdrUnlocked().

Referenced by StrategyGetBuffer().

◆ IOContextForStrategy()

IOContext IOContextForStrategy ( BufferAccessStrategy  strategy)

Definition at line 747 of file freelist.c.

748{
749 if (!strategy)
750 return IOCONTEXT_NORMAL;
751
752 switch (strategy->btype)
753 {
754 case BAS_NORMAL:
755
756 /*
757 * Currently, GetAccessStrategy() returns NULL for
758 * BufferAccessStrategyType BAS_NORMAL, so this case is
759 * unreachable.
760 */
761 pg_unreachable();
762 return IOCONTEXT_NORMAL;
763 case BAS_BULKREAD:
764 return IOCONTEXT_BULKREAD;
765 case BAS_BULKWRITE:
766 return IOCONTEXT_BULKWRITE;
767 case BAS_VACUUM:
768 return IOCONTEXT_VACUUM;
769 }
770
771 elog(ERROR, "unrecognized BufferAccessStrategyType: %d", strategy->btype);
772 pg_unreachable();
773}
#define pg_unreachable()
Definition: c.h:336
@ IOCONTEXT_NORMAL
Definition: pgstat.h:289
@ IOCONTEXT_VACUUM
Definition: pgstat.h:290
@ IOCONTEXT_BULKREAD
Definition: pgstat.h:286
@ IOCONTEXT_BULKWRITE
Definition: pgstat.h:287

References BAS_BULKREAD, BAS_BULKWRITE, BAS_NORMAL, BAS_VACUUM, BufferAccessStrategyData::btype, elog, ERROR, IOCONTEXT_BULKREAD, IOCONTEXT_BULKWRITE, IOCONTEXT_NORMAL, IOCONTEXT_VACUUM, and pg_unreachable.

Referenced by AsyncReadBuffers(), ExtendBufferedRelShared(), PinBufferForBlock(), and WaitReadBuffers().

◆ StrategyGetBuffer()

BufferDesc * StrategyGetBuffer ( BufferAccessStrategy  strategy,
uint32 * buf_state,
bool *  from_ring 
)

Definition at line 174 of file freelist.c.

175{
176 BufferDesc *buf;
177 int bgwprocno;
178 int trycounter;
179
180 *from_ring = false;
181
182 /*
183 * If given a strategy object, see whether it can select a buffer. We
184 * assume strategy objects don't need buffer_strategy_lock.
185 */
186 if (strategy != NULL)
187 {
188 buf = GetBufferFromRing(strategy, buf_state);
189 if (buf != NULL)
190 {
191 *from_ring = true;
192 return buf;
193 }
194 }
195
196 /*
197 * If asked, we need to waken the bgwriter. Since we don't want to rely on
198 * a spinlock for this we force a read from shared memory once, and then
199 * set the latch based on that value. We need to go through that length
200 * because otherwise bgwprocno might be reset while/after we check because
201 * the compiler might just reread from memory.
202 *
203 * This can possibly set the latch of the wrong process if the bgwriter
204 * dies in the wrong moment. But since PGPROC->procLatch is never
205 * deallocated the worst consequence of that is that we set the latch of
206 * some arbitrary process.
207 */
208 bgwprocno = INT_ACCESS_ONCE(StrategyControl->bgwprocno);
209 if (bgwprocno != -1)
210 {
211 /* reset bgwprocno first, before setting the latch */
212 StrategyControl->bgwprocno = -1;
213
214 /*
215 * Not acquiring ProcArrayLock here which is slightly icky. It's
216 * actually fine because procLatch isn't ever freed, so we just can
217 * potentially set the wrong process' (or no process') latch.
218 */
219 SetLatch(&ProcGlobal->allProcs[bgwprocno].procLatch);
220 }
221
222 /*
223 * We count buffer allocation requests so that the bgwriter can estimate
224 * the rate of buffer consumption. Note that buffers recycled by a
225 * strategy object are intentionally not counted here.
226 */
227 pg_atomic_fetch_add_u32(&StrategyControl->numBufferAllocs, 1);
228
229 /* Use the "clock sweep" algorithm to find a free buffer */
230 trycounter = NBuffers;
231 for (;;)
232 {
233 uint32 old_buf_state;
234 uint32 local_buf_state;
235
236 buf = GetBufferDescriptor(ClockSweepTick());
237
238 /*
239 * Check whether the buffer can be used and pin it if so. Do this
240 * using a CAS loop, to avoid having to lock the buffer header.
241 */
242 old_buf_state = pg_atomic_read_u32(&buf->state);
243 for (;;)
244 {
245 local_buf_state = old_buf_state;
246
247 /*
248 * If the buffer is pinned or has a nonzero usage_count, we cannot
249 * use it; decrement the usage_count (unless pinned) and keep
250 * scanning.
251 */
252
253 if (BUF_STATE_GET_REFCOUNT(local_buf_state) != 0)
254 {
255 if (--trycounter == 0)
256 {
257 /*
258 * We've scanned all the buffers without making any state
259 * changes, so all the buffers are pinned (or were when we
260 * looked at them). We could hope that someone will free
261 * one eventually, but it's probably better to fail than
262 * to risk getting stuck in an infinite loop.
263 */
264 elog(ERROR, "no unpinned buffers available");
265 }
266 break;
267 }
268
269 /* See equivalent code in PinBuffer() */
270 if (unlikely(local_buf_state & BM_LOCKED))
271 {
272 old_buf_state = WaitBufHdrUnlocked(buf);
273 continue;
274 }
275
276 if (BUF_STATE_GET_USAGECOUNT(local_buf_state) != 0)
277 {
278 local_buf_state -= BUF_USAGECOUNT_ONE;
279
280 if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
281 local_buf_state))
282 {
283 trycounter = NBuffers;
284 break;
285 }
286 }
287 else
288 {
289 /* pin the buffer if the CAS succeeds */
290 local_buf_state += BUF_REFCOUNT_ONE;
291
292 if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
293 local_buf_state))
294 {
295 /* Found a usable buffer */
296 if (strategy != NULL)
297 AddBufferToRing(strategy, buf);
298 *buf_state = local_buf_state;
299
300 TrackNewBufferPin(BufferDescriptorGetBuffer(buf));
301
302 return buf;
303 }
304 }
305 }
306 }
307}
#define BUF_USAGECOUNT_ONE
Definition: buf_internals.h:54
static uint32 ClockSweepTick(void)
Definition: freelist.c:100
static void AddBufferToRing(BufferAccessStrategy strategy, BufferDesc *buf)
Definition: freelist.c:737
#define INT_ACCESS_ONCE(var)
Definition: freelist.c:24
static BufferDesc * GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_state)
Definition: freelist.c:658
void SetLatch(Latch *latch)
Definition: latch.c:290
PROC_HDR * ProcGlobal
Definition: proc.c:79
pg_atomic_uint32 numBufferAllocs
Definition: freelist.c:47
Latch procLatch
Definition: proc.h:186
PGPROC * allProcs
Definition: proc.h:388

References AddBufferToRing(), PROC_HDR::allProcs, BufferStrategyControl::bgwprocno, BM_LOCKED, buf, BUF_REFCOUNT_ONE, BUF_STATE_GET_REFCOUNT, BUF_STATE_GET_USAGECOUNT, BUF_USAGECOUNT_ONE, BufferDescriptorGetBuffer(), ClockSweepTick(), elog, ERROR, GetBufferDescriptor(), GetBufferFromRing(), INT_ACCESS_ONCE, NBuffers, BufferStrategyControl::numBufferAllocs, pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32(), pg_atomic_read_u32(), ProcGlobal, PGPROC::procLatch, SetLatch(), StrategyControl, TrackNewBufferPin(), unlikely, and WaitBufHdrUnlocked().

Referenced by GetVictimBuffer().

◆ StrategyInitialize()

void StrategyInitialize ( bool  init)

Definition at line 401 of file freelist.c.

402{
403 bool found;
404
405 /*
406 * Initialize the shared buffer lookup hashtable.
407 *
408 * Since we can't tolerate running out of lookup table entries, we must be
409 * sure to specify an adequate table size here. The maximum steady-state
410 * usage is of course NBuffers entries, but BufferAlloc() tries to insert
411 * a new entry before deleting the old. In principle this could be
412 * happening in each partition concurrently, so we could need as many as
413 * NBuffers + NUM_BUFFER_PARTITIONS entries.
414 */
415 InitBufTable(NBuffers + NUM_BUFFER_PARTITIONS);
416
417 /*
418 * Get or create the shared strategy control block
419 */
420 StrategyControl = (BufferStrategyControl *)
421 ShmemInitStruct("Buffer Strategy Status",
422 sizeof(BufferStrategyControl),
423 &found);
424
425 if (!found)
426 {
427 /*
428 * Only done once, usually in postmaster
429 */
430 Assert(init);
431
432 SpinLockInit(&StrategyControl->buffer_strategy_lock);
433
434 /* Initialize the clock-sweep pointer */
435 pg_atomic_init_u32(&StrategyControl->nextVictimBuffer, 0);
436
437 /* Clear statistics */
438 StrategyControl->completePasses = 0;
439 pg_atomic_init_u32(&StrategyControl->numBufferAllocs, 0);
440
441 /* No pending notification */
442 StrategyControl->bgwprocno = -1;
443 }
444 else
445 Assert(!init);
446}
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:219
void InitBufTable(int size)
Definition: buf_table.c:51
int init
Definition: isn.c:79
#define NUM_BUFFER_PARTITIONS
Definition: lwlock.h:91
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:389
#define SpinLockInit(lock)
Definition: spin.h:57

References Assert(), BufferStrategyControl::bgwprocno, BufferStrategyControl::buffer_strategy_lock, BufferStrategyControl::completePasses, init, InitBufTable(), NBuffers, BufferStrategyControl::nextVictimBuffer, NUM_BUFFER_PARTITIONS, BufferStrategyControl::numBufferAllocs, pg_atomic_init_u32(), ShmemInitStruct(), SpinLockInit, and StrategyControl.

Referenced by BufferManagerShmemInit().

◆ StrategyNotifyBgWriter()

void StrategyNotifyBgWriter ( int  bgwprocno)

Definition at line 358 of file freelist.c.

359{
360 /*
361 * We acquire buffer_strategy_lock just to ensure that the store appears
362 * atomic to StrategyGetBuffer. The bgwriter should call this rather
363 * infrequently, so there's no performance penalty from being safe.
364 */
365 SpinLockAcquire(&StrategyControl->buffer_strategy_lock);
366 StrategyControl->bgwprocno = bgwprocno;
367 SpinLockRelease(&StrategyControl->buffer_strategy_lock);
368}

References BufferStrategyControl::bgwprocno, BufferStrategyControl::buffer_strategy_lock, SpinLockAcquire, SpinLockRelease, and StrategyControl.

Referenced by BackgroundWriterMain().

◆ StrategyRejectBuffer()

bool StrategyRejectBuffer ( BufferAccessStrategy  strategy,
BufferDesc * buf,
bool  from_ring 
)

Definition at line 787 of file freelist.c.

788{
789 /* We only do this in bulkread mode */
790 if (strategy->btype != BAS_BULKREAD)
791 return false;
792
793 /* Don't muck with behavior of normal buffer-replacement strategy */
794 if (!from_ring ||
795 strategy->buffers[strategy->current] != BufferDescriptorGetBuffer(buf))
796 return false;
797
798 /*
799 * Remove the dirty buffer from the ring; necessary to prevent infinite
800 * loop if all ring members are dirty.
801 */
802 strategy->buffers[strategy->current] = InvalidBuffer;
803
804 return true;
805}

References BAS_BULKREAD, BufferAccessStrategyData::btype, buf, BufferDescriptorGetBuffer(), BufferAccessStrategyData::buffers, BufferAccessStrategyData::current, and InvalidBuffer.

Referenced by GetVictimBuffer().

◆ StrategyShmemSize()

Size StrategyShmemSize ( void  )

Definition at line 380 of file freelist.c.

381{
382 Size size = 0;
383
384 /* size of lookup hash table ... see comment in StrategyInitialize */
385 size = add_size(size, BufTableShmemSize(NBuffers + NUM_BUFFER_PARTITIONS));
386
387 /* size of the shared replacement strategy control block */
388 size = add_size(size, MAXALIGN(sizeof(BufferStrategyControl)));
389
390 return size;
391}
Size BufTableShmemSize(int size)
Definition: buf_table.c:41
#define MAXALIGN(LEN)
Definition: c.h:813
size_t Size
Definition: c.h:613
Size add_size(Size s1, Size s2)
Definition: shmem.c:495

References add_size(), BufTableShmemSize(), MAXALIGN, NBuffers, and NUM_BUFFER_PARTITIONS.

Referenced by BufferManagerShmemSize().

◆ StrategySyncStart()

int StrategySyncStart ( uint32 * complete_passes,
uint32 * num_buf_alloc 
)

Definition at line 321 of file freelist.c.

322{
323 uint32 nextVictimBuffer;
324 int result;
325
326 SpinLockAcquire(&StrategyControl->buffer_strategy_lock);
327 nextVictimBuffer = pg_atomic_read_u32(&StrategyControl->nextVictimBuffer);
328 result = nextVictimBuffer % NBuffers;
329
330 if (complete_passes)
331 {
332 *complete_passes = StrategyControl->completePasses;
333
334 /*
335 * Additionally add the number of wraparounds that happened before
336 * completePasses could be incremented. C.f. ClockSweepTick().
337 */
338 *complete_passes += nextVictimBuffer / NBuffers;
339 }
340
341 if (num_buf_alloc)
342 {
343 *num_buf_alloc = pg_atomic_exchange_u32(&StrategyControl->numBufferAllocs, 0);
344 }
345 SpinLockRelease(&StrategyControl->buffer_strategy_lock);
346 return result;
347}
static uint32 pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
Definition: atomics.h:328

References BufferStrategyControl::buffer_strategy_lock, BufferStrategyControl::completePasses, NBuffers, BufferStrategyControl::nextVictimBuffer, BufferStrategyControl::numBufferAllocs, pg_atomic_exchange_u32(), pg_atomic_read_u32(), SpinLockAcquire, SpinLockRelease, and StrategyControl.

Referenced by BgBufferSync().

Variable Documentation

◆ StrategyControl

BufferStrategyControl* StrategyControl = NULL
static