PostgreSQL Source Code  git master
freelist.c File Reference
#include "postgres.h"
#include "port/atomics.h"
#include "storage/buf_internals.h"
#include "storage/bufmgr.h"
#include "storage/proc.h"
Include dependency graph for freelist.c:

Go to the source code of this file.

Data Structures

struct  BufferStrategyControl
 
struct  BufferAccessStrategyData
 

Macros

#define INT_ACCESS_ONCE(var)   ((int)(*((volatile int *)&(var))))
 

Typedefs

typedef struct BufferAccessStrategyData BufferAccessStrategyData
 

Functions

static BufferDesc * GetBufferFromRing (BufferAccessStrategy strategy, uint32 *buf_state)
 
static void AddBufferToRing (BufferAccessStrategy strategy, BufferDesc *buf)
 
static uint32 ClockSweepTick (void)
 
bool have_free_buffer (void)
 
BufferDesc * StrategyGetBuffer (BufferAccessStrategy strategy, uint32 *buf_state)
 
void StrategyFreeBuffer (BufferDesc *buf)
 
int StrategySyncStart (uint32 *complete_passes, uint32 *num_buf_alloc)
 
void StrategyNotifyBgWriter (int bgwprocno)
 
Size StrategyShmemSize (void)
 
void StrategyInitialize (bool init)
 
BufferAccessStrategy GetAccessStrategy (BufferAccessStrategyType btype)
 
void FreeAccessStrategy (BufferAccessStrategy strategy)
 
bool StrategyRejectBuffer (BufferAccessStrategy strategy, BufferDesc *buf)
 

Variables

static BufferStrategyControl * StrategyControl = NULL
 

Macro Definition Documentation

◆ INT_ACCESS_ONCE

#define INT_ACCESS_ONCE (   var)    ((int)(*((volatile int *)&(var))))

Definition at line 23 of file freelist.c.

Typedef Documentation

◆ BufferAccessStrategyData

Function Documentation

◆ AddBufferToRing()

static void AddBufferToRing ( BufferAccessStrategy  strategy,
BufferDesc buf 
)
static

Definition at line 668 of file freelist.c.

669 {
670  strategy->buffers[strategy->current] = BufferDescriptorGetBuffer(buf);
671 }
static Buffer BufferDescriptorGetBuffer(const BufferDesc *bdesc)
static char * buf
Definition: pg_test_fsync.c:67
Buffer buffers[FLEXIBLE_ARRAY_MEMBER]
Definition: freelist.c:96

References buf, BufferDescriptorGetBuffer(), BufferAccessStrategyData::buffers, and BufferAccessStrategyData::current.

Referenced by StrategyGetBuffer().

◆ ClockSweepTick()

static uint32 ClockSweepTick ( void  )
inlinestatic

Definition at line 113 of file freelist.c.

114 {
115  uint32 victim;
116 
117  /*
118  * Atomically move hand ahead one buffer - if there's several processes
119  * doing this, this can lead to buffers being returned slightly out of
120  * apparent order.
121  */
122  victim =
123  pg_atomic_fetch_add_u32(&StrategyControl->nextVictimBuffer, 1);
124 
125  if (victim >= NBuffers)
126  {
127  uint32 originalVictim = victim;
128 
129  /* always wrap what we look up in BufferDescriptors */
130  victim = victim % NBuffers;
131 
132  /*
133  * If we're the one that just caused a wraparound, force
134  * completePasses to be incremented while holding the spinlock. We
135  * need the spinlock so StrategySyncStart() can return a consistent
136  * value consisting of nextVictimBuffer and completePasses.
137  */
138  if (victim == 0)
139  {
140  uint32 expected;
141  uint32 wrapped;
142  bool success = false;
143 
144  expected = originalVictim + 1;
145 
146  while (!success)
147  {
148  /*
149  * Acquire the spinlock while increasing completePasses. That
150  * allows other readers to read nextVictimBuffer and
151  * completePasses in a consistent manner which is required for
152  * StrategySyncStart(). In theory delaying the increment
153  * could lead to an overflow of nextVictimBuffers, but that's
154  * highly unlikely and wouldn't be particularly harmful.
155  */
156  SpinLockAcquire(&StrategyControl->buffer_strategy_lock);
157 
158  wrapped = expected % NBuffers;
159 
160  success = pg_atomic_compare_exchange_u32(&StrategyControl->nextVictimBuffer,
161  &expected, wrapped);
162  if (success)
163  StrategyControl->completePasses++;
164  SpinLockRelease(&StrategyControl->buffer_strategy_lock);
165  }
166  }
167  }
168  return victim;
169 }
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:306
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:323
unsigned int uint32
Definition: c.h:442
static BufferStrategyControl * StrategyControl
Definition: freelist.c:64
int NBuffers
Definition: globals.c:136
static bool success
Definition: initdb.c:170
#define SpinLockRelease(lock)
Definition: spin.h:64
#define SpinLockAcquire(lock)
Definition: spin.h:62
pg_atomic_uint32 nextVictimBuffer
Definition: freelist.c:39
slock_t buffer_strategy_lock
Definition: freelist.c:32

References BufferStrategyControl::buffer_strategy_lock, BufferStrategyControl::completePasses, NBuffers, BufferStrategyControl::nextVictimBuffer, pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32(), SpinLockAcquire, SpinLockRelease, StrategyControl, and success.

Referenced by StrategyGetBuffer().

◆ FreeAccessStrategy()

void FreeAccessStrategy ( BufferAccessStrategy  strategy)

Definition at line 596 of file freelist.c.

597 {
598  /* don't crash if called on a "default" strategy */
599  if (strategy != NULL)
600  pfree(strategy);
601 }
void pfree(void *pointer)
Definition: mcxt.c:1306

References pfree().

Referenced by blgetbitmap(), FreeBulkInsertState(), heap_endscan(), initscan(), and parallel_vacuum_main().

◆ GetAccessStrategy()

BufferAccessStrategy GetAccessStrategy ( BufferAccessStrategyType  btype)

Definition at line 541 of file freelist.c.

542 {
543  BufferAccessStrategy strategy;
544  int ring_size;
545 
546  /*
547  * Select ring size to use. See buffer/README for rationales.
548  *
549  * Note: if you change the ring size for BAS_BULKREAD, see also
550  * SYNC_SCAN_REPORT_INTERVAL in access/heap/syncscan.c.
551  */
552  switch (btype)
553  {
554  case BAS_NORMAL:
555  /* if someone asks for NORMAL, just give 'em a "default" object */
556  return NULL;
557 
558  case BAS_BULKREAD:
559  ring_size = 256 * 1024 / BLCKSZ;
560  break;
561  case BAS_BULKWRITE:
562  ring_size = 16 * 1024 * 1024 / BLCKSZ;
563  break;
564  case BAS_VACUUM:
565  ring_size = 256 * 1024 / BLCKSZ;
566  break;
567 
568  default:
569  elog(ERROR, "unrecognized buffer access strategy: %d",
570  (int) btype);
571  return NULL; /* keep compiler quiet */
572  }
573 
574  /* Make sure ring isn't an undue fraction of shared buffers */
575  ring_size = Min(NBuffers / 8, ring_size);
576 
577  /* Allocate the object and initialize all elements to zeroes */
578  strategy = (BufferAccessStrategy)
579  palloc0(offsetof(BufferAccessStrategyData, buffers) +
580  ring_size * sizeof(Buffer));
581 
582  /* Set fields that don't start out zero */
583  strategy->btype = btype;
584  strategy->ring_size = ring_size;
585 
586  return strategy;
587 }
int Buffer
Definition: buf.h:23
struct BufferAccessStrategyData * BufferAccessStrategy
Definition: buf.h:44
@ BAS_BULKREAD
Definition: bufmgr.h:30
@ BAS_NORMAL
Definition: bufmgr.h:29
@ BAS_VACUUM
Definition: bufmgr.h:33
@ BAS_BULKWRITE
Definition: bufmgr.h:32
#define Min(x, y)
Definition: c.h:937
#define ERROR
Definition: elog.h:35
void * palloc0(Size size)
Definition: mcxt.c:1230
BufferAccessStrategyType btype
Definition: freelist.c:74

References BAS_BULKREAD, BAS_BULKWRITE, BAS_NORMAL, BAS_VACUUM, BufferAccessStrategyData::btype, elog(), ERROR, Min, NBuffers, palloc0(), and BufferAccessStrategyData::ring_size.

Referenced by blgetbitmap(), bt_check_every_level(), collect_corrupt_items(), collect_visibility_data(), do_autovacuum(), GetBulkInsertState(), initscan(), parallel_vacuum_main(), pgstat_index(), pgstathashindex(), pgstatindex_impl(), RelationCopyStorageUsingBuffer(), ScanSourceDatabasePgClass(), statapprox_heap(), vacuum(), and verify_heapam().

◆ GetBufferFromRing()

static BufferDesc * GetBufferFromRing ( BufferAccessStrategy  strategy,
uint32 buf_state 
)
static

Definition at line 610 of file freelist.c.

611 {
612  BufferDesc *buf;
613  Buffer bufnum;
614  uint32 local_buf_state; /* to avoid repeated (de-)referencing */
615 
616 
617  /* Advance to next ring slot */
618  if (++strategy->current >= strategy->ring_size)
619  strategy->current = 0;
620 
621  /*
622  * If the slot hasn't been filled yet, tell the caller to allocate a new
623  * buffer with the normal allocation strategy. He will then fill this
624  * slot by calling AddBufferToRing with the new buffer.
625  */
626  bufnum = strategy->buffers[strategy->current];
627  if (bufnum == InvalidBuffer)
628  {
629  strategy->current_was_in_ring = false;
630  return NULL;
631  }
632 
633  /*
634  * If the buffer is pinned we cannot use it under any circumstances.
635  *
636  * If usage_count is 0 or 1 then the buffer is fair game (we expect 1,
637  * since our own previous usage of the ring element would have left it
638  * there, but it might've been decremented by clock sweep since then). A
639  * higher usage_count indicates someone else has touched the buffer, so we
640  * shouldn't re-use it.
641  */
642  buf = GetBufferDescriptor(bufnum - 1);
643  local_buf_state = LockBufHdr(buf);
644  if (BUF_STATE_GET_REFCOUNT(local_buf_state) == 0
645  && BUF_STATE_GET_USAGECOUNT(local_buf_state) <= 1)
646  {
647  strategy->current_was_in_ring = true;
648  *buf_state = local_buf_state;
649  return buf;
650  }
651  UnlockBufHdr(buf, local_buf_state);
652 
653  /*
654  * Tell caller to allocate a new buffer with the normal allocation
655  * strategy. He'll then replace this ring element via AddBufferToRing.
656  */
657  strategy->current_was_in_ring = false;
658  return NULL;
659 }
#define InvalidBuffer
Definition: buf.h:25
static BufferDesc * GetBufferDescriptor(uint32 id)
static void UnlockBufHdr(BufferDesc *desc, uint32 buf_state)
#define BUF_STATE_GET_USAGECOUNT(state)
Definition: buf_internals.h:50
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:49
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4755

References buf, BUF_STATE_GET_REFCOUNT, BUF_STATE_GET_USAGECOUNT, BufferAccessStrategyData::buffers, BufferAccessStrategyData::current, BufferAccessStrategyData::current_was_in_ring, GetBufferDescriptor(), InvalidBuffer, LockBufHdr(), BufferAccessStrategyData::ring_size, and UnlockBufHdr().

Referenced by StrategyGetBuffer().

◆ have_free_buffer()

bool have_free_buffer ( void  )

Definition at line 180 of file freelist.c.

181 {
182  if (StrategyControl->firstFreeBuffer >= 0)
183  return true;
184  else
185  return false;
186 }

References BufferStrategyControl::firstFreeBuffer, and StrategyControl.

Referenced by apw_load_buffers(), and autoprewarm_database_main().

◆ StrategyFreeBuffer()

void StrategyFreeBuffer ( BufferDesc buf)

Definition at line 363 of file freelist.c.

364 {
365  SpinLockAcquire(&StrategyControl->buffer_strategy_lock);
366 
367  /*
368  * It is possible that we are told to put something in the freelist that
369  * is already in it; don't screw up the list if so.
370  */
371  if (buf->freeNext == FREENEXT_NOT_IN_LIST)
372  {
373  buf->freeNext = StrategyControl->firstFreeBuffer;
374  if (buf->freeNext < 0)
375  StrategyControl->lastFreeBuffer = buf->buf_id;
376  StrategyControl->firstFreeBuffer = buf->buf_id;
377  }
378 
379  SpinLockRelease(&StrategyControl->buffer_strategy_lock);
380 }
#define FREENEXT_NOT_IN_LIST

References buf, BufferStrategyControl::buffer_strategy_lock, BufferStrategyControl::firstFreeBuffer, FREENEXT_NOT_IN_LIST, BufferStrategyControl::lastFreeBuffer, SpinLockAcquire, SpinLockRelease, and StrategyControl.

Referenced by InvalidateBuffer().

◆ StrategyGetBuffer()

BufferDesc* StrategyGetBuffer ( BufferAccessStrategy  strategy,
uint32 buf_state 
)

Definition at line 201 of file freelist.c.

202 {
203  BufferDesc *buf;
204  int bgwprocno;
205  int trycounter;
206  uint32 local_buf_state; /* to avoid repeated (de-)referencing */
207 
208  /*
209  * If given a strategy object, see whether it can select a buffer. We
210  * assume strategy objects don't need buffer_strategy_lock.
211  */
212  if (strategy != NULL)
213  {
214  buf = GetBufferFromRing(strategy, buf_state);
215  if (buf != NULL)
216  return buf;
217  }
218 
219  /*
220  * If asked, we need to waken the bgwriter. Since we don't want to rely on
221  * a spinlock for this we force a read from shared memory once, and then
222  * set the latch based on that value. We need to go through that length
223  * because otherwise bgwprocno might be reset while/after we check because
224  * the compiler might just reread from memory.
225  *
226  * This can possibly set the latch of the wrong process if the bgwriter
227  * dies in the wrong moment. But since PGPROC->procLatch is never
228  * deallocated the worst consequence of that is that we set the latch of
229  * some arbitrary process.
230  */
231  bgwprocno = INT_ACCESS_ONCE(StrategyControl->bgwprocno);
232  if (bgwprocno != -1)
233  {
234  /* reset bgwprocno first, before setting the latch */
235  StrategyControl->bgwprocno = -1;
236 
237  /*
238  * Not acquiring ProcArrayLock here which is slightly icky. It's
239  * actually fine because procLatch isn't ever freed, so we just can
240  * potentially set the wrong process' (or no process') latch.
241  */
242  SetLatch(&ProcGlobal->allProcs[bgwprocno].procLatch);
243  }
244 
245  /*
246  * We count buffer allocation requests so that the bgwriter can estimate
247  * the rate of buffer consumption. Note that buffers recycled by a
248  * strategy object are intentionally not counted here.
249  */
250  pg_atomic_fetch_add_u32(&StrategyControl->numBufferAllocs, 1);
251 
252  /*
253  * First check, without acquiring the lock, whether there's buffers in the
254  * freelist. Since we otherwise don't require the spinlock in every
255  * StrategyGetBuffer() invocation, it'd be sad to acquire it here -
256  * uselessly in most cases. That obviously leaves a race where a buffer is
257  * put on the freelist but we don't see the store yet - but that's pretty
258  * harmless, it'll just get used during the next buffer acquisition.
259  *
260  * If there's buffers on the freelist, acquire the spinlock to pop one
261  * buffer of the freelist. Then check whether that buffer is usable and
262  * repeat if not.
263  *
264  * Note that the freeNext fields are considered to be protected by the
265  * buffer_strategy_lock not the individual buffer spinlocks, so it's OK to
266  * manipulate them without holding the spinlock.
267  */
268  if (StrategyControl->firstFreeBuffer >= 0)
269  {
270  while (true)
271  {
272  /* Acquire the spinlock to remove element from the freelist */
273  SpinLockAcquire(&StrategyControl->buffer_strategy_lock);
274 
275  if (StrategyControl->firstFreeBuffer < 0)
276  {
277  SpinLockRelease(&StrategyControl->buffer_strategy_lock);
278  break;
279  }
280 
281  buf = GetBufferDescriptor(StrategyControl->firstFreeBuffer);
282  Assert(buf->freeNext != FREENEXT_NOT_IN_LIST);
283 
284  /* Unconditionally remove buffer from freelist */
285  StrategyControl->firstFreeBuffer = buf->freeNext;
286  buf->freeNext = FREENEXT_NOT_IN_LIST;
287 
288  /*
289  * Release the lock so someone else can access the freelist while
290  * we check out this buffer.
291  */
292  SpinLockRelease(&StrategyControl->buffer_strategy_lock);
293 
294  /*
295  * If the buffer is pinned or has a nonzero usage_count, we cannot
296  * use it; discard it and retry. (This can only happen if VACUUM
297  * put a valid buffer in the freelist and then someone else used
298  * it before we got to it. It's probably impossible altogether as
299  * of 8.3, but we'd better check anyway.)
300  */
301  local_buf_state = LockBufHdr(buf);
302  if (BUF_STATE_GET_REFCOUNT(local_buf_state) == 0
303  && BUF_STATE_GET_USAGECOUNT(local_buf_state) == 0)
304  {
305  if (strategy != NULL)
306  AddBufferToRing(strategy, buf);
307  *buf_state = local_buf_state;
308  return buf;
309  }
310  UnlockBufHdr(buf, local_buf_state);
311  }
312  }
313 
314  /* Nothing on the freelist, so run the "clock sweep" algorithm */
315  trycounter = NBuffers;
316  for (;;)
317  {
318  buf = GetBufferDescriptor(ClockSweepTick());
319 
320  /*
321  * If the buffer is pinned or has a nonzero usage_count, we cannot use
322  * it; decrement the usage_count (unless pinned) and keep scanning.
323  */
324  local_buf_state = LockBufHdr(buf);
325 
326  if (BUF_STATE_GET_REFCOUNT(local_buf_state) == 0)
327  {
328  if (BUF_STATE_GET_USAGECOUNT(local_buf_state) != 0)
329  {
330  local_buf_state -= BUF_USAGECOUNT_ONE;
331 
332  trycounter = NBuffers;
333  }
334  else
335  {
336  /* Found a usable buffer */
337  if (strategy != NULL)
338  AddBufferToRing(strategy, buf);
339  *buf_state = local_buf_state;
340  return buf;
341  }
342  }
343  else if (--trycounter == 0)
344  {
345  /*
346  * We've scanned all the buffers without making any state changes,
347  * so all the buffers are pinned (or were when we looked at them).
348  * We could hope that someone will free one eventually, but it's
349  * probably better to fail than to risk getting stuck in an
350  * infinite loop.
351  */
352  UnlockBufHdr(buf, local_buf_state);
353  elog(ERROR, "no unpinned buffers available");
354  }
355  UnlockBufHdr(buf, local_buf_state);
356  }
357 }
#define BUF_USAGECOUNT_ONE
Definition: buf_internals.h:44
static uint32 ClockSweepTick(void)
Definition: freelist.c:113
static void AddBufferToRing(BufferAccessStrategy strategy, BufferDesc *buf)
Definition: freelist.c:668
#define INT_ACCESS_ONCE(var)
Definition: freelist.c:23
static BufferDesc * GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_state)
Definition: freelist.c:610
void SetLatch(Latch *latch)
Definition: latch.c:591
Assert(fmt[strlen(fmt) - 1] !='\n')
PROC_HDR * ProcGlobal
Definition: proc.c:80
pg_atomic_uint32 numBufferAllocs
Definition: freelist.c:54
Latch procLatch
Definition: proc.h:170
PGPROC * allProcs
Definition: proc.h:362

References AddBufferToRing(), PROC_HDR::allProcs, Assert(), BufferStrategyControl::bgwprocno, buf, BUF_STATE_GET_REFCOUNT, BUF_STATE_GET_USAGECOUNT, BUF_USAGECOUNT_ONE, BufferStrategyControl::buffer_strategy_lock, ClockSweepTick(), elog(), ERROR, BufferStrategyControl::firstFreeBuffer, FREENEXT_NOT_IN_LIST, GetBufferDescriptor(), GetBufferFromRing(), INT_ACCESS_ONCE, LockBufHdr(), NBuffers, BufferStrategyControl::numBufferAllocs, pg_atomic_fetch_add_u32(), ProcGlobal, PGPROC::procLatch, SetLatch(), SpinLockAcquire, SpinLockRelease, StrategyControl, and UnlockBufHdr().

Referenced by BufferAlloc().

◆ StrategyInitialize()

void StrategyInitialize ( bool  init)

Definition at line 474 of file freelist.c.

475 {
476  bool found;
477 
478  /*
479  * Initialize the shared buffer lookup hashtable.
480  *
481  * Since we can't tolerate running out of lookup table entries, we must be
482  * sure to specify an adequate table size here. The maximum steady-state
483  * usage is of course NBuffers entries, but BufferAlloc() tries to insert
484  * a new entry before deleting the old. In principle this could be
485  * happening in each partition concurrently, so we could need as many as
486  * NBuffers + NUM_BUFFER_PARTITIONS entries.
487  */
488  InitBufTable(NBuffers + NUM_BUFFER_PARTITIONS);
489 
490  /*
491  * Get or create the shared strategy control block
492  */
494  ShmemInitStruct("Buffer Strategy Status",
495  sizeof(BufferStrategyControl),
496  &found);
497 
498  if (!found)
499  {
500  /*
501  * Only done once, usually in postmaster
502  */
503  Assert(init);
504 
505  SpinLockInit(&StrategyControl->buffer_strategy_lock);
506 
507  /*
508  * Grab the whole linked list of free buffers for our strategy. We
509  * assume it was previously set up by InitBufferPool().
510  */
511  StrategyControl->firstFreeBuffer = 0;
512  StrategyControl->lastFreeBuffer = NBuffers - 1;
513 
514  /* Initialize the clock sweep pointer */
515  pg_atomic_init_u32(&StrategyControl->nextVictimBuffer, 0);
516 
517  /* Clear statistics */
518  StrategyControl->completePasses = 0;
519  pg_atomic_init_u32(&StrategyControl->numBufferAllocs, 0);
520 
521  /* No pending notification */
522  StrategyControl->bgwprocno = -1;
523  }
524  else
525  Assert(!init);
526 }
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:218
void InitBufTable(int size)
Definition: buf_table.c:52
int init
Definition: isn.c:75
#define NUM_BUFFER_PARTITIONS
Definition: lwlock.h:91
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:396
#define SpinLockInit(lock)
Definition: spin.h:60

References Assert(), BufferStrategyControl::bgwprocno, BufferStrategyControl::buffer_strategy_lock, BufferStrategyControl::completePasses, BufferStrategyControl::firstFreeBuffer, init, InitBufTable(), BufferStrategyControl::lastFreeBuffer, NBuffers, BufferStrategyControl::nextVictimBuffer, NUM_BUFFER_PARTITIONS, BufferStrategyControl::numBufferAllocs, pg_atomic_init_u32(), ShmemInitStruct(), SpinLockInit, and StrategyControl.

Referenced by InitBufferPool().

◆ StrategyNotifyBgWriter()

void StrategyNotifyBgWriter ( int  bgwprocno)

Definition at line 431 of file freelist.c.

432 {
433  /*
434  * We acquire buffer_strategy_lock just to ensure that the store appears
435  * atomic to StrategyGetBuffer. The bgwriter should call this rather
436  * infrequently, so there's no performance penalty from being safe.
437  */
438  SpinLockAcquire(&StrategyControl->buffer_strategy_lock);
439  StrategyControl->bgwprocno = bgwprocno;
440  SpinLockRelease(&StrategyControl->buffer_strategy_lock);
441 }

References BufferStrategyControl::bgwprocno, BufferStrategyControl::buffer_strategy_lock, SpinLockAcquire, SpinLockRelease, and StrategyControl.

Referenced by BackgroundWriterMain().

◆ StrategyRejectBuffer()

bool StrategyRejectBuffer ( BufferAccessStrategy  strategy,
BufferDesc buf 
)

Definition at line 685 of file freelist.c.

686 {
687  /* We only do this in bulkread mode */
688  if (strategy->btype != BAS_BULKREAD)
689  return false;
690 
691  /* Don't muck with behavior of normal buffer-replacement strategy */
692  if (!strategy->current_was_in_ring ||
693  strategy->buffers[strategy->current] != BufferDescriptorGetBuffer(buf))
694  return false;
695 
696  /*
697  * Remove the dirty buffer from the ring; necessary to prevent infinite
698  * loop if all ring members are dirty.
699  */
700  strategy->buffers[strategy->current] = InvalidBuffer;
701 
702  return true;
703 }

References BAS_BULKREAD, BufferAccessStrategyData::btype, buf, BufferDescriptorGetBuffer(), BufferAccessStrategyData::buffers, BufferAccessStrategyData::current, BufferAccessStrategyData::current_was_in_ring, and InvalidBuffer.

Referenced by BufferAlloc().

◆ StrategyShmemSize()

Size StrategyShmemSize ( void  )

Definition at line 453 of file freelist.c.

454 {
455  Size size = 0;
456 
457  /* size of lookup hash table ... see comment in StrategyInitialize */
458  size = add_size(size, BufTableShmemSize(NBuffers + NUM_BUFFER_PARTITIONS));
459 
460  /* size of the shared replacement strategy control block */
461  size = add_size(size, MAXALIGN(sizeof(BufferStrategyControl)));
462 
463  return size;
464 }
Size BufTableShmemSize(int size)
Definition: buf_table.c:42
#define MAXALIGN(LEN)
Definition: c.h:747
size_t Size
Definition: c.h:541
Size add_size(Size s1, Size s2)
Definition: shmem.c:502

References add_size(), BufTableShmemSize(), MAXALIGN, NBuffers, and NUM_BUFFER_PARTITIONS.

Referenced by BufferShmemSize().

◆ StrategySyncStart()

int StrategySyncStart ( uint32 complete_passes,
uint32 num_buf_alloc 
)

Definition at line 394 of file freelist.c.

395 {
396  uint32 nextVictimBuffer;
397  int result;
398 
399  SpinLockAcquire(&StrategyControl->buffer_strategy_lock);
400  nextVictimBuffer = pg_atomic_read_u32(&StrategyControl->nextVictimBuffer);
401  result = nextVictimBuffer % NBuffers;
402 
403  if (complete_passes)
404  {
405  *complete_passes = StrategyControl->completePasses;
406 
407  /*
408  * Additionally add the number of wraparounds that happened before
409  * completePasses could be incremented. C.f. ClockSweepTick().
410  */
411  *complete_passes += nextVictimBuffer / NBuffers;
412  }
413 
414  if (num_buf_alloc)
415  {
416  *num_buf_alloc = pg_atomic_exchange_u32(&StrategyControl->numBufferAllocs, 0);
417  }
418  SpinLockRelease(&StrategyControl->buffer_strategy_lock);
419  return result;
420 }
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:236
static uint32 pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
Definition: atomics.h:287

References BufferStrategyControl::buffer_strategy_lock, BufferStrategyControl::completePasses, NBuffers, BufferStrategyControl::nextVictimBuffer, BufferStrategyControl::numBufferAllocs, pg_atomic_exchange_u32(), pg_atomic_read_u32(), SpinLockAcquire, SpinLockRelease, and StrategyControl.

Referenced by BgBufferSync().

Variable Documentation

◆ StrategyControl