/*-------------------------------------------------------------------------
 *
 * freelist.c
 *    routines for managing the buffer pool's replacement strategy.
 *
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/storage/buffer/freelist.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "port/atomics.h"
#include "storage/buf_internals.h"
#include "storage/bufmgr.h"
#include "storage/proc.h"

#define INT_ACCESS_ONCE(var) ((int)(*((volatile int *)&(var))))

/*
 * The shared freelist control information.
 */
typedef struct
{
    /* Spinlock: protects the values below */
    slock_t     buffer_strategy_lock;

    /*
     * Clock sweep hand: index of next buffer to consider grabbing. Note that
     * this isn't a concrete buffer - we only ever increase the value. So, to
     * get an actual buffer, it needs to be used modulo NBuffers.
     */
    pg_atomic_uint32 nextVictimBuffer;

    int         firstFreeBuffer;    /* Head of list of unused buffers */
    int         lastFreeBuffer;     /* Tail of list of unused buffers */

    /*
     * NOTE: lastFreeBuffer is undefined when firstFreeBuffer is -1 (that is,
     * when the list is empty)
     */

    /*
     * Statistics. These counters should be wide enough that they can't
     * overflow during a single bgwriter cycle.
     */
    uint32      completePasses;     /* Complete cycles of the clock sweep */
    pg_atomic_uint32 numBufferAllocs;   /* Buffers allocated since last reset */

    /*
     * Bgworker process to be notified upon activity or -1 if none. See
     * StrategyNotifyBgWriter.
     */
    int         bgwprocno;
} BufferStrategyControl;

/* Pointers to shared state */
static BufferStrategyControl *StrategyControl = NULL;

/*
 * Private (non-shared) state for managing a ring of shared buffers to re-use.
 * This is currently the only kind of BufferAccessStrategy object, but someday
 * we might have more kinds.
 */
typedef struct BufferAccessStrategyData
{
    /* Overall strategy type */
    BufferAccessStrategyType btype;
    /* Number of elements in buffers[] array */
    int         ring_size;

    /*
     * Index of the "current" slot in the ring, ie, the one most recently
     * returned by GetBufferFromRing.
     */
    int         current;

    /*
     * True if the buffer just returned by StrategyGetBuffer had been in the
     * ring already.
     */
    bool        current_was_in_ring;

    /*
     * Array of buffer numbers. InvalidBuffer (that is, zero) indicates we
     * have not yet selected a buffer for this ring slot. For allocation
     * simplicity this is palloc'd together with the fixed fields of the
     * struct.
     */
    Buffer      buffers[FLEXIBLE_ARRAY_MEMBER];
} BufferAccessStrategyData;


/* Prototypes for internal functions */
static BufferDesc *GetBufferFromRing(BufferAccessStrategy strategy,
                                     uint32 *buf_state);
static void AddBufferToRing(BufferAccessStrategy strategy,
                            BufferDesc *buf);

/*
 * ClockSweepTick - Helper routine for StrategyGetBuffer()
 *
 * Move the clock hand one buffer ahead of its current position and return the
 * id of the buffer now under the hand.
 */
static inline uint32
ClockSweepTick(void)
{
    uint32      victim;

    /*
     * Atomically move the hand ahead one buffer - if several processes are
     * doing this, buffers can be returned slightly out of apparent order.
     */
    victim =
        pg_atomic_fetch_add_u32(&StrategyControl->nextVictimBuffer, 1);

    if (victim >= NBuffers)
    {
        uint32      originalVictim = victim;

        /* always wrap what we look up in BufferDescriptors */
        victim = victim % NBuffers;

        /*
         * If we're the one that just caused a wraparound, force
         * completePasses to be incremented while holding the spinlock. We
         * need the spinlock so StrategySyncStart() can return a consistent
         * value consisting of nextVictimBuffer and completePasses.
         */
        if (victim == 0)
        {
            uint32      expected;
            uint32      wrapped;
            bool        success = false;

            expected = originalVictim + 1;

            while (!success)
            {
                /*
                 * Acquire the spinlock while increasing completePasses. That
                 * allows other readers to read nextVictimBuffer and
                 * completePasses in a consistent manner which is required for
                 * StrategySyncStart(). In theory delaying the increment
                 * could lead to an overflow of nextVictimBuffer, but that's
                 * highly unlikely and wouldn't be particularly harmful.
                 */
                SpinLockAcquire(&StrategyControl->buffer_strategy_lock);

                wrapped = expected % NBuffers;

                success = pg_atomic_compare_exchange_u32(&StrategyControl->nextVictimBuffer,
                                                         &expected, wrapped);
                if (success)
                    StrategyControl->completePasses++;
                SpinLockRelease(&StrategyControl->buffer_strategy_lock);
            }
        }
    }
    return victim;
}
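
/*
 * Illustrative example of the wraparound logic above (assuming, say,
 * NBuffers = 4): if our fetch-add returned 4, we look up buffer 4 % 4 = 0,
 * and because victim == 0 we are the backend that wrapped the hand. We then
 * try to CAS nextVictimBuffer from originalVictim + 1 = 5 down to
 * 5 % 4 = 1 and bump completePasses, all under buffer_strategy_lock. If
 * another backend advanced the hand first, the CAS fails, "expected" is
 * reloaded with the current value, and we retry with that value taken
 * modulo NBuffers.
 */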

/*
 * StrategyGetBuffer
 *
 *  Called by the bufmgr to get the next candidate buffer to use in
 *  BufferAlloc(). The only hard requirement BufferAlloc() has is that
 *  the selected buffer must not currently be pinned by anyone.
 *
 *  strategy is a BufferAccessStrategy object, or NULL for default strategy.
 *
 *  To ensure that no one else can pin the buffer before we do, we must
 *  return the buffer with the buffer header spinlock still held.
 */
BufferDesc *
StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state)
{
    BufferDesc *buf;
    int         bgwprocno;
    int         trycounter;
    uint32      local_buf_state;    /* to avoid repeated (de-)referencing */

    /*
     * If given a strategy object, see whether it can select a buffer. We
     * assume strategy objects don't need buffer_strategy_lock.
     */
    if (strategy != NULL)
    {
        buf = GetBufferFromRing(strategy, buf_state);
        if (buf != NULL)
            return buf;
    }

    /*
     * If asked, we need to wake the bgwriter. Since we don't want to rely on
     * a spinlock for this we force a read from shared memory once, and then
     * set the latch based on that value. We need to go to this length
     * because otherwise bgwprocno might be reset while/after we check, as
     * the compiler might just reread the value from memory.
     *
     * This can possibly set the latch of the wrong process if the bgwriter
     * dies at the wrong moment. But since PGPROC->procLatch is never
     * deallocated, the worst consequence is that we set the latch of some
     * arbitrary process.
     */
    bgwprocno = INT_ACCESS_ONCE(StrategyControl->bgwprocno);
    if (bgwprocno != -1)
    {
        /* reset bgwprocno first, before setting the latch */
        StrategyControl->bgwprocno = -1;

        /*
         * Not acquiring ProcArrayLock here is slightly icky. It's actually
         * fine because procLatch isn't ever freed, so at worst we set the
         * wrong process' (or no process') latch.
         */
        SetLatch(&ProcGlobal->allProcs[bgwprocno].procLatch);
    }

    /*
     * We count buffer allocation requests so that the bgwriter can estimate
     * the rate of buffer consumption. Note that buffers recycled by a
     * strategy object are intentionally not counted here.
     */
    pg_atomic_fetch_add_u32(&StrategyControl->numBufferAllocs, 1);

    /*
     * First check, without acquiring the lock, whether there are buffers on
     * the freelist. Since we otherwise don't require the spinlock in every
     * StrategyGetBuffer() invocation, it'd be sad to acquire it here -
     * uselessly in most cases. That obviously leaves a race where a buffer is
     * put on the freelist but we don't see the store yet - but that's pretty
     * harmless; it'll just get used during the next buffer acquisition.
     *
     * If there are buffers on the freelist, acquire the spinlock to pop one
     * buffer off the freelist. Then check whether that buffer is usable and
     * repeat if not.
     *
     * Note that the freeNext fields are considered to be protected by the
     * buffer_strategy_lock, not the individual buffer spinlocks, so it's OK
     * to manipulate them without holding the buffer spinlock.
     */
    if (StrategyControl->firstFreeBuffer >= 0)
    {
        while (true)
        {
            /* Acquire the spinlock to remove element from the freelist */
            SpinLockAcquire(&StrategyControl->buffer_strategy_lock);

            if (StrategyControl->firstFreeBuffer < 0)
            {
                SpinLockRelease(&StrategyControl->buffer_strategy_lock);
                break;
            }

            buf = GetBufferDescriptor(StrategyControl->firstFreeBuffer);
            Assert(buf->freeNext != FREENEXT_NOT_IN_LIST);

            /* Unconditionally remove buffer from freelist */
            StrategyControl->firstFreeBuffer = buf->freeNext;
            buf->freeNext = FREENEXT_NOT_IN_LIST;

            /*
             * Release the lock so someone else can access the freelist while
             * we check out this buffer.
             */
            SpinLockRelease(&StrategyControl->buffer_strategy_lock);

            /*
             * If the buffer is pinned or has a nonzero usage_count, we cannot
             * use it; discard it and retry. (This can only happen if VACUUM
             * put a valid buffer in the freelist and then someone else used
             * it before we got to it. It's probably impossible altogether as
             * of 8.3, but we'd better check anyway.)
             */
            local_buf_state = LockBufHdr(buf);
            if (BUF_STATE_GET_REFCOUNT(local_buf_state) == 0
                && BUF_STATE_GET_USAGECOUNT(local_buf_state) == 0)
            {
                if (strategy != NULL)
                    AddBufferToRing(strategy, buf);
                *buf_state = local_buf_state;
                return buf;
            }
            UnlockBufHdr(buf, local_buf_state);

        }
    }

    /* Nothing on the freelist, so run the "clock sweep" algorithm */
    trycounter = NBuffers;
    for (;;)
    {
        buf = GetBufferDescriptor(ClockSweepTick());

        /*
         * If the buffer is pinned or has a nonzero usage_count, we cannot use
         * it; decrement the usage_count (unless pinned) and keep scanning.
         */
        local_buf_state = LockBufHdr(buf);

        if (BUF_STATE_GET_REFCOUNT(local_buf_state) == 0)
        {
            if (BUF_STATE_GET_USAGECOUNT(local_buf_state) != 0)
            {
                local_buf_state -= BUF_USAGECOUNT_ONE;

                trycounter = NBuffers;
            }
            else
            {
                /* Found a usable buffer */
                if (strategy != NULL)
                    AddBufferToRing(strategy, buf);
                *buf_state = local_buf_state;
                return buf;
            }
        }
        else if (--trycounter == 0)
        {
            /*
             * We've scanned all the buffers without making any state changes,
             * so all the buffers are pinned (or were when we looked at them).
             * We could hope that someone will free one eventually, but it's
             * probably better to fail than to risk getting stuck in an
             * infinite loop.
             */
            UnlockBufHdr(buf, local_buf_state);
            elog(ERROR, "no unpinned buffers available");
        }
        UnlockBufHdr(buf, local_buf_state);
    }
}
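
/*
 * For orientation, the expected caller is BufferAlloc() in bufmgr.c, which
 * uses this function roughly like (variable names here are illustrative):
 *
 *      uint32      buf_state;
 *      BufferDesc *victim = StrategyGetBuffer(strategy, &buf_state);
 *
 * On return the victim's header spinlock is held and its state is in
 * buf_state; the caller is expected to pin the buffer (which releases the
 * spinlock) before doing anything that could block, such as writing out the
 * old page.
 */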

/*
 * StrategyFreeBuffer: put a buffer on the freelist
 */
void
StrategyFreeBuffer(BufferDesc *buf)
{
    SpinLockAcquire(&StrategyControl->buffer_strategy_lock);

    /*
     * It is possible that we are told to put something in the freelist that
     * is already in it; don't screw up the list if so.
     */
    if (buf->freeNext == FREENEXT_NOT_IN_LIST)
    {
        buf->freeNext = StrategyControl->firstFreeBuffer;
        if (buf->freeNext < 0)
            StrategyControl->lastFreeBuffer = buf->buf_id;
        StrategyControl->firstFreeBuffer = buf->buf_id;
    }

    SpinLockRelease(&StrategyControl->buffer_strategy_lock);
}

/*
 * StrategySyncStart -- tell BufferSync where to start syncing
 *
 * The result is the buffer index of the best buffer to sync first.
 * BufferSync() will proceed circularly around the buffer array from there.
 *
 * In addition, we return the completed-pass count (which is effectively
 * the higher-order bits of nextVictimBuffer) and the count of recent buffer
 * allocs if non-NULL pointers are passed. The alloc count is reset after
 * being read.
 */
int
StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
{
    uint32      nextVictimBuffer;
    int         result;

    SpinLockAcquire(&StrategyControl->buffer_strategy_lock);
    nextVictimBuffer = pg_atomic_read_u32(&StrategyControl->nextVictimBuffer);
    result = nextVictimBuffer % NBuffers;

    if (complete_passes)
    {
        *complete_passes = StrategyControl->completePasses;

        /*
         * Additionally add the number of wraparounds that happened before
         * completePasses could be incremented. C.f. ClockSweepTick().
         */
        *complete_passes += nextVictimBuffer / NBuffers;
    }

    if (num_buf_alloc)
    {
        *num_buf_alloc = pg_atomic_exchange_u32(&StrategyControl->numBufferAllocs, 0);
    }
    SpinLockRelease(&StrategyControl->buffer_strategy_lock);
    return result;
}
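
/*
 * BgBufferSync() in bufmgr.c is the caller that makes use of the
 * complete_passes and num_buf_alloc outputs. Roughly, it combines the two
 * values into a linear position of the clock hand so it can measure how far
 * the hand has moved since its previous call, e.g.
 *
 *      buf_id = StrategySyncStart(&passes, &recent_alloc);
 *      position = (long) passes * NBuffers + buf_id;
 *
 * (a sketch; the variable names are illustrative). This is why
 * completePasses can be thought of as the high-order bits of
 * nextVictimBuffer.
 */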

/*
 * StrategyNotifyBgWriter -- set or clear allocation notification latch
 *
 * If bgwprocno isn't -1, the next invocation of StrategyGetBuffer will
 * set that latch. Pass -1 to clear the pending notification before it
 * happens. This feature is used by the bgwriter process to wake itself up
 * from hibernation, and is not meant for anybody else to use.
 */
void
StrategyNotifyBgWriter(int bgwprocno)
{
    /*
     * We acquire buffer_strategy_lock just to ensure that the store appears
     * atomic to StrategyGetBuffer. The bgwriter should call this rather
     * infrequently, so there's no performance penalty from being safe.
     */
    SpinLockAcquire(&StrategyControl->buffer_strategy_lock);
    StrategyControl->bgwprocno = bgwprocno;
    SpinLockRelease(&StrategyControl->buffer_strategy_lock);
}
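
/*
 * For reference: before going into hibernation the background writer (in
 * bgwriter.c) is expected to call this with its own PGPROC index
 * (MyProc->pgprocno), so that the first backend that subsequently allocates
 * a buffer wakes it via the SetLatch() call in StrategyGetBuffer().
 */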


/*
 * StrategyShmemSize
 *
 * estimate the size of shared memory used by the freelist-related structures.
 *
 * Note: for somewhat historical reasons, the buffer lookup hashtable size
 * is also determined here.
 */
Size
StrategyShmemSize(void)
{
    Size        size = 0;

    /* size of lookup hash table ... see comment in StrategyInitialize */
    size = add_size(size, BufTableShmemSize(NBuffers + NUM_BUFFER_PARTITIONS));

    /* size of the shared replacement strategy control block */
    size = add_size(size, MAXALIGN(sizeof(BufferStrategyControl)));

    return size;
}

/*
 * StrategyInitialize -- initialize the buffer cache replacement
 *      strategy.
 *
 * Assumes: All of the buffers are already built into a linked list.
 *      Only called by postmaster and only during initialization.
 */
void
StrategyInitialize(bool init)
{
    bool        found;

    /*
     * Initialize the shared buffer lookup hashtable.
     *
     * Since we can't tolerate running out of lookup table entries, we must be
     * sure to specify an adequate table size here. The maximum steady-state
     * usage is of course NBuffers entries, but BufferAlloc() tries to insert
     * a new entry before deleting the old. In principle this could be
     * happening in each partition concurrently, so we could need as many as
     * NBuffers + NUM_BUFFER_PARTITIONS entries.
     */
    InitBufTable(NBuffers + NUM_BUFFER_PARTITIONS);

    /*
     * Get or create the shared strategy control block
     */
    StrategyControl = (BufferStrategyControl *)
        ShmemInitStruct("Buffer Strategy Status",
                        sizeof(BufferStrategyControl),
                        &found);

    if (!found)
    {
        /*
         * Only done once, usually in postmaster
         */
        Assert(init);

        SpinLockInit(&StrategyControl->buffer_strategy_lock);

        /*
         * Grab the whole linked list of free buffers for our strategy. We
         * assume it was previously set up by InitBufferPool().
         */
        StrategyControl->firstFreeBuffer = 0;
        StrategyControl->lastFreeBuffer = NBuffers - 1;

        /* Initialize the clock sweep pointer */
        pg_atomic_init_u32(&StrategyControl->nextVictimBuffer, 0);

        /* Clear statistics */
        StrategyControl->completePasses = 0;
        pg_atomic_init_u32(&StrategyControl->numBufferAllocs, 0);

        /* No pending notification */
        StrategyControl->bgwprocno = -1;
    }
    else
        Assert(!init);
}


/* ----------------------------------------------------------------
 *              Backend-private buffer ring management
 * ----------------------------------------------------------------
 */


/*
 * GetAccessStrategy -- create a BufferAccessStrategy object
 *
 * The object is allocated in the current memory context.
 */
BufferAccessStrategy
GetAccessStrategy(BufferAccessStrategyType btype)
{
    BufferAccessStrategy strategy;
    int         ring_size;

    /*
     * Select ring size to use. See buffer/README for rationales.
     *
     * Note: if you change the ring size for BAS_BULKREAD, see also
     * SYNC_SCAN_REPORT_INTERVAL in access/heap/syncscan.c.
     */
    switch (btype)
    {
        case BAS_NORMAL:
            /* if someone asks for NORMAL, just give 'em a "default" object */
            return NULL;

        case BAS_BULKREAD:
            ring_size = 256 * 1024 / BLCKSZ;
            break;
        case BAS_BULKWRITE:
            ring_size = 16 * 1024 * 1024 / BLCKSZ;
            break;
        case BAS_VACUUM:
            ring_size = 256 * 1024 / BLCKSZ;
            break;

        default:
            elog(ERROR, "unrecognized buffer access strategy: %d",
                 (int) btype);
            return NULL;        /* keep compiler quiet */
    }

    /* Make sure ring isn't an undue fraction of shared buffers */
    ring_size = Min(NBuffers / 8, ring_size);

    /* Allocate the object and initialize all elements to zeroes */
    strategy = (BufferAccessStrategy)
        palloc0(offsetof(BufferAccessStrategyData, buffers) +
                ring_size * sizeof(Buffer));

    /* Set fields that don't start out zero */
    strategy->btype = btype;
    strategy->ring_size = ring_size;

    return strategy;
}
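
/*
 * With the default BLCKSZ of 8192, the ring sizes above work out to
 * 256 kB / 8 kB = 32 buffers for BAS_BULKREAD and BAS_VACUUM, and
 * 16 MB / 8 kB = 2048 buffers for BAS_BULKWRITE, each further clamped to
 * NBuffers / 8. A typical caller sequence is roughly (sketch only):
 *
 *      BufferAccessStrategy strategy = GetAccessStrategy(BAS_BULKREAD);
 *      ...
 *      buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
 *                               strategy);
 *      ...
 *      FreeAccessStrategy(strategy);
 */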

/*
 * FreeAccessStrategy -- release a BufferAccessStrategy object
 *
 * A simple pfree would do at the moment, but we would prefer that callers
 * don't assume that much about the representation of BufferAccessStrategy.
 */
void
FreeAccessStrategy(BufferAccessStrategy strategy)
{
    /* don't crash if called on a "default" strategy */
    if (strategy != NULL)
        pfree(strategy);
}

/*
 * GetBufferFromRing -- returns a buffer from the ring, or NULL if the
 *      ring is empty.
 *
 * The bufhdr spin lock is held on the returned buffer.
 */
static BufferDesc *
GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_state)
{
    BufferDesc *buf;
    Buffer      bufnum;
    uint32      local_buf_state;    /* to avoid repeated (de-)referencing */


    /* Advance to next ring slot */
    if (++strategy->current >= strategy->ring_size)
        strategy->current = 0;

    /*
     * If the slot hasn't been filled yet, tell the caller to allocate a new
     * buffer with the normal allocation strategy. He will then fill this
     * slot by calling AddBufferToRing with the new buffer.
     */
    bufnum = strategy->buffers[strategy->current];
    if (bufnum == InvalidBuffer)
    {
        strategy->current_was_in_ring = false;
        return NULL;
    }

    /*
     * If the buffer is pinned we cannot use it under any circumstances.
     *
     * If usage_count is 0 or 1 then the buffer is fair game (we expect 1,
     * since our own previous usage of the ring element would have left it
     * there, but it might've been decremented by clock sweep since then). A
     * higher usage_count indicates someone else has touched the buffer, so we
     * shouldn't re-use it.
     */
    buf = GetBufferDescriptor(bufnum - 1);
    local_buf_state = LockBufHdr(buf);
    if (BUF_STATE_GET_REFCOUNT(local_buf_state) == 0
        && BUF_STATE_GET_USAGECOUNT(local_buf_state) <= 1)
    {
        strategy->current_was_in_ring = true;
        *buf_state = local_buf_state;
        return buf;
    }
    UnlockBufHdr(buf, local_buf_state);

    /*
     * Tell caller to allocate a new buffer with the normal allocation
     * strategy. He'll then replace this ring element via AddBufferToRing.
     */
    strategy->current_was_in_ring = false;
    return NULL;
}

/*
 * AddBufferToRing -- add a buffer to the buffer ring
 *
 * Caller must hold the buffer header spinlock on the buffer. Since this
 * is called with the spinlock held, it had better be quite cheap.
 */
static void
AddBufferToRing(BufferAccessStrategy strategy, BufferDesc *buf)
{
    strategy->buffers[strategy->current] = BufferDescriptorGetBuffer(buf);
}

/*
 * StrategyRejectBuffer -- consider rejecting a dirty buffer
 *
 * When a nondefault strategy is used, the buffer manager calls this function
 * when it turns out that the buffer selected by StrategyGetBuffer needs to
 * be written out and doing so would require flushing WAL too. This gives us
 * a chance to choose a different victim.
 *
 * Returns true if buffer manager should ask for a new victim, and false
 * if this buffer should be written and re-used.
 */
bool
StrategyRejectBuffer(BufferAccessStrategy strategy, BufferDesc *buf)
{
    /* We only do this in bulkread mode */
    if (strategy->btype != BAS_BULKREAD)
        return false;

    /* Don't muck with behavior of normal buffer-replacement strategy */
    if (!strategy->current_was_in_ring ||
        strategy->buffers[strategy->current] != BufferDescriptorGetBuffer(buf))
        return false;

    /*
     * Remove the dirty buffer from the ring; necessary to prevent infinite
     * loop if all ring members are dirty.
     */
    strategy->buffers[strategy->current] = InvalidBuffer;

    return true;
}
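
/*
 * For orientation: the caller is BufferAlloc() in bufmgr.c. Roughly, when
 * the chosen victim is dirty and a strategy is in use, it checks whether
 * flushing would force a WAL flush (XLogNeedsFlush()) and, if so, asks
 * StrategyRejectBuffer() whether to drop this victim and pick another one
 * instead of paying that cost.
 */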