bufmgr.c
1 /*-------------------------------------------------------------------------
2  *
3  * bufmgr.c
4  * buffer manager interface routines
5  *
6  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/buffer/bufmgr.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Principal entry points:
17  *
18  * ReadBuffer() -- find or create a buffer holding the requested page,
19  * and pin it so that no one can destroy it while this process
20  * is using it.
21  *
22  * ReleaseBuffer() -- unpin a buffer
23  *
24  * MarkBufferDirty() -- mark a pinned buffer's contents as "dirty".
25  * The disk write is delayed until buffer replacement or checkpoint.
26  *
27  * See also these files:
28  * freelist.c -- chooses victim for buffer replacement
29  * buf_table.c -- manages the buffer lookup table
30  */
31 #include "postgres.h"
32 
33 #include <sys/file.h>
34 #include <unistd.h>
35 
36 #include "access/tableam.h"
37 #include "access/xlog.h"
38 #include "catalog/catalog.h"
39 #include "catalog/storage.h"
40 #include "executor/instrument.h"
41 #include "lib/binaryheap.h"
42 #include "miscadmin.h"
43 #include "pg_trace.h"
44 #include "pgstat.h"
45 #include "postmaster/bgwriter.h"
46 #include "storage/buf_internals.h"
47 #include "storage/bufmgr.h"
48 #include "storage/ipc.h"
49 #include "storage/proc.h"
50 #include "storage/smgr.h"
51 #include "storage/standby.h"
52 #include "utils/memdebug.h"
53 #include "utils/ps_status.h"
54 #include "utils/rel.h"
55 #include "utils/resowner_private.h"
56 #include "utils/timestamp.h"
57 
58 
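/*
 * Illustrative sketch, not part of bufmgr.c: a read-only pass through the
 * principal entry points described at the top of this file.  The function
 * name is hypothetical; it assumes the relation is already open and the
 * block number is within bounds.
 */
static void
read_one_block_sketch(Relation rel, BlockNumber blkno)
{
    Buffer      buf;
    Page        page;

    buf = ReadBuffer(rel, blkno);       /* find or create the buffer and pin it */
    LockBuffer(buf, BUFFER_LOCK_SHARE); /* the content lock is separate from the pin */

    page = BufferGetPage(buf);
    /* ... examine the page contents here ... */
    (void) page;

    UnlockReleaseBuffer(buf);           /* drop the content lock, then unpin */
}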
59 /* Note: these two macros only work on shared buffers, not local ones! */
60 #define BufHdrGetBlock(bufHdr) ((Block) (BufferBlocks + ((Size) (bufHdr)->buf_id) * BLCKSZ))
61 #define BufferGetLSN(bufHdr) (PageGetLSN(BufHdrGetBlock(bufHdr)))
62 
63 /* Note: this macro only works on local buffers, not shared ones! */
64 #define LocalBufHdrGetBlock(bufHdr) \
65  LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
66 
67 /* Bits in SyncOneBuffer's return value */
68 #define BUF_WRITTEN 0x01
69 #define BUF_REUSABLE 0x02
70 
71 #define RELS_BSEARCH_THRESHOLD 20
72 
73 /*
74  * This is the size (in the number of blocks) above which we scan the
75  * entire buffer pool to remove the buffers for all the pages of the
76  * relation being dropped. For relations with size below this threshold, we
77  * find the buffers by doing lookups in the BufMapping table.
78  */
79 #define BUF_DROP_FULL_SCAN_THRESHOLD (uint64) (NBuffers / 32)
80 
81 typedef struct PrivateRefCountEntry
82 {
83  Buffer buffer;
84  int32 refcount;
85 } PrivateRefCountEntry;
86 
87 /* 64 bytes, about the size of a cache line on common systems */
88 #define REFCOUNT_ARRAY_ENTRIES 8
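/*
 * Worked arithmetic (illustrative, not from the original file): each
 * PrivateRefCountEntry is 8 bytes (a 4-byte Buffer plus a 4-byte int32
 * refcount), so 8 entries fill the 64 bytes mentioned above.
 */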
89 
90 /*
91  * Status of buffers to checkpoint for a particular tablespace, used
92  * internally in BufferSync.
93  */
94 typedef struct CkptTsStatus
95 {
96  /* oid of the tablespace */
97  Oid tsId;
98 
99  /*
100  * Checkpoint progress for this tablespace. To make progress comparable
101  * between tablespaces the progress is, for each tablespace, measured as a
102  * number between 0 and the total number of to-be-checkpointed pages. Each
103  * page checkpointed in this tablespace increments this space's progress
104  * by progress_slice.
105  */
106  float8 progress;
107  float8 progress_slice;
108 
109  /* number of to-be checkpointed pages in this tablespace */
110  int num_to_scan;
111  /* already processed pages in this tablespace */
112  int num_scanned;
113 
114  /* current offset in CkptBufferIds for this tablespace */
115  int index;
116 } CkptTsStatus;
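/*
 * Worked example for the progress accounting above (illustrative, not from
 * the original file): with 10000 pages to checkpoint in total, a tablespace
 * holding 2500 of them gets progress_slice = 10000 / 2500 = 4, so its
 * progress also reaches 10000 once its last page is written.  That keeps
 * progress directly comparable across tablespaces of different sizes.
 */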
117 
118 /*
119  * Type for array used to sort SMgrRelations
120  *
121  * FlushRelationsAllBuffers shares the same comparator function with
122  * DropRelFileNodesAllBuffers. Pointer to this struct and RelFileNode must be
123  * compatible.
124  */
125 typedef struct SMgrSortArray
126 {
127  RelFileNode rnode; /* This must be the first member */
128  SMgrRelation srel;
129 } SMgrSortArray;
130 
131 /* GUC variables */
132 bool zero_damaged_pages = false;
133 int bgwriter_lru_maxpages = 100;
134 double bgwriter_lru_multiplier = 2.0;
135 bool track_io_timing = false;
136 
137 /*
138  * How many buffers PrefetchBuffer callers should try to stay ahead of their
139  * ReadBuffer calls by. Zero means "never prefetch". This value is only used
140  * for buffers not belonging to tablespaces that have their
141  * effective_io_concurrency parameter set.
142  */
143 int effective_io_concurrency = 0;
144 
145 /*
146  * Like effective_io_concurrency, but used by maintenance code paths that might
147  * benefit from a higher setting because they work on behalf of many sessions.
148  * Overridden by the tablespace setting of the same name.
149  */
150 int maintenance_io_concurrency = 0;
151 
152 /*
153  * GUC variables about triggering kernel writeback for buffers written; OS
154  * dependent defaults are set via the GUC mechanism.
155  */
156 int checkpoint_flush_after = 0;
157 int bgwriter_flush_after = 0;
158 int backend_flush_after = 0;
159 
160 /* local state for StartBufferIO and related functions */
161 static BufferDesc *InProgressBuf = NULL;
162 static bool IsForInput;
163 
164 /* local state for LockBufferForCleanup */
165 static BufferDesc *PinCountWaitBuf = NULL;
166 
167 /*
168  * Backend-Private refcount management:
169  *
170  * Each buffer also has a private refcount that keeps track of the number of
171  * times the buffer is pinned in the current process. This is so that the
172  * shared refcount needs to be modified only once if a buffer is pinned more
173  * than once by an individual backend. It's also used to check that no buffers
174  * are still pinned at the end of transactions and when exiting.
175  *
176  *
177  * To avoid - as we used to - requiring an array with NBuffers entries to keep
178  * track of local buffers, we use a small sequentially searched array
179  * (PrivateRefCountArray) and an overflow hash table (PrivateRefCountHash) to
180  * keep track of backend local pins.
181  *
182  * As long as no more than REFCOUNT_ARRAY_ENTRIES buffers are pinned at once,
183  * all refcounts are tracked in the array; after that, new array entries
184  * displace old ones into the hash table. That way a frequently used entry
185  * can't get "stuck" in the hashtable while infrequent ones clog the array.
186  *
187  * Note that in most scenarios the number of pinned buffers will not exceed
188  * REFCOUNT_ARRAY_ENTRIES.
189  *
190  *
191  * To enter a buffer into the refcount tracking mechanism first reserve a free
192  * entry using ReservePrivateRefCountEntry() and then later, if necessary,
193  * fill it with NewPrivateRefCountEntry(). That split lets us avoid doing
194  * memory allocations in NewPrivateRefCountEntry() which can be important
195  * because in some scenarios it's called with a spinlock held...
196  */
197 static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES];
198 static HTAB *PrivateRefCountHash = NULL;
199 static int32 PrivateRefCountOverflowed = 0;
200 static uint32 PrivateRefCountClock = 0;
201 static PrivateRefCountEntry *ReservedRefCountEntry = NULL;
202 
203 static void ReservePrivateRefCountEntry(void);
204 static PrivateRefCountEntry *NewPrivateRefCountEntry(Buffer buffer);
205 static PrivateRefCountEntry *GetPrivateRefCountEntry(Buffer buffer, bool do_move);
206 static inline int32 GetPrivateRefCount(Buffer buffer);
207 static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref);
208 
209 /*
210  * Ensure that the PrivateRefCountArray has sufficient space to store one more
211  * entry. This has to be called before using NewPrivateRefCountEntry() to fill
212  * a new entry - but it's perfectly fine to not use a reserved entry.
213  */
214 static void
215 ReservePrivateRefCountEntry(void)
216 {
217  /* Already reserved (or freed), nothing to do */
218  if (ReservedRefCountEntry != NULL)
219  return;
220 
221  /*
222  * First search for a free entry in the array; that'll be sufficient in the
223  * majority of cases.
224  */
225  {
226  int i;
227 
228  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
229  {
230  PrivateRefCountEntry *res;
231 
232  res = &PrivateRefCountArray[i];
233 
234  if (res->buffer == InvalidBuffer)
235  {
236  ReservedRefCountEntry = res;
237  return;
238  }
239  }
240  }
241 
242  /*
243  * No luck. All array entries are full. Move one array entry into the hash
244  * table.
245  */
246  {
247  /*
248  * Move entry from the current clock position in the array into the
249  * hashtable. Use that slot.
250  */
251  PrivateRefCountEntry *hashent;
252  bool found;
253 
254  /* select victim slot */
255  ReservedRefCountEntry =
256  &PrivateRefCountArray[PrivateRefCountClock++ % REFCOUNT_ARRAY_ENTRIES];
257 
258  /* Better be used, otherwise we shouldn't get here. */
259  Assert(ReservedRefCountEntry->buffer != InvalidBuffer);
260 
261  /* enter victim array entry into hashtable */
262  hashent = hash_search(PrivateRefCountHash,
263  (void *) &(ReservedRefCountEntry->buffer),
264  HASH_ENTER,
265  &found);
266  Assert(!found);
267  hashent->refcount = ReservedRefCountEntry->refcount;
268 
269  /* clear the now free array slot */
270  ReservedRefCountEntry->buffer = InvalidBuffer;
271  ReservedRefCountEntry->refcount = 0;
272 
273  PrivateRefCountOverflowed++;
274  }
275 }
276 
277 /*
278  * Fill a previously reserved refcount entry.
279  */
280 static PrivateRefCountEntry *
281 NewPrivateRefCountEntry(Buffer buffer)
282 {
283  PrivateRefCountEntry *res;
284 
285  /* only allowed to be called when a reservation has been made */
286  Assert(ReservedRefCountEntry != NULL);
287 
288  /* use up the reserved entry */
289  res = ReservedRefCountEntry;
290  ReservedRefCountEntry = NULL;
291 
292  /* and fill it */
293  res->buffer = buffer;
294  res->refcount = 0;
295 
296  return res;
297 }
298 
299 /*
300  * Return the PrivateRefCount entry for the passed buffer.
301  *
302  * Returns NULL if a buffer doesn't have a refcount entry. Otherwise, if
303  * do_move is true, and the entry resides in the hashtable the entry is
304  * optimized for frequent access by moving it to the array.
305  */
306 static PrivateRefCountEntry *
307 GetPrivateRefCountEntry(Buffer buffer, bool do_move)
308 {
309  PrivateRefCountEntry *res;
310  int i;
311 
312  Assert(BufferIsValid(buffer));
313  Assert(!BufferIsLocal(buffer));
314 
315  /*
316  * First search for references in the array, that'll be sufficient in the
317  * majority of cases.
318  */
319  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
320  {
321  res = &PrivateRefCountArray[i];
322 
323  if (res->buffer == buffer)
324  return res;
325  }
326 
327  /*
328  * By here we know that the buffer, if already pinned, isn't residing in
329  * the array.
330  *
331  * Only look up the buffer in the hashtable if we've previously overflowed
332  * into it.
333  */
334  if (PrivateRefCountOverflowed == 0)
335  return NULL;
336 
337  res = hash_search(PrivateRefCountHash,
338  (void *) &buffer,
339  HASH_FIND,
340  NULL);
341 
342  if (res == NULL)
343  return NULL;
344  else if (!do_move)
345  {
346  /* caller doesn't want us to move the hash entry into the array */
347  return res;
348  }
349  else
350  {
351  /* move buffer from hashtable into the free array slot */
352  bool found;
353  PrivateRefCountEntry *free;
354 
355  /* Ensure there's a free array slot */
356  ReservePrivateRefCountEntry();
357 
358  /* Use up the reserved slot */
359  Assert(ReservedRefCountEntry != NULL);
360  free = ReservedRefCountEntry;
361  ReservedRefCountEntry = NULL;
362  Assert(free->buffer == InvalidBuffer);
363 
364  /* and fill it */
365  free->buffer = buffer;
366  free->refcount = res->refcount;
367 
368  /* delete from hashtable */
369  hash_search(PrivateRefCountHash,
370  (void *) &buffer,
371  HASH_REMOVE,
372  &found);
373  Assert(found);
374  Assert(PrivateRefCountOverflowed > 0);
375  PrivateRefCountOverflowed--;
376 
377  return free;
378  }
379 }
380 
381 /*
382  * Returns how many times the passed buffer is pinned by this backend.
383  *
384  * Only works for shared memory buffers!
385  */
386 static inline int32
387 GetPrivateRefCount(Buffer buffer)
388 {
389  PrivateRefCountEntry *ref;
390 
391  Assert(BufferIsValid(buffer));
392  Assert(!BufferIsLocal(buffer));
393 
394  /*
395  * Not moving the entry - that's ok for the current users, but we might
396  * want to change this one day.
397  */
398  ref = GetPrivateRefCountEntry(buffer, false);
399 
400  if (ref == NULL)
401  return 0;
402  return ref->refcount;
403 }
404 
405 /*
406  * Release resources used to track the reference count of a buffer which we no
407  * longer have pinned and don't want to pin again immediately.
408  */
409 static void
410 ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
411 {
412  Assert(ref->refcount == 0);
413 
414  if (ref >= &PrivateRefCountArray[0] &&
415  ref < &PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES])
416  {
417  ref->buffer = InvalidBuffer;
418 
419  /*
420  * Mark the just used entry as reserved - in many scenarios that
421  * allows us to avoid ever having to search the array/hash for free
422  * entries.
423  */
424  ReservedRefCountEntry = ref;
425  }
426  else
427  {
428  bool found;
429  Buffer buffer = ref->buffer;
430 
431  hash_search(PrivateRefCountHash,
432  (void *) &buffer,
433  HASH_REMOVE,
434  &found);
435  Assert(found);
436  Assert(PrivateRefCountOverflowed > 0);
437  PrivateRefCountOverflowed--;
438  }
439 }
440 
441 /*
442  * BufferIsPinned
443  * True iff the buffer is pinned (also checks for valid buffer number).
444  *
445  * NOTE: what we check here is that *this* backend holds a pin on
446  * the buffer. We do not care whether some other backend does.
447  */
448 #define BufferIsPinned(bufnum) \
449 ( \
450  !BufferIsValid(bufnum) ? \
451  false \
452  : \
453  BufferIsLocal(bufnum) ? \
454  (LocalRefCount[-(bufnum) - 1] > 0) \
455  : \
456  (GetPrivateRefCount(bufnum) > 0) \
457 )
458 
459 
460 static Buffer ReadBuffer_common(SMgrRelation reln, char relpersistence,
461  ForkNumber forkNum, BlockNumber blockNum,
462  ReadBufferMode mode, BufferAccessStrategy strategy,
463  bool *hit);
464 static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy);
465 static void PinBuffer_Locked(BufferDesc *buf);
466 static void UnpinBuffer(BufferDesc *buf, bool fixOwner);
467 static void BufferSync(int flags);
468 static uint32 WaitBufHdrUnlocked(BufferDesc *buf);
469 static int SyncOneBuffer(int buf_id, bool skip_recently_used,
470  WritebackContext *wb_context);
471 static void WaitIO(BufferDesc *buf);
472 static bool StartBufferIO(BufferDesc *buf, bool forInput);
473 static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty,
474  uint32 set_flag_bits);
475 static void shared_buffer_write_error_callback(void *arg);
476 static void local_buffer_write_error_callback(void *arg);
477 static BufferDesc *BufferAlloc(SMgrRelation smgr,
478  char relpersistence,
479  ForkNumber forkNum,
480  BlockNumber blockNum,
481  BufferAccessStrategy strategy,
482  bool *foundPtr);
483 static void FlushBuffer(BufferDesc *buf, SMgrRelation reln);
484 static void FindAndDropRelFileNodeBuffers(RelFileNode rnode,
485  ForkNumber forkNum,
486  BlockNumber nForkBlock,
487  BlockNumber firstDelBlock);
488 static void AtProcExit_Buffers(int code, Datum arg);
489 static void CheckForBufferLeaks(void);
490 static int rnode_comparator(const void *p1, const void *p2);
491 static inline int buffertag_comparator(const BufferTag *a, const BufferTag *b);
492 static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
493 static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
494 
495 
496 /*
497  * Implementation of PrefetchBuffer() for shared buffers.
498  */
499 PrefetchBufferResult
500 PrefetchSharedBuffer(SMgrRelation smgr_reln,
501  ForkNumber forkNum,
502  BlockNumber blockNum)
503 {
504  PrefetchBufferResult result = {InvalidBuffer, false};
505  BufferTag newTag; /* identity of requested block */
506  uint32 newHash; /* hash value for newTag */
507  LWLock *newPartitionLock; /* buffer partition lock for it */
508  int buf_id;
509 
510  Assert(BlockNumberIsValid(blockNum));
511 
512  /* create a tag so we can lookup the buffer */
513  INIT_BUFFERTAG(newTag, smgr_reln->smgr_rnode.node,
514  forkNum, blockNum);
515 
516  /* determine its hash code and partition lock ID */
517  newHash = BufTableHashCode(&newTag);
518  newPartitionLock = BufMappingPartitionLock(newHash);
519 
520  /* see if the block is in the buffer pool already */
521  LWLockAcquire(newPartitionLock, LW_SHARED);
522  buf_id = BufTableLookup(&newTag, newHash);
523  LWLockRelease(newPartitionLock);
524 
525  /* If not in buffers, initiate prefetch */
526  if (buf_id < 0)
527  {
528 #ifdef USE_PREFETCH
529  /*
530  * Try to initiate an asynchronous read. This returns false in
531  * recovery if the relation file doesn't exist.
532  */
533  if (smgrprefetch(smgr_reln, forkNum, blockNum))
534  result.initiated_io = true;
535 #endif /* USE_PREFETCH */
536  }
537  else
538  {
539  /*
540  * Report the buffer it was in at that time. The caller may be able
541  * to avoid a buffer table lookup, but it's not pinned and it must be
542  * rechecked!
543  */
544  result.recent_buffer = buf_id + 1;
545  }
546 
547  /*
548  * If the block *is* in buffers, we do nothing. This is not really ideal:
549  * the block might be just about to be evicted, which would be stupid
550  * since we know we are going to need it soon. But the only easy answer
551  * is to bump the usage_count, which does not seem like a great solution:
552  * when the caller does ultimately touch the block, usage_count would get
553  * bumped again, resulting in too much favoritism for blocks that are
554  * involved in a prefetch sequence. A real fix would involve some
555  * additional per-buffer state, and it's not clear that there's enough of
556  * a problem to justify that.
557  */
558 
559  return result;
560 }
561 
562 /*
563  * PrefetchBuffer -- initiate asynchronous read of a block of a relation
564  *
565  * This is named by analogy to ReadBuffer but doesn't actually allocate a
566  * buffer. Instead it tries to ensure that a future ReadBuffer for the given
567  * block will not be delayed by the I/O. Prefetching is optional.
568  *
569  * There are three possible outcomes:
570  *
571  * 1. If the block is already cached, the result includes a valid buffer that
572  * could be used by the caller to avoid the need for a later buffer lookup, but
573  * it's not pinned, so the caller must recheck it.
574  *
575  * 2. If the kernel has been asked to initiate I/O, the initiated_io member is
576  * true. Currently there is no way to know if the data was already cached by
577  * the kernel and therefore didn't really initiate I/O, and no way to know when
578  * the I/O completes other than using synchronous ReadBuffer().
579  *
580  * 3. Otherwise, the buffer wasn't already cached by PostgreSQL, and either
581  * USE_PREFETCH is not defined (this build doesn't support prefetching due to
582  * lack of a kernel facility), or the underlying relation file wasn't found and
583  * we are in recovery. (If the relation file wasn't found and we are not in
584  * recovery, an error is raised).
585  */
586 PrefetchBufferResult
587 PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
588 {
589  Assert(RelationIsValid(reln));
590  Assert(BlockNumberIsValid(blockNum));
591 
592  /* Open it at the smgr level if not already done */
593  RelationOpenSmgr(reln);
594 
595  if (RelationUsesLocalBuffers(reln))
596  {
597  /* see comments in ReadBufferExtended */
598  if (RELATION_IS_OTHER_TEMP(reln))
599  ereport(ERROR,
600  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
601  errmsg("cannot access temporary tables of other sessions")));
602 
603  /* pass it off to localbuf.c */
604  return PrefetchLocalBuffer(reln->rd_smgr, forkNum, blockNum);
605  }
606  else
607  {
608  /* pass it to the shared buffer version */
609  return PrefetchSharedBuffer(reln->rd_smgr, forkNum, blockNum);
610  }
611 }
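/*
 * Illustrative sketch, not part of bufmgr.c: issuing PrefetchBuffer() a fixed
 * distance ahead of ReadBuffer() so that, on builds with USE_PREFETCH, the
 * later read is less likely to block on I/O.  The function name and the fixed
 * lookahead of 8 blocks are hypothetical; real callers typically scale the
 * distance with effective_io_concurrency or maintenance_io_concurrency.
 */
static void
scan_with_prefetch_sketch(Relation rel)
{
    BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
    BlockNumber prefetch_distance = 8;
    BlockNumber blkno;

    for (blkno = 0; blkno < nblocks; blkno++)
    {
        Buffer      buf;

        /* stay prefetch_distance blocks ahead of the block we read next */
        if (blkno + prefetch_distance < nblocks)
            (void) PrefetchBuffer(rel, MAIN_FORKNUM, blkno + prefetch_distance);

        buf = ReadBuffer(rel, blkno);
        /* ... examine BufferGetPage(buf) under a content lock ... */
        ReleaseBuffer(buf);
    }
}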
612 
613 /*
614  * ReadRecentBuffer -- try to pin a block in a recently observed buffer
615  *
616  * Compared to ReadBuffer(), this avoids a buffer mapping lookup when it's
617  * successful. Return true if the buffer is valid and still has the expected
618  * tag. In that case, the buffer is pinned and the usage count is bumped.
619  */
620 bool
621 ReadRecentBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum,
622  Buffer recent_buffer)
623 {
624  BufferDesc *bufHdr;
625  BufferTag tag;
626  uint32 buf_state;
627  bool have_private_ref;
628 
629  Assert(BufferIsValid(recent_buffer));
630 
631  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
632  ReservePrivateRefCountEntry();
633  INIT_BUFFERTAG(tag, rnode, forkNum, blockNum);
634 
635  if (BufferIsLocal(recent_buffer))
636  {
637  bufHdr = GetBufferDescriptor(-recent_buffer - 1);
638  buf_state = pg_atomic_read_u32(&bufHdr->state);
639 
640  /* Is it still valid and holding the right tag? */
641  if ((buf_state & BM_VALID) && BUFFERTAGS_EQUAL(tag, bufHdr->tag))
642  {
643  /* Bump local buffer's ref and usage counts. */
644  ResourceOwnerRememberBuffer(CurrentResourceOwner, recent_buffer);
645  LocalRefCount[-recent_buffer - 1]++;
646  if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
647  pg_atomic_write_u32(&bufHdr->state,
648  buf_state + BUF_USAGECOUNT_ONE);
649 
650  return true;
651  }
652  }
653  else
654  {
655  bufHdr = GetBufferDescriptor(recent_buffer - 1);
656  have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
657 
658  /*
659  * Do we already have this buffer pinned with a private reference? If
660  * so, it must be valid and it is safe to check the tag without
661  * locking. If not, we have to lock the header first and then check.
662  */
663  if (have_private_ref)
664  buf_state = pg_atomic_read_u32(&bufHdr->state);
665  else
666  buf_state = LockBufHdr(bufHdr);
667 
668  if ((buf_state & BM_VALID) && BUFFERTAGS_EQUAL(tag, bufHdr->tag))
669  {
670  /*
671  * It's now safe to pin the buffer. We can't pin first and ask
672  * questions later, because because it might confuse code paths
673  * like InvalidateBuffer() if we pinned a random non-matching
674  * buffer.
675  */
676  if (have_private_ref)
677  PinBuffer(bufHdr, NULL); /* bump pin count */
678  else
679  PinBuffer_Locked(bufHdr); /* pin for first time */
680 
681  return true;
682  }
683 
684  /* If we locked the header above, now unlock. */
685  if (!have_private_ref)
686  UnlockBufHdr(bufHdr, buf_state);
687  }
688 
689  return false;
690 }
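/*
 * Illustrative sketch, not part of bufmgr.c: re-using a buffer number
 * remembered from an earlier PrefetchBuffer() call.  If the block is still in
 * that buffer, ReadRecentBuffer() pins it without a mapping-table lookup;
 * otherwise fall back to a normal ReadBuffer().  The function name is
 * hypothetical.
 */
static Buffer
read_with_recent_hint_sketch(Relation rel, BlockNumber blkno,
                             Buffer recent_buffer)
{
    if (BufferIsValid(recent_buffer) &&
        ReadRecentBuffer(rel->rd_node, MAIN_FORKNUM, blkno, recent_buffer))
        return recent_buffer;       /* pinned, and the tag was re-verified */

    return ReadBuffer(rel, blkno);  /* normal lookup path */
}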
691 
692 /*
693  * ReadBuffer -- a shorthand for ReadBufferExtended, for reading from main
694  * fork with RBM_NORMAL mode and default strategy.
695  */
696 Buffer
697 ReadBuffer(Relation reln, BlockNumber blockNum)
698 {
699  return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
700 }
701 
702 /*
703  * ReadBufferExtended -- returns a buffer containing the requested
704  * block of the requested relation. If the blknum
705  * requested is P_NEW, extend the relation file and
706  * allocate a new block. (Caller is responsible for
707  * ensuring that only one backend tries to extend a
708  * relation at the same time!)
709  *
710  * Returns: the buffer number for the buffer containing
711  * the block read. The returned buffer has been pinned.
712  * Does not return on error --- elog's instead.
713  *
714  * Assume when this function is called, that reln has been opened already.
715  *
716  * In RBM_NORMAL mode, the page is read from disk, and the page header is
717  * validated. An error is thrown if the page header is not valid. (But
718  * note that an all-zero page is considered "valid"; see
719  * PageIsVerifiedExtended().)
720  *
721  * RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
722  * valid, the page is zeroed instead of throwing an error. This is intended
723  * for non-critical data, where the caller is prepared to repair errors.
724  *
725  * In RBM_ZERO_AND_LOCK mode, if the page isn't in buffer cache already, it's
726  * filled with zeros instead of reading it from disk. Useful when the caller
727  * is going to fill the page from scratch, since this saves I/O and avoids
728  * unnecessary failure if the page-on-disk has corrupt page headers.
729  * The page is returned locked to ensure that the caller has a chance to
730  * initialize the page before it's made visible to others.
731  * Caution: do not use this mode to read a page that is beyond the relation's
732  * current physical EOF; that is likely to cause problems in md.c when
733  * the page is modified and written out. P_NEW is OK, though.
734  *
735  * RBM_ZERO_AND_CLEANUP_LOCK is the same as RBM_ZERO_AND_LOCK, but acquires
736  * a cleanup-strength lock on the page.
737  *
738  * RBM_NORMAL_NO_LOG mode is treated the same as RBM_NORMAL here.
739  *
740  * If strategy is not NULL, a nondefault buffer access strategy is used.
741  * See buffer/README for details.
742  */
743 Buffer
744 ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
745  ReadBufferMode mode, BufferAccessStrategy strategy)
746 {
747  bool hit;
748  Buffer buf;
749 
750  /* Open it at the smgr level if not already done */
751  RelationOpenSmgr(reln);
752 
753  /*
754  * Reject attempts to read non-local temporary relations; we would be
755  * likely to get wrong data since we have no visibility into the owning
756  * session's local buffers.
757  */
758  if (RELATION_IS_OTHER_TEMP(reln))
759  ereport(ERROR,
760  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
761  errmsg("cannot access temporary tables of other sessions")));
762 
763  /*
764  * Read the buffer, and update pgstat counters to reflect a cache hit or
765  * miss.
766  */
767  pgstat_count_buffer_read(reln);
768  buf = ReadBuffer_common(reln->rd_smgr, reln->rd_rel->relpersistence,
769  forkNum, blockNum, mode, strategy, &hit);
770  if (hit)
771  pgstat_count_buffer_hit(reln);
772  return buf;
773 }
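/*
 * Illustrative sketch, not part of bufmgr.c: reading blocks with a nondefault
 * access strategy, as a bulk-read caller might.  The small ring created by
 * GetAccessStrategy(BAS_BULKREAD) keeps a large scan from evicting the whole
 * buffer pool (see buffer/README).  The function name is hypothetical.
 */
static void
bulk_scan_sketch(Relation rel)
{
    BufferAccessStrategy strategy = GetAccessStrategy(BAS_BULKREAD);
    BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
    BlockNumber blkno;

    for (blkno = 0; blkno < nblocks; blkno++)
    {
        Buffer      buf;

        buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
                                 strategy);
        /* ... process BufferGetPage(buf) under a content lock ... */
        ReleaseBuffer(buf);
    }

    FreeAccessStrategy(strategy);
}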
774 
775 
776 /*
777  * ReadBufferWithoutRelcache -- like ReadBufferExtended, but doesn't require
778  * a relcache entry for the relation.
779  *
780  * NB: At present, this function may only be used on permanent relations, which
781  * is OK, because we only use it during XLOG replay. If in the future we
782  * want to use it on temporary or unlogged relations, we could pass additional
783  * parameters.
784  */
785 Buffer
786 ReadBufferWithoutRelcache(RelFileNode rnode, ForkNumber forkNum,
787  BlockNumber blockNum, ReadBufferMode mode,
788  BufferAccessStrategy strategy)
789 {
790  bool hit;
791 
792  SMgrRelation smgr = smgropen(rnode, InvalidBackendId);
793 
794  Assert(InRecovery);
795 
796  return ReadBuffer_common(smgr, RELPERSISTENCE_PERMANENT, forkNum, blockNum,
797  mode, strategy, &hit);
798 }
799 
800 
801 /*
802  * ReadBuffer_common -- common logic for all ReadBuffer variants
803  *
804  * *hit is set to true if the request was satisfied from shared buffer cache.
805  */
806 static Buffer
807 ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
808  BlockNumber blockNum, ReadBufferMode mode,
809  BufferAccessStrategy strategy, bool *hit)
810 {
811  BufferDesc *bufHdr;
812  Block bufBlock;
813  bool found;
814  bool isExtend;
815  bool isLocalBuf = SmgrIsTemp(smgr);
816 
817  *hit = false;
818 
819  /* Make sure we will have room to remember the buffer pin */
820  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
821 
822  isExtend = (blockNum == P_NEW);
823 
824  TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
825  smgr->smgr_rnode.node.spcNode,
826  smgr->smgr_rnode.node.dbNode,
827  smgr->smgr_rnode.node.relNode,
828  smgr->smgr_rnode.backend,
829  isExtend);
830 
831  /* Substitute proper block number if caller asked for P_NEW */
832  if (isExtend)
833  blockNum = smgrnblocks(smgr, forkNum);
834 
835  if (isLocalBuf)
836  {
837  bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, &found);
838  if (found)
839  pgBufferUsage.local_blks_hit++;
840  else if (isExtend)
841  pgBufferUsage.local_blks_written++;
842  else if (mode == RBM_NORMAL || mode == RBM_NORMAL_NO_LOG ||
843  mode == RBM_ZERO_ON_ERROR)
844  pgBufferUsage.local_blks_read++;
845  }
846  else
847  {
848  /*
849  * lookup the buffer. IO_IN_PROGRESS is set if the requested block is
850  * not currently in memory.
851  */
852  bufHdr = BufferAlloc(smgr, relpersistence, forkNum, blockNum,
853  strategy, &found);
854  if (found)
855  pgBufferUsage.shared_blks_hit++;
856  else if (isExtend)
857  pgBufferUsage.shared_blks_written++;
858  else if (mode == RBM_NORMAL || mode == RBM_NORMAL_NO_LOG ||
859  mode == RBM_ZERO_ON_ERROR)
860  pgBufferUsage.shared_blks_read++;
861  }
862 
863  /* At this point we do NOT hold any locks. */
864 
865  /* if it was already in the buffer pool, we're done */
866  if (found)
867  {
868  if (!isExtend)
869  {
870  /* Just need to update stats before we exit */
871  *hit = true;
872  VacuumPageHit++;
873 
874  if (VacuumCostActive)
875  VacuumCostBalance += VacuumCostPageHit;
876 
877  TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
878  smgr->smgr_rnode.node.spcNode,
879  smgr->smgr_rnode.node.dbNode,
880  smgr->smgr_rnode.node.relNode,
881  smgr->smgr_rnode.backend,
882  isExtend,
883  found);
884 
885  /*
886  * In RBM_ZERO_AND_LOCK mode the caller expects the page to be
887  * locked on return.
888  */
889  if (!isLocalBuf)
890  {
891  if (mode == RBM_ZERO_AND_LOCK)
892  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr),
893  LW_EXCLUSIVE);
894  else if (mode == RBM_ZERO_AND_CLEANUP_LOCK)
895  LockBufferForCleanup(BufferDescriptorGetBuffer(bufHdr));
896  }
897 
898  return BufferDescriptorGetBuffer(bufHdr);
899  }
900 
901  /*
902  * We get here only in the corner case where we are trying to extend
903  * the relation but we found a pre-existing buffer marked BM_VALID.
904  * This can happen because mdread doesn't complain about reads beyond
905  * EOF (when zero_damaged_pages is ON) and so a previous attempt to
906  * read a block beyond EOF could have left a "valid" zero-filled
907  * buffer. Unfortunately, we have also seen this case occurring
908  * because of buggy Linux kernels that sometimes return an
909  * lseek(SEEK_END) result that doesn't account for a recent write. In
910  * that situation, the pre-existing buffer would contain valid data
911  * that we don't want to overwrite. Since the legitimate case should
912  * always have left a zero-filled buffer, complain if not PageIsNew.
913  */
914  bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
915  if (!PageIsNew((Page) bufBlock))
916  ereport(ERROR,
917  (errmsg("unexpected data beyond EOF in block %u of relation %s",
918  blockNum, relpath(smgr->smgr_rnode, forkNum)),
919  errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
920 
921  /*
922  * We *must* do smgrextend before succeeding, else the page will not
923  * be reserved by the kernel, and the next P_NEW call will decide to
924  * return the same page. Clear the BM_VALID bit, do the StartBufferIO
925  * call that BufferAlloc didn't, and proceed.
926  */
927  if (isLocalBuf)
928  {
929  /* Only need to adjust flags */
930  uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
931 
932  Assert(buf_state & BM_VALID);
933  buf_state &= ~BM_VALID;
934  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
935  }
936  else
937  {
938  /*
939  * Loop to handle the very small possibility that someone re-sets
940  * BM_VALID between our clearing it and StartBufferIO inspecting
941  * it.
942  */
943  do
944  {
945  uint32 buf_state = LockBufHdr(bufHdr);
946 
947  Assert(buf_state & BM_VALID);
948  buf_state &= ~BM_VALID;
949  UnlockBufHdr(bufHdr, buf_state);
950  } while (!StartBufferIO(bufHdr, true));
951  }
952  }
953 
954  /*
955  * if we have gotten to this point, we have allocated a buffer for the
956  * page but its contents are not yet valid. IO_IN_PROGRESS is set for it,
957  * if it's a shared buffer.
958  *
959  * Note: if smgrextend fails, we will end up with a buffer that is
960  * allocated but not marked BM_VALID. P_NEW will still select the same
961  * block number (because the relation didn't get any longer on disk) and
962  * so future attempts to extend the relation will find the same buffer (if
963  * it's not been recycled) but come right back here to try smgrextend
964  * again.
965  */
966  Assert(!(pg_atomic_read_u32(&bufHdr->state) & BM_VALID)); /* spinlock not needed */
967 
968  bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
969 
970  if (isExtend)
971  {
972  /* new buffers are zero-filled */
973  MemSet((char *) bufBlock, 0, BLCKSZ);
974  /* don't set checksum for all-zero page */
975  smgrextend(smgr, forkNum, blockNum, (char *) bufBlock, false);
976 
977  /*
978  * NB: we're *not* doing a ScheduleBufferTagForWriteback here;
979  * although we're essentially performing a write. At least on linux
980  * doing so defeats the 'delayed allocation' mechanism, leading to
981  * increased file fragmentation.
982  */
983  }
984  else
985  {
986  /*
987  * Read in the page, unless the caller intends to overwrite it and
988  * just wants us to allocate a buffer.
989  */
990  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
991  MemSet((char *) bufBlock, 0, BLCKSZ);
992  else
993  {
994  instr_time io_start,
995  io_time;
996 
997  if (track_io_timing)
998  INSTR_TIME_SET_CURRENT(io_start);
999 
1000  smgrread(smgr, forkNum, blockNum, (char *) bufBlock);
1001 
1002  if (track_io_timing)
1003  {
1004  INSTR_TIME_SET_CURRENT(io_time);
1005  INSTR_TIME_SUBTRACT(io_time, io_start);
1006  pgstat_count_buffer_read_time(INSTR_TIME_GET_MICROSEC(io_time));
1007  INSTR_TIME_ADD(pgBufferUsage.blk_read_time, io_time);
1008  }
1009 
1010  /* check for garbage data */
1011  if (!PageIsVerifiedExtended((Page) bufBlock, blockNum,
1012  PIV_LOG_WARNING | PIV_REPORT_STAT))
1013  {
1014  if (mode == RBM_ZERO_ON_ERROR || zero_damaged_pages)
1015  {
1016  ereport(WARNING,
1017  (errcode(ERRCODE_DATA_CORRUPTED),
1018  errmsg("invalid page in block %u of relation %s; zeroing out page",
1019  blockNum,
1020  relpath(smgr->smgr_rnode, forkNum))));
1021  MemSet((char *) bufBlock, 0, BLCKSZ);
1022  }
1023  else
1024  ereport(ERROR,
1025  (errcode(ERRCODE_DATA_CORRUPTED),
1026  errmsg("invalid page in block %u of relation %s",
1027  blockNum,
1028  relpath(smgr->smgr_rnode, forkNum))));
1029  }
1030  }
1031  }
1032 
1033  /*
1034  * In RBM_ZERO_AND_LOCK mode, grab the buffer content lock before marking
1035  * the page as valid, to make sure that no other backend sees the zeroed
1036  * page before the caller has had a chance to initialize it.
1037  *
1038  * Since no-one else can be looking at the page contents yet, there is no
1039  * difference between an exclusive lock and a cleanup-strength lock. (Note
1040  * that we cannot use LockBuffer() or LockBufferForCleanup() here, because
1041  * they assert that the buffer is already valid.)
1042  */
1043  if ((mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK) &&
1044  !isLocalBuf)
1045  {
1046  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_EXCLUSIVE);
1047  }
1048 
1049  if (isLocalBuf)
1050  {
1051  /* Only need to adjust flags */
1052  uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
1053 
1054  buf_state |= BM_VALID;
1055  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
1056  }
1057  else
1058  {
1059  /* Set BM_VALID, terminate IO, and wake up any waiters */
1060  TerminateBufferIO(bufHdr, false, BM_VALID);
1061  }
1062 
1063  VacuumPageMiss++;
1064  if (VacuumCostActive)
1065  VacuumCostBalance += VacuumCostPageMiss;
1066 
1067  TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
1068  smgr->smgr_rnode.node.spcNode,
1069  smgr->smgr_rnode.node.dbNode,
1070  smgr->smgr_rnode.node.relNode,
1071  smgr->smgr_rnode.backend,
1072  isExtend,
1073  found);
1074 
1075  return BufferDescriptorGetBuffer(bufHdr);
1076 }
1077 
1078 /*
1079  * BufferAlloc -- subroutine for ReadBuffer. Handles lookup of a shared
1080  * buffer. If no buffer exists already, selects a replacement
1081  * victim and evicts the old page, but does NOT read in new page.
1082  *
1083  * "strategy" can be a buffer replacement strategy object, or NULL for
1084  * the default strategy. The selected buffer's usage_count is advanced when
1085  * using the default strategy, but otherwise possibly not (see PinBuffer).
1086  *
1087  * The returned buffer is pinned and is already marked as holding the
1088  * desired page. If it already did have the desired page, *foundPtr is
1089  * set true. Otherwise, *foundPtr is set false and the buffer is marked
1090  * as IO_IN_PROGRESS; ReadBuffer will now need to do I/O to fill it.
1091  *
1092  * *foundPtr is actually redundant with the buffer's BM_VALID flag, but
1093  * we keep it for simplicity in ReadBuffer.
1094  *
1095  * No locks are held either at entry or exit.
1096  */
1097 static BufferDesc *
1098 BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
1099  BlockNumber blockNum,
1100  BufferAccessStrategy strategy,
1101  bool *foundPtr)
1102 {
1103  BufferTag newTag; /* identity of requested block */
1104  uint32 newHash; /* hash value for newTag */
1105  LWLock *newPartitionLock; /* buffer partition lock for it */
1106  BufferTag oldTag; /* previous identity of selected buffer */
1107  uint32 oldHash; /* hash value for oldTag */
1108  LWLock *oldPartitionLock; /* buffer partition lock for it */
1109  uint32 oldFlags;
1110  int buf_id;
1111  BufferDesc *buf;
1112  bool valid;
1113  uint32 buf_state;
1114 
1115  /* create a tag so we can lookup the buffer */
1116  INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);
1117 
1118  /* determine its hash code and partition lock ID */
1119  newHash = BufTableHashCode(&newTag);
1120  newPartitionLock = BufMappingPartitionLock(newHash);
1121 
1122  /* see if the block is in the buffer pool already */
1123  LWLockAcquire(newPartitionLock, LW_SHARED);
1124  buf_id = BufTableLookup(&newTag, newHash);
1125  if (buf_id >= 0)
1126  {
1127  /*
1128  * Found it. Now, pin the buffer so no one can steal it from the
1129  * buffer pool, and check to see if the correct data has been loaded
1130  * into the buffer.
1131  */
1132  buf = GetBufferDescriptor(buf_id);
1133 
1134  valid = PinBuffer(buf, strategy);
1135 
1136  /* Can release the mapping lock as soon as we've pinned it */
1137  LWLockRelease(newPartitionLock);
1138 
1139  *foundPtr = true;
1140 
1141  if (!valid)
1142  {
1143  /*
1144  * We can only get here if (a) someone else is still reading in
1145  * the page, or (b) a previous read attempt failed. We have to
1146  * wait for any active read attempt to finish, and then set up our
1147  * own read attempt if the page is still not BM_VALID.
1148  * StartBufferIO does it all.
1149  */
1150  if (StartBufferIO(buf, true))
1151  {
1152  /*
1153  * If we get here, previous attempts to read the buffer must
1154  * have failed ... but we shall bravely try again.
1155  */
1156  *foundPtr = false;
1157  }
1158  }
1159 
1160  return buf;
1161  }
1162 
1163  /*
1164  * Didn't find it in the buffer pool. We'll have to initialize a new
1165  * buffer. Remember to unlock the mapping lock while doing the work.
1166  */
1167  LWLockRelease(newPartitionLock);
1168 
1169  /* Loop here in case we have to try another victim buffer */
1170  for (;;)
1171  {
1172  /*
1173  * Ensure, while the spinlock's not yet held, that there's a free
1174  * refcount entry.
1175  */
1176  ReservePrivateRefCountEntry();
1177 
1178  /*
1179  * Select a victim buffer. The buffer is returned with its header
1180  * spinlock still held!
1181  */
1182  buf = StrategyGetBuffer(strategy, &buf_state);
1183 
1184  Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 0);
1185 
1186  /* Must copy buffer flags while we still hold the spinlock */
1187  oldFlags = buf_state & BUF_FLAG_MASK;
1188 
1189  /* Pin the buffer and then release the buffer spinlock */
1190  PinBuffer_Locked(buf);
1191 
1192  /*
1193  * If the buffer was dirty, try to write it out. There is a race
1194  * condition here, in that someone might dirty it after we released it
1195  * above, or even while we are writing it out (since our share-lock
1196  * won't prevent hint-bit updates). We will recheck the dirty bit
1197  * after re-locking the buffer header.
1198  */
1199  if (oldFlags & BM_DIRTY)
1200  {
1201  /*
1202  * We need a share-lock on the buffer contents to write it out
1203  * (else we might write invalid data, eg because someone else is
1204  * compacting the page contents while we write). We must use a
1205  * conditional lock acquisition here to avoid deadlock. Even
1206  * though the buffer was not pinned (and therefore surely not
1207  * locked) when StrategyGetBuffer returned it, someone else could
1208  * have pinned and exclusive-locked it by the time we get here. If
1209  * we try to get the lock unconditionally, we'd block waiting for
1210  * them; if they later block waiting for us, deadlock ensues.
1211  * (This has been observed to happen when two backends are both
1212  * trying to split btree index pages, and the second one just
1213  * happens to be trying to split the page the first one got from
1214  * StrategyGetBuffer.)
1215  */
1216  if (LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
1217  LW_SHARED))
1218  {
1219  /*
1220  * If using a nondefault strategy, and writing the buffer
1221  * would require a WAL flush, let the strategy decide whether
1222  * to go ahead and write/reuse the buffer or to choose another
1223  * victim. We need lock to inspect the page LSN, so this
1224  * can't be done inside StrategyGetBuffer.
1225  */
1226  if (strategy != NULL)
1227  {
1228  XLogRecPtr lsn;
1229 
1230  /* Read the LSN while holding buffer header lock */
1231  buf_state = LockBufHdr(buf);
1232  lsn = BufferGetLSN(buf);
1233  UnlockBufHdr(buf, buf_state);
1234 
1235  if (XLogNeedsFlush(lsn) &&
1236  StrategyRejectBuffer(strategy, buf))
1237  {
1238  /* Drop lock/pin and loop around for another buffer */
1239  LWLockRelease(BufferDescriptorGetContentLock(buf));
1240  UnpinBuffer(buf, true);
1241  continue;
1242  }
1243  }
1244 
1245  /* OK, do the I/O */
1246  TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_START(forkNum, blockNum,
1247  smgr->smgr_rnode.node.spcNode,
1248  smgr->smgr_rnode.node.dbNode,
1249  smgr->smgr_rnode.node.relNode);
1250 
1251  FlushBuffer(buf, NULL);
1252  LWLockRelease(BufferDescriptorGetContentLock(buf));
1253 
1254  ScheduleBufferTagForWriteback(&BackendWritebackContext,
1255  &buf->tag);
1256 
1257  TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_DONE(forkNum, blockNum,
1258  smgr->smgr_rnode.node.spcNode,
1259  smgr->smgr_rnode.node.dbNode,
1260  smgr->smgr_rnode.node.relNode);
1261  }
1262  else
1263  {
1264  /*
1265  * Someone else has locked the buffer, so give it up and loop
1266  * back to get another one.
1267  */
1268  UnpinBuffer(buf, true);
1269  continue;
1270  }
1271  }
1272 
1273  /*
1274  * To change the association of a valid buffer, we'll need to have
1275  * exclusive lock on both the old and new mapping partitions.
1276  */
1277  if (oldFlags & BM_TAG_VALID)
1278  {
1279  /*
1280  * Need to compute the old tag's hashcode and partition lock ID.
1281  * XXX is it worth storing the hashcode in BufferDesc so we need
1282  * not recompute it here? Probably not.
1283  */
1284  oldTag = buf->tag;
1285  oldHash = BufTableHashCode(&oldTag);
1286  oldPartitionLock = BufMappingPartitionLock(oldHash);
1287 
1288  /*
1289  * Must lock the lower-numbered partition first to avoid
1290  * deadlocks.
1291  */
1292  if (oldPartitionLock < newPartitionLock)
1293  {
1294  LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
1295  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1296  }
1297  else if (oldPartitionLock > newPartitionLock)
1298  {
1299  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1300  LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
1301  }
1302  else
1303  {
1304  /* only one partition, only one lock */
1305  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1306  }
1307  }
1308  else
1309  {
1310  /* if it wasn't valid, we need only the new partition */
1311  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1312  /* remember we have no old-partition lock or tag */
1313  oldPartitionLock = NULL;
1314  /* keep the compiler quiet about uninitialized variables */
1315  oldHash = 0;
1316  }
1317 
1318  /*
1319  * Try to make a hashtable entry for the buffer under its new tag.
1320  * This could fail because while we were writing someone else
1321  * allocated another buffer for the same block we want to read in.
1322  * Note that we have not yet removed the hashtable entry for the old
1323  * tag.
1324  */
1325  buf_id = BufTableInsert(&newTag, newHash, buf->buf_id);
1326 
1327  if (buf_id >= 0)
1328  {
1329  /*
1330  * Got a collision. Someone has already done what we were about to
1331  * do. We'll just handle this as if it were found in the buffer
1332  * pool in the first place. First, give up the buffer we were
1333  * planning to use.
1334  */
1335  UnpinBuffer(buf, true);
1336 
1337  /* Can give up that buffer's mapping partition lock now */
1338  if (oldPartitionLock != NULL &&
1339  oldPartitionLock != newPartitionLock)
1340  LWLockRelease(oldPartitionLock);
1341 
1342  /* remaining code should match code at top of routine */
1343 
1344  buf = GetBufferDescriptor(buf_id);
1345 
1346  valid = PinBuffer(buf, strategy);
1347 
1348  /* Can release the mapping lock as soon as we've pinned it */
1349  LWLockRelease(newPartitionLock);
1350 
1351  *foundPtr = true;
1352 
1353  if (!valid)
1354  {
1355  /*
1356  * We can only get here if (a) someone else is still reading
1357  * in the page, or (b) a previous read attempt failed. We
1358  * have to wait for any active read attempt to finish, and
1359  * then set up our own read attempt if the page is still not
1360  * BM_VALID. StartBufferIO does it all.
1361  */
1362  if (StartBufferIO(buf, true))
1363  {
1364  /*
1365  * If we get here, previous attempts to read the buffer
1366  * must have failed ... but we shall bravely try again.
1367  */
1368  *foundPtr = false;
1369  }
1370  }
1371 
1372  return buf;
1373  }
1374 
1375  /*
1376  * Need to lock the buffer header too in order to change its tag.
1377  */
1378  buf_state = LockBufHdr(buf);
1379 
1380  /*
1381  * Somebody could have pinned or re-dirtied the buffer while we were
1382  * doing the I/O and making the new hashtable entry. If so, we can't
1383  * recycle this buffer; we must undo everything we've done and start
1384  * over with a new victim buffer.
1385  */
1386  oldFlags = buf_state & BUF_FLAG_MASK;
1387  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1 && !(oldFlags & BM_DIRTY))
1388  break;
1389 
1390  UnlockBufHdr(buf, buf_state);
1391  BufTableDelete(&newTag, newHash);
1392  if (oldPartitionLock != NULL &&
1393  oldPartitionLock != newPartitionLock)
1394  LWLockRelease(oldPartitionLock);
1395  LWLockRelease(newPartitionLock);
1396  UnpinBuffer(buf, true);
1397  }
1398 
1399  /*
1400  * Okay, it's finally safe to rename the buffer.
1401  *
1402  * Clearing BM_VALID here is necessary, clearing the dirtybits is just
1403  * paranoia. We also reset the usage_count since any recency of use of
1404  * the old content is no longer relevant. (The usage_count starts out at
1405  * 1 so that the buffer can survive one clock-sweep pass.)
1406  *
1407  * Make sure BM_PERMANENT is set for buffers that must be written at every
1408  * checkpoint. Unlogged buffers only need to be written at shutdown
1409  * checkpoints, except for their "init" forks, which need to be treated
1410  * just like permanent relations.
1411  */
1412  buf->tag = newTag;
1413  buf_state &= ~(BM_VALID | BM_DIRTY | BM_JUST_DIRTIED |
1414  BM_CHECKPOINT_NEEDED | BM_IO_ERROR | BM_PERMANENT |
1415  BUF_USAGECOUNT_MASK);
1416  if (relpersistence == RELPERSISTENCE_PERMANENT || forkNum == INIT_FORKNUM)
1417  buf_state |= BM_TAG_VALID | BM_PERMANENT | BUF_USAGECOUNT_ONE;
1418  else
1419  buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
1420 
1421  UnlockBufHdr(buf, buf_state);
1422 
1423  if (oldPartitionLock != NULL)
1424  {
1425  BufTableDelete(&oldTag, oldHash);
1426  if (oldPartitionLock != newPartitionLock)
1427  LWLockRelease(oldPartitionLock);
1428  }
1429 
1430  LWLockRelease(newPartitionLock);
1431 
1432  /*
1433  * Buffer contents are currently invalid. Try to obtain the right to
1434  * start I/O. If StartBufferIO returns false, then someone else managed
1435  * to read it before we did, so there's nothing left for BufferAlloc() to
1436  * do.
1437  */
1438  if (StartBufferIO(buf, true))
1439  *foundPtr = false;
1440  else
1441  *foundPtr = true;
1442 
1443  return buf;
1444 }
1445 
1446 /*
1447  * InvalidateBuffer -- mark a shared buffer invalid and return it to the
1448  * freelist.
1449  *
1450  * The buffer header spinlock must be held at entry. We drop it before
1451  * returning. (This is sane because the caller must have locked the
1452  * buffer in order to be sure it should be dropped.)
1453  *
1454  * This is used only in contexts such as dropping a relation. We assume
1455  * that no other backend could possibly be interested in using the page,
1456  * so the only reason the buffer might be pinned is if someone else is
1457  * trying to write it out. We have to let them finish before we can
1458  * reclaim the buffer.
1459  *
1460  * The buffer could get reclaimed by someone else while we are waiting
1461  * to acquire the necessary locks; if so, don't mess it up.
1462  */
1463 static void
1464 InvalidateBuffer(BufferDesc *buf)
1465 {
1466  BufferTag oldTag;
1467  uint32 oldHash; /* hash value for oldTag */
1468  LWLock *oldPartitionLock; /* buffer partition lock for it */
1469  uint32 oldFlags;
1470  uint32 buf_state;
1471 
1472  /* Save the original buffer tag before dropping the spinlock */
1473  oldTag = buf->tag;
1474 
1475  buf_state = pg_atomic_read_u32(&buf->state);
1476  Assert(buf_state & BM_LOCKED);
1477  UnlockBufHdr(buf, buf_state);
1478 
1479  /*
1480  * Need to compute the old tag's hashcode and partition lock ID. XXX is it
1481  * worth storing the hashcode in BufferDesc so we need not recompute it
1482  * here? Probably not.
1483  */
1484  oldHash = BufTableHashCode(&oldTag);
1485  oldPartitionLock = BufMappingPartitionLock(oldHash);
1486 
1487 retry:
1488 
1489  /*
1490  * Acquire exclusive mapping lock in preparation for changing the buffer's
1491  * association.
1492  */
1493  LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
1494 
1495  /* Re-lock the buffer header */
1496  buf_state = LockBufHdr(buf);
1497 
1498  /* If it's changed while we were waiting for lock, do nothing */
1499  if (!BUFFERTAGS_EQUAL(buf->tag, oldTag))
1500  {
1501  UnlockBufHdr(buf, buf_state);
1502  LWLockRelease(oldPartitionLock);
1503  return;
1504  }
1505 
1506  /*
1507  * We assume the only reason for it to be pinned is that someone else is
1508  * flushing the page out. Wait for them to finish. (This could be an
1509  * infinite loop if the refcount is messed up... it would be nice to time
1510  * out after awhile, but there seems no way to be sure how many loops may
1511  * be needed. Note that if the other guy has pinned the buffer but not
1512  * yet done StartBufferIO, WaitIO will fall through and we'll effectively
1513  * be busy-looping here.)
1514  */
1515  if (BUF_STATE_GET_REFCOUNT(buf_state) != 0)
1516  {
1517  UnlockBufHdr(buf, buf_state);
1518  LWLockRelease(oldPartitionLock);
1519  /* safety check: should definitely not be our *own* pin */
1520  if (GetPrivateRefCount(BufferDescriptorGetBuffer(buf)) > 0)
1521  elog(ERROR, "buffer is pinned in InvalidateBuffer");
1522  WaitIO(buf);
1523  goto retry;
1524  }
1525 
1526  /*
1527  * Clear out the buffer's tag and flags. We must do this to ensure that
1528  * linear scans of the buffer array don't think the buffer is valid.
1529  */
1530  oldFlags = buf_state & BUF_FLAG_MASK;
1531  CLEAR_BUFFERTAG(buf->tag);
1532  buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
1533  UnlockBufHdr(buf, buf_state);
1534 
1535  /*
1536  * Remove the buffer from the lookup hashtable, if it was in there.
1537  */
1538  if (oldFlags & BM_TAG_VALID)
1539  BufTableDelete(&oldTag, oldHash);
1540 
1541  /*
1542  * Done with mapping lock.
1543  */
1544  LWLockRelease(oldPartitionLock);
1545 
1546  /*
1547  * Insert the buffer at the head of the list of free buffers.
1548  */
1549  StrategyFreeBuffer(buf);
1550 }
1551 
1552 /*
1553  * MarkBufferDirty
1554  *
1555  * Marks buffer contents as dirty (actual write happens later).
1556  *
1557  * Buffer must be pinned and exclusive-locked. (If caller does not hold
1558  * exclusive lock, then somebody could be in process of writing the buffer,
1559  * leading to risk of bad data written to disk.)
1560  */
1561 void
1562 MarkBufferDirty(Buffer buffer)
1563 {
1564  BufferDesc *bufHdr;
1565  uint32 buf_state;
1566  uint32 old_buf_state;
1567 
1568  if (!BufferIsValid(buffer))
1569  elog(ERROR, "bad buffer ID: %d", buffer);
1570 
1571  if (BufferIsLocal(buffer))
1572  {
1573  MarkLocalBufferDirty(buffer);
1574  return;
1575  }
1576 
1577  bufHdr = GetBufferDescriptor(buffer - 1);
1578 
1579  Assert(BufferIsPinned(buffer));
1580  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
1581  LW_EXCLUSIVE));
1582 
1583  old_buf_state = pg_atomic_read_u32(&bufHdr->state);
1584  for (;;)
1585  {
1586  if (old_buf_state & BM_LOCKED)
1587  old_buf_state = WaitBufHdrUnlocked(bufHdr);
1588 
1589  buf_state = old_buf_state;
1590 
1591  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1592  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
1593 
1594  if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
1595  buf_state))
1596  break;
1597  }
1598 
1599  /*
1600  * If the buffer was not dirty already, do vacuum accounting.
1601  */
1602  if (!(old_buf_state & BM_DIRTY))
1603  {
1604  VacuumPageDirty++;
1605  pgBufferUsage.shared_blks_dirtied++;
1606  if (VacuumCostActive)
1607  VacuumCostBalance += VacuumCostPageDirty;
1608  }
1609 }
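/*
 * Illustrative sketch, not part of bufmgr.c: the protocol the comment above
 * requires.  The buffer is pinned by ReadBuffer() and held in
 * BUFFER_LOCK_EXCLUSIVE mode across the page change and MarkBufferDirty();
 * with only a shared lock, a concurrent flush could write out a half-modified
 * page.  WAL-logging of the change is omitted here; see access/transam/README
 * for the full pattern.  The function name is hypothetical.
 */
static void
modify_block_sketch(Relation rel, BlockNumber blkno)
{
    Buffer      buf = ReadBuffer(rel, blkno);

    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

    START_CRIT_SECTION();
    /* ... apply the change to BufferGetPage(buf) and register WAL here ... */
    MarkBufferDirty(buf);       /* the disk write happens later */
    END_CRIT_SECTION();

    LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    ReleaseBuffer(buf);
}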
1610 
1611 /*
1612  * ReleaseAndReadBuffer -- combine ReleaseBuffer() and ReadBuffer()
1613  *
1614  * Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
1615  * compared to calling the two routines separately. Now it's mainly just
1616  * a convenience function. However, if the passed buffer is valid and
1617  * already contains the desired block, we just return it as-is; and that
1618  * does save considerable work compared to a full release and reacquire.
1619  *
1620  * Note: it is OK to pass buffer == InvalidBuffer, indicating that no old
1621  * buffer actually needs to be released. This case is the same as ReadBuffer,
1622  * but can save some tests in the caller.
1623  */
1624 Buffer
1625 ReleaseAndReadBuffer(Buffer buffer,
1626  Relation relation,
1627  BlockNumber blockNum)
1628 {
1629  ForkNumber forkNum = MAIN_FORKNUM;
1630  BufferDesc *bufHdr;
1631 
1632  if (BufferIsValid(buffer))
1633  {
1634  Assert(BufferIsPinned(buffer));
1635  if (BufferIsLocal(buffer))
1636  {
1637  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
1638  if (bufHdr->tag.blockNum == blockNum &&
1639  RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
1640  bufHdr->tag.forkNum == forkNum)
1641  return buffer;
1642  ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
1643  LocalRefCount[-buffer - 1]--;
1644  }
1645  else
1646  {
1647  bufHdr = GetBufferDescriptor(buffer - 1);
1648  /* we have pin, so it's ok to examine tag without spinlock */
1649  if (bufHdr->tag.blockNum == blockNum &&
1650  RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
1651  bufHdr->tag.forkNum == forkNum)
1652  return buffer;
1653  UnpinBuffer(bufHdr, true);
1654  }
1655  }
1656 
1657  return ReadBuffer(relation, blockNum);
1658 }
1659 
1660 /*
1661  * PinBuffer -- make buffer unavailable for replacement.
1662  *
1663  * For the default access strategy, the buffer's usage_count is incremented
1664  * when we first pin it; for other strategies we just make sure the usage_count
1665  * isn't zero. (The idea of the latter is that we don't want synchronized
1666  * heap scans to inflate the count, but we need it to not be zero to discourage
1667  * other backends from stealing buffers from our ring. As long as we cycle
1668  * through the ring faster than the global clock-sweep cycles, buffers in
1669  * our ring won't be chosen as victims for replacement by other backends.)
1670  *
1671  * This should be applied only to shared buffers, never local ones.
1672  *
1673  * Since buffers are pinned/unpinned very frequently, pin buffers without
1674  * taking the buffer header lock; instead update the state variable in loop of
1675  * CAS operations. Hopefully it's just a single CAS.
1676  *
1677  * Note that ResourceOwnerEnlargeBuffers must have been done already.
1678  *
1679  * Returns true if buffer is BM_VALID, else false. This provision allows
1680  * some callers to avoid an extra spinlock cycle.
1681  */
1682 static bool
1683 PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
1684 {
1685  Buffer b = BufferDescriptorGetBuffer(buf);
1686  bool result;
1687  PrivateRefCountEntry *ref;
1688 
1689  ref = GetPrivateRefCountEntry(b, true);
1690 
1691  if (ref == NULL)
1692  {
1693  uint32 buf_state;
1694  uint32 old_buf_state;
1695 
1696  ReservePrivateRefCountEntry();
1697  ref = NewPrivateRefCountEntry(b);
1698 
1699  old_buf_state = pg_atomic_read_u32(&buf->state);
1700  for (;;)
1701  {
1702  if (old_buf_state & BM_LOCKED)
1703  old_buf_state = WaitBufHdrUnlocked(buf);
1704 
1705  buf_state = old_buf_state;
1706 
1707  /* increase refcount */
1708  buf_state += BUF_REFCOUNT_ONE;
1709 
1710  if (strategy == NULL)
1711  {
1712  /* Default case: increase usagecount unless already max. */
1713  if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
1714  buf_state += BUF_USAGECOUNT_ONE;
1715  }
1716  else
1717  {
1718  /*
1719  * Ring buffers shouldn't evict others from pool. Thus we
1720  * don't make usagecount more than 1.
1721  */
1722  if (BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
1723  buf_state += BUF_USAGECOUNT_ONE;
1724  }
1725 
1726  if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
1727  buf_state))
1728  {
1729  result = (buf_state & BM_VALID) != 0;
1730 
1731  /*
1732  * Assume that we acquired a buffer pin for the purposes of
1733  * Valgrind buffer client checks (even in !result case) to
1734  * keep things simple. Buffers that are unsafe to access are
1735  * not generally guaranteed to be marked undefined or
1736  * non-accessible in any case.
1737  */
1738  VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), BLCKSZ);
1739  break;
1740  }
1741  }
1742  }
1743  else
1744  {
1745  /*
1746  * If we previously pinned the buffer, it must surely be valid.
1747  *
1748  * Note: We deliberately avoid a Valgrind client request here.
1749  * Individual access methods can optionally superimpose buffer page
1750  * client requests on top of our client requests to enforce that
1751  * buffers are only accessed while locked (and pinned). It's possible
1752  * that the buffer page is legitimately non-accessible here. We
1753  * cannot meddle with that.
1754  */
1755  result = true;
1756  }
1757 
1758  ref->refcount++;
1759  Assert(ref->refcount > 0);
1760  ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
1761  return result;
1762 }
1763 
1764 /*
1765  * PinBuffer_Locked -- as above, but caller already locked the buffer header.
1766  * The spinlock is released before return.
1767  *
1768  * As this function is called with the spinlock held, the caller has to
1769  * previously call ReservePrivateRefCountEntry().
1770  *
1771  * Currently, no callers of this function want to modify the buffer's
1772  * usage_count at all, so there's no need for a strategy parameter.
1773  * Also we don't bother with a BM_VALID test (the caller could check that for
1774  * itself).
1775  *
1776  * Also all callers only ever use this function when it's known that the
1777  * buffer can't have a preexisting pin by this backend. That allows us to skip
1778  * searching the private refcount array & hash, which is a boon, because the
1779  * spinlock is still held.
1780  *
1781  * Note: use of this routine is frequently mandatory, not just an optimization
1782  * to save a spin lock/unlock cycle, because we need to pin a buffer before
1783  * its state can change under us.
1784  */
1785 static void
1786 PinBuffer_Locked(BufferDesc *buf)
1787 {
1788  Buffer b;
1789  PrivateRefCountEntry *ref;
1790  uint32 buf_state;
1791 
1792  /*
1793  * As explained, we don't expect any preexisting pins. That allows us to
1794  * manipulate the PrivateRefCount after releasing the spinlock.
1795  */
1796  Assert(GetPrivateRefCountEntry(BufferDescriptorGetBuffer(buf), false) == NULL);
1797 
1798  /*
1799  * Buffer can't have a preexisting pin, so mark its page as defined to
1800  * Valgrind (this is similar to the PinBuffer() case where the backend
1801  * doesn't already have a buffer pin)
1802  */
1803  VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), BLCKSZ);
1804 
1805  /*
1806  * Since we hold the buffer spinlock, we can update the buffer state and
1807  * release the lock in one operation.
1808  */
1809  buf_state = pg_atomic_read_u32(&buf->state);
1810  Assert(buf_state & BM_LOCKED);
1811  buf_state += BUF_REFCOUNT_ONE;
1812  UnlockBufHdr(buf, buf_state);
1813 
1814  b = BufferDescriptorGetBuffer(buf);
1815 
1816  ref = NewPrivateRefCountEntry(b);
1817  ref->refcount++;
1818 
1819  ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
1820 }
1821 
1822 /*
1823  * UnpinBuffer -- make buffer available for replacement.
1824  *
1825  * This should be applied only to shared buffers, never local ones.
1826  *
1827  * Most but not all callers want CurrentResourceOwner to be adjusted.
1828  * Those that don't should pass fixOwner = false.
1829  */
1830 static void
1831 UnpinBuffer(BufferDesc *buf, bool fixOwner)
1832 {
1833  PrivateRefCountEntry *ref;
1834  Buffer b = BufferDescriptorGetBuffer(buf);
1835 
1836  /* not moving as we're likely deleting it soon anyway */
1837  ref = GetPrivateRefCountEntry(b, false);
1838  Assert(ref != NULL);
1839 
1840  if (fixOwner)
1841  ResourceOwnerForgetBuffer(CurrentResourceOwner, b);
1842 
1843  Assert(ref->refcount > 0);
1844  ref->refcount--;
1845  if (ref->refcount == 0)
1846  {
1847  uint32 buf_state;
1848  uint32 old_buf_state;
1849 
1850  /*
1851  * Mark buffer non-accessible to Valgrind.
1852  *
1853  * Note that the buffer may have already been marked non-accessible
1854  * within access method code that enforces that buffers are only
1855  * accessed while a buffer lock is held.
1856  */
1857  VALGRIND_MAKE_MEM_NOACCESS(BufHdrGetBlock(buf), BLCKSZ);
1858 
1859  /* I'd better not still hold the buffer content lock */
1860  Assert(!LWLockHeldByMe(BufferDescriptorGetContentLock(buf)));
1861 
1862  /*
1863  * Decrement the shared reference count.
1864  *
1865  * Since buffer spinlock holder can update status using just write,
1866  * it's not safe to use atomic decrement here; thus use a CAS loop.
1867  */
1868  old_buf_state = pg_atomic_read_u32(&buf->state);
1869  for (;;)
1870  {
1871  if (old_buf_state & BM_LOCKED)
1872  old_buf_state = WaitBufHdrUnlocked(buf);
1873 
1874  buf_state = old_buf_state;
1875 
1876  buf_state -= BUF_REFCOUNT_ONE;
1877 
1878  if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
1879  buf_state))
1880  break;
1881  }
1882 
1883  /* Support LockBufferForCleanup() */
1884  if (buf_state & BM_PIN_COUNT_WAITER)
1885  {
1886  /*
1887  * Acquire the buffer header lock, re-check that there's a waiter.
1888  * Another backend could have unpinned this buffer, and already
1889  * woken up the waiter. There's no danger of the buffer being
1890  * replaced after we unpinned it above, as it's pinned by the
1891  * waiter.
1892  */
1893  buf_state = LockBufHdr(buf);
1894 
1895  if ((buf_state & BM_PIN_COUNT_WAITER) &&
1896  BUF_STATE_GET_REFCOUNT(buf_state) == 1)
1897  {
1898  /* we just released the last pin other than the waiter's */
1899  int wait_backend_pid = buf->wait_backend_pid;
1900 
1901  buf_state &= ~BM_PIN_COUNT_WAITER;
1902  UnlockBufHdr(buf, buf_state);
1903  ProcSendSignal(wait_backend_pid);
1904  }
1905  else
1906  UnlockBufHdr(buf, buf_state);
1907  }
1908  ForgetPrivateRefCountEntry(ref);
1909 
1910 }
1911 
1912 #define ST_SORT sort_checkpoint_bufferids
1913 #define ST_ELEMENT_TYPE CkptSortItem
1914 #define ST_COMPARE(a, b) ckpt_buforder_comparator(a, b)
1915 #define ST_SCOPE static
1916 #define ST_DEFINE
1917 #include "lib/sort_template.h"
1918 
1919 /*
1920  * BufferSync -- Write out all dirty buffers in the pool.
1921  *
1922  * This is called at checkpoint time to write out all dirty shared buffers.
1923  * The checkpoint request flags should be passed in. If CHECKPOINT_IMMEDIATE
1924  * is set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN,
1925  * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_ALL is set, we write even
1926  * unlogged buffers, which are otherwise skipped. The remaining flags
1927  * currently have no effect here.
1928  */
1929 static void
1930 BufferSync(int flags)
1931 {
1932  uint32 buf_state;
1933  int buf_id;
1934  int num_to_scan;
1935  int num_spaces;
1936  int num_processed;
1937  int num_written;
1938  CkptTsStatus *per_ts_stat = NULL;
1939  Oid last_tsid;
1940  binaryheap *ts_heap;
1941  int i;
1942  int mask = BM_DIRTY;
1943  WritebackContext wb_context;
1944 
1945  /* Make sure we can handle the pin inside SyncOneBuffer */
1946  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
1947 
1948  /*
1949  * Unless this is a shutdown checkpoint or we have been explicitly told,
1950  * we write only permanent, dirty buffers. But at shutdown or end of
1951  * recovery, we write all dirty buffers.
1952  */
1953  if (!((flags & (CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_END_OF_RECOVERY |
1954  CHECKPOINT_FLUSH_ALL))))
1955  mask |= BM_PERMANENT;
1956 
1957  /*
1958  * Loop over all buffers, and mark the ones that need to be written with
1959  * BM_CHECKPOINT_NEEDED. Count them as we go (num_to_scan), so that we
1960  * can estimate how much work needs to be done.
1961  *
1962  * This allows us to write only those pages that were dirty when the
1963  * checkpoint began, and not those that get dirtied while it proceeds.
1964  * Whenever a page with BM_CHECKPOINT_NEEDED is written out, either by us
1965  * later in this function, or by normal backends or the bgwriter cleaning
1966  * scan, the flag is cleared. Any buffer dirtied after this point won't
1967  * have the flag set.
1968  *
1969  * Note that if we fail to write some buffer, we may leave buffers with
1970  * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
1971  * certainly need to be written for the next checkpoint attempt, too.
1972  */
1973  num_to_scan = 0;
1974  for (buf_id = 0; buf_id < NBuffers; buf_id++)
1975  {
1976  BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
1977 
1978  /*
1979  * Header spinlock is enough to examine BM_DIRTY, see comment in
1980  * SyncOneBuffer.
1981  */
1982  buf_state = LockBufHdr(bufHdr);
1983 
1984  if ((buf_state & mask) == mask)
1985  {
1986  CkptSortItem *item;
1987 
1988  buf_state |= BM_CHECKPOINT_NEEDED;
1989 
1990  item = &CkptBufferIds[num_to_scan++];
1991  item->buf_id = buf_id;
1992  item->tsId = bufHdr->tag.rnode.spcNode;
1993  item->relNode = bufHdr->tag.rnode.relNode;
1994  item->forkNum = bufHdr->tag.forkNum;
1995  item->blockNum = bufHdr->tag.blockNum;
1996  }
1997 
1998  UnlockBufHdr(bufHdr, buf_state);
1999 
2000  /* Check for barrier events in case NBuffers is large. */
2001  if (ProcSignalBarrierPending)
2002  ProcessProcSignalBarrier();
2003  }
2004 
2005  if (num_to_scan == 0)
2006  return; /* nothing to do */
2007 
2008  WritebackContextInit(&wb_context, &checkpoint_flush_after);
2009 
2010  TRACE_POSTGRESQL_BUFFER_SYNC_START(NBuffers, num_to_scan);
2011 
2012  /*
2013  * Sort buffers that need to be written to reduce the likelihood of random
2014  * IO. The sorting is also important for the implementation of balancing
2015  * writes between tablespaces. Without balancing writes we'd potentially
2016  * end up writing to the tablespaces one-by-one; possibly overloading the
2017  * underlying system.
2018  */
2019  sort_checkpoint_bufferids(CkptBufferIds, num_to_scan);
2020 
2021  num_spaces = 0;
2022 
2023  /*
2024  * Allocate progress status for each tablespace with buffers that need to
2025  * be flushed. This requires the to-be-flushed array to be sorted.
2026  */
2027  last_tsid = InvalidOid;
2028  for (i = 0; i < num_to_scan; i++)
2029  {
2030  CkptTsStatus *s;
2031  Oid cur_tsid;
2032 
2033  cur_tsid = CkptBufferIds[i].tsId;
2034 
2035  /*
2036  * Grow array of per-tablespace status structs, every time a new
2037  * tablespace is found.
2038  */
2039  if (last_tsid == InvalidOid || last_tsid != cur_tsid)
2040  {
2041  Size sz;
2042 
2043  num_spaces++;
2044 
2045  /*
2046  * Not worth adding grow-by-power-of-2 logic here - even with a
2047  * few hundred tablespaces this should be fine.
2048  */
2049  sz = sizeof(CkptTsStatus) * num_spaces;
2050 
2051  if (per_ts_stat == NULL)
2052  per_ts_stat = (CkptTsStatus *) palloc(sz);
2053  else
2054  per_ts_stat = (CkptTsStatus *) repalloc(per_ts_stat, sz);
2055 
2056  s = &per_ts_stat[num_spaces - 1];
2057  memset(s, 0, sizeof(*s));
2058  s->tsId = cur_tsid;
2059 
2060  /*
2061  * The first buffer in this tablespace. As CkptBufferIds is sorted
2062  * by tablespace all (s->num_to_scan) buffers in this tablespace
2063  * will follow afterwards.
2064  */
2065  s->index = i;
2066 
2067  /*
2068  * progress_slice will be determined once we know how many buffers
2069  * are in each tablespace, i.e. after this loop.
2070  */
2071 
2072  last_tsid = cur_tsid;
2073  }
2074  else
2075  {
2076  s = &per_ts_stat[num_spaces - 1];
2077  }
2078 
2079  s->num_to_scan++;
2080 
2081  /* Check for barrier events. */
2082  if (ProcSignalBarrierPending)
2083  ProcessProcSignalBarrier();
2084  }
2085 
2086  Assert(num_spaces > 0);
2087 
2088  /*
2089  * Build a min-heap over the write-progress in the individual tablespaces,
2090  * and compute how large a portion of the total progress a single
2091  * processed buffer is.
2092  */
2093  ts_heap = binaryheap_allocate(num_spaces,
2094  ts_ckpt_progress_comparator,
2095  NULL);
2096 
2097  for (i = 0; i < num_spaces; i++)
2098  {
2099  CkptTsStatus *ts_stat = &per_ts_stat[i];
2100 
2101  ts_stat->progress_slice = (float8) num_to_scan / ts_stat->num_to_scan;
2102 
2103  binaryheap_add_unordered(ts_heap, PointerGetDatum(ts_stat));
2104  }
2105 
2106  binaryheap_build(ts_heap);
2107 
2108  /*
2109  * Iterate through to-be-checkpointed buffers and write the ones (still)
2110  * marked with BM_CHECKPOINT_NEEDED. The writes are balanced between
2111  * tablespaces; otherwise the sorting would lead to only one tablespace
2112  * receiving writes at a time, making inefficient use of the hardware.
2113  */
2114  num_processed = 0;
2115  num_written = 0;
2116  while (!binaryheap_empty(ts_heap))
2117  {
2118  BufferDesc *bufHdr = NULL;
2119  CkptTsStatus *ts_stat = (CkptTsStatus *)
2120  DatumGetPointer(binaryheap_first(ts_heap));
2121 
2122  buf_id = CkptBufferIds[ts_stat->index].buf_id;
2123  Assert(buf_id != -1);
2124 
2125  bufHdr = GetBufferDescriptor(buf_id);
2126 
2127  num_processed++;
2128 
2129  /*
2130  * We don't need to acquire the lock here, because we're only looking
2131  * at a single bit. It's possible that someone else writes the buffer
2132  * and clears the flag right after we check, but that doesn't matter
2133  * since SyncOneBuffer will then do nothing. However, there is a
2134  * further race condition: it's conceivable that between the time we
2135  * examine the bit here and the time SyncOneBuffer acquires the lock,
2136  * someone else not only wrote the buffer but replaced it with another
2137  * page and dirtied it. In that improbable case, SyncOneBuffer will
2138  * write the buffer though we didn't need to. It doesn't seem worth
2139  * guarding against this, though.
2140  */
2141  if (pg_atomic_read_u32(&bufHdr->state) & BM_CHECKPOINT_NEEDED)
2142  {
2143  if (SyncOneBuffer(buf_id, false, &wb_context) & BUF_WRITTEN)
2144  {
2145  TRACE_POSTGRESQL_BUFFER_SYNC_WRITTEN(buf_id);
2146  BgWriterStats.m_buf_written_checkpoints++;
2147  num_written++;
2148  }
2149  }
2150 
2151  /*
2152  * Measure progress independently of actually having to flush the buffer
2153  * - otherwise writing becomes unbalanced.
2154  */
2155  ts_stat->progress += ts_stat->progress_slice;
2156  ts_stat->num_scanned++;
2157  ts_stat->index++;
2158 
2159  /* Have all the buffers from the tablespace been processed? */
2160  if (ts_stat->num_scanned == ts_stat->num_to_scan)
2161  {
2162  binaryheap_remove_first(ts_heap);
2163  }
2164  else
2165  {
2166  /* update heap with the new progress */
2167  binaryheap_replace_first(ts_heap, PointerGetDatum(ts_stat));
2168  }
2169 
2170  /*
2171  * Sleep to throttle our I/O rate.
2172  *
2173  * (This will check for barrier events even if it doesn't sleep.)
2174  */
2175  CheckpointWriteDelay(flags, (double) num_processed / num_to_scan);
2176  }
2177 
2178  /* issue all pending flushes */
2179  IssuePendingWritebacks(&wb_context);
2180 
2181  pfree(per_ts_stat);
2182  per_ts_stat = NULL;
2183  binaryheap_free(ts_heap);
2184 
2185  /*
2186  * Update checkpoint statistics. As noted above, this doesn't include
2187  * buffers written by other backends or bgwriter scan.
2188  */
2189  CheckpointStats.ckpt_bufs_written += num_written;
2190 
2191  TRACE_POSTGRESQL_BUFFER_SYNC_DONE(NBuffers, num_written, num_to_scan);
2192 }
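/*
 * Worked example (illustration only) of the balancing arithmetic above.
 * Suppose num_to_scan = 1100 to-be-checkpointed buffers split across two
 * tablespaces, 1000 in A and 100 in B.  Then
 *
 *     progress_slice(A) = 1100 / 1000 = 1.1
 *     progress_slice(B) = 1100 / 100  = 11.0
 *
 * Each buffer processed advances its tablespace's progress by its slice,
 * so both tablespaces reach the common end value of 1100 at the same time.
 * Because the min-heap always yields the tablespace that is furthest
 * behind, the writes interleave at roughly ten buffers from A for every
 * one from B instead of draining one tablespace first.
 */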
2193 
2194 /*
2195  * BgBufferSync -- Write out some dirty buffers in the pool.
2196  *
2197  * This is called periodically by the background writer process.
2198  *
2199  * Returns true if it's appropriate for the bgwriter process to go into
2200  * low-power hibernation mode. (This happens if the strategy clock sweep
2201  * has been "lapped" and no buffer allocations have occurred recently,
2202  * or if the bgwriter has been effectively disabled by setting
2203  * bgwriter_lru_maxpages to 0.)
2204  */
2205 bool
2206 BgBufferSync(WritebackContext *wb_context)
2207 {
2208  /* info obtained from freelist.c */
2209  int strategy_buf_id;
2210  uint32 strategy_passes;
2211  uint32 recent_alloc;
2212 
2213  /*
2214  * Information saved between calls so we can determine the strategy
2215  * point's advance rate and avoid scanning already-cleaned buffers.
2216  */
2217  static bool saved_info_valid = false;
2218  static int prev_strategy_buf_id;
2219  static uint32 prev_strategy_passes;
2220  static int next_to_clean;
2221  static uint32 next_passes;
2222 
2223  /* Moving averages of allocation rate and clean-buffer density */
2224  static float smoothed_alloc = 0;
2225  static float smoothed_density = 10.0;
2226 
2227  /* Potentially these could be tunables, but for now, not */
2228  float smoothing_samples = 16;
2229  float scan_whole_pool_milliseconds = 120000.0;
2230 
2231  /* Used to compute how far we scan ahead */
2232  long strategy_delta;
2233  int bufs_to_lap;
2234  int bufs_ahead;
2235  float scans_per_alloc;
2236  int reusable_buffers_est;
2237  int upcoming_alloc_est;
2238  int min_scan_buffers;
2239 
2240  /* Variables for the scanning loop proper */
2241  int num_to_scan;
2242  int num_written;
2243  int reusable_buffers;
2244 
2245  /* Variables for final smoothed_density update */
2246  long new_strategy_delta;
2247  uint32 new_recent_alloc;
2248 
2249  /*
2250  * Find out where the freelist clock sweep currently is, and how many
2251  * buffer allocations have happened since our last call.
2252  */
2253  strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
2254 
2255  /* Report buffer alloc counts to pgstat */
2256  BgWriterStats.m_buf_alloc += recent_alloc;
2257 
2258  /*
2259  * If we're not running the LRU scan, just stop after doing the stats
2260  * stuff. We mark the saved state invalid so that we can recover sanely
2261  * if LRU scan is turned back on later.
2262  */
2263  if (bgwriter_lru_maxpages <= 0)
2264  {
2265  saved_info_valid = false;
2266  return true;
2267  }
2268 
2269  /*
2270  * Compute strategy_delta = how many buffers have been scanned by the
2271  * clock sweep since last time. If first time through, assume none. Then
2272  * see if we are still ahead of the clock sweep, and if so, how many
2273  * buffers we could scan before we'd catch up with it and "lap" it. Note:
2274  * weird-looking coding of the xxx_passes comparisons is to avoid bogus
2275  * behavior when the passes counts wrap around.
2276  */
2277  if (saved_info_valid)
2278  {
2279  int32 passes_delta = strategy_passes - prev_strategy_passes;
2280 
2281  strategy_delta = strategy_buf_id - prev_strategy_buf_id;
2282  strategy_delta += (long) passes_delta * NBuffers;
2283 
2284  Assert(strategy_delta >= 0);
2285 
2286  if ((int32) (next_passes - strategy_passes) > 0)
2287  {
2288  /* we're one pass ahead of the strategy point */
2289  bufs_to_lap = strategy_buf_id - next_to_clean;
2290 #ifdef BGW_DEBUG
2291  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2292  next_passes, next_to_clean,
2293  strategy_passes, strategy_buf_id,
2294  strategy_delta, bufs_to_lap);
2295 #endif
2296  }
2297  else if (next_passes == strategy_passes &&
2298  next_to_clean >= strategy_buf_id)
2299  {
2300  /* on same pass, but ahead or at least not behind */
2301  bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
2302 #ifdef BGW_DEBUG
2303  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2304  next_passes, next_to_clean,
2305  strategy_passes, strategy_buf_id,
2306  strategy_delta, bufs_to_lap);
2307 #endif
2308  }
2309  else
2310  {
2311  /*
2312  * We're behind, so skip forward to the strategy point and start
2313  * cleaning from there.
2314  */
2315 #ifdef BGW_DEBUG
2316  elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
2317  next_passes, next_to_clean,
2318  strategy_passes, strategy_buf_id,
2319  strategy_delta);
2320 #endif
2321  next_to_clean = strategy_buf_id;
2322  next_passes = strategy_passes;
2323  bufs_to_lap = NBuffers;
2324  }
2325  }
2326  else
2327  {
2328  /*
2329  * Initializing at startup or after LRU scanning had been off. Always
2330  * start at the strategy point.
2331  */
2332 #ifdef BGW_DEBUG
2333  elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
2334  strategy_passes, strategy_buf_id);
2335 #endif
2336  strategy_delta = 0;
2337  next_to_clean = strategy_buf_id;
2338  next_passes = strategy_passes;
2339  bufs_to_lap = NBuffers;
2340  }
2341 
2342  /* Update saved info for next time */
2343  prev_strategy_buf_id = strategy_buf_id;
2344  prev_strategy_passes = strategy_passes;
2345  saved_info_valid = true;
2346 
2347  /*
2348  * Compute how many buffers had to be scanned for each new allocation, ie,
2349  * 1/density of reusable buffers, and track a moving average of that.
2350  *
2351  * If the strategy point didn't move, we don't update the density estimate
2352  */
2353  if (strategy_delta > 0 && recent_alloc > 0)
2354  {
2355  scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
2356  smoothed_density += (scans_per_alloc - smoothed_density) /
2357  smoothing_samples;
2358  }
2359 
2360  /*
2361  * Estimate how many reusable buffers there are between the current
2362  * strategy point and where we've scanned ahead to, based on the smoothed
2363  * density estimate.
2364  */
2365  bufs_ahead = NBuffers - bufs_to_lap;
2366  reusable_buffers_est = (float) bufs_ahead / smoothed_density;
2367 
2368  /*
2369  * Track a moving average of recent buffer allocations. Here, rather than
2370  * a true average we want a fast-attack, slow-decline behavior: we
2371  * immediately follow any increase.
2372  */
2373  if (smoothed_alloc <= (float) recent_alloc)
2374  smoothed_alloc = recent_alloc;
2375  else
2376  smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
2377  smoothing_samples;
2378 
2379  /* Scale the estimate by a GUC to allow more aggressive tuning. */
2380  upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
2381 
2382  /*
2383  * If recent_alloc remains at zero for many cycles, smoothed_alloc will
2384  * eventually underflow to zero, and the underflows produce annoying
2385  * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
2386  * zero, there's no point in tracking smaller and smaller values of
2387  * smoothed_alloc, so just reset it to exactly zero to avoid this
2388  * syndrome. It will pop back up as soon as recent_alloc increases.
2389  */
2390  if (upcoming_alloc_est == 0)
2391  smoothed_alloc = 0;
2392 
2393  /*
2394  * Even in cases where there's been little or no buffer allocation
2395  * activity, we want to make a small amount of progress through the buffer
2396  * cache so that as many reusable buffers as possible are clean after an
2397  * idle period.
2398  *
2399  * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
2400  * the BGW will be called during the scan_whole_pool time; slice the
2401  * buffer pool into that many sections.
2402  */
2403  min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
2404 
2405  if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
2406  {
2407 #ifdef BGW_DEBUG
2408  elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
2409  upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
2410 #endif
2411  upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
2412  }
2413 
2414  /*
2415  * Now write out dirty reusable buffers, working forward from the
2416  * next_to_clean point, until we have lapped the strategy scan, or cleaned
2417  * enough buffers to match our estimate of the next cycle's allocation
2418  * requirements, or hit the bgwriter_lru_maxpages limit.
2419  */
2420 
2421  /* Make sure we can handle the pin inside SyncOneBuffer */
2422  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
2423 
2424  num_to_scan = bufs_to_lap;
2425  num_written = 0;
2426  reusable_buffers = reusable_buffers_est;
2427 
2428  /* Execute the LRU scan */
2429  while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
2430  {
2431  int sync_state = SyncOneBuffer(next_to_clean, true,
2432  wb_context);
2433 
2434  if (++next_to_clean >= NBuffers)
2435  {
2436  next_to_clean = 0;
2437  next_passes++;
2438  }
2439  num_to_scan--;
2440 
2441  if (sync_state & BUF_WRITTEN)
2442  {
2443  reusable_buffers++;
2444  if (++num_written >= bgwriter_lru_maxpages)
2445  {
2446  BgWriterStats.m_maxwritten_clean++;
2447  break;
2448  }
2449  }
2450  else if (sync_state & BUF_REUSABLE)
2451  reusable_buffers++;
2452  }
2453 
2454  BgWriterStats.m_buf_written_clean += num_written;
2455 
2456 #ifdef BGW_DEBUG
2457  elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
2458  recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
2459  smoothed_density, reusable_buffers_est, upcoming_alloc_est,
2460  bufs_to_lap - num_to_scan,
2461  num_written,
2462  reusable_buffers - reusable_buffers_est);
2463 #endif
2464 
2465  /*
2466  * Consider the above scan as being like a new allocation scan.
2467  * Characterize its density and update the smoothed one based on it. This
2468  * effectively halves the moving average period in cases where both the
2469  * strategy and the background writer are doing some useful scanning,
2470  * which is helpful because a long memory isn't as desirable on the
2471  * density estimates.
2472  */
2473  new_strategy_delta = bufs_to_lap - num_to_scan;
2474  new_recent_alloc = reusable_buffers - reusable_buffers_est;
2475  if (new_strategy_delta > 0 && new_recent_alloc > 0)
2476  {
2477  scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
2478  smoothed_density += (scans_per_alloc - smoothed_density) /
2479  smoothing_samples;
2480 
2481 #ifdef BGW_DEBUG
2482  elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
2483  new_recent_alloc, new_strategy_delta,
2484  scans_per_alloc, smoothed_density);
2485 #endif
2486  }
2487 
2488  /* Return true if OK to hibernate */
2489  return (bufs_to_lap == 0 && recent_alloc == 0);
2490 }
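/*
 * Illustrative sketch (not part of bufmgr.c): the "fast attack, slow
 * decline" moving average used for smoothed_alloc above.  An increase in
 * the allocation rate is adopted immediately, while a decrease only pulls
 * the average down by 1/samples per call.  The function name and the
 * samples value are assumptions for this example only.
 */
static float
example_smooth(float smoothed, float recent, float samples)
{
    if (smoothed <= recent)
        return recent;                               /* follow increases at once */
    return smoothed + (recent - smoothed) / samples; /* decay slowly */
}

/*
 * With samples = 16 and recent dropping from 1000 to 0, successive calls
 * yield roughly 1000, 937.5, 878.9, ...: about a 6% decay per bgwriter
 * round, which keeps upcoming_alloc_est from collapsing after a brief lull.
 */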
2491 
2492 /*
2493  * SyncOneBuffer -- process a single buffer during syncing.
2494  *
2495  * If skip_recently_used is true, we don't write currently-pinned buffers, nor
2496  * buffers marked recently used, as these are not replacement candidates.
2497  *
2498  * Returns a bitmask containing the following flag bits:
2499  * BUF_WRITTEN: we wrote the buffer.
2500  * BUF_REUSABLE: buffer is available for replacement, ie, it has
2501  * pin count 0 and usage count 0.
2502  *
2503  * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
2504  * after locking it, but we don't care all that much.)
2505  *
2506  * Note: caller must have done ResourceOwnerEnlargeBuffers.
2507  */
2508 static int
2509 SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
2510 {
2511  BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
2512  int result = 0;
2513  uint32 buf_state;
2514  BufferTag tag;
2515 
2516  ReservePrivateRefCountEntry();
2517 
2518  /*
2519  * Check whether buffer needs writing.
2520  *
2521  * We can make this check without taking the buffer content lock so long
2522  * as we mark pages dirty in access methods *before* logging changes with
2523  * XLogInsert(): if someone marks the buffer dirty just after our check, we
2524  * don't worry, because our checkpoint.redo points before the log record for
2525  * the upcoming changes and so we are not required to write such a dirty buffer.
2526  */
2527  buf_state = LockBufHdr(bufHdr);
2528 
2529  if (BUF_STATE_GET_REFCOUNT(buf_state) == 0 &&
2530  BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
2531  {
2532  result |= BUF_REUSABLE;
2533  }
2534  else if (skip_recently_used)
2535  {
2536  /* Caller told us not to write recently-used buffers */
2537  UnlockBufHdr(bufHdr, buf_state);
2538  return result;
2539  }
2540 
2541  if (!(buf_state & BM_VALID) || !(buf_state & BM_DIRTY))
2542  {
2543  /* It's clean, so nothing to do */
2544  UnlockBufHdr(bufHdr, buf_state);
2545  return result;
2546  }
2547 
2548  /*
2549  * Pin it, share-lock it, write it. (FlushBuffer will do nothing if the
2550  * buffer is clean by the time we've locked it.)
2551  */
2552  PinBuffer_Locked(bufHdr);
2553  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
2554 
2555  FlushBuffer(bufHdr, NULL);
2556 
2557  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
2558 
2559  tag = bufHdr->tag;
2560 
2561  UnpinBuffer(bufHdr, true);
2562 
2563  ScheduleBufferTagForWriteback(wb_context, &tag);
2564 
2565  return result | BUF_WRITTEN;
2566 }
2567 
2568 /*
2569  * AtEOXact_Buffers - clean up at end of transaction.
2570  *
2571  * As of PostgreSQL 8.0, buffer pins should get released by the
2572  * ResourceOwner mechanism. This routine is just a debugging
2573  * cross-check that no pins remain.
2574  */
2575 void
2576 AtEOXact_Buffers(bool isCommit)
2577 {
2578  CheckForBufferLeaks();
2579 
2580  AtEOXact_LocalBuffers(isCommit);
2581 
2582  Assert(PrivateRefCountOverflowed == 0);
2583 }
2584 
2585 /*
2586  * Initialize access to shared buffer pool
2587  *
2588  * This is called during backend startup (whether standalone or under the
2589  * postmaster). It sets up for this backend's access to the already-existing
2590  * buffer pool.
2591  *
2592  * NB: this is called before InitProcess(), so we do not have a PGPROC and
2593  * cannot do LWLockAcquire; hence we can't actually access stuff in
2594  * shared memory yet. We are only initializing local data here.
2595  * (See also InitBufferPoolBackend)
2596  */
2597 void
2598 InitBufferPoolAccess(void)
2599 {
2600  HASHCTL hash_ctl;
2601 
2602  memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
2603 
2604  hash_ctl.keysize = sizeof(int32);
2605  hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
2606 
2607  PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
2608  HASH_ELEM | HASH_BLOBS);
2609 }
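/*
 * Illustrative sketch (not part of bufmgr.c): the two-level private
 * refcount bookkeeping set up above.  A tiny fixed array covers the common
 * case of only a few buffers pinned at once; everything else spills into a
 * slower overflow structure.  The overflow here is a bounded array purely
 * for illustration -- the real code uses a dynahash table keyed by Buffer,
 * and the sizes below are assumptions for this sketch only.
 */
#define EX_ARRAY_ENTRIES    8
#define EX_OVERFLOW_ENTRIES 64

typedef struct { int buffer; int refcount; } ExRefCountEntry;

static ExRefCountEntry ex_array[EX_ARRAY_ENTRIES];          /* fast path */
static ExRefCountEntry ex_overflow[EX_OVERFLOW_ENTRIES];    /* slow path */
static int ex_overflow_used = 0;

static ExRefCountEntry *
example_get_entry(int buffer)
{
    int i;

    /* fast path: the small array fits in a cache line or two */
    for (i = 0; i < EX_ARRAY_ENTRIES; i++)
        if (ex_array[i].buffer == buffer)
            return &ex_array[i];

    /* claim a free array slot if one exists (0 means "unused" here) */
    for (i = 0; i < EX_ARRAY_ENTRIES; i++)
        if (ex_array[i].buffer == 0)
        {
            ex_array[i].buffer = buffer;
            return &ex_array[i];
        }

    /* slow path: linear search of the overflow area */
    for (i = 0; i < ex_overflow_used; i++)
        if (ex_overflow[i].buffer == buffer)
            return &ex_overflow[i];

    if (ex_overflow_used >= EX_OVERFLOW_ENTRIES)
        return NULL;                    /* out of space in this toy version */

    ex_overflow[ex_overflow_used].buffer = buffer;
    ex_overflow[ex_overflow_used].refcount = 0;
    return &ex_overflow[ex_overflow_used++];
}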
2610 
2611 /*
2612  * InitBufferPoolBackend --- second-stage initialization of a new backend
2613  *
2614  * This is called after we have acquired a PGPROC and so can safely get
2615  * LWLocks. We don't currently need to do anything at this stage ...
2616  * except register a shmem-exit callback. AtProcExit_Buffers needs LWLock
2617  * access, and thereby has to be called at the corresponding phase of
2618  * backend shutdown.
2619  */
2620 void
2621 InitBufferPoolBackend(void)
2622 {
2623  on_shmem_exit(AtProcExit_Buffers, 0);
2624 }
2625 
2626 /*
2627  * During backend exit, ensure that we released all shared-buffer locks and
2628  * assert that we have no remaining pins.
2629  */
2630 static void
2631 AtProcExit_Buffers(int code, Datum arg)
2632 {
2633  AbortBufferIO();
2634  UnlockBuffers();
2635 
2636  CheckForBufferLeaks();
2637 
2638  /* localbuf.c needs a chance too */
2639  AtProcExit_LocalBuffers();
2640 }
2641 
2642 /*
2643  * CheckForBufferLeaks - ensure this backend holds no buffer pins
2644  *
2645  * As of PostgreSQL 8.0, buffer pins should get released by the
2646  * ResourceOwner mechanism. This routine is just a debugging
2647  * cross-check that no pins remain.
2648  */
2649 static void
2650 CheckForBufferLeaks(void)
2651 {
2652 #ifdef USE_ASSERT_CHECKING
2653  int RefCountErrors = 0;
2654  PrivateRefCountEntry *res;
2655  int i;
2656 
2657  /* check the array */
2658  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
2659  {
2660  res = &PrivateRefCountArray[i];
2661 
2662  if (res->buffer != InvalidBuffer)
2663  {
2664  PrintBufferLeakWarning(res->buffer);
2665  RefCountErrors++;
2666  }
2667  }
2668 
2669  /* if necessary search the hash */
2670  if (PrivateRefCountOverflowed)
2671  {
2672  HASH_SEQ_STATUS hstat;
2673 
2674  hash_seq_init(&hstat, PrivateRefCountHash);
2675  while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
2676  {
2677  PrintBufferLeakWarning(res->buffer);
2678  RefCountErrors++;
2679  }
2680 
2681  }
2682 
2683  Assert(RefCountErrors == 0);
2684 #endif
2685 }
2686 
2687 /*
2688  * Helper routine to issue warnings when a buffer is unexpectedly pinned
2689  */
2690 void
2691 PrintBufferLeakWarning(Buffer buffer)
2692 {
2693  BufferDesc *buf;
2694  int32 loccount;
2695  char *path;
2696  BackendId backend;
2697  uint32 buf_state;
2698 
2699  Assert(BufferIsValid(buffer));
2700  if (BufferIsLocal(buffer))
2701  {
2702  buf = GetLocalBufferDescriptor(-buffer - 1);
2703  loccount = LocalRefCount[-buffer - 1];
2704  backend = MyBackendId;
2705  }
2706  else
2707  {
2708  buf = GetBufferDescriptor(buffer - 1);
2709  loccount = GetPrivateRefCount(buffer);
2710  backend = InvalidBackendId;
2711  }
2712 
2713  /* theoretically we should lock the bufhdr here */
2714  path = relpathbackend(buf->tag.rnode, backend, buf->tag.forkNum);
2715  buf_state = pg_atomic_read_u32(&buf->state);
2716  elog(WARNING,
2717  "buffer refcount leak: [%03d] "
2718  "(rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
2719  buffer, path,
2720  buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
2721  BUF_STATE_GET_REFCOUNT(buf_state), loccount);
2722  pfree(path);
2723 }
2724 
2725 /*
2726  * CheckPointBuffers
2727  *
2728  * Flush all dirty blocks in buffer pool to disk at checkpoint time.
2729  *
2730  * Note: temporary relations do not participate in checkpoints, so they don't
2731  * need to be flushed.
2732  */
2733 void
2734 CheckPointBuffers(int flags)
2735 {
2736  BufferSync(flags);
2737 }
2738 
2739 
2740 /*
2741  * Do whatever is needed to prepare for commit at the bufmgr and smgr levels
2742  */
2743 void
2744 BufmgrCommit(void)
2745 {
2746  /* Nothing to do in bufmgr anymore... */
2747 }
2748 
2749 /*
2750  * BufferGetBlockNumber
2751  * Returns the block number associated with a buffer.
2752  *
2753  * Note:
2754  * Assumes that the buffer is valid and pinned, else the
2755  * value may be obsolete immediately...
2756  */
2757 BlockNumber
2758 BufferGetBlockNumber(Buffer buffer)
2759 {
2760  BufferDesc *bufHdr;
2761 
2762  Assert(BufferIsPinned(buffer));
2763 
2764  if (BufferIsLocal(buffer))
2765  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2766  else
2767  bufHdr = GetBufferDescriptor(buffer - 1);
2768 
2769  /* pinned, so OK to read tag without spinlock */
2770  return bufHdr->tag.blockNum;
2771 }
2772 
2773 /*
2774  * BufferGetTag
2775  * Returns the relfilenode, fork number and block number associated with
2776  * a buffer.
2777  */
2778 void
2780  BlockNumber *blknum)
2781 {
2782  BufferDesc *bufHdr;
2783 
2784  /* Do the same checks as BufferGetBlockNumber. */
2785  Assert(BufferIsPinned(buffer));
2786 
2787  if (BufferIsLocal(buffer))
2788  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2789  else
2790  bufHdr = GetBufferDescriptor(buffer - 1);
2791 
2792  /* pinned, so OK to read tag without spinlock */
2793  *rnode = bufHdr->tag.rnode;
2794  *forknum = bufHdr->tag.forkNum;
2795  *blknum = bufHdr->tag.blockNum;
2796 }
2797 
2798 /*
2799  * FlushBuffer
2800  * Physically write out a shared buffer.
2801  *
2802  * NOTE: this actually just passes the buffer contents to the kernel; the
2803  * real write to disk won't happen until the kernel feels like it. This
2804  * is okay from our point of view since we can redo the changes from WAL.
2805  * However, we will need to force the changes to disk via fsync before
2806  * we can checkpoint WAL.
2807  *
2808  * The caller must hold a pin on the buffer and have share-locked the
2809  * buffer contents. (Note: a share-lock does not prevent updates of
2810  * hint bits in the buffer, so the page could change while the write
2811  * is in progress, but we assume that that will not invalidate the data
2812  * written.)
2813  *
2814  * If the caller has an smgr reference for the buffer's relation, pass it
2815  * as the second parameter. If not, pass NULL.
2816  */
2817 static void
2818 FlushBuffer(BufferDesc *buf, SMgrRelation reln)
2819 {
2820  XLogRecPtr recptr;
2821  ErrorContextCallback errcallback;
2822  instr_time io_start,
2823  io_time;
2824  Block bufBlock;
2825  char *bufToWrite;
2826  uint32 buf_state;
2827 
2828  /*
2829  * Try to start an I/O operation. If StartBufferIO returns false, then
2830  * someone else flushed the buffer before we could, so we need not do
2831  * anything.
2832  */
2833  if (!StartBufferIO(buf, false))
2834  return;
2835 
2836  /* Setup error traceback support for ereport() */
2837  errcallback.callback = shared_buffer_write_error_callback;
2838  errcallback.arg = (void *) buf;
2839  errcallback.previous = error_context_stack;
2840  error_context_stack = &errcallback;
2841 
2842  /* Find smgr relation for buffer */
2843  if (reln == NULL)
2844  reln = smgropen(buf->tag.rnode, InvalidBackendId);
2845 
2846  TRACE_POSTGRESQL_BUFFER_FLUSH_START(buf->tag.forkNum,
2847  buf->tag.blockNum,
2848  reln->smgr_rnode.node.spcNode,
2849  reln->smgr_rnode.node.dbNode,
2850  reln->smgr_rnode.node.relNode);
2851 
2852  buf_state = LockBufHdr(buf);
2853 
2854  /*
2855  * Run PageGetLSN while holding header lock, since we don't have the
2856  * buffer locked exclusively in all cases.
2857  */
2858  recptr = BufferGetLSN(buf);
2859 
2860  /* To check if block content changes while flushing. - vadim 01/17/97 */
2861  buf_state &= ~BM_JUST_DIRTIED;
2862  UnlockBufHdr(buf, buf_state);
2863 
2864  /*
2865  * Force XLOG flush up to buffer's LSN. This implements the basic WAL
2866  * rule that log updates must hit disk before any of the data-file changes
2867  * they describe do.
2868  *
2869  * However, this rule does not apply to unlogged relations, which will be
2870  * lost after a crash anyway. Most unlogged relation pages do not bear
2871  * LSNs since we never emit WAL records for them, and therefore flushing
2872  * up through the buffer LSN would be useless, but harmless. However,
2873  * GiST indexes use LSNs internally to track page-splits, and therefore
2874  * unlogged GiST pages bear "fake" LSNs generated by
2875  * GetFakeLSNForUnloggedRel. It is unlikely but possible that the fake
2876  * LSN counter could advance past the WAL insertion point; and if it did
2877  * happen, attempting to flush WAL through that location would fail, with
2878  * disastrous system-wide consequences. To make sure that can't happen,
2879  * skip the flush if the buffer isn't permanent.
2880  */
2881  if (buf_state & BM_PERMANENT)
2882  XLogFlush(recptr);
2883 
2884  /*
2885  * Now it's safe to write buffer to disk. Note that no one else should
2886  * have been able to write it while we were busy with log flushing because
2887  * only one process at a time can set the BM_IO_IN_PROGRESS bit.
2888  */
2889  bufBlock = BufHdrGetBlock(buf);
2890 
2891  /*
2892  * Update page checksum if desired. Since we have only shared lock on the
2893  * buffer, other processes might be updating hint bits in it, so we must
2894  * copy the page to private storage if we do checksumming.
2895  */
2896  bufToWrite = PageSetChecksumCopy((Page) bufBlock, buf->tag.blockNum);
2897 
2898  if (track_io_timing)
2899  INSTR_TIME_SET_CURRENT(io_start);
2900 
2901  /*
2902  * bufToWrite is either the shared buffer or a copy, as appropriate.
2903  */
2904  smgrwrite(reln,
2905  buf->tag.forkNum,
2906  buf->tag.blockNum,
2907  bufToWrite,
2908  false);
2909 
2910  if (track_io_timing)
2911  {
2912  INSTR_TIME_SET_CURRENT(io_time);
2913  INSTR_TIME_SUBTRACT(io_time, io_start);
2914  pgstat_count_buffer_write_time(INSTR_TIME_GET_MICROSEC(io_time));
2915  INSTR_TIME_ADD(pgBufferUsage.blk_write_time, io_time);
2916  }
2917 
2918  pgBufferUsage.shared_blks_written++;
2919 
2920  /*
2921  * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set) and
2922  * end the BM_IO_IN_PROGRESS state.
2923  */
2924  TerminateBufferIO(buf, true, 0);
2925 
2926  TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(buf->tag.forkNum,
2927  buf->tag.blockNum,
2928  reln->smgr_rnode.node.spcNode,
2929  reln->smgr_rnode.node.dbNode,
2930  reln->smgr_rnode.node.relNode);
2931 
2932  /* Pop the error context stack */
2933  error_context_stack = errcallback.previous;
2934 }
2935 
2936 /*
2937  * RelationGetNumberOfBlocksInFork
2938  * Determines the current number of pages in the specified relation fork.
2939  *
2940  * Note that the accuracy of the result will depend on the details of the
2941  * relation's storage. For builtin AMs it'll be accurate, but for external AMs
2942  * it might not be.
2943  */
2944 BlockNumber
2945 RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
2946 {
2947  switch (relation->rd_rel->relkind)
2948  {
2949  case RELKIND_SEQUENCE:
2950  case RELKIND_INDEX:
2951  case RELKIND_PARTITIONED_INDEX:
2952  /* Open it at the smgr level if not already done */
2953  RelationOpenSmgr(relation);
2954 
2955  return smgrnblocks(relation->rd_smgr, forkNum);
2956 
2957  case RELKIND_RELATION:
2958  case RELKIND_TOASTVALUE:
2959  case RELKIND_MATVIEW:
2960  {
2961  /*
2962  * Not every table AM uses BLCKSZ-wide fixed-size blocks.
2963  * Therefore tableam returns the size in bytes - but for the
2964  * purpose of this routine, we want the number of blocks.
2965  * Therefore divide, rounding up.
2966  */
2967  uint64 szbytes;
2968 
2969  szbytes = table_relation_size(relation, forkNum);
2970 
2971  return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
2972  }
2973  case RELKIND_VIEW:
2974  case RELKIND_COMPOSITE_TYPE:
2975  case RELKIND_FOREIGN_TABLE:
2976  case RELKIND_PARTITIONED_TABLE:
2977  default:
2978  Assert(false);
2979  break;
2980  }
2981 
2982  return 0; /* keep compiler quiet */
2983 }
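/*
 * Worked example (illustration only) of the round-up division above, with
 * BLCKSZ = 8192:
 *
 *     szbytes = 81920  ->  (81920 + 8191) / 8192 = 10   (exact multiple)
 *     szbytes = 81921  ->  (81921 + 8191) / 8192 = 11   (partial block)
 *
 * i.e. a trailing partial block still counts as a full buffer-sized page.
 */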
2984 
2985 /*
2986  * BufferIsPermanent
2987  * Determines whether a buffer will potentially still be around after
2988  * a crash. Caller must hold a buffer pin.
2989  */
2990 bool
2991 BufferIsPermanent(Buffer buffer)
2992 {
2993  BufferDesc *bufHdr;
2994 
2995  /* Local buffers are used only for temp relations. */
2996  if (BufferIsLocal(buffer))
2997  return false;
2998 
2999  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3000  Assert(BufferIsValid(buffer));
3001  Assert(BufferIsPinned(buffer));
3002 
3003  /*
3004  * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
3005  * need not bother with the buffer header spinlock. Even if someone else
3006  * changes the buffer header state while we're doing this, the state is
3007  * changed atomically, so we'll read the old value or the new value, but
3008  * not random garbage.
3009  */
3010  bufHdr = GetBufferDescriptor(buffer - 1);
3011  return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
3012 }
3013 
3014 /*
3015  * BufferGetLSNAtomic
3016  * Retrieves the LSN of the buffer atomically using a buffer header lock.
3017  * This is necessary for some callers who may not have an exclusive lock
3018  * on the buffer.
3019  */
3020 XLogRecPtr
3021 BufferGetLSNAtomic(Buffer buffer)
3022 {
3023  BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
3024  char *page = BufferGetPage(buffer);
3025  XLogRecPtr lsn;
3026  uint32 buf_state;
3027 
3028  /*
3029  * If we don't need locking for correctness, fastpath out.
3030  */
3031  if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
3032  return PageGetLSN(page);
3033 
3034  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3035  Assert(BufferIsValid(buffer));
3036  Assert(BufferIsPinned(buffer));
3037 
3038  buf_state = LockBufHdr(bufHdr);
3039  lsn = PageGetLSN(page);
3040  UnlockBufHdr(bufHdr, buf_state);
3041 
3042  return lsn;
3043 }
3044 
3045 /* ---------------------------------------------------------------------
3046  * DropRelFileNodeBuffers
3047  *
3048  * This function removes from the buffer pool all the pages of the
3049  * specified relation forks that have block numbers >= firstDelBlock.
3050  * (In particular, with firstDelBlock = 0, all pages are removed.)
3051  * Dirty pages are simply dropped, without bothering to write them
3052  * out first. Therefore, this is NOT rollback-able, and so should be
3053  * used only with extreme caution!
3054  *
3055  * Currently, this is called only from smgr.c when the underlying file
3056  * is about to be deleted or truncated (firstDelBlock is needed for
3057  * the truncation case). The data in the affected pages would therefore
3058  * be deleted momentarily anyway, and there is no point in writing it.
3059  * It is the responsibility of higher-level code to ensure that the
3060  * deletion or truncation does not lose any data that could be needed
3061  * later. It is also the responsibility of higher-level code to ensure
3062  * that no other process could be trying to load more pages of the
3063  * relation into buffers.
3064  * --------------------------------------------------------------------
3065  */
3066 void
3068  int nforks, BlockNumber *firstDelBlock)
3069 {
3070  int i;
3071  int j;
3072  RelFileNodeBackend rnode;
3073  BlockNumber nForkBlock[MAX_FORKNUM];
3074  uint64 nBlocksToInvalidate = 0;
3075 
3076  rnode = smgr_reln->smgr_rnode;
3077 
3078  /* If it's a local relation, it's localbuf.c's problem. */
3079  if (RelFileNodeBackendIsTemp(rnode))
3080  {
3081  if (rnode.backend == MyBackendId)
3082  {
3083  for (j = 0; j < nforks; j++)
3084  DropRelFileNodeLocalBuffers(rnode.node, forkNum[j],
3085  firstDelBlock[j]);
3086  }
3087  return;
3088  }
3089 
3090  /*
3091  * To remove all the pages of the specified relation forks from the buffer
3092  * pool, we need to scan the entire buffer pool but we can optimize it by
3093  * finding the buffers from BufMapping table provided we know the exact
3094  * size of each fork of the relation. The exact size is required to ensure
3095  * that we don't leave any buffer for the relation being dropped as
3096  * otherwise the background writer or checkpointer can lead to a PANIC
3097  * otherwise the background writer or checkpointer could hit a PANIC
3098  * while flushing buffers corresponding to files that don't exist.
3099  * To know the exact size, we rely on the size cached for each fork by us
3100  * during recovery, which limits the optimization to recovery and to
3101  * standbys, but we can easily extend it once we have a shared cache for
3102  * relation size.
3103  *
3104  * In recovery, we cache the value returned by the first lseek(SEEK_END)
3105  * and future writes keep the cached value up-to-date. See
3106  * smgrextend. It is possible that the value of the first lseek is smaller
3107  * than the actual number of existing blocks in the file due to buggy
3108  * Linux kernels that might not have accounted for the recent write. But
3109  * that should be fine because there must not be any buffers after that
3110  * file size.
3111  */
3112  for (i = 0; i < nforks; i++)
3113  {
3114  /* Get the number of blocks for a relation's fork */
3115  nForkBlock[i] = smgrnblocks_cached(smgr_reln, forkNum[i]);
3116 
3117  if (nForkBlock[i] == InvalidBlockNumber)
3118  {
3119  nBlocksToInvalidate = InvalidBlockNumber;
3120  break;
3121  }
3122 
3123  /* calculate the number of blocks to be invalidated */
3124  nBlocksToInvalidate += (nForkBlock[i] - firstDelBlock[i]);
3125  }
3126 
3127  /*
3128  * We apply the optimization iff the total number of blocks to invalidate
3129  * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
3130  */
3131  if (BlockNumberIsValid(nBlocksToInvalidate) &&
3132  nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
3133  {
3134  for (j = 0; j < nforks; j++)
3135  FindAndDropRelFileNodeBuffers(rnode.node, forkNum[j],
3136  nForkBlock[j], firstDelBlock[j]);
3137  return;
3138  }
3139 
3140  for (i = 0; i < NBuffers; i++)
3141  {
3142  BufferDesc *bufHdr = GetBufferDescriptor(i);
3143  uint32 buf_state;
3144 
3145  /*
3146  * We can make this a tad faster by prechecking the buffer tag before
3147  * we attempt to lock the buffer; this saves a lot of lock
3148  * acquisitions in typical cases. It should be safe because the
3149  * caller must have AccessExclusiveLock on the relation, or some other
3150  * reason to be certain that no one is loading new pages of the rel
3151  * into the buffer pool. (Otherwise we might well miss such pages
3152  * entirely.) Therefore, while the tag might be changing while we
3153  * look at it, it can't be changing *to* a value we care about, only
3154  * *away* from such a value. So false negatives are impossible, and
3155  * false positives are safe because we'll recheck after getting the
3156  * buffer lock.
3157  *
3158  * We could check forkNum and blockNum as well as the rnode, but the
3159  * incremental win from doing so seems small.
3160  */
3161  if (!RelFileNodeEquals(bufHdr->tag.rnode, rnode.node))
3162  continue;
3163 
3164  buf_state = LockBufHdr(bufHdr);
3165 
3166  for (j = 0; j < nforks; j++)
3167  {
3168  if (RelFileNodeEquals(bufHdr->tag.rnode, rnode.node) &&
3169  bufHdr->tag.forkNum == forkNum[j] &&
3170  bufHdr->tag.blockNum >= firstDelBlock[j])
3171  {
3172  InvalidateBuffer(bufHdr); /* releases spinlock */
3173  break;
3174  }
3175  }
3176  if (j >= nforks)
3177  UnlockBufHdr(bufHdr, buf_state);
3178  }
3179 }
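/*
 * Worked example (illustration only): with shared_buffers = 16GB and
 * BLCKSZ = 8192, NBuffers is 2,097,152, so a BUF_DROP_FULL_SCAN_THRESHOLD
 * of NBuffers / 32 comes to 65,536 blocks, i.e. 512MB worth of relation
 * forks.  During recovery, dropping anything smaller is handled with
 * per-block BufMapping lookups in FindAndDropRelFileNodeBuffers(); anything
 * larger (or of unknown size) falls back to the full scan over all two
 * million buffer headers above.
 */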
3180 
3181 /* ---------------------------------------------------------------------
3182  * DropRelFileNodesAllBuffers
3183  *
3184  * This function removes from the buffer pool all the pages of all
3185  * forks of the specified relations. It's equivalent to calling
3186  * DropRelFileNodeBuffers once per fork per relation with
3187  * firstDelBlock = 0.
3188  * --------------------------------------------------------------------
3189  */
3190 void
3191 DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
3192 {
3193  int i;
3194  int j;
3195  int n = 0;
3196  SMgrRelation *rels;
3197  BlockNumber (*block)[MAX_FORKNUM + 1];
3198  uint64 nBlocksToInvalidate = 0;
3199  RelFileNode *nodes;
3200  bool cached = true;
3201  bool use_bsearch;
3202 
3203  if (nnodes == 0)
3204  return;
3205 
3206  rels = palloc(sizeof(SMgrRelation) * nnodes); /* non-local relations */
3207 
3208  /* If it's a local relation, it's localbuf.c's problem. */
3209  for (i = 0; i < nnodes; i++)
3210  {
3211  if (RelFileNodeBackendIsTemp(smgr_reln[i]->smgr_rnode))
3212  {
3213  if (smgr_reln[i]->smgr_rnode.backend == MyBackendId)
3214  DropRelFileNodeAllLocalBuffers(smgr_reln[i]->smgr_rnode.node);
3215  }
3216  else
3217  rels[n++] = smgr_reln[i];
3218  }
3219 
3220  /*
3221  * If there are no non-local relations, then we're done. Release the
3222  * memory and return.
3223  */
3224  if (n == 0)
3225  {
3226  pfree(rels);
3227  return;
3228  }
3229 
3230  /*
3231  * This is used to remember the number of blocks for all the relations
3232  * forks.
3233  */
3234  block = (BlockNumber (*)[MAX_FORKNUM + 1])
3235  palloc(sizeof(BlockNumber) * n * (MAX_FORKNUM + 1));
3236 
3237  /*
3238  * We can avoid scanning the entire buffer pool if we know the exact size
3239  * of each of the given relation forks. See DropRelFileNodeBuffers.
3240  */
3241  for (i = 0; i < n && cached; i++)
3242  {
3243  for (j = 0; j <= MAX_FORKNUM; j++)
3244  {
3245  /* Get the number of blocks for a relation's fork. */
3246  block[i][j] = smgrnblocks_cached(rels[i], j);
3247 
3248  /* We only need to consider the relation forks that exist. */
3249  if (block[i][j] == InvalidBlockNumber)
3250  {
3251  if (!smgrexists(rels[i], j))
3252  continue;
3253  cached = false;
3254  break;
3255  }
3256 
3257  /* calculate the total number of blocks to be invalidated */
3258  nBlocksToInvalidate += block[i][j];
3259  }
3260  }
3261 
3262  /*
3263  * We apply the optimization iff the total number of blocks to invalidate
3264  * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
3265  */
3266  if (cached && nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
3267  {
3268  for (i = 0; i < n; i++)
3269  {
3270  for (j = 0; j <= MAX_FORKNUM; j++)
3271  {
3272  /* ignore relation forks that don't exist */
3273  if (!BlockNumberIsValid(block[i][j]))
3274  continue;
3275 
3276  /* drop all the buffers for a particular relation fork */
3277  FindAndDropRelFileNodeBuffers(rels[i]->smgr_rnode.node,
3278  j, block[i][j], 0);
3279  }
3280  }
3281 
3282  pfree(block);
3283  pfree(rels);
3284  return;
3285  }
3286 
3287  pfree(block);
3288  nodes = palloc(sizeof(RelFileNode) * n); /* non-local relations */
3289  for (i = 0; i < n; i++)
3290  nodes[i] = rels[i]->smgr_rnode.node;
3291 
3292  /*
3293  * For low number of relations to drop just use a simple walk through, to
3294  * save the bsearch overhead. The threshold to use is rather a guess than
3295  * an exactly determined value, as it depends on many factors (CPU and RAM
3296  * speeds, amount of shared buffers etc.).
3297  */
3298  use_bsearch = n > RELS_BSEARCH_THRESHOLD;
3299 
3300  /* sort the list of rnodes if necessary */
3301  if (use_bsearch)
3302  pg_qsort(nodes, n, sizeof(RelFileNode), rnode_comparator);
3303 
3304  for (i = 0; i < NBuffers; i++)
3305  {
3306  RelFileNode *rnode = NULL;
3307  BufferDesc *bufHdr = GetBufferDescriptor(i);
3308  uint32 buf_state;
3309 
3310  /*
3311  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3312  * and saves some cycles.
3313  */
3314 
3315  if (!use_bsearch)
3316  {
3317  int j;
3318 
3319  for (j = 0; j < n; j++)
3320  {
3321  if (RelFileNodeEquals(bufHdr->tag.rnode, nodes[j]))
3322  {
3323  rnode = &nodes[j];
3324  break;
3325  }
3326  }
3327  }
3328  else
3329  {
3330  rnode = bsearch((const void *) &(bufHdr->tag.rnode),
3331  nodes, n, sizeof(RelFileNode),
3332  rnode_comparator);
3333  }
3334 
3335  /* buffer doesn't belong to any of the given relfilenodes; skip it */
3336  if (rnode == NULL)
3337  continue;
3338 
3339  buf_state = LockBufHdr(bufHdr);
3340  if (RelFileNodeEquals(bufHdr->tag.rnode, (*rnode)))
3341  InvalidateBuffer(bufHdr); /* releases spinlock */
3342  else
3343  UnlockBufHdr(bufHdr, buf_state);
3344  }
3345 
3346  pfree(nodes);
3347  pfree(rels);
3348 }
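/*
 * Illustrative sketch (not part of bufmgr.c): the sort-then-bsearch pattern
 * used above once the number of dropped relations exceeds
 * RELS_BSEARCH_THRESHOLD.  The key type and comparator are simplified
 * stand-ins; the real code sorts RelFileNode values with rnode_comparator.
 */
#include <stdlib.h>

typedef struct { unsigned spc, db, rel; } ExFileNode;

static int
example_cmp(const void *a, const void *b)
{
    const ExFileNode *x = a;
    const ExFileNode *y = b;

    if (x->spc != y->spc) return x->spc < y->spc ? -1 : 1;
    if (x->db != y->db)   return x->db < y->db ? -1 : 1;
    if (x->rel != y->rel) return x->rel < y->rel ? -1 : 1;
    return 0;
}

/*
 * The caller sorts the dropped-relation array once, e.g.
 *     qsort(dropped, n, sizeof(ExFileNode), example_cmp);
 * after which every buffer-header check is an O(log n) lookup instead of
 * an O(n) linear scan.
 */
static int
example_is_dropped(const ExFileNode *key, const ExFileNode *dropped, int n)
{
    return bsearch(key, dropped, n, sizeof(ExFileNode), example_cmp) != NULL;
}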
3349 
3350 /* ---------------------------------------------------------------------
3351  * FindAndDropRelFileNodeBuffers
3352  *
3353  * This function performs look up in BufMapping table and removes from the
3354  * buffer pool all the pages of the specified relation fork that has block
3355  * number >= firstDelBlock. (In particular, with firstDelBlock = 0, all
3356  * pages are removed.)
3357  * --------------------------------------------------------------------
3358  */
3359 static void
3360 FindAndDropRelFileNodeBuffers(RelFileNode rnode, ForkNumber forkNum,
3361  BlockNumber nForkBlock,
3362  BlockNumber firstDelBlock)
3363 {
3364  BlockNumber curBlock;
3365 
3366  for (curBlock = firstDelBlock; curBlock < nForkBlock; curBlock++)
3367  {
3368  uint32 bufHash; /* hash value for tag */
3369  BufferTag bufTag; /* identity of requested block */
3370  LWLock *bufPartitionLock; /* buffer partition lock for it */
3371  int buf_id;
3372  BufferDesc *bufHdr;
3373  uint32 buf_state;
3374 
3375  /* create a tag so we can lookup the buffer */
3376  INIT_BUFFERTAG(bufTag, rnode, forkNum, curBlock);
3377 
3378  /* determine its hash code and partition lock ID */
3379  bufHash = BufTableHashCode(&bufTag);
3380  bufPartitionLock = BufMappingPartitionLock(bufHash);
3381 
3382  /* Check that it is in the buffer pool. If not, do nothing. */
3383  LWLockAcquire(bufPartitionLock, LW_SHARED);
3384  buf_id = BufTableLookup(&bufTag, bufHash);
3385  LWLockRelease(bufPartitionLock);
3386 
3387  if (buf_id < 0)
3388  continue;
3389 
3390  bufHdr = GetBufferDescriptor(buf_id);
3391 
3392  /*
3393  * We need to lock the buffer header and recheck if the buffer is
3394  * still associated with the same block because the buffer could be
3395  * evicted by some other backend loading blocks for a different
3396  * relation after we release lock on the BufMapping table.
3397  */
3398  buf_state = LockBufHdr(bufHdr);
3399 
3400  if (RelFileNodeEquals(bufHdr->tag.rnode, rnode) &&
3401  bufHdr->tag.forkNum == forkNum &&
3402  bufHdr->tag.blockNum >= firstDelBlock)
3403  InvalidateBuffer(bufHdr); /* releases spinlock */
3404  else
3405  UnlockBufHdr(bufHdr, buf_state);
3406  }
3407 }
3408 
3409 /* ---------------------------------------------------------------------
3410  * DropDatabaseBuffers
3411  *
3412  * This function removes all the buffers in the buffer cache for a
3413  * particular database. Dirty pages are simply dropped, without
3414  * bothering to write them out first. This is used when we destroy a
3415  * database, to avoid trying to flush data to disk when the directory
3416  * tree no longer exists. Implementation is pretty similar to
3417  * DropRelFileNodeBuffers() which is for destroying just one relation.
3418  * --------------------------------------------------------------------
3419  */
3420 void
3421 DropDatabaseBuffers(Oid dbid)
3422 {
3423  int i;
3424 
3425  /*
3426  * We needn't consider local buffers, since by assumption the target
3427  * database isn't our own.
3428  */
3429 
3430  for (i = 0; i < NBuffers; i++)
3431  {
3432  BufferDesc *bufHdr = GetBufferDescriptor(i);
3433  uint32 buf_state;
3434 
3435  /*
3436  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3437  * and saves some cycles.
3438  */
3439  if (bufHdr->tag.rnode.dbNode != dbid)
3440  continue;
3441 
3442  buf_state = LockBufHdr(bufHdr);
3443  if (bufHdr->tag.rnode.dbNode == dbid)
3444  InvalidateBuffer(bufHdr); /* releases spinlock */
3445  else
3446  UnlockBufHdr(bufHdr, buf_state);
3447  }
3448 }
3449 
3450 /* -----------------------------------------------------------------
3451  * PrintBufferDescs
3452  *
3453  * this function prints all the buffer descriptors, for debugging
3454  * use only.
3455  * -----------------------------------------------------------------
3456  */
3457 #ifdef NOT_USED
3458 void
3459 PrintBufferDescs(void)
3460 {
3461  int i;
3462 
3463  for (i = 0; i < NBuffers; ++i)
3464  {
3467 
3468  /* theoretically we should lock the bufhdr here */
3469  elog(LOG,
3470  "[%02d] (freeNext=%d, rel=%s, "
3471  "blockNum=%u, flags=0x%x, refcount=%u %d)",
3472  i, buf->freeNext,
3474  buf->tag.blockNum, buf->flags,
3475  buf->refcount, GetPrivateRefCount(b));
3476  }
3477 }
3478 #endif
3479 
3480 #ifdef NOT_USED
3481 void
3482 PrintPinnedBufs(void)
3483 {
3484  int i;
3485 
3486  for (i = 0; i < NBuffers; ++i)
3487  {
3490 
3491  if (GetPrivateRefCount(b) > 0)
3492  {
3493  /* theoretically we should lock the bufhdr here */
3494  elog(LOG,
3495  "[%02d] (freeNext=%d, rel=%s, "
3496  "blockNum=%u, flags=0x%x, refcount=%u %d)",
3497  i, buf->freeNext,
3498  relpathperm(buf->tag.rnode, buf->tag.forkNum),
3499  buf->tag.blockNum, buf->flags,
3500  buf->refcount, GetPrivateRefCount(b));
3501  }
3502  }
3503 }
3504 #endif
3505 
3506 /* ---------------------------------------------------------------------
3507  * FlushRelationBuffers
3508  *
3509  * This function writes all dirty pages of a relation out to disk
3510  * (or more accurately, out to kernel disk buffers), ensuring that the
3511  * kernel has an up-to-date view of the relation.
3512  *
3513  * Generally, the caller should be holding AccessExclusiveLock on the
3514  * target relation to ensure that no other backend is busy dirtying
3515  * more blocks of the relation; the effects can't be expected to last
3516  * after the lock is released.
3517  *
3518  * XXX currently it sequentially searches the buffer pool, should be
3519  * changed to more clever ways of searching. This routine is not
3520  * used in any performance-critical code paths, so it's not worth
3521  * adding additional overhead to normal paths to make it go faster.
3522  * --------------------------------------------------------------------
3523  */
3524 void
3525 FlushRelationBuffers(Relation rel)
3526 {
3527  int i;
3528  BufferDesc *bufHdr;
3529 
3530  /* Open rel at the smgr level if not already done */
3531  RelationOpenSmgr(rel);
3532 
3533  if (RelationUsesLocalBuffers(rel))
3534  {
3535  for (i = 0; i < NLocBuffer; i++)
3536  {
3537  uint32 buf_state;
3538 
3539  bufHdr = GetLocalBufferDescriptor(i);
3540  if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
3541  ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
3542  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3543  {
3544  ErrorContextCallback errcallback;
3545  Page localpage;
3546 
3547  localpage = (char *) LocalBufHdrGetBlock(bufHdr);
3548 
3549  /* Setup error traceback support for ereport() */
3550  errcallback.callback = local_buffer_write_error_callback;
3551  errcallback.arg = (void *) bufHdr;
3552  errcallback.previous = error_context_stack;
3553  error_context_stack = &errcallback;
3554 
3555  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
3556 
3557  smgrwrite(rel->rd_smgr,
3558  bufHdr->tag.forkNum,
3559  bufHdr->tag.blockNum,
3560  localpage,
3561  false);
3562 
3563  buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
3564  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
3565 
3566  /* Pop the error context stack */
3567  error_context_stack = errcallback.previous;
3568  }
3569  }
3570 
3571  return;
3572  }
3573 
3574  /* Make sure we can handle the pin inside the loop */
3575  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3576 
3577  for (i = 0; i < NBuffers; i++)
3578  {
3579  uint32 buf_state;
3580 
3581  bufHdr = GetBufferDescriptor(i);
3582 
3583  /*
3584  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3585  * and saves some cycles.
3586  */
3587  if (!RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node))
3588  continue;
3589 
3590  ReservePrivateRefCountEntry();
3591 
3592  buf_state = LockBufHdr(bufHdr);
3593  if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
3594  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3595  {
3596  PinBuffer_Locked(bufHdr);
3597  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3598  FlushBuffer(bufHdr, rel->rd_smgr);
3599  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3600  UnpinBuffer(bufHdr, true);
3601  }
3602  else
3603  UnlockBufHdr(bufHdr, buf_state);
3604  }
3605 }
3606 
3607 /* ---------------------------------------------------------------------
3608  * FlushRelationsAllBuffers
3609  *
3610  * This function flushes out of the buffer pool all the pages of all
3611  * forks of the specified smgr relations. It's equivalent to calling
3612  * FlushRelationBuffers once per fork per relation. The relations are
3613  * assumed not to use local buffers.
3614  * --------------------------------------------------------------------
3615  */
3616 void
3617 FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
3618 {
3619  int i;
3620  SMgrSortArray *srels;
3621  bool use_bsearch;
3622 
3623  if (nrels == 0)
3624  return;
3625 
3626  /* fill-in array for qsort */
3627  srels = palloc(sizeof(SMgrSortArray) * nrels);
3628 
3629  for (i = 0; i < nrels; i++)
3630  {
3631  Assert(!RelFileNodeBackendIsTemp(smgrs[i]->smgr_rnode));
3632 
3633  srels[i].rnode = smgrs[i]->smgr_rnode.node;
3634  srels[i].srel = smgrs[i];
3635  }
3636 
3637  /*
3638  * Save the bsearch overhead for low number of relations to sync. See
3639  * DropRelFileNodesAllBuffers for details.
3640  */
3641  use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;
3642 
3643  /* sort the list of SMgrRelations if necessary */
3644  if (use_bsearch)
3645  pg_qsort(srels, nrels, sizeof(SMgrSortArray), rnode_comparator);
3646 
3647  /* Make sure we can handle the pin inside the loop */
3648  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3649 
3650  for (i = 0; i < NBuffers; i++)
3651  {
3652  SMgrSortArray *srelent = NULL;
3653  BufferDesc *bufHdr = GetBufferDescriptor(i);
3654  uint32 buf_state;
3655 
3656  /*
3657  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3658  * and saves some cycles.
3659  */
3660 
3661  if (!use_bsearch)
3662  {
3663  int j;
3664 
3665  for (j = 0; j < nrels; j++)
3666  {
3667  if (RelFileNodeEquals(bufHdr->tag.rnode, srels[j].rnode))
3668  {
3669  srelent = &srels[j];
3670  break;
3671  }
3672  }
3673 
3674  }
3675  else
3676  {
3677  srelent = bsearch((const void *) &(bufHdr->tag.rnode),
3678  srels, nrels, sizeof(SMgrSortArray),
3679  rnode_comparator);
3680  }
3681 
3682  /* buffer doesn't belong to any of the given relfilenodes; skip it */
3683  if (srelent == NULL)
3684  continue;
3685 
3686  ReservePrivateRefCountEntry();
3687 
3688  buf_state = LockBufHdr(bufHdr);
3689  if (RelFileNodeEquals(bufHdr->tag.rnode, srelent->rnode) &&
3690  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3691  {
3692  PinBuffer_Locked(bufHdr);
3693  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3694  FlushBuffer(bufHdr, srelent->srel);
3695  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3696  UnpinBuffer(bufHdr, true);
3697  }
3698  else
3699  UnlockBufHdr(bufHdr, buf_state);
3700  }
3701 
3702  pfree(srels);
3703 }
3704 
3705 /* ---------------------------------------------------------------------
3706  * FlushDatabaseBuffers
3707  *
3708  * This function writes all dirty pages of a database out to disk
3709  * (or more accurately, out to kernel disk buffers), ensuring that the
3710  * kernel has an up-to-date view of the database.
3711  *
3712  * Generally, the caller should be holding an appropriate lock to ensure
3713  * no other backend is active in the target database; otherwise more
3714  * pages could get dirtied.
3715  *
3716  * Note we don't worry about flushing any pages of temporary relations.
3717  * It's assumed these wouldn't be interesting.
3718  * --------------------------------------------------------------------
3719  */
3720 void
3721 FlushDatabaseBuffers(Oid dbid)
3722 {
3723  int i;
3724  BufferDesc *bufHdr;
3725 
3726  /* Make sure we can handle the pin inside the loop */
3727  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3728 
3729  for (i = 0; i < NBuffers; i++)
3730  {
3731  uint32 buf_state;
3732 
3733  bufHdr = GetBufferDescriptor(i);
3734 
3735  /*
3736  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3737  * and saves some cycles.
3738  */
3739  if (bufHdr->tag.rnode.dbNode != dbid)
3740  continue;
3741 
3742  ReservePrivateRefCountEntry();
3743 
3744  buf_state = LockBufHdr(bufHdr);
3745  if (bufHdr->tag.rnode.dbNode == dbid &&
3746  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3747  {
3748  PinBuffer_Locked(bufHdr);
3749  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3750  FlushBuffer(bufHdr, NULL);
3751  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3752  UnpinBuffer(bufHdr, true);
3753  }
3754  else
3755  UnlockBufHdr(bufHdr, buf_state);
3756  }
3757 }
3758 
3759 /*
3760  * Flush a previously, shared or exclusively, locked and pinned buffer to the
3761  * OS.
3762  */
3763 void
3764 FlushOneBuffer(Buffer buffer)
3765 {
3766  BufferDesc *bufHdr;
3767 
3768  /* currently not needed, but no fundamental reason not to support */
3769  Assert(!BufferIsLocal(buffer));
3770 
3771  Assert(BufferIsPinned(buffer));
3772 
3773  bufHdr = GetBufferDescriptor(buffer - 1);
3774 
3775  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
3776 
3777  FlushBuffer(bufHdr, NULL);
3778 }
3779 
3780 /*
3781  * ReleaseBuffer -- release the pin on a buffer
3782  */
3783 void
3784 ReleaseBuffer(Buffer buffer)
3785 {
3786  if (!BufferIsValid(buffer))
3787  elog(ERROR, "bad buffer ID: %d", buffer);
3788 
3789  if (BufferIsLocal(buffer))
3790  {
3791  ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
3792 
3793  Assert(LocalRefCount[-buffer - 1] > 0);
3794  LocalRefCount[-buffer - 1]--;
3795  return;
3796  }
3797 
3798  UnpinBuffer(GetBufferDescriptor(buffer - 1), true);
3799 }
3800 
3801 /*
3802  * UnlockReleaseBuffer -- release the content lock and pin on a buffer
3803  *
3804  * This is just a shorthand for a common combination.
3805  */
3806 void
3807 UnlockReleaseBuffer(Buffer buffer)
3808 {
3809  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3810  ReleaseBuffer(buffer);
3811 }
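
For illustration, a hypothetical read-only caller (not part of bufmgr.c) using the usual pin/lock/inspect cycle that UnlockReleaseBuffer shortens:

#include "postgres.h"

#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "utils/rel.h"

static OffsetNumber
page_max_offset_example(Relation rel, BlockNumber blkno)
{
	Buffer		buf = ReadBuffer(rel, blkno);	/* pins the page */
	Page		page;
	OffsetNumber maxoff;

	LockBuffer(buf, BUFFER_LOCK_SHARE);	/* shared content lock */
	page = BufferGetPage(buf);
	maxoff = PageGetMaxOffsetNumber(page);
	UnlockReleaseBuffer(buf);	/* drop the content lock, then the pin */

	return maxoff;
}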
3812 
3813 /*
3814  * IncrBufferRefCount
3815  * Increment the pin count on a buffer that we have *already* pinned
3816  * at least once.
3817  *
3818  * This function cannot be used on a buffer we do not have pinned,
3819  * because it doesn't change the shared buffer state.
3820  */
3821 void
3822 IncrBufferRefCount(Buffer buffer)
3823 {
3824  Assert(BufferIsPinned(buffer));
3825  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3826  if (BufferIsLocal(buffer))
3827  LocalRefCount[-buffer - 1]++;
3828  else
3829  {
3830  PrivateRefCountEntry *ref;
3831 
3832  ref = GetPrivateRefCountEntry(buffer, true);
3833  Assert(ref != NULL);
3834  ref->refcount++;
3835  }
3836  ResourceOwnerRememberBuffer(CurrentResourceOwner, buffer);
3837 }
3838 
3839 /*
3840  * MarkBufferDirtyHint
3841  *
3842  * Mark a buffer dirty for non-critical changes.
3843  *
3844  * This is essentially the same as MarkBufferDirty, except:
3845  *
3846  * 1. The caller does not write WAL; so if checksums are enabled, we may need
3847  * to write an XLOG_FPI_FOR_HINT WAL record to protect against torn pages.
3848  * 2. The caller might have only share-lock instead of exclusive-lock on the
3849  * buffer's content lock.
3850  * 3. This function does not guarantee that the buffer is always marked dirty
3851  * (due to a race condition), so it cannot be used for important changes.
3852  */
3853 void
3854 MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
3855 {
3856  BufferDesc *bufHdr;
3857  Page page = BufferGetPage(buffer);
3858 
3859  if (!BufferIsValid(buffer))
3860  elog(ERROR, "bad buffer ID: %d", buffer);
3861 
3862  if (BufferIsLocal(buffer))
3863  {
3864  MarkLocalBufferDirty(buffer);
3865  return;
3866  }
3867 
3868  bufHdr = GetBufferDescriptor(buffer - 1);
3869 
3870  Assert(GetPrivateRefCount(buffer) > 0);
3871  /* here, either share or exclusive lock is OK */
3872  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
3873 
3874  /*
3875  * This routine might get called many times on the same page, if we are
3876  * making the first scan after commit of an xact that added/deleted many
3877  * tuples. So, be as quick as we can if the buffer is already dirty. We
3878  * do this by not acquiring spinlock if it looks like the status bits are
3879  * already set. Since we make this test unlocked, there's a chance we
3880  * might fail to notice that the flags have just been cleared, and failed
3881  * to reset them, due to memory-ordering issues. But since this function
3882  * is only intended to be used in cases where failing to write out the
3883  * data would be harmless anyway, it doesn't really matter.
3884  */
3885  if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
3886  (BM_DIRTY | BM_JUST_DIRTIED))
3887  {
3888  XLogRecPtr lsn = InvalidXLogRecPtr;
3889  bool dirtied = false;
3890  bool delayChkpt = false;
3891  uint32 buf_state;
3892 
3893  /*
3894  * If we need to protect hint bit updates from torn writes, WAL-log a
3895  * full page image of the page. This full page image is only necessary
3896  * if the hint bit update is the first change to the page since the
3897  * last checkpoint.
3898  *
3899  * We don't check full_page_writes here because that logic is included
3900  * when we call XLogInsert() since the value changes dynamically.
3901  */
3902  if (XLogHintBitIsNeeded() &&
3903  (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
3904  {
3905  /*
3906  * If we must not write WAL, due to a relfilenode-specific
3907  * condition or being in recovery, don't dirty the page. We can
3908  * set the hint, just not dirty the page as a result so the hint
3909  * is lost when we evict the page or shutdown.
3910  *
3911  * See src/backend/storage/page/README for longer discussion.
3912  */
3913  if (RecoveryInProgress() ||
3914  RelFileNodeSkippingWAL(bufHdr->tag.rnode))
3915  return;
3916 
3917  /*
3918  * If the block is already dirty because we either made a change
3919  * or set a hint already, then we don't need to write a full page
3920  * image. Note that aggressive cleaning of blocks dirtied by hint
3921  * bit setting would increase the call rate. Bulk setting of hint
3922  * bits would reduce the call rate...
3923  *
3924  * We must issue the WAL record before we mark the buffer dirty.
3925  * Otherwise we might write the page before we write the WAL. That
3926  * causes a race condition, since a checkpoint might occur between
3927  * writing the WAL record and marking the buffer dirty. We solve
3928  * that with a kluge, but one that is already in use during
3929  * transaction commit to prevent race conditions. Basically, we
3930  * simply prevent the checkpoint WAL record from being written
3931  * until we have marked the buffer dirty. We don't start the
3932  * checkpoint flush until we have marked dirty, so our checkpoint
3933  * must flush the change to disk successfully or the checkpoint
3934  * never gets written, so crash recovery will fix.
3935  *
3936  * It's possible we may enter here without an xid, so it is
3937  * essential that CreateCheckpoint waits for virtual transactions
3938  * rather than full transactionids.
3939  */
3940  MyProc->delayChkpt = delayChkpt = true;
3941  lsn = XLogSaveBufferForHint(buffer, buffer_std);
3942  }
3943 
3944  buf_state = LockBufHdr(bufHdr);
3945 
3946  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
3947 
3948  if (!(buf_state & BM_DIRTY))
3949  {
3950  dirtied = true; /* Means "will be dirtied by this action" */
3951 
3952  /*
3953  * Set the page LSN if we wrote a backup block. We aren't supposed
3954  * to set this when only holding a share lock but as long as we
3955  * serialise it somehow we're OK. We choose to set LSN while
3956  * holding the buffer header lock, which causes any reader of an
3957  * LSN who holds only a share lock to also obtain a buffer header
3958  * lock before using PageGetLSN(), which is enforced in
3959  * BufferGetLSNAtomic().
3960  *
3961  * If checksums are enabled, you might think we should reset the
3962  * checksum here. That will happen when the page is written
3963  * sometime later in this checkpoint cycle.
3964  */
3965  if (!XLogRecPtrIsInvalid(lsn))
3966  PageSetLSN(page, lsn);
3967  }
3968 
3969  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
3970  UnlockBufHdr(bufHdr, buf_state);
3971 
3972  if (delayChkpt)
3973  MyProc->delayChkpt = false;
3974 
3975  if (dirtied)
3976  {
3977  VacuumPageDirty++;
3978  pgBufferUsage.shared_blks_dirtied++;
3979  if (VacuumCostActive)
3980  VacuumCostBalance += VacuumCostPageDirty;
3981  }
3982  }
3983 }
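
A hypothetical sketch of the intended calling pattern (not taken from bufmgr.c, but in the spirit of the heap AM's hint-bit setting): the caller holds a pin and at least a share lock, makes a change that is harmless to lose, and then reports it with MarkBufferDirtyHint.

#include "postgres.h"

#include "access/htup_details.h"
#include "storage/bufmgr.h"

static void
set_hint_example(HeapTupleHeader tuple, Buffer buffer, uint16 infomask)
{
	/* A hint: losing this update would only cost a later recheck. */
	tuple->t_infomask |= infomask;

	/*
	 * buffer_std = true: the page has a standard layout, so the hole
	 * between pd_lower and pd_upper can be omitted from any full-page
	 * image written for torn-page protection.
	 */
	MarkBufferDirtyHint(buffer, true);
}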
3984 
3985 /*
3986  * Release buffer content locks for shared buffers.
3987  *
3988  * Used to clean up after errors.
3989  *
3990  * Currently, we can expect that lwlock.c's LWLockReleaseAll() took care
3991  * of releasing buffer content locks per se; the only thing we need to deal
3992  * with here is clearing any PIN_COUNT request that was in progress.
3993  */
3994 void
3995 UnlockBuffers(void)
3996 {
3997  BufferDesc *buf = PinCountWaitBuf;
3998 
3999  if (buf)
4000  {
4001  uint32 buf_state;
4002 
4003  buf_state = LockBufHdr(buf);
4004 
4005  /*
4006  * Don't complain if flag bit not set; it could have been reset but we
4007  * got a cancel/die interrupt before getting the signal.
4008  */
4009  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4010  buf->wait_backend_pid == MyProcPid)
4011  buf_state &= ~BM_PIN_COUNT_WAITER;
4012 
4013  UnlockBufHdr(buf, buf_state);
4014 
4015  PinCountWaitBuf = NULL;
4016  }
4017 }
4018 
4019 /*
4020  * Acquire or release the content_lock for the buffer.
4021  */
4022 void
4023 LockBuffer(Buffer buffer, int mode)
4024 {
4025  BufferDesc *buf;
4026 
4027  Assert(BufferIsPinned(buffer));
4028  if (BufferIsLocal(buffer))
4029  return; /* local buffers need no lock */
4030 
4031  buf = GetBufferDescriptor(buffer - 1);
4032 
4033  if (mode == BUFFER_LOCK_UNLOCK)
4034  LWLockRelease(BufferDescriptorGetContentLock(buf));
4035  else if (mode == BUFFER_LOCK_SHARE)
4036  LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_SHARED);
4037  else if (mode == BUFFER_LOCK_EXCLUSIVE)
4038  LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_EXCLUSIVE);
4039  else
4040  elog(ERROR, "unrecognized buffer lock mode: %d", mode);
4041 }
4042 
4043 /*
4044  * Acquire the content_lock for the buffer, but only if we don't have to wait.
4045  *
4046  * This assumes the caller wants BUFFER_LOCK_EXCLUSIVE mode.
4047  */
4048 bool
4049 ConditionalLockBuffer(Buffer buffer)
4050 {
4051  BufferDesc *buf;
4052 
4053  Assert(BufferIsPinned(buffer));
4054  if (BufferIsLocal(buffer))
4055  return true; /* act as though we got it */
4056 
4057  buf = GetBufferDescriptor(buffer - 1);
4058 
4059  return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
4060  LW_EXCLUSIVE);
4061 }
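
An illustrative, hypothetical use of ConditionalLockBuffer (not part of bufmgr.c): try for the exclusive content lock, but skip the work instead of waiting if someone else holds it.

#include "postgres.h"

#include "storage/bufmgr.h"

static bool
try_exclusive_example(Buffer buf)	/* buf is assumed to be pinned already */
{
	if (!ConditionalLockBuffer(buf))
		return false;			/* lock not free; caller retries later */

	/* ... modify the page and call MarkBufferDirty(buf) here ... */

	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
	return true;
}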
4062 
4063 /*
4064  * LockBufferForCleanup - lock a buffer in preparation for deleting items
4065  *
4066  * Items may be deleted from a disk page only when the caller (a) holds an
4067  * exclusive lock on the buffer and (b) has observed that no other backend
4068  * holds a pin on the buffer. If there is a pin, then the other backend
4069  * might have a pointer into the buffer (for example, a heapscan reference
4070  * to an item --- see README for more details). It's OK if a pin is added
4071  * after the cleanup starts, however; the newly-arrived backend will be
4072  * unable to look at the page until we release the exclusive lock.
4073  *
4074  * To implement this protocol, a would-be deleter must pin the buffer and
4075  * then call LockBufferForCleanup(). LockBufferForCleanup() is similar to
4076  * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE), except that it loops until
4077  * it has successfully observed pin count = 1.
4078  */
4079 void
4080 LockBufferForCleanup(Buffer buffer)
4081 {
4082  BufferDesc *bufHdr;
4083  char *new_status = NULL;
4084  TimestampTz waitStart = 0;
4085  bool logged_recovery_conflict = false;
4086 
4087  Assert(BufferIsPinned(buffer));
4088  Assert(PinCountWaitBuf == NULL);
4089 
4090  if (BufferIsLocal(buffer))
4091  {
4092  /* There should be exactly one pin */
4093  if (LocalRefCount[-buffer - 1] != 1)
4094  elog(ERROR, "incorrect local pin count: %d",
4095  LocalRefCount[-buffer - 1]);
4096  /* Nobody else to wait for */
4097  return;
4098  }
4099 
4100  /* There should be exactly one local pin */
4101  if (GetPrivateRefCount(buffer) != 1)
4102  elog(ERROR, "incorrect local pin count: %d",
4103  GetPrivateRefCount(buffer));
4104 
4105  bufHdr = GetBufferDescriptor(buffer - 1);
4106 
4107  for (;;)
4108  {
4109  uint32 buf_state;
4110 
4111  /* Try to acquire lock */
4112  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
4113  buf_state = LockBufHdr(bufHdr);
4114 
4115  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4116  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4117  {
4118  /* Successfully acquired exclusive lock with pincount 1 */
4119  UnlockBufHdr(bufHdr, buf_state);
4120 
4121  /*
4122  * Emit the log message if recovery conflict on buffer pin was
4123  * resolved but the startup process waited longer than
4124  * deadlock_timeout for it.
4125  */
4126  if (logged_recovery_conflict)
4127  LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
4128  waitStart, GetCurrentTimestamp(),
4129  NULL, false);
4130 
4131  /* Report change to non-waiting status */
4132  if (new_status)
4133  {
4134  set_ps_display(new_status);
4135  pfree(new_status);
4136  }
4137  return;
4138  }
4139  /* Failed, so mark myself as waiting for pincount 1 */
4140  if (buf_state & BM_PIN_COUNT_WAITER)
4141  {
4142  UnlockBufHdr(bufHdr, buf_state);
4143  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4144  elog(ERROR, "multiple backends attempting to wait for pincount 1");
4145  }
4146  bufHdr->wait_backend_pid = MyProcPid;
4147  PinCountWaitBuf = bufHdr;
4148  buf_state |= BM_PIN_COUNT_WAITER;
4149  UnlockBufHdr(bufHdr, buf_state);
4150  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4151 
4152  /* Wait to be signaled by UnpinBuffer() */
4153  if (InHotStandby)
4154  {
4155  /* Report change to waiting status */
4156  if (update_process_title && new_status == NULL)
4157  {
4158  const char *old_status;
4159  int len;
4160 
4161  old_status = get_ps_display(&len);
4162  new_status = (char *) palloc(len + 8 + 1);
4163  memcpy(new_status, old_status, len);
4164  strcpy(new_status + len, " waiting");
4165  set_ps_display(new_status);
4166  new_status[len] = '\0'; /* truncate off " waiting" */
4167  }
4168 
4169  /*
4170  * Emit the log message if the startup process is waiting longer
4171  * than deadlock_timeout for recovery conflict on buffer pin.
4172  *
4173  * Skip this if first time through because the startup process has
4174  * not started waiting yet in this case. So, the wait start
4175  * timestamp is set after this logic.
4176  */
4177  if (waitStart != 0 && !logged_recovery_conflict)
4178  {
4179  TimestampTz now = GetCurrentTimestamp();
4180 
4181  if (TimestampDifferenceExceeds(waitStart, now,
4182  DeadlockTimeout))
4183  {
4184  LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
4185  waitStart, now, NULL, true);
4186  logged_recovery_conflict = true;
4187  }
4188  }
4189 
4190  /*
4191  * Set the wait start timestamp if logging is enabled and first
4192  * time through.
4193  */
4194  if (log_recovery_conflict_waits && waitStart == 0)
4195  waitStart = GetCurrentTimestamp();
4196 
4197  /* Publish the bufid that Startup process waits on */
4198  SetStartupBufferPinWaitBufId(buffer - 1);
4199  /* Set alarm and then wait to be signaled by UnpinBuffer() */
4200  ResolveRecoveryConflictWithBufferPin();
4201  /* Reset the published bufid */
4202  SetStartupBufferPinWaitBufId(-1);
4203  }
4204  else
4205  ProcWaitForSignal(PG_WAIT_BUFFER_PIN);
4206 
4207  /*
4208  * Remove flag marking us as waiter. Normally this will not be set
4209  * anymore, but ProcWaitForSignal() can return for other signals as
4210  * well. We take care to only reset the flag if we're the waiter, as
4211  * theoretically another backend could have started waiting. That's
4212  * impossible with the current usages due to table level locking, but
4213  * better be safe.
4214  */
4215  buf_state = LockBufHdr(bufHdr);
4216  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4217  bufHdr->wait_backend_pid == MyProcPid)
4218  buf_state &= ~BM_PIN_COUNT_WAITER;
4219  UnlockBufHdr(bufHdr, buf_state);
4220 
4221  PinCountWaitBuf = NULL;
4222  /* Loop back and try again */
4223  }
4224 }
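
An illustrative sketch of the pin-then-cleanup-lock protocol described above, as a hypothetical VACUUM-like caller (not part of bufmgr.c) might use it:

#include "postgres.h"

#include "storage/bufmgr.h"
#include "utils/rel.h"

static void
cleanup_lock_example(Relation rel, BlockNumber blkno,
					 BufferAccessStrategy strategy)
{
	Buffer		buf;

	/* Pin the page first; the pin alone does not block other readers. */
	buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, strategy);

	/* Blocks until we hold the exclusive lock and are the only pinner. */
	LockBufferForCleanup(buf);

	/* ... now it is safe to remove line pointers or defragment ... */

	UnlockReleaseBuffer(buf);
}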
4225 
4226 /*
4227  * Check called from RecoveryConflictInterrupt handler when Startup
4228  * process requests cancellation of all pin holders that are blocking it.
4229  */
4230 bool
4231 HoldingBufferPinThatDelaysRecovery(void)
4232 {
4233  int bufid = GetStartupBufferPinWaitBufId();
4234 
4235  /*
4236  * If we get woken slowly then it's possible that the Startup process was
4237  * already woken by other backends before we got here. Also possible that
4238  * we get here by multiple interrupts or interrupts at inappropriate
4239  * times, so make sure we do nothing if the bufid is not set.
4240  */
4241  if (bufid < 0)
4242  return false;
4243 
4244  if (GetPrivateRefCount(bufid + 1) > 0)
4245  return true;
4246 
4247  return false;
4248 }
4249 
4250 /*
4251  * ConditionalLockBufferForCleanup - as above, but don't wait to get the lock
4252  *
4253  * We won't loop, but just check once to see if the pin count is OK. If
4254  * not, return false with no lock held.
4255  */
4256 bool
4257 ConditionalLockBufferForCleanup(Buffer buffer)
4258 {
4259  BufferDesc *bufHdr;
4260  uint32 buf_state,
4261  refcount;
4262 
4263  Assert(BufferIsValid(buffer));
4264 
4265  if (BufferIsLocal(buffer))
4266  {
4267  refcount = LocalRefCount[-buffer - 1];
4268  /* There should be exactly one pin */
4269  Assert(refcount > 0);
4270  if (refcount != 1)
4271  return false;
4272  /* Nobody else to wait for */
4273  return true;
4274  }
4275 
4276  /* There should be exactly one local pin */
4277  refcount = GetPrivateRefCount(buffer);
4278  Assert(refcount);
4279  if (refcount != 1)
4280  return false;
4281 
4282  /* Try to acquire lock */
4283  if (!ConditionalLockBuffer(buffer))
4284  return false;
4285 
4286  bufHdr = GetBufferDescriptor(buffer - 1);
4287  buf_state = LockBufHdr(bufHdr);
4288  refcount = BUF_STATE_GET_REFCOUNT(buf_state);
4289 
4290  Assert(refcount > 0);
4291  if (refcount == 1)
4292  {
4293  /* Successfully acquired exclusive lock with pincount 1 */
4294  UnlockBufHdr(bufHdr, buf_state);
4295  return true;
4296  }
4297 
4298  /* Failed, so release the lock */
4299  UnlockBufHdr(bufHdr, buf_state);
4300  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4301  return false;
4302 }
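
For illustration, a hypothetical lazy-VACUUM-style caller (not part of bufmgr.c) that skips a page rather than waiting for the cleanup lock:

#include "postgres.h"

#include "storage/bufmgr.h"

static bool
try_cleanup_example(Buffer buf)	/* buf is assumed to be pinned already */
{
	if (!ConditionalLockBufferForCleanup(buf))
	{
		ReleaseBuffer(buf);		/* give up the pin and move on */
		return false;
	}

	/* ... cleanup work under the exclusive (cleanup) lock ... */

	UnlockReleaseBuffer(buf);
	return true;
}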
4303 
4304 /*
4305  * IsBufferCleanupOK - as above, but we already have the lock
4306  *
4307  * Check whether it's OK to perform cleanup on a buffer we've already
4308  * locked. If we observe that the pin count is 1, our exclusive lock
4309  * happens to be a cleanup lock, and we can proceed with anything that
4310  * would have been allowable had we sought a cleanup lock originally.
4311  */
4312 bool
4313 IsBufferCleanupOK(Buffer buffer)
4314 {
4315  BufferDesc *bufHdr;
4316  uint32 buf_state;
4317 
4318  Assert(BufferIsValid(buffer));
4319 
4320  if (BufferIsLocal(buffer))
4321  {
4322  /* There should be exactly one pin */
4323  if (LocalRefCount[-buffer - 1] != 1)
4324  return false;
4325  /* Nobody else to wait for */
4326  return true;
4327  }
4328 
4329  /* There should be exactly one local pin */
4330  if (GetPrivateRefCount(buffer) != 1)
4331  return false;
4332 
4333  bufHdr = GetBufferDescriptor(buffer - 1);
4334 
4335  /* caller must hold exclusive lock on buffer */
4336  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
4337  LW_EXCLUSIVE));
4338 
4339  buf_state = LockBufHdr(bufHdr);
4340 
4341  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4342  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4343  {
4344  /* pincount is OK. */
4345  UnlockBufHdr(bufHdr, buf_state);
4346  return true;
4347  }
4348 
4349  UnlockBufHdr(bufHdr, buf_state);
4350  return false;
4351 }
4352 
4353 
4354 /*
4355  * Functions for buffer I/O handling
4356  *
4357  * Note: We assume that nested buffer I/O never occurs.
4358  * i.e at most one BM_IO_IN_PROGRESS bit is set per proc.
4359  *
4360  * Also note that these are used only for shared buffers, not local ones.
4361  */
4362 
4363 /*
4364  * WaitIO -- Block until the IO_IN_PROGRESS flag on 'buf' is cleared.
4365  */
4366 static void
4367 WaitIO(BufferDesc *buf)
4368 {
4369  ConditionVariable *cv = BufferDescriptorGetIOCV(buf);
4370 
4371  ConditionVariablePrepareToSleep(cv);
4372  for (;;)
4373  {
4374  uint32 buf_state;
4375 
4376  /*
4377  * It may not be necessary to acquire the spinlock to check the flag
4378  * here, but since this test is essential for correctness, we'd better
4379  * play it safe.
4380  */
4381  buf_state = LockBufHdr(buf);
4382  UnlockBufHdr(buf, buf_state);
4383 
4384  if (!(buf_state & BM_IO_IN_PROGRESS))
4385  break;
4386  ConditionVariableSleep(cv, WAIT_EVENT_BUFFER_IO);
4387  }
4388  ConditionVariableCancelSleep();
4389 }
4390 
4391 /*
4392  * StartBufferIO: begin I/O on this buffer
4393  * (Assumptions)
4394  * My process is executing no IO
4395  * The buffer is Pinned
4396  *
4397  * In some scenarios there are race conditions in which multiple backends
4398  * could attempt the same I/O operation concurrently. If someone else
4399  * has already started I/O on this buffer then we will block on the
4400  * I/O condition variable until he's done.
4401  *
4402  * Input operations are only attempted on buffers that are not BM_VALID,
4403  * and output operations only on buffers that are BM_VALID and BM_DIRTY,
4404  * so we can always tell if the work is already done.
4405  *
4406  * Returns true if we successfully marked the buffer as I/O busy,
4407  * false if someone else already did the work.
4408  */
4409 static bool
4410 StartBufferIO(BufferDesc *buf, bool forInput)
4411 {
4412  uint32 buf_state;
4413 
4414  Assert(!InProgressBuf);
4415 
4416  for (;;)
4417  {
4418  buf_state = LockBufHdr(buf);
4419 
4420  if (!(buf_state & BM_IO_IN_PROGRESS))
4421  break;
4422  UnlockBufHdr(buf, buf_state);
4423  WaitIO(buf);
4424  }
4425 
4426  /* Once we get here, there is definitely no I/O active on this buffer */
4427 
4428  if (forInput ? (buf_state & BM_VALID) : !(buf_state & BM_DIRTY))
4429  {
4430  /* someone else already did the I/O */
4431  UnlockBufHdr(buf, buf_state);
4432  return false;
4433  }
4434 
4435  buf_state |= BM_IO_IN_PROGRESS;
4436  UnlockBufHdr(buf, buf_state);
4437 
4438  InProgressBuf = buf;
4439  IsForInput = forInput;
4440 
4441  return true;
4442 }
4443 
4444 /*
4445  * TerminateBufferIO: release a buffer we were doing I/O on
4446  * (Assumptions)
4447  * My process is executing IO for the buffer
4448  * BM_IO_IN_PROGRESS bit is set for the buffer
4449  * The buffer is Pinned
4450  *
4451  * If clear_dirty is true and BM_JUST_DIRTIED is not set, we clear the
4452  * buffer's BM_DIRTY flag. This is appropriate when terminating a
4453  * successful write. The check on BM_JUST_DIRTIED is necessary to avoid
4454  * marking the buffer clean if it was re-dirtied while we were writing.
4455  *
4456  * set_flag_bits gets ORed into the buffer's flags. It must include
4457  * BM_IO_ERROR in a failure case. For successful completion it could
4458  * be 0, or BM_VALID if we just finished reading in the page.
4459  */
4460 static void
4461 TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits)
4462 {
4463  uint32 buf_state;
4464 
4465  Assert(buf == InProgressBuf);
4466 
4467  buf_state = LockBufHdr(buf);
4468 
4469  Assert(buf_state & BM_IO_IN_PROGRESS);
4470 
4471  buf_state &= ~(BM_IO_IN_PROGRESS | BM_IO_ERROR);
4472  if (clear_dirty && !(buf_state & BM_JUST_DIRTIED))
4473  buf_state &= ~(BM_DIRTY | BM_CHECKPOINT_NEEDED);
4474 
4475  buf_state |= set_flag_bits;
4476  UnlockBufHdr(buf, buf_state);
4477 
4478  InProgressBuf = NULL;
4479 
4480  ConditionVariableBroadcast(BufferDescriptorGetIOCV(buf));
4481 }
4482 
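A condensed, hypothetical sketch of the write-side I/O protocol (not a real function in bufmgr.c; it mirrors what FlushBuffer does and relies on this file's own declarations). Error handling and checksum/WAL concerns are omitted.

static void
write_protocol_example(BufferDesc *buf, SMgrRelation reln, char *bufBlock)
{
	/* Claim the buffer for output; false means someone else already wrote it. */
	if (!StartBufferIO(buf, false))
		return;

	smgrwrite(reln, buf->tag.forkNum, buf->tag.blockNum, bufBlock, false);

	/* Clear BM_DIRTY (unless re-dirtied meanwhile) and BM_IO_IN_PROGRESS. */
	TerminateBufferIO(buf, true, 0);
}
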
4483 /*
4484  * AbortBufferIO: Clean up any active buffer I/O after an error.
4485  *
4486  * All LWLocks we might have held have been released,
4487  * but we haven't yet released buffer pins, so the buffer is still pinned.
4488  *
4489  * If I/O was in progress, we always set BM_IO_ERROR, even though it's
4490  * possible the error condition wasn't related to the I/O.
4491  */
4492 void
4493 AbortBufferIO(void)
4494 {
4495  BufferDesc *buf = InProgressBuf;
4496 
4497  if (buf)
4498  {
4499  uint32 buf_state;
4500 
4501  buf_state = LockBufHdr(buf);
4502  Assert(buf_state & BM_IO_IN_PROGRESS);
4503  if (IsForInput)
4504  {
4505  Assert(!(buf_state & BM_DIRTY));
4506 
4507  /* We'd better not think buffer is valid yet */
4508  Assert(!(buf_state & BM_VALID));
4509  UnlockBufHdr(buf, buf_state);
4510  }
4511  else
4512  {
4513  Assert(buf_state & BM_DIRTY);
4514  UnlockBufHdr(buf, buf_state);
4515  /* Issue notice if this is not the first failure... */
4516  if (buf_state & BM_IO_ERROR)
4517  {
4518  /* Buffer is pinned, so we can read tag without spinlock */
4519  char *path;
4520 
4521  path = relpathperm(buf->tag.rnode, buf->tag.forkNum);
4522  ereport(WARNING,
4523  (errcode(ERRCODE_IO_ERROR),
4524  errmsg("could not write block %u of %s",
4525  buf->tag.blockNum, path),
4526  errdetail("Multiple failures --- write error might be permanent.")));
4527  pfree(path);
4528  }
4529  }
4530  TerminateBufferIO(buf, false, BM_IO_ERROR);
4531  }
4532 }
4533 
4534 /*
4535  * Error context callback for errors occurring during shared buffer writes.
4536  */
4537 static void
4538 shared_buffer_write_error_callback(void *arg)
4539 {
4540  BufferDesc *bufHdr = (BufferDesc *) arg;
4541 
4542  /* Buffer is pinned, so we can read the tag without locking the spinlock */
4543  if (bufHdr != NULL)
4544  {
4545  char *path = relpathperm(bufHdr->tag.rnode, bufHdr->tag.forkNum);
4546 
4547  errcontext("writing block %u of relation %s",
4548  bufHdr->tag.blockNum, path);
4549  pfree(path);
4550  }
4551 }
4552 
4553 /*
4554  * Error context callback for errors occurring during local buffer writes.
4555  */
4556 static void
4557 local_buffer_write_error_callback(void *arg)
4558 {
4559  BufferDesc *bufHdr = (BufferDesc *) arg;
4560 
4561  if (bufHdr != NULL)
4562  {
4563  char *path = relpathbackend(bufHdr->tag.rnode, MyBackendId,
4564  bufHdr->tag.forkNum);
4565 
4566  errcontext("writing block %u of relation %s",
4567  bufHdr->tag.blockNum, path);
4568  pfree(path);
4569  }
4570 }
4571 
4572 /*
4573  * RelFileNode qsort/bsearch comparator; see RelFileNodeEquals.
4574  */
4575 static int
4576 rnode_comparator(const void *p1, const void *p2)
4577 {
4578  RelFileNode n1 = *(const RelFileNode *) p1;
4579  RelFileNode n2 = *(const RelFileNode *) p2;
4580 
4581  if (n1.relNode < n2.relNode)
4582  return -1;
4583  else if (n1.relNode > n2.relNode)
4584  return 1;
4585 
4586  if (n1.dbNode < n2.dbNode)
4587  return -1;
4588  else if (n1.dbNode > n2.dbNode)
4589  return 1;
4590 
4591  if (n1.spcNode < n2.spcNode)
4592  return -1;
4593  else if (n1.spcNode > n2.spcNode)
4594  return 1;
4595  else
4596  return 0;
4597 }
4598 
4599 /*
4600  * Lock buffer header - set BM_LOCKED in buffer state.
4601  */
4602 uint32
4603 LockBufHdr(BufferDesc *desc)
4604 {
4605  SpinDelayStatus delayStatus;
4606  uint32 old_buf_state;
4607 
4608  init_local_spin_delay(&delayStatus);
4609 
4610  while (true)
4611  {
4612  /* set BM_LOCKED flag */
4613  old_buf_state = pg_atomic_fetch_or_u32(&desc->state, BM_LOCKED);
4614  /* if it wasn't set before we're OK */
4615  if (!(old_buf_state & BM_LOCKED))
4616  break;
4617  perform_spin_delay(&delayStatus);
4618  }
4619  finish_spin_delay(&delayStatus);
4620  return old_buf_state | BM_LOCKED;
4621 }
4622 
4623 /*
4624  * Wait until the BM_LOCKED flag isn't set anymore and return the buffer's
4625  * state at that point.
4626  *
4627  * Obviously the buffer could be locked by the time the value is returned, so
4628  * this is primarily useful in CAS style loops.
4629  */
4630 static uint32
4631 WaitBufHdrUnlocked(BufferDesc *buf)
4632 {
4633  SpinDelayStatus delayStatus;
4634  uint32 buf_state;
4635 
4636  init_local_spin_delay(&delayStatus);
4637 
4638  buf_state = pg_atomic_read_u32(&buf->state);
4639 
4640  while (buf_state & BM_LOCKED)
4641  {
4642  perform_spin_delay(&delayStatus);
4643  buf_state = pg_atomic_read_u32(&buf->state);
4644  }
4645 
4646  finish_spin_delay(&delayStatus);
4647 
4648  return buf_state;
4649 }
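
The CAS-style loop mentioned above, as a hypothetical sketch modelled on PinBuffer() (not an additional bufmgr.c function; it relies on the file's own includes): read the state word, wait out the header spinlock if needed, and retry the compare-and-swap until it succeeds.

static void
cas_loop_example(BufferDesc *buf)
{
	uint32		old_buf_state = pg_atomic_read_u32(&buf->state);
	uint32		buf_state;

	for (;;)
	{
		/* While the header spinlock is held, spin-wait instead of CASing. */
		if (old_buf_state & BM_LOCKED)
			old_buf_state = WaitBufHdrUnlocked(buf);

		buf_state = old_buf_state + BUF_REFCOUNT_ONE;	/* the intended edit */

		/* Succeeds only if nobody changed the state word in the meantime. */
		if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
										   buf_state))
			break;
	}
}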
4650 
4651 /*
4652  * BufferTag comparator.
4653  */
4654 static inline int
4655 buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
4656 {
4657  int ret;
4658 
4659  ret = rnode_comparator(&ba->rnode, &bb->rnode);
4660 
4661  if (ret != 0)
4662  return ret;
4663 
4664  if (ba->forkNum < bb->forkNum)
4665  return -1;
4666  if (ba->forkNum > bb->forkNum)
4667  return 1;
4668 
4669  if (ba->blockNum < bb->blockNum)
4670  return -1;
4671  if (ba->blockNum > bb->blockNum)
4672  return 1;
4673 
4674  return 0;
4675 }
4676 
4677 /*
4678  * Comparator determining the writeout order in a checkpoint.
4679  *
4680  * It is important that tablespaces are compared first, the logic balancing
4681  * writes between tablespaces relies on it.
4682  */
4683 static inline int
4684 ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
4685 {
4686  /* compare tablespace */
4687  if (a->tsId < b->tsId)
4688  return -1;
4689  else if (a->tsId > b->tsId)
4690  return 1;
4691  /* compare relation */
4692  if (a->relNode < b->relNode)
4693  return -1;
4694  else if (a->relNode > b->relNode)
4695  return 1;
4696  /* compare fork */
4697  else if (a->forkNum < b->forkNum)
4698  return -1;
4699  else if (a->forkNum > b->forkNum)
4700  return 1;
4701  /* compare block number */
4702  else if (a->blockNum < b->blockNum)
4703  return -1;
4704  else if (a->blockNum > b->blockNum)
4705  return 1;
4706  /* equal page IDs are unlikely, but not impossible */
4707  return 0;
4708 }
4709 
4710 /*
4711  * Comparator for a Min-Heap over the per-tablespace checkpoint completion
4712  * progress.
4713  */
4714 static int
4715 ts_ckpt_progress_comparator(Datum a, Datum b, void *arg)
4716 {
4717  CkptTsStatus *sa = (CkptTsStatus *) a;
4718  CkptTsStatus *sb = (CkptTsStatus *) b;
4719 
4720  /* we want a min-heap, so return 1 for the a < b */
4721  if (sa->progress < sb->progress)
4722  return 1;
4723  else if (sa->progress == sb->progress)
4724  return 0;
4725  else
4726  return -1;
4727 }
4728 
4729 /*
4730  * Initialize a writeback context, discarding potential previous state.
4731  *
4732  * *max_pending is a pointer instead of an immediate value, so the coalesce
4733  * limits can easily changed by the GUC mechanism, and so calling code does
4734  * not have to check the current configuration. A value of 0 means that no
4735  * writeback control will be performed.
4736  */
4737 void
4738 WritebackContextInit(WritebackContext *context, int *max_pending)
4739 {
4740  Assert(*max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
4741 
4742  context->max_pending = max_pending;
4743  context->nr_pending = 0;
4744 }
4745 
4746 /*
4747  * Add buffer to list of pending writeback requests.
4748  */
4749 void
4750 ScheduleBufferTagForWriteback(WritebackContext *context, BufferTag *tag)
4751 {
4752  PendingWriteback *pending;
4753 
4754  /*
4755  * Add buffer to the pending writeback array, unless writeback control is
4756  * disabled.
4757  */
4758  if (*context->max_pending > 0)
4759  {
4760  Assert(*context->max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
4761 
4762  pending = &context->pending_writebacks[context->nr_pending++];
4763 
4764  pending->tag = *tag;
4765  }
4766 
4767  /*
4768  * Perform pending flushes if the writeback limit is exceeded. This
4769  * includes the case where previously an item has been added, but control
4770  * is now disabled.
4771  */
4772  if (context->nr_pending >= *context->max_pending)
4773  IssuePendingWritebacks(context);
4774 }
4775 
4776 #define ST_SORT sort_pending_writebacks
4777 #define ST_ELEMENT_TYPE PendingWriteback
4778 #define ST_COMPARE(a, b) buffertag_comparator(&a->tag, &b->tag)
4779 #define ST_SCOPE static
4780 #define ST_DEFINE
4781 #include "lib/sort_template.h"
4782 
4783 /*
4784  * Issue all pending writeback requests, previously scheduled with
4785  * ScheduleBufferTagForWriteback, to the OS.
4786  *
4787  * Because this is only used to improve the OSs IO scheduling we try to never
4788  * error out - it's just a hint.
4789  */
4790 void
4791 IssuePendingWritebacks(WritebackContext *context)
4792 {
4793  int i;
4794 
4795  if (context->nr_pending == 0)
4796  return;
4797 
4798  /*
4799  * Executing the writes in-order can make them a lot faster, and allows to
4800  * merge writeback requests to consecutive blocks into larger writebacks.
4801  */
4802  sort_pending_writebacks(context->pending_writebacks, context->nr_pending);
4803 
4804  /*
4805  * Coalesce neighbouring writes, but nothing else. For that we iterate
4806  * through the, now sorted, array of pending flushes, and look forward to
4807  * find all neighbouring (or identical) writes.
4808  */
4809  for (i = 0; i < context->nr_pending; i++)
4810  {
4811  PendingWriteback *cur;
4812  PendingWriteback *next;
4813  SMgrRelation reln;
4814  int ahead;
4815  BufferTag tag;
4816  Size nblocks = 1;
4817 
4818  cur = &context->pending_writebacks[i];
4819  tag = cur->tag;
4820 
4821  /*
4822  * Peek ahead, into following writeback requests, to see if they can
4823  * be combined with the current one.
4824  */
4825  for (ahead = 0; i + ahead + 1 < context->nr_pending; ahead++)
4826  {
4827  next = &context->pending_writebacks[i + ahead + 1];
4828 
4829  /* different file, stop */
4830  if (!RelFileNodeEquals(cur->tag.rnode, next->tag.rnode) ||
4831  cur->tag.forkNum != next->tag.forkNum)
4832  break;
4833 
4834  /* ok, block queued twice, skip */
4835  if (cur->tag.blockNum == next->tag.blockNum)
4836  continue;
4837 
4838  /* only merge consecutive writes */
4839  if (cur->tag.blockNum + 1 != next->tag.blockNum)
4840  break;
4841 
4842  nblocks++;
4843  cur = next;
4844  }
4845 
4846  i += ahead;
4847 
4848  /* and finally tell the kernel to write the data to storage */
4849  reln = smgropen(tag.rnode, InvalidBackendId);
4850  smgrwriteback(reln, tag.forkNum, tag.blockNum, nblocks);
4851  }
4852 
4853  context->nr_pending = 0;
4854 }
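
Putting the three writeback entry points together, a hypothetical flushing loop (not part of bufmgr.c; it assumes this file's includes and the checkpoint_flush_after GUC) would look roughly like this:

static void
writeback_example(BufferTag *tags, int ntags)
{
	WritebackContext wb_context;
	int			i;

	/* A value of 0 in *checkpoint_flush_after disables writeback control. */
	WritebackContextInit(&wb_context, &checkpoint_flush_after);

	for (i = 0; i < ntags; i++)
		ScheduleBufferTagForWriteback(&wb_context, &tags[i]);

	/* Push out whatever is still queued once the loop is done. */
	IssuePendingWritebacks(&wb_context);
}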
4855 
4856 
4857 /*
4858  * Implement slower/larger portions of TestForOldSnapshot
4859  *
4860  * Smaller/faster portions are put inline, but the entire set of logic is too
4861  * big for that.
4862  */
4863 void
4864 TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
4865 {
4866  if (RelationAllowsEarlyPruning(relation)
4867  && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
4868  ereport(ERROR,
4869  (errcode(ERRCODE_SNAPSHOT_TOO_OLD),
4870  errmsg("snapshot too old")));
4871 }