bufmgr.c
1 /*-------------------------------------------------------------------------
2  *
3  * bufmgr.c
4  * buffer manager interface routines
5  *
6  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/buffer/bufmgr.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Principal entry points:
17  *
18  * ReadBuffer() -- find or create a buffer holding the requested page,
19  * and pin it so that no one can destroy it while this process
20  * is using it.
21  *
22  * ReleaseBuffer() -- unpin a buffer
23  *
24  * MarkBufferDirty() -- mark a pinned buffer's contents as "dirty".
25  * The disk write is delayed until buffer replacement or checkpoint.
26  *
27  * See also these files:
28  * freelist.c -- chooses victim for buffer replacement
29  * buf_table.c -- manages the buffer lookup table
30  */
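/*
 * A typical caller of these entry points pins a page, locks it while reading
 * or modifying it, marks it dirty if anything changed, and then releases it.
 * Sketch only (the relation and block number are placeholders; real callers
 * also emit WAL while holding the exclusive lock):
 *
 *		Buffer		buf = ReadBuffer(rel, blkno);
 *		Page		page;
 *
 *		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 *		page = BufferGetPage(buf);
 *		... examine or modify the page contents ...
 *		MarkBufferDirty(buf);
 *		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 *		ReleaseBuffer(buf);
 */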
31 #include "postgres.h"
32 
33 #include <sys/file.h>
34 #include <unistd.h>
35 
36 #include "access/tableam.h"
37 #include "access/xlogutils.h"
38 #include "catalog/catalog.h"
39 #include "catalog/storage.h"
40 #include "executor/instrument.h"
41 #include "lib/binaryheap.h"
42 #include "miscadmin.h"
43 #include "pg_trace.h"
44 #include "pgstat.h"
45 #include "postmaster/bgwriter.h"
46 #include "storage/buf_internals.h"
47 #include "storage/bufmgr.h"
48 #include "storage/ipc.h"
49 #include "storage/proc.h"
50 #include "storage/smgr.h"
51 #include "storage/standby.h"
52 #include "utils/memdebug.h"
53 #include "utils/ps_status.h"
54 #include "utils/rel.h"
55 #include "utils/resowner_private.h"
56 #include "utils/timestamp.h"
57 
58 
59 /* Note: these two macros only work on shared buffers, not local ones! */
60 #define BufHdrGetBlock(bufHdr) ((Block) (BufferBlocks + ((Size) (bufHdr)->buf_id) * BLCKSZ))
61 #define BufferGetLSN(bufHdr) (PageGetLSN(BufHdrGetBlock(bufHdr)))
62 
63 /* Note: this macro only works on local buffers, not shared ones! */
64 #define LocalBufHdrGetBlock(bufHdr) \
65  LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
66 
67 /* Bits in SyncOneBuffer's return value */
68 #define BUF_WRITTEN 0x01
69 #define BUF_REUSABLE 0x02
70 
71 #define RELS_BSEARCH_THRESHOLD 20
72 
73 /*
74  * This is the size (in the number of blocks) above which we scan the
75  * entire buffer pool to remove the buffers for all the pages of relation
76  * being dropped. For the relations with size below this threshold, we find
77  * the buffers by doing lookups in BufMapping table.
78  */
79 #define BUF_DROP_FULL_SCAN_THRESHOLD (uint64) (NBuffers / 32)
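/*
 * For example, with the default shared_buffers of 128MB (NBuffers = 16384)
 * the threshold works out to 16384 / 32 = 512 blocks, i.e. 4MB of relation
 * data: smaller relations have their buffers found via BufMapping lookups,
 * larger ones by scanning the whole buffer pool.
 */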
80 
81 typedef struct PrivateRefCountEntry
82 {
83  Buffer buffer;
84  int32 refcount;
85 } PrivateRefCountEntry;
86 
87 /* 64 bytes, about the size of a cache line on common systems */
88 #define REFCOUNT_ARRAY_ENTRIES 8
89 
90 /*
91  * Status of buffers to checkpoint for a particular tablespace, used
92  * internally in BufferSync.
93  */
94 typedef struct CkptTsStatus
95 {
96  /* oid of the tablespace */
97  Oid tsId;
98 
99  /*
100  * Checkpoint progress for this tablespace. To make progress comparable
101  * between tablespaces the progress is, for each tablespace, measured as a
102  * number between 0 and the total number of to-be-checkpointed pages. Each
103  * page checkpointed in this tablespace increments this space's progress
104  * by progress_slice.
105  */
106  float8 progress;
107  float8 progress_slice;
108 
109  /* number of to-be checkpointed pages in this tablespace */
110  int num_to_scan;
111  /* already processed pages in this tablespace */
112  int num_scanned;
113 
114  /* current offset in CkptBufferIds for this tablespace */
115  int index;
116 } CkptTsStatus;
117 
118 /*
119  * Type for array used to sort SMgrRelations
120  *
121  * FlushRelationsAllBuffers shares the same comparator function with
122  * DropRelFileNodesAllBuffers. Pointer to this struct and RelFileNode must be
123  * compatible.
124  */
125 typedef struct SMgrSortArray
126 {
127  RelFileNode rnode; /* This must be the first member */
128  SMgrRelation srel;
129 } SMgrSortArray;
130 
131 /* GUC variables */
132 bool zero_damaged_pages = false;
133 int bgwriter_lru_maxpages = 100;
134 double bgwriter_lru_multiplier = 2.0;
135 bool track_io_timing = false;
136 
137 /*
138  * How many buffers PrefetchBuffer callers should try to stay ahead of their
139  * ReadBuffer calls by. Zero means "never prefetch". This value is only used
140  * for buffers not belonging to tablespaces that have their
141  * effective_io_concurrency parameter set.
142  */
143 int effective_io_concurrency = 0;
144 
145 /*
146  * Like effective_io_concurrency, but used by maintenance code paths that might
147  * benefit from a higher setting because they work on behalf of many sessions.
148  * Overridden by the tablespace setting of the same name.
149  */
150 int maintenance_io_concurrency = 0;
151 
152 /*
153  * GUC variables about triggering kernel writeback for buffers written; OS
154  * dependent defaults are set via the GUC mechanism.
155  */
156 int checkpoint_flush_after = 0;
157 int bgwriter_flush_after = 0;
158 int backend_flush_after = 0;
159 
160 /* local state for StartBufferIO and related functions */
161 static BufferDesc *InProgressBuf = NULL;
162 static bool IsForInput;
163 
164 /* local state for LockBufferForCleanup */
165 static BufferDesc *PinCountWaitBuf = NULL;
166 
167 /*
168  * Backend-Private refcount management:
169  *
170  * Each buffer also has a private refcount that keeps track of the number of
171  * times the buffer is pinned in the current process. This is so that the
172  * shared refcount needs to be modified only once if a buffer is pinned more
173  * than once by an individual backend. It's also used to check that no buffers
174  * are still pinned at the end of transactions and when exiting.
175  *
176  *
177  * To avoid - as we used to - requiring an array with NBuffers entries to keep
178  * track of local buffers, we use a small sequentially searched array
179  * (PrivateRefCountArray) and an overflow hash table (PrivateRefCountHash) to
180  * keep track of backend local pins.
181  *
182  * Until no more than REFCOUNT_ARRAY_ENTRIES buffers are pinned at once, all
183  * refcounts are kept track of in the array; after that, new array entries
184  * displace old ones into the hash table. That way a frequently used entry
185  * can't get "stuck" in the hashtable while infrequent ones clog the array.
186  *
187  * Note that in most scenarios the number of pinned buffers will not exceed
188  * REFCOUNT_ARRAY_ENTRIES.
189  *
190  *
191  * To enter a buffer into the refcount tracking mechanism first reserve a free
192  * entry using ReservePrivateRefCountEntry() and then later, if necessary,
193  * fill it with NewPrivateRefCountEntry(). That split lets us avoid doing
194  * memory allocations in NewPrivateRefCountEntry() which can be important
195  * because in some scenarios it's called with a spinlock held...
196  */
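/*
 * In code form, the calling pattern described above is roughly (sketch only;
 * see PinBuffer() below for a real user):
 *
 *		ReservePrivateRefCountEntry();
 *		... possibly acquire a buffer header spinlock ...
 *		ref = NewPrivateRefCountEntry(buffer);
 *		ref->refcount++;
 */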
197 static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES];
198 static HTAB *PrivateRefCountHash = NULL;
199 static int32 PrivateRefCountOverflowed = 0;
200 static uint32 PrivateRefCountClock = 0;
201 static PrivateRefCountEntry *ReservedRefCountEntry = NULL;
202 
203 static void ReservePrivateRefCountEntry(void);
204 static PrivateRefCountEntry *NewPrivateRefCountEntry(Buffer buffer);
205 static PrivateRefCountEntry *GetPrivateRefCountEntry(Buffer buffer, bool do_move);
206 static inline int32 GetPrivateRefCount(Buffer buffer);
207 static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref);
208 
209 /*
210  * Ensure that the PrivateRefCountArray has sufficient space to store one more
211  * entry. This has to be called before using NewPrivateRefCountEntry() to fill
212  * a new entry - but it's perfectly fine to not use a reserved entry.
213  */
214 static void
215 ReservePrivateRefCountEntry(void)
216 {
217  /* Already reserved (or freed), nothing to do */
218  if (ReservedRefCountEntry != NULL)
219  return;
220 
221  /*
222  * First search for a free entry in the array, that'll be sufficient in the
223  * majority of cases.
224  */
225  {
226  int i;
227 
228  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
229  {
230  PrivateRefCountEntry *res;
231 
232  res = &PrivateRefCountArray[i];
233 
234  if (res->buffer == InvalidBuffer)
235  {
236  ReservedRefCountEntry = res;
237  return;
238  }
239  }
240  }
241 
242  /*
243  * No luck. All array entries are full. Move one array entry into the hash
244  * table.
245  */
246  {
247  /*
248  * Move entry from the current clock position in the array into the
249  * hashtable. Use that slot.
250  */
251  PrivateRefCountEntry *hashent;
252  bool found;
253 
254  /* select victim slot */
255  ReservedRefCountEntry =
256  &PrivateRefCountArray[PrivateRefCountClock++ % REFCOUNT_ARRAY_ENTRIES];
257 
258  /* Better be used, otherwise we shouldn't get here. */
259  Assert(ReservedRefCountEntry->buffer != InvalidBuffer);
260 
261  /* enter victim array entry into hashtable */
262  hashent = hash_search(PrivateRefCountHash,
263  (void *) &(ReservedRefCountEntry->buffer),
264  HASH_ENTER,
265  &found);
266  Assert(!found);
267  hashent->refcount = ReservedRefCountEntry->refcount;
268 
269  /* clear the now free array slot */
270  ReservedRefCountEntry->buffer = InvalidBuffer;
271  ReservedRefCountEntry->refcount = 0;
272 
273  PrivateRefCountOverflowed++;
274  }
275 }
276 
277 /*
278  * Fill a previously reserved refcount entry.
279  */
280 static PrivateRefCountEntry *
281 NewPrivateRefCountEntry(Buffer buffer)
282 {
283  PrivateRefCountEntry *res;
284 
285  /* only allowed to be called when a reservation has been made */
286  Assert(ReservedRefCountEntry != NULL);
287 
288  /* use up the reserved entry */
289  res = ReservedRefCountEntry;
290  ReservedRefCountEntry = NULL;
291 
292  /* and fill it */
293  res->buffer = buffer;
294  res->refcount = 0;
295 
296  return res;
297 }
298 
299 /*
300  * Return the PrivateRefCount entry for the passed buffer.
301  *
302  * Returns NULL if a buffer doesn't have a refcount entry. Otherwise, if
303  * do_move is true, and the entry resides in the hashtable the entry is
304  * optimized for frequent access by moving it to the array.
305  */
306 static PrivateRefCountEntry *
307 GetPrivateRefCountEntry(Buffer buffer, bool do_move)
308 {
309  PrivateRefCountEntry *res;
310  int i;
311 
312  Assert(BufferIsValid(buffer));
313  Assert(!BufferIsLocal(buffer));
314 
315  /*
316  * First search for references in the array, that'll be sufficient in the
317  * majority of cases.
318  */
319  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
320  {
321  res = &PrivateRefCountArray[i];
322 
323  if (res->buffer == buffer)
324  return res;
325  }
326 
327  /*
328  * By here we know that the buffer, if already pinned, isn't residing in
329  * the array.
330  *
331  * Only look up the buffer in the hashtable if we've previously overflowed
332  * into it.
333  */
334  if (PrivateRefCountOverflowed == 0)
335  return NULL;
336 
337  res = hash_search(PrivateRefCountHash,
338  (void *) &buffer,
339  HASH_FIND,
340  NULL);
341 
342  if (res == NULL)
343  return NULL;
344  else if (!do_move)
345  {
346  /* caller doesn't want us to move the hash entry into the array */
347  return res;
348  }
349  else
350  {
351  /* move buffer from hashtable into the free array slot */
352  bool found;
353  PrivateRefCountEntry *free;
354 
355  /* Ensure there's a free array slot */
356  ReservePrivateRefCountEntry();
357 
358  /* Use up the reserved slot */
359  Assert(ReservedRefCountEntry != NULL);
360  free = ReservedRefCountEntry;
361  ReservedRefCountEntry = NULL;
362  Assert(free->buffer == InvalidBuffer);
363 
364  /* and fill it */
365  free->buffer = buffer;
366  free->refcount = res->refcount;
367 
368  /* delete from hashtable */
369  hash_search(PrivateRefCountHash,
370  (void *) &buffer,
371  HASH_REMOVE,
372  &found);
373  Assert(found);
374  Assert(PrivateRefCountOverflowed > 0);
375  PrivateRefCountOverflowed--;
376 
377  return free;
378  }
379 }
380 
381 /*
382  * Returns how many times the passed buffer is pinned by this backend.
383  *
384  * Only works for shared memory buffers!
385  */
386 static inline int32
387 GetPrivateRefCount(Buffer buffer)
388 {
389  PrivateRefCountEntry *ref;
390 
391  Assert(BufferIsValid(buffer));
392  Assert(!BufferIsLocal(buffer));
393 
394  /*
395  * Not moving the entry - that's ok for the current users, but we might
396  * want to change this one day.
397  */
398  ref = GetPrivateRefCountEntry(buffer, false);
399 
400  if (ref == NULL)
401  return 0;
402  return ref->refcount;
403 }
404 
405 /*
406  * Release resources used to track the reference count of a buffer which we no
407  * longer have pinned and don't want to pin again immediately.
408  */
409 static void
410 ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
411 {
412  Assert(ref->refcount == 0);
413 
414  if (ref >= &PrivateRefCountArray[0] &&
415  ref < &PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES])
416  {
417  ref->buffer = InvalidBuffer;
418 
419  /*
420  * Mark the just used entry as reserved - in many scenarios that
421  * allows us to avoid ever having to search the array/hash for free
422  * entries.
423  */
424  ReservedRefCountEntry = ref;
425  }
426  else
427  {
428  bool found;
429  Buffer buffer = ref->buffer;
430 
431  hash_search(PrivateRefCountHash,
432  (void *) &buffer,
433  HASH_REMOVE,
434  &found);
435  Assert(found);
436  Assert(PrivateRefCountOverflowed > 0);
437  PrivateRefCountOverflowed--;
438  }
439 }
440 
441 /*
442  * BufferIsPinned
443  * True iff the buffer is pinned (also checks for valid buffer number).
444  *
445  * NOTE: what we check here is that *this* backend holds a pin on
446  * the buffer. We do not care whether some other backend does.
447  */
448 #define BufferIsPinned(bufnum) \
449 ( \
450  !BufferIsValid(bufnum) ? \
451  false \
452  : \
453  BufferIsLocal(bufnum) ? \
454  (LocalRefCount[-(bufnum) - 1] > 0) \
455  : \
456  (GetPrivateRefCount(bufnum) > 0) \
457 )
458 
459 
460 static Buffer ReadBuffer_common(SMgrRelation reln, char relpersistence,
461  ForkNumber forkNum, BlockNumber blockNum,
462  ReadBufferMode mode, BufferAccessStrategy strategy,
463  bool *hit);
464 static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy);
465 static void PinBuffer_Locked(BufferDesc *buf);
466 static void UnpinBuffer(BufferDesc *buf, bool fixOwner);
467 static void BufferSync(int flags);
468 static uint32 WaitBufHdrUnlocked(BufferDesc *buf);
469 static int SyncOneBuffer(int buf_id, bool skip_recently_used,
470  WritebackContext *wb_context);
471 static void WaitIO(BufferDesc *buf);
472 static bool StartBufferIO(BufferDesc *buf, bool forInput);
473 static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty,
474  uint32 set_flag_bits);
475 static void shared_buffer_write_error_callback(void *arg);
476 static void local_buffer_write_error_callback(void *arg);
477 static BufferDesc *BufferAlloc(SMgrRelation smgr,
478  char relpersistence,
479  ForkNumber forkNum,
480  BlockNumber blockNum,
481  BufferAccessStrategy strategy,
482  bool *foundPtr);
483 static void FlushBuffer(BufferDesc *buf, SMgrRelation reln);
484 static void FindAndDropRelFileNodeBuffers(RelFileNode rnode,
485  ForkNumber forkNum,
486  BlockNumber nForkBlock,
487  BlockNumber firstDelBlock);
488 static void AtProcExit_Buffers(int code, Datum arg);
489 static void CheckForBufferLeaks(void);
490 static int rnode_comparator(const void *p1, const void *p2);
491 static inline int buffertag_comparator(const BufferTag *a, const BufferTag *b);
492 static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
493 static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
494 
495 
496 /*
497  * Implementation of PrefetchBuffer() for shared buffers.
498  */
499 PrefetchBufferResult
500 PrefetchSharedBuffer(SMgrRelation smgr_reln,
501  ForkNumber forkNum,
502  BlockNumber blockNum)
503 {
504  PrefetchBufferResult result = {InvalidBuffer, false};
505  BufferTag newTag; /* identity of requested block */
506  uint32 newHash; /* hash value for newTag */
507  LWLock *newPartitionLock; /* buffer partition lock for it */
508  int buf_id;
509 
510  Assert(BlockNumberIsValid(blockNum));
511 
512  /* create a tag so we can lookup the buffer */
513  INIT_BUFFERTAG(newTag, smgr_reln->smgr_rnode.node,
514  forkNum, blockNum);
515 
516  /* determine its hash code and partition lock ID */
517  newHash = BufTableHashCode(&newTag);
518  newPartitionLock = BufMappingPartitionLock(newHash);
519 
520  /* see if the block is in the buffer pool already */
521  LWLockAcquire(newPartitionLock, LW_SHARED);
522  buf_id = BufTableLookup(&newTag, newHash);
523  LWLockRelease(newPartitionLock);
524 
525  /* If not in buffers, initiate prefetch */
526  if (buf_id < 0)
527  {
528 #ifdef USE_PREFETCH
529  /*
530  * Try to initiate an asynchronous read. This returns false in
531  * recovery if the relation file doesn't exist.
532  */
533  if (smgrprefetch(smgr_reln, forkNum, blockNum))
534  result.initiated_io = true;
535 #endif /* USE_PREFETCH */
536  }
537  else
538  {
539  /*
540  * Report the buffer it was in at that time. The caller may be able
541  * to avoid a buffer table lookup, but it's not pinned and it must be
542  * rechecked!
543  */
544  result.recent_buffer = buf_id + 1;
545  }
546 
547  /*
548  * If the block *is* in buffers, we do nothing. This is not really ideal:
549  * the block might be just about to be evicted, which would be stupid
550  * since we know we are going to need it soon. But the only easy answer
551  * is to bump the usage_count, which does not seem like a great solution:
552  * when the caller does ultimately touch the block, usage_count would get
553  * bumped again, resulting in too much favoritism for blocks that are
554  * involved in a prefetch sequence. A real fix would involve some
555  * additional per-buffer state, and it's not clear that there's enough of
556  * a problem to justify that.
557  */
558 
559  return result;
560 }
561 
562 /*
563  * PrefetchBuffer -- initiate asynchronous read of a block of a relation
564  *
565  * This is named by analogy to ReadBuffer but doesn't actually allocate a
566  * buffer. Instead it tries to ensure that a future ReadBuffer for the given
567  * block will not be delayed by the I/O. Prefetching is optional.
568  *
569  * There are three possible outcomes:
570  *
571  * 1. If the block is already cached, the result includes a valid buffer that
572  * could be used by the caller to avoid the need for a later buffer lookup, but
573  * it's not pinned, so the caller must recheck it.
574  *
575  * 2. If the kernel has been asked to initiate I/O, the initiated_io member is
576  * true. Currently there is no way to know if the data was already cached by
577  * the kernel and therefore didn't really initiate I/O, and no way to know when
578  * the I/O completes other than using synchronous ReadBuffer().
579  *
580  * 3. Otherwise, the buffer wasn't already cached by PostgreSQL, and either
581  * USE_PREFETCH is not defined (this build doesn't support prefetching due to
582  * lack of a kernel facility), or the underlying relation file wasn't found and
583  * we are in recovery. (If the relation file wasn't found and we are not in
584  * recovery, an error is raised).
585  */
586  */
587 PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
588 {
589  Assert(RelationIsValid(reln));
590  Assert(BlockNumberIsValid(blockNum));
591 
592  if (RelationUsesLocalBuffers(reln))
593  {
594  /* see comments in ReadBufferExtended */
595  if (RELATION_IS_OTHER_TEMP(reln))
596  ereport(ERROR,
597  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
598  errmsg("cannot access temporary tables of other sessions")));
599 
600  /* pass it off to localbuf.c */
601  return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
602  }
603  else
604  {
605  /* pass it to the shared buffer version */
606  return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
607  }
608 }
609 
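/*
 * Typical usage is to issue prefetches some distance ahead of a sequential
 * or bitmap-driven read loop, e.g. (sketch; the prefetch distance and block
 * numbers are placeholders):
 *
 *		if (blkno + prefetch_distance < nblocks)
 *			(void) PrefetchBuffer(rel, MAIN_FORKNUM, blkno + prefetch_distance);
 *		buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, NULL);
 */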
610 /*
611  * ReadRecentBuffer -- try to pin a block in a recently observed buffer
612  *
613  * Compared to ReadBuffer(), this avoids a buffer mapping lookup when it's
614  * successful. Return true if the buffer is valid and still has the expected
615  * tag. In that case, the buffer is pinned and the usage count is bumped.
616  */
617 bool
618 ReadRecentBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum,
619  Buffer recent_buffer)
620 {
621  BufferDesc *bufHdr;
622  BufferTag tag;
623  uint32 buf_state;
624  bool have_private_ref;
625 
626  Assert(BufferIsValid(recent_buffer));
627 
627 
628  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
629  ReservePrivateRefCountEntry();
630  INIT_BUFFERTAG(tag, rnode, forkNum, blockNum);
631 
632  if (BufferIsLocal(recent_buffer))
633  {
634  bufHdr = GetBufferDescriptor(-recent_buffer - 1);
635  buf_state = pg_atomic_read_u32(&bufHdr->state);
636 
637  /* Is it still valid and holding the right tag? */
638  if ((buf_state & BM_VALID) && BUFFERTAGS_EQUAL(tag, bufHdr->tag))
639  {
640  /* Bump local buffer's ref and usage counts. */
641  ResourceOwnerRememberBuffer(CurrentResourceOwner, recent_buffer);
642  LocalRefCount[-recent_buffer - 1]++;
643  if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
644  pg_atomic_write_u32(&bufHdr->state,
645  buf_state + BUF_USAGECOUNT_ONE);
646 
647  return true;
648  }
649  }
650  else
651  {
652  bufHdr = GetBufferDescriptor(recent_buffer - 1);
653  have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
654 
655  /*
656  * Do we already have this buffer pinned with a private reference? If
657  * so, it must be valid and it is safe to check the tag without
658  * locking. If not, we have to lock the header first and then check.
659  */
660  if (have_private_ref)
661  buf_state = pg_atomic_read_u32(&bufHdr->state);
662  else
663  buf_state = LockBufHdr(bufHdr);
664 
665  if ((buf_state & BM_VALID) && BUFFERTAGS_EQUAL(tag, bufHdr->tag))
666  {
667  /*
668  * It's now safe to pin the buffer. We can't pin first and ask
669  * questions later, because it might confuse code paths
670  * like InvalidateBuffer() if we pinned a random non-matching
671  * buffer.
672  */
673  if (have_private_ref)
674  PinBuffer(bufHdr, NULL); /* bump pin count */
675  else
676  PinBuffer_Locked(bufHdr); /* pin for first time */
677 
678  return true;
679  }
680 
681  /* If we locked the header above, now unlock. */
682  if (!have_private_ref)
683  UnlockBufHdr(bufHdr, buf_state);
684  }
685 
686  return false;
687 }
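/*
 * A caller that saved the recent_buffer reported by PrefetchBuffer() can try
 * it first and fall back to a normal read if the tag no longer matches
 * (sketch only; rnode, forkNum and blkno are placeholders; on success the
 * buffer is already pinned):
 *
 *		if (BufferIsValid(recent_buffer) &&
 *			ReadRecentBuffer(rnode, forkNum, blkno, recent_buffer))
 *			buf = recent_buffer;
 *		else
 *			buf = ReadBufferExtended(rel, forkNum, blkno, RBM_NORMAL, NULL);
 */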
688 
689 /*
690  * ReadBuffer -- a shorthand for ReadBufferExtended, for reading from main
691  * fork with RBM_NORMAL mode and default strategy.
692  */
693 Buffer
694 ReadBuffer(Relation reln, BlockNumber blockNum)
695 {
696  return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
697 }
698 
699 /*
700  * ReadBufferExtended -- returns a buffer containing the requested
701  * block of the requested relation. If the blknum
702  * requested is P_NEW, extend the relation file and
703  * allocate a new block. (Caller is responsible for
704  * ensuring that only one backend tries to extend a
705  * relation at the same time!)
706  *
707  * Returns: the buffer number for the buffer containing
708  * the block read. The returned buffer has been pinned.
709  * Does not return on error --- elog's instead.
710  *
711  * Assume when this function is called, that reln has been opened already.
712  *
713  * In RBM_NORMAL mode, the page is read from disk, and the page header is
714  * validated. An error is thrown if the page header is not valid. (But
715  * note that an all-zero page is considered "valid"; see
716  * PageIsVerifiedExtended().)
717  *
718  * RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
719  * valid, the page is zeroed instead of throwing an error. This is intended
720  * for non-critical data, where the caller is prepared to repair errors.
721  *
722  * In RBM_ZERO_AND_LOCK mode, if the page isn't in buffer cache already, it's
723  * filled with zeros instead of reading it from disk. Useful when the caller
724  * is going to fill the page from scratch, since this saves I/O and avoids
725  * unnecessary failure if the page-on-disk has corrupt page headers.
726  * The page is returned locked to ensure that the caller has a chance to
727  * initialize the page before it's made visible to others.
728  * Caution: do not use this mode to read a page that is beyond the relation's
729  * current physical EOF; that is likely to cause problems in md.c when
730  * the page is modified and written out. P_NEW is OK, though.
731  *
732  * RBM_ZERO_AND_CLEANUP_LOCK is the same as RBM_ZERO_AND_LOCK, but acquires
733  * a cleanup-strength lock on the page.
734  *
735  * RBM_NORMAL_NO_LOG mode is treated the same as RBM_NORMAL here.
736  *
737  * If strategy is not NULL, a nondefault buffer access strategy is used.
738  * See buffer/README for details.
739  */
740 Buffer
741 ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
742  ReadBufferMode mode, BufferAccessStrategy strategy)
743 {
744  bool hit;
745  Buffer buf;
746 
747  /*
748  * Reject attempts to read non-local temporary relations; we would be
749  * likely to get wrong data since we have no visibility into the owning
750  * session's local buffers.
751  */
752  if (RELATION_IS_OTHER_TEMP(reln))
753  ereport(ERROR,
754  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
755  errmsg("cannot access temporary tables of other sessions")));
756 
757  /*
758  * Read the buffer, and update pgstat counters to reflect a cache hit or
759  * miss.
760  */
761  pgstat_count_buffer_read(reln);
762  buf = ReadBuffer_common(RelationGetSmgr(reln), reln->rd_rel->relpersistence,
763  forkNum, blockNum, mode, strategy, &hit);
764  if (hit)
765  pgstat_count_buffer_hit(reln);
766  return buf;
767 }
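/*
 * Example of a nondefault strategy (sketch): a bulk sequential read that
 * should not flood shared buffers can allocate a ring with GetAccessStrategy()
 * and pass it here.
 *
 *		BufferAccessStrategy strategy = GetAccessStrategy(BAS_BULKREAD);
 *		...
 *		buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, strategy);
 *		...
 *		FreeAccessStrategy(strategy);
 */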
768 
769 
770 /*
771  * ReadBufferWithoutRelcache -- like ReadBufferExtended, but doesn't require
772  * a relcache entry for the relation.
773  *
774  * NB: At present, this function may only be used on permanent relations, which
775  * is OK, because we only use it during XLOG replay. If in the future we
776  * want to use it on temporary or unlogged relations, we could pass additional
777  * parameters.
778  */
779 Buffer
780 ReadBufferWithoutRelcache(RelFileNode rnode, ForkNumber forkNum,
781  BlockNumber blockNum, ReadBufferMode mode,
782  BufferAccessStrategy strategy)
783 {
784  bool hit;
785 
786  SMgrRelation smgr = smgropen(rnode, InvalidBackendId);
787 
788  Assert(InRecovery);
789 
790  return ReadBuffer_common(smgr, RELPERSISTENCE_PERMANENT, forkNum, blockNum,
791  mode, strategy, &hit);
792 }
793 
794 
795 /*
796  * ReadBuffer_common -- common logic for all ReadBuffer variants
797  *
798  * *hit is set to true if the request was satisfied from shared buffer cache.
799  */
800 static Buffer
801 ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
802  BlockNumber blockNum, ReadBufferMode mode,
803  BufferAccessStrategy strategy, bool *hit)
804 {
805  BufferDesc *bufHdr;
806  Block bufBlock;
807  bool found;
808  bool isExtend;
809  bool isLocalBuf = SmgrIsTemp(smgr);
810 
811  *hit = false;
812 
813  /* Make sure we will have room to remember the buffer pin */
814  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
815 
816  isExtend = (blockNum == P_NEW);
817 
818  TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
819  smgr->smgr_rnode.node.spcNode,
820  smgr->smgr_rnode.node.dbNode,
821  smgr->smgr_rnode.node.relNode,
822  smgr->smgr_rnode.backend,
823  isExtend);
824 
825  /* Substitute proper block number if caller asked for P_NEW */
826  if (isExtend)
827  blockNum = smgrnblocks(smgr, forkNum);
828 
829  if (isLocalBuf)
830  {
831  bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, &found);
832  if (found)
833  pgBufferUsage.local_blks_hit++;
834  else if (isExtend)
835  pgBufferUsage.local_blks_written++;
836  else if (mode == RBM_NORMAL || mode == RBM_NORMAL_NO_LOG ||
837  mode == RBM_ZERO_ON_ERROR)
838  pgBufferUsage.local_blks_read++;
839  }
840  else
841  {
842  /*
843  * lookup the buffer. IO_IN_PROGRESS is set if the requested block is
844  * not currently in memory.
845  */
846  bufHdr = BufferAlloc(smgr, relpersistence, forkNum, blockNum,
847  strategy, &found);
848  if (found)
849  pgBufferUsage.shared_blks_hit++;
850  else if (isExtend)
851  pgBufferUsage.shared_blks_written++;
852  else if (mode == RBM_NORMAL || mode == RBM_NORMAL_NO_LOG ||
853  mode == RBM_ZERO_ON_ERROR)
854  pgBufferUsage.shared_blks_read++;
855  }
856 
857  /* At this point we do NOT hold any locks. */
858 
859  /* if it was already in the buffer pool, we're done */
860  if (found)
861  {
862  if (!isExtend)
863  {
864  /* Just need to update stats before we exit */
865  *hit = true;
866  VacuumPageHit++;
867 
868  if (VacuumCostActive)
869  VacuumCostBalance += VacuumCostPageHit;
870 
871  TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
872  smgr->smgr_rnode.node.spcNode,
873  smgr->smgr_rnode.node.dbNode,
874  smgr->smgr_rnode.node.relNode,
875  smgr->smgr_rnode.backend,
876  isExtend,
877  found);
878 
879  /*
880  * In RBM_ZERO_AND_LOCK mode the caller expects the page to be
881  * locked on return.
882  */
883  if (!isLocalBuf)
884  {
885  if (mode == RBM_ZERO_AND_LOCK)
886  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr),
887  LW_EXCLUSIVE);
888  else if (mode == RBM_ZERO_AND_CLEANUP_LOCK)
889  LockBufferForCleanup(BufferDescriptorGetBuffer(bufHdr));
890  }
891 
892  return BufferDescriptorGetBuffer(bufHdr);
893  }
894 
895  /*
896  * We get here only in the corner case where we are trying to extend
897  * the relation but we found a pre-existing buffer marked BM_VALID.
898  * This can happen because mdread doesn't complain about reads beyond
899  * EOF (when zero_damaged_pages is ON) and so a previous attempt to
900  * read a block beyond EOF could have left a "valid" zero-filled
901  * buffer. Unfortunately, we have also seen this case occurring
902  * because of buggy Linux kernels that sometimes return an
903  * lseek(SEEK_END) result that doesn't account for a recent write. In
904  * that situation, the pre-existing buffer would contain valid data
905  * that we don't want to overwrite. Since the legitimate case should
906  * always have left a zero-filled buffer, complain if not PageIsNew.
907  */
908  bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
909  if (!PageIsNew((Page) bufBlock))
910  ereport(ERROR,
911  (errmsg("unexpected data beyond EOF in block %u of relation %s",
912  blockNum, relpath(smgr->smgr_rnode, forkNum)),
913  errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
914 
915  /*
916  * We *must* do smgrextend before succeeding, else the page will not
917  * be reserved by the kernel, and the next P_NEW call will decide to
918  * return the same page. Clear the BM_VALID bit, do the StartBufferIO
919  * call that BufferAlloc didn't, and proceed.
920  */
921  if (isLocalBuf)
922  {
923  /* Only need to adjust flags */
924  uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
925 
926  Assert(buf_state & BM_VALID);
927  buf_state &= ~BM_VALID;
928  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
929  }
930  else
931  {
932  /*
933  * Loop to handle the very small possibility that someone re-sets
934  * BM_VALID between our clearing it and StartBufferIO inspecting
935  * it.
936  */
937  do
938  {
939  uint32 buf_state = LockBufHdr(bufHdr);
940 
941  Assert(buf_state & BM_VALID);
942  buf_state &= ~BM_VALID;
943  UnlockBufHdr(bufHdr, buf_state);
944  } while (!StartBufferIO(bufHdr, true));
945  }
946  }
947 
948  /*
949  * if we have gotten to this point, we have allocated a buffer for the
950  * page but its contents are not yet valid. IO_IN_PROGRESS is set for it,
951  * if it's a shared buffer.
952  *
953  * Note: if smgrextend fails, we will end up with a buffer that is
954  * allocated but not marked BM_VALID. P_NEW will still select the same
955  * block number (because the relation didn't get any longer on disk) and
956  * so future attempts to extend the relation will find the same buffer (if
957  * it's not been recycled) but come right back here to try smgrextend
958  * again.
959  */
960  Assert(!(pg_atomic_read_u32(&bufHdr->state) & BM_VALID)); /* spinlock not needed */
961 
962  bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
963 
964  if (isExtend)
965  {
966  /* new buffers are zero-filled */
967  MemSet((char *) bufBlock, 0, BLCKSZ);
968  /* don't set checksum for all-zero page */
969  smgrextend(smgr, forkNum, blockNum, (char *) bufBlock, false);
970 
971  /*
972  * NB: we're *not* doing a ScheduleBufferTagForWriteback here;
973  * although we're essentially performing a write. At least on linux
974  * doing so defeats the 'delayed allocation' mechanism, leading to
975  * increased file fragmentation.
976  */
977  }
978  else
979  {
980  /*
981  * Read in the page, unless the caller intends to overwrite it and
982  * just wants us to allocate a buffer.
983  */
984  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
985  MemSet((char *) bufBlock, 0, BLCKSZ);
986  else
987  {
988  instr_time io_start,
989  io_time;
990 
991  if (track_io_timing)
992  INSTR_TIME_SET_CURRENT(io_start);
993 
994  smgrread(smgr, forkNum, blockNum, (char *) bufBlock);
995 
996  if (track_io_timing)
997  {
998  INSTR_TIME_SET_CURRENT(io_time);
999  INSTR_TIME_SUBTRACT(io_time, io_start);
1000  pgstat_count_buffer_read_time(INSTR_TIME_GET_MICROSEC(io_time));
1001  INSTR_TIME_ADD(pgBufferUsage.blk_read_time, io_time);
1002  }
1003 
1004  /* check for garbage data */
1005  if (!PageIsVerifiedExtended((Page) bufBlock, blockNum,
1006  PIV_LOG_WARNING | PIV_REPORT_STAT))
1007  {
1008  if (mode == RBM_ZERO_ON_ERROR || zero_damaged_pages)
1009  {
1010  ereport(WARNING,
1011  (errcode(ERRCODE_DATA_CORRUPTED),
1012  errmsg("invalid page in block %u of relation %s; zeroing out page",
1013  blockNum,
1014  relpath(smgr->smgr_rnode, forkNum))));
1015  MemSet((char *) bufBlock, 0, BLCKSZ);
1016  }
1017  else
1018  ereport(ERROR,
1019  (errcode(ERRCODE_DATA_CORRUPTED),
1020  errmsg("invalid page in block %u of relation %s",
1021  blockNum,
1022  relpath(smgr->smgr_rnode, forkNum))));
1023  }
1024  }
1025  }
1026 
1027  /*
1028  * In RBM_ZERO_AND_LOCK mode, grab the buffer content lock before marking
1029  * the page as valid, to make sure that no other backend sees the zeroed
1030  * page before the caller has had a chance to initialize it.
1031  *
1032  * Since no-one else can be looking at the page contents yet, there is no
1033  * difference between an exclusive lock and a cleanup-strength lock. (Note
1034  * that we cannot use LockBuffer() or LockBufferForCleanup() here, because
1035  * they assert that the buffer is already valid.)
1036  */
1037  if ((mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK) &&
1038  !isLocalBuf)
1039  {
1040  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_EXCLUSIVE);
1041  }
1042 
1043  if (isLocalBuf)
1044  {
1045  /* Only need to adjust flags */
1046  uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
1047 
1048  buf_state |= BM_VALID;
1049  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
1050  }
1051  else
1052  {
1053  /* Set BM_VALID, terminate IO, and wake up any waiters */
1054  TerminateBufferIO(bufHdr, false, BM_VALID);
1055  }
1056 
1057  VacuumPageMiss++;
1058  if (VacuumCostActive)
1059  VacuumCostBalance += VacuumCostPageMiss;
1060 
1061  TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
1062  smgr->smgr_rnode.node.spcNode,
1063  smgr->smgr_rnode.node.dbNode,
1064  smgr->smgr_rnode.node.relNode,
1065  smgr->smgr_rnode.backend,
1066  isExtend,
1067  found);
1068 
1069  return BufferDescriptorGetBuffer(bufHdr);
1070 }
1071 
1072 /*
1073  * BufferAlloc -- subroutine for ReadBuffer. Handles lookup of a shared
1074  * buffer. If no buffer exists already, selects a replacement
1075  * victim and evicts the old page, but does NOT read in new page.
1076  *
1077  * "strategy" can be a buffer replacement strategy object, or NULL for
1078  * the default strategy. The selected buffer's usage_count is advanced when
1079  * using the default strategy, but otherwise possibly not (see PinBuffer).
1080  *
1081  * The returned buffer is pinned and is already marked as holding the
1082  * desired page. If it already did have the desired page, *foundPtr is
1083  * set true. Otherwise, *foundPtr is set false and the buffer is marked
1084  * as IO_IN_PROGRESS; ReadBuffer will now need to do I/O to fill it.
1085  *
1086  * *foundPtr is actually redundant with the buffer's BM_VALID flag, but
1087  * we keep it for simplicity in ReadBuffer.
1088  *
1089  * No locks are held either at entry or exit.
1090  */
1091 static BufferDesc *
1092 BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
1093  BlockNumber blockNum,
1094  BufferAccessStrategy strategy,
1095  bool *foundPtr)
1096 {
1097  BufferTag newTag; /* identity of requested block */
1098  uint32 newHash; /* hash value for newTag */
1099  LWLock *newPartitionLock; /* buffer partition lock for it */
1100  BufferTag oldTag; /* previous identity of selected buffer */
1101  uint32 oldHash; /* hash value for oldTag */
1102  LWLock *oldPartitionLock; /* buffer partition lock for it */
1103  uint32 oldFlags;
1104  int buf_id;
1105  BufferDesc *buf;
1106  bool valid;
1107  uint32 buf_state;
1108 
1109  /* create a tag so we can lookup the buffer */
1110  INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);
1111 
1112  /* determine its hash code and partition lock ID */
1113  newHash = BufTableHashCode(&newTag);
1114  newPartitionLock = BufMappingPartitionLock(newHash);
1115 
1116  /* see if the block is in the buffer pool already */
1117  LWLockAcquire(newPartitionLock, LW_SHARED);
1118  buf_id = BufTableLookup(&newTag, newHash);
1119  if (buf_id >= 0)
1120  {
1121  /*
1122  * Found it. Now, pin the buffer so no one can steal it from the
1123  * buffer pool, and check to see if the correct data has been loaded
1124  * into the buffer.
1125  */
1126  buf = GetBufferDescriptor(buf_id);
1127 
1128  valid = PinBuffer(buf, strategy);
1129 
1130  /* Can release the mapping lock as soon as we've pinned it */
1131  LWLockRelease(newPartitionLock);
1132 
1133  *foundPtr = true;
1134 
1135  if (!valid)
1136  {
1137  /*
1138  * We can only get here if (a) someone else is still reading in
1139  * the page, or (b) a previous read attempt failed. We have to
1140  * wait for any active read attempt to finish, and then set up our
1141  * own read attempt if the page is still not BM_VALID.
1142  * StartBufferIO does it all.
1143  */
1144  if (StartBufferIO(buf, true))
1145  {
1146  /*
1147  * If we get here, previous attempts to read the buffer must
1148  * have failed ... but we shall bravely try again.
1149  */
1150  *foundPtr = false;
1151  }
1152  }
1153 
1154  return buf;
1155  }
1156 
1157  /*
1158  * Didn't find it in the buffer pool. We'll have to initialize a new
1159  * buffer. Remember to unlock the mapping lock while doing the work.
1160  */
1161  LWLockRelease(newPartitionLock);
1162 
1163  /* Loop here in case we have to try another victim buffer */
1164  for (;;)
1165  {
1166  /*
1167  * Ensure, while the spinlock's not yet held, that there's a free
1168  * refcount entry.
1169  */
1170  ReservePrivateRefCountEntry();
1171 
1172  /*
1173  * Select a victim buffer. The buffer is returned with its header
1174  * spinlock still held!
1175  */
1176  buf = StrategyGetBuffer(strategy, &buf_state);
1177 
1178  Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 0);
1179 
1180  /* Must copy buffer flags while we still hold the spinlock */
1181  oldFlags = buf_state & BUF_FLAG_MASK;
1182 
1183  /* Pin the buffer and then release the buffer spinlock */
1184  PinBuffer_Locked(buf);
1185 
1186  /*
1187  * If the buffer was dirty, try to write it out. There is a race
1188  * condition here, in that someone might dirty it after we released it
1189  * above, or even while we are writing it out (since our share-lock
1190  * won't prevent hint-bit updates). We will recheck the dirty bit
1191  * after re-locking the buffer header.
1192  */
1193  if (oldFlags & BM_DIRTY)
1194  {
1195  /*
1196  * We need a share-lock on the buffer contents to write it out
1197  * (else we might write invalid data, eg because someone else is
1198  * compacting the page contents while we write). We must use a
1199  * conditional lock acquisition here to avoid deadlock. Even
1200  * though the buffer was not pinned (and therefore surely not
1201  * locked) when StrategyGetBuffer returned it, someone else could
1202  * have pinned and exclusive-locked it by the time we get here. If
1203  * we try to get the lock unconditionally, we'd block waiting for
1204  * them; if they later block waiting for us, deadlock ensues.
1205  * (This has been observed to happen when two backends are both
1206  * trying to split btree index pages, and the second one just
1207  * happens to be trying to split the page the first one got from
1208  * StrategyGetBuffer.)
1209  */
1210  if (LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
1211  LW_SHARED))
1212  {
1213  /*
1214  * If using a nondefault strategy, and writing the buffer
1215  * would require a WAL flush, let the strategy decide whether
1216  * to go ahead and write/reuse the buffer or to choose another
1217  * victim. We need lock to inspect the page LSN, so this
1218  * can't be done inside StrategyGetBuffer.
1219  */
1220  if (strategy != NULL)
1221  {
1222  XLogRecPtr lsn;
1223 
1224  /* Read the LSN while holding buffer header lock */
1225  buf_state = LockBufHdr(buf);
1226  lsn = BufferGetLSN(buf);
1227  UnlockBufHdr(buf, buf_state);
1228 
1229  if (XLogNeedsFlush(lsn) &&
1230  StrategyRejectBuffer(strategy, buf))
1231  {
1232  /* Drop lock/pin and loop around for another buffer */
1233  LWLockRelease(BufferDescriptorGetContentLock(buf));
1234  UnpinBuffer(buf, true);
1235  continue;
1236  }
1237  }
1238 
1239  /* OK, do the I/O */
1240  TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_START(forkNum, blockNum,
1241  smgr->smgr_rnode.node.spcNode,
1242  smgr->smgr_rnode.node.dbNode,
1243  smgr->smgr_rnode.node.relNode);
1244 
1245  FlushBuffer(buf, NULL);
1246  LWLockRelease(BufferDescriptorGetContentLock(buf));
1247 
1248  ScheduleBufferTagForWriteback(&BackendWritebackContext,
1249  &buf->tag);
1250 
1251  TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_DONE(forkNum, blockNum,
1252  smgr->smgr_rnode.node.spcNode,
1253  smgr->smgr_rnode.node.dbNode,
1254  smgr->smgr_rnode.node.relNode);
1255  }
1256  else
1257  {
1258  /*
1259  * Someone else has locked the buffer, so give it up and loop
1260  * back to get another one.
1261  */
1262  UnpinBuffer(buf, true);
1263  continue;
1264  }
1265  }
1266 
1267  /*
1268  * To change the association of a valid buffer, we'll need to have
1269  * exclusive lock on both the old and new mapping partitions.
1270  */
1271  if (oldFlags & BM_TAG_VALID)
1272  {
1273  /*
1274  * Need to compute the old tag's hashcode and partition lock ID.
1275  * XXX is it worth storing the hashcode in BufferDesc so we need
1276  * not recompute it here? Probably not.
1277  */
1278  oldTag = buf->tag;
1279  oldHash = BufTableHashCode(&oldTag);
1280  oldPartitionLock = BufMappingPartitionLock(oldHash);
1281 
1282  /*
1283  * Must lock the lower-numbered partition first to avoid
1284  * deadlocks.
1285  */
1286  if (oldPartitionLock < newPartitionLock)
1287  {
1288  LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
1289  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1290  }
1291  else if (oldPartitionLock > newPartitionLock)
1292  {
1293  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1294  LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
1295  }
1296  else
1297  {
1298  /* only one partition, only one lock */
1299  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1300  }
1301  }
1302  else
1303  {
1304  /* if it wasn't valid, we need only the new partition */
1305  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1306  /* remember we have no old-partition lock or tag */
1307  oldPartitionLock = NULL;
1308  /* keep the compiler quiet about uninitialized variables */
1309  oldHash = 0;
1310  }
1311 
1312  /*
1313  * Try to make a hashtable entry for the buffer under its new tag.
1314  * This could fail because while we were writing someone else
1315  * allocated another buffer for the same block we want to read in.
1316  * Note that we have not yet removed the hashtable entry for the old
1317  * tag.
1318  */
1319  buf_id = BufTableInsert(&newTag, newHash, buf->buf_id);
1320 
1321  if (buf_id >= 0)
1322  {
1323  /*
1324  * Got a collision. Someone has already done what we were about to
1325  * do. We'll just handle this as if it were found in the buffer
1326  * pool in the first place. First, give up the buffer we were
1327  * planning to use.
1328  */
1329  UnpinBuffer(buf, true);
1330 
1331  /* Can give up that buffer's mapping partition lock now */
1332  if (oldPartitionLock != NULL &&
1333  oldPartitionLock != newPartitionLock)
1334  LWLockRelease(oldPartitionLock);
1335 
1336  /* remaining code should match code at top of routine */
1337 
1338  buf = GetBufferDescriptor(buf_id);
1339 
1340  valid = PinBuffer(buf, strategy);
1341 
1342  /* Can release the mapping lock as soon as we've pinned it */
1343  LWLockRelease(newPartitionLock);
1344 
1345  *foundPtr = true;
1346 
1347  if (!valid)
1348  {
1349  /*
1350  * We can only get here if (a) someone else is still reading
1351  * in the page, or (b) a previous read attempt failed. We
1352  * have to wait for any active read attempt to finish, and
1353  * then set up our own read attempt if the page is still not
1354  * BM_VALID. StartBufferIO does it all.
1355  */
1356  if (StartBufferIO(buf, true))
1357  {
1358  /*
1359  * If we get here, previous attempts to read the buffer
1360  * must have failed ... but we shall bravely try again.
1361  */
1362  *foundPtr = false;
1363  }
1364  }
1365 
1366  return buf;
1367  }
1368 
1369  /*
1370  * Need to lock the buffer header too in order to change its tag.
1371  */
1372  buf_state = LockBufHdr(buf);
1373 
1374  /*
1375  * Somebody could have pinned or re-dirtied the buffer while we were
1376  * doing the I/O and making the new hashtable entry. If so, we can't
1377  * recycle this buffer; we must undo everything we've done and start
1378  * over with a new victim buffer.
1379  */
1380  oldFlags = buf_state & BUF_FLAG_MASK;
1381  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1 && !(oldFlags & BM_DIRTY))
1382  break;
1383 
1384  UnlockBufHdr(buf, buf_state);
1385  BufTableDelete(&newTag, newHash);
1386  if (oldPartitionLock != NULL &&
1387  oldPartitionLock != newPartitionLock)
1388  LWLockRelease(oldPartitionLock);
1389  LWLockRelease(newPartitionLock);
1390  UnpinBuffer(buf, true);
1391  }
1392 
1393  /*
1394  * Okay, it's finally safe to rename the buffer.
1395  *
1396  * Clearing BM_VALID here is necessary, clearing the dirtybits is just
1397  * paranoia. We also reset the usage_count since any recency of use of
1398  * the old content is no longer relevant. (The usage_count starts out at
1399  * 1 so that the buffer can survive one clock-sweep pass.)
1400  *
1401  * Make sure BM_PERMANENT is set for buffers that must be written at every
1402  * checkpoint. Unlogged buffers only need to be written at shutdown
1403  * checkpoints, except for their "init" forks, which need to be treated
1404  * just like permanent relations.
1405  */
1406  buf->tag = newTag;
1407  buf_state &= ~(BM_VALID | BM_DIRTY | BM_JUST_DIRTIED |
1408  BM_CHECKPOINT_NEEDED | BM_IO_ERROR | BM_PERMANENT |
1409  BUF_USAGECOUNT_MASK);
1410  if (relpersistence == RELPERSISTENCE_PERMANENT || forkNum == INIT_FORKNUM)
1411  buf_state |= BM_TAG_VALID | BM_PERMANENT | BUF_USAGECOUNT_ONE;
1412  else
1413  buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
1414 
1415  UnlockBufHdr(buf, buf_state);
1416 
1417  if (oldPartitionLock != NULL)
1418  {
1419  BufTableDelete(&oldTag, oldHash);
1420  if (oldPartitionLock != newPartitionLock)
1421  LWLockRelease(oldPartitionLock);
1422  }
1423 
1424  LWLockRelease(newPartitionLock);
1425 
1426  /*
1427  * Buffer contents are currently invalid. Try to obtain the right to
1428  * start I/O. If StartBufferIO returns false, then someone else managed
1429  * to read it before we did, so there's nothing left for BufferAlloc() to
1430  * do.
1431  */
1432  if (StartBufferIO(buf, true))
1433  *foundPtr = false;
1434  else
1435  *foundPtr = true;
1436 
1437  return buf;
1438 }
1439 
1440 /*
1441  * InvalidateBuffer -- mark a shared buffer invalid and return it to the
1442  * freelist.
1443  *
1444  * The buffer header spinlock must be held at entry. We drop it before
1445  * returning. (This is sane because the caller must have locked the
1446  * buffer in order to be sure it should be dropped.)
1447  *
1448  * This is used only in contexts such as dropping a relation. We assume
1449  * that no other backend could possibly be interested in using the page,
1450  * so the only reason the buffer might be pinned is if someone else is
1451  * trying to write it out. We have to let them finish before we can
1452  * reclaim the buffer.
1453  *
1454  * The buffer could get reclaimed by someone else while we are waiting
1455  * to acquire the necessary locks; if so, don't mess it up.
1456  */
1457 static void
1458 InvalidateBuffer(BufferDesc *buf)
1459 {
1460  BufferTag oldTag;
1461  uint32 oldHash; /* hash value for oldTag */
1462  LWLock *oldPartitionLock; /* buffer partition lock for it */
1463  uint32 oldFlags;
1464  uint32 buf_state;
1465 
1466  /* Save the original buffer tag before dropping the spinlock */
1467  oldTag = buf->tag;
1468 
1469  buf_state = pg_atomic_read_u32(&buf->state);
1470  Assert(buf_state & BM_LOCKED);
1471  UnlockBufHdr(buf, buf_state);
1472 
1473  /*
1474  * Need to compute the old tag's hashcode and partition lock ID. XXX is it
1475  * worth storing the hashcode in BufferDesc so we need not recompute it
1476  * here? Probably not.
1477  */
1478  oldHash = BufTableHashCode(&oldTag);
1479  oldPartitionLock = BufMappingPartitionLock(oldHash);
1480 
1481 retry:
1482 
1483  /*
1484  * Acquire exclusive mapping lock in preparation for changing the buffer's
1485  * association.
1486  */
1487  LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
1488 
1489  /* Re-lock the buffer header */
1490  buf_state = LockBufHdr(buf);
1491 
1492  /* If it's changed while we were waiting for lock, do nothing */
1493  if (!BUFFERTAGS_EQUAL(buf->tag, oldTag))
1494  {
1495  UnlockBufHdr(buf, buf_state);
1496  LWLockRelease(oldPartitionLock);
1497  return;
1498  }
1499 
1500  /*
1501  * We assume the only reason for it to be pinned is that someone else is
1502  * flushing the page out. Wait for them to finish. (This could be an
1503  * infinite loop if the refcount is messed up... it would be nice to time
1504  * out after awhile, but there seems no way to be sure how many loops may
1505  * be needed. Note that if the other guy has pinned the buffer but not
1506  * yet done StartBufferIO, WaitIO will fall through and we'll effectively
1507  * be busy-looping here.)
1508  */
1509  if (BUF_STATE_GET_REFCOUNT(buf_state) != 0)
1510  {
1511  UnlockBufHdr(buf, buf_state);
1512  LWLockRelease(oldPartitionLock);
1513  /* safety check: should definitely not be our *own* pin */
1514  if (GetPrivateRefCount(BufferDescriptorGetBuffer(buf)) > 0)
1515  elog(ERROR, "buffer is pinned in InvalidateBuffer");
1516  WaitIO(buf);
1517  goto retry;
1518  }
1519 
1520  /*
1521  * Clear out the buffer's tag and flags. We must do this to ensure that
1522  * linear scans of the buffer array don't think the buffer is valid.
1523  */
1524  oldFlags = buf_state & BUF_FLAG_MASK;
1525  CLEAR_BUFFERTAG(buf->tag);
1526  buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
1527  UnlockBufHdr(buf, buf_state);
1528 
1529  /*
1530  * Remove the buffer from the lookup hashtable, if it was in there.
1531  */
1532  if (oldFlags & BM_TAG_VALID)
1533  BufTableDelete(&oldTag, oldHash);
1534 
1535  /*
1536  * Done with mapping lock.
1537  */
1538  LWLockRelease(oldPartitionLock);
1539 
1540  /*
1541  * Insert the buffer at the head of the list of free buffers.
1542  */
1543  StrategyFreeBuffer(buf);
1544 }
1545 
1546 /*
1547  * MarkBufferDirty
1548  *
1549  * Marks buffer contents as dirty (actual write happens later).
1550  *
1551  * Buffer must be pinned and exclusive-locked. (If caller does not hold
1552  * exclusive lock, then somebody could be in process of writing the buffer,
1553  * leading to risk of bad data written to disk.)
1554  */
1555 void
1556 MarkBufferDirty(Buffer buffer)
1557 {
1558  BufferDesc *bufHdr;
1559  uint32 buf_state;
1560  uint32 old_buf_state;
1561 
1562  if (!BufferIsValid(buffer))
1563  elog(ERROR, "bad buffer ID: %d", buffer);
1564 
1565  if (BufferIsLocal(buffer))
1566  {
1567  MarkLocalBufferDirty(buffer);
1568  return;
1569  }
1570 
1571  bufHdr = GetBufferDescriptor(buffer - 1);
1572 
1573  Assert(BufferIsPinned(buffer));
1574  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
1575  LW_EXCLUSIVE));
1576 
1577  old_buf_state = pg_atomic_read_u32(&bufHdr->state);
1578  for (;;)
1579  {
1580  if (old_buf_state & BM_LOCKED)
1581  old_buf_state = WaitBufHdrUnlocked(bufHdr);
1582 
1583  buf_state = old_buf_state;
1584 
1585  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1586  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
1587 
1588  if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
1589  buf_state))
1590  break;
1591  }
1592 
1593  /*
1594  * If the buffer was not dirty already, do vacuum accounting.
1595  */
1596  if (!(old_buf_state & BM_DIRTY))
1597  {
1598  VacuumPageDirty++;
1599  pgBufferUsage.shared_blks_dirtied++;
1600  if (VacuumCostActive)
1601  VacuumCostBalance += VacuumCostPageDirty;
1602  }
1603 }
1604 
1605 /*
1606  * ReleaseAndReadBuffer -- combine ReleaseBuffer() and ReadBuffer()
1607  *
1608  * Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
1609  * compared to calling the two routines separately. Now it's mainly just
1610  * a convenience function. However, if the passed buffer is valid and
1611  * already contains the desired block, we just return it as-is; and that
1612  * does save considerable work compared to a full release and reacquire.
1613  *
1614  * Note: it is OK to pass buffer == InvalidBuffer, indicating that no old
1615  * buffer actually needs to be released. This case is the same as ReadBuffer,
1616  * but can save some tests in the caller.
1617  */
1618 Buffer
1619 ReleaseAndReadBuffer(Buffer buffer,
1620  Relation relation,
1621  BlockNumber blockNum)
1622 {
1623  ForkNumber forkNum = MAIN_FORKNUM;
1624  BufferDesc *bufHdr;
1625 
1626  if (BufferIsValid(buffer))
1627  {
1628  Assert(BufferIsPinned(buffer));
1629  if (BufferIsLocal(buffer))
1630  {
1631  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
1632  if (bufHdr->tag.blockNum == blockNum &&
1633  RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
1634  bufHdr->tag.forkNum == forkNum)
1635  return buffer;
1636  ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
1637  LocalRefCount[-buffer - 1]--;
1638  }
1639  else
1640  {
1641  bufHdr = GetBufferDescriptor(buffer - 1);
1642  /* we have pin, so it's ok to examine tag without spinlock */
1643  if (bufHdr->tag.blockNum == blockNum &&
1644  RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
1645  bufHdr->tag.forkNum == forkNum)
1646  return buffer;
1647  UnpinBuffer(bufHdr, true);
1648  }
1649  }
1650 
1651  return ReadBuffer(relation, blockNum);
1652 }
1653 
1654 /*
1655  * PinBuffer -- make buffer unavailable for replacement.
1656  *
1657  * For the default access strategy, the buffer's usage_count is incremented
1658  * when we first pin it; for other strategies we just make sure the usage_count
1659  * isn't zero. (The idea of the latter is that we don't want synchronized
1660  * heap scans to inflate the count, but we need it to not be zero to discourage
1661  * other backends from stealing buffers from our ring. As long as we cycle
1662  * through the ring faster than the global clock-sweep cycles, buffers in
1663  * our ring won't be chosen as victims for replacement by other backends.)
1664  *
1665  * This should be applied only to shared buffers, never local ones.
1666  *
1667  * Since buffers are pinned/unpinned very frequently, pin buffers without
1668  * taking the buffer header lock; instead update the state variable in loop of
1669  * CAS operations. Hopefully it's just a single CAS.
1670  *
1671  * Note that ResourceOwnerEnlargeBuffers must have been done already.
1672  *
1673  * Returns true if buffer is BM_VALID, else false. This provision allows
1674  * some callers to avoid an extra spinlock cycle.
1675  */
1676 static bool
1677 PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
1678 {
1679  Buffer b = BufferDescriptorGetBuffer(buf);
1680  bool result;
1681  PrivateRefCountEntry *ref;
1682 
1683  ref = GetPrivateRefCountEntry(b, true);
1684 
1685  if (ref == NULL)
1686  {
1687  uint32 buf_state;
1688  uint32 old_buf_state;
1689 
1690  ReservePrivateRefCountEntry();
1691  ref = NewPrivateRefCountEntry(b);
1692 
1693  old_buf_state = pg_atomic_read_u32(&buf->state);
1694  for (;;)
1695  {
1696  if (old_buf_state & BM_LOCKED)
1697  old_buf_state = WaitBufHdrUnlocked(buf);
1698 
1699  buf_state = old_buf_state;
1700 
1701  /* increase refcount */
1702  buf_state += BUF_REFCOUNT_ONE;
1703 
1704  if (strategy == NULL)
1705  {
1706  /* Default case: increase usagecount unless already max. */
1707  if (BUF_STATE_GET_USAGECOUNT(buf_state) != BM_MAX_USAGE_COUNT)
1708  buf_state += BUF_USAGECOUNT_ONE;
1709  }
1710  else
1711  {
1712  /*
1713  * Ring buffers shouldn't evict others from pool. Thus we
1714  * don't make usagecount more than 1.
1715  */
1716  if (BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
1717  buf_state += BUF_USAGECOUNT_ONE;
1718  }
1719 
1720  if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
1721  buf_state))
1722  {
1723  result = (buf_state & BM_VALID) != 0;
1724 
1725  /*
1726  * Assume that we acquired a buffer pin for the purposes of
1727  * Valgrind buffer client checks (even in !result case) to
1728  * keep things simple. Buffers that are unsafe to access are
1729  * not generally guaranteed to be marked undefined or
1730  * non-accessible in any case.
1731  */
1732  VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), BLCKSZ);
1733  break;
1734  }
1735  }
1736  }
1737  else
1738  {
1739  /*
1740  * If we previously pinned the buffer, it must surely be valid.
1741  *
1742  * Note: We deliberately avoid a Valgrind client request here.
1743  * Individual access methods can optionally superimpose buffer page
1744  * client requests on top of our client requests to enforce that
1745  * buffers are only accessed while locked (and pinned). It's possible
1746  * that the buffer page is legitimately non-accessible here. We
1747  * cannot meddle with that.
1748  */
1749  result = true;
1750  }
1751 
1752  ref->refcount++;
1753  Assert(ref->refcount > 0);
1754  ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
1755  return result;
1756 }
1757 
1758 /*
1759  * PinBuffer_Locked -- as above, but caller already locked the buffer header.
1760  * The spinlock is released before return.
1761  *
1762  * As this function is called with the spinlock held, the caller has to
1763  * previously call ReservePrivateRefCountEntry().
1764  *
1765  * Currently, no callers of this function want to modify the buffer's
1766  * usage_count at all, so there's no need for a strategy parameter.
1767  * Also we don't bother with a BM_VALID test (the caller could check that for
1768  * itself).
1769  *
1770  * Also all callers only ever use this function when it's known that the
1771  * buffer can't have a preexisting pin by this backend. That allows us to skip
1772  * searching the private refcount array & hash, which is a boon, because the
1773  * spinlock is still held.
1774  *
1775  * Note: use of this routine is frequently mandatory, not just an optimization
1776  * to save a spin lock/unlock cycle, because we need to pin a buffer before
1777  * its state can change under us.
1778  */
1779 static void
1780 PinBuffer_Locked(BufferDesc *buf)
1781 {
1782  Buffer b;
1783  PrivateRefCountEntry *ref;
1784  uint32 buf_state;
1785 
1786  /*
1787  * As explained, we don't expect any preexisting pins. That allows us to
1788  * manipulate the PrivateRefCount after releasing the spinlock
1789  */
1790  Assert(GetPrivateRefCountEntry(BufferDescriptorGetBuffer(buf), false) == NULL);
1791 
1792  /*
1793  * Buffer can't have a preexisting pin, so mark its page as defined to
1794  * Valgrind (this is similar to the PinBuffer() case where the backend
1795  * doesn't already have a buffer pin)
1796  */
1797  VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), BLCKSZ);
1798 
1799  /*
1800  * Since we hold the buffer spinlock, we can update the buffer state and
1801  * release the lock in one operation.
1802  */
1803  buf_state = pg_atomic_read_u32(&buf->state);
1804  Assert(buf_state & BM_LOCKED);
1805  buf_state += BUF_REFCOUNT_ONE;
1806  UnlockBufHdr(buf, buf_state);
1807 
1808  b = BufferDescriptorGetBuffer(buf);
1809 
1810  ref = NewPrivateRefCountEntry(b);
1811  ref->refcount++;
1812 
1813  ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
1814 }
1815 
1816 /*
1817  * UnpinBuffer -- make buffer available for replacement.
1818  *
1819  * This should be applied only to shared buffers, never local ones.
1820  *
1821  * Most but not all callers want CurrentResourceOwner to be adjusted.
1822  * Those that don't should pass fixOwner = false.
1823  */
1824 static void
1825 UnpinBuffer(BufferDesc *buf, bool fixOwner)
1826 {
1827  PrivateRefCountEntry *ref;
1828  Buffer b = BufferDescriptorGetBuffer(buf);
1829 
1830  /* not moving as we're likely deleting it soon anyway */
1831  ref = GetPrivateRefCountEntry(b, false);
1832  Assert(ref != NULL);
1833 
1834  if (fixOwner)
1835  ResourceOwnerForgetBuffer(CurrentResourceOwner, b);
1836 
1837  Assert(ref->refcount > 0);
1838  ref->refcount--;
1839  if (ref->refcount == 0)
1840  {
1841  uint32 buf_state;
1842  uint32 old_buf_state;
1843 
1844  /*
1845  * Mark buffer non-accessible to Valgrind.
1846  *
1847  * Note that the buffer may have already been marked non-accessible
1848  * within access method code that enforces that buffers are only
1849  * accessed while a buffer lock is held.
1850  */
1851  VALGRIND_MAKE_MEM_NOACCESS(BufHdrGetBlock(buf), BLCKSZ);
1852 
1853  /* I'd better not still hold the buffer content lock */
1854  Assert(!LWLockHeldByMe(BufferDescriptorGetContentLock(buf)));
1855 
1856  /*
1857  * Decrement the shared reference count.
1858  *
1859  * Since buffer spinlock holder can update status using just write,
1860  * it's not safe to use atomic decrement here; thus use a CAS loop.
1861  */
1862  old_buf_state = pg_atomic_read_u32(&buf->state);
1863  for (;;)
1864  {
1865  if (old_buf_state & BM_LOCKED)
1866  old_buf_state = WaitBufHdrUnlocked(buf);
1867 
1868  buf_state = old_buf_state;
1869 
1870  buf_state -= BUF_REFCOUNT_ONE;
1871 
1872  if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
1873  buf_state))
1874  break;
1875  }
1876 
1877  /* Support LockBufferForCleanup() */
1878  if (buf_state & BM_PIN_COUNT_WAITER)
1879  {
1880  /*
1881  * Acquire the buffer header lock, re-check that there's a waiter.
1882  * Another backend could have unpinned this buffer, and already
1883  * woken up the waiter. There's no danger of the buffer being
1884  * replaced after we unpinned it above, as it's pinned by the
1885  * waiter.
1886  */
1887  buf_state = LockBufHdr(buf);
1888 
1889  if ((buf_state & BM_PIN_COUNT_WAITER) &&
1890  BUF_STATE_GET_REFCOUNT(buf_state) == 1)
1891  {
1892  /* we just released the last pin other than the waiter's */
1893  int wait_backend_pid = buf->wait_backend_pid;
1894 
1895  buf_state &= ~BM_PIN_COUNT_WAITER;
1896  UnlockBufHdr(buf, buf_state);
1897  ProcSendSignal(wait_backend_pid);
1898  }
1899  else
1900  UnlockBufHdr(buf, buf_state);
1901  }
1902  ForgetPrivateRefCountEntry(ref);
1903  }
1904 }
1905 
1906 #define ST_SORT sort_checkpoint_bufferids
1907 #define ST_ELEMENT_TYPE CkptSortItem
1908 #define ST_COMPARE(a, b) ckpt_buforder_comparator(a, b)
1909 #define ST_SCOPE static
1910 #define ST_DEFINE
1911 #include "lib/sort_template.h"
1912 
1913 /*
1914  * BufferSync -- Write out all dirty buffers in the pool.
1915  *
1916  * This is called at checkpoint time to write out all dirty shared buffers.
1917  * The checkpoint request flags should be passed in. If CHECKPOINT_IMMEDIATE
1918  * is set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN,
1919  * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_ALL is set, we write even
1920  * unlogged buffers, which are otherwise skipped. The remaining flags
1921  * currently have no effect here.
1922  */
1923 static void
1924 BufferSync(int flags)
1925 {
1926  uint32 buf_state;
1927  int buf_id;
1928  int num_to_scan;
1929  int num_spaces;
1930  int num_processed;
1931  int num_written;
1932  CkptTsStatus *per_ts_stat = NULL;
1933  Oid last_tsid;
1934  binaryheap *ts_heap;
1935  int i;
1936  int mask = BM_DIRTY;
1937  WritebackContext wb_context;
1938 
1939  /* Make sure we can handle the pin inside SyncOneBuffer */
1940  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
1941 
1942  /*
1943  * Unless this is a shutdown checkpoint or we have been explicitly told,
1944  * we write only permanent, dirty buffers. But at shutdown or end of
1945  * recovery, we write all dirty buffers.
1946  */
1947  if (!((flags & (CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_END_OF_RECOVERY |
1948  CHECKPOINT_FLUSH_ALL))))
1949  mask |= BM_PERMANENT;
1950 
1951  /*
1952  * Loop over all buffers, and mark the ones that need to be written with
1953  * BM_CHECKPOINT_NEEDED. Count them as we go (num_to_scan), so that we
1954  * can estimate how much work needs to be done.
1955  *
1956  * This allows us to write only those pages that were dirty when the
1957  * checkpoint began, and not those that get dirtied while it proceeds.
1958  * Whenever a page with BM_CHECKPOINT_NEEDED is written out, either by us
1959  * later in this function, or by normal backends or the bgwriter cleaning
1960  * scan, the flag is cleared. Any buffer dirtied after this point won't
1961  * have the flag set.
1962  *
1963  * Note that if we fail to write some buffer, we may leave buffers with
1964  * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
1965  * certainly need to be written for the next checkpoint attempt, too.
1966  */
1967  num_to_scan = 0;
1968  for (buf_id = 0; buf_id < NBuffers; buf_id++)
1969  {
1970  BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
1971 
1972  /*
1973  * Header spinlock is enough to examine BM_DIRTY, see comment in
1974  * SyncOneBuffer.
1975  */
1976  buf_state = LockBufHdr(bufHdr);
1977 
1978  if ((buf_state & mask) == mask)
1979  {
1980  CkptSortItem *item;
1981 
1982  buf_state |= BM_CHECKPOINT_NEEDED;
1983 
1984  item = &CkptBufferIds[num_to_scan++];
1985  item->buf_id = buf_id;
1986  item->tsId = bufHdr->tag.rnode.spcNode;
1987  item->relNode = bufHdr->tag.rnode.relNode;
1988  item->forkNum = bufHdr->tag.forkNum;
1989  item->blockNum = bufHdr->tag.blockNum;
1990  }
1991 
1992  UnlockBufHdr(bufHdr, buf_state);
1993 
1994  /* Check for barrier events in case NBuffers is large. */
1995  if (ProcSignalBarrierPending)
1996  ProcessProcSignalBarrier();
1997  }
1998 
1999  if (num_to_scan == 0)
2000  return; /* nothing to do */
2001 
2002  WritebackContextInit(&wb_context, &checkpoint_flush_after);
2003 
2004  TRACE_POSTGRESQL_BUFFER_SYNC_START(NBuffers, num_to_scan);
2005 
2006  /*
2007  * Sort buffers that need to be written to reduce the likelihood of random
2008  * IO. The sorting is also important for the implementation of balancing
2009  * writes between tablespaces. Without balancing writes we'd potentially
2010  * end up writing to the tablespaces one-by-one; possibly overloading the
2011  * underlying system.
2012  */
2013  sort_checkpoint_bufferids(CkptBufferIds, num_to_scan);
2014 
2015  num_spaces = 0;
2016 
2017  /*
2018  * Allocate progress status for each tablespace with buffers that need to
2019  * be flushed. This requires the to-be-flushed array to be sorted.
2020  */
2021  last_tsid = InvalidOid;
2022  for (i = 0; i < num_to_scan; i++)
2023  {
2024  CkptTsStatus *s;
2025  Oid cur_tsid;
2026 
2027  cur_tsid = CkptBufferIds[i].tsId;
2028 
2029  /*
2030  * Grow array of per-tablespace status structs, every time a new
2031  * tablespace is found.
2032  */
2033  if (last_tsid == InvalidOid || last_tsid != cur_tsid)
2034  {
2035  Size sz;
2036 
2037  num_spaces++;
2038 
2039  /*
2040  * Not worth adding grow-by-power-of-2 logic here - even with a
2041  * few hundred tablespaces this should be fine.
2042  */
2043  sz = sizeof(CkptTsStatus) * num_spaces;
2044 
2045  if (per_ts_stat == NULL)
2046  per_ts_stat = (CkptTsStatus *) palloc(sz);
2047  else
2048  per_ts_stat = (CkptTsStatus *) repalloc(per_ts_stat, sz);
2049 
2050  s = &per_ts_stat[num_spaces - 1];
2051  memset(s, 0, sizeof(*s));
2052  s->tsId = cur_tsid;
2053 
2054  /*
2055  * The first buffer in this tablespace. As CkptBufferIds is sorted
2056  * by tablespace all (s->num_to_scan) buffers in this tablespace
2057  * will follow afterwards.
2058  */
2059  s->index = i;
2060 
2061  /*
2062  * progress_slice will be determined once we know how many buffers
2063  * are in each tablespace, i.e. after this loop.
2064  */
2065 
2066  last_tsid = cur_tsid;
2067  }
2068  else
2069  {
2070  s = &per_ts_stat[num_spaces - 1];
2071  }
2072 
2073  s->num_to_scan++;
2074 
2075  /* Check for barrier events. */
2076  if (ProcSignalBarrierPending)
2077  ProcessProcSignalBarrier();
2078  }
2079 
2080  Assert(num_spaces > 0);
2081 
2082  /*
2083  * Build a min-heap over the write-progress in the individual tablespaces,
2084  * and compute how large a portion of the total progress a single
2085  * processed buffer is.
2086  */
2087  ts_heap = binaryheap_allocate(num_spaces,
2088  ts_ckpt_progress_comparator,
2089  NULL);
2090 
2091  for (i = 0; i < num_spaces; i++)
2092  {
2093  CkptTsStatus *ts_stat = &per_ts_stat[i];
2094 
2095  ts_stat->progress_slice = (float8) num_to_scan / ts_stat->num_to_scan;
2096 
2097  binaryheap_add_unordered(ts_heap, PointerGetDatum(ts_stat));
2098  }
2099 
2100  binaryheap_build(ts_heap);
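/*
 * Illustrative arithmetic (hypothetical numbers, not from the file itself):
 * with num_to_scan = 400 split as 300 buffers in tablespace A and 100 in
 * tablespace B, progress_slice is 400/300 ~= 1.33 for A and 400/100 = 4.0
 * for B.  Since the heap always yields the tablespace with the least
 * progress, roughly three A buffers get written per B buffer, and both
 * tablespaces reach the common target of 400 progress units - i.e. finish
 * their share of the checkpoint - at about the same time.
 */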
2101 
2102  /*
2103  * Iterate through to-be-checkpointed buffers and write the ones (still)
2104  * marked with BM_CHECKPOINT_NEEDED. The writes are balanced between
2105  * tablespaces; otherwise the sorting would lead to only one tablespace
2106  * receiving writes at a time, making inefficient use of the hardware.
2107  */
2108  num_processed = 0;
2109  num_written = 0;
2110  while (!binaryheap_empty(ts_heap))
2111  {
2112  BufferDesc *bufHdr = NULL;
2113  CkptTsStatus *ts_stat = (CkptTsStatus *)
2114  DatumGetPointer(binaryheap_first(ts_heap));
2115 
2116  buf_id = CkptBufferIds[ts_stat->index].buf_id;
2117  Assert(buf_id != -1);
2118 
2119  bufHdr = GetBufferDescriptor(buf_id);
2120 
2121  num_processed++;
2122 
2123  /*
2124  * We don't need to acquire the lock here, because we're only looking
2125  * at a single bit. It's possible that someone else writes the buffer
2126  * and clears the flag right after we check, but that doesn't matter
2127  * since SyncOneBuffer will then do nothing. However, there is a
2128  * further race condition: it's conceivable that between the time we
2129  * examine the bit here and the time SyncOneBuffer acquires the lock,
2130  * someone else not only wrote the buffer but replaced it with another
2131  * page and dirtied it. In that improbable case, SyncOneBuffer will
2132  * write the buffer though we didn't need to. It doesn't seem worth
2133  * guarding against this, though.
2134  */
2135  if (pg_atomic_read_u32(&bufHdr->state) & BM_CHECKPOINT_NEEDED)
2136  {
2137  if (SyncOneBuffer(buf_id, false, &wb_context) & BUF_WRITTEN)
2138  {
2139  TRACE_POSTGRESQL_BUFFER_SYNC_WRITTEN(buf_id);
2140  BgWriterStats.m_buf_written_checkpoints++;
2141  num_written++;
2142  }
2143  }
2144 
2145  /*
2146  * Measure progress independently of whether we actually had to flush the
2147  * buffer - otherwise writes would become unbalanced.
2148  */
2149  ts_stat->progress += ts_stat->progress_slice;
2150  ts_stat->num_scanned++;
2151  ts_stat->index++;
2152 
2153  /* Have all the buffers from the tablespace been processed? */
2154  if (ts_stat->num_scanned == ts_stat->num_to_scan)
2155  {
2156  binaryheap_remove_first(ts_heap);
2157  }
2158  else
2159  {
2160  /* update heap with the new progress */
2161  binaryheap_replace_first(ts_heap, PointerGetDatum(ts_stat));
2162  }
2163 
2164  /*
2165  * Sleep to throttle our I/O rate.
2166  *
2167  * (This will check for barrier events even if it doesn't sleep.)
2168  */
2169  CheckpointWriteDelay(flags, (double) num_processed / num_to_scan);
2170  }
2171 
2172  /* issue all pending flushes */
2173  IssuePendingWritebacks(&wb_context);
2174 
2175  pfree(per_ts_stat);
2176  per_ts_stat = NULL;
2177  binaryheap_free(ts_heap);
2178 
2179  /*
2180  * Update checkpoint statistics. As noted above, this doesn't include
2181  * buffers written by other backends or bgwriter scan.
2182  */
2183  CheckpointStats.ckpt_bufs_written += num_written;
2184 
2185  TRACE_POSTGRESQL_BUFFER_SYNC_DONE(NBuffers, num_written, num_to_scan);
2186 }
2187 
2188 /*
2189  * BgBufferSync -- Write out some dirty buffers in the pool.
2190  *
2191  * This is called periodically by the background writer process.
2192  *
2193  * Returns true if it's appropriate for the bgwriter process to go into
2194  * low-power hibernation mode. (This happens if the strategy clock sweep
2195  * has been "lapped" and no buffer allocations have occurred recently,
2196  * or if the bgwriter has been effectively disabled by setting
2197  * bgwriter_lru_maxpages to 0.)
2198  */
2199 bool
2200 BgBufferSync(WritebackContext *wb_context)
2201 {
2202  /* info obtained from freelist.c */
2203  int strategy_buf_id;
2204  uint32 strategy_passes;
2205  uint32 recent_alloc;
2206 
2207  /*
2208  * Information saved between calls so we can determine the strategy
2209  * point's advance rate and avoid scanning already-cleaned buffers.
2210  */
2211  static bool saved_info_valid = false;
2212  static int prev_strategy_buf_id;
2213  static uint32 prev_strategy_passes;
2214  static int next_to_clean;
2215  static uint32 next_passes;
2216 
2217  /* Moving averages of allocation rate and clean-buffer density */
2218  static float smoothed_alloc = 0;
2219  static float smoothed_density = 10.0;
2220 
2221  /* Potentially these could be tunables, but for now, not */
2222  float smoothing_samples = 16;
2223  float scan_whole_pool_milliseconds = 120000.0;
2224 
2225  /* Used to compute how far we scan ahead */
2226  long strategy_delta;
2227  int bufs_to_lap;
2228  int bufs_ahead;
2229  float scans_per_alloc;
2230  int reusable_buffers_est;
2231  int upcoming_alloc_est;
2232  int min_scan_buffers;
2233 
2234  /* Variables for the scanning loop proper */
2235  int num_to_scan;
2236  int num_written;
2237  int reusable_buffers;
2238 
2239  /* Variables for final smoothed_density update */
2240  long new_strategy_delta;
2241  uint32 new_recent_alloc;
2242 
2243  /*
2244  * Find out where the freelist clock sweep currently is, and how many
2245  * buffer allocations have happened since our last call.
2246  */
2247  strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
2248 
2249  /* Report buffer alloc counts to pgstat */
2250  BgWriterStats.m_buf_alloc += recent_alloc;
2251 
2252  /*
2253  * If we're not running the LRU scan, just stop after doing the stats
2254  * stuff. We mark the saved state invalid so that we can recover sanely
2255  * if LRU scan is turned back on later.
2256  */
2257  if (bgwriter_lru_maxpages <= 0)
2258  {
2259  saved_info_valid = false;
2260  return true;
2261  }
2262 
2263  /*
2264  * Compute strategy_delta = how many buffers have been scanned by the
2265  * clock sweep since last time. If first time through, assume none. Then
2266  * see if we are still ahead of the clock sweep, and if so, how many
2267  * buffers we could scan before we'd catch up with it and "lap" it. Note:
2268  * weird-looking coding of xxx_passes comparisons is to avoid bogus
2269  * behavior when the passes counts wrap around.
2270  */
2271  if (saved_info_valid)
2272  {
2273  int32 passes_delta = strategy_passes - prev_strategy_passes;
2274 
2275  strategy_delta = strategy_buf_id - prev_strategy_buf_id;
2276  strategy_delta += (long) passes_delta * NBuffers;
2277 
2278  Assert(strategy_delta >= 0);
2279 
2280  if ((int32) (next_passes - strategy_passes) > 0)
2281  {
2282  /* we're one pass ahead of the strategy point */
2283  bufs_to_lap = strategy_buf_id - next_to_clean;
2284 #ifdef BGW_DEBUG
2285  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2286  next_passes, next_to_clean,
2287  strategy_passes, strategy_buf_id,
2288  strategy_delta, bufs_to_lap);
2289 #endif
2290  }
2291  else if (next_passes == strategy_passes &&
2292  next_to_clean >= strategy_buf_id)
2293  {
2294  /* on same pass, but ahead or at least not behind */
2295  bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
2296 #ifdef BGW_DEBUG
2297  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2298  next_passes, next_to_clean,
2299  strategy_passes, strategy_buf_id,
2300  strategy_delta, bufs_to_lap);
2301 #endif
2302  }
2303  else
2304  {
2305  /*
2306  * We're behind, so skip forward to the strategy point and start
2307  * cleaning from there.
2308  */
2309 #ifdef BGW_DEBUG
2310  elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
2311  next_passes, next_to_clean,
2312  strategy_passes, strategy_buf_id,
2313  strategy_delta);
2314 #endif
2315  next_to_clean = strategy_buf_id;
2316  next_passes = strategy_passes;
2317  bufs_to_lap = NBuffers;
2318  }
2319  }
2320  else
2321  {
2322  /*
2323  * Initializing at startup or after LRU scanning had been off. Always
2324  * start at the strategy point.
2325  */
2326 #ifdef BGW_DEBUG
2327  elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
2328  strategy_passes, strategy_buf_id);
2329 #endif
2330  strategy_delta = 0;
2331  next_to_clean = strategy_buf_id;
2332  next_passes = strategy_passes;
2333  bufs_to_lap = NBuffers;
2334  }
2335 
2336  /* Update saved info for next time */
2337  prev_strategy_buf_id = strategy_buf_id;
2338  prev_strategy_passes = strategy_passes;
2339  saved_info_valid = true;
2340 
2341  /*
2342  * Compute how many buffers had to be scanned for each new allocation, ie,
2343  * 1/density of reusable buffers, and track a moving average of that.
2344  *
2345  * If the strategy point didn't move, we don't update the density estimate
2346  */
2347  if (strategy_delta > 0 && recent_alloc > 0)
2348  {
2349  scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
2350  smoothed_density += (scans_per_alloc - smoothed_density) /
2351  smoothing_samples;
2352  }
2353 
2354  /*
2355  * Estimate how many reusable buffers there are between the current
2356  * strategy point and where we've scanned ahead to, based on the smoothed
2357  * density estimate.
2358  */
2359  bufs_ahead = NBuffers - bufs_to_lap;
2360  reusable_buffers_est = (float) bufs_ahead / smoothed_density;
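/*
 * Worked example (hypothetical numbers): if the clock sweep advanced
 * strategy_delta = 800 buffers while recent_alloc = 100 allocations
 * happened, scans_per_alloc = 8, pulling smoothed_density toward 8
 * (about one reusable buffer per 8 buffers scanned).  With bufs_ahead =
 * 1600 buffers already scanned ahead of the strategy point, that yields
 * reusable_buffers_est ~= 1600 / 8 = 200.
 */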
2361 
2362  /*
2363  * Track a moving average of recent buffer allocations. Here, rather than
2364  * a true average we want a fast-attack, slow-decline behavior: we
2365  * immediately follow any increase.
2366  */
2367  if (smoothed_alloc <= (float) recent_alloc)
2368  smoothed_alloc = recent_alloc;
2369  else
2370  smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
2371  smoothing_samples;
2372 
2373  /* Scale the estimate by a GUC to allow more aggressive tuning. */
2374  upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
2375 
2376  /*
2377  * If recent_alloc remains at zero for many cycles, smoothed_alloc will
2378  * eventually underflow to zero, and the underflows produce annoying
2379  * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
2380  * zero, there's no point in tracking smaller and smaller values of
2381  * smoothed_alloc, so just reset it to exactly zero to avoid this
2382  * syndrome. It will pop back up as soon as recent_alloc increases.
2383  */
2384  if (upcoming_alloc_est == 0)
2385  smoothed_alloc = 0;
2386 
2387  /*
2388  * Even in cases where there's been little or no buffer allocation
2389  * activity, we want to make a small amount of progress through the buffer
2390  * cache so that as many reusable buffers as possible are clean after an
2391  * idle period.
2392  *
2393  * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
2394  * the BGW will be called during the scan_whole_pool time; slice the
2395  * buffer pool into that many sections.
2396  */
2397  min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
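/*
 * Example: assuming the default bgwriter_delay of 200 ms and, hypothetically,
 * NBuffers = 16384, there are 120000 / 200 = 600 calls per full sweep, so
 * min_scan_buffers ~= 16384 / 600 ~= 27 buffers per call.
 */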
2398 
2399  if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
2400  {
2401 #ifdef BGW_DEBUG
2402  elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
2403  upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
2404 #endif
2405  upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
2406  }
2407 
2408  /*
2409  * Now write out dirty reusable buffers, working forward from the
2410  * next_to_clean point, until we have lapped the strategy scan, or cleaned
2411  * enough buffers to match our estimate of the next cycle's allocation
2412  * requirements, or hit the bgwriter_lru_maxpages limit.
2413  */
2414 
2415  /* Make sure we can handle the pin inside SyncOneBuffer */
2416  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
2417 
2418  num_to_scan = bufs_to_lap;
2419  num_written = 0;
2420  reusable_buffers = reusable_buffers_est;
2421 
2422  /* Execute the LRU scan */
2423  while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
2424  {
2425  int sync_state = SyncOneBuffer(next_to_clean, true,
2426  wb_context);
2427 
2428  if (++next_to_clean >= NBuffers)
2429  {
2430  next_to_clean = 0;
2431  next_passes++;
2432  }
2433  num_to_scan--;
2434 
2435  if (sync_state & BUF_WRITTEN)
2436  {
2437  reusable_buffers++;
2438  if (++num_written >= bgwriter_lru_maxpages)
2439  {
2440  BgWriterStats.m_maxwritten_clean++;
2441  break;
2442  }
2443  }
2444  else if (sync_state & BUF_REUSABLE)
2445  reusable_buffers++;
2446  }
2447 
2448  BgWriterStats.m_buf_written_clean += num_written;
2449 
2450 #ifdef BGW_DEBUG
2451  elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
2452  recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
2453  smoothed_density, reusable_buffers_est, upcoming_alloc_est,
2454  bufs_to_lap - num_to_scan,
2455  num_written,
2456  reusable_buffers - reusable_buffers_est);
2457 #endif
2458 
2459  /*
2460  * Consider the above scan as being like a new allocation scan.
2461  * Characterize its density and update the smoothed one based on it. This
2462  * effectively halves the moving average period in cases where both the
2463  * strategy and the background writer are doing some useful scanning,
2464  * which is helpful because a long memory isn't as desirable on the
2465  * density estimates.
2466  */
2467  new_strategy_delta = bufs_to_lap - num_to_scan;
2468  new_recent_alloc = reusable_buffers - reusable_buffers_est;
2469  if (new_strategy_delta > 0 && new_recent_alloc > 0)
2470  {
2471  scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
2472  smoothed_density += (scans_per_alloc - smoothed_density) /
2473  smoothing_samples;
2474 
2475 #ifdef BGW_DEBUG
2476  elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
2477  new_recent_alloc, new_strategy_delta,
2478  scans_per_alloc, smoothed_density);
2479 #endif
2480  }
2481 
2482  /* Return true if OK to hibernate */
2483  return (bufs_to_lap == 0 && recent_alloc == 0);
2484 }
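/*
 * Hypothetical caller sketch (the real loop lives in BackgroundWriterMain
 * in bgwriter.c), showing how the return value is intended to be used:
 *
 *     bool can_hibernate = BgBufferSync(&wb_context);
 *     ...
 *     if (can_hibernate)
 *         wait on the process latch with a much longer timeout;
 */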
2485 
2486 /*
2487  * SyncOneBuffer -- process a single buffer during syncing.
2488  *
2489  * If skip_recently_used is true, we don't write currently-pinned buffers, nor
2490  * buffers marked recently used, as these are not replacement candidates.
2491  *
2492  * Returns a bitmask containing the following flag bits:
2493  * BUF_WRITTEN: we wrote the buffer.
2494  * BUF_REUSABLE: buffer is available for replacement, ie, it has
2495  * pin count 0 and usage count 0.
2496  *
2497  * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
2498  * after locking it, but we don't care all that much.)
2499  *
2500  * Note: caller must have done ResourceOwnerEnlargeBuffers.
2501  */
2502 static int
2503 SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
2504 {
2505  BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
2506  int result = 0;
2507  uint32 buf_state;
2508  BufferTag tag;
2509 
2510  ReservePrivateRefCountEntry();
2511 
2512  /*
2513  * Check whether buffer needs writing.
2514  *
2515  * We can make this check without taking the buffer content lock so long
2516  * as we mark pages dirty in access methods *before* logging changes with
2517  * XLogInsert(): if someone marks the buffer dirty just after our check we
2518  * don't worry, because our checkpoint.redo points before the log record for
2519  * the upcoming changes and so we are not required to write such a dirty buffer.
2520  */
2521  buf_state = LockBufHdr(bufHdr);
2522 
2523  if (BUF_STATE_GET_REFCOUNT(buf_state) == 0 &&
2524  BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
2525  {
2526  result |= BUF_REUSABLE;
2527  }
2528  else if (skip_recently_used)
2529  {
2530  /* Caller told us not to write recently-used buffers */
2531  UnlockBufHdr(bufHdr, buf_state);
2532  return result;
2533  }
2534 
2535  if (!(buf_state & BM_VALID) || !(buf_state & BM_DIRTY))
2536  {
2537  /* It's clean, so nothing to do */
2538  UnlockBufHdr(bufHdr, buf_state);
2539  return result;
2540  }
2541 
2542  /*
2543  * Pin it, share-lock it, write it. (FlushBuffer will do nothing if the
2544  * buffer is clean by the time we've locked it.)
2545  */
2546  PinBuffer_Locked(bufHdr);
2547  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
2548 
2549  FlushBuffer(bufHdr, NULL);
2550 
2551  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
2552 
2553  tag = bufHdr->tag;
2554 
2555  UnpinBuffer(bufHdr, true);
2556 
2557  ScheduleBufferTagForWriteback(wb_context, &tag);
2558 
2559  return result | BUF_WRITTEN;
2560 }
2561 
2562 /*
2563  * AtEOXact_Buffers - clean up at end of transaction.
2564  *
2565  * As of PostgreSQL 8.0, buffer pins should get released by the
2566  * ResourceOwner mechanism. This routine is just a debugging
2567  * cross-check that no pins remain.
2568  */
2569 void
2570 AtEOXact_Buffers(bool isCommit)
2571 {
2572  CheckForBufferLeaks();
2573 
2574  AtEOXact_LocalBuffers(isCommit);
2575 
2576  Assert(PrivateRefCountOverflowed == 0);
2577 }
2578 
2579 /*
2580  * Initialize access to shared buffer pool
2581  *
2582  * This is called during backend startup (whether standalone or under the
2583  * postmaster). It sets up for this backend's access to the already-existing
2584  * buffer pool.
2585  *
2586  * NB: this is called before InitProcess(), so we do not have a PGPROC and
2587  * cannot do LWLockAcquire; hence we can't actually access stuff in
2588  * shared memory yet. We are only initializing local data here.
2589  * (See also InitBufferPoolBackend)
2590  */
2591 void
2592 InitBufferPoolAccess(void)
2593 {
2594  HASHCTL hash_ctl;
2595 
2596  memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
2597 
2598  hash_ctl.keysize = sizeof(int32);
2599  hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
2600 
2601  PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
2602  HASH_ELEM | HASH_BLOBS);
2603 }
2604 
2605 /*
2606  * InitBufferPoolBackend --- second-stage initialization of a new backend
2607  *
2608  * This is called after we have acquired a PGPROC and so can safely get
2609  * LWLocks. We don't currently need to do anything at this stage ...
2610  * except register a shmem-exit callback. AtProcExit_Buffers needs LWLock
2611  * access, and thereby has to be called at the corresponding phase of
2612  * backend shutdown.
2613  */
2614 void
2615 InitBufferPoolBackend(void)
2616 {
2617  on_shmem_exit(AtProcExit_Buffers, 0);
2618 }
2619 
2620 /*
2621  * During backend exit, ensure that we released all shared-buffer locks and
2622  * assert that we have no remaining pins.
2623  */
2624 static void
2625 AtProcExit_Buffers(int code, Datum arg)
2626 {
2627  AbortBufferIO();
2628  UnlockBuffers();
2629 
2630  CheckForBufferLeaks();
2631 
2632  /* localbuf.c needs a chance too */
2633  AtProcExit_LocalBuffers();
2634 }
2635 
2636 /*
2637  * CheckForBufferLeaks - ensure this backend holds no buffer pins
2638  *
2639  * As of PostgreSQL 8.0, buffer pins should get released by the
2640  * ResourceOwner mechanism. This routine is just a debugging
2641  * cross-check that no pins remain.
2642  */
2643 static void
2644 CheckForBufferLeaks(void)
2645 {
2646 #ifdef USE_ASSERT_CHECKING
2647  int RefCountErrors = 0;
2648  PrivateRefCountEntry *res;
2649  int i;
2650 
2651  /* check the array */
2652  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
2653  {
2654  res = &PrivateRefCountArray[i];
2655 
2656  if (res->buffer != InvalidBuffer)
2657  {
2658  PrintBufferLeakWarning(res->buffer);
2659  RefCountErrors++;
2660  }
2661  }
2662 
2663  /* if necessary search the hash */
2664  if (PrivateRefCountOverflowed)
2665  {
2666  HASH_SEQ_STATUS hstat;
2667 
2668  hash_seq_init(&hstat, PrivateRefCountHash);
2669  while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
2670  {
2671  PrintBufferLeakWarning(res->buffer);
2672  RefCountErrors++;
2673  }
2674 
2675  }
2676 
2677  Assert(RefCountErrors == 0);
2678 #endif
2679 }
2680 
2681 /*
2682  * Helper routine to issue warnings when a buffer is unexpectedly pinned
2683  */
2684 void
2685 PrintBufferLeakWarning(Buffer buffer)
2686 {
2687  BufferDesc *buf;
2688  int32 loccount;
2689  char *path;
2690  BackendId backend;
2691  uint32 buf_state;
2692 
2693  Assert(BufferIsValid(buffer));
2694  if (BufferIsLocal(buffer))
2695  {
2696  buf = GetLocalBufferDescriptor(-buffer - 1);
2697  loccount = LocalRefCount[-buffer - 1];
2698  backend = MyBackendId;
2699  }
2700  else
2701  {
2702  buf = GetBufferDescriptor(buffer - 1);
2703  loccount = GetPrivateRefCount(buffer);
2704  backend = InvalidBackendId;
2705  }
2706 
2707  /* theoretically we should lock the bufhdr here */
2708  path = relpathbackend(buf->tag.rnode, backend, buf->tag.forkNum);
2709  buf_state = pg_atomic_read_u32(&buf->state);
2710  elog(WARNING,
2711  "buffer refcount leak: [%03d] "
2712  "(rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
2713  buffer, path,
2714  buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
2715  BUF_STATE_GET_REFCOUNT(buf_state), loccount);
2716  pfree(path);
2717 }
2718 
2719 /*
2720  * CheckPointBuffers
2721  *
2722  * Flush all dirty blocks in buffer pool to disk at checkpoint time.
2723  *
2724  * Note: temporary relations do not participate in checkpoints, so they don't
2725  * need to be flushed.
2726  */
2727 void
2728 CheckPointBuffers(int flags)
2729 {
2730  BufferSync(flags);
2731 }
2732 
2733 
2734 /*
2735  * Do whatever is needed to prepare for commit at the bufmgr and smgr levels
2736  */
2737 void
2738 BufmgrCommit(void)
2739 {
2740  /* Nothing to do in bufmgr anymore... */
2741 }
2742 
2743 /*
2744  * BufferGetBlockNumber
2745  * Returns the block number associated with a buffer.
2746  *
2747  * Note:
2748  * Assumes that the buffer is valid and pinned, else the
2749  * value may be obsolete immediately...
2750  */
2751 BlockNumber
2752 BufferGetBlockNumber(Buffer buffer)
2753 {
2754  BufferDesc *bufHdr;
2755 
2756  Assert(BufferIsPinned(buffer));
2757 
2758  if (BufferIsLocal(buffer))
2759  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2760  else
2761  bufHdr = GetBufferDescriptor(buffer - 1);
2762 
2763  /* pinned, so OK to read tag without spinlock */
2764  return bufHdr->tag.blockNum;
2765 }
2766 
2767 /*
2768  * BufferGetTag
2769  * Returns the relfilenode, fork number and block number associated with
2770  * a buffer.
2771  */
2772 void
2774  BlockNumber *blknum)
2775 {
2776  BufferDesc *bufHdr;
2777 
2778  /* Do the same checks as BufferGetBlockNumber. */
2779  Assert(BufferIsPinned(buffer));
2780 
2781  if (BufferIsLocal(buffer))
2782  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2783  else
2784  bufHdr = GetBufferDescriptor(buffer - 1);
2785 
2786  /* pinned, so OK to read tag without spinlock */
2787  *rnode = bufHdr->tag.rnode;
2788  *forknum = bufHdr->tag.forkNum;
2789  *blknum = bufHdr->tag.blockNum;
2790 }
2791 
2792 /*
2793  * FlushBuffer
2794  * Physically write out a shared buffer.
2795  *
2796  * NOTE: this actually just passes the buffer contents to the kernel; the
2797  * real write to disk won't happen until the kernel feels like it. This
2798  * is okay from our point of view since we can redo the changes from WAL.
2799  * However, we will need to force the changes to disk via fsync before
2800  * we can checkpoint WAL.
2801  *
2802  * The caller must hold a pin on the buffer and have share-locked the
2803  * buffer contents. (Note: a share-lock does not prevent updates of
2804  * hint bits in the buffer, so the page could change while the write
2805  * is in progress, but we assume that that will not invalidate the data
2806  * written.)
2807  *
2808  * If the caller has an smgr reference for the buffer's relation, pass it
2809  * as the second parameter. If not, pass NULL.
2810  */
2811 static void
2812 FlushBuffer(BufferDesc *buf, SMgrRelation reln)
2813 {
2814  XLogRecPtr recptr;
2815  ErrorContextCallback errcallback;
2816  instr_time io_start,
2817  io_time;
2818  Block bufBlock;
2819  char *bufToWrite;
2820  uint32 buf_state;
2821 
2822  /*
2823  * Try to start an I/O operation. If StartBufferIO returns false, then
2824  * someone else flushed the buffer before we could, so we need not do
2825  * anything.
2826  */
2827  if (!StartBufferIO(buf, false))
2828  return;
2829 
2830  /* Setup error traceback support for ereport() */
2831  errcallback.callback = shared_buffer_write_error_callback;
2832  errcallback.arg = (void *) buf;
2833  errcallback.previous = error_context_stack;
2834  error_context_stack = &errcallback;
2835 
2836  /* Find smgr relation for buffer */
2837  if (reln == NULL)
2838  reln = smgropen(buf->tag.rnode, InvalidBackendId);
2839 
2840  TRACE_POSTGRESQL_BUFFER_FLUSH_START(buf->tag.forkNum,
2841  buf->tag.blockNum,
2842  reln->smgr_rnode.node.spcNode,
2843  reln->smgr_rnode.node.dbNode,
2844  reln->smgr_rnode.node.relNode);
2845 
2846  buf_state = LockBufHdr(buf);
2847 
2848  /*
2849  * Run PageGetLSN while holding header lock, since we don't have the
2850  * buffer locked exclusively in all cases.
2851  */
2852  recptr = BufferGetLSN(buf);
2853 
2854  /* To check if block content changes while flushing. - vadim 01/17/97 */
2855  buf_state &= ~BM_JUST_DIRTIED;
2856  UnlockBufHdr(buf, buf_state);
2857 
2858  /*
2859  * Force XLOG flush up to buffer's LSN. This implements the basic WAL
2860  * rule that log updates must hit disk before any of the data-file changes
2861  * they describe do.
2862  *
2863  * However, this rule does not apply to unlogged relations, which will be
2864  * lost after a crash anyway. Most unlogged relation pages do not bear
2865  * LSNs since we never emit WAL records for them, and therefore flushing
2866  * up through the buffer LSN would be useless, but harmless. However,
2867  * GiST indexes use LSNs internally to track page-splits, and therefore
2868  * unlogged GiST pages bear "fake" LSNs generated by
2869  * GetFakeLSNForUnloggedRel. It is unlikely but possible that the fake
2870  * LSN counter could advance past the WAL insertion point; and if it did
2871  * happen, attempting to flush WAL through that location would fail, with
2872  * disastrous system-wide consequences. To make sure that can't happen,
2873  * skip the flush if the buffer isn't permanent.
2874  */
2875  if (buf_state & BM_PERMANENT)
2876  XLogFlush(recptr);
2877 
2878  /*
2879  * Now it's safe to write buffer to disk. Note that no one else should
2880  * have been able to write it while we were busy with log flushing because
2881  * only one process at a time can set the BM_IO_IN_PROGRESS bit.
2882  */
2883  bufBlock = BufHdrGetBlock(buf);
2884 
2885  /*
2886  * Update page checksum if desired. Since we have only shared lock on the
2887  * buffer, other processes might be updating hint bits in it, so we must
2888  * copy the page to private storage if we do checksumming.
2889  */
2890  bufToWrite = PageSetChecksumCopy((Page) bufBlock, buf->tag.blockNum);
2891 
2892  if (track_io_timing)
2893  INSTR_TIME_SET_CURRENT(io_start);
2894 
2895  /*
2896  * bufToWrite is either the shared buffer or a copy, as appropriate.
2897  */
2898  smgrwrite(reln,
2899  buf->tag.forkNum,
2900  buf->tag.blockNum,
2901  bufToWrite,
2902  false);
2903 
2904  if (track_io_timing)
2905  {
2906  INSTR_TIME_SET_CURRENT(io_time);
2907  INSTR_TIME_SUBTRACT(io_time, io_start);
2908  pgstat_count_buffer_write_time(INSTR_TIME_GET_MICROSEC(io_time));
2909  INSTR_TIME_ADD(pgBufferUsage.blk_write_time, io_time);
2910  }
2911 
2912  pgBufferUsage.shared_blks_written++;
2913 
2914  /*
2915  * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set) and
2916  * end the BM_IO_IN_PROGRESS state.
2917  */
2918  TerminateBufferIO(buf, true, 0);
2919 
2920  TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(buf->tag.forkNum,
2921  buf->tag.blockNum,
2922  reln->smgr_rnode.node.spcNode,
2923  reln->smgr_rnode.node.dbNode,
2924  reln->smgr_rnode.node.relNode);
2925 
2926  /* Pop the error context stack */
2927  error_context_stack = errcallback.previous;
2928 }
2929 
2930 /*
2931  * RelationGetNumberOfBlocksInFork
2932  * Determines the current number of pages in the specified relation fork.
2933  *
2934  * Note that the accuracy of the result will depend on the details of the
2935  * relation's storage. For builtin AMs it'll be accurate, but for external AMs
2936  * it might not be.
2937  */
2938 BlockNumber
2939 RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
2940 {
2941  switch (relation->rd_rel->relkind)
2942  {
2943  case RELKIND_SEQUENCE:
2944  case RELKIND_INDEX:
2945  case RELKIND_PARTITIONED_INDEX:
2946  return smgrnblocks(RelationGetSmgr(relation), forkNum);
2947 
2948  case RELKIND_RELATION:
2949  case RELKIND_TOASTVALUE:
2950  case RELKIND_MATVIEW:
2951  {
2952  /*
2953  * Not every table AM uses BLCKSZ wide fixed size blocks.
2954  * Therefore tableam returns the size in bytes - but for the
2955  * purpose of this routine, we want the number of blocks.
2956  * Therefore divide, rounding up.
2957  */
2958  uint64 szbytes;
2959 
2960  szbytes = table_relation_size(relation, forkNum);
2961 
2962  return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
2963  }
2964  case RELKIND_VIEW:
2965  case RELKIND_COMPOSITE_TYPE:
2966  case RELKIND_FOREIGN_TABLE:
2967  case RELKIND_PARTITIONED_TABLE:
2968  default:
2969  Assert(false);
2970  break;
2971  }
2972 
2973  return 0; /* keep compiler quiet */
2974 }
2975 
2976 /*
2977  * BufferIsPermanent
2978  * Determines whether a buffer will potentially still be around after
2979  * a crash. Caller must hold a buffer pin.
2980  */
2981 bool
2982 BufferIsPermanent(Buffer buffer)
2983 {
2984  BufferDesc *bufHdr;
2985 
2986  /* Local buffers are used only for temp relations. */
2987  if (BufferIsLocal(buffer))
2988  return false;
2989 
2990  /* Make sure we've got a real buffer, and that we hold a pin on it. */
2991  Assert(BufferIsValid(buffer));
2992  Assert(BufferIsPinned(buffer));
2993 
2994  /*
2995  * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
2996  * need not bother with the buffer header spinlock. Even if someone else
2997  * changes the buffer header state while we're doing this, the state is
2998  * changed atomically, so we'll read the old value or the new value, but
2999  * not random garbage.
3000  */
3001  bufHdr = GetBufferDescriptor(buffer - 1);
3002  return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
3003 }
3004 
3005 /*
3006  * BufferGetLSNAtomic
3007  * Retrieves the LSN of the buffer atomically using a buffer header lock.
3008  * This is necessary for some callers who may not have an exclusive lock
3009  * on the buffer.
3010  */
3011 XLogRecPtr
3012 BufferGetLSNAtomic(Buffer buffer)
3013 {
3014  BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
3015  char *page = BufferGetPage(buffer);
3016  XLogRecPtr lsn;
3017  uint32 buf_state;
3018 
3019  /*
3020  * If we don't need locking for correctness, fastpath out.
3021  */
3022  if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
3023  return PageGetLSN(page);
3024 
3025  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3026  Assert(BufferIsValid(buffer));
3027  Assert(BufferIsPinned(buffer));
3028 
3029  buf_state = LockBufHdr(bufHdr);
3030  lsn = PageGetLSN(page);
3031  UnlockBufHdr(bufHdr, buf_state);
3032 
3033  return lsn;
3034 }
3035 
3036 /* ---------------------------------------------------------------------
3037  * DropRelFileNodeBuffers
3038  *
3039  * This function removes from the buffer pool all the pages of the
3040  * specified relation forks that have block numbers >= firstDelBlock.
3041  * (In particular, with firstDelBlock = 0, all pages are removed.)
3042  * Dirty pages are simply dropped, without bothering to write them
3043  * out first. Therefore, this is NOT rollback-able, and so should be
3044  * used only with extreme caution!
3045  *
3046  * Currently, this is called only from smgr.c when the underlying file
3047  * is about to be deleted or truncated (firstDelBlock is needed for
3048  * the truncation case). The data in the affected pages would therefore
3049  * be deleted momentarily anyway, and there is no point in writing it.
3050  * It is the responsibility of higher-level code to ensure that the
3051  * deletion or truncation does not lose any data that could be needed
3052  * later. It is also the responsibility of higher-level code to ensure
3053  * that no other process could be trying to load more pages of the
3054  * relation into buffers.
3055  * --------------------------------------------------------------------
3056  */
3057 void
3058 DropRelFileNodeBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
3059  int nforks, BlockNumber *firstDelBlock)
3060 {
3061  int i;
3062  int j;
3063  RelFileNodeBackend rnode;
3064  BlockNumber nForkBlock[MAX_FORKNUM];
3065  uint64 nBlocksToInvalidate = 0;
3066 
3067  rnode = smgr_reln->smgr_rnode;
3068 
3069  /* If it's a local relation, it's localbuf.c's problem. */
3070  if (RelFileNodeBackendIsTemp(rnode))
3071  {
3072  if (rnode.backend == MyBackendId)
3073  {
3074  for (j = 0; j < nforks; j++)
3075  DropRelFileNodeLocalBuffers(rnode.node, forkNum[j],
3076  firstDelBlock[j]);
3077  }
3078  return;
3079  }
3080 
3081  /*
3082  * To remove all the pages of the specified relation forks from the buffer
3083  * pool, we need to scan the entire buffer pool but we can optimize it by
3084  * finding the buffers from BufMapping table provided we know the exact
3085  * size of each fork of the relation. The exact size is required to ensure
3086  * that we don't leave any buffer for the relation being dropped, as
3087  * otherwise the background writer or checkpointer could PANIC while
3088  * flushing buffers corresponding to files that don't exist.
3089  *
3090  * To know the exact size, we rely on the size cached for each fork by us
3091  * during recovery, which limits the optimization to recovery and to
3092  * standbys, but we can easily extend it once we have a shared cache for
3093  * relation size.
3094  *
3095  * In recovery, we cache the value returned by the first lseek(SEEK_END)
3096  * and future writes keep the cached value up-to-date. See
3097  * smgrextend. It is possible that the value of the first lseek is smaller
3098  * than the actual number of existing blocks in the file due to buggy
3099  * Linux kernels that might not have accounted for the recent write. But
3100  * that should be fine because there must not be any buffers after that
3101  * file size.
3102  */
3103  for (i = 0; i < nforks; i++)
3104  {
3105  /* Get the number of blocks for a relation's fork */
3106  nForkBlock[i] = smgrnblocks_cached(smgr_reln, forkNum[i]);
3107 
3108  if (nForkBlock[i] == InvalidBlockNumber)
3109  {
3110  nBlocksToInvalidate = InvalidBlockNumber;
3111  break;
3112  }
3113 
3114  /* calculate the number of blocks to be invalidated */
3115  nBlocksToInvalidate += (nForkBlock[i] - firstDelBlock[i]);
3116  }
3117 
3118  /*
3119  * We apply the optimization iff the total number of blocks to invalidate
3120  * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
3121  */
3122  if (BlockNumberIsValid(nBlocksToInvalidate) &&
3123  nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
3124  {
3125  for (j = 0; j < nforks; j++)
3126  FindAndDropRelFileNodeBuffers(rnode.node, forkNum[j],
3127  nForkBlock[j], firstDelBlock[j]);
3128  return;
3129  }
3130 
3131  for (i = 0; i < NBuffers; i++)
3132  {
3133  BufferDesc *bufHdr = GetBufferDescriptor(i);
3134  uint32 buf_state;
3135 
3136  /*
3137  * We can make this a tad faster by prechecking the buffer tag before
3138  * we attempt to lock the buffer; this saves a lot of lock
3139  * acquisitions in typical cases. It should be safe because the
3140  * caller must have AccessExclusiveLock on the relation, or some other
3141  * reason to be certain that no one is loading new pages of the rel
3142  * into the buffer pool. (Otherwise we might well miss such pages
3143  * entirely.) Therefore, while the tag might be changing while we
3144  * look at it, it can't be changing *to* a value we care about, only
3145  * *away* from such a value. So false negatives are impossible, and
3146  * false positives are safe because we'll recheck after getting the
3147  * buffer lock.
3148  *
3149  * We could check forkNum and blockNum as well as the rnode, but the
3150  * incremental win from doing so seems small.
3151  */
3152  if (!RelFileNodeEquals(bufHdr->tag.rnode, rnode.node))
3153  continue;
3154 
3155  buf_state = LockBufHdr(bufHdr);
3156 
3157  for (j = 0; j < nforks; j++)
3158  {
3159  if (RelFileNodeEquals(bufHdr->tag.rnode, rnode.node) &&
3160  bufHdr->tag.forkNum == forkNum[j] &&
3161  bufHdr->tag.blockNum >= firstDelBlock[j])
3162  {
3163  InvalidateBuffer(bufHdr); /* releases spinlock */
3164  break;
3165  }
3166  }
3167  if (j >= nforks)
3168  UnlockBufHdr(bufHdr, buf_state);
3169  }
3170 }
3171 
3172 /* ---------------------------------------------------------------------
3173  * DropRelFileNodesAllBuffers
3174  *
3175  * This function removes from the buffer pool all the pages of all
3176  * forks of the specified relations. It's equivalent to calling
3177  * DropRelFileNodeBuffers once per fork per relation with
3178  * firstDelBlock = 0.
3179  * --------------------------------------------------------------------
3180  */
3181 void
3182 DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
3183 {
3184  int i;
3185  int j;
3186  int n = 0;
3187  SMgrRelation *rels;
3188  BlockNumber (*block)[MAX_FORKNUM + 1];
3189  uint64 nBlocksToInvalidate = 0;
3190  RelFileNode *nodes;
3191  bool cached = true;
3192  bool use_bsearch;
3193 
3194  if (nnodes == 0)
3195  return;
3196 
3197  rels = palloc(sizeof(SMgrRelation) * nnodes); /* non-local relations */
3198 
3199  /* If it's a local relation, it's localbuf.c's problem. */
3200  for (i = 0; i < nnodes; i++)
3201  {
3202  if (RelFileNodeBackendIsTemp(smgr_reln[i]->smgr_rnode))
3203  {
3204  if (smgr_reln[i]->smgr_rnode.backend == MyBackendId)
3205  DropRelFileNodeAllLocalBuffers(smgr_reln[i]->smgr_rnode.node);
3206  }
3207  else
3208  rels[n++] = smgr_reln[i];
3209  }
3210 
3211  /*
3212  * If there are no non-local relations, then we're done. Release the
3213  * memory and return.
3214  */
3215  if (n == 0)
3216  {
3217  pfree(rels);
3218  return;
3219  }
3220 
3221  /*
3222  * This is used to remember the number of blocks for all the relations
3223  * forks.
3224  */
3225  block = (BlockNumber (*)[MAX_FORKNUM + 1])
3226  palloc(sizeof(BlockNumber) * n * (MAX_FORKNUM + 1));
3227 
3228  /*
3229  * We can avoid scanning the entire buffer pool if we know the exact size
3230  * of each of the given relation forks. See DropRelFileNodeBuffers.
3231  */
3232  for (i = 0; i < n && cached; i++)
3233  {
3234  for (j = 0; j <= MAX_FORKNUM; j++)
3235  {
3236  /* Get the number of blocks for a relation's fork. */
3237  block[i][j] = smgrnblocks_cached(rels[i], j);
3238 
3239  /* We need to only consider the relation forks that exist. */
3240  if (block[i][j] == InvalidBlockNumber)
3241  {
3242  if (!smgrexists(rels[i], j))
3243  continue;
3244  cached = false;
3245  break;
3246  }
3247 
3248  /* calculate the total number of blocks to be invalidated */
3249  nBlocksToInvalidate += block[i][j];
3250  }
3251  }
3252 
3253  /*
3254  * We apply the optimization iff the total number of blocks to invalidate
3255  * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
3256  */
3257  if (cached && nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
3258  {
3259  for (i = 0; i < n; i++)
3260  {
3261  for (j = 0; j <= MAX_FORKNUM; j++)
3262  {
3263  /* ignore relation forks that don't exist */
3264  if (!BlockNumberIsValid(block[i][j]))
3265  continue;
3266 
3267  /* drop all the buffers for a particular relation fork */
3268  FindAndDropRelFileNodeBuffers(rels[i]->smgr_rnode.node,
3269  j, block[i][j], 0);
3270  }
3271  }
3272 
3273  pfree(block);
3274  pfree(rels);
3275  return;
3276  }
3277 
3278  pfree(block);
3279  nodes = palloc(sizeof(RelFileNode) * n); /* non-local relations */
3280  for (i = 0; i < n; i++)
3281  nodes[i] = rels[i]->smgr_rnode.node;
3282 
3283  /*
3284  * For a low number of relations to drop, just use a simple walk-through to
3285  * save the bsearch overhead. The threshold to use is more a guess than
3286  * an exactly determined value, as it depends on many factors (CPU and RAM
3287  * speeds, amount of shared buffers etc.).
3288  */
3289  use_bsearch = n > RELS_BSEARCH_THRESHOLD;
3290 
3291  /* sort the list of rnodes if necessary */
3292  if (use_bsearch)
3293  pg_qsort(nodes, n, sizeof(RelFileNode), rnode_comparator);
3294 
3295  for (i = 0; i < NBuffers; i++)
3296  {
3297  RelFileNode *rnode = NULL;
3298  BufferDesc *bufHdr = GetBufferDescriptor(i);
3299  uint32 buf_state;
3300 
3301  /*
3302  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3303  * and saves some cycles.
3304  */
3305 
3306  if (!use_bsearch)
3307  {
3308  int j;
3309 
3310  for (j = 0; j < n; j++)
3311  {
3312  if (RelFileNodeEquals(bufHdr->tag.rnode, nodes[j]))
3313  {
3314  rnode = &nodes[j];
3315  break;
3316  }
3317  }
3318  }
3319  else
3320  {
3321  rnode = bsearch((const void *) &(bufHdr->tag.rnode),
3322  nodes, n, sizeof(RelFileNode),
3323  rnode_comparator);
3324 
3325 
3326  /* buffer doesn't belong to any of the given relfilenodes; skip it */
3327  if (rnode == NULL)
3328  continue;
3329 
3330  buf_state = LockBufHdr(bufHdr);
3331  if (RelFileNodeEquals(bufHdr->tag.rnode, (*rnode)))
3332  InvalidateBuffer(bufHdr); /* releases spinlock */
3333  else
3334  UnlockBufHdr(bufHdr, buf_state);
3335  }
3336 
3337  pfree(nodes);
3338  pfree(rels);
3339 }
3340 
3341 /* ---------------------------------------------------------------------
3342  * FindAndDropRelFileNodeBuffers
3343  *
3344  * This function performs lookups in the BufMapping table and removes from the
3345  * buffer pool all the pages of the specified relation fork that have block
3346  * numbers >= firstDelBlock. (In particular, with firstDelBlock = 0, all
3347  * pages are removed.)
3348  * --------------------------------------------------------------------
3349  */
3350 static void
3351 FindAndDropRelFileNodeBuffers(RelFileNode rnode, ForkNumber forkNum,
3352  BlockNumber nForkBlock,
3353  BlockNumber firstDelBlock)
3354 {
3355  BlockNumber curBlock;
3356 
3357  for (curBlock = firstDelBlock; curBlock < nForkBlock; curBlock++)
3358  {
3359  uint32 bufHash; /* hash value for tag */
3360  BufferTag bufTag; /* identity of requested block */
3361  LWLock *bufPartitionLock; /* buffer partition lock for it */
3362  int buf_id;
3363  BufferDesc *bufHdr;
3364  uint32 buf_state;
3365 
3366  /* create a tag so we can lookup the buffer */
3367  INIT_BUFFERTAG(bufTag, rnode, forkNum, curBlock);
3368 
3369  /* determine its hash code and partition lock ID */
3370  bufHash = BufTableHashCode(&bufTag);
3371  bufPartitionLock = BufMappingPartitionLock(bufHash);
3372 
3373  /* Check that it is in the buffer pool. If not, do nothing. */
3374  LWLockAcquire(bufPartitionLock, LW_SHARED);
3375  buf_id = BufTableLookup(&bufTag, bufHash);
3376  LWLockRelease(bufPartitionLock);
3377 
3378  if (buf_id < 0)
3379  continue;
3380 
3381  bufHdr = GetBufferDescriptor(buf_id);
3382 
3383  /*
3384  * We need to lock the buffer header and recheck if the buffer is
3385  * still associated with the same block because the buffer could be
3386  * evicted by some other backend loading blocks for a different
3387  * relation after we release lock on the BufMapping table.
3388  */
3389  buf_state = LockBufHdr(bufHdr);
3390 
3391  if (RelFileNodeEquals(bufHdr->tag.rnode, rnode) &&
3392  bufHdr->tag.forkNum == forkNum &&
3393  bufHdr->tag.blockNum >= firstDelBlock)
3394  InvalidateBuffer(bufHdr); /* releases spinlock */
3395  else
3396  UnlockBufHdr(bufHdr, buf_state);
3397  }
3398 }
3399 
3400 /* ---------------------------------------------------------------------
3401  * DropDatabaseBuffers
3402  *
3403  * This function removes all the buffers in the buffer cache for a
3404  * particular database. Dirty pages are simply dropped, without
3405  * bothering to write them out first. This is used when we destroy a
3406  * database, to avoid trying to flush data to disk when the directory
3407  * tree no longer exists. Implementation is pretty similar to
3408  * DropRelFileNodeBuffers() which is for destroying just one relation.
3409  * --------------------------------------------------------------------
3410  */
3411 void
3413 {
3414  int i;
3415 
3416  /*
3417  * We needn't consider local buffers, since by assumption the target
3418  * database isn't our own.
3419  */
3420 
3421  for (i = 0; i < NBuffers; i++)
3422  {
3423  BufferDesc *bufHdr = GetBufferDescriptor(i);
3424  uint32 buf_state;
3425 
3426  /*
3427  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3428  * and saves some cycles.
3429  */
3430  if (bufHdr->tag.rnode.dbNode != dbid)
3431  continue;
3432 
3433  buf_state = LockBufHdr(bufHdr);
3434  if (bufHdr->tag.rnode.dbNode == dbid)
3435  InvalidateBuffer(bufHdr); /* releases spinlock */
3436  else
3437  UnlockBufHdr(bufHdr, buf_state);
3438  }
3439 }
3440 
3441 /* -----------------------------------------------------------------
3442  * PrintBufferDescs
3443  *
3444  * this function prints all the buffer descriptors, for debugging
3445  * use only.
3446  * -----------------------------------------------------------------
3447  */
3448 #ifdef NOT_USED
3449 void
3450 PrintBufferDescs(void)
3451 {
3452  int i;
3453 
3454  for (i = 0; i < NBuffers; ++i)
3455  {
3456  BufferDesc *buf = GetBufferDescriptor(i);
3457  Buffer b = BufferDescriptorGetBuffer(buf);
3458 
3459  /* theoretically we should lock the bufhdr here */
3460  elog(LOG,
3461  "[%02d] (freeNext=%d, rel=%s, "
3462  "blockNum=%u, flags=0x%x, refcount=%u %d)",
3463  i, buf->freeNext,
3465  buf->tag.blockNum, buf->flags,
3466  buf->refcount, GetPrivateRefCount(b));
3467  }
3468 }
3469 #endif
3470 
3471 #ifdef NOT_USED
3472 void
3473 PrintPinnedBufs(void)
3474 {
3475  int i;
3476 
3477  for (i = 0; i < NBuffers; ++i)
3478  {
3479  BufferDesc *buf = GetBufferDescriptor(i);
3480  Buffer b = BufferDescriptorGetBuffer(buf);
3481 
3482  if (GetPrivateRefCount(b) > 0)
3483  {
3484  /* theoretically we should lock the bufhdr here */
3485  elog(LOG,
3486  "[%02d] (freeNext=%d, rel=%s, "
3487  "blockNum=%u, flags=0x%x, refcount=%u %d)",
3488  i, buf->freeNext,
3489  relpathperm(buf->tag.rnode, buf->tag.forkNum),
3490  buf->tag.blockNum, buf->flags,
3491  buf->refcount, GetPrivateRefCount(b));
3492  }
3493  }
3494 }
3495 #endif
3496 
3497 /* ---------------------------------------------------------------------
3498  * FlushRelationBuffers
3499  *
3500  * This function writes all dirty pages of a relation out to disk
3501  * (or more accurately, out to kernel disk buffers), ensuring that the
3502  * kernel has an up-to-date view of the relation.
3503  *
3504  * Generally, the caller should be holding AccessExclusiveLock on the
3505  * target relation to ensure that no other backend is busy dirtying
3506  * more blocks of the relation; the effects can't be expected to last
3507  * after the lock is released.
3508  *
3509  * XXX currently it sequentially searches the buffer pool, should be
3510  * changed to more clever ways of searching. This routine is not
3511  * used in any performance-critical code paths, so it's not worth
3512  * adding additional overhead to normal paths to make it go faster.
3513  * --------------------------------------------------------------------
3514  */
3515 void
3516 FlushRelationBuffers(Relation rel)
3517 {
3518  int i;
3519  BufferDesc *bufHdr;
3520 
3521  if (RelationUsesLocalBuffers(rel))
3522  {
3523  for (i = 0; i < NLocBuffer; i++)
3524  {
3525  uint32 buf_state;
3526 
3527  bufHdr = GetLocalBufferDescriptor(i);
3528  if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
3529  ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
3530  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3531  {
3532  ErrorContextCallback errcallback;
3533  Page localpage;
3534 
3535  localpage = (char *) LocalBufHdrGetBlock(bufHdr);
3536 
3537  /* Setup error traceback support for ereport() */
3538  errcallback.callback = local_buffer_write_error_callback;
3539  errcallback.arg = (void *) bufHdr;
3540  errcallback.previous = error_context_stack;
3541  error_context_stack = &errcallback;
3542 
3543  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
3544 
3545  smgrwrite(RelationGetSmgr(rel),
3546  bufHdr->tag.forkNum,
3547  bufHdr->tag.blockNum,
3548  localpage,
3549  false);
3550 
3551  buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
3552  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
3553 
3554  /* Pop the error context stack */
3555  error_context_stack = errcallback.previous;
3556  }
3557  }
3558 
3559  return;
3560  }
3561 
3562  /* Make sure we can handle the pin inside the loop */
3563  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3564 
3565  for (i = 0; i < NBuffers; i++)
3566  {
3567  uint32 buf_state;
3568 
3569  bufHdr = GetBufferDescriptor(i);
3570 
3571  /*
3572  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3573  * and saves some cycles.
3574  */
3575  if (!RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node))
3576  continue;
3577 
3578  ReservePrivateRefCountEntry();
3579 
3580  buf_state = LockBufHdr(bufHdr);
3581  if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
3582  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3583  {
3584  PinBuffer_Locked(bufHdr);
3585  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3586  FlushBuffer(bufHdr, RelationGetSmgr(rel));
3587  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3588  UnpinBuffer(bufHdr, true);
3589  }
3590  else
3591  UnlockBufHdr(bufHdr, buf_state);
3592  }
3593 }
3594 
3595 /* ---------------------------------------------------------------------
3596  * FlushRelationsAllBuffers
3597  *
3598  * This function flushes out of the buffer pool all the pages of all
3599  * forks of the specified smgr relations. It's equivalent to calling
3600  * FlushRelationBuffers once per fork per relation. The relations are
3601  * assumed not to use local buffers.
3602  * --------------------------------------------------------------------
3603  */
3604 void
3605 FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
3606 {
3607  int i;
3608  SMgrSortArray *srels;
3609  bool use_bsearch;
3610 
3611  if (nrels == 0)
3612  return;
3613 
3614  /* fill-in array for qsort */
3615  srels = palloc(sizeof(SMgrSortArray) * nrels);
3616 
3617  for (i = 0; i < nrels; i++)
3618  {
3619  Assert(!RelFileNodeBackendIsTemp(smgrs[i]->smgr_rnode));
3620 
3621  srels[i].rnode = smgrs[i]->smgr_rnode.node;
3622  srels[i].srel = smgrs[i];
3623  }
3624 
3625  /*
3626  * Save the bsearch overhead for low number of relations to sync. See
3627  * DropRelFileNodesAllBuffers for details.
3628  */
3629  use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;
3630 
3631  /* sort the list of SMgrRelations if necessary */
3632  if (use_bsearch)
3633  pg_qsort(srels, nrels, sizeof(SMgrSortArray), rnode_comparator);
3634 
3635  /* Make sure we can handle the pin inside the loop */
3636  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3637 
3638  for (i = 0; i < NBuffers; i++)
3639  {
3640  SMgrSortArray *srelent = NULL;
3641  BufferDesc *bufHdr = GetBufferDescriptor(i);
3642  uint32 buf_state;
3643 
3644  /*
3645  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3646  * and saves some cycles.
3647  */
3648 
3649  if (!use_bsearch)
3650  {
3651  int j;
3652 
3653  for (j = 0; j < nrels; j++)
3654  {
3655  if (RelFileNodeEquals(bufHdr->tag.rnode, srels[j].rnode))
3656  {
3657  srelent = &srels[j];
3658  break;
3659  }
3660  }
3661 
3662  }
3663  else
3664  {
3665  srelent = bsearch((const void *) &(bufHdr->tag.rnode),
3666  srels, nrels, sizeof(SMgrSortArray),
3667  rnode_comparator);
3668  }
3669 
3670  /* buffer doesn't belong to any of the given relfilenodes; skip it */
3671  if (srelent == NULL)
3672  continue;
3673 
3674  ReservePrivateRefCountEntry();
3675 
3676  buf_state = LockBufHdr(bufHdr);
3677  if (RelFileNodeEquals(bufHdr->tag.rnode, srelent->rnode) &&
3678  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3679  {
3680  PinBuffer_Locked(bufHdr);
3681  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3682  FlushBuffer(bufHdr, srelent->srel);
3683  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3684  UnpinBuffer(bufHdr, true);
3685  }
3686  else
3687  UnlockBufHdr(bufHdr, buf_state);
3688  }
3689 
3690  pfree(srels);
3691 }
3692 
3693 /* ---------------------------------------------------------------------
3694  * FlushDatabaseBuffers
3695  *
3696  * This function writes all dirty pages of a database out to disk
3697  * (or more accurately, out to kernel disk buffers), ensuring that the
3698  * kernel has an up-to-date view of the database.
3699  *
3700  * Generally, the caller should be holding an appropriate lock to ensure
3701  * no other backend is active in the target database; otherwise more
3702  * pages could get dirtied.
3703  *
3704  * Note we don't worry about flushing any pages of temporary relations.
3705  * It's assumed these wouldn't be interesting.
3706  * --------------------------------------------------------------------
3707  */
3708 void
3709 FlushDatabaseBuffers(Oid dbid)
3710 {
3711  int i;
3712  BufferDesc *bufHdr;
3713 
3714  /* Make sure we can handle the pin inside the loop */
3715  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3716 
3717  for (i = 0; i < NBuffers; i++)
3718  {
3719  uint32 buf_state;
3720 
3721  bufHdr = GetBufferDescriptor(i);
3722 
3723  /*
3724  * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
3725  * and saves some cycles.
3726  */
3727  if (bufHdr->tag.rnode.dbNode != dbid)
3728  continue;
3729 
3730  ReservePrivateRefCountEntry();
3731 
3732  buf_state = LockBufHdr(bufHdr);
3733  if (bufHdr->tag.rnode.dbNode == dbid &&
3734  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3735  {
3736  PinBuffer_Locked(bufHdr);
3737  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3738  FlushBuffer(bufHdr, NULL);
3739  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3740  UnpinBuffer(bufHdr, true);
3741  }
3742  else
3743  UnlockBufHdr(bufHdr, buf_state);
3744  }
3745 }
3746 
3747 /*
3748  * Flush a previously locked (share or exclusive) and pinned buffer to the
3749  * OS.
3750  */
3751 void
3752 FlushOneBuffer(Buffer buffer)
3753 {
3754  BufferDesc *bufHdr;
3755 
3756  /* currently not needed, but no fundamental reason not to support */
3757  Assert(!BufferIsLocal(buffer));
3758 
3759  Assert(BufferIsPinned(buffer));
3760 
3761  bufHdr = GetBufferDescriptor(buffer - 1);
3762 
3763  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
3764 
3765  FlushBuffer(bufHdr, NULL);
3766 }
3767 
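/*
 * Illustrative usage sketch (editorial addition, not part of bufmgr.c):
 * FlushOneBuffer() expects the caller to hold both a pin and the content
 * lock, per the Assert above; a share lock is sufficient for writing the
 * page out.  The function name and arguments are assumptions.
 */
static void
flush_one_page_example(Relation rel, BlockNumber blkno)
{
    Buffer      buf = ReadBuffer(rel, blkno);   /* pins the buffer */

    LockBuffer(buf, BUFFER_LOCK_SHARE);         /* content lock required by FlushOneBuffer */
    FlushOneBuffer(buf);                        /* write the page out to the kernel */
    UnlockReleaseBuffer(buf);                   /* drop the lock and the pin */
}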
3768 /*
3769  * ReleaseBuffer -- release the pin on a buffer
3770  */
3771 void
3772 ReleaseBuffer(Buffer buffer)
3773 {
3774  if (!BufferIsValid(buffer))
3775  elog(ERROR, "bad buffer ID: %d", buffer);
3776 
3777  if (BufferIsLocal(buffer))
3778  {
3779  ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
3780 
3781  Assert(LocalRefCount[-buffer - 1] > 0);
3782  LocalRefCount[-buffer - 1]--;
3783  return;
3784  }
3785 
3786  UnpinBuffer(GetBufferDescriptor(buffer - 1), true);
3787 }
3788 
3789 /*
3790  * UnlockReleaseBuffer -- release the content lock and pin on a buffer
3791  *
3792  * This is just a shorthand for a common combination.
3793  */
3794 void
3795 UnlockReleaseBuffer(Buffer buffer)
3796 {
3797  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3798  ReleaseBuffer(buffer);
3799 }
3800 
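/*
 * Illustrative usage sketch (editorial addition, not part of bufmgr.c):
 * the common read path pairs ReadBuffer() + LockBuffer() with a single
 * UnlockReleaseBuffer() call.  The function name is hypothetical.
 */
static bool
page_is_new_example(Relation rel, BlockNumber blkno)
{
    Buffer      buf = ReadBuffer(rel, blkno);   /* pin */
    bool        result;

    LockBuffer(buf, BUFFER_LOCK_SHARE);         /* content lock for reading */
    result = PageIsNew(BufferGetPage(buf));
    UnlockReleaseBuffer(buf);                   /* release lock and pin together */

    return result;
}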
3801 /*
3802  * IncrBufferRefCount
3803  * Increment the pin count on a buffer that we have *already* pinned
3804  * at least once.
3805  *
3806  * This function cannot be used on a buffer we do not have pinned,
3807  * because it doesn't change the shared buffer state.
3808  */
3809 void
3810 IncrBufferRefCount(Buffer buffer)
3811 {
3812  Assert(BufferIsPinned(buffer));
3813  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3814  if (BufferIsLocal(buffer))
3815  LocalRefCount[-buffer - 1]++;
3816  else
3817  {
3818  PrivateRefCountEntry *ref;
3819 
3820  ref = GetPrivateRefCountEntry(buffer, true);
3821  Assert(ref != NULL);
3822  ref->refcount++;
3823  }
3824  ResourceOwnerRememberBuffer(CurrentResourceOwner, buffer);
3825 }
3826 
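/*
 * Illustrative usage sketch (editorial addition, not part of bufmgr.c):
 * when an already-pinned buffer is handed to a second, independently
 * released reference, the extra pin is taken with IncrBufferRefCount()
 * instead of a fresh ReadBuffer().  The function name is hypothetical.
 */
static void
share_pin_example(Buffer buf)
{
    /* caller already holds one pin on buf */
    IncrBufferRefCount(buf);    /* cheap: only backend-local refcounts change */

    /* ... hand buf to another structure that will release it later ... */

    ReleaseBuffer(buf);         /* drops the extra reference; the original pin remains */
}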
3827 /*
3828  * MarkBufferDirtyHint
3829  *
3830  * Mark a buffer dirty for non-critical changes.
3831  *
3832  * This is essentially the same as MarkBufferDirty, except:
3833  *
3834  * 1. The caller does not write WAL; so if checksums are enabled, we may need
3835  * to write an XLOG_FPI_FOR_HINT WAL record to protect against torn pages.
3836  * 2. The caller might have only share-lock instead of exclusive-lock on the
3837  * buffer's content lock.
3838  * 3. This function does not guarantee that the buffer is always marked dirty
3839  * (due to a race condition), so it cannot be used for important changes.
3840  */
3841 void
3842 MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
3843 {
3844  BufferDesc *bufHdr;
3845  Page page = BufferGetPage(buffer);
3846 
3847  if (!BufferIsValid(buffer))
3848  elog(ERROR, "bad buffer ID: %d", buffer);
3849 
3850  if (BufferIsLocal(buffer))
3851  {
3852  MarkLocalBufferDirty(buffer);
3853  return;
3854  }
3855 
3856  bufHdr = GetBufferDescriptor(buffer - 1);
3857 
3858  Assert(GetPrivateRefCount(buffer) > 0);
3859  /* here, either share or exclusive lock is OK */
3860  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
3861 
3862  /*
3863  * This routine might get called many times on the same page, if we are
3864  * making the first scan after commit of an xact that added/deleted many
3865  * tuples. So, be as quick as we can if the buffer is already dirty. We
3866  * do this by not acquiring spinlock if it looks like the status bits are
3867  * already set. Since we make this test unlocked, there's a chance we
3868  * might fail to notice that the flags have just been cleared, and so fail
3869  * to reset them, due to memory-ordering issues. But since this function
3870  * is only intended to be used in cases where failing to write out the
3871  * data would be harmless anyway, it doesn't really matter.
3872  */
3873  if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
3874  (BM_DIRTY | BM_JUST_DIRTIED))
3875  {
3876  XLogRecPtr lsn = InvalidXLogRecPtr;
3877  bool dirtied = false;
3878  bool delayChkpt = false;
3879  uint32 buf_state;
3880 
3881  /*
3882  * If we need to protect hint bit updates from torn writes, WAL-log a
3883  * full page image of the page. This full page image is only necessary
3884  * if the hint bit update is the first change to the page since the
3885  * last checkpoint.
3886  *
3887  * We don't check full_page_writes here because that logic is included
3888  * when we call XLogInsert() since the value changes dynamically.
3889  */
3890  if (XLogHintBitIsNeeded() &&
3891  (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
3892  {
3893  /*
3894  * If we must not write WAL, due to a relfilenode-specific
3895  * condition or being in recovery, don't dirty the page. We can
3896  * set the hint, just not dirty the page as a result so the hint
3897  * is lost when we evict the page or shutdown.
3898  *
3899  * See src/backend/storage/page/README for longer discussion.
3900  */
3901  if (RecoveryInProgress() ||
3902  RelFileNodeSkippingWAL(bufHdr->tag.rnode))
3903  return;
3904 
3905  /*
3906  * If the block is already dirty because we either made a change
3907  * or set a hint already, then we don't need to write a full page
3908  * image. Note that aggressive cleaning of blocks dirtied by hint
3909  * bit setting would increase the call rate. Bulk setting of hint
3910  * bits would reduce the call rate...
3911  *
3912  * We must issue the WAL record before we mark the buffer dirty.
3913  * Otherwise we might write the page before we write the WAL. That
3914  * causes a race condition, since a checkpoint might occur between
3915  * writing the WAL record and marking the buffer dirty. We solve
3916  * that with a kluge, but one that is already in use during
3917  * transaction commit to prevent race conditions. Basically, we
3918  * simply prevent the checkpoint WAL record from being written
3919  * until we have marked the buffer dirty. We don't start the
3920  * checkpoint flush until we have marked dirty, so our checkpoint
3921  * must flush the change to disk successfully or the checkpoint
3922  * never gets written, in which case crash recovery will fix it.
3923  *
3924  * It's possible we may enter here without an xid, so it is
3925  * essential that CreateCheckpoint waits for virtual transactions
3926  * rather than full transactionids.
3927  */
3928  MyProc->delayChkpt = delayChkpt = true;
3929  lsn = XLogSaveBufferForHint(buffer, buffer_std);
3930  }
3931 
3932  buf_state = LockBufHdr(bufHdr);
3933 
3934  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
3935 
3936  if (!(buf_state & BM_DIRTY))
3937  {
3938  dirtied = true; /* Means "will be dirtied by this action" */
3939 
3940  /*
3941  * Set the page LSN if we wrote a backup block. We aren't supposed
3942  * to set this when only holding a share lock but as long as we
3943  * serialise it somehow we're OK. We choose to set LSN while
3944  * holding the buffer header lock, which causes any reader of an
3945  * LSN who holds only a share lock to also obtain a buffer header
3946  * lock before using PageGetLSN(), which is enforced in
3947  * BufferGetLSNAtomic().
3948  *
3949  * If checksums are enabled, you might think we should reset the
3950  * checksum here. That will happen when the page is written
3951  * sometime later in this checkpoint cycle.
3952  */
3953  if (!XLogRecPtrIsInvalid(lsn))
3954  PageSetLSN(page, lsn);
3955  }
3956 
3957  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
3958  UnlockBufHdr(bufHdr, buf_state);
3959 
3960  if (delayChkpt)
3961  MyProc->delayChkpt = false;
3962 
3963  if (dirtied)
3964  {
3965  VacuumPageDirty++;
3966  pgBufferUsage.shared_blks_dirtied++;
3967  if (VacuumCostActive)
3968  VacuumCostBalance += VacuumCostPageDirty;
3969  }
3970  }
3971 }
3972 
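/*
 * Illustrative usage sketch (editorial addition, not part of bufmgr.c):
 * the typical caller sets a non-critical, idempotent flag (a hint bit)
 * while holding only a share lock, then records the change with
 * MarkBufferDirtyHint().  The function name and the elided page edit are
 * assumptions for the example.
 */
static void
set_hint_example(Buffer buf)
{
    LockBuffer(buf, BUFFER_LOCK_SHARE); /* share lock is enough, see point 2 above */

    /* ... set a hint flag on BufferGetPage(buf); losing it must be harmless ... */

    /* no WAL from the caller; an FPI is emitted here if checksums require it */
    MarkBufferDirtyHint(buf, true);     /* true: page has a standard layout */
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);
}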
3973 /*
3974  * Release buffer content locks for shared buffers.
3975  *
3976  * Used to clean up after errors.
3977  *
3978  * Currently, we can expect that lwlock.c's LWLockReleaseAll() took care
3979  * of releasing buffer content locks per se; the only thing we need to deal
3980  * with here is clearing any PIN_COUNT request that was in progress.
3981  */
3982 void
3983 UnlockBuffers(void)
3984 {
3985  BufferDesc *buf = PinCountWaitBuf;
3986 
3987  if (buf)
3988  {
3989  uint32 buf_state;
3990 
3991  buf_state = LockBufHdr(buf);
3992 
3993  /*
3994  * Don't complain if flag bit not set; it could have been reset but we
3995  * got a cancel/die interrupt before getting the signal.
3996  */
3997  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
3998  buf->wait_backend_pid == MyProcPid)
3999  buf_state &= ~BM_PIN_COUNT_WAITER;
4000 
4001  UnlockBufHdr(buf, buf_state);
4002 
4003  PinCountWaitBuf = NULL;
4004  }
4005 }
4006 
4007 /*
4008  * Acquire or release the content_lock for the buffer.
4009  */
4010 void
4011 LockBuffer(Buffer buffer, int mode)
4012 {
4013  BufferDesc *buf;
4014 
4015  Assert(BufferIsPinned(buffer));
4016  if (BufferIsLocal(buffer))
4017  return; /* local buffers need no lock */
4018 
4019  buf = GetBufferDescriptor(buffer - 1);
4020 
4021  if (mode == BUFFER_LOCK_UNLOCK)
4022  LWLockRelease(BufferDescriptorGetContentLock(buf));
4023  else if (mode == BUFFER_LOCK_SHARE)
4024  LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_SHARED);
4025  else if (mode == BUFFER_LOCK_EXCLUSIVE)
4026  LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_EXCLUSIVE);
4027  else
4028  elog(ERROR, "unrecognized buffer lock mode: %d", mode);
4029 }
4030 
4031 /*
4032  * Acquire the content_lock for the buffer, but only if we don't have to wait.
4033  *
4034  * This assumes the caller wants BUFFER_LOCK_EXCLUSIVE mode.
4035  */
4036 bool
4037 ConditionalLockBuffer(Buffer buffer)
4038 {
4039  BufferDesc *buf;
4040 
4041  Assert(BufferIsPinned(buffer));
4042  if (BufferIsLocal(buffer))
4043  return true; /* act as though we got it */
4044 
4045  buf = GetBufferDescriptor(buffer - 1);
4046 
4047  return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
4048  LW_EXCLUSIVE);
4049 }
4050 
4051 /*
4052  * LockBufferForCleanup - lock a buffer in preparation for deleting items
4053  *
4054  * Items may be deleted from a disk page only when the caller (a) holds an
4055  * exclusive lock on the buffer and (b) has observed that no other backend
4056  * holds a pin on the buffer. If there is a pin, then the other backend
4057  * might have a pointer into the buffer (for example, a heapscan reference
4058  * to an item --- see README for more details). It's OK if a pin is added
4059  * after the cleanup starts, however; the newly-arrived backend will be
4060  * unable to look at the page until we release the exclusive lock.
4061  *
4062  * To implement this protocol, a would-be deleter must pin the buffer and
4063  * then call LockBufferForCleanup(). LockBufferForCleanup() is similar to
4064  * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE), except that it loops until
4065  * it has successfully observed pin count = 1.
4066  */
4067 void
4068 LockBufferForCleanup(Buffer buffer)
4069 {
4070  BufferDesc *bufHdr;
4071  char *new_status = NULL;
4072  TimestampTz waitStart = 0;
4073  bool logged_recovery_conflict = false;
4074 
4075  Assert(BufferIsPinned(buffer));
4076  Assert(PinCountWaitBuf == NULL);
4077 
4078  if (BufferIsLocal(buffer))
4079  {
4080  /* There should be exactly one pin */
4081  if (LocalRefCount[-buffer - 1] != 1)
4082  elog(ERROR, "incorrect local pin count: %d",
4083  LocalRefCount[-buffer - 1]);
4084  /* Nobody else to wait for */
4085  return;
4086  }
4087 
4088  /* There should be exactly one local pin */
4089  if (GetPrivateRefCount(buffer) != 1)
4090  elog(ERROR, "incorrect local pin count: %d",
4091  GetPrivateRefCount(buffer));
4092 
4093  bufHdr = GetBufferDescriptor(buffer - 1);
4094 
4095  for (;;)
4096  {
4097  uint32 buf_state;
4098 
4099  /* Try to acquire lock */
4100  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
4101  buf_state = LockBufHdr(bufHdr);
4102 
4103  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4104  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4105  {
4106  /* Successfully acquired exclusive lock with pincount 1 */
4107  UnlockBufHdr(bufHdr, buf_state);
4108 
4109  /*
4110  * Emit the log message if recovery conflict on buffer pin was
4111  * resolved but the startup process waited longer than
4112  * deadlock_timeout for it.
4113  */
4114  if (logged_recovery_conflict)
4115  LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
4116  waitStart, GetCurrentTimestamp(),
4117  NULL, false);
4118 
4119  /* Report change to non-waiting status */
4120  if (new_status)
4121  {
4122  set_ps_display(new_status);
4123  pfree(new_status);
4124  }
4125  return;
4126  }
4127  /* Failed, so mark myself as waiting for pincount 1 */
4128  if (buf_state & BM_PIN_COUNT_WAITER)
4129  {
4130  UnlockBufHdr(bufHdr, buf_state);
4131  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4132  elog(ERROR, "multiple backends attempting to wait for pincount 1");
4133  }
4134  bufHdr->wait_backend_pid = MyProcPid;
4135  PinCountWaitBuf = bufHdr;
4136  buf_state |= BM_PIN_COUNT_WAITER;
4137  UnlockBufHdr(bufHdr, buf_state);
4138  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4139 
4140  /* Wait to be signaled by UnpinBuffer() */
4141  if (InHotStandby)
4142  {
4143  /* Report change to waiting status */
4144  if (update_process_title && new_status == NULL)
4145  {
4146  const char *old_status;
4147  int len;
4148 
4149  old_status = get_ps_display(&len);
4150  new_status = (char *) palloc(len + 8 + 1);
4151  memcpy(new_status, old_status, len);
4152  strcpy(new_status + len, " waiting");
4153  set_ps_display(new_status);
4154  new_status[len] = '\0'; /* truncate off " waiting" */
4155  }
4156 
4157  /*
4158  * Emit the log message if the startup process is waiting longer
4159  * than deadlock_timeout for recovery conflict on buffer pin.
4160  *
4161  * Skip this if first time through because the startup process has
4162  * not started waiting yet in this case. So, the wait start
4163  * timestamp is set after this logic.
4164  */
4165  if (waitStart != 0 && !logged_recovery_conflict)
4166  {
4167  TimestampTz now = GetCurrentTimestamp();
4168 
4169  if (TimestampDifferenceExceeds(waitStart, now,
4170  DeadlockTimeout))
4171  {
4172  LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
4173  waitStart, now, NULL, true);
4174  logged_recovery_conflict = true;
4175  }
4176  }
4177 
4178  /*
4179  * Set the wait start timestamp if logging is enabled and first
4180  * time through.
4181  */
4182  if (log_recovery_conflict_waits && waitStart == 0)
4183  waitStart = GetCurrentTimestamp();
4184 
4185  /* Publish the bufid that Startup process waits on */
4186  SetStartupBufferPinWaitBufId(buffer - 1);
4187  /* Set alarm and then wait to be signaled by UnpinBuffer() */
4188  ResolveRecoveryConflictWithBufferPin();
4189  /* Reset the published bufid */
4190  SetStartupBufferPinWaitBufId(-1);
4191  }
4192  else
4193  ProcWaitForSignal(PG_WAIT_BUFFER_PIN);
4194 
4195  /*
4196  * Remove flag marking us as waiter. Normally this will not be set
4197  * anymore, but ProcWaitForSignal() can return for other signals as
4198  * well. We take care to only reset the flag if we're the waiter, as
4199  * theoretically another backend could have started waiting. That's
4200  * impossible with the current usages due to table level locking, but
4201  * better be safe.
4202  */
4203  buf_state = LockBufHdr(bufHdr);
4204  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4205  bufHdr->wait_backend_pid == MyProcPid)
4206  buf_state &= ~BM_PIN_COUNT_WAITER;
4207  UnlockBufHdr(bufHdr, buf_state);
4208 
4209  PinCountWaitBuf = NULL;
4210  /* Loop back and try again */
4211  }
4212 }
4213 
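/*
 * Illustrative usage sketch (editorial addition, not part of bufmgr.c):
 * the VACUUM-style protocol described above -- pin first, then wait for a
 * cleanup lock before removing items.  The function name and the use of a
 * buffer access strategy are assumptions for the example.
 */
static void
cleanup_page_example(Relation rel, BlockNumber blkno,
                     BufferAccessStrategy strategy)
{
    Buffer      buf;

    buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, strategy);
    LockBufferForCleanup(buf);  /* exclusive lock plus "we hold the only pin" */

    /* ... safe to remove dead items or defragment the page here ... */

    UnlockReleaseBuffer(buf);
}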
4214 /*
4215  * Check called from RecoveryConflictInterrupt handler when Startup
4216  * process requests cancellation of all pin holders that are blocking it.
4217  */
4218 bool
4219 HoldingBufferPinThatDelaysRecovery(void)
4220 {
4221  int bufid = GetStartupBufferPinWaitBufId();
4222 
4223  /*
4224  * If we get woken slowly then it's possible that the Startup process was
4225  * already woken by other backends before we got here. Also possible that
4226  * we get here by multiple interrupts or interrupts at inappropriate
4227  * times, so make sure we do nothing if the bufid is not set.
4228  */
4229  if (bufid < 0)
4230  return false;
4231 
4232  if (GetPrivateRefCount(bufid + 1) > 0)
4233  return true;
4234 
4235  return false;
4236 }
4237 
4238 /*
4239  * ConditionalLockBufferForCleanup - as above, but don't wait to get the lock
4240  *
4241  * We won't loop, but just check once to see if the pin count is OK. If
4242  * not, return false with no lock held.
4243  */
4244 bool
4245 ConditionalLockBufferForCleanup(Buffer buffer)
4246 {
4247  BufferDesc *bufHdr;
4248  uint32 buf_state,
4249  refcount;
4250 
4251  Assert(BufferIsValid(buffer));
4252 
4253  if (BufferIsLocal(buffer))
4254  {
4255  refcount = LocalRefCount[-buffer - 1];
4256  /* There should be exactly one pin */
4257  Assert(refcount > 0);
4258  if (refcount != 1)
4259  return false;
4260  /* Nobody else to wait for */
4261  return true;
4262  }
4263 
4264  /* There should be exactly one local pin */
4265  refcount = GetPrivateRefCount(buffer);
4266  Assert(refcount);
4267  if (refcount != 1)
4268  return false;
4269 
4270  /* Try to acquire lock */
4271  if (!ConditionalLockBuffer(buffer))
4272  return false;
4273 
4274  bufHdr = GetBufferDescriptor(buffer - 1);
4275  buf_state = LockBufHdr(bufHdr);
4276  refcount = BUF_STATE_GET_REFCOUNT(buf_state);
4277 
4278  Assert(refcount > 0);
4279  if (refcount == 1)
4280  {
4281  /* Successfully acquired exclusive lock with pincount 1 */
4282  UnlockBufHdr(bufHdr, buf_state);
4283  return true;
4284  }
4285 
4286  /* Failed, so release the lock */
4287  UnlockBufHdr(bufHdr, buf_state);
4288  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4289  return false;
4290 }
4291 
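/*
 * Illustrative usage sketch (editorial addition, not part of bufmgr.c):
 * opportunistic cleanup in the style of lazy VACUUM -- if another backend
 * holds a pin, skip the page rather than wait.  The function name is
 * hypothetical; "buf" is assumed to be pinned by the caller.
 */
static bool
try_cleanup_example(Buffer buf)
{
    if (!ConditionalLockBufferForCleanup(buf))
    {
        ReleaseBuffer(buf);     /* somebody else is using the page; skip it */
        return false;
    }

    /* ... cleanup work under the cleanup lock ... */

    UnlockReleaseBuffer(buf);
    return true;
}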
4292 /*
4293  * IsBufferCleanupOK - as above, but we already have the lock
4294  *
4295  * Check whether it's OK to perform cleanup on a buffer we've already
4296  * locked. If we observe that the pin count is 1, our exclusive lock
4297  * happens to be a cleanup lock, and we can proceed with anything that
4298  * would have been allowable had we sought a cleanup lock originally.
4299  */
4300 bool
4301 IsBufferCleanupOK(Buffer buffer)
4302 {
4303  BufferDesc *bufHdr;
4304  uint32 buf_state;
4305 
4306  Assert(BufferIsValid(buffer));
4307 
4308  if (BufferIsLocal(buffer))
4309  {
4310  /* There should be exactly one pin */
4311  if (LocalRefCount[-buffer - 1] != 1)
4312  return false;
4313  /* Nobody else to wait for */
4314  return true;
4315  }
4316 
4317  /* There should be exactly one local pin */
4318  if (GetPrivateRefCount(buffer) != 1)
4319  return false;
4320 
4321  bufHdr = GetBufferDescriptor(buffer - 1);
4322 
4323  /* caller must hold exclusive lock on buffer */
4324  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
4325  LW_EXCLUSIVE));
4326 
4327  buf_state = LockBufHdr(bufHdr);
4328 
4329  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4330  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4331  {
4332  /* pincount is OK. */
4333  UnlockBufHdr(bufHdr, buf_state);
4334  return true;
4335  }
4336 
4337  UnlockBufHdr(bufHdr, buf_state);
4338  return false;
4339 }
4340 
4341 
4342 /*
4343  * Functions for buffer I/O handling
4344  *
4345  * Note: We assume that nested buffer I/O never occurs.
4346  * i.e. at most one BM_IO_IN_PROGRESS bit is set per proc.
4347  *
4348  * Also note that these are used only for shared buffers, not local ones.
4349  */
4350 
4351 /*
4352  * WaitIO -- Block until the IO_IN_PROGRESS flag on 'buf' is cleared.
4353  */
4354 static void
4355 WaitIO(BufferDesc *buf)
4356 {
4357  ConditionVariable *cv = BufferDescriptorGetIOCV(buf);
4358 
4359  ConditionVariablePrepareToSleep(cv);
4360  for (;;)
4361  {
4362  uint32 buf_state;
4363 
4364  /*
4365  * It may not be necessary to acquire the spinlock to check the flag
4366  * here, but since this test is essential for correctness, we'd better
4367  * play it safe.
4368  */
4369  buf_state = LockBufHdr(buf);
4370  UnlockBufHdr(buf, buf_state);
4371 
4372  if (!(buf_state & BM_IO_IN_PROGRESS))
4373  break;
4374  ConditionVariableSleep(cv, WAIT_EVENT_BUFFER_IO);
4375  }
4376  ConditionVariableCancelSleep();
4377 }
4378 
4379 /*
4380  * StartBufferIO: begin I/O on this buffer
4381  * (Assumptions)
4382  * My process is executing no IO
4383  * The buffer is Pinned
4384  *
4385  * In some scenarios there are race conditions in which multiple backends
4386  * could attempt the same I/O operation concurrently. If someone else
4387  * has already started I/O on this buffer then we will block on the
4388  * I/O condition variable until he's done.
4389  *
4390  * Input operations are only attempted on buffers that are not BM_VALID,
4391  * and output operations only on buffers that are BM_VALID and BM_DIRTY,
4392  * so we can always tell if the work is already done.
4393  *
4394  * Returns true if we successfully marked the buffer as I/O busy,
4395  * false if someone else already did the work.
4396  */
4397 static bool
4398 StartBufferIO(BufferDesc *buf, bool forInput)
4399 {
4400  uint32 buf_state;
4401 
4402  Assert(!InProgressBuf);
4403 
4404  for (;;)
4405  {
4406  buf_state = LockBufHdr(buf);
4407 
4408  if (!(buf_state & BM_IO_IN_PROGRESS))
4409  break;
4410  UnlockBufHdr(buf, buf_state);
4411  WaitIO(buf);
4412  }
4413 
4414  /* Once we get here, there is definitely no I/O active on this buffer */
4415 
4416  if (forInput ? (buf_state & BM_VALID) : !(buf_state & BM_DIRTY))
4417  {
4418  /* someone else already did the I/O */
4419  UnlockBufHdr(buf, buf_state);
4420  return false;
4421  }
4422 
4423  buf_state |= BM_IO_IN_PROGRESS;
4424  UnlockBufHdr(buf, buf_state);
4425 
4426  InProgressBuf = buf;
4427  IsForInput = forInput;
4428 
4429  return true;
4430 }
4431 
4432 /*
4433  * TerminateBufferIO: release a buffer we were doing I/O on
4434  * (Assumptions)
4435  * My process is executing IO for the buffer
4436  * BM_IO_IN_PROGRESS bit is set for the buffer
4437  * The buffer is Pinned
4438  *
4439  * If clear_dirty is true and BM_JUST_DIRTIED is not set, we clear the
4440  * buffer's BM_DIRTY flag. This is appropriate when terminating a
4441  * successful write. The check on BM_JUST_DIRTIED is necessary to avoid
4442  * marking the buffer clean if it was re-dirtied while we were writing.
4443  *
4444  * set_flag_bits gets ORed into the buffer's flags. It must include
4445  * BM_IO_ERROR in a failure case. For successful completion it could
4446  * be 0, or BM_VALID if we just finished reading in the page.
4447  */
4448 static void
4449 TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits)
4450 {
4451  uint32 buf_state;
4452 
4453  Assert(buf == InProgressBuf);
4454 
4455  buf_state = LockBufHdr(buf);
4456 
4457  Assert(buf_state & BM_IO_IN_PROGRESS);
4458 
4459  buf_state &= ~(BM_IO_IN_PROGRESS | BM_IO_ERROR);
4460  if (clear_dirty && !(buf_state & BM_JUST_DIRTIED))
4461  buf_state &= ~(BM_DIRTY | BM_CHECKPOINT_NEEDED);
4462 
4463  buf_state |= set_flag_bits;
4464  UnlockBufHdr(buf, buf_state);
4465 
4466  InProgressBuf = NULL;
4467 
4468  ConditionVariableBroadcast(BufferDescriptorGetIOCV(buf));
4469 }
4470 
4471 /*
4472  * AbortBufferIO: Clean up any active buffer I/O after an error.
4473  *
4474  * All LWLocks we might have held have been released,
4475  * but we haven't yet released buffer pins, so the buffer is still pinned.
4476  *
4477  * If I/O was in progress, we always set BM_IO_ERROR, even though it's
4478  * possible the error condition wasn't related to the I/O.
4479  */
4480 void
4481 AbortBufferIO(void)
4482 {
4483  BufferDesc *buf = InProgressBuf;
4484 
4485  if (buf)
4486  {
4487  uint32 buf_state;
4488 
4489  buf_state = LockBufHdr(buf);
4490  Assert(buf_state & BM_IO_IN_PROGRESS);
4491  if (IsForInput)
4492  {
4493  Assert(!(buf_state & BM_DIRTY));
4494 
4495  /* We'd better not think buffer is valid yet */
4496  Assert(!(buf_state & BM_VALID));
4497  UnlockBufHdr(buf, buf_state);
4498  }
4499  else
4500  {
4501  Assert(buf_state & BM_DIRTY);
4502  UnlockBufHdr(buf, buf_state);
4503  /* Issue notice if this is not the first failure... */
4504  if (buf_state & BM_IO_ERROR)
4505  {
4506  /* Buffer is pinned, so we can read tag without spinlock */
4507  char *path;
4508 
4509  path = relpathperm(buf->tag.rnode, buf->tag.forkNum);
4510  ereport(WARNING,
4511  (errcode(ERRCODE_IO_ERROR),
4512  errmsg("could not write block %u of %s",
4513  buf->tag.blockNum, path),
4514  errdetail("Multiple failures --- write error might be permanent.")));
4515  pfree(path);
4516  }
4517  }
4518  TerminateBufferIO(buf, false, BM_IO_ERROR);
4519  }
4520 }
4521 
4522 /*
4523  * Error context callback for errors occurring during shared buffer writes.
4524  */
4525 static void
4526 shared_buffer_write_error_callback(void *arg)
4527 {
4528  BufferDesc *bufHdr = (BufferDesc *) arg;
4529 
4530  /* Buffer is pinned, so we can read the tag without locking the spinlock */
4531  if (bufHdr != NULL)
4532  {
4533  char *path = relpathperm(bufHdr->tag.rnode, bufHdr->tag.forkNum);
4534 
4535  errcontext("writing block %u of relation %s",
4536  bufHdr->tag.blockNum, path);
4537  pfree(path);
4538  }
4539 }
4540 
4541 /*
4542  * Error context callback for errors occurring during local buffer writes.
4543  */
4544 static void
4545 local_buffer_write_error_callback(void *arg)
4546 {
4547  BufferDesc *bufHdr = (BufferDesc *) arg;
4548 
4549  if (bufHdr != NULL)
4550  {
4551  char *path = relpathbackend(bufHdr->tag.rnode, MyBackendId,
4552  bufHdr->tag.forkNum);
4553 
4554  errcontext("writing block %u of relation %s",
4555  bufHdr->tag.blockNum, path);
4556  pfree(path);
4557  }
4558 }
4559 
4560 /*
4561  * RelFileNode qsort/bsearch comparator; see RelFileNodeEquals.
4562  */
4563 static int
4564 rnode_comparator(const void *p1, const void *p2)
4565 {
4566  RelFileNode n1 = *(const RelFileNode *) p1;
4567  RelFileNode n2 = *(const RelFileNode *) p2;
4568 
4569  if (n1.relNode < n2.relNode)
4570  return -1;
4571  else if (n1.relNode > n2.relNode)
4572  return 1;
4573 
4574  if (n1.dbNode < n2.dbNode)
4575  return -1;
4576  else if (n1.dbNode > n2.dbNode)
4577  return 1;
4578 
4579  if (n1.spcNode < n2.spcNode)
4580  return -1;
4581  else if (n1.spcNode > n2.spcNode)
4582  return 1;
4583  else
4584  return 0;
4585 }
4586 
4587 /*
4588  * Lock buffer header - set BM_LOCKED in buffer state.
4589  */
4590 uint32
4591 LockBufHdr(BufferDesc *desc)
4592 {
4593  SpinDelayStatus delayStatus;
4594  uint32 old_buf_state;
4595 
4596  init_local_spin_delay(&delayStatus);
4597 
4598  while (true)
4599  {
4600  /* set BM_LOCKED flag */
4601  old_buf_state = pg_atomic_fetch_or_u32(&desc->state, BM_LOCKED);
4602  /* if it wasn't set before we're OK */
4603  if (!(old_buf_state & BM_LOCKED))
4604  break;
4605  perform_spin_delay(&delayStatus);
4606  }
4607  finish_spin_delay(&delayStatus);
4608  return old_buf_state | BM_LOCKED;
4609 }
4610 
4611 /*
4612  * Wait until the BM_LOCKED flag isn't set anymore and return the buffer's
4613  * state at that point.
4614  *
4615  * Obviously the buffer could be locked by the time the value is returned, so
4616  * this is primarily useful in CAS style loops.
4617  */
4618 static uint32
4619 WaitBufHdrUnlocked(BufferDesc *buf)
4620 {
4621  SpinDelayStatus delayStatus;
4622  uint32 buf_state;
4623 
4624  init_local_spin_delay(&delayStatus);
4625 
4626  buf_state = pg_atomic_read_u32(&buf->state);
4627 
4628  while (buf_state & BM_LOCKED)
4629  {
4630  perform_spin_delay(&delayStatus);
4631  buf_state = pg_atomic_read_u32(&buf->state);
4632  }
4633 
4634  finish_spin_delay(&delayStatus);
4635 
4636  return buf_state;
4637 }
4638 
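/*
 * Illustrative sketch (editorial addition, not part of bufmgr.c) of the
 * CAS-style loop mentioned above, modeled on PinBuffer(): spin past
 * BM_LOCKED with WaitBufHdrUnlocked(), then try to install the new state.
 * The function name and the generic "flag" argument are assumptions.
 */
static void
set_flag_cas_example(BufferDesc *desc, uint32 flag)
{
    uint32      old_buf_state;
    uint32      new_buf_state;

    old_buf_state = pg_atomic_read_u32(&desc->state);
    for (;;)
    {
        if (old_buf_state & BM_LOCKED)
            old_buf_state = WaitBufHdrUnlocked(desc);

        new_buf_state = old_buf_state | flag;

        /* on failure the CAS refreshes old_buf_state, so just retry */
        if (pg_atomic_compare_exchange_u32(&desc->state,
                                           &old_buf_state, new_buf_state))
            break;
    }
}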
4639 /*
4640  * BufferTag comparator.
4641  */
4642 static inline int
4643 buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
4644 {
4645  int ret;
4646 
4647  ret = rnode_comparator(&ba->rnode, &bb->rnode);
4648 
4649  if (ret != 0)
4650  return ret;
4651 
4652  if (ba->forkNum < bb->forkNum)
4653  return -1;
4654  if (ba->forkNum > bb->forkNum)
4655  return 1;
4656 
4657  if (ba->blockNum < bb->blockNum)
4658  return -1;
4659  if (ba->blockNum > bb->blockNum)
4660  return 1;
4661 
4662  return 0;
4663 }
4664 
4665 /*
4666  * Comparator determining the writeout order in a checkpoint.
4667  *
4668  * It is important that tablespaces are compared first; the logic balancing
4669  * writes between tablespaces relies on it.
4670  */
4671 static inline int
4672 ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
4673 {
4674  /* compare tablespace */
4675  if (a->tsId < b->tsId)
4676  return -1;
4677  else if (a->tsId > b->tsId)
4678  return 1;
4679  /* compare relation */
4680  if (a->relNode < b->relNode)
4681  return -1;
4682  else if (a->relNode > b->relNode)
4683  return 1;
4684  /* compare fork */
4685  else if (a->forkNum < b->forkNum)
4686  return -1;
4687  else if (a->forkNum > b->forkNum)
4688  return 1;
4689  /* compare block number */
4690  else if (a->blockNum < b->blockNum)
4691  return -1;
4692  else if (a->blockNum > b->blockNum)
4693  return 1;
4694  /* equal page IDs are unlikely, but not impossible */
4695  return 0;
4696 }
4697 
4698 /*
4699  * Comparator for a Min-Heap over the per-tablespace checkpoint completion
4700  * progress.
4701  */
4702 static int
4703 ts_ckpt_progress_comparator(Datum a, Datum b, void *arg)
4704 {
4705  CkptTsStatus *sa = (CkptTsStatus *) a;
4706  CkptTsStatus *sb = (CkptTsStatus *) b;
4707 
4708  /* we want a min-heap, so return 1 when a < b */
4709  if (sa->progress < sb->progress)
4710  return 1;
4711  else if (sa->progress == sb->progress)
4712  return 0;
4713  else
4714  return -1;
4715 }
4716 
4717 /*
4718  * Initialize a writeback context, discarding potential previous state.
4719  *
4720  * *max_pending is a pointer instead of an immediate value, so the coalesce
4721  * limits can easily be changed by the GUC mechanism, and so calling code does
4722  * not have to check the current configuration. A value of 0 means that no
4723  * writeback control will be performed.
4724  */
4725 void
4726 WritebackContextInit(WritebackContext *context, int *max_pending)
4727 {
4728  Assert(*max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
4729 
4730  context->max_pending = max_pending;
4731  context->nr_pending = 0;
4732 }
4733 
4734 /*
4735  * Add buffer to list of pending writeback requests.
4736  */
4737 void
4738 ScheduleBufferTagForWriteback(WritebackContext *context, BufferTag *tag)
4739 {
4740  PendingWriteback *pending;
4741 
4742  /*
4743  * Add buffer to the pending writeback array, unless writeback control is
4744  * disabled.
4745  */
4746  if (*context->max_pending > 0)
4747  {
4748  Assert(*context->max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
4749 
4750  pending = &context->pending_writebacks[context->nr_pending++];
4751 
4752  pending->tag = *tag;
4753  }
4754 
4755  /*
4756  * Perform pending flushes if the writeback limit is exceeded. This
4757  * includes the case where previously an item has been added, but control
4758  * is now disabled.
4759  */
4760  if (context->nr_pending >= *context->max_pending)
4761  IssuePendingWritebacks(context);
4762 }
4763 
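/*
 * Illustrative usage sketch (editorial addition, not part of bufmgr.c):
 * this mirrors how BufferSync() drives writeback control -- initialize the
 * context with a pointer to a *_flush_after GUC, schedule tags as buffers
 * are written, and issue whatever is still pending at the end.  The
 * function name and its arguments are assumptions.
 */
static void
writeback_example(BufferTag *tags, int ntags)
{
    WritebackContext wb_context;
    int         i;

    WritebackContextInit(&wb_context, &checkpoint_flush_after);

    for (i = 0; i < ntags; i++)
        ScheduleBufferTagForWriteback(&wb_context, &tags[i]);

    /* flush any requests that never reached the coalescing threshold */
    IssuePendingWritebacks(&wb_context);
}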
4764 #define ST_SORT sort_pending_writebacks
4765 #define ST_ELEMENT_TYPE PendingWriteback
4766 #define ST_COMPARE(a, b) buffertag_comparator(&a->tag, &b->tag)
4767 #define ST_SCOPE static
4768 #define ST_DEFINE
4769 #include "lib/sort_template.h"
4770 
4771 /*
4772  * Issue all pending writeback requests, previously scheduled with
4773  * ScheduleBufferTagForWriteback, to the OS.
4774  *
4775  * Because this is only used to improve the OS's I/O scheduling, we try never
4776  * to error out - it's just a hint.
4777  */
4778 void
4779 IssuePendingWritebacks(WritebackContext *context)
4780 {
4781  int i;
4782 
4783  if (context->nr_pending == 0)
4784  return;
4785 
4786  /*
4787  * Executing the writes in order can make them a lot faster, and allows
4788  * merging writeback requests for consecutive blocks into larger writebacks.
4789  */
4790  sort_pending_writebacks(context->pending_writebacks, context->nr_pending);
4791 
4792  /*
4793  * Coalesce neighbouring writes, but nothing else. For that we iterate
4794  * through the now-sorted array of pending flushes, and look forward to
4795  * find all neighbouring (or identical) writes.
4796  */
4797  for (i = 0; i < context->nr_pending; i++)
4798  {
4799  PendingWriteback *cur;
4800  PendingWriteback *next;
4801  SMgrRelation reln;
4802  int ahead;
4803  BufferTag tag;
4804  Size nblocks = 1;
4805 
4806  cur = &context->pending_writebacks[i];
4807  tag = cur->tag;
4808 
4809  /*
4810  * Peek ahead, into following writeback requests, to see if they can
4811  * be combined with the current one.
4812  */
4813  for (ahead = 0; i + ahead + 1 < context->nr_pending; ahead++)
4814  {
4815  next = &context->pending_writebacks[i + ahead + 1];
4816 
4817  /* different file, stop */
4818  if (!RelFileNodeEquals(cur->tag.rnode, next->tag.rnode) ||
4819  cur->tag.forkNum != next->tag.forkNum)
4820  break;
4821 
4822  /* ok, block queued twice, skip */
4823  if (cur->tag.blockNum == next->tag.blockNum)
4824  continue;
4825 
4826  /* only merge consecutive writes */
4827  if (cur->tag.blockNum + 1 != next->tag.blockNum)
4828  break;
4829 
4830  nblocks++;
4831  cur = next;
4832  }
4833 
4834  i += ahead;
4835 
4836  /* and finally tell the kernel to write the data to storage */
4837  reln = smgropen(tag.rnode, InvalidBackendId);
4838  smgrwriteback(reln, tag.forkNum, tag.blockNum, nblocks);
4839  }
4840 
4841  context->nr_pending = 0;
4842 }
4843 
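/*
 * Worked example (editorial addition, not part of bufmgr.c): suppose the
 * sorted pending array holds blocks 10, 11, 11, 12 and 20 of the same
 * relation fork.  The look-ahead loop merges 10..12 into one request (the
 * duplicate 11 is skipped without growing nblocks), so a single
 * smgrwriteback() covers nblocks = 3 starting at block 10; block 20 is not
 * consecutive and is issued as a separate one-block request.
 */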
4844 
4845 /*
4846  * Implement slower/larger portions of TestForOldSnapshot
4847  *
4848  * Smaller/faster portions are put inline, but the entire set of logic is too
4849  * big for that.
4850  */
4851 void
4852 TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
4853 {
4854  if (RelationAllowsEarlyPruning(relation)
4855  && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
4856  ereport(ERROR,
4857  (errcode(ERRCODE_SNAPSHOT_TOO_OLD),
4858  errmsg("snapshot too old")));
4859 }