bufmgr.c
1 /*-------------------------------------------------------------------------
2  *
3  * bufmgr.c
4  * buffer manager interface routines
5  *
6  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/buffer/bufmgr.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Principal entry points:
17  *
18  * ReadBuffer() -- find or create a buffer holding the requested page,
19  * and pin it so that no one can destroy it while this process
20  * is using it.
21  *
22  * ReleaseBuffer() -- unpin a buffer
23  *
24  * MarkBufferDirty() -- mark a pinned buffer's contents as "dirty".
25  * The disk write is delayed until buffer replacement or checkpoint.
26  *
27  * See also these files:
28  * freelist.c -- chooses victim for buffer replacement
29  * buf_table.c -- manages the buffer lookup table
30  */
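/*
 * A minimal caller-side sketch of these entry points (illustration only;
 * "rel" and "blkno" are assumed to be an already-opened relation and an
 * existing block number, and WAL-logging is elided):
 *
 *		Buffer		buf;
 *		Page		page;
 *
 *		buf = ReadBuffer(rel, blkno);
 *		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 *		page = BufferGetPage(buf);
 *		... modify the page contents, emitting WAL as needed ...
 *		MarkBufferDirty(buf);
 *		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 *		ReleaseBuffer(buf);
 */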
31 #include "postgres.h"
32 
33 #include <sys/file.h>
34 #include <unistd.h>
35 
36 #include "access/tableam.h"
37 #include "access/xloginsert.h"
38 #include "access/xlogutils.h"
39 #include "catalog/catalog.h"
40 #include "catalog/storage.h"
41 #include "catalog/storage_xlog.h"
42 #include "executor/instrument.h"
43 #include "lib/binaryheap.h"
44 #include "miscadmin.h"
45 #include "pg_trace.h"
46 #include "pgstat.h"
47 #include "postmaster/bgwriter.h"
48 #include "storage/buf_internals.h"
49 #include "storage/bufmgr.h"
50 #include "storage/ipc.h"
51 #include "storage/lmgr.h"
52 #include "storage/proc.h"
53 #include "storage/smgr.h"
54 #include "storage/standby.h"
55 #include "utils/memdebug.h"
56 #include "utils/ps_status.h"
57 #include "utils/rel.h"
58 #include "utils/resowner_private.h"
59 #include "utils/timestamp.h"
60 
61 
62 /* Note: these two macros only work on shared buffers, not local ones! */
63 #define BufHdrGetBlock(bufHdr) ((Block) (BufferBlocks + ((Size) (bufHdr)->buf_id) * BLCKSZ))
64 #define BufferGetLSN(bufHdr) (PageGetLSN(BufHdrGetBlock(bufHdr)))
65 
66 /* Note: this macro only works on local buffers, not shared ones! */
67 #define LocalBufHdrGetBlock(bufHdr) \
68  LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
69 
70 /* Bits in SyncOneBuffer's return value */
71 #define BUF_WRITTEN 0x01
72 #define BUF_REUSABLE 0x02
73 
74 #define RELS_BSEARCH_THRESHOLD 20
75 
76 /*
77  * This is the size (in the number of blocks) above which we scan the
78  * entire buffer pool to remove the buffers for all the pages of the
79  * relation being dropped. For relations smaller than this threshold, we
80  * find the buffers by doing lookups in the BufMapping table.
81  */
82 #define BUF_DROP_FULL_SCAN_THRESHOLD (uint64) (NBuffers / 32)
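/*
 * A minimal sketch of how this threshold is meant to be applied when
 * dropping relation buffers (illustration only; "nBlocksToInvalidate" is a
 * hypothetical total over all forks of the relations being dropped):
 *
 *		if (BlockNumberIsValid(nBlocksToInvalidate) &&
 *			nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
 *			... look up each (fork, block) in the buffer mapping table ...
 *		else
 *			... scan all NBuffers headers, dropping any matching buffer ...
 */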
83 
84 typedef struct PrivateRefCountEntry
85 {
86  Buffer buffer;
87  int32 refcount;
88 } PrivateRefCountEntry;
89 
90 /* 64 bytes, about the size of a cache line on common systems */
91 #define REFCOUNT_ARRAY_ENTRIES 8
92 
93 /*
94  * Status of buffers to checkpoint for a particular tablespace, used
95  * internally in BufferSync.
96  */
97 typedef struct CkptTsStatus
98 {
99  /* oid of the tablespace */
100  Oid tsId;
101 
102  /*
103  * Checkpoint progress for this tablespace. To make progress comparable
104  * between tablespaces the progress is, for each tablespace, measured as a
105  * number between 0 and the total number of to-be-checkpointed pages. Each
106  * page checkpointed in this tablespace increments this space's progress
107  * by progress_slice.
108  */
109  double progress;
110  double progress_slice;
111 
112  /* number of to-be checkpointed pages in this tablespace */
113  int num_to_scan;
114  /* already processed pages in this tablespace */
115  int num_scanned;
116 
117  /* current offset in CkptBufferIds for this tablespace */
118  int index;
119 } CkptTsStatus;
120 
121 /*
122  * Type for array used to sort SMgrRelations
123  *
124  * FlushRelationsAllBuffers shares the same comparator function with
125  * DropRelationsAllBuffers. Pointer to this struct and RelFileLocator must be
126  * compatible.
127  */
128 typedef struct SMgrSortArray
129 {
130  RelFileLocator rlocator; /* This must be the first member */
131  SMgrRelation srel;
132 } SMgrSortArray;
133 
134 /* GUC variables */
135 bool zero_damaged_pages = false;
136 int bgwriter_lru_maxpages = 100;
137 double bgwriter_lru_multiplier = 2.0;
138 bool track_io_timing = false;
139 
140 /*
141  * How many buffers PrefetchBuffer callers should try to stay ahead of their
142  * ReadBuffer calls by. Zero means "never prefetch". This value is only used
143  * for buffers not belonging to tablespaces that have their
144  * effective_io_concurrency parameter set.
145  */
146 int effective_io_concurrency = DEFAULT_EFFECTIVE_IO_CONCURRENCY;
147 
148 /*
149  * Like effective_io_concurrency, but used by maintenance code paths that might
150  * benefit from a higher setting because they work on behalf of many sessions.
151  * Overridden by the tablespace setting of the same name.
152  */
153 int maintenance_io_concurrency = DEFAULT_MAINTENANCE_IO_CONCURRENCY;
154 
155 /*
156  * GUC variables about triggering kernel writeback for buffers written; OS
157  * dependent defaults are set via the GUC mechanism.
158  */
159 int checkpoint_flush_after = DEFAULT_CHECKPOINT_FLUSH_AFTER;
160 int bgwriter_flush_after = DEFAULT_BGWRITER_FLUSH_AFTER;
161 int backend_flush_after = DEFAULT_BACKEND_FLUSH_AFTER;
162 
163 /* local state for LockBufferForCleanup */
164 static BufferDesc *PinCountWaitBuf = NULL;
165 
166 /*
167  * Backend-Private refcount management:
168  *
169  * Each buffer also has a private refcount that keeps track of the number of
170  * times the buffer is pinned in the current process. This is so that the
171  * shared refcount needs to be modified only once if a buffer is pinned more
172  * than once by an individual backend. It's also used to check that no buffers
173  * are still pinned at the end of transactions and when exiting.
174  *
175  *
176  * To avoid - as we used to - requiring an array with NBuffers entries to keep
177  * track of local buffers, we use a small sequentially searched array
178  * (PrivateRefCountArray) and an overflow hash table (PrivateRefCountHash) to
179  * keep track of backend local pins.
180  *
181  * Until no more than REFCOUNT_ARRAY_ENTRIES buffers are pinned at once, all
182  * refcounts are kept track of in the array; after that, new array entries
183  * displace old ones into the hash table. That way a frequently used entry
184  * can't get "stuck" in the hashtable while infrequent ones clog the array.
185  *
186  * Note that in most scenarios the number of pinned buffers will not exceed
187  * REFCOUNT_ARRAY_ENTRIES.
188  *
189  *
190  * To enter a buffer into the refcount tracking mechanism first reserve a free
191  * entry using ReservePrivateRefCountEntry() and then later, if necessary,
192  * fill it with NewPrivateRefCountEntry(). That split lets us avoid doing
193  * memory allocations in NewPrivateRefCountEntry() which can be important
194  * because in some scenarios it's called with a spinlock held...
195  */
196 static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES];
197 static HTAB *PrivateRefCountHash = NULL;
198 static int32 PrivateRefCountOverflowed = 0;
199 static uint32 PrivateRefCountClock = 0;
200 static PrivateRefCountEntry *ReservedRefCountEntry = NULL;
201 
202 static void ReservePrivateRefCountEntry(void);
203 static PrivateRefCountEntry *NewPrivateRefCountEntry(Buffer buffer);
204 static PrivateRefCountEntry *GetPrivateRefCountEntry(Buffer buffer, bool do_move);
205 static inline int32 GetPrivateRefCount(Buffer buffer);
206 static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref);
207 
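/*
 * A minimal sketch of the reserve-then-fill pattern described above
 * (illustration only; the surrounding pinning protocol and error handling
 * are elided):
 *
 *		PrivateRefCountEntry *ref;
 *
 *		ReservePrivateRefCountEntry();	(may allocate, so done up front)
 *		... take the buffer header spinlock, adjust the shared refcount ...
 *		ref = NewPrivateRefCountEntry(buffer);	(uses the reserved slot)
 *		ref->refcount++;
 */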
208 /*
209  * Ensure that the PrivateRefCountArray has sufficient space to store one more
210  * entry. This has to be called before using NewPrivateRefCountEntry() to fill
211  * a new entry - but it's perfectly fine to not use a reserved entry.
212  */
213 static void
214 ReservePrivateRefCountEntry(void)
215 {
216  /* Already reserved (or freed), nothing to do */
217  if (ReservedRefCountEntry != NULL)
218  return;
219 
220  /*
221  * First search for a free entry in the array, that'll be sufficient in the
222  * majority of cases.
223  */
224  {
225  int i;
226 
227  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
228  {
229  PrivateRefCountEntry *res;
230 
231  res = &PrivateRefCountArray[i];
232 
233  if (res->buffer == InvalidBuffer)
234  {
235  ReservedRefCountEntry = res;
236  return;
237  }
238  }
239  }
240 
241  /*
242  * No luck. All array entries are full. Move one array entry into the hash
243  * table.
244  */
245  {
246  /*
247  * Move entry from the current clock position in the array into the
248  * hashtable. Use that slot.
249  */
250  PrivateRefCountEntry *hashent;
251  bool found;
252 
253  /* select victim slot */
254  ReservedRefCountEntry =
255  &PrivateRefCountArray[PrivateRefCountClock++ % REFCOUNT_ARRAY_ENTRIES];
256 
257  /* Better be used, otherwise we shouldn't get here. */
258  Assert(ReservedRefCountEntry->buffer != InvalidBuffer);
259 
260  /* enter victim array entry into hashtable */
261  hashent = hash_search(PrivateRefCountHash,
262  &(ReservedRefCountEntry->buffer),
263  HASH_ENTER,
264  &found);
265  Assert(!found);
266  hashent->refcount = ReservedRefCountEntry->refcount;
267 
268  /* clear the now free array slot */
269  ReservedRefCountEntry->buffer = InvalidBuffer;
270  ReservedRefCountEntry->refcount = 0;
271 
272  PrivateRefCountOverflowed++;
273  }
274 }
275 
276 /*
277  * Fill a previously reserved refcount entry.
278  */
279 static PrivateRefCountEntry *
280 NewPrivateRefCountEntry(Buffer buffer)
281 {
282  PrivateRefCountEntry *res;
283 
284  /* only allowed to be called when a reservation has been made */
285  Assert(ReservedRefCountEntry != NULL);
286 
287  /* use up the reserved entry */
288  res = ReservedRefCountEntry;
289  ReservedRefCountEntry = NULL;
290 
291  /* and fill it */
292  res->buffer = buffer;
293  res->refcount = 0;
294 
295  return res;
296 }
297 
298 /*
299  * Return the PrivateRefCount entry for the passed buffer.
300  *
301  * Returns NULL if a buffer doesn't have a refcount entry. Otherwise, if
302  * do_move is true, and the entry resides in the hashtable the entry is
303  * optimized for frequent access by moving it to the array.
304  */
305 static PrivateRefCountEntry *
306 GetPrivateRefCountEntry(Buffer buffer, bool do_move)
307 {
308  PrivateRefCountEntry *res;
309  int i;
310 
311  Assert(BufferIsValid(buffer));
312  Assert(!BufferIsLocal(buffer));
313 
314  /*
315  * First search for references in the array, that'll be sufficient in the
316  * majority of cases.
317  */
318  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
319  {
320  res = &PrivateRefCountArray[i];
321 
322  if (res->buffer == buffer)
323  return res;
324  }
325 
326  /*
327  * By here we know that the buffer, if already pinned, isn't residing in
328  * the array.
329  *
330  * Only look up the buffer in the hashtable if we've previously overflowed
331  * into it.
332  */
333  if (PrivateRefCountOverflowed == 0)
334  return NULL;
335 
336  res = hash_search(PrivateRefCountHash, &buffer, HASH_FIND, NULL);
337 
338  if (res == NULL)
339  return NULL;
340  else if (!do_move)
341  {
342  /* caller doesn't want us to move the hash entry into the array */
343  return res;
344  }
345  else
346  {
347  /* move buffer from hashtable into the free array slot */
348  bool found;
349  PrivateRefCountEntry *free;
350 
351  /* Ensure there's a free array slot */
352  ReservePrivateRefCountEntry();
353 
354  /* Use up the reserved slot */
355  Assert(ReservedRefCountEntry != NULL);
356  free = ReservedRefCountEntry;
357  ReservedRefCountEntry = NULL;
358  Assert(free->buffer == InvalidBuffer);
359 
360  /* and fill it */
361  free->buffer = buffer;
362  free->refcount = res->refcount;
363 
364  /* delete from hashtable */
365  hash_search(PrivateRefCountHash, &buffer, HASH_REMOVE, &found);
366  Assert(found);
367  Assert(PrivateRefCountOverflowed > 0);
368  PrivateRefCountOverflowed--;
369 
370  return free;
371  }
372 }
373 
374 /*
375  * Returns how many times the passed buffer is pinned by this backend.
376  *
377  * Only works for shared memory buffers!
378  */
379 static inline int32
380 GetPrivateRefCount(Buffer buffer)
381 {
382  PrivateRefCountEntry *ref;
383 
384  Assert(BufferIsValid(buffer));
385  Assert(!BufferIsLocal(buffer));
386 
387  /*
388  * Not moving the entry - that's ok for the current users, but we might
389  * want to change this one day.
390  */
391  ref = GetPrivateRefCountEntry(buffer, false);
392 
393  if (ref == NULL)
394  return 0;
395  return ref->refcount;
396 }
397 
398 /*
399  * Release resources used to track the reference count of a buffer which we no
400  * longer have pinned and don't want to pin again immediately.
401  */
402 static void
403 ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
404 {
405  Assert(ref->refcount == 0);
406 
407  if (ref >= &PrivateRefCountArray[0] &&
408  ref < &PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES])
409  {
410  ref->buffer = InvalidBuffer;
411 
412  /*
413  * Mark the just used entry as reserved - in many scenarios that
414  * allows us to avoid ever having to search the array/hash for free
415  * entries.
416  */
417  ReservedRefCountEntry = ref;
418  }
419  else
420  {
421  bool found;
422  Buffer buffer = ref->buffer;
423 
424  hash_search(PrivateRefCountHash, &buffer, HASH_REMOVE, &found);
425  Assert(found);
426  Assert(PrivateRefCountOverflowed > 0);
427  PrivateRefCountOverflowed--;
428  }
429 }
430 
431 /*
432  * BufferIsPinned
433  * True iff the buffer is pinned (also checks for valid buffer number).
434  *
435  * NOTE: what we check here is that *this* backend holds a pin on
436  * the buffer. We do not care whether some other backend does.
437  */
438 #define BufferIsPinned(bufnum) \
439 ( \
440  !BufferIsValid(bufnum) ? \
441  false \
442  : \
443  BufferIsLocal(bufnum) ? \
444  (LocalRefCount[-(bufnum) - 1] > 0) \
445  : \
446  (GetPrivateRefCount(bufnum) > 0) \
447 )
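/*
 * A minimal sketch of the intended use (illustration only): code about to
 * examine or modify page contents can assert that this backend already
 * holds a pin, e.g.
 *
 *		Assert(BufferIsPinned(buffer));
 *		page = BufferGetPage(buffer);
 */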
448 
449 
450 static Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence,
451  ForkNumber forkNum, BlockNumber blockNum,
452  ReadBufferMode mode, BufferAccessStrategy strategy,
453  bool *hit);
455  ForkNumber fork,
456  BufferAccessStrategy strategy,
457  uint32 flags,
458  uint32 extend_by,
459  BlockNumber extend_upto,
460  Buffer *buffers,
461  uint32 *extended_by);
463  ForkNumber fork,
464  BufferAccessStrategy strategy,
465  uint32 flags,
466  uint32 extend_by,
467  BlockNumber extend_upto,
468  Buffer *buffers,
469  uint32 *extended_by);
470 static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy);
471 static void PinBuffer_Locked(BufferDesc *buf);
472 static void UnpinBuffer(BufferDesc *buf);
473 static void BufferSync(int flags);
474 static uint32 WaitBufHdrUnlocked(BufferDesc *buf);
475 static int SyncOneBuffer(int buf_id, bool skip_recently_used,
476  WritebackContext *wb_context);
477 static void WaitIO(BufferDesc *buf);
478 static bool StartBufferIO(BufferDesc *buf, bool forInput);
479 static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty,
480  uint32 set_flag_bits);
481 static void shared_buffer_write_error_callback(void *arg);
482 static void local_buffer_write_error_callback(void *arg);
483 static BufferDesc *BufferAlloc(SMgrRelation smgr,
484  char relpersistence,
485  ForkNumber forkNum,
486  BlockNumber blockNum,
487  BufferAccessStrategy strategy,
488  bool *foundPtr, IOContext io_context);
489 static Buffer GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context);
490 static void FlushBuffer(BufferDesc *buf, SMgrRelation reln,
491  IOObject io_object, IOContext io_context);
492 static void FindAndDropRelationBuffers(RelFileLocator rlocator,
493  ForkNumber forkNum,
494  BlockNumber nForkBlock,
495  BlockNumber firstDelBlock);
496 static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
497  RelFileLocator dstlocator,
498  ForkNumber forkNum, bool permanent);
499 static void AtProcExit_Buffers(int code, Datum arg);
500 static void CheckForBufferLeaks(void);
501 static int rlocator_comparator(const void *p1, const void *p2);
502 static inline int buffertag_comparator(const BufferTag *ba, const BufferTag *bb);
503 static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
504 static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
505 
506 
507 /*
508  * Implementation of PrefetchBuffer() for shared buffers.
509  */
510 PrefetchBufferResult
511 PrefetchSharedBuffer(SMgrRelation smgr_reln,
512  ForkNumber forkNum,
513  BlockNumber blockNum)
514 {
515  PrefetchBufferResult result = {InvalidBuffer, false};
516  BufferTag newTag; /* identity of requested block */
517  uint32 newHash; /* hash value for newTag */
518  LWLock *newPartitionLock; /* buffer partition lock for it */
519  int buf_id;
520 
521  Assert(BlockNumberIsValid(blockNum));
522 
523  /* create a tag so we can lookup the buffer */
524  InitBufferTag(&newTag, &smgr_reln->smgr_rlocator.locator,
525  forkNum, blockNum);
526 
527  /* determine its hash code and partition lock ID */
528  newHash = BufTableHashCode(&newTag);
529  newPartitionLock = BufMappingPartitionLock(newHash);
530 
531  /* see if the block is in the buffer pool already */
532  LWLockAcquire(newPartitionLock, LW_SHARED);
533  buf_id = BufTableLookup(&newTag, newHash);
534  LWLockRelease(newPartitionLock);
535 
536  /* If not in buffers, initiate prefetch */
537  if (buf_id < 0)
538  {
539 #ifdef USE_PREFETCH
540  /*
541  * Try to initiate an asynchronous read. This returns false in
542  * recovery if the relation file doesn't exist.
543  */
544  if ((io_direct_flags & IO_DIRECT_DATA) == 0 &&
545  smgrprefetch(smgr_reln, forkNum, blockNum))
546  {
547  result.initiated_io = true;
548  }
549 #endif /* USE_PREFETCH */
550  }
551  else
552  {
553  /*
554  * Report the buffer it was in at that time. The caller may be able
555  * to avoid a buffer table lookup, but it's not pinned and it must be
556  * rechecked!
557  */
558  result.recent_buffer = buf_id + 1;
559  }
560 
561  /*
562  * If the block *is* in buffers, we do nothing. This is not really ideal:
563  * the block might be just about to be evicted, which would be stupid
564  * since we know we are going to need it soon. But the only easy answer
565  * is to bump the usage_count, which does not seem like a great solution:
566  * when the caller does ultimately touch the block, usage_count would get
567  * bumped again, resulting in too much favoritism for blocks that are
568  * involved in a prefetch sequence. A real fix would involve some
569  * additional per-buffer state, and it's not clear that there's enough of
570  * a problem to justify that.
571  */
572 
573  return result;
574 }
575 
576 /*
577  * PrefetchBuffer -- initiate asynchronous read of a block of a relation
578  *
579  * This is named by analogy to ReadBuffer but doesn't actually allocate a
580  * buffer. Instead it tries to ensure that a future ReadBuffer for the given
581  * block will not be delayed by the I/O. Prefetching is optional.
582  *
583  * There are three possible outcomes:
584  *
585  * 1. If the block is already cached, the result includes a valid buffer that
586  * could be used by the caller to avoid the need for a later buffer lookup, but
587  * it's not pinned, so the caller must recheck it.
588  *
589  * 2. If the kernel has been asked to initiate I/O, the initiated_io member is
590  * true. Currently there is no way to know if the data was already cached by
591  * the kernel and therefore didn't really initiate I/O, and no way to know when
592  * the I/O completes other than using synchronous ReadBuffer().
593  *
594  * 3. Otherwise, the buffer wasn't already cached by PostgreSQL, and either
595  * USE_PREFETCH is not defined (this build doesn't support prefetching due to
596  * lack of a kernel facility), direct I/O is enabled, or the underlying
597  * relation file wasn't found and we are in recovery. (If the relation file
598  * wasn't found and we are not in recovery, an error is raised).
599  */
600 PrefetchBufferResult
601 PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
602 {
603  Assert(RelationIsValid(reln));
604  Assert(BlockNumberIsValid(blockNum));
605 
606  if (RelationUsesLocalBuffers(reln))
607  {
608  /* see comments in ReadBufferExtended */
609  if (RELATION_IS_OTHER_TEMP(reln))
610  ereport(ERROR,
611  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
612  errmsg("cannot access temporary tables of other sessions")));
613 
614  /* pass it off to localbuf.c */
615  return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
616  }
617  else
618  {
619  /* pass it to the shared buffer version */
620  return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
621  }
622 }
623 
624 /*
625  * ReadRecentBuffer -- try to pin a block in a recently observed buffer
626  *
627  * Compared to ReadBuffer(), this avoids a buffer mapping lookup when it's
628  * successful. Return true if the buffer is valid and still has the expected
629  * tag. In that case, the buffer is pinned and the usage count is bumped.
630  */
631 bool
632 ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum,
633  Buffer recent_buffer)
634 {
635  BufferDesc *bufHdr;
636  BufferTag tag;
637  uint32 buf_state;
638  bool have_private_ref;
639 
640  Assert(BufferIsValid(recent_buffer));
641 
644  InitBufferTag(&tag, &rlocator, forkNum, blockNum);
645 
646  if (BufferIsLocal(recent_buffer))
647  {
648  int b = -recent_buffer - 1;
649 
650  bufHdr = GetLocalBufferDescriptor(b);
651  buf_state = pg_atomic_read_u32(&bufHdr->state);
652 
653  /* Is it still valid and holding the right tag? */
654  if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
655  {
656  PinLocalBuffer(bufHdr, true);
657 
659 
660  return true;
661  }
662  }
663  else
664  {
665  bufHdr = GetBufferDescriptor(recent_buffer - 1);
666  have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
667 
668  /*
669  * Do we already have this buffer pinned with a private reference? If
670  * so, it must be valid and it is safe to check the tag without
671  * locking. If not, we have to lock the header first and then check.
672  */
673  if (have_private_ref)
674  buf_state = pg_atomic_read_u32(&bufHdr->state);
675  else
676  buf_state = LockBufHdr(bufHdr);
677 
678  if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
679  {
680  /*
681  * It's now safe to pin the buffer. We can't pin first and ask
682  * questions later, because it might confuse code paths like
683  * InvalidateBuffer() if we pinned a random non-matching buffer.
684  */
685  if (have_private_ref)
686  PinBuffer(bufHdr, NULL); /* bump pin count */
687  else
688  PinBuffer_Locked(bufHdr); /* pin for first time */
689 
691 
692  return true;
693  }
694 
695  /* If we locked the header above, now unlock. */
696  if (!have_private_ref)
697  UnlockBufHdr(bufHdr, buf_state);
698  }
699 
700  return false;
701 }
702 
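/*
 * A minimal caller-side sketch combining PrefetchBuffer() and
 * ReadRecentBuffer() (illustration only; "rel" and "blkno" are assumed to be
 * a valid relation and block number, and rd_locator is assumed to be the
 * relation's file locator):
 *
 *		PrefetchBufferResult pf = PrefetchBuffer(rel, MAIN_FORKNUM, blkno);
 *		Buffer		buf;
 *
 *		... do other useful work while the kernel (maybe) reads the block ...
 *
 *		if (BufferIsValid(pf.recent_buffer) &&
 *			ReadRecentBuffer(rel->rd_locator, MAIN_FORKNUM, blkno,
 *							 pf.recent_buffer))
 *			buf = pf.recent_buffer;		(pinned, mapping lookup skipped)
 *		else
 *			buf = ReadBuffer(rel, blkno);	(normal lookup path)
 */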
703 /*
704  * ReadBuffer -- a shorthand for ReadBufferExtended, for reading from main
705  * fork with RBM_NORMAL mode and default strategy.
706  */
707 Buffer
708 ReadBuffer(Relation reln, BlockNumber blockNum)
709 {
710  return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
711 }
712 
713 /*
714  * ReadBufferExtended -- returns a buffer containing the requested
715  * block of the requested relation. If the blknum
716  * requested is P_NEW, extend the relation file and
717  * allocate a new block. (Caller is responsible for
718  * ensuring that only one backend tries to extend a
719  * relation at the same time!)
720  *
721  * Returns: the buffer number for the buffer containing
722  * the block read. The returned buffer has been pinned.
723  * Does not return on error --- elog's instead.
724  *
725  * Assume when this function is called, that reln has been opened already.
726  *
727  * In RBM_NORMAL mode, the page is read from disk, and the page header is
728  * validated. An error is thrown if the page header is not valid. (But
729  * note that an all-zero page is considered "valid"; see
730  * PageIsVerifiedExtended().)
731  *
732  * RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
733  * valid, the page is zeroed instead of throwing an error. This is intended
734  * for non-critical data, where the caller is prepared to repair errors.
735  *
736  * In RBM_ZERO_AND_LOCK mode, if the page isn't in buffer cache already, it's
737  * filled with zeros instead of reading it from disk. Useful when the caller
738  * is going to fill the page from scratch, since this saves I/O and avoids
739  * unnecessary failure if the page-on-disk has corrupt page headers.
740  * The page is returned locked to ensure that the caller has a chance to
741  * initialize the page before it's made visible to others.
742  * Caution: do not use this mode to read a page that is beyond the relation's
743  * current physical EOF; that is likely to cause problems in md.c when
744  * the page is modified and written out. P_NEW is OK, though.
745  *
746  * RBM_ZERO_AND_CLEANUP_LOCK is the same as RBM_ZERO_AND_LOCK, but acquires
747  * a cleanup-strength lock on the page.
748  *
749  * RBM_NORMAL_NO_LOG mode is treated the same as RBM_NORMAL here.
750  *
751  * If strategy is not NULL, a nondefault buffer access strategy is used.
752  * See buffer/README for details.
753  */
754 Buffer
755 ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
756  ReadBufferMode mode, BufferAccessStrategy strategy)
757 {
758  bool hit;
759  Buffer buf;
760 
761  /*
762  * Reject attempts to read non-local temporary relations; we would be
763  * likely to get wrong data since we have no visibility into the owning
764  * session's local buffers.
765  */
766  if (RELATION_IS_OTHER_TEMP(reln))
767  ereport(ERROR,
768  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
769  errmsg("cannot access temporary tables of other sessions")));
770 
771  /*
772  * Read the buffer, and update pgstat counters to reflect a cache hit or
773  * miss.
774  */
775  pgstat_count_buffer_read(reln);
776  buf = ReadBuffer_common(RelationGetSmgr(reln), reln->rd_rel->relpersistence,
777  forkNum, blockNum, mode, strategy, &hit);
778  if (hit)
779  pgstat_count_buffer_hit(reln);
780  return buf;
781 }
782 
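/*
 * A minimal sketch of a non-default read (illustration only): a bulk scan
 * that uses a ring-buffer strategy and tolerates damaged pages by zeroing
 * them, per the RBM_ZERO_ON_ERROR description above.
 *
 *		BufferAccessStrategy strategy = GetAccessStrategy(BAS_BULKREAD);
 *		Buffer		buf;
 *
 *		buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
 *								 RBM_ZERO_ON_ERROR, strategy);
 *		... use the page ...
 *		ReleaseBuffer(buf);
 *		FreeAccessStrategy(strategy);
 */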
783 
784 /*
785  * ReadBufferWithoutRelcache -- like ReadBufferExtended, but doesn't require
786  * a relcache entry for the relation.
787  *
788  * Pass permanent = true for a RELPERSISTENCE_PERMANENT relation, and
789  * permanent = false for a RELPERSISTENCE_UNLOGGED relation. This function
790  * cannot be used for temporary relations (and making that work might be
791  * difficult, unless we only want to read temporary relations for our own
792  * BackendId).
793  */
794 Buffer
795 ReadBufferWithoutRelcache(RelFileLocator rlocator, ForkNumber forkNum,
796  BlockNumber blockNum, ReadBufferMode mode,
797  BufferAccessStrategy strategy, bool permanent)
798 {
799  bool hit;
800 
801  SMgrRelation smgr = smgropen(rlocator, InvalidBackendId);
802 
803  return ReadBuffer_common(smgr, permanent ? RELPERSISTENCE_PERMANENT :
804  RELPERSISTENCE_UNLOGGED, forkNum, blockNum,
805  mode, strategy, &hit);
806 }
807 
808 /*
809  * Convenience wrapper around ExtendBufferedRelBy() extending by one block.
810  */
811 Buffer
813  ForkNumber forkNum,
814  BufferAccessStrategy strategy,
815  uint32 flags)
816 {
817  Buffer buf;
818  uint32 extend_by = 1;
819 
820  ExtendBufferedRelBy(eb, forkNum, strategy, flags, extend_by,
821  &buf, &extend_by);
822 
823  return buf;
824 }
825 
826 /*
827  * Extend relation by multiple blocks.
828  *
829  * Tries to extend the relation by extend_by blocks. Depending on the
830  * availability of resources the relation may end up being extended by a
831  * smaller number of pages (unless an error is thrown, always by at least one
832  * page). *extended_by is updated to the number of pages the relation has been
833  * extended to.
834  *
835  * buffers needs to be an array that is at least extend_by long. Upon
836  * completion, the first extend_by array elements will point to a pinned
837  * buffer.
838  *
839  * If EB_LOCK_FIRST is part of flags, the first returned buffer is
840  * locked. This is useful for callers that want a buffer that is guaranteed to
841  * be empty.
842  */
845  ForkNumber fork,
846  BufferAccessStrategy strategy,
847  uint32 flags,
848  uint32 extend_by,
849  Buffer *buffers,
850  uint32 *extended_by)
851 {
852  Assert((eb.rel != NULL) != (eb.smgr != NULL));
853  Assert(eb.smgr == NULL || eb.relpersistence != 0);
854  Assert(extend_by > 0);
855 
856  if (eb.smgr == NULL)
857  {
858  eb.smgr = RelationGetSmgr(eb.rel);
859  eb.relpersistence = eb.rel->rd_rel->relpersistence;
860  }
861 
862  return ExtendBufferedRelCommon(eb, fork, strategy, flags,
863  extend_by, InvalidBlockNumber,
864  buffers, extended_by);
865 }
866 
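/*
 * A minimal caller-side sketch of ExtendBufferedRelBy() (illustration only;
 * it reuses the EB_SMGR() initializer seen elsewhere in this file and assumes
 * "rel" is an opened permanent relation):
 *
 *		Buffer		buffers[16];
 *		uint32		extended_by = 0;
 *
 *		(void) ExtendBufferedRelBy(EB_SMGR(RelationGetSmgr(rel),
 *										   rel->rd_rel->relpersistence),
 *								   MAIN_FORKNUM, NULL, EB_LOCK_FIRST,
 *								   lengthof(buffers), buffers, &extended_by);
 *
 * With EB_LOCK_FIRST the first returned buffer comes back locked; the caller
 * initializes the pages it needs and eventually releases every one of the
 * extended_by pinned buffers.
 */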
867 /*
868  * Extend the relation so it is at least extend_to blocks large, return buffer
869  * (extend_to - 1).
870  *
871  * This is useful for callers that want to write a specific page, regardless
872  * of the current size of the relation (e.g. useful for visibilitymap and for
873  * crash recovery).
874  */
875 Buffer
877  ForkNumber fork,
878  BufferAccessStrategy strategy,
879  uint32 flags,
880  BlockNumber extend_to,
881  ReadBufferMode mode)
882 {
883  BlockNumber current_size;
884  uint32 extended_by = 0;
885  Buffer buffer = InvalidBuffer;
886  Buffer buffers[64];
887 
888  Assert((eb.rel != NULL) != (eb.smgr != NULL));
889  Assert(eb.smgr == NULL || eb.relpersistence != 0);
890  Assert(extend_to != InvalidBlockNumber && extend_to > 0);
891 
892  if (eb.smgr == NULL)
893  {
894  eb.smgr = RelationGetSmgr(eb.rel);
895  eb.relpersistence = eb.rel->rd_rel->relpersistence;
896  }
897 
898  /*
899  * If desired, create the file if it doesn't exist. If
900  * smgr_cached_nblocks[fork] is positive then it must exist, no need for
901  * an smgrexists call.
902  */
903  if ((flags & EB_CREATE_FORK_IF_NEEDED) &&
904  (eb.smgr->smgr_cached_nblocks[fork] == 0 ||
905  eb.smgr->smgr_cached_nblocks[fork] == InvalidBlockNumber) &&
906  !smgrexists(eb.smgr, fork))
907  {
908  LockRelationForExtension(eb.rel, ExclusiveLock);
909 
910  /* could have been closed while waiting for lock */
911  if (eb.rel)
912  eb.smgr = RelationGetSmgr(eb.rel);
913 
914  /* recheck, fork might have been created concurrently */
915  if (!smgrexists(eb.smgr, fork))
916  smgrcreate(eb.smgr, fork, flags & EB_PERFORMING_RECOVERY);
917 
918  UnlockRelationForExtension(eb.rel, ExclusiveLock);
919  }
920 
921  /*
922  * If requested, invalidate size cache, so that smgrnblocks asks the
923  * kernel.
924  */
925  if (flags & EB_CLEAR_SIZE_CACHE)
926  eb.smgr->smgr_cached_nblocks[fork] = InvalidBlockNumber;
927 
928  /*
929  * Estimate how many pages we'll need to extend by. This avoids acquiring
930  * unnecessarily many victim buffers.
931  */
932  current_size = smgrnblocks(eb.smgr, fork);
933 
934  /*
935  * Since no-one else can be looking at the page contents yet, there is no
936  * difference between an exclusive lock and a cleanup-strength lock. Note
937  * that we pass the original mode to ReadBuffer_common() below, when
938  * falling back to reading the buffer due to a concurrent relation extension.
939  */
940  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
941  flags |= EB_LOCK_TARGET;
942 
943  while (current_size < extend_to)
944  {
945  uint32 num_pages = lengthof(buffers);
946  BlockNumber first_block;
947 
948  if ((uint64) current_size + num_pages > extend_to)
949  num_pages = extend_to - current_size;
950 
951  first_block = ExtendBufferedRelCommon(eb, fork, strategy, flags,
952  num_pages, extend_to,
953  buffers, &extended_by);
954 
955  current_size = first_block + extended_by;
956  Assert(num_pages != 0 || current_size >= extend_to);
957 
958  for (int i = 0; i < extended_by; i++)
959  {
960  if (first_block + i != extend_to - 1)
961  ReleaseBuffer(buffers[i]);
962  else
963  buffer = buffers[i];
964  }
965  }
966 
967  /*
968  * It's possible that another backend concurrently extended the relation.
969  * In that case read the buffer.
970  *
971  * XXX: Should we control this via a flag?
972  */
973  if (buffer == InvalidBuffer)
974  {
975  bool hit;
976 
977  Assert(extended_by == 0);
978  buffer = ReadBuffer_common(eb.smgr, eb.relpersistence,
979  fork, extend_to - 1, mode, strategy,
980  &hit);
981  }
982 
983  return buffer;
984 }
985 
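/*
 * A minimal sketch of an ExtendBufferedRelTo() caller (illustration only,
 * loosely modeled on visibility-map-style usage; "rel" and the target block
 * number "blkno" are assumptions here):
 *
 *		Buffer		buf;
 *
 *		buf = ExtendBufferedRelTo(EB_SMGR(RelationGetSmgr(rel),
 *										  rel->rd_rel->relpersistence),
 *								  VISIBILITYMAP_FORKNUM, NULL,
 *								  EB_CREATE_FORK_IF_NEEDED | EB_CLEAR_SIZE_CACHE,
 *								  blkno + 1, RBM_ZERO_ON_ERROR);
 *
 * Afterwards the fork is at least blkno + 1 blocks long and buf holds block
 * blkno, pinned.
 */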
986 /*
987  * ReadBuffer_common -- common logic for all ReadBuffer variants
988  *
989  * *hit is set to true if the request was satisfied from shared buffer cache.
990  */
991 static Buffer
992 ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
993  BlockNumber blockNum, ReadBufferMode mode,
994  BufferAccessStrategy strategy, bool *hit)
995 {
996  BufferDesc *bufHdr;
997  Block bufBlock;
998  bool found;
999  IOContext io_context;
1000  IOObject io_object;
1001  bool isLocalBuf = SmgrIsTemp(smgr);
1002 
1003  *hit = false;
1004 
1005  /*
1006  * Backward compatibility path, most code should use ExtendBufferedRel()
1007  * instead, as acquiring the extension lock inside ExtendBufferedRel()
1008  * scales a lot better.
1009  */
1010  if (unlikely(blockNum == P_NEW))
1011  {
1012  uint32 flags = EB_SKIP_EXTENSION_LOCK;
1013 
1014  /*
1015  * Since no-one else can be looking at the page contents yet, there is
1016  * no difference between an exclusive lock and a cleanup-strength
1017  * lock.
1018  */
1019  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
1020  flags |= EB_LOCK_FIRST;
1021 
1022  return ExtendBufferedRel(EB_SMGR(smgr, relpersistence),
1023  forkNum, strategy, flags);
1024  }
1025 
1026  /* Make sure we will have room to remember the buffer pin */
1027  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
1028 
1029  TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
1031  smgr->smgr_rlocator.locator.dbOid,
1033  smgr->smgr_rlocator.backend);
1034 
1035  if (isLocalBuf)
1036  {
1037  /*
1038  * We do not use a BufferAccessStrategy for I/O of temporary tables.
1039  * However, in some cases, the "strategy" may not be NULL, so we can't
1040  * rely on IOContextForStrategy() to set the right IOContext for us.
1041  * This may happen in cases like CREATE TEMPORARY TABLE AS...
1042  */
1043  io_context = IOCONTEXT_NORMAL;
1044  io_object = IOOBJECT_TEMP_RELATION;
1045  bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, &found);
1046  if (found)
1047  pgBufferUsage.local_blks_hit++;
1048  else if (mode == RBM_NORMAL || mode == RBM_NORMAL_NO_LOG ||
1049  mode == RBM_ZERO_ON_ERROR)
1050  pgBufferUsage.local_blks_read++;
1051  }
1052  else
1053  {
1054  /*
1055  * lookup the buffer. IO_IN_PROGRESS is set if the requested block is
1056  * not currently in memory.
1057  */
1058  io_context = IOContextForStrategy(strategy);
1059  io_object = IOOBJECT_RELATION;
1060  bufHdr = BufferAlloc(smgr, relpersistence, forkNum, blockNum,
1061  strategy, &found, io_context);
1062  if (found)
1063  pgBufferUsage.shared_blks_hit++;
1064  else if (mode == RBM_NORMAL || mode == RBM_NORMAL_NO_LOG ||
1065  mode == RBM_ZERO_ON_ERROR)
1066  pgBufferUsage.shared_blks_read++;
1067  }
1068 
1069  /* At this point we do NOT hold any locks. */
1070 
1071  /* if it was already in the buffer pool, we're done */
1072  if (found)
1073  {
1074  /* Just need to update stats before we exit */
1075  *hit = true;
1076  VacuumPageHit++;
1077  pgstat_count_io_op(io_object, io_context, IOOP_HIT);
1078 
1079  if (VacuumCostActive)
1080  VacuumCostBalance += VacuumCostPageHit;
1081 
1082  TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
1084  smgr->smgr_rlocator.locator.dbOid,
1086  smgr->smgr_rlocator.backend,
1087  found);
1088 
1089  /*
1090  * In RBM_ZERO_AND_LOCK mode the caller expects the page to be locked
1091  * on return.
1092  */
1093  if (!isLocalBuf)
1094  {
1095  if (mode == RBM_ZERO_AND_LOCK)
1096  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr),
1097  LW_EXCLUSIVE);
1098  else if (mode == RBM_ZERO_AND_CLEANUP_LOCK)
1099  LockBufferForCleanup(BufferDescriptorGetBuffer(bufHdr));
1100  }
1101 
1102  return BufferDescriptorGetBuffer(bufHdr);
1103  }
1104 
1105  /*
1106  * if we have gotten to this point, we have allocated a buffer for the
1107  * page but its contents are not yet valid. IO_IN_PROGRESS is set for it,
1108  * if it's a shared buffer.
1109  */
1110  Assert(!(pg_atomic_read_u32(&bufHdr->state) & BM_VALID)); /* spinlock not needed */
1111 
1112  bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
1113 
1114  /*
1115  * Read in the page, unless the caller intends to overwrite it and just
1116  * wants us to allocate a buffer.
1117  */
1118  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
1119  MemSet((char *) bufBlock, 0, BLCKSZ);
1120  else
1121  {
1122  instr_time io_start = pgstat_prepare_io_time();
1123 
1124  smgrread(smgr, forkNum, blockNum, bufBlock);
1125 
1126  pgstat_count_io_op_time(io_object, io_context,
1127  IOOP_READ, io_start, 1);
1128 
1129  /* check for garbage data */
1130  if (!PageIsVerifiedExtended((Page) bufBlock, blockNum,
1131  PIV_LOG_WARNING | PIV_REPORT_STAT))
1132  {
1133  if (mode == RBM_ZERO_ON_ERROR || zero_damaged_pages)
1134  {
1135  ereport(WARNING,
1136  (errcode(ERRCODE_DATA_CORRUPTED),
1137  errmsg("invalid page in block %u of relation %s; zeroing out page",
1138  blockNum,
1139  relpath(smgr->smgr_rlocator, forkNum))));
1140  MemSet((char *) bufBlock, 0, BLCKSZ);
1141  }
1142  else
1143  ereport(ERROR,
1144  (errcode(ERRCODE_DATA_CORRUPTED),
1145  errmsg("invalid page in block %u of relation %s",
1146  blockNum,
1147  relpath(smgr->smgr_rlocator, forkNum))));
1148  }
1149  }
1150 
1151  /*
1152  * In RBM_ZERO_AND_LOCK / RBM_ZERO_AND_CLEANUP_LOCK mode, grab the buffer
1153  * content lock before marking the page as valid, to make sure that no
1154  * other backend sees the zeroed page before the caller has had a chance
1155  * to initialize it.
1156  *
1157  * Since no-one else can be looking at the page contents yet, there is no
1158  * difference between an exclusive lock and a cleanup-strength lock. (Note
1159  * that we cannot use LockBuffer() or LockBufferForCleanup() here, because
1160  * they assert that the buffer is already valid.)
1161  */
1162  if ((mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK) &&
1163  !isLocalBuf)
1164  {
1165  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_EXCLUSIVE);
1166  }
1167 
1168  if (isLocalBuf)
1169  {
1170  /* Only need to adjust flags */
1171  uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
1172 
1173  buf_state |= BM_VALID;
1174  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
1175  }
1176  else
1177  {
1178  /* Set BM_VALID, terminate IO, and wake up any waiters */
1179  TerminateBufferIO(bufHdr, false, BM_VALID);
1180  }
1181 
1182  VacuumPageMiss++;
1183  if (VacuumCostActive)
1184  VacuumCostBalance += VacuumCostPageMiss;
1185 
1186  TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
1188  smgr->smgr_rlocator.locator.dbOid,
1190  smgr->smgr_rlocator.backend,
1191  found);
1192 
1193  return BufferDescriptorGetBuffer(bufHdr);
1194 }
1195 
1196 /*
1197  * BufferAlloc -- subroutine for ReadBuffer. Handles lookup of a shared
1198  * buffer. If no buffer exists already, selects a replacement
1199  * victim and evicts the old page, but does NOT read in new page.
1200  *
1201  * "strategy" can be a buffer replacement strategy object, or NULL for
1202  * the default strategy. The selected buffer's usage_count is advanced when
1203  * using the default strategy, but otherwise possibly not (see PinBuffer).
1204  *
1205  * The returned buffer is pinned and is already marked as holding the
1206  * desired page. If it already did have the desired page, *foundPtr is
1207  * set true. Otherwise, *foundPtr is set false and the buffer is marked
1208  * as IO_IN_PROGRESS; ReadBuffer will now need to do I/O to fill it.
1209  *
1210  * *foundPtr is actually redundant with the buffer's BM_VALID flag, but
1211  * we keep it for simplicity in ReadBuffer.
1212  *
1213  * io_context is passed as an output parameter to avoid calling
1214  * IOContextForStrategy() when there is a shared buffers hit and no IO
1215  * statistics need be captured.
1216  *
1217  * No locks are held either at entry or exit.
1218  */
1219 static BufferDesc *
1220 BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
1221  BlockNumber blockNum,
1222  BufferAccessStrategy strategy,
1223  bool *foundPtr, IOContext io_context)
1224 {
1225  BufferTag newTag; /* identity of requested block */
1226  uint32 newHash; /* hash value for newTag */
1227  LWLock *newPartitionLock; /* buffer partition lock for it */
1228  int existing_buf_id;
1229  Buffer victim_buffer;
1230  BufferDesc *victim_buf_hdr;
1231  uint32 victim_buf_state;
1232 
1233  /* create a tag so we can lookup the buffer */
1234  InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
1235 
1236  /* determine its hash code and partition lock ID */
1237  newHash = BufTableHashCode(&newTag);
1238  newPartitionLock = BufMappingPartitionLock(newHash);
1239 
1240  /* see if the block is in the buffer pool already */
1241  LWLockAcquire(newPartitionLock, LW_SHARED);
1242  existing_buf_id = BufTableLookup(&newTag, newHash);
1243  if (existing_buf_id >= 0)
1244  {
1245  BufferDesc *buf;
1246  bool valid;
1247 
1248  /*
1249  * Found it. Now, pin the buffer so no one can steal it from the
1250  * buffer pool, and check to see if the correct data has been loaded
1251  * into the buffer.
1252  */
1253  buf = GetBufferDescriptor(existing_buf_id);
1254 
1255  valid = PinBuffer(buf, strategy);
1256 
1257  /* Can release the mapping lock as soon as we've pinned it */
1258  LWLockRelease(newPartitionLock);
1259 
1260  *foundPtr = true;
1261 
1262  if (!valid)
1263  {
1264  /*
1265  * We can only get here if (a) someone else is still reading in
1266  * the page, or (b) a previous read attempt failed. We have to
1267  * wait for any active read attempt to finish, and then set up our
1268  * own read attempt if the page is still not BM_VALID.
1269  * StartBufferIO does it all.
1270  */
1271  if (StartBufferIO(buf, true))
1272  {
1273  /*
1274  * If we get here, previous attempts to read the buffer must
1275  * have failed ... but we shall bravely try again.
1276  */
1277  *foundPtr = false;
1278  }
1279  }
1280 
1281  return buf;
1282  }
1283 
1284  /*
1285  * Didn't find it in the buffer pool. We'll have to initialize a new
1286  * buffer. Remember to unlock the mapping lock while doing the work.
1287  */
1288  LWLockRelease(newPartitionLock);
1289 
1290  /*
1291  * Acquire a victim buffer. Somebody else might try to do the same, we
1292  * don't hold any conflicting locks. If so we'll have to undo our work
1293  * later.
1294  */
1295  victim_buffer = GetVictimBuffer(strategy, io_context);
1296  victim_buf_hdr = GetBufferDescriptor(victim_buffer - 1);
1297 
1298  /*
1299  * Try to make a hashtable entry for the buffer under its new tag. If
1300  * somebody else inserted another buffer for the tag, we'll release the
1301  * victim buffer we acquired and use the already inserted one.
1302  */
1303  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1304  existing_buf_id = BufTableInsert(&newTag, newHash, victim_buf_hdr->buf_id);
1305  if (existing_buf_id >= 0)
1306  {
1307  BufferDesc *existing_buf_hdr;
1308  bool valid;
1309 
1310  /*
1311  * Got a collision. Someone has already done what we were about to do.
1312  * We'll just handle this as if it were found in the buffer pool in
1313  * the first place. First, give up the buffer we were planning to
1314  * use.
1315  *
1316  * We could do this after releasing the partition lock, but then we'd
1317  * have to call ResourceOwnerEnlargeBuffers() &
1318  * ReservePrivateRefCountEntry() before acquiring the lock, for the
1319  * rare case of such a collision.
1320  */
1321  UnpinBuffer(victim_buf_hdr);
1322 
1323  /*
1324  * The victim buffer we acquired previously is clean and unused, let it
1325  * be found again quickly
1326  */
1327  StrategyFreeBuffer(victim_buf_hdr);
1328 
1329  /* remaining code should match code at top of routine */
1330 
1331  existing_buf_hdr = GetBufferDescriptor(existing_buf_id);
1332 
1333  valid = PinBuffer(existing_buf_hdr, strategy);
1334 
1335  /* Can release the mapping lock as soon as we've pinned it */
1336  LWLockRelease(newPartitionLock);
1337 
1338  *foundPtr = true;
1339 
1340  if (!valid)
1341  {
1342  /*
1343  * We can only get here if (a) someone else is still reading in
1344  * the page, or (b) a previous read attempt failed. We have to
1345  * wait for any active read attempt to finish, and then set up our
1346  * own read attempt if the page is still not BM_VALID.
1347  * StartBufferIO does it all.
1348  */
1349  if (StartBufferIO(existing_buf_hdr, true))
1350  {
1351  /*
1352  * If we get here, previous attempts to read the buffer must
1353  * have failed ... but we shall bravely try again.
1354  */
1355  *foundPtr = false;
1356  }
1357  }
1358 
1359  return existing_buf_hdr;
1360  }
1361 
1362  /*
1363  * Need to lock the buffer header too in order to change its tag.
1364  */
1365  victim_buf_state = LockBufHdr(victim_buf_hdr);
1366 
1367  /* some sanity checks while we hold the buffer header lock */
1368  Assert(BUF_STATE_GET_REFCOUNT(victim_buf_state) == 1);
1369  Assert(!(victim_buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY | BM_IO_IN_PROGRESS)));
1370 
1371  victim_buf_hdr->tag = newTag;
1372 
1373  /*
1374  * Make sure BM_PERMANENT is set for buffers that must be written at every
1375  * checkpoint. Unlogged buffers only need to be written at shutdown
1376  * checkpoints, except for their "init" forks, which need to be treated
1377  * just like permanent relations.
1378  */
1379  victim_buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
1380  if (relpersistence == RELPERSISTENCE_PERMANENT || forkNum == INIT_FORKNUM)
1381  victim_buf_state |= BM_PERMANENT;
1382 
1383  UnlockBufHdr(victim_buf_hdr, victim_buf_state);
1384 
1385  LWLockRelease(newPartitionLock);
1386 
1387  /*
1388  * Buffer contents are currently invalid. Try to obtain the right to
1389  * start I/O. If StartBufferIO returns false, then someone else managed
1390  * to read it before we did, so there's nothing left for BufferAlloc() to
1391  * do.
1392  */
1393  if (StartBufferIO(victim_buf_hdr, true))
1394  *foundPtr = false;
1395  else
1396  *foundPtr = true;
1397 
1398  return victim_buf_hdr;
1399 }
1400 
1401 /*
1402  * InvalidateBuffer -- mark a shared buffer invalid and return it to the
1403  * freelist.
1404  *
1405  * The buffer header spinlock must be held at entry. We drop it before
1406  * returning. (This is sane because the caller must have locked the
1407  * buffer in order to be sure it should be dropped.)
1408  *
1409  * This is used only in contexts such as dropping a relation. We assume
1410  * that no other backend could possibly be interested in using the page,
1411  * so the only reason the buffer might be pinned is if someone else is
1412  * trying to write it out. We have to let them finish before we can
1413  * reclaim the buffer.
1414  *
1415  * The buffer could get reclaimed by someone else while we are waiting
1416  * to acquire the necessary locks; if so, don't mess it up.
1417  */
1418 static void
1419 InvalidateBuffer(BufferDesc *buf)
1420 {
1421  BufferTag oldTag;
1422  uint32 oldHash; /* hash value for oldTag */
1423  LWLock *oldPartitionLock; /* buffer partition lock for it */
1424  uint32 oldFlags;
1425  uint32 buf_state;
1426 
1427  /* Save the original buffer tag before dropping the spinlock */
1428  oldTag = buf->tag;
1429 
1430  buf_state = pg_atomic_read_u32(&buf->state);
1431  Assert(buf_state & BM_LOCKED);
1432  UnlockBufHdr(buf, buf_state);
1433 
1434  /*
1435  * Need to compute the old tag's hashcode and partition lock ID. XXX is it
1436  * worth storing the hashcode in BufferDesc so we need not recompute it
1437  * here? Probably not.
1438  */
1439  oldHash = BufTableHashCode(&oldTag);
1440  oldPartitionLock = BufMappingPartitionLock(oldHash);
1441 
1442 retry:
1443 
1444  /*
1445  * Acquire exclusive mapping lock in preparation for changing the buffer's
1446  * association.
1447  */
1448  LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
1449 
1450  /* Re-lock the buffer header */
1451  buf_state = LockBufHdr(buf);
1452 
1453  /* If it's changed while we were waiting for lock, do nothing */
1454  if (!BufferTagsEqual(&buf->tag, &oldTag))
1455  {
1456  UnlockBufHdr(buf, buf_state);
1457  LWLockRelease(oldPartitionLock);
1458  return;
1459  }
1460 
1461  /*
1462  * We assume the only reason for it to be pinned is that someone else is
1463  * flushing the page out. Wait for them to finish. (This could be an
1464  * infinite loop if the refcount is messed up... it would be nice to time
1465  * out after awhile, but there seems no way to be sure how many loops may
1466  * be needed. Note that if the other guy has pinned the buffer but not
1467  * yet done StartBufferIO, WaitIO will fall through and we'll effectively
1468  * be busy-looping here.)
1469  */
1470  if (BUF_STATE_GET_REFCOUNT(buf_state) != 0)
1471  {
1472  UnlockBufHdr(buf, buf_state);
1473  LWLockRelease(oldPartitionLock);
1474  /* safety check: should definitely not be our *own* pin */
1475  if (GetPrivateRefCount(BufferDescriptorGetBuffer(buf)) > 0)
1476  elog(ERROR, "buffer is pinned in InvalidateBuffer");
1477  WaitIO(buf);
1478  goto retry;
1479  }
1480 
1481  /*
1482  * Clear out the buffer's tag and flags. We must do this to ensure that
1483  * linear scans of the buffer array don't think the buffer is valid.
1484  */
1485  oldFlags = buf_state & BUF_FLAG_MASK;
1486  ClearBufferTag(&buf->tag);
1487  buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
1488  UnlockBufHdr(buf, buf_state);
1489 
1490  /*
1491  * Remove the buffer from the lookup hashtable, if it was in there.
1492  */
1493  if (oldFlags & BM_TAG_VALID)
1494  BufTableDelete(&oldTag, oldHash);
1495 
1496  /*
1497  * Done with mapping lock.
1498  */
1499  LWLockRelease(oldPartitionLock);
1500 
1501  /*
1502  * Insert the buffer at the head of the list of free buffers.
1503  */
1504  StrategyFreeBuffer(buf);
1505 }
1506 
1507 /*
1508  * Helper routine for GetVictimBuffer()
1509  *
1510  * Needs to be called on a buffer with a valid tag, pinned, but without the
1511  * buffer header spinlock held.
1512  *
1513  * Returns true if the buffer can be reused, in which case the buffer is only
1514  * pinned by this backend and marked as invalid, false otherwise.
1515  */
1516 static bool
1517 InvalidateVictimBuffer(BufferDesc *buf_hdr)
1518 {
1519  uint32 buf_state;
1520  uint32 hash;
1521  LWLock *partition_lock;
1522  BufferTag tag;
1523 
1525 
1526  /* have buffer pinned, so it's safe to read tag without lock */
1527  tag = buf_hdr->tag;
1528 
1529  hash = BufTableHashCode(&tag);
1530  partition_lock = BufMappingPartitionLock(hash);
1531 
1532  LWLockAcquire(partition_lock, LW_EXCLUSIVE);
1533 
1534  /* lock the buffer header */
1535  buf_state = LockBufHdr(buf_hdr);
1536 
1537  /*
1538  * We have the buffer pinned, so nobody else should have been able to unset
1539  * this concurrently.
1540  */
1541  Assert(buf_state & BM_TAG_VALID);
1542  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1543  Assert(BufferTagsEqual(&buf_hdr->tag, &tag));
1544 
1545  /*
1546  * If somebody else pinned the buffer since, or even worse, dirtied it,
1547  * give up on this buffer: It's clearly in use.
1548  */
1549  if (BUF_STATE_GET_REFCOUNT(buf_state) != 1 || (buf_state & BM_DIRTY))
1550  {
1551  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1552 
1553  UnlockBufHdr(buf_hdr, buf_state);
1554  LWLockRelease(partition_lock);
1555 
1556  return false;
1557  }
1558 
1559  /*
1560  * Clear out the buffer's tag and flags and usagecount. This is not
1561  * strictly required, as BM_TAG_VALID/BM_VALID needs to be checked before
1562  * doing anything with the buffer. But currently it's beneficial, as the
1563  * cheaper pre-check for several linear scans of shared buffers use the
1564  * cheaper pre-check for several linear scans of shared buffers uses the
1565  */
1566  ClearBufferTag(&buf_hdr->tag);
1567  buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
1568  UnlockBufHdr(buf_hdr, buf_state);
1569 
1570  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1571 
1572  /* finally delete buffer from the buffer mapping table */
1573  BufTableDelete(&tag, hash);
1574 
1575  LWLockRelease(partition_lock);
1576 
1577  Assert(!(buf_state & (BM_DIRTY | BM_VALID | BM_TAG_VALID)));
1578  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1580 
1581  return true;
1582 }
1583 
1584 static Buffer
1585 GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context)
1586 {
1587  BufferDesc *buf_hdr;
1588  Buffer buf;
1589  uint32 buf_state;
1590  bool from_ring;
1591 
1592  /*
1593  * Ensure, while the spinlock's not yet held, that there's a free refcount
1594  * entry.
1595  */
1598 
1599  /* we return here if a prospective victim buffer gets used concurrently */
1600 again:
1601 
1602  /*
1603  * Select a victim buffer. The buffer is returned with its header
1604  * spinlock still held!
1605  */
1606  buf_hdr = StrategyGetBuffer(strategy, &buf_state, &from_ring);
1607  buf = BufferDescriptorGetBuffer(buf_hdr);
1608 
1609  Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 0);
1610 
1611  /* Pin the buffer and then release the buffer spinlock */
1612  PinBuffer_Locked(buf_hdr);
1613 
1614  /*
1615  * We shouldn't have any other pins for this buffer.
1616  */
1618 
1619  /*
1620  * If the buffer was dirty, try to write it out. There is a race
1621  * condition here, in that someone might dirty it after we released the
1622  * buffer header lock above, or even while we are writing it out (since
1623  * our share-lock won't prevent hint-bit updates). We will recheck the
1624  * dirty bit after re-locking the buffer header.
1625  */
1626  if (buf_state & BM_DIRTY)
1627  {
1628  LWLock *content_lock;
1629 
1630  Assert(buf_state & BM_TAG_VALID);
1631  Assert(buf_state & BM_VALID);
1632 
1633  /*
1634  * We need a share-lock on the buffer contents to write it out (else
1635  * we might write invalid data, eg because someone else is compacting
1636  * the page contents while we write). We must use a conditional lock
1637  * acquisition here to avoid deadlock. Even though the buffer was not
1638  * pinned (and therefore surely not locked) when StrategyGetBuffer
1639  * returned it, someone else could have pinned and exclusive-locked it
1640  * by the time we get here. If we try to get the lock unconditionally,
1641  * we'd block waiting for them; if they later block waiting for us,
1642  * deadlock ensues. (This has been observed to happen when two
1643  * backends are both trying to split btree index pages, and the second
1644  * one just happens to be trying to split the page the first one got
1645  * from StrategyGetBuffer.)
1646  */
1647  content_lock = BufferDescriptorGetContentLock(buf_hdr);
1648  if (!LWLockConditionalAcquire(content_lock, LW_SHARED))
1649  {
1650  /*
1651  * Someone else has locked the buffer, so give it up and loop back
1652  * to get another one.
1653  */
1654  UnpinBuffer(buf_hdr);
1655  goto again;
1656  }
1657 
1658  /*
1659  * If using a nondefault strategy, and writing the buffer would
1660  * require a WAL flush, let the strategy decide whether to go ahead
1661  * and write/reuse the buffer or to choose another victim. We need a
1662  * lock to inspect the page LSN, so this can't be done inside
1663  * StrategyGetBuffer.
1664  */
1665  if (strategy != NULL)
1666  {
1667  XLogRecPtr lsn;
1668 
1669  /* Read the LSN while holding buffer header lock */
1670  buf_state = LockBufHdr(buf_hdr);
1671  lsn = BufferGetLSN(buf_hdr);
1672  UnlockBufHdr(buf_hdr, buf_state);
1673 
1674  if (XLogNeedsFlush(lsn)
1675  && StrategyRejectBuffer(strategy, buf_hdr, from_ring))
1676  {
1677  LWLockRelease(content_lock);
1678  UnpinBuffer(buf_hdr);
1679  goto again;
1680  }
1681  }
1682 
1683  /* OK, do the I/O */
1684  FlushBuffer(buf_hdr, NULL, IOOBJECT_RELATION, io_context);
1685  LWLockRelease(content_lock);
1686 
1688  &buf_hdr->tag);
1689  }
1690 
1691 
1692  if (buf_state & BM_VALID)
1693  {
1694  /*
1695  * When a BufferAccessStrategy is in use, blocks evicted from shared
1696  * buffers are counted as IOOP_EVICT in the corresponding context
1697  * (e.g. IOCONTEXT_BULKWRITE). Shared buffers are evicted by a
1698  * strategy in two cases: 1) while initially claiming buffers for the
1699  * strategy ring 2) to replace an existing strategy ring buffer
1700  * because it is pinned or in use and cannot be reused.
1701  *
1702  * Blocks evicted from buffers already in the strategy ring are
1703  * counted as IOOP_REUSE in the corresponding strategy context.
1704  *
1705  * At this point, we can accurately count evictions and reuses,
1706  * because we have successfully claimed the valid buffer. Previously,
1707  * we may have been forced to release the buffer due to concurrent
1708  * pinners or erroring out.
1709  */
1711  from_ring ? IOOP_REUSE : IOOP_EVICT);
1712  }
1713 
1714  /*
1715  * If the buffer has an entry in the buffer mapping table, delete it. This
1716  * can fail because another backend could have pinned or dirtied the
1717  * buffer.
1718  */
1719  if ((buf_state & BM_TAG_VALID) && !InvalidateVictimBuffer(buf_hdr))
1720  {
1721  UnpinBuffer(buf_hdr);
1722  goto again;
1723  }
1724 
1725  /* a final set of sanity checks */
1726 #ifdef USE_ASSERT_CHECKING
1727  buf_state = pg_atomic_read_u32(&buf_hdr->state);
1728 
1729  Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
1730  Assert(!(buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY)));
1731 
1733 #endif
1734 
1735  return buf;
1736 }
1737 
1738 /*
1739  * Limit the number of pins a batch operation may additionally acquire, to
1740  * avoid running out of pinnable buffers.
1741  *
1742  * One additional pin is always allowed, as otherwise the operation likely
1743  * cannot be performed at all.
1744  *
1745  * The number of allowed pins for a backend is computed based on
1746  * shared_buffers and the maximum number of connections possible. That's very
1747  * pessimistic, but outside of toy-sized shared_buffers it should allow
1748  * sufficient pins.
1749  */
1750 static void
1751 LimitAdditionalPins(uint32 *additional_pins)
1752 {
1753  uint32 max_backends;
1754  int max_proportional_pins;
1755 
1756  if (*additional_pins <= 1)
1757  return;
1758 
1759  max_backends = MaxBackends + NUM_AUXILIARY_PROCS;
1760  max_proportional_pins = NBuffers / max_backends;
1761 
1762  /*
1763  * Subtract the approximate number of buffers already pinned by this
1764  * backend. We get the number of "overflowed" pins for free, but don't
1765  * know the number of pins in PrivateRefCountArray. The cost of
1766  * calculating that exactly doesn't seem worth it, so just assume the max.
1767  */
1768  max_proportional_pins -= PrivateRefCountOverflowed + REFCOUNT_ARRAY_ENTRIES;
1769 
1770  if (max_proportional_pins < 0)
1771  max_proportional_pins = 1;
1772 
1773  if (*additional_pins > max_proportional_pins)
1774  *additional_pins = max_proportional_pins;
1775 }
1776 
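/*
 * A worked example of the cap computed above (numbers are illustrative):
 * with shared_buffers of 16384 blocks (128MB at 8kB pages) and MaxBackends +
 * NUM_AUXILIARY_PROCS totalling about 110 processes, max_proportional_pins
 * starts at 16384 / 110 = 148; subtracting REFCOUNT_ARRAY_ENTRIES (8) plus
 * any overflowed pins leaves roughly 140 additional pins for one batch
 * operation, and the limit never drops below 1.
 */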
1777 /*
1778  * Logic shared between ExtendBufferedRelBy(), ExtendBufferedRelTo(). Just to
1779  * avoid duplicating the tracing and relpersistence related logic.
1780  */
1781 static BlockNumber
1783  ForkNumber fork,
1784  BufferAccessStrategy strategy,
1785  uint32 flags,
1786  uint32 extend_by,
1787  BlockNumber extend_upto,
1788  Buffer *buffers,
1789  uint32 *extended_by)
1790 {
1791  BlockNumber first_block;
1792 
1793  TRACE_POSTGRESQL_BUFFER_EXTEND_START(fork,
1798  extend_by);
1799 
1800  if (eb.relpersistence == RELPERSISTENCE_TEMP)
1801  first_block = ExtendBufferedRelLocal(eb, fork, flags,
1802  extend_by, extend_upto,
1803  buffers, &extend_by);
1804  else
1805  first_block = ExtendBufferedRelShared(eb, fork, strategy, flags,
1806  extend_by, extend_upto,
1807  buffers, &extend_by);
1808  *extended_by = extend_by;
1809 
1810  TRACE_POSTGRESQL_BUFFER_EXTEND_DONE(fork,
1815  *extended_by,
1816  first_block);
1817 
1818  return first_block;
1819 }
1820 
1821 /*
1822  * Implementation of ExtendBufferedRelBy() and ExtendBufferedRelTo() for
1823  * shared buffers.
1824  */
1825 static BlockNumber
1827  ForkNumber fork,
1828  BufferAccessStrategy strategy,
1829  uint32 flags,
1830  uint32 extend_by,
1831  BlockNumber extend_upto,
1832  Buffer *buffers,
1833  uint32 *extended_by)
1834 {
1835  BlockNumber first_block;
1836  IOContext io_context = IOContextForStrategy(strategy);
1837  instr_time io_start;
1838 
1839  LimitAdditionalPins(&extend_by);
1840 
1841  /*
1842  * Acquire victim buffers for extension without holding extension lock.
1843  * Writing out victim buffers is the most expensive part of extending the
1844  * relation, particularly when doing so requires WAL flushes. Zeroing out
1845  * the buffers is also quite expensive, so do that before holding the
1846  * extension lock as well.
1847  *
1848  * These pages are pinned by us and not valid. While we hold the pin they
1849  * can't be acquired as victim buffers by another backend.
1850  */
1851  for (uint32 i = 0; i < extend_by; i++)
1852  {
1853  Block buf_block;
1854 
1855  buffers[i] = GetVictimBuffer(strategy, io_context);
1856  buf_block = BufHdrGetBlock(GetBufferDescriptor(buffers[i] - 1));
1857 
1858  /* new buffers are zero-filled */
1859  MemSet((char *) buf_block, 0, BLCKSZ);
1860  }
1861 
1862  /* in case we need to pin an existing buffer below */
1864 
1865  /*
1866  * Lock relation against concurrent extensions, unless requested not to.
1867  *
1868  * We use the same extension lock for all forks. That's unnecessarily
1869  * restrictive, but currently extensions for forks don't happen often
1870  * enough to make it worth locking more granularly.
1871  *
1872  * Note that another backend might have extended the relation by the time
1873  * we get the lock.
1874  */
1875  if (!(flags & EB_SKIP_EXTENSION_LOCK))
1876  {
1878  if (eb.rel)
1879  eb.smgr = RelationGetSmgr(eb.rel);
1880  }
1881 
1882  /*
1883  * If requested, invalidate size cache, so that smgrnblocks asks the
1884  * kernel.
1885  */
1886  if (flags & EB_CLEAR_SIZE_CACHE)
1888 
1889  first_block = smgrnblocks(eb.smgr, fork);
1890 
1891  /*
1892  * Now that we have the accurate relation size, check if the caller wants
1893  * us to extend only up to a specific size. If there were concurrent
1894  * extensions, we might have acquired too many buffers and need to release
1895  * them.
1896  */
1897  if (extend_upto != InvalidBlockNumber)
1898  {
1899  uint32 orig_extend_by = extend_by;
1900 
1901  if (first_block > extend_upto)
1902  extend_by = 0;
1903  else if ((uint64) first_block + extend_by > extend_upto)
1904  extend_by = extend_upto - first_block;
1905 
1906  for (uint32 i = extend_by; i < orig_extend_by; i++)
1907  {
1908  BufferDesc *buf_hdr = GetBufferDescriptor(buffers[i] - 1);
1909 
1910  /*
1911  * The victim buffer we acquired previously is clean and unused;
1912  * let it be found again quickly.
1913  */
1914  StrategyFreeBuffer(buf_hdr);
1915  UnpinBuffer(buf_hdr);
1916  }
1917 
1918  if (extend_by == 0)
1919  {
1920  if (!(flags & EB_SKIP_EXTENSION_LOCK))
1922  *extended_by = extend_by;
1923  return first_block;
1924  }
1925  }
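	/*
	 * Worked example of the clamp above (illustrative numbers, not from this
	 * file): if we pre-acquired extend_by = 8 victim buffers with
	 * extend_upto = 100, but a concurrent extension already grew the fork to
	 * first_block = 97, we keep only 100 - 97 = 3 buffers and hand the other
	 * 5 back to the freelist before extending.
	 */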
1926 
1927  /* Fail if relation is already at maximum possible length */
1928  if ((uint64) first_block + extend_by >= MaxBlockNumber)
1929  ereport(ERROR,
1930  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
1931  errmsg("cannot extend relation %s beyond %u blocks",
1932  relpath(eb.smgr->smgr_rlocator, fork),
1933  MaxBlockNumber)));
1934 
1935  /*
1936  * Insert buffers into buffer table, mark as IO_IN_PROGRESS.
1937  *
1938  * This needs to happen before we extend the relation, because as soon as
1939  * we do, other backends can start to read in those pages.
1940  */
1941  for (int i = 0; i < extend_by; i++)
1942  {
1943  Buffer victim_buf = buffers[i];
1944  BufferDesc *victim_buf_hdr = GetBufferDescriptor(victim_buf - 1);
1945  BufferTag tag;
1946  uint32 hash;
1947  LWLock *partition_lock;
1948  int existing_id;
1949 
1950  InitBufferTag(&tag, &eb.smgr->smgr_rlocator.locator, fork, first_block + i);
1951  hash = BufTableHashCode(&tag);
1952  partition_lock = BufMappingPartitionLock(hash);
1953 
1954  LWLockAcquire(partition_lock, LW_EXCLUSIVE);
1955 
1956  existing_id = BufTableInsert(&tag, hash, victim_buf_hdr->buf_id);
1957 
1958  /*
1959  * We get here only in the corner case where we are trying to extend
1960  * the relation but we found a pre-existing buffer. This can happen
1961  * because a prior attempt at extending the relation failed, and
1962  * because mdread doesn't complain about reads beyond EOF (when
1963  * zero_damaged_pages is ON) and so a previous attempt to read a block
1964  * beyond EOF could have left a "valid" zero-filled buffer.
1965  * Unfortunately, we have also seen this case occurring because of
1966  * buggy Linux kernels that sometimes return an lseek(SEEK_END) result
1967  * that doesn't account for a recent write. In that situation, the
1968  * pre-existing buffer would contain valid data that we don't want to
1969  * overwrite. Since the legitimate cases should always have left a
1970  * zero-filled buffer, complain if not PageIsNew.
1971  */
1972  if (existing_id >= 0)
1973  {
1974  BufferDesc *existing_hdr = GetBufferDescriptor(existing_id);
1975  Block buf_block;
1976  bool valid;
1977 
1978  /*
1979  * Pin the existing buffer before releasing the partition lock,
1980  * preventing it from being evicted.
1981  */
1982  valid = PinBuffer(existing_hdr, strategy);
1983 
1984  LWLockRelease(partition_lock);
1985 
1986  /*
1987  * The victim buffer we acquired previously is clean and unused;
1988  * let it be found again quickly.
1989  */
1990  StrategyFreeBuffer(victim_buf_hdr);
1991  UnpinBuffer(victim_buf_hdr);
1992 
1993  buffers[i] = BufferDescriptorGetBuffer(existing_hdr);
1994  buf_block = BufHdrGetBlock(existing_hdr);
1995 
1996  if (valid && !PageIsNew((Page) buf_block))
1997  ereport(ERROR,
1998  (errmsg("unexpected data beyond EOF in block %u of relation %s",
1999  existing_hdr->tag.blockNum, relpath(eb.smgr->smgr_rlocator, fork)),
2000  errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
2001 
2002  /*
2003  * We *must* do smgr[zero]extend before succeeding, else the page
2004  * will not be reserved by the kernel, and the next P_NEW call
2005  * will decide to return the same page. Clear the BM_VALID bit,
2006  * do StartBufferIO() and proceed.
2007  *
2008  * Loop to handle the very small possibility that someone re-sets
2009  * BM_VALID between our clearing it and StartBufferIO inspecting
2010  * it.
2011  */
2012  do
2013  {
2014  uint32 buf_state = LockBufHdr(existing_hdr);
2015 
2016  buf_state &= ~BM_VALID;
2017  UnlockBufHdr(existing_hdr, buf_state);
2018  } while (!StartBufferIO(existing_hdr, true));
2019  }
2020  else
2021  {
2022  uint32 buf_state;
2023 
2024  buf_state = LockBufHdr(victim_buf_hdr);
2025 
2026  /* some sanity checks while we hold the buffer header lock */
2027  Assert(!(buf_state & (BM_VALID | BM_TAG_VALID | BM_DIRTY | BM_JUST_DIRTIED)));
2028  Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
2029 
2030  victim_buf_hdr->tag = tag;
2031 
2032  buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
2033  if (eb.relpersistence == RELPERSISTENCE_PERMANENT || fork == INIT_FORKNUM)
2034  buf_state |= BM_PERMANENT;
2035 
2036  UnlockBufHdr(victim_buf_hdr, buf_state);
2037 
2038  LWLockRelease(partition_lock);
2039 
2040  /* XXX: could combine the locked operations in it with the above */
2041  StartBufferIO(victim_buf_hdr, true);
2042  }
2043  }
2044 
2045  io_start = pgstat_prepare_io_time();
2046 
2047  /*
2048  * Note: if smgrzeroextend fails, we will end up with buffers that are
2049  * allocated but not marked BM_VALID. The next relation extension will
2050  * still select the same block number (because the relation didn't get any
2051  * longer on disk) and so future attempts to extend the relation will find
2052  * the same buffers (if they have not been recycled) but come right back
2053  * here to try smgrzeroextend again.
2054  *
2055  * We don't need to set checksum for all-zero pages.
2056  */
2057  smgrzeroextend(eb.smgr, fork, first_block, extend_by, false);
2058 
2059  /*
2060  * Release the file-extension lock; it's now OK for someone else to extend
2061  * the relation some more.
2062  *
2063  * We remove IO_IN_PROGRESS after this, as waking up waiting backends can
2064  * take noticeable time.
2065  */
2066  if (!(flags & EB_SKIP_EXTENSION_LOCK))
2068 
2070  io_start, extend_by);
2071 
2072  /* Set BM_VALID, terminate IO, and wake up any waiters */
2073  for (int i = 0; i < extend_by; i++)
2074  {
2075  Buffer buf = buffers[i];
2076  BufferDesc *buf_hdr = GetBufferDescriptor(buf - 1);
2077  bool lock = false;
2078 
2079  if (flags & EB_LOCK_FIRST && i == 0)
2080  lock = true;
2081  else if (flags & EB_LOCK_TARGET)
2082  {
2083  Assert(extend_upto != InvalidBlockNumber);
2084  if (first_block + i + 1 == extend_upto)
2085  lock = true;
2086  }
2087 
2088  if (lock)
2090 
2091  TerminateBufferIO(buf_hdr, false, BM_VALID);
2092  }
2093 
2094  pgBufferUsage.shared_blks_written += extend_by;
2095 
2096  *extended_by = extend_by;
2097 
2098  return first_block;
2099 }
2100 
2101 /*
2102  * MarkBufferDirty
2103  *
2104  * Marks buffer contents as dirty (actual write happens later).
2105  *
2106  * Buffer must be pinned and exclusive-locked. (If caller does not hold
2107  * exclusive lock, then somebody could be in process of writing the buffer,
2108  * leading to risk of bad data written to disk.)
2109  */
2110 void
2112 {
2113  BufferDesc *bufHdr;
2114  uint32 buf_state;
2115  uint32 old_buf_state;
2116 
2117  if (!BufferIsValid(buffer))
2118  elog(ERROR, "bad buffer ID: %d", buffer);
2119 
2120  if (BufferIsLocal(buffer))
2121  {
2123  return;
2124  }
2125 
2126  bufHdr = GetBufferDescriptor(buffer - 1);
2127 
2130  LW_EXCLUSIVE));
2131 
2132  old_buf_state = pg_atomic_read_u32(&bufHdr->state);
2133  for (;;)
2134  {
2135  if (old_buf_state & BM_LOCKED)
2136  old_buf_state = WaitBufHdrUnlocked(bufHdr);
2137 
2138  buf_state = old_buf_state;
2139 
2140  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2141  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
2142 
2143  if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
2144  buf_state))
2145  break;
2146  }
2147 
2148  /*
2149  * If the buffer was not dirty already, do vacuum accounting.
2150  */
2151  if (!(old_buf_state & BM_DIRTY))
2152  {
2153  VacuumPageDirty++;
2155  if (VacuumCostActive)
2157  }
2158 }
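/*
 * Minimal usage sketch (not from this file): the typical pin/lock/dirty
 * pattern around MarkBufferDirty(). Real callers additionally wrap the
 * modification in a critical section and WAL-log it; the helper name is
 * hypothetical.
 */
#include "postgres.h"

#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "utils/rel.h"

static void
example_touch_page(Relation rel, BlockNumber blkno)
{
	Buffer		buf = ReadBuffer(rel, blkno);
	Page		page;

	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
	page = BufferGetPage(buf);

	/* ... modify the page contents here ... */
	(void) page;

	MarkBufferDirty(buf);		/* requires pin + exclusive content lock */
	UnlockReleaseBuffer(buf);	/* drop the lock, then the pin */
}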
2159 
2160 /*
2161  * ReleaseAndReadBuffer -- combine ReleaseBuffer() and ReadBuffer()
2162  *
2163  * Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
2164  * compared to calling the two routines separately. Now it's mainly just
2165  * a convenience function. However, if the passed buffer is valid and
2166  * already contains the desired block, we just return it as-is; and that
2167  * does save considerable work compared to a full release and reacquire.
2168  *
2169  * Note: it is OK to pass buffer == InvalidBuffer, indicating that no old
2170  * buffer actually needs to be released. This case is the same as ReadBuffer,
2171  * but can save some tests in the caller.
2172  */
2173 Buffer
2175  Relation relation,
2176  BlockNumber blockNum)
2177 {
2178  ForkNumber forkNum = MAIN_FORKNUM;
2179  BufferDesc *bufHdr;
2180 
2181  if (BufferIsValid(buffer))
2182  {
2184  if (BufferIsLocal(buffer))
2185  {
2186  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2187  if (bufHdr->tag.blockNum == blockNum &&
2188  BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
2189  BufTagGetForkNum(&bufHdr->tag) == forkNum)
2190  return buffer;
2192  }
2193  else
2194  {
2195  bufHdr = GetBufferDescriptor(buffer - 1);
2196  /* we have pin, so it's ok to examine tag without spinlock */
2197  if (bufHdr->tag.blockNum == blockNum &&
2198  BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
2199  BufTagGetForkNum(&bufHdr->tag) == forkNum)
2200  return buffer;
2201  UnpinBuffer(bufHdr);
2202  }
2203  }
2204 
2205  return ReadBuffer(relation, blockNum);
2206 }
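/*
 * Usage sketch (not from this file): stepping through a relation block by
 * block, letting ReleaseAndReadBuffer() skip the release/reacquire when the
 * same buffer is requested again. Content locking is omitted; the helper
 * name and loop are hypothetical.
 */
#include "postgres.h"

#include "storage/bufmgr.h"
#include "utils/rel.h"

static void
example_walk_blocks(Relation rel, BlockNumber nblocks)
{
	Buffer		buf = InvalidBuffer;

	for (BlockNumber blkno = 0; blkno < nblocks; blkno++)
	{
		buf = ReleaseAndReadBuffer(buf, rel, blkno);
		/* ... examine the page while holding a content lock ... */
	}

	if (BufferIsValid(buf))
		ReleaseBuffer(buf);
}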
2207 
2208 /*
2209  * PinBuffer -- make buffer unavailable for replacement.
2210  *
2211  * For the default access strategy, the buffer's usage_count is incremented
2212  * when we first pin it; for other strategies we just make sure the usage_count
2213  * isn't zero. (The idea of the latter is that we don't want synchronized
2214  * heap scans to inflate the count, but we need it to not be zero to discourage
2215  * other backends from stealing buffers from our ring. As long as we cycle
2216  * through the ring faster than the global clock-sweep cycles, buffers in
2217  * our ring won't be chosen as victims for replacement by other backends.)
2218  *
2219  * This should be applied only to shared buffers, never local ones.
2220  *
2221  * Since buffers are pinned/unpinned very frequently, pin buffers without
2222  * taking the buffer header lock; instead update the state variable in a loop of
2223  * CAS operations. Hopefully it's just a single CAS.
2224  *
2225  * Note that ResourceOwnerEnlargeBuffers must have been done already.
2226  *
2227  * Returns true if buffer is BM_VALID, else false. This provision allows
2228  * some callers to avoid an extra spinlock cycle.
2229  */
2230 static bool
2232 {
2234  bool result;
2235  PrivateRefCountEntry *ref;
2236 
2237  Assert(!BufferIsLocal(b));
2238 
2239  ref = GetPrivateRefCountEntry(b, true);
2240 
2241  if (ref == NULL)
2242  {
2243  uint32 buf_state;
2244  uint32 old_buf_state;
2245 
2247  ref = NewPrivateRefCountEntry(b);
2248 
2249  old_buf_state = pg_atomic_read_u32(&buf->state);
2250  for (;;)
2251  {
2252  if (old_buf_state & BM_LOCKED)
2253  old_buf_state = WaitBufHdrUnlocked(buf);
2254 
2255  buf_state = old_buf_state;
2256 
2257  /* increase refcount */
2258  buf_state += BUF_REFCOUNT_ONE;
2259 
2260  if (strategy == NULL)
2261  {
2262  /* Default case: increase usagecount unless already max. */
2264  buf_state += BUF_USAGECOUNT_ONE;
2265  }
2266  else
2267  {
2268  /*
2269  * Ring buffers shouldn't evict others from the pool. Thus we
2270  * don't make usagecount more than 1.
2271  */
2272  if (BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
2273  buf_state += BUF_USAGECOUNT_ONE;
2274  }
2275 
2276  if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
2277  buf_state))
2278  {
2279  result = (buf_state & BM_VALID) != 0;
2280 
2281  /*
2282  * Assume that we acquired a buffer pin for the purposes of
2283  * Valgrind buffer client checks (even in !result case) to
2284  * keep things simple. Buffers that are unsafe to access are
2285  * not generally guaranteed to be marked undefined or
2286  * non-accessible in any case.
2287  */
2289  break;
2290  }
2291  }
2292  }
2293  else
2294  {
2295  /*
2296  * If we previously pinned the buffer, it must surely be valid.
2297  *
2298  * Note: We deliberately avoid a Valgrind client request here.
2299  * Individual access methods can optionally superimpose buffer page
2300  * client requests on top of our client requests to enforce that
2301  * buffers are only accessed while locked (and pinned). It's possible
2302  * that the buffer page is legitimately non-accessible here. We
2303  * cannot meddle with that.
2304  */
2305  result = true;
2306  }
2307 
2308  ref->refcount++;
2309  Assert(ref->refcount > 0);
2311  return result;
2312 }
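/*
 * Illustrative sketch (not part of bufmgr.c) of the compare-and-swap retry
 * pattern used above, written with C11 atomics instead of PostgreSQL's
 * pg_atomic API. The BM_LOCKED wait and the usage-count handling are
 * omitted; only the refcount increment is shown.
 */
#include <stdatomic.h>
#include <stdint.h>

#define EXAMPLE_REFCOUNT_ONE 1u		/* refcount lives in the low state bits */

static void
example_pin(_Atomic uint32_t *state)
{
	uint32_t	old_state = atomic_load(state);

	for (;;)
	{
		uint32_t	new_state = old_state + EXAMPLE_REFCOUNT_ONE;

		/* on failure, old_state is refreshed with the current value */
		if (atomic_compare_exchange_weak(state, &old_state, new_state))
			break;
	}
}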
2313 
2314 /*
2315  * PinBuffer_Locked -- as above, but caller already locked the buffer header.
2316  * The spinlock is released before return.
2317  *
2318  * As this function is called with the spinlock held, the caller has to
2319  * previously call ReservePrivateRefCountEntry().
2320  *
2321  * Currently, no callers of this function want to modify the buffer's
2322  * usage_count at all, so there's no need for a strategy parameter.
2323  * Also we don't bother with a BM_VALID test (the caller could check that for
2324  * itself).
2325  *
2326  * Also all callers only ever use this function when it's known that the
2327  * buffer can't have a preexisting pin by this backend. That allows us to skip
2328  * searching the private refcount array & hash, which is a boon, because the
2329  * spinlock is still held.
2330  *
2331  * Note: use of this routine is frequently mandatory, not just an optimization
2332  * to save a spin lock/unlock cycle, because we need to pin a buffer before
2333  * its state can change under us.
2334  */
2335 static void
2337 {
2338  Buffer b;
2339  PrivateRefCountEntry *ref;
2340  uint32 buf_state;
2341 
2342  /*
2343  * As explained, we don't expect any preexisting pins. That allows us to
2344  * manipulate the PrivateRefCount after releasing the spinlock.
2345  */
2347 
2348  /*
2349  * Buffer can't have a preexisting pin, so mark its page as defined to
2350  * Valgrind (this is similar to the PinBuffer() case where the backend
2351  * doesn't already have a buffer pin)
2352  */
2354 
2355  /*
2356  * Since we hold the buffer spinlock, we can update the buffer state and
2357  * release the lock in one operation.
2358  */
2359  buf_state = pg_atomic_read_u32(&buf->state);
2360  Assert(buf_state & BM_LOCKED);
2361  buf_state += BUF_REFCOUNT_ONE;
2362  UnlockBufHdr(buf, buf_state);
2363 
2365 
2366  ref = NewPrivateRefCountEntry(b);
2367  ref->refcount++;
2368 
2370 }
2371 
2372 /*
2373  * UnpinBuffer -- make buffer available for replacement.
2374  *
2375  * This should be applied only to shared buffers, never local ones. This
2376  * always adjusts CurrentResourceOwner.
2377  */
2378 static void
2380 {
2381  PrivateRefCountEntry *ref;
2383 
2384  Assert(!BufferIsLocal(b));
2385 
2386  /* not moving as we're likely deleting it soon anyway */
2387  ref = GetPrivateRefCountEntry(b, false);
2388  Assert(ref != NULL);
2389 
2391 
2392  Assert(ref->refcount > 0);
2393  ref->refcount--;
2394  if (ref->refcount == 0)
2395  {
2396  uint32 buf_state;
2397  uint32 old_buf_state;
2398 
2399  /*
2400  * Mark buffer non-accessible to Valgrind.
2401  *
2402  * Note that the buffer may have already been marked non-accessible
2403  * within access method code that enforces that buffers are only
2404  * accessed while a buffer lock is held.
2405  */
2407 
2408  /* I'd better not still hold the buffer content lock */
2410 
2411  /*
2412  * Decrement the shared reference count.
2413  *
2414  * Since the buffer spinlock holder can update the status using just a write,
2415  * it's not safe to use atomic decrement here; thus use a CAS loop.
2416  */
2417  old_buf_state = pg_atomic_read_u32(&buf->state);
2418  for (;;)
2419  {
2420  if (old_buf_state & BM_LOCKED)
2421  old_buf_state = WaitBufHdrUnlocked(buf);
2422 
2423  buf_state = old_buf_state;
2424 
2425  buf_state -= BUF_REFCOUNT_ONE;
2426 
2427  if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
2428  buf_state))
2429  break;
2430  }
2431 
2432  /* Support LockBufferForCleanup() */
2433  if (buf_state & BM_PIN_COUNT_WAITER)
2434  {
2435  /*
2436  * Acquire the buffer header lock, re-check that there's a waiter.
2437  * Another backend could have unpinned this buffer, and already
2438  * woken up the waiter. There's no danger of the buffer being
2439  * replaced after we unpinned it above, as it's pinned by the
2440  * waiter.
2441  */
2442  buf_state = LockBufHdr(buf);
2443 
2444  if ((buf_state & BM_PIN_COUNT_WAITER) &&
2445  BUF_STATE_GET_REFCOUNT(buf_state) == 1)
2446  {
2447  /* we just released the last pin other than the waiter's */
2448  int wait_backend_pgprocno = buf->wait_backend_pgprocno;
2449 
2450  buf_state &= ~BM_PIN_COUNT_WAITER;
2451  UnlockBufHdr(buf, buf_state);
2452  ProcSendSignal(wait_backend_pgprocno);
2453  }
2454  else
2455  UnlockBufHdr(buf, buf_state);
2456  }
2458  }
2459 }
2460 
2461 #define ST_SORT sort_checkpoint_bufferids
2462 #define ST_ELEMENT_TYPE CkptSortItem
2463 #define ST_COMPARE(a, b) ckpt_buforder_comparator(a, b)
2464 #define ST_SCOPE static
2465 #define ST_DEFINE
2466 #include <lib/sort_template.h>
2467 
2468 /*
2469  * BufferSync -- Write out all dirty buffers in the pool.
2470  *
2471  * This is called at checkpoint time to write out all dirty shared buffers.
2472  * The checkpoint request flags should be passed in. If CHECKPOINT_IMMEDIATE
2473  * is set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN,
2474  * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_ALL is set, we write even
2475  * unlogged buffers, which are otherwise skipped. The remaining flags
2476  * currently have no effect here.
2477  */
2478 static void
2479 BufferSync(int flags)
2480 {
2481  uint32 buf_state;
2482  int buf_id;
2483  int num_to_scan;
2484  int num_spaces;
2485  int num_processed;
2486  int num_written;
2487  CkptTsStatus *per_ts_stat = NULL;
2488  Oid last_tsid;
2489  binaryheap *ts_heap;
2490  int i;
2491  int mask = BM_DIRTY;
2492  WritebackContext wb_context;
2493 
2494  /* Make sure we can handle the pin inside SyncOneBuffer */
2496 
2497  /*
2498  * Unless this is a shutdown checkpoint or we have been explicitly told,
2499  * we write only permanent, dirty buffers. But at shutdown or end of
2500  * recovery, we write all dirty buffers.
2501  */
2504  mask |= BM_PERMANENT;
2505 
2506  /*
2507  * Loop over all buffers, and mark the ones that need to be written with
2508  * BM_CHECKPOINT_NEEDED. Count them as we go (num_to_scan), so that we
2509  * can estimate how much work needs to be done.
2510  *
2511  * This allows us to write only those pages that were dirty when the
2512  * checkpoint began, and not those that get dirtied while it proceeds.
2513  * Whenever a page with BM_CHECKPOINT_NEEDED is written out, either by us
2514  * later in this function, or by normal backends or the bgwriter cleaning
2515  * scan, the flag is cleared. Any buffer dirtied after this point won't
2516  * have the flag set.
2517  *
2518  * Note that if we fail to write some buffer, we may leave buffers with
2519  * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
2520  * certainly need to be written for the next checkpoint attempt, too.
2521  */
2522  num_to_scan = 0;
2523  for (buf_id = 0; buf_id < NBuffers; buf_id++)
2524  {
2525  BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
2526 
2527  /*
2528  * Header spinlock is enough to examine BM_DIRTY, see comment in
2529  * SyncOneBuffer.
2530  */
2531  buf_state = LockBufHdr(bufHdr);
2532 
2533  if ((buf_state & mask) == mask)
2534  {
2535  CkptSortItem *item;
2536 
2537  buf_state |= BM_CHECKPOINT_NEEDED;
2538 
2539  item = &CkptBufferIds[num_to_scan++];
2540  item->buf_id = buf_id;
2541  item->tsId = bufHdr->tag.spcOid;
2542  item->relNumber = BufTagGetRelNumber(&bufHdr->tag);
2543  item->forkNum = BufTagGetForkNum(&bufHdr->tag);
2544  item->blockNum = bufHdr->tag.blockNum;
2545  }
2546 
2547  UnlockBufHdr(bufHdr, buf_state);
2548 
2549  /* Check for barrier events in case NBuffers is large. */
2552  }
2553 
2554  if (num_to_scan == 0)
2555  return; /* nothing to do */
2556 
2558 
2559  TRACE_POSTGRESQL_BUFFER_SYNC_START(NBuffers, num_to_scan);
2560 
2561  /*
2562  * Sort buffers that need to be written to reduce the likelihood of random
2563  * IO. The sorting is also important for the implementation of balancing
2564  * writes between tablespaces. Without balancing writes we'd potentially
2565  * end up writing to the tablespaces one by one, possibly overloading the
2566  * underlying system.
2567  */
2568  sort_checkpoint_bufferids(CkptBufferIds, num_to_scan);
2569 
2570  num_spaces = 0;
2571 
2572  /*
2573  * Allocate progress status for each tablespace with buffers that need to
2574  * be flushed. This requires the to-be-flushed array to be sorted.
2575  */
2576  last_tsid = InvalidOid;
2577  for (i = 0; i < num_to_scan; i++)
2578  {
2579  CkptTsStatus *s;
2580  Oid cur_tsid;
2581 
2582  cur_tsid = CkptBufferIds[i].tsId;
2583 
2584  /*
2585  * Grow array of per-tablespace status structs, every time a new
2586  * tablespace is found.
2587  */
2588  if (last_tsid == InvalidOid || last_tsid != cur_tsid)
2589  {
2590  Size sz;
2591 
2592  num_spaces++;
2593 
2594  /*
2595  * Not worth adding grow-by-power-of-2 logic here - even with a
2596  * few hundred tablespaces this should be fine.
2597  */
2598  sz = sizeof(CkptTsStatus) * num_spaces;
2599 
2600  if (per_ts_stat == NULL)
2601  per_ts_stat = (CkptTsStatus *) palloc(sz);
2602  else
2603  per_ts_stat = (CkptTsStatus *) repalloc(per_ts_stat, sz);
2604 
2605  s = &per_ts_stat[num_spaces - 1];
2606  memset(s, 0, sizeof(*s));
2607  s->tsId = cur_tsid;
2608 
2609  /*
2610  * The first buffer in this tablespace. As CkptBufferIds is sorted
2611  * by tablespace all (s->num_to_scan) buffers in this tablespace
2612  * will follow afterwards.
2613  */
2614  s->index = i;
2615 
2616  /*
2617  * progress_slice will be determined once we know how many buffers
2618  * are in each tablespace, i.e. after this loop.
2619  */
2620 
2621  last_tsid = cur_tsid;
2622  }
2623  else
2624  {
2625  s = &per_ts_stat[num_spaces - 1];
2626  }
2627 
2628  s->num_to_scan++;
2629 
2630  /* Check for barrier events. */
2633  }
2634 
2635  Assert(num_spaces > 0);
2636 
2637  /*
2638  * Build a min-heap over the write-progress in the individual tablespaces,
2639  * and compute how large a portion of the total progress a single
2640  * processed buffer is.
2641  */
2642  ts_heap = binaryheap_allocate(num_spaces,
2644  NULL);
2645 
2646  for (i = 0; i < num_spaces; i++)
2647  {
2648  CkptTsStatus *ts_stat = &per_ts_stat[i];
2649 
2650  ts_stat->progress_slice = (float8) num_to_scan / ts_stat->num_to_scan;
2651 
2652  binaryheap_add_unordered(ts_heap, PointerGetDatum(ts_stat));
2653  }
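	/*
	 * Worked example of progress_slice (illustrative numbers, not from this
	 * file): with 1000 to-be-written buffers split 900/100 across two
	 * tablespaces, the slices are 1000/900 ~= 1.11 and 1000/100 = 10, so
	 * each tablespace reaches the common "finish line" of 1000 exactly when
	 * its last buffer is written, and the min-heap interleaves the writes
	 * proportionally along the way.
	 */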
2654 
2655  binaryheap_build(ts_heap);
2656 
2657  /*
2658  * Iterate through to-be-checkpointed buffers and write the ones (still)
2659  * marked with BM_CHECKPOINT_NEEDED. The writes are balanced between
2660  * tablespaces; otherwise the sorting would lead to only one tablespace
2661  * receiving writes at a time, making inefficient use of the hardware.
2662  */
2663  num_processed = 0;
2664  num_written = 0;
2665  while (!binaryheap_empty(ts_heap))
2666  {
2667  BufferDesc *bufHdr = NULL;
2668  CkptTsStatus *ts_stat = (CkptTsStatus *)
2670 
2671  buf_id = CkptBufferIds[ts_stat->index].buf_id;
2672  Assert(buf_id != -1);
2673 
2674  bufHdr = GetBufferDescriptor(buf_id);
2675 
2676  num_processed++;
2677 
2678  /*
2679  * We don't need to acquire the lock here, because we're only looking
2680  * at a single bit. It's possible that someone else writes the buffer
2681  * and clears the flag right after we check, but that doesn't matter
2682  * since SyncOneBuffer will then do nothing. However, there is a
2683  * further race condition: it's conceivable that between the time we
2684  * examine the bit here and the time SyncOneBuffer acquires the lock,
2685  * someone else not only wrote the buffer but replaced it with another
2686  * page and dirtied it. In that improbable case, SyncOneBuffer will
2687  * write the buffer though we didn't need to. It doesn't seem worth
2688  * guarding against this, though.
2689  */
2691  {
2692  if (SyncOneBuffer(buf_id, false, &wb_context) & BUF_WRITTEN)
2693  {
2694  TRACE_POSTGRESQL_BUFFER_SYNC_WRITTEN(buf_id);
2696  num_written++;
2697  }
2698  }
2699 
2700  /*
2701  * Measure progress independently of actually having to flush the buffer
2702  * - otherwise writing becomes unbalanced.
2703  */
2704  ts_stat->progress += ts_stat->progress_slice;
2705  ts_stat->num_scanned++;
2706  ts_stat->index++;
2707 
2708  /* Have all the buffers from the tablespace been processed? */
2709  if (ts_stat->num_scanned == ts_stat->num_to_scan)
2710  {
2711  binaryheap_remove_first(ts_heap);
2712  }
2713  else
2714  {
2715  /* update heap with the new progress */
2716  binaryheap_replace_first(ts_heap, PointerGetDatum(ts_stat));
2717  }
2718 
2719  /*
2720  * Sleep to throttle our I/O rate.
2721  *
2722  * (This will check for barrier events even if it doesn't sleep.)
2723  */
2724  CheckpointWriteDelay(flags, (double) num_processed / num_to_scan);
2725  }
2726 
2727  /*
2728  * Issue all pending flushes. Only checkpointer calls BufferSync(), so
2729  * IOContext will always be IOCONTEXT_NORMAL.
2730  */
2732 
2733  pfree(per_ts_stat);
2734  per_ts_stat = NULL;
2735  binaryheap_free(ts_heap);
2736 
2737  /*
2738  * Update checkpoint statistics. As noted above, this doesn't include
2739  * buffers written by other backends or bgwriter scan.
2740  */
2741  CheckpointStats.ckpt_bufs_written += num_written;
2742 
2743  TRACE_POSTGRESQL_BUFFER_SYNC_DONE(NBuffers, num_written, num_to_scan);
2744 }
2745 
2746 /*
2747  * BgBufferSync -- Write out some dirty buffers in the pool.
2748  *
2749  * This is called periodically by the background writer process.
2750  *
2751  * Returns true if it's appropriate for the bgwriter process to go into
2752  * low-power hibernation mode. (This happens if the strategy clock sweep
2753  * has been "lapped" and no buffer allocations have occurred recently,
2754  * or if the bgwriter has been effectively disabled by setting
2755  * bgwriter_lru_maxpages to 0.)
2756  */
2757 bool
2759 {
2760  /* info obtained from freelist.c */
2761  int strategy_buf_id;
2762  uint32 strategy_passes;
2763  uint32 recent_alloc;
2764 
2765  /*
2766  * Information saved between calls so we can determine the strategy
2767  * point's advance rate and avoid scanning already-cleaned buffers.
2768  */
2769  static bool saved_info_valid = false;
2770  static int prev_strategy_buf_id;
2771  static uint32 prev_strategy_passes;
2772  static int next_to_clean;
2773  static uint32 next_passes;
2774 
2775  /* Moving averages of allocation rate and clean-buffer density */
2776  static float smoothed_alloc = 0;
2777  static float smoothed_density = 10.0;
2778 
2779  /* Potentially these could be tunables, but for now, not */
2780  float smoothing_samples = 16;
2781  float scan_whole_pool_milliseconds = 120000.0;
2782 
2783  /* Used to compute how far we scan ahead */
2784  long strategy_delta;
2785  int bufs_to_lap;
2786  int bufs_ahead;
2787  float scans_per_alloc;
2788  int reusable_buffers_est;
2789  int upcoming_alloc_est;
2790  int min_scan_buffers;
2791 
2792  /* Variables for the scanning loop proper */
2793  int num_to_scan;
2794  int num_written;
2795  int reusable_buffers;
2796 
2797  /* Variables for final smoothed_density update */
2798  long new_strategy_delta;
2799  uint32 new_recent_alloc;
2800 
2801  /*
2802  * Find out where the freelist clock sweep currently is, and how many
2803  * buffer allocations have happened since our last call.
2804  */
2805  strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
2806 
2807  /* Report buffer alloc counts to pgstat */
2808  PendingBgWriterStats.buf_alloc += recent_alloc;
2809 
2810  /*
2811  * If we're not running the LRU scan, just stop after doing the stats
2812  * stuff. We mark the saved state invalid so that we can recover sanely
2813  * if LRU scan is turned back on later.
2814  */
2815  if (bgwriter_lru_maxpages <= 0)
2816  {
2817  saved_info_valid = false;
2818  return true;
2819  }
2820 
2821  /*
2822  * Compute strategy_delta = how many buffers have been scanned by the
2823  * clock sweep since last time. If first time through, assume none. Then
2824  * see if we are still ahead of the clock sweep, and if so, how many
2825  * buffers we could scan before we'd catch up with it and "lap" it. Note:
2826  * weird-looking coding of xxx_passes comparisons are to avoid bogus
2827  * behavior when the passes counts wrap around.
2828  */
2829  if (saved_info_valid)
2830  {
2831  int32 passes_delta = strategy_passes - prev_strategy_passes;
2832 
2833  strategy_delta = strategy_buf_id - prev_strategy_buf_id;
2834  strategy_delta += (long) passes_delta * NBuffers;
2835 
2836  Assert(strategy_delta >= 0);
2837 
2838  if ((int32) (next_passes - strategy_passes) > 0)
2839  {
2840  /* we're one pass ahead of the strategy point */
2841  bufs_to_lap = strategy_buf_id - next_to_clean;
2842 #ifdef BGW_DEBUG
2843  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2844  next_passes, next_to_clean,
2845  strategy_passes, strategy_buf_id,
2846  strategy_delta, bufs_to_lap);
2847 #endif
2848  }
2849  else if (next_passes == strategy_passes &&
2850  next_to_clean >= strategy_buf_id)
2851  {
2852  /* on same pass, but ahead or at least not behind */
2853  bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
2854 #ifdef BGW_DEBUG
2855  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2856  next_passes, next_to_clean,
2857  strategy_passes, strategy_buf_id,
2858  strategy_delta, bufs_to_lap);
2859 #endif
2860  }
2861  else
2862  {
2863  /*
2864  * We're behind, so skip forward to the strategy point and start
2865  * cleaning from there.
2866  */
2867 #ifdef BGW_DEBUG
2868  elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
2869  next_passes, next_to_clean,
2870  strategy_passes, strategy_buf_id,
2871  strategy_delta);
2872 #endif
2873  next_to_clean = strategy_buf_id;
2874  next_passes = strategy_passes;
2875  bufs_to_lap = NBuffers;
2876  }
2877  }
2878  else
2879  {
2880  /*
2881  * Initializing at startup or after LRU scanning had been off. Always
2882  * start at the strategy point.
2883  */
2884 #ifdef BGW_DEBUG
2885  elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
2886  strategy_passes, strategy_buf_id);
2887 #endif
2888  strategy_delta = 0;
2889  next_to_clean = strategy_buf_id;
2890  next_passes = strategy_passes;
2891  bufs_to_lap = NBuffers;
2892  }
2893 
2894  /* Update saved info for next time */
2895  prev_strategy_buf_id = strategy_buf_id;
2896  prev_strategy_passes = strategy_passes;
2897  saved_info_valid = true;
2898 
2899  /*
2900  * Compute how many buffers had to be scanned for each new allocation, ie,
2901  * 1/density of reusable buffers, and track a moving average of that.
2902  *
2903  * If the strategy point didn't move, we don't update the density estimate.
2904  */
2905  if (strategy_delta > 0 && recent_alloc > 0)
2906  {
2907  scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
2908  smoothed_density += (scans_per_alloc - smoothed_density) /
2909  smoothing_samples;
2910  }
2911 
2912  /*
2913  * Estimate how many reusable buffers there are between the current
2914  * strategy point and where we've scanned ahead to, based on the smoothed
2915  * density estimate.
2916  */
2917  bufs_ahead = NBuffers - bufs_to_lap;
2918  reusable_buffers_est = (float) bufs_ahead / smoothed_density;
2919 
2920  /*
2921  * Track a moving average of recent buffer allocations. Here, rather than
2922  * a true average we want a fast-attack, slow-decline behavior: we
2923  * immediately follow any increase.
2924  */
2925  if (smoothed_alloc <= (float) recent_alloc)
2926  smoothed_alloc = recent_alloc;
2927  else
2928  smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
2929  smoothing_samples;
2930 
2931  /* Scale the estimate by a GUC to allow more aggressive tuning. */
2932  upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
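	/*
	 * Minimal sketch (not part of bufmgr.c) of the two smoothing rules used
	 * here: an ordinary exponential moving average for the density estimate
	 * and the fast-attack / slow-decline variant for the allocation rate.
	 * The function name is hypothetical; the sample count of 16 mirrors
	 * smoothing_samples above.
	 *
	 * static void
	 * example_smoothing(float *smoothed_density, float *smoothed_alloc,
	 *                   float scans_per_alloc, float recent_alloc)
	 * {
	 *     const float smoothing_samples = 16;
	 *
	 *     // standard EMA: move 1/16th of the way toward the new sample
	 *     *smoothed_density += (scans_per_alloc - *smoothed_density) /
	 *         smoothing_samples;
	 *
	 *     // fast attack: jump up immediately, decay slowly on the way down
	 *     if (*smoothed_alloc <= recent_alloc)
	 *         *smoothed_alloc = recent_alloc;
	 *     else
	 *         *smoothed_alloc += (recent_alloc - *smoothed_alloc) /
	 *             smoothing_samples;
	 * }
	 */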
2933 
2934  /*
2935  * If recent_alloc remains at zero for many cycles, smoothed_alloc will
2936  * eventually underflow to zero, and the underflows produce annoying
2937  * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
2938  * zero, there's no point in tracking smaller and smaller values of
2939  * smoothed_alloc, so just reset it to exactly zero to avoid this
2940  * syndrome. It will pop back up as soon as recent_alloc increases.
2941  */
2942  if (upcoming_alloc_est == 0)
2943  smoothed_alloc = 0;
2944 
2945  /*
2946  * Even in cases where there's been little or no buffer allocation
2947  * activity, we want to make a small amount of progress through the buffer
2948  * cache so that as many reusable buffers as possible are clean after an
2949  * idle period.
2950  *
2951  * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
2952  * the BGW will be called during the scan_whole_pool time; slice the
2953  * buffer pool into that many sections.
2954  */
2955  min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
2956 
2957  if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
2958  {
2959 #ifdef BGW_DEBUG
2960  elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
2961  upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
2962 #endif
2963  upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
2964  }
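	/*
	 * Worked example (hypothetical settings, not from this file): with
	 * NBuffers = 16384, scan_whole_pool_milliseconds = 120000 and
	 * BgWriterDelay = 200, the bgwriter runs 600 times per two-minute
	 * window, so min_scan_buffers = 16384 / 600 = 27 buffers per round even
	 * when no allocations are happening.
	 */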
2965 
2966  /*
2967  * Now write out dirty reusable buffers, working forward from the
2968  * next_to_clean point, until we have lapped the strategy scan, or cleaned
2969  * enough buffers to match our estimate of the next cycle's allocation
2970  * requirements, or hit the bgwriter_lru_maxpages limit.
2971  */
2972 
2973  /* Make sure we can handle the pin inside SyncOneBuffer */
2975 
2976  num_to_scan = bufs_to_lap;
2977  num_written = 0;
2978  reusable_buffers = reusable_buffers_est;
2979 
2980  /* Execute the LRU scan */
2981  while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
2982  {
2983  int sync_state = SyncOneBuffer(next_to_clean, true,
2984  wb_context);
2985 
2986  if (++next_to_clean >= NBuffers)
2987  {
2988  next_to_clean = 0;
2989  next_passes++;
2990  }
2991  num_to_scan--;
2992 
2993  if (sync_state & BUF_WRITTEN)
2994  {
2995  reusable_buffers++;
2996  if (++num_written >= bgwriter_lru_maxpages)
2997  {
2999  break;
3000  }
3001  }
3002  else if (sync_state & BUF_REUSABLE)
3003  reusable_buffers++;
3004  }
3005 
3006  PendingBgWriterStats.buf_written_clean += num_written;
3007 
3008 #ifdef BGW_DEBUG
3009  elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
3010  recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
3011  smoothed_density, reusable_buffers_est, upcoming_alloc_est,
3012  bufs_to_lap - num_to_scan,
3013  num_written,
3014  reusable_buffers - reusable_buffers_est);
3015 #endif
3016 
3017  /*
3018  * Consider the above scan as being like a new allocation scan.
3019  * Characterize its density and update the smoothed one based on it. This
3020  * effectively halves the moving average period in cases where both the
3021  * strategy and the background writer are doing some useful scanning,
3022  * which is helpful because a long memory isn't as desirable on the
3023  * density estimates.
3024  */
3025  new_strategy_delta = bufs_to_lap - num_to_scan;
3026  new_recent_alloc = reusable_buffers - reusable_buffers_est;
3027  if (new_strategy_delta > 0 && new_recent_alloc > 0)
3028  {
3029  scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
3030  smoothed_density += (scans_per_alloc - smoothed_density) /
3031  smoothing_samples;
3032 
3033 #ifdef BGW_DEBUG
3034  elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
3035  new_recent_alloc, new_strategy_delta,
3036  scans_per_alloc, smoothed_density);
3037 #endif
3038  }
3039 
3040  /* Return true if OK to hibernate */
3041  return (bufs_to_lap == 0 && recent_alloc == 0);
3042 }
3043 
3044 /*
3045  * SyncOneBuffer -- process a single buffer during syncing.
3046  *
3047  * If skip_recently_used is true, we don't write currently-pinned buffers, nor
3048  * buffers marked recently used, as these are not replacement candidates.
3049  *
3050  * Returns a bitmask containing the following flag bits:
3051  * BUF_WRITTEN: we wrote the buffer.
3052  * BUF_REUSABLE: buffer is available for replacement, ie, it has
3053  * pin count 0 and usage count 0.
3054  *
3055  * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
3056  * after locking it, but we don't care all that much.)
3057  *
3058  * Note: caller must have done ResourceOwnerEnlargeBuffers.
3059  */
3060 static int
3061 SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
3062 {
3063  BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
3064  int result = 0;
3065  uint32 buf_state;
3066  BufferTag tag;
3067 
3069 
3070  /*
3071  * Check whether buffer needs writing.
3072  *
3073  * We can make this check without taking the buffer content lock so long
3074  * as we mark pages dirty in access methods *before* logging changes with
3075  * XLogInsert(): if someone marks the buffer dirty just after our check we
3076  * don't worry, because our checkpoint.redo points before the log record for
3077  * the upcoming changes and so we are not required to write such a dirty buffer.
3078  */
3079  buf_state = LockBufHdr(bufHdr);
3080 
3081  if (BUF_STATE_GET_REFCOUNT(buf_state) == 0 &&
3082  BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
3083  {
3084  result |= BUF_REUSABLE;
3085  }
3086  else if (skip_recently_used)
3087  {
3088  /* Caller told us not to write recently-used buffers */
3089  UnlockBufHdr(bufHdr, buf_state);
3090  return result;
3091  }
3092 
3093  if (!(buf_state & BM_VALID) || !(buf_state & BM_DIRTY))
3094  {
3095  /* It's clean, so nothing to do */
3096  UnlockBufHdr(bufHdr, buf_state);
3097  return result;
3098  }
3099 
3100  /*
3101  * Pin it, share-lock it, write it. (FlushBuffer will do nothing if the
3102  * buffer is clean by the time we've locked it.)
3103  */
3104  PinBuffer_Locked(bufHdr);
3106 
3108 
3110 
3111  tag = bufHdr->tag;
3112 
3113  UnpinBuffer(bufHdr);
3114 
3115  /*
3116  * SyncOneBuffer() is only called by checkpointer and bgwriter, so
3117  * IOContext will always be IOCONTEXT_NORMAL.
3118  */
3120 
3121  return result | BUF_WRITTEN;
3122 }
3123 
3124 /*
3125  * AtEOXact_Buffers - clean up at end of transaction.
3126  *
3127  * As of PostgreSQL 8.0, buffer pins should get released by the
3128  * ResourceOwner mechanism. This routine is just a debugging
3129  * cross-check that no pins remain.
3130  */
3131 void
3132 AtEOXact_Buffers(bool isCommit)
3133 {
3135 
3136  AtEOXact_LocalBuffers(isCommit);
3137 
3139 }
3140 
3141 /*
3142  * Initialize access to shared buffer pool
3143  *
3144  * This is called during backend startup (whether standalone or under the
3145  * postmaster). It sets up for this backend's access to the already-existing
3146  * buffer pool.
3147  */
3148 void
3150 {
3151  HASHCTL hash_ctl;
3152 
3153  memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
3154 
3155  hash_ctl.keysize = sizeof(int32);
3156  hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
3157 
3158  PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
3159  HASH_ELEM | HASH_BLOBS);
3160 
3161  /*
3162  * AtProcExit_Buffers needs LWLock access, and thereby has to be called at
3163  * the corresponding phase of backend shutdown.
3164  */
3165  Assert(MyProc != NULL);
3167 }
3168 
3169 /*
3170  * During backend exit, ensure that we released all shared-buffer locks and
3171  * assert that we have no remaining pins.
3172  */
3173 static void
3175 {
3176  UnlockBuffers();
3177 
3179 
3180  /* localbuf.c needs a chance too */
3182 }
3183 
3184 /*
3185  * CheckForBufferLeaks - ensure this backend holds no buffer pins
3186  *
3187  * As of PostgreSQL 8.0, buffer pins should get released by the
3188  * ResourceOwner mechanism. This routine is just a debugging
3189  * cross-check that no pins remain.
3190  */
3191 static void
3193 {
3194 #ifdef USE_ASSERT_CHECKING
3195  int RefCountErrors = 0;
3197  int i;
3198 
3199  /* check the array */
3200  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
3201  {
3203 
3204  if (res->buffer != InvalidBuffer)
3205  {
3206  PrintBufferLeakWarning(res->buffer);
3207  RefCountErrors++;
3208  }
3209  }
3210 
3211  /* if necessary search the hash */
3213  {
3214  HASH_SEQ_STATUS hstat;
3215 
3217  while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
3218  {
3219  PrintBufferLeakWarning(res->buffer);
3220  RefCountErrors++;
3221  }
3222  }
3223 
3224  Assert(RefCountErrors == 0);
3225 #endif
3226 }
3227 
3228 /*
3229  * Helper routine to issue warnings when a buffer is unexpectedly pinned
3230  */
3231 void
3233 {
3234  BufferDesc *buf;
3235  int32 loccount;
3236  char *path;
3237  BackendId backend;
3238  uint32 buf_state;
3239 
3241  if (BufferIsLocal(buffer))
3242  {
3244  loccount = LocalRefCount[-buffer - 1];
3245  backend = MyBackendId;
3246  }
3247  else
3248  {
3250  loccount = GetPrivateRefCount(buffer);
3251  backend = InvalidBackendId;
3252  }
3253 
3254  /* theoretically we should lock the bufhdr here */
3255  path = relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
3256  BufTagGetForkNum(&buf->tag));
3257  buf_state = pg_atomic_read_u32(&buf->state);
3258  elog(WARNING,
3259  "buffer refcount leak: [%03d] "
3260  "(rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
3261  buffer, path,
3262  buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
3263  BUF_STATE_GET_REFCOUNT(buf_state), loccount);
3264  pfree(path);
3265 }
3266 
3267 /*
3268  * CheckPointBuffers
3269  *
3270  * Flush all dirty blocks in buffer pool to disk at checkpoint time.
3271  *
3272  * Note: temporary relations do not participate in checkpoints, so they don't
3273  * need to be flushed.
3274  */
3275 void
3277 {
3278  BufferSync(flags);
3279 }
3280 
3281 /*
3282  * BufferGetBlockNumber
3283  * Returns the block number associated with a buffer.
3284  *
3285  * Note:
3286  * Assumes that the buffer is valid and pinned, else the
3287  * value may be obsolete immediately...
3288  */
3291 {
3292  BufferDesc *bufHdr;
3293 
3295 
3296  if (BufferIsLocal(buffer))
3297  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
3298  else
3299  bufHdr = GetBufferDescriptor(buffer - 1);
3300 
3301  /* pinned, so OK to read tag without spinlock */
3302  return bufHdr->tag.blockNum;
3303 }
3304 
3305 /*
3306  * BufferGetTag
3307  * Returns the relfilelocator, fork number and block number associated with
3308  * a buffer.
3309  */
3310 void
3312  BlockNumber *blknum)
3313 {
3314  BufferDesc *bufHdr;
3315 
3316  /* Do the same checks as BufferGetBlockNumber. */
3318 
3319  if (BufferIsLocal(buffer))
3320  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
3321  else
3322  bufHdr = GetBufferDescriptor(buffer - 1);
3323 
3324  /* pinned, so OK to read tag without spinlock */
3325  *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
3326  *forknum = BufTagGetForkNum(&bufHdr->tag);
3327  *blknum = bufHdr->tag.blockNum;
3328 }
3329 
3330 /*
3331  * FlushBuffer
3332  * Physically write out a shared buffer.
3333  *
3334  * NOTE: this actually just passes the buffer contents to the kernel; the
3335  * real write to disk won't happen until the kernel feels like it. This
3336  * is okay from our point of view since we can redo the changes from WAL.
3337  * However, we will need to force the changes to disk via fsync before
3338  * we can checkpoint WAL.
3339  *
3340  * The caller must hold a pin on the buffer and have share-locked the
3341  * buffer contents. (Note: a share-lock does not prevent updates of
3342  * hint bits in the buffer, so the page could change while the write
3343  * is in progress, but we assume that that will not invalidate the data
3344  * written.)
3345  *
3346  * If the caller has an smgr reference for the buffer's relation, pass it
3347  * as the second parameter. If not, pass NULL.
3348  */
3349 static void
3351  IOContext io_context)
3352 {
3353  XLogRecPtr recptr;
3354  ErrorContextCallback errcallback;
3355  instr_time io_start;
3356  Block bufBlock;
3357  char *bufToWrite;
3358  uint32 buf_state;
3359 
3360  /*
3361  * Try to start an I/O operation. If StartBufferIO returns false, then
3362  * someone else flushed the buffer before we could, so we need not do
3363  * anything.
3364  */
3365  if (!StartBufferIO(buf, false))
3366  return;
3367 
3368  /* Setup error traceback support for ereport() */
3370  errcallback.arg = (void *) buf;
3371  errcallback.previous = error_context_stack;
3372  error_context_stack = &errcallback;
3373 
3374  /* Find smgr relation for buffer */
3375  if (reln == NULL)
3377 
3378  TRACE_POSTGRESQL_BUFFER_FLUSH_START(BufTagGetForkNum(&buf->tag),
3379  buf->tag.blockNum,
3381  reln->smgr_rlocator.locator.dbOid,
3383 
3384  buf_state = LockBufHdr(buf);
3385 
3386  /*
3387  * Run PageGetLSN while holding header lock, since we don't have the
3388  * buffer locked exclusively in all cases.
3389  */
3390  recptr = BufferGetLSN(buf);
3391 
3392  /* To check if block content changes while flushing. - vadim 01/17/97 */
3393  buf_state &= ~BM_JUST_DIRTIED;
3394  UnlockBufHdr(buf, buf_state);
3395 
3396  /*
3397  * Force XLOG flush up to buffer's LSN. This implements the basic WAL
3398  * rule that log updates must hit disk before any of the data-file changes
3399  * they describe do.
3400  *
3401  * However, this rule does not apply to unlogged relations, which will be
3402  * lost after a crash anyway. Most unlogged relation pages do not bear
3403  * LSNs since we never emit WAL records for them, and therefore flushing
3404  * up through the buffer LSN would be useless, but harmless. However,
3405  * GiST indexes use LSNs internally to track page-splits, and therefore
3406  * unlogged GiST pages bear "fake" LSNs generated by
3407  * GetFakeLSNForUnloggedRel. It is unlikely but possible that the fake
3408  * LSN counter could advance past the WAL insertion point; and if it did
3409  * happen, attempting to flush WAL through that location would fail, with
3410  * disastrous system-wide consequences. To make sure that can't happen,
3411  * skip the flush if the buffer isn't permanent.
3412  */
3413  if (buf_state & BM_PERMANENT)
3414  XLogFlush(recptr);
3415 
3416  /*
3417  * Now it's safe to write buffer to disk. Note that no one else should
3418  * have been able to write it while we were busy with log flushing because
3419  * only one process at a time can set the BM_IO_IN_PROGRESS bit.
3420  */
3421  bufBlock = BufHdrGetBlock(buf);
3422 
3423  /*
3424  * Update page checksum if desired. Since we have only shared lock on the
3425  * buffer, other processes might be updating hint bits in it, so we must
3426  * copy the page to private storage if we do checksumming.
3427  */
3428  bufToWrite = PageSetChecksumCopy((Page) bufBlock, buf->tag.blockNum);
3429 
3430  io_start = pgstat_prepare_io_time();
3431 
3432  /*
3433  * bufToWrite is either the shared buffer or a copy, as appropriate.
3434  */
3435  smgrwrite(reln,
3436  BufTagGetForkNum(&buf->tag),
3437  buf->tag.blockNum,
3438  bufToWrite,
3439  false);
3440 
3441  /*
3442  * When a strategy is in use, only flushes of dirty buffers already in the
3443  * strategy ring are counted as strategy writes (IOCONTEXT
3444  * [BULKREAD|BULKWRITE|VACUUM] IOOP_WRITE) for the purpose of IO
3445  * statistics tracking.
3446  *
3447  * If a shared buffer initially added to the ring must be flushed before
3448  * being used, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE.
3449  *
3450  * If a shared buffer which was added to the ring later because the
3451  * current strategy buffer is pinned or in use or because all strategy
3452  * buffers were dirty and rejected (for BAS_BULKREAD operations only)
3453  * requires flushing, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE
3454  * (from_ring will be false).
3455  *
3456  * When a strategy is not in use, the write can only be a "regular" write
3457  * of a dirty shared buffer (IOCONTEXT_NORMAL IOOP_WRITE).
3458  */
3460  IOOP_WRITE, io_start, 1);
3461 
3463 
3464  /*
3465  * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set) and
3466  * end the BM_IO_IN_PROGRESS state.
3467  */
3468  TerminateBufferIO(buf, true, 0);
3469 
3470  TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(BufTagGetForkNum(&buf->tag),
3471  buf->tag.blockNum,
3473  reln->smgr_rlocator.locator.dbOid,
3475 
3476  /* Pop the error context stack */
3477  error_context_stack = errcallback.previous;
3478 }
3479 
3480 /*
3481  * RelationGetNumberOfBlocksInFork
3482  * Determines the current number of pages in the specified relation fork.
3483  *
3484  * Note that the accuracy of the result will depend on the details of the
3485  * relation's storage. For builtin AMs it'll be accurate, but for external AMs
3486  * it might not be.
3487  */
3490 {
3491  if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
3492  {
3493  /*
3494  * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore
3495  * tableam returns the size in bytes - but for the purpose of this
3496  * routine, we want the number of blocks. Therefore divide, rounding
3497  * up.
3498  */
3499  uint64 szbytes;
3500 
3501  szbytes = table_relation_size(relation, forkNum);
3502 
3503  return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
3504  }
3505  else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
3506  {
3507  return smgrnblocks(RelationGetSmgr(relation), forkNum);
3508  }
3509  else
3510  Assert(false);
3511 
3512  return 0; /* keep compiler quiet */
3513 }
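/*
 * Illustrative sketch (not part of bufmgr.c) of the round-up division above:
 * a table AM reports its size in bytes, which is converted to BLCKSZ-sized
 * blocks with any partial block rounded up. Numbers are hypothetical.
 */
#include <stdint.h>

static uint64_t
example_bytes_to_blocks(uint64_t szbytes, uint32_t blcksz)
{
	/* e.g. (8193 + 8191) / 8192 = 2 blocks for 8193 bytes with 8kB blocks */
	return (szbytes + (blcksz - 1)) / blcksz;
}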
3514 
3515 /*
3516  * BufferIsPermanent
3517  * Determines whether a buffer will potentially still be around after
3518  * a crash. Caller must hold a buffer pin.
3519  */
3520 bool
3522 {
3523  BufferDesc *bufHdr;
3524 
3525  /* Local buffers are used only for temp relations. */
3526  if (BufferIsLocal(buffer))
3527  return false;
3528 
3529  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3532 
3533  /*
3534  * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
3535  * need not bother with the buffer header spinlock. Even if someone else
3536  * changes the buffer header state while we're doing this, the state is
3537  * changed atomically, so we'll read the old value or the new value, but
3538  * not random garbage.
3539  */
3540  bufHdr = GetBufferDescriptor(buffer - 1);
3541  return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
3542 }
3543 
3544 /*
3545  * BufferGetLSNAtomic
3546  * Retrieves the LSN of the buffer atomically using a buffer header lock.
3547  * This is necessary for some callers who may not have an exclusive lock
3548  * on the buffer.
3549  */
3550 XLogRecPtr
3552 {
3553  BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
3554  char *page = BufferGetPage(buffer);
3555  XLogRecPtr lsn;
3556  uint32 buf_state;
3557 
3558  /*
3559  * If we don't need locking for correctness, fastpath out.
3560  */
3562  return PageGetLSN(page);
3563 
3564  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3567 
3568  buf_state = LockBufHdr(bufHdr);
3569  lsn = PageGetLSN(page);
3570  UnlockBufHdr(bufHdr, buf_state);
3571 
3572  return lsn;
3573 }
3574 
3575 /* ---------------------------------------------------------------------
3576  * DropRelationBuffers
3577  *
3578  * This function removes from the buffer pool all the pages of the
3579  * specified relation forks that have block numbers >= firstDelBlock.
3580  * (In particular, with firstDelBlock = 0, all pages are removed.)
3581  * Dirty pages are simply dropped, without bothering to write them
3582  * out first. Therefore, this is NOT rollback-able, and so should be
3583  * used only with extreme caution!
3584  *
3585  * Currently, this is called only from smgr.c when the underlying file
3586  * is about to be deleted or truncated (firstDelBlock is needed for
3587  * the truncation case). The data in the affected pages would therefore
3588  * be deleted momentarily anyway, and there is no point in writing it.
3589  * It is the responsibility of higher-level code to ensure that the
3590  * deletion or truncation does not lose any data that could be needed
3591  * later. It is also the responsibility of higher-level code to ensure
3592  * that no other process could be trying to load more pages of the
3593  * relation into buffers.
3594  * --------------------------------------------------------------------
3595  */
3596 void
3598  int nforks, BlockNumber *firstDelBlock)
3599 {
3600  int i;
3601  int j;
3602  RelFileLocatorBackend rlocator;
3603  BlockNumber nForkBlock[MAX_FORKNUM];
3604  uint64 nBlocksToInvalidate = 0;
3605 
3606  rlocator = smgr_reln->smgr_rlocator;
3607 
3608  /* If it's a local relation, it's localbuf.c's problem. */
3609  if (RelFileLocatorBackendIsTemp(rlocator))
3610  {
3611  if (rlocator.backend == MyBackendId)
3612  {
3613  for (j = 0; j < nforks; j++)
3614  DropRelationLocalBuffers(rlocator.locator, forkNum[j],
3615  firstDelBlock[j]);
3616  }
3617  return;
3618  }
3619 
3620  /*
3621  * To remove all the pages of the specified relation forks from the buffer
3622  * pool, we need to scan the entire buffer pool but we can optimize it by
3623  * finding the buffers from BufMapping table provided we know the exact
3624  * size of each fork of the relation. The exact size is required to ensure
3625  * that we don't leave any buffer for the relation being dropped as
3626  * otherwise the background writer or checkpointer can lead to a PANIC
3627  * error while flushing buffers corresponding to files that don't exist.
3628  *
3629  * To know the exact size, we rely on the size cached for each fork by us
3630  * during recovery, which limits the optimization to recovery and standbys,
3631  * but we can easily extend it once we have a shared cache for relation
3632  * sizes.
3633  *
3634  * In recovery, we cache the value returned by the first lseek(SEEK_END)
3635  * and future writes keep the cached value up-to-date. See
3636  * smgrextend. It is possible that the value of the first lseek is smaller
3637  * than the actual number of existing blocks in the file due to buggy
3638  * Linux kernels that might not have accounted for the recent write. But
3639  * that should be fine because there must not be any buffers after that
3640  * file size.
3641  */
3642  for (i = 0; i < nforks; i++)
3643  {
3644  /* Get the number of blocks for a relation's fork */
3645  nForkBlock[i] = smgrnblocks_cached(smgr_reln, forkNum[i]);
3646 
3647  if (nForkBlock[i] == InvalidBlockNumber)
3648  {
3649  nBlocksToInvalidate = InvalidBlockNumber;
3650  break;
3651  }
3652 
3653  /* calculate the number of blocks to be invalidated */
3654  nBlocksToInvalidate += (nForkBlock[i] - firstDelBlock[i]);
3655  }
3656 
3657  /*
3658  * We apply the optimization iff the total number of blocks to invalidate
3659  * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
3660  */
3661  if (BlockNumberIsValid(nBlocksToInvalidate) &&
3662  nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
3663  {
3664  for (j = 0; j < nforks; j++)
3665  FindAndDropRelationBuffers(rlocator.locator, forkNum[j],
3666  nForkBlock[j], firstDelBlock[j]);
3667  return;
3668  }
3669 
3670  for (i = 0; i < NBuffers; i++)
3671  {
3672  BufferDesc *bufHdr = GetBufferDescriptor(i);
3673  uint32 buf_state;
3674 
3675  /*
3676  * We can make this a tad faster by prechecking the buffer tag before
3677  * we attempt to lock the buffer; this saves a lot of lock
3678  * acquisitions in typical cases. It should be safe because the
3679  * caller must have AccessExclusiveLock on the relation, or some other
3680  * reason to be certain that no one is loading new pages of the rel
3681  * into the buffer pool. (Otherwise we might well miss such pages
3682  * entirely.) Therefore, while the tag might be changing while we
3683  * look at it, it can't be changing *to* a value we care about, only
3684  * *away* from such a value. So false negatives are impossible, and
3685  * false positives are safe because we'll recheck after getting the
3686  * buffer lock.
3687  *
3688  * We could check forkNum and blockNum as well as the rlocator, but
3689  * the incremental win from doing so seems small.
3690  */
3691  if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator))
3692  continue;
3693 
3694  buf_state = LockBufHdr(bufHdr);
3695 
3696  for (j = 0; j < nforks; j++)
3697  {
3698  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator) &&
3699  BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
3700  bufHdr->tag.blockNum >= firstDelBlock[j])
3701  {
3702  InvalidateBuffer(bufHdr); /* releases spinlock */
3703  break;
3704  }
3705  }
3706  if (j >= nforks)
3707  UnlockBufHdr(bufHdr, buf_state);
3708  }
3709 }
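
/*
 * [Editor's illustration -- not part of bufmgr.c]  A minimal sketch of how a
 * truncation path could call DropRelationBuffers(), roughly what smgr-level
 * truncation does.  The function name, fork list, and new sizes below are
 * hypothetical; assumes backend context with the usual includes
 * (postgres.h, storage/bufmgr.h, storage/smgr.h).
 */
#ifdef EDITOR_USAGE_SKETCH
static void
example_truncate_buffers(SMgrRelation reln)
{
	ForkNumber	forks[2] = {MAIN_FORKNUM, FSM_FORKNUM};
	BlockNumber firstDelBlock[2] = {128, 16};	/* hypothetical new sizes */

	/*
	 * Discard every buffered page at or beyond the new end of each fork;
	 * the caller must already prevent concurrent loading of such pages.
	 */
	DropRelationBuffers(reln, forks, 2, firstDelBlock);
}
#endif							/* EDITOR_USAGE_SKETCH */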
3710 
3711 /* ---------------------------------------------------------------------
3712  * DropRelationsAllBuffers
3713  *
3714  * This function removes from the buffer pool all the pages of all
3715  * forks of the specified relations. It's equivalent to calling
3716  * DropRelationBuffers once per fork per relation with firstDelBlock = 0.
3717  * --------------------------------------------------------------------
3718  */
3719 void
3720 DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
3721 {
3722  int i;
3723  int n = 0;
3724  SMgrRelation *rels;
3725  BlockNumber (*block)[MAX_FORKNUM + 1];
3726  uint64 nBlocksToInvalidate = 0;
3727  RelFileLocator *locators;
3728  bool cached = true;
3729  bool use_bsearch;
3730 
3731  if (nlocators == 0)
3732  return;
3733 
3734  rels = palloc(sizeof(SMgrRelation) * nlocators); /* non-local relations */
3735 
3736  /* If it's a local relation, it's localbuf.c's problem. */
3737  for (i = 0; i < nlocators; i++)
3738  {
3739  if (RelFileLocatorBackendIsTemp(smgr_reln[i]->smgr_rlocator))
3740  {
3741  if (smgr_reln[i]->smgr_rlocator.backend == MyBackendId)
3742  DropRelationAllLocalBuffers(smgr_reln[i]->smgr_rlocator.locator);
3743  }
3744  else
3745  rels[n++] = smgr_reln[i];
3746  }
3747 
3748  /*
3749  * If there are no non-local relations, then we're done. Release the
3750  * memory and return.
3751  */
3752  if (n == 0)
3753  {
3754  pfree(rels);
3755  return;
3756  }
3757 
3758  /*
3759  * This is used to remember the number of blocks for all the forks of the
3760  * given relations.
3761  */
3762  block = (BlockNumber (*)[MAX_FORKNUM + 1])
3763  palloc(sizeof(BlockNumber) * n * (MAX_FORKNUM + 1));
3764 
3765  /*
3766  * We can avoid scanning the entire buffer pool if we know the exact size
3767  * of each of the given relation forks. See DropRelationBuffers.
3768  */
3769  for (i = 0; i < n && cached; i++)
3770  {
3771  for (int j = 0; j <= MAX_FORKNUM; j++)
3772  {
3773  /* Get the number of blocks for a relation's fork. */
3774  block[i][j] = smgrnblocks_cached(rels[i], j);
3775 
3776  /* We need to only consider the relation forks that exist. */
3777  if (block[i][j] == InvalidBlockNumber)
3778  {
3779  if (!smgrexists(rels[i], j))
3780  continue;
3781  cached = false;
3782  break;
3783  }
3784 
3785  /* calculate the total number of blocks to be invalidated */
3786  nBlocksToInvalidate += block[i][j];
3787  }
3788  }
3789 
3790  /*
3791  * We apply the optimization iff the total number of blocks to invalidate
3792  * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
3793  */
3794  if (cached && nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
3795  {
3796  for (i = 0; i < n; i++)
3797  {
3798  for (int j = 0; j <= MAX_FORKNUM; j++)
3799  {
3800  /* ignore relation forks that don't exist */
3801  if (!BlockNumberIsValid(block[i][j]))
3802  continue;
3803 
3804  /* drop all the buffers for a particular relation fork */
3805  FindAndDropRelationBuffers(rels[i]->smgr_rlocator.locator,
3806  j, block[i][j], 0);
3807  }
3808  }
3809 
3810  pfree(block);
3811  pfree(rels);
3812  return;
3813  }
3814 
3815  pfree(block);
3816  locators = palloc(sizeof(RelFileLocator) * n); /* non-local relations */
3817  for (i = 0; i < n; i++)
3818  locators[i] = rels[i]->smgr_rlocator.locator;
3819 
3820  /*
3821  * For a small number of relations to drop, just use a simple walk-through
3822  * to save the bsearch overhead. The threshold is more of a guess than an
3823  * exactly determined value, as it depends on many factors (CPU and RAM
3824  * speeds, amount of shared buffers etc.).
3825  */
3826  use_bsearch = n > RELS_BSEARCH_THRESHOLD;
3827 
3828  /* sort the list of rlocators if necessary */
3829  if (use_bsearch)
3830  pg_qsort(locators, n, sizeof(RelFileLocator), rlocator_comparator);
3831 
3832  for (i = 0; i < NBuffers; i++)
3833  {
3834  RelFileLocator *rlocator = NULL;
3835  BufferDesc *bufHdr = GetBufferDescriptor(i);
3836  uint32 buf_state;
3837 
3838  /*
3839  * As in DropRelationBuffers, an unlocked precheck should be safe and
3840  * saves some cycles.
3841  */
3842 
3843  if (!use_bsearch)
3844  {
3845  int j;
3846 
3847  for (j = 0; j < n; j++)
3848  {
3849  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &locators[j]))
3850  {
3851  rlocator = &locators[j];
3852  break;
3853  }
3854  }
3855  }
3856  else
3857  {
3858  RelFileLocator locator;
3859 
3860  locator = BufTagGetRelFileLocator(&bufHdr->tag);
3861  rlocator = bsearch((const void *) &(locator),
3862  locators, n, sizeof(RelFileLocator),
3863  rlocator_comparator);
3864  }
3865 
3866  /* buffer doesn't belong to any of the given relfilelocators; skip it */
3867  if (rlocator == NULL)
3868  continue;
3869 
3870  buf_state = LockBufHdr(bufHdr);
3871  if (BufTagMatchesRelFileLocator(&bufHdr->tag, rlocator))
3872  InvalidateBuffer(bufHdr); /* releases spinlock */
3873  else
3874  UnlockBufHdr(bufHdr, buf_state);
3875  }
3876 
3877  pfree(locators);
3878  pfree(rels);
3879 }
3880 
3881 /* ---------------------------------------------------------------------
3882  * FindAndDropRelationBuffers
3883  *
3884  * This function performs lookups in the BufMapping table and removes from
3885  * the buffer pool all the pages of the specified relation fork that have
3886  * block numbers >= firstDelBlock. (In particular, with firstDelBlock = 0,
3887  * all pages are removed.)
3888  * --------------------------------------------------------------------
3889  */
3890 static void
3891 FindAndDropRelationBuffers(RelFileLocator rlocator, ForkNumber forkNum,
3892  BlockNumber nForkBlock,
3893  BlockNumber firstDelBlock)
3894 {
3895  BlockNumber curBlock;
3896 
3897  for (curBlock = firstDelBlock; curBlock < nForkBlock; curBlock++)
3898  {
3899  uint32 bufHash; /* hash value for tag */
3900  BufferTag bufTag; /* identity of requested block */
3901  LWLock *bufPartitionLock; /* buffer partition lock for it */
3902  int buf_id;
3903  BufferDesc *bufHdr;
3904  uint32 buf_state;
3905 
3906  /* create a tag so we can lookup the buffer */
3907  InitBufferTag(&bufTag, &rlocator, forkNum, curBlock);
3908 
3909  /* determine its hash code and partition lock ID */
3910  bufHash = BufTableHashCode(&bufTag);
3911  bufPartitionLock = BufMappingPartitionLock(bufHash);
3912 
3913  /* Check that it is in the buffer pool. If not, do nothing. */
3914  LWLockAcquire(bufPartitionLock, LW_SHARED);
3915  buf_id = BufTableLookup(&bufTag, bufHash);
3916  LWLockRelease(bufPartitionLock);
3917 
3918  if (buf_id < 0)
3919  continue;
3920 
3921  bufHdr = GetBufferDescriptor(buf_id);
3922 
3923  /*
3924  * We need to lock the buffer header and recheck if the buffer is
3925  * still associated with the same block because the buffer could be
3926  * evicted by some other backend loading blocks for a different
3927  * relation after we release the lock on the BufMapping table.
3928  */
3929  buf_state = LockBufHdr(bufHdr);
3930 
3931  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
3932  BufTagGetForkNum(&bufHdr->tag) == forkNum &&
3933  bufHdr->tag.blockNum >= firstDelBlock)
3934  InvalidateBuffer(bufHdr); /* releases spinlock */
3935  else
3936  UnlockBufHdr(bufHdr, buf_state);
3937  }
3938 }
3939 
3940 /* ---------------------------------------------------------------------
3941  * DropDatabaseBuffers
3942  *
3943  * This function removes all the buffers in the buffer cache for a
3944  * particular database. Dirty pages are simply dropped, without
3945  * bothering to write them out first. This is used when we destroy a
3946  * database, to avoid trying to flush data to disk when the directory
3947  * tree no longer exists. Implementation is pretty similar to
3948  * DropRelationBuffers() which is for destroying just one relation.
3949  * --------------------------------------------------------------------
3950  */
3951 void
3952 DropDatabaseBuffers(Oid dbid)
3953 {
3954  int i;
3955 
3956  /*
3957  * We needn't consider local buffers, since by assumption the target
3958  * database isn't our own.
3959  */
3960 
3961  for (i = 0; i < NBuffers; i++)
3962  {
3963  BufferDesc *bufHdr = GetBufferDescriptor(i);
3964  uint32 buf_state;
3965 
3966  /*
3967  * As in DropRelationBuffers, an unlocked precheck should be safe and
3968  * saves some cycles.
3969  */
3970  if (bufHdr->tag.dbOid != dbid)
3971  continue;
3972 
3973  buf_state = LockBufHdr(bufHdr);
3974  if (bufHdr->tag.dbOid == dbid)
3975  InvalidateBuffer(bufHdr); /* releases spinlock */
3976  else
3977  UnlockBufHdr(bufHdr, buf_state);
3978  }
3979 }
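
/*
 * [Editor's illustration -- not part of bufmgr.c]  Sketch of the intended
 * call pattern for DropDatabaseBuffers(): discard every buffer of a doomed
 * database before its directory is removed, so nothing is ever flushed to
 * the vanished files.  The function name and OID are hypothetical.
 */
#ifdef EDITOR_USAGE_SKETCH
static void
example_drop_database_buffers(Oid db_id)
{
	DropDatabaseBuffers(db_id);
	/* ... caller then deletes the database directory on disk ... */
}
#endif							/* EDITOR_USAGE_SKETCH */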
3980 
3981 /* -----------------------------------------------------------------
3982  * PrintBufferDescs
3983  *
3984  * this function prints all the buffer descriptors, for debugging
3985  * use only.
3986  * -----------------------------------------------------------------
3987  */
3988 #ifdef NOT_USED
3989 void
3990 PrintBufferDescs(void)
3991 {
3992  int i;
3993 
3994  for (i = 0; i < NBuffers; ++i)
3995  {
3996  BufferDesc *buf = GetBufferDescriptor(i);
3997  Buffer b = BufferDescriptorGetBuffer(buf);
3998 
3999  /* theoretically we should lock the bufhdr here */
4000  elog(LOG,
4001  "[%02d] (freeNext=%d, rel=%s, "
4002  "blockNum=%u, flags=0x%x, refcount=%u %d)",
4003  i, buf->freeNext,
4004  relpathbackend(BufTagGetRelFileLocator(&buf->tag),
4005  InvalidBackendId, BufTagGetForkNum(&buf->tag)),
4006  buf->tag.blockNum, buf->flags,
4007  buf->refcount, GetPrivateRefCount(b));
4008  }
4009 }
4010 #endif
4011 
4012 #ifdef NOT_USED
4013 void
4014 PrintPinnedBufs(void)
4015 {
4016  int i;
4017 
4018  for (i = 0; i < NBuffers; ++i)
4019  {
4020  BufferDesc *buf = GetBufferDescriptor(i);
4021  Buffer b = BufferDescriptorGetBuffer(buf);
4022 
4023  if (GetPrivateRefCount(b) > 0)
4024  {
4025  /* theoretically we should lock the bufhdr here */
4026  elog(LOG,
4027  "[%02d] (freeNext=%d, rel=%s, "
4028  "blockNum=%u, flags=0x%x, refcount=%u %d)",
4029  i, buf->freeNext,
4030  relpathperm(BufTagGetRelFileLocator(&buf->tag),
4031  BufTagGetForkNum(&buf->tag)),
4032  buf->tag.blockNum, buf->flags,
4033  buf->refcount, GetPrivateRefCount(b));
4034  }
4035  }
4036 }
4037 #endif
4038 
4039 /* ---------------------------------------------------------------------
4040  * FlushRelationBuffers
4041  *
4042  * This function writes all dirty pages of a relation out to disk
4043  * (or more accurately, out to kernel disk buffers), ensuring that the
4044  * kernel has an up-to-date view of the relation.
4045  *
4046  * Generally, the caller should be holding AccessExclusiveLock on the
4047  * target relation to ensure that no other backend is busy dirtying
4048  * more blocks of the relation; the effects can't be expected to last
4049  * after the lock is released.
4050  *
4051  * XXX currently it sequentially searches the buffer pool, should be
4052  * changed to more clever ways of searching. This routine is not
4053  * used in any performance-critical code paths, so it's not worth
4054  * adding additional overhead to normal paths to make it go faster.
4055  * --------------------------------------------------------------------
4056  */
4057 void
4058 FlushRelationBuffers(Relation rel)
4059 {
4060  int i;
4061  BufferDesc *bufHdr;
4062 
4063  if (RelationUsesLocalBuffers(rel))
4064  {
4065  for (i = 0; i < NLocBuffer; i++)
4066  {
4067  uint32 buf_state;
4068  instr_time io_start;
4069 
4070  bufHdr = GetLocalBufferDescriptor(i);
4071  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
4072  ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
4073  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4074  {
4075  ErrorContextCallback errcallback;
4076  Page localpage;
4077 
4078  localpage = (char *) LocalBufHdrGetBlock(bufHdr);
4079 
4080  /* Setup error traceback support for ereport() */
4081  errcallback.callback = local_buffer_write_error_callback;
4082  errcallback.arg = (void *) bufHdr;
4083  errcallback.previous = error_context_stack;
4084  error_context_stack = &errcallback;
4085 
4086  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
4087 
4088  io_start = pgstat_prepare_io_time();
4089 
4090  smgrwrite(RelationGetSmgr(rel),
4091  BufTagGetForkNum(&bufHdr->tag),
4092  bufHdr->tag.blockNum,
4093  localpage,
4094  false);
4095 
4096  pgstat_count_io_op_time(IOOBJECT_TEMP_RELATION,
4097  IOCONTEXT_NORMAL, IOOP_WRITE,
4098  io_start, 1);
4099 
4100  buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
4101  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
4102 
4104 
4105  /* Pop the error context stack */
4106  error_context_stack = errcallback.previous;
4107  }
4108  }
4109 
4110  return;
4111  }
4112 
4113  /* Make sure we can handle the pin inside the loop */
4114  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
4115 
4116  for (i = 0; i < NBuffers; i++)
4117  {
4118  uint32 buf_state;
4119 
4120  bufHdr = GetBufferDescriptor(i);
4121 
4122  /*
4123  * As in DropRelationBuffers, an unlocked precheck should be safe and
4124  * saves some cycles.
4125  */
4126  if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
4127  continue;
4128 
4129  ReservePrivateRefCountEntry();
4130 
4131  buf_state = LockBufHdr(bufHdr);
4132  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
4133  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4134  {
4135  PinBuffer_Locked(bufHdr);
4136  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
4137  FlushBuffer(bufHdr, RelationGetSmgr(rel), IOOBJECT_RELATION, IOCONTEXT_NORMAL);
4138  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
4139  UnpinBuffer(bufHdr);
4140  }
4141  else
4142  UnlockBufHdr(bufHdr, buf_state);
4143  }
4144 }
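
/*
 * [Editor's illustration -- not part of bufmgr.c]  Sketch of a caller of
 * FlushRelationBuffers(): open the relation with AccessExclusiveLock so no
 * one can dirty more pages, then push its dirty buffers to the kernel.
 * The function name and OID argument are hypothetical; assumes
 * access/relation.h for relation_open()/relation_close().
 */
#ifdef EDITOR_USAGE_SKETCH
static void
example_flush_relation(Oid relid)
{
	Relation	rel = relation_open(relid, AccessExclusiveLock);

	FlushRelationBuffers(rel);

	relation_close(rel, NoLock);	/* hold the lock until commit */
}
#endif							/* EDITOR_USAGE_SKETCH */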
4145 
4146 /* ---------------------------------------------------------------------
4147  * FlushRelationsAllBuffers
4148  *
4149  * This function flushes out of the buffer pool all the pages of all
4150  * forks of the specified smgr relations. It's equivalent to calling
4151  * FlushRelationBuffers once per relation. The relations are assumed not
4152  * to use local buffers.
4153  * --------------------------------------------------------------------
4154  */
4155 void
4156 FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
4157 {
4158  int i;
4159  SMgrSortArray *srels;
4160  bool use_bsearch;
4161 
4162  if (nrels == 0)
4163  return;
4164 
4165  /* fill-in array for qsort */
4166  srels = palloc(sizeof(SMgrSortArray) * nrels);
4167 
4168  for (i = 0; i < nrels; i++)
4169  {
4170  Assert(!RelFileLocatorBackendIsTemp(smgrs[i]->smgr_rlocator));
4171 
4172  srels[i].rlocator = smgrs[i]->smgr_rlocator.locator;
4173  srels[i].srel = smgrs[i];
4174  }
4175 
4176  /*
4177  * Save the bsearch overhead for low number of relations to sync. See
4178  * DropRelationsAllBuffers for details.
4179  */
4180  use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;
4181 
4182  /* sort the list of SMgrRelations if necessary */
4183  if (use_bsearch)
4184  pg_qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);
4185 
4186  /* Make sure we can handle the pin inside the loop */
4187  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
4188 
4189  for (i = 0; i < NBuffers; i++)
4190  {
4191  SMgrSortArray *srelent = NULL;
4192  BufferDesc *bufHdr = GetBufferDescriptor(i);
4193  uint32 buf_state;
4194 
4195  /*
4196  * As in DropRelationBuffers, an unlocked precheck should be safe and
4197  * saves some cycles.
4198  */
4199 
4200  if (!use_bsearch)
4201  {
4202  int j;
4203 
4204  for (j = 0; j < nrels; j++)
4205  {
4206  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srels[j].rlocator))
4207  {
4208  srelent = &srels[j];
4209  break;
4210  }
4211  }
4212  }
4213  else
4214  {
4215  RelFileLocator rlocator;
4216 
4217  rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
4218  srelent = bsearch((const void *) &(rlocator),
4219  srels, nrels, sizeof(SMgrSortArray),
4220  rlocator_comparator);
4221  }
4222 
4223  /* buffer doesn't belong to any of the given relfilelocators; skip it */
4224  if (srelent == NULL)
4225  continue;
4226 
4227  ReservePrivateRefCountEntry();
4228 
4229  buf_state = LockBufHdr(bufHdr);
4230  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
4231  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4232  {
4233  PinBuffer_Locked(bufHdr);
4234  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
4235  FlushBuffer(bufHdr, srelent->srel, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
4236  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
4237  UnpinBuffer(bufHdr);
4238  }
4239  else
4240  UnlockBufHdr(bufHdr, buf_state);
4241  }
4242 
4243  pfree(srels);
4244 }
4245 
4246 /* ---------------------------------------------------------------------
4247  * RelationCopyStorageUsingBuffer
4248  *
4249  * Copy fork's data using bufmgr. Same as RelationCopyStorage but instead
4250  * of using smgrread and smgrextend this will copy using bufmgr APIs.
4251  *
4252  * Refer to the comments atop CreateAndCopyRelationData() for details about
4253  * the 'permanent' parameter.
4254  * --------------------------------------------------------------------
4255  */
4256 static void
4257 RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
4258  RelFileLocator dstlocator,
4259  ForkNumber forkNum, bool permanent)
4260 {
4261  Buffer srcBuf;
4262  Buffer dstBuf;
4263  Page srcPage;
4264  Page dstPage;
4265  bool use_wal;
4266  BlockNumber nblocks;
4267  BlockNumber blkno;
4269  BufferAccessStrategy bstrategy_src;
4270  BufferAccessStrategy bstrategy_dst;
4271 
4272  /*
4273  * In general, we want to write WAL whenever wal_level > 'minimal', but we
4274  * can skip it when copying any fork of an unlogged relation other than
4275  * the init fork.
4276  */
4277  use_wal = XLogIsNeeded() && (permanent || forkNum == INIT_FORKNUM);
4278 
4279  /* Get number of blocks in the source relation. */
4280  nblocks = smgrnblocks(smgropen(srclocator, InvalidBackendId),
4281  forkNum);
4282 
4283  /* Nothing to copy; just return. */
4284  if (nblocks == 0)
4285  return;
4286 
4287  /*
4288  * Bulk extend the destination relation to the same size as the source
4289  * relation before starting to copy block by block.
4290  */
4291  memset(buf.data, 0, BLCKSZ);
4292  smgrextend(smgropen(dstlocator, InvalidBackendId), forkNum, nblocks - 1,
4293  buf.data, true);
4294 
4295  /* This is a bulk operation, so use buffer access strategies. */
4296  bstrategy_src = GetAccessStrategy(BAS_BULKREAD);
4297  bstrategy_dst = GetAccessStrategy(BAS_BULKWRITE);
4298 
4299  /* Iterate over each block of the source relation file. */
4300  for (blkno = 0; blkno < nblocks; blkno++)
4301  {
4302  CHECK_FOR_INTERRUPTS();
4303 
4304  /* Read block from source relation. */
4305  srcBuf = ReadBufferWithoutRelcache(srclocator, forkNum, blkno,
4306  RBM_NORMAL, bstrategy_src,
4307  permanent);
4308  LockBuffer(srcBuf, BUFFER_LOCK_SHARE);
4309  srcPage = BufferGetPage(srcBuf);
4310 
4311  dstBuf = ReadBufferWithoutRelcache(dstlocator, forkNum, blkno,
4312  RBM_ZERO_AND_LOCK, bstrategy_dst,
4313  permanent);
4314  dstPage = BufferGetPage(dstBuf);
4315 
4316  START_CRIT_SECTION();
4317 
4318  /* Copy page data from the source to the destination. */
4319  memcpy(dstPage, srcPage, BLCKSZ);
4320  MarkBufferDirty(dstBuf);
4321 
4322  /* WAL-log the copied page. */
4323  if (use_wal)
4324  log_newpage_buffer(dstBuf, true);
4325 
4326  END_CRIT_SECTION();
4327 
4328  UnlockReleaseBuffer(dstBuf);
4329  UnlockReleaseBuffer(srcBuf);
4330  }
4331 
4332  FreeAccessStrategy(bstrategy_src);
4333  FreeAccessStrategy(bstrategy_dst);
4334 }
4335 
4336 /* ---------------------------------------------------------------------
4337  * CreateAndCopyRelationData
4338  *
4339  * Create destination relation storage and copy all forks from the
4340  * source relation to the destination.
4341  *
4342  * Pass permanent as true for permanent relations and false for
4343  * unlogged relations. Currently this API is not supported for
4344  * temporary relations.
4345  * --------------------------------------------------------------------
4346  */
4347 void
4348 CreateAndCopyRelationData(RelFileLocator src_rlocator,
4349  RelFileLocator dst_rlocator, bool permanent)
4350 {
4351  RelFileLocatorBackend rlocator;
4352  char relpersistence;
4353 
4354  /* Set the relpersistence. */
4355  relpersistence = permanent ?
4356  RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED;
4357 
4358  /*
4359  * Create and copy all forks of the relation. During CREATE DATABASE we
4360  * have a separate cleanup mechanism that deletes the complete database
4361  * directory. Therefore, each individual relation doesn't need to be
4362  * registered for cleanup.
4363  */
4364  RelationCreateStorage(dst_rlocator, relpersistence, false);
4365 
4366  /* copy main fork. */
4367  RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, MAIN_FORKNUM,
4368  permanent);
4369 
4370  /* copy those extra forks that exist */
4371  for (ForkNumber forkNum = MAIN_FORKNUM + 1;
4372  forkNum <= MAX_FORKNUM; forkNum++)
4373  {
4374  if (smgrexists(smgropen(src_rlocator, InvalidBackendId), forkNum))
4375  {
4376  smgrcreate(smgropen(dst_rlocator, InvalidBackendId), forkNum, false);
4377 
4378  /*
4379  * WAL log creation if the relation is persistent, or this is the
4380  * init fork of an unlogged relation.
4381  */
4382  if (permanent || forkNum == INIT_FORKNUM)
4383  log_smgrcreate(&dst_rlocator, forkNum);
4384 
4385  /* Copy a fork's data, block by block. */
4386  RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, forkNum,
4387  permanent);
4388  }
4389  }
4390 
4391  /* close the source and destination smgrs, if they exist. */
4392  rlocator.backend = InvalidBackendId;
4393 
4394  rlocator.locator = src_rlocator;
4395  smgrcloserellocator(rlocator);
4396 
4397  rlocator.locator = dst_rlocator;
4398  smgrcloserellocator(rlocator);
4399 }
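
/*
 * [Editor's illustration -- not part of bufmgr.c]  Sketch of a caller of
 * CreateAndCopyRelationData(), along the lines of what CREATE DATABASE's
 * WAL_LOG strategy does per relation: copy one relation's storage into
 * another database, keeping tablespace and relfilenumber.  The function
 * name and destination OID are hypothetical.
 */
#ifdef EDITOR_USAGE_SKETCH
static void
example_copy_relation(RelFileLocator src_rlocator, Oid dst_dboid)
{
	RelFileLocator dst_rlocator = src_rlocator;

	dst_rlocator.dbOid = dst_dboid;

	/* creates the destination storage, copies all forks, WAL-logs as needed */
	CreateAndCopyRelationData(src_rlocator, dst_rlocator, true);
}
#endif							/* EDITOR_USAGE_SKETCH */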
4400 
4401 /* ---------------------------------------------------------------------
4402  * FlushDatabaseBuffers
4403  *
4404  * This function writes all dirty pages of a database out to disk
4405  * (or more accurately, out to kernel disk buffers), ensuring that the
4406  * kernel has an up-to-date view of the database.
4407  *
4408  * Generally, the caller should be holding an appropriate lock to ensure
4409  * no other backend is active in the target database; otherwise more
4410  * pages could get dirtied.
4411  *
4412  * Note we don't worry about flushing any pages of temporary relations.
4413  * It's assumed these wouldn't be interesting.
4414  * --------------------------------------------------------------------
4415  */
4416 void
4417 FlushDatabaseBuffers(Oid dbid)
4418 {
4419  int i;
4420  BufferDesc *bufHdr;
4421 
4422  /* Make sure we can handle the pin inside the loop */
4423  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
4424 
4425  for (i = 0; i < NBuffers; i++)
4426  {
4427  uint32 buf_state;
4428 
4429  bufHdr = GetBufferDescriptor(i);
4430 
4431  /*
4432  * As in DropRelationBuffers, an unlocked precheck should be safe and
4433  * saves some cycles.
4434  */
4435  if (bufHdr->tag.dbOid != dbid)
4436  continue;
4437 
4438  ReservePrivateRefCountEntry();
4439 
4440  buf_state = LockBufHdr(bufHdr);
4441  if (bufHdr->tag.dbOid == dbid &&
4442  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4443  {
4444  PinBuffer_Locked(bufHdr);
4445  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
4446  FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
4447  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
4448  UnpinBuffer(bufHdr);
4449  }
4450  else
4451  UnlockBufHdr(bufHdr, buf_state);
4452  }
4453 }
4454 
4455 /*
4456  * Flush a buffer that was previously pinned and locked (in either shared or
4457  * exclusive mode) out to the OS.
4458  */
4459 void
4460 FlushOneBuffer(Buffer buffer)
4461 {
4462  BufferDesc *bufHdr;
4463 
4464  /* currently not needed, but no fundamental reason not to support */
4465  Assert(!BufferIsLocal(buffer));
4466 
4467  Assert(BufferIsPinned(buffer));
4468 
4469  bufHdr = GetBufferDescriptor(buffer - 1);
4470 
4471  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
4472 
4473  FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
4474 }
4475 
4476 /*
4477  * ReleaseBuffer -- release the pin on a buffer
4478  */
4479 void
4480 ReleaseBuffer(Buffer buffer)
4481 {
4482  if (!BufferIsValid(buffer))
4483  elog(ERROR, "bad buffer ID: %d", buffer);
4484 
4485  if (BufferIsLocal(buffer))
4486  UnpinLocalBuffer(buffer);
4487  else
4488  UnpinBuffer(GetBufferDescriptor(buffer - 1));
4489 }
4490 
4491 /*
4492  * UnlockReleaseBuffer -- release the content lock and pin on a buffer
4493  *
4494  * This is just a shorthand for a common combination.
4495  */
4496 void
4497 UnlockReleaseBuffer(Buffer buffer)
4498 {
4499  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4500  ReleaseBuffer(buffer);
4501 }
4502 
4503 /*
4504  * IncrBufferRefCount
4505  * Increment the pin count on a buffer that we have *already* pinned
4506  * at least once.
4507  *
4508  * This function cannot be used on a buffer we do not have pinned,
4509  * because it doesn't change the shared buffer state.
4510  */
4511 void
4512 IncrBufferRefCount(Buffer buffer)
4513 {
4514  Assert(BufferIsPinned(buffer));
4515  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
4516  if (BufferIsLocal(buffer))
4517  LocalRefCount[-buffer - 1]++;
4518  else
4519  {
4520  PrivateRefCountEntry *ref;
4521 
4522  ref = GetPrivateRefCountEntry(buffer, true);
4523  Assert(ref != NULL);
4524  ref->refcount++;
4525  }
4526  ResourceOwnerRememberBuffer(CurrentResourceOwner, buffer);
4527 }
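
/*
 * [Editor's illustration -- not part of bufmgr.c]  Sketch of the
 * IncrBufferRefCount() contract: every extra reference taken on an
 * already-pinned buffer must be paired with its own ReleaseBuffer().
 * The function name and arguments are hypothetical.
 */
#ifdef EDITOR_USAGE_SKETCH
static void
example_second_pin(Relation rel, BlockNumber blkno)
{
	Buffer		buf = ReadBuffer(rel, blkno);	/* first pin */

	IncrBufferRefCount(buf);	/* hand a second pin to another holder */

	ReleaseBuffer(buf);			/* releases the second pin */
	ReleaseBuffer(buf);			/* releases the original pin */
}
#endif							/* EDITOR_USAGE_SKETCH */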
4528 
4529 /*
4530  * MarkBufferDirtyHint
4531  *
4532  * Mark a buffer dirty for non-critical changes.
4533  *
4534  * This is essentially the same as MarkBufferDirty, except:
4535  *
4536  * 1. The caller does not write WAL; so if checksums are enabled, we may need
4537  * to write an XLOG_FPI_FOR_HINT WAL record to protect against torn pages.
4538  * 2. The caller might have only share-lock instead of exclusive-lock on the
4539  * buffer's content lock.
4540  * 3. This function does not guarantee that the buffer is always marked dirty
4541  * (due to a race condition), so it cannot be used for important changes.
4542  */
4543 void
4544 MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
4545 {
4546  BufferDesc *bufHdr;
4547  Page page = BufferGetPage(buffer);
4548 
4549  if (!BufferIsValid(buffer))
4550  elog(ERROR, "bad buffer ID: %d", buffer);
4551 
4552  if (BufferIsLocal(buffer))
4553  {
4554  MarkLocalBufferDirty(buffer);
4555  return;
4556  }
4557 
4558  bufHdr = GetBufferDescriptor(buffer - 1);
4559 
4560  Assert(GetPrivateRefCount(buffer) > 0);
4561  /* here, either share or exclusive lock is OK */
4562  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
4563 
4564  /*
4565  * This routine might get called many times on the same page, if we are
4566  * making the first scan after commit of an xact that added/deleted many
4567  * tuples. So, be as quick as we can if the buffer is already dirty. We
4568  * do this by not acquiring spinlock if it looks like the status bits are
4569  * already set. Since we make this test unlocked, there's a chance we
4570  * might fail to notice that the flags have just been cleared, and failed
4571  * to reset them, due to memory-ordering issues. But since this function
4572  * is only intended to be used in cases where failing to write out the
4573  * data would be harmless anyway, it doesn't really matter.
4574  */
4575  if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
4576  (BM_DIRTY | BM_JUST_DIRTIED))
4577  {
4578  XLogRecPtr lsn = InvalidXLogRecPtr;
4579  bool dirtied = false;
4580  bool delayChkptFlags = false;
4581  uint32 buf_state;
4582 
4583  /*
4584  * If we need to protect hint bit updates from torn writes, WAL-log a
4585  * full page image of the page. This full page image is only necessary
4586  * if the hint bit update is the first change to the page since the
4587  * last checkpoint.
4588  *
4589  * We don't check full_page_writes here because that logic is included
4590  * when we call XLogInsert() since the value changes dynamically.
4591  */
4592  if (XLogHintBitIsNeeded() &&
4593  (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
4594  {
4595  /*
4596  * If we must not write WAL, due to a relfilelocator-specific
4597  * condition or being in recovery, don't dirty the page. We can
4598  * set the hint, just not dirty the page as a result so the hint
4599  * is lost when we evict the page or shutdown.
4600  *
4601  * See src/backend/storage/page/README for longer discussion.
4602  */
4603  if (RecoveryInProgress() ||
4604  RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
4605  return;
4606 
4607  /*
4608  * If the block is already dirty because we either made a change
4609  * or set a hint already, then we don't need to write a full page
4610  * image. Note that aggressive cleaning of blocks dirtied by hint
4611  * bit setting would increase the call rate. Bulk setting of hint
4612  * bits would reduce the call rate...
4613  *
4614  * We must issue the WAL record before we mark the buffer dirty.
4615  * Otherwise we might write the page before we write the WAL. That
4616  * causes a race condition, since a checkpoint might occur between
4617  * writing the WAL record and marking the buffer dirty. We solve
4618  * that with a kluge, but one that is already in use during
4619  * transaction commit to prevent race conditions. Basically, we
4620  * simply prevent the checkpoint WAL record from being written
4621  * until we have marked the buffer dirty. We don't start the
4622  * checkpoint flush until we have marked dirty, so our checkpoint
4623  * must flush the change to disk successfully or the checkpoint
4624  * never gets written, so crash recovery will fix it.
4625  *
4626  * It's possible we may enter here without an xid, so it is
4627  * essential that CreateCheckPoint waits for virtual transactions
4628  * rather than full transactionids.
4629  */
4630  Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
4631  MyProc->delayChkptFlags |= DELAY_CHKPT_START;
4632  delayChkptFlags = true;
4633  lsn = XLogSaveBufferForHint(buffer, buffer_std);
4634  }
4635 
4636  buf_state = LockBufHdr(bufHdr);
4637 
4638  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4639 
4640  if (!(buf_state & BM_DIRTY))
4641  {
4642  dirtied = true; /* Means "will be dirtied by this action" */
4643 
4644  /*
4645  * Set the page LSN if we wrote a backup block. We aren't supposed
4646  * to set this when only holding a share lock but as long as we
4647  * serialise it somehow we're OK. We choose to set LSN while
4648  * holding the buffer header lock, which causes any reader of an
4649  * LSN who holds only a share lock to also obtain a buffer header
4650  * lock before using PageGetLSN(), which is enforced in
4651  * BufferGetLSNAtomic().
4652  *
4653  * If checksums are enabled, you might think we should reset the
4654  * checksum here. That will happen when the page is written
4655  * sometime later in this checkpoint cycle.
4656  */
4657  if (!XLogRecPtrIsInvalid(lsn))
4658  PageSetLSN(page, lsn);
4659  }
4660 
4661  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
4662  UnlockBufHdr(bufHdr, buf_state);
4663 
4664  if (delayChkptFlags)
4665  MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
4666 
4667  if (dirtied)
4668  {
4669  VacuumPageDirty++;
4670  pgBufferUsage.shared_blks_dirtied++;
4671  if (VacuumCostActive)
4672  VacuumCostBalance += VacuumCostPageDirty;
4673  }
4674  }
4675 }
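
/*
 * [Editor's illustration -- not part of bufmgr.c]  Sketch of the typical
 * hint-bit pattern MarkBufferDirtyHint() is meant for: the caller holds only
 * a share lock, updates non-critical bits on the page, and asks for the page
 * to be written eventually.  The function name is hypothetical and the
 * "decide the hint can be set" step is elided.
 */
#ifdef EDITOR_USAGE_SKETCH
static void
example_set_hint(Relation rel, BlockNumber blkno)
{
	Buffer		buf = ReadBuffer(rel, blkno);

	LockBuffer(buf, BUFFER_LOCK_SHARE);

	/* ... set a hint bit somewhere on BufferGetPage(buf) ... */

	/* buffer_std = true: the page uses the standard page layout */
	MarkBufferDirtyHint(buf, true);

	UnlockReleaseBuffer(buf);
}
#endif							/* EDITOR_USAGE_SKETCH */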
4676 
4677 /*
4678  * Release buffer content locks for shared buffers.
4679  *
4680  * Used to clean up after errors.
4681  *
4682  * Currently, we can expect that lwlock.c's LWLockReleaseAll() took care
4683  * of releasing buffer content locks per se; the only thing we need to deal
4684  * with here is clearing any PIN_COUNT request that was in progress.
4685  */
4686 void
4687 UnlockBuffers(void)
4688 {
4689  BufferDesc *buf = PinCountWaitBuf;
4690 
4691  if (buf)
4692  {
4693  uint32 buf_state;
4694 
4695  buf_state = LockBufHdr(buf);
4696 
4697  /*
4698  * Don't complain if flag bit not set; it could have been reset but we
4699  * got a cancel/die interrupt before getting the signal.
4700  */
4701  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4702  buf->wait_backend_pgprocno == MyProc->pgprocno)
4703  buf_state &= ~BM_PIN_COUNT_WAITER;
4704 
4705  UnlockBufHdr(buf, buf_state);
4706 
4707  PinCountWaitBuf = NULL;
4708  }
4709 }
4710 
4711 /*
4712  * Acquire or release the content_lock for the buffer.
4713  */
4714 void
4715 LockBuffer(Buffer buffer, int mode)
4716 {
4717  BufferDesc *buf;
4718 
4719  Assert(BufferIsPinned(buffer));
4720  if (BufferIsLocal(buffer))
4721  return; /* local buffers need no lock */
4722 
4723  buf = GetBufferDescriptor(buffer - 1);
4724 
4725  if (mode == BUFFER_LOCK_UNLOCK)
4726  LWLockRelease(BufferDescriptorGetContentLock(buf));
4727  else if (mode == BUFFER_LOCK_SHARE)
4728  LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_SHARED);
4729  else if (mode == BUFFER_LOCK_EXCLUSIVE)
4730  LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_EXCLUSIVE);
4731  else
4732  elog(ERROR, "unrecognized buffer lock mode: %d", mode);
4733 }
4734 
4735 /*
4736  * Acquire the content_lock for the buffer, but only if we don't have to wait.
4737  *
4738  * This assumes the caller wants BUFFER_LOCK_EXCLUSIVE mode.
4739  */
4740 bool
4741 ConditionalLockBuffer(Buffer buffer)
4742 {
4743  BufferDesc *buf;
4744 
4745  Assert(BufferIsPinned(buffer));
4746  if (BufferIsLocal(buffer))
4747  return true; /* act as though we got it */
4748 
4749  buf = GetBufferDescriptor(buffer - 1);
4750 
4751  return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
4752  LW_EXCLUSIVE);
4753 }
4754 
4755 /*
4756  * Verify that this backend is pinning the buffer exactly once.
4757  *
4758  * NOTE: Like in BufferIsPinned(), what we check here is that *this* backend
4759  * holds a pin on the buffer. We do not care whether some other backend does.
4760  */
4761 void
4762 CheckBufferIsPinnedOnce(Buffer buffer)
4763 {
4764  if (BufferIsLocal(buffer))
4765  {
4766  if (LocalRefCount[-buffer - 1] != 1)
4767  elog(ERROR, "incorrect local pin count: %d",
4768  LocalRefCount[-buffer - 1]);
4769  }
4770  else
4771  {
4772  if (GetPrivateRefCount(buffer) != 1)
4773  elog(ERROR, "incorrect local pin count: %d",
4774  GetPrivateRefCount(buffer));
4775  }
4776 }
4777 
4778 /*
4779  * LockBufferForCleanup - lock a buffer in preparation for deleting items
4780  *
4781  * Items may be deleted from a disk page only when the caller (a) holds an
4782  * exclusive lock on the buffer and (b) has observed that no other backend
4783  * holds a pin on the buffer. If there is a pin, then the other backend
4784  * might have a pointer into the buffer (for example, a heapscan reference
4785  * to an item --- see README for more details). It's OK if a pin is added
4786  * after the cleanup starts, however; the newly-arrived backend will be
4787  * unable to look at the page until we release the exclusive lock.
4788  *
4789  * To implement this protocol, a would-be deleter must pin the buffer and
4790  * then call LockBufferForCleanup(). LockBufferForCleanup() is similar to
4791  * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE), except that it loops until
4792  * it has successfully observed pin count = 1.
4793  */
4794 void
4795 LockBufferForCleanup(Buffer buffer)
4796 {
4797  BufferDesc *bufHdr;
4798  TimestampTz waitStart = 0;
4799  bool waiting = false;
4800  bool logged_recovery_conflict = false;
4801 
4802  Assert(BufferIsPinned(buffer));
4803  Assert(PinCountWaitBuf == NULL);
4804 
4805  CheckBufferIsPinnedOnce(buffer);
4806 
4807  /* Nobody else to wait for */
4808  if (BufferIsLocal(buffer))
4809  return;
4810 
4811  bufHdr = GetBufferDescriptor(buffer - 1);
4812 
4813  for (;;)
4814  {
4815  uint32 buf_state;
4816 
4817  /* Try to acquire lock */
4818  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
4819  buf_state = LockBufHdr(bufHdr);
4820 
4821  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4822  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4823  {
4824  /* Successfully acquired exclusive lock with pincount 1 */
4825  UnlockBufHdr(bufHdr, buf_state);
4826 
4827  /*
4828  * Emit the log message if recovery conflict on buffer pin was
4829  * resolved but the startup process waited longer than
4830  * deadlock_timeout for it.
4831  */
4832  if (logged_recovery_conflict)
4833  LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
4834  waitStart, GetCurrentTimestamp(),
4835  NULL, false);
4836 
4837  if (waiting)
4838  {
4839  /* reset ps display to remove the suffix if we added one */
4840  set_ps_display_remove_suffix();
4841  waiting = false;
4842  }
4843  return;
4844  }
4845  /* Failed, so mark myself as waiting for pincount 1 */
4846  if (buf_state & BM_PIN_COUNT_WAITER)
4847  {
4848  UnlockBufHdr(bufHdr, buf_state);
4849  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4850  elog(ERROR, "multiple backends attempting to wait for pincount 1");
4851  }
4852  bufHdr->wait_backend_pgprocno = MyProc->pgprocno;
4853  PinCountWaitBuf = bufHdr;
4854  buf_state |= BM_PIN_COUNT_WAITER;
4855  UnlockBufHdr(bufHdr, buf_state);
4856  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4857 
4858  /* Wait to be signaled by UnpinBuffer() */
4859  if (InHotStandby)
4860  {
4861  if (!waiting)
4862  {
4863  /* adjust the process title to indicate that it's waiting */
4864  set_ps_display_suffix("waiting");
4865  waiting = true;
4866  }
4867 
4868  /*
4869  * Emit the log message if the startup process is waiting longer
4870  * than deadlock_timeout for recovery conflict on buffer pin.
4871  *
4872  * Skip this if first time through because the startup process has
4873  * not started waiting yet in this case. So, the wait start
4874  * timestamp is set after this logic.
4875  */
4876  if (waitStart != 0 && !logged_recovery_conflict)
4877  {
4878  TimestampTz now = GetCurrentTimestamp();
4879 
4880  if (TimestampDifferenceExceeds(waitStart, now,
4881  DeadlockTimeout))
4882  {
4883  LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
4884  waitStart, now, NULL, true);
4885  logged_recovery_conflict = true;
4886  }
4887  }
4888 
4889  /*
4890  * Set the wait start timestamp if logging is enabled and first
4891  * time through.
4892  */
4893  if (log_recovery_conflict_waits && waitStart == 0)
4894  waitStart = GetCurrentTimestamp();
4895 
4896  /* Publish the bufid that Startup process waits on */
4897  SetStartupBufferPinWaitBufId(buffer - 1);
4898  /* Set alarm and then wait to be signaled by UnpinBuffer() */
4899  ResolveRecoveryConflictWithBufferPin();
4900  /* Reset the published bufid */
4901  SetStartupBufferPinWaitBufId(-1);
4902  }
4903  else
4905 
4906  /*
4907  * Remove flag marking us as waiter. Normally this will not be set
4908  * anymore, but ProcWaitForSignal() can return for other signals as
4909  * well. We take care to only reset the flag if we're the waiter, as
4910  * theoretically another backend could have started waiting. That's
4911  * impossible with the current usages due to table level locking, but
4912  * better be safe.
4913  */
4914  buf_state = LockBufHdr(bufHdr);
4915  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4916  bufHdr->wait_backend_pgprocno == MyProc->pgprocno)
4917  buf_state &= ~BM_PIN_COUNT_WAITER;
4918  UnlockBufHdr(bufHdr, buf_state);
4919 
4920  PinCountWaitBuf = NULL;
4921  /* Loop back and try again */
4922  }
4923 }
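
/*
 * [Editor's illustration -- not part of bufmgr.c]  Sketch of the cleanup-lock
 * protocol described above: pin the buffer first, then wait until we are the
 * sole pinner and hold the exclusive content lock before removing items.
 * The function name and arguments are hypothetical.
 */
#ifdef EDITOR_USAGE_SKETCH
static void
example_cleanup_page(Relation rel, BlockNumber blkno)
{
	Buffer		buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
										 RBM_NORMAL, NULL);

	LockBufferForCleanup(buf);	/* loops until our pin is the only one */

	/* ... safe to delete line pointers / defragment the page here ... */

	UnlockReleaseBuffer(buf);
}
#endif							/* EDITOR_USAGE_SKETCH */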
4924 
4925 /*
4926  * Check called from RecoveryConflictInterrupt handler when Startup
4927  * process requests cancellation of all pin holders that are blocking it.
4928  */
4929 bool
4930 HoldingBufferPinThatDelaysRecovery(void)
4931 {
4932  int bufid = GetStartupBufferPinWaitBufId();
4933 
4934  /*
4935  * If we get woken slowly then it's possible that the Startup process was
4936  * already woken by other backends before we got here. Also possible that
4937  * we get here by multiple interrupts or interrupts at inappropriate
4938  * times, so make sure we do nothing if the bufid is not set.
4939  */
4940  if (bufid < 0)
4941  return false;
4942 
4943  if (GetPrivateRefCount(bufid + 1) > 0)
4944  return true;
4945 
4946  return false;
4947 }
4948 
4949 /*
4950  * ConditionalLockBufferForCleanup - as above, but don't wait to get the lock
4951  *
4952  * We won't loop, but just check once to see if the pin count is OK. If
4953  * not, return false with no lock held.
4954  */
4955 bool
4956 ConditionalLockBufferForCleanup(Buffer buffer)
4957 {
4958  BufferDesc *bufHdr;
4959  uint32 buf_state,
4960  refcount;
4961 
4962  Assert(BufferIsValid(buffer));
4963 
4964  if (BufferIsLocal(buffer))
4965  {
4966  refcount = LocalRefCount[-buffer - 1];
4967  /* There should be exactly one pin */
4968  Assert(refcount > 0);
4969  if (refcount != 1)
4970  return false;
4971  /* Nobody else to wait for */
4972  return true;
4973  }
4974 
4975  /* There should be exactly one local pin */
4976  refcount = GetPrivateRefCount(buffer);
4977  Assert(refcount);
4978  if (refcount != 1)
4979  return false;
4980 
4981  /* Try to acquire lock */
4982  if (!ConditionalLockBuffer(buffer))
4983  return false;
4984 
4985  bufHdr = GetBufferDescriptor(buffer - 1);
4986  buf_state = LockBufHdr(bufHdr);
4987  refcount = BUF_STATE_GET_REFCOUNT(buf_state);
4988 
4989  Assert(refcount > 0);
4990  if (refcount == 1)
4991  {
4992  /* Successfully acquired exclusive lock with pincount 1 */
4993  UnlockBufHdr(bufHdr, buf_state);
4994  return true;
4995  }
4996 
4997  /* Failed, so release the lock */
4998  UnlockBufHdr(bufHdr, buf_state);
4999  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5000  return false;
5001 }
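
/*
 * [Editor's illustration -- not part of bufmgr.c]  Sketch of the
 * "skip rather than wait" pattern ConditionalLockBufferForCleanup() enables,
 * similar in spirit to what lazy vacuum does for contended pages.  Assumes
 * 'buf' is already pinned by the caller; the function name is hypothetical.
 */
#ifdef EDITOR_USAGE_SKETCH
static bool
example_try_cleanup(Buffer buf)
{
	if (!ConditionalLockBufferForCleanup(buf))
	{
		/* someone else holds a pin; give up on this page for now */
		ReleaseBuffer(buf);
		return false;
	}

	/* ... cleanup work under the exclusive cleanup lock ... */

	UnlockReleaseBuffer(buf);
	return true;
}
#endif							/* EDITOR_USAGE_SKETCH */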
5002 
5003 /*
5004  * IsBufferCleanupOK - as above, but we already have the lock
5005  *
5006  * Check whether it's OK to perform cleanup on a buffer we've already
5007  * locked. If we observe that the pin count is 1, our exclusive lock
5008  * happens to be a cleanup lock, and we can proceed with anything that
5009  * would have been allowable had we sought a cleanup lock originally.
5010  */
5011 bool
5012 IsBufferCleanupOK(Buffer buffer)
5013 {
5014  BufferDesc *bufHdr;
5015  uint32 buf_state;
5016 
5017  Assert(BufferIsValid(buffer));
5018 
5019  if (BufferIsLocal(buffer))
5020  {
5021  /* There should be exactly one pin */
5022  if (LocalRefCount[-buffer - 1] != 1)
5023  return false;
5024  /* Nobody else to wait for */
5025  return true;
5026  }
5027 
5028  /* There should be exactly one local pin */
5029  if (GetPrivateRefCount(buffer) != 1)
5030  return false;
5031 
5032  bufHdr = GetBufferDescriptor(buffer - 1);
5033 
5034  /* caller must hold exclusive lock on buffer */
5035  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
5036  LW_EXCLUSIVE));
5037 
5038  buf_state = LockBufHdr(bufHdr);
5039 
5040  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5041  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
5042  {
5043  /* pincount is OK. */
5044  UnlockBufHdr(bufHdr, buf_state);
5045  return true;
5046  }
5047 
5048  UnlockBufHdr(bufHdr, buf_state);
5049  return false;
5050 }
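
/*
 * [Editor's illustration -- not part of bufmgr.c]  Sketch of opportunistic
 * cleanup with IsBufferCleanupOK(): having already taken the exclusive lock
 * for some other change, do the extra cleanup only if we also happen to be
 * the sole pinner.  Assumes 'buf' is already pinned; the function name is
 * hypothetical.
 */
#ifdef EDITOR_USAGE_SKETCH
static void
example_opportunistic_cleanup(Buffer buf)
{
	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

	if (IsBufferCleanupOK(buf))
	{
		/* ... our exclusive lock is effectively a cleanup lock ... */
	}

	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
}
#endif							/* EDITOR_USAGE_SKETCH */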
5051 
5052 
5053 /*
5054  * Functions for buffer I/O handling
5055  *
5056  * Note: We assume that nested buffer I/O never occurs.
5057  * i.e., at most one BM_IO_IN_PROGRESS bit is set per proc.
5058  *
5059  * Also note that these are used only for shared buffers, not local ones.
5060  */
5061 
5062 /*
5063  * WaitIO -- Block until the IO_IN_PROGRESS flag on 'buf' is cleared.
5064  */
5065 static void
5066 WaitIO(BufferDesc *buf)
5067 {
5068  ConditionVariable *cv = BufferDescriptorGetIOCV(buf);
5069 
5070  ConditionVariablePrepareToSleep(cv);
5071  for (;;)
5072  {
5073  uint32 buf_state;
5074 
5075  /*
5076  * It may not be necessary to acquire the spinlock to check the flag
5077  * here, but since this test is essential for correctness, we'd better
5078  * play it safe.
5079  */
5080  buf_state = LockBufHdr(buf);
5081  UnlockBufHdr(buf, buf_state);
5082 
5083  if (!(buf_state & BM_IO_IN_PROGRESS))
5084  break;
5085  ConditionVariableSleep(cv, WAIT_EVENT_BUFFER_IO);
5086  }
5087  ConditionVariableCancelSleep();
5088 }
5089 
5090 /*
5091  * StartBufferIO: begin I/O on this buffer
5092  * (Assumptions)
5093  * My process is executing no IO
5094  * The buffer is Pinned
5095  *
5096  * In some scenarios there are race conditions in which multiple backends
5097  * could attempt the same I/O operation concurrently. If someone else
5098  * has already started I/O on this buffer then we will block on the
5099  * I/O condition variable until it's done.
5100  *
5101  * Input operations are only attempted on buffers that are not BM_VALID,
5102  * and output operations only on buffers that are BM_VALID and BM_DIRTY,
5103  * so we can always tell if the work is already done.
5104  *
5105  * Returns true if we successfully marked the buffer as I/O busy,
5106  * false if someone else already did the work.
5107  */
5108 static bool
5109 StartBufferIO(BufferDesc *buf, bool forInput)
5110 {
5111  uint32 buf_state;
5112 
5114 
5115  for (;;)
5116  {
5117  buf_state = LockBufHdr(buf);
5118 
5119  if (!(buf_state & BM_IO_IN_PROGRESS))
5120  break;
5121  UnlockBufHdr(buf, buf_state);
5122  WaitIO(buf);
5123  }
5124 
5125  /* Once we get here, there is definitely no I/O active on this buffer */
5126 
5127  if (forInput ? (buf_state & BM_VALID) : !(buf_state & BM_DIRTY))
5128  {
5129  /* someone else already did the I/O */
5130  UnlockBufHdr(buf, buf_state);
5131  return false;
5132  }
5133 
5134  buf_state |= BM_IO_IN_PROGRESS;
5135  UnlockBufHdr(buf, buf_state);
5136 
5139 
5140  return true;
5141 }
5142 
5143 /*
5144  * TerminateBufferIO: release a buffer we were doing I/O on
5145  * (Assumptions)
5146  * My process is executing IO for the buffer
5147  * BM_IO_IN_PROGRESS bit is set for the buffer
5148  * The buffer is Pinned
5149  *
5150  * If clear_dirty is true and BM_JUST_DIRTIED is not set, we clear the
5151  * buffer's BM_DIRTY flag. This is appropriate when terminating a
5152  * successful write. The check on BM_JUST_DIRTIED is necessary to avoid
5153  * marking the buffer clean if it was re-dirtied while we were writing.
5154  *
5155  * set_flag_bits gets ORed into the buffer's flags. It must include
5156  * BM_IO_ERROR in a failure case. For successful completion it could
5157  * be 0, or BM_VALID if we just finished reading in the page.
5158  */
5159 static void
5160 TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits)
5161 {
5162  uint32 buf_state;
5163 
5164  buf_state = LockBufHdr(buf);
5165 
5166  Assert(buf_state & BM_IO_IN_PROGRESS);
5167 
5168  buf_state &= ~(BM_IO_IN_PROGRESS | BM_IO_ERROR);
5169  if (clear_dirty && !(buf_state & BM_JUST_DIRTIED))
5170  buf_state &= ~(BM_DIRTY | BM_CHECKPOINT_NEEDED);
5171 
5172  buf_state |= set_flag_bits;
5173  UnlockBufHdr(buf, buf_state);
5174 
5177 
5178  ConditionVariableBroadcast(BufferDescriptorGetIOCV(buf));
5179 }
5180 
5181 /*
5182  * AbortBufferIO: Clean up active buffer I/O after an error.
5183  *
5184  * All LWLocks we might have held have been released,
5185  * but we haven't yet released buffer pins, so the buffer is still pinned.
5186  *
5187  * If I/O was in progress, we always set BM_IO_ERROR, even though it's
5188  * possible the error condition wasn't related to the I/O.
5189  */
5190 void
5191 AbortBufferIO(Buffer buffer)
5192 {
5193  BufferDesc *buf_hdr = GetBufferDescriptor(buffer - 1);
5194  uint32 buf_state;
5195 
5196  buf_state = LockBufHdr(buf_hdr);
5197  Assert(buf_state & (BM_IO_IN_PROGRESS | BM_TAG_VALID));
5198 
5199  if (!(buf_state & BM_VALID))
5200  {
5201  Assert(!(buf_state & BM_DIRTY));
5202  UnlockBufHdr(buf_hdr, buf_state);
5203  }
5204  else
5205  {
5206  Assert(buf_state & BM_DIRTY);
5207  UnlockBufHdr(buf_hdr, buf_state);
5208 
5209  /* Issue notice if this is not the first failure... */
5210  if (buf_state & BM_IO_ERROR)
5211  {
5212  /* Buffer is pinned, so we can read tag without spinlock */
5213  char *path;
5214 
5215  path = relpathperm(BufTagGetRelFileLocator(&buf_hdr->tag),
5216  BufTagGetForkNum(&buf_hdr->tag));
5217  ereport(WARNING,
5218  (errcode(ERRCODE_IO_ERROR),
5219  errmsg("could not write block %u of %s",
5220  buf_hdr->tag.blockNum, path),
5221  errdetail("Multiple failures --- write error might be permanent.")));
5222  pfree(path);
5223  }
5224  }
5225 
5226  TerminateBufferIO(buf_hdr, false, BM_IO_ERROR);
5227 }
5228 
5229 /*
5230  * Error context callback for errors occurring during shared buffer writes.
5231  */
5232 static void
5233 shared_buffer_write_error_callback(void *arg)
5234 {
5235  BufferDesc *bufHdr = (BufferDesc *) arg;
5236 
5237  /* Buffer is pinned, so we can read the tag without locking the spinlock */
5238  if (bufHdr != NULL)
5239  {
5240  char *path = relpathperm(BufTagGetRelFileLocator(&bufHdr->tag),
5241  BufTagGetForkNum(&bufHdr->tag));
5242 
5243  errcontext("writing block %u of relation %s",
5244  bufHdr->tag.blockNum, path);
5245  pfree(path);
5246  }
5247 }
5248 
5249 /*
5250  * Error context callback for errors occurring during local buffer writes.
5251  */
5252 static void
5253 local_buffer_write_error_callback(void *arg)
5254 {
5255  BufferDesc *bufHdr = (BufferDesc *) arg;
5256 
5257  if (bufHdr != NULL)
5258  {
5259  char *path = relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
5260  MyBackendId,
5261  BufTagGetForkNum(&bufHdr->tag));
5262 
5263  errcontext("writing block %u of relation %s",
5264  bufHdr->tag.blockNum, path);
5265  pfree(path);
5266  }
5267 }
5268 
5269 /*
5270  * RelFileLocator qsort/bsearch comparator; see RelFileLocatorEquals.
5271  */
5272 static int
5273 rlocator_comparator(const void *p1, const void *p2)
5274 {
5275  RelFileLocator n1 = *(const RelFileLocator *) p1;
5276  RelFileLocator n2 = *(const RelFileLocator *) p2;
5277 
5278  if (n1.relNumber < n2.relNumber)
5279  return -1;
5280  else if (n1.relNumber > n2.relNumber)
5281  return 1;
5282 
5283  if (n1.dbOid < n2.dbOid)
5284  return -1;
5285  else if (n1.dbOid > n2.dbOid)
5286  return 1;
5287 
5288  if (n1.spcOid < n2.spcOid)
5289  return -1;
5290  else if (n1.spcOid > n2.spcOid)
5291  return 1;
5292  else
5293  return 0;
5294 }
5295 
5296 /*
5297  * Lock buffer header - set BM_LOCKED in buffer state.
5298  */
5299 uint32
5300 LockBufHdr(BufferDesc *desc)
5301 {
5302  SpinDelayStatus delayStatus;
5303  uint32 old_buf_state;
5304 
5305  Assert(!BufferIsLocal(BufferDescriptorGetBuffer(desc)));
5306 
5307  init_local_spin_delay(&delayStatus);
5308 
5309  while (true)
5310  {
5311  /* set BM_LOCKED flag */
5312  old_buf_state = pg_atomic_fetch_or_u32(&desc->state, BM_LOCKED);
5313  /* if it wasn't set before we're OK */
5314  if (!(old_buf_state & BM_LOCKED))
5315  break;
5316  perform_spin_delay(&delayStatus);
5317  }
5318  finish_spin_delay(&delayStatus);
5319  return old_buf_state | BM_LOCKED;
5320 }
5321 
5322 /*
5323  * Wait until the BM_LOCKED flag isn't set anymore and return the buffer's
5324  * state at that point.
5325  *
5326  * Obviously the buffer could be locked by the time the value is returned, so
5327  * this is primarily useful in CAS style loops.
5328  */
5329 static uint32
5330 WaitBufHdrUnlocked(BufferDesc *buf)
5331 {
5332  SpinDelayStatus delayStatus;
5333  uint32 buf_state;
5334 
5335  init_local_spin_delay(&delayStatus);
5336 
5337  buf_state = pg_atomic_read_u32(&buf->state);
5338 
5339  while (buf_state & BM_LOCKED)
5340  {
5341  perform_spin_delay(&delayStatus);
5342  buf_state = pg_atomic_read_u32(&buf->state);
5343  }
5344 
5345  finish_spin_delay(&delayStatus);
5346 
5347  return buf_state;
5348 }
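
/*
 * [Editor's illustration -- not part of bufmgr.c]  Sketch of the CAS-style
 * loop WaitBufHdrUnlocked() is meant for, mirroring the pattern the pin path
 * in this file uses: retry the compare-and-exchange, and spin-wait only when
 * the header spinlock bit is observed to be set.  Illustrative only -- the
 * real pin path also does resource-owner bookkeeping; the function name is
 * hypothetical.
 */
#ifdef EDITOR_USAGE_SKETCH
static void
example_bump_refcount(BufferDesc *buf)
{
	uint32		old_buf_state = pg_atomic_read_u32(&buf->state);
	uint32		buf_state;

	for (;;)
	{
		if (old_buf_state & BM_LOCKED)
			old_buf_state = WaitBufHdrUnlocked(buf);

		buf_state = old_buf_state + BUF_REFCOUNT_ONE;

		if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
										   buf_state))
			break;				/* on failure, old_buf_state was refreshed */
	}
}
#endif							/* EDITOR_USAGE_SKETCH */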
5349 
5350 /*
5351  * BufferTag comparator.
5352  */
5353 static inline int
5354 buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
5355 {
5356  int ret;
5357  RelFileLocator rlocatora;
5358  RelFileLocator rlocatorb;
5359 
5360  rlocatora = BufTagGetRelFileLocator(ba);
5361  rlocatorb = BufTagGetRelFileLocator(bb);
5362 
5363  ret = rlocator_comparator(&rlocatora, &rlocatorb);
5364 
5365  if (ret != 0)
5366  return ret;
5367 
5368  if (BufTagGetForkNum(ba) < BufTagGetForkNum(bb))
5369  return -1;
5370  if (BufTagGetForkNum(ba) > BufTagGetForkNum(bb))
5371  return 1;
5372 
5373  if (ba->blockNum < bb->blockNum)
5374  return -1;
5375  if (ba->blockNum > bb->blockNum)
5376  return 1;
5377 
5378  return 0;
5379 }
5380 
5381 /*
5382  * Comparator determining the writeout order in a checkpoint.
5383  *
5384  * It is important that tablespaces are compared first; the logic balancing
5385  * writes between tablespaces relies on it.
5386  */
5387 static inline int
5388 ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
5389 {
5390  /* compare tablespace */
5391  if (a->tsId < b->tsId)
5392  return -1;
5393  else if (a->tsId > b->tsId)
5394  return 1;
5395  /* compare relation */
5396  if (a->relNumber < b->relNumber)
5397  return -1;
5398  else if (a->relNumber > b->relNumber)
5399  return 1;
5400  /* compare fork */
5401  else if (a->forkNum < b->forkNum)
5402  return -1;
5403  else if (a->forkNum > b->forkNum)
5404  return 1;
5405  /* compare block number */
5406  else if (a->blockNum < b->blockNum)
5407  return -1;
5408  else if (a->blockNum > b->blockNum)
5409  return 1;
5410  /* equal page IDs are unlikely, but not impossible */
5411  return 0;
5412 }
5413 
5414 /*
5415  * Comparator for a Min-Heap over the per-tablespace checkpoint completion
5416  * progress.
5417  */
5418 static int
5419 ts_ckpt_progress_comparator(Datum a, Datum b, void *arg)
5420 {
5421  CkptTsStatus *sa = (CkptTsStatus *) a;
5422  CkptTsStatus *sb = (CkptTsStatus *) b;
5423 
5424  /* we want a min-heap, so return 1 for a < b */
5425  if (sa->progress < sb->progress)
5426  return 1;
5427  else if (sa->progress == sb->progress)
5428  return 0;
5429  else
5430  return -1;
5431 }
5432 
5433 /*
5434  * Initialize a writeback context, discarding potential previous state.
5435  *
5436  * *max_pending is a pointer instead of an immediate value, so the coalesce
5437  * limits can easily be changed by the GUC mechanism, and so calling code does
5438  * not have to check the current configuration. A value of 0 means that no
5439  * writeback control will be performed.
5440  */
5441 void
5442 WritebackContextInit(WritebackContext *context, int *max_pending)
5443 {
5444  Assert(*max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
5445 
5446  context->max_pending = max_pending;
5447  context->nr_pending = 0;
5448 }
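
/*
 * [Editor's illustration -- not part of bufmgr.c]  Sketch of the writeback
 * batching flow: initialize the context against a GUC-backed limit (here
 * checkpoint_flush_after), queue tags as buffers are written, and flush any
 * leftovers explicitly at the end.  The function name and the relation/block
 * arguments are hypothetical.
 */
#ifdef EDITOR_USAGE_SKETCH
static void
example_writeback_batch(RelFileLocator rlocator, BlockNumber nblocks)
{
	WritebackContext wb_context;
	BlockNumber blkno;

	WritebackContextInit(&wb_context, &checkpoint_flush_after);

	for (blkno = 0; blkno < nblocks; blkno++)
	{
		BufferTag	tag;

		InitBufferTag(&tag, &rlocator, MAIN_FORKNUM, blkno);
		/* issues the accumulated requests whenever the limit is reached */
		ScheduleBufferTagForWriteback(&wb_context, IOCONTEXT_NORMAL, &tag);
	}

	IssuePendingWritebacks(&wb_context, IOCONTEXT_NORMAL);
}
#endif							/* EDITOR_USAGE_SKETCH */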
5449 
5450 /*
5451  * Add buffer to list of pending writeback requests.
5452  */
5453 void
5454 ScheduleBufferTagForWriteback(WritebackContext *wb_context, IOContext io_context,
5455  BufferTag *tag)
5456 {
5457  PendingWriteback *pending;
5458 
5459  if (io_direct_flags & IO_DIRECT_DATA)
5460  return;
5461 
5462  /*
5463  * Add buffer to the pending writeback array, unless writeback control is
5464  * disabled.
5465  */
5466  if (*wb_context->max_pending > 0)
5467  {
5468  Assert(*wb_context->max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
5469 
5470  pending = &wb_context->pending_writebacks[wb_context->nr_pending++];
5471 
5472  pending->tag = *tag;
5473  }
5474 
5475  /*
5476  * Perform pending flushes if the writeback limit is exceeded. This
5477  * includes the case where previously an item has been added, but control
5478  * is now disabled.
5479  */
5480  if (wb_context->nr_pending >= *wb_context->max_pending)
5481  IssuePendingWritebacks(wb_context, io_context);
5482 }
5483 
5484 #define ST_SORT sort_pending_writebacks
5485 #define ST_ELEMENT_TYPE PendingWriteback
5486 #define ST_COMPARE(a, b) buffertag_comparator(&a->tag, &b->tag)
5487 #define ST_SCOPE static
5488 #define ST_DEFINE
5489 #include "lib/sort_template.h"
5490 
5491 /*
5492  * Issue all pending writeback requests, previously scheduled with
5493  * ScheduleBufferTagForWriteback, to the OS.
5494  *
5495  * Because this is only used to improve the OS's IO scheduling, we try to
5496  * never error out - it's just a hint.
5497  */
5498 void
5499 IssuePendingWritebacks(WritebackContext *wb_context, IOContext io_context)
5500 {
5501  instr_time io_start;
5502  int i;
5503 
5504  if (wb_context->nr_pending == 0)
5505  return;
5506 
5507  /*
5508  * Executing the writes in-order can make them a lot faster, and allows
5509  * merging writeback requests for consecutive blocks into larger writebacks.
5510  */
5511  sort_pending_writebacks(wb_context->pending_writebacks,
5512  wb_context->nr_pending);
5513 
5514  io_start = pgstat_prepare_io_time();
5515 
5516  /*
5517  * Coalesce neighbouring writes, but nothing else. For that we iterate
5518  * through the, now sorted, array of pending flushes, and look forward to
5519  * find all neighbouring (or identical) writes.
5520  */
5521  for (i = 0; i < wb_context->nr_pending; i++)
5522  {
5523  PendingWriteback *cur;
5524  PendingWriteback *next;
5525  SMgrRelation reln;
5526  int ahead;
5527  BufferTag tag;
5528  RelFileLocator currlocator;
5529  Size nblocks = 1;
5530 
5531  cur = &wb_context->pending_writebacks[i];
5532  tag = cur->tag;
5533  currlocator = BufTagGetRelFileLocator(&tag);
5534 
5535  /*
5536  * Peek ahead, into following writeback requests, to see if they can
5537  * be combined with the current one.
5538  */
5539  for (ahead = 0; i + ahead + 1 < wb_context->nr_pending; ahead++)
5540  {
5541 
5542  next = &wb_context->pending_writebacks[i + ahead + 1];
5543 
5544  /* different file, stop */
5545  if (!RelFileLocatorEquals(currlocator,
5546  BufTagGetRelFileLocator(&next->tag)) ||
5547  BufTagGetForkNum(&cur->tag) != BufTagGetForkNum(&next->tag))
5548  break;
5549 
5550  /* ok, block queued twice, skip */
5551  if (cur->tag.blockNum == next->tag.blockNum)
5552  continue;
5553 
5554  /* only merge consecutive writes */
5555  if (cur->tag.blockNum + 1 != next->tag.blockNum)
5556  break;
5557 
5558  nblocks++;
5559  cur = next;
5560  }
5561 
5562  i += ahead;
5563 
5564  /* and finally tell the kernel to write the data to storage */
5565  reln = smgropen(currlocator, InvalidBackendId);
5566  smgrwriteback(reln, BufTagGetForkNum(&tag), tag.blockNum, nblocks);
5567  }
5568 
5569  /*
5570  * Assume that writeback requests are only issued for buffers containing
5571  * blocks of permanent relations.
5572  */
5573  pgstat_count_io_op_time(IOOBJECT_RELATION, io_context,
5574  IOOP_WRITEBACK, io_start, wb_context->nr_pending);
5575 
5576  wb_context->nr_pending = 0;
5577 }
5578 
5579 
5580 /*
5581  * Implement slower/larger portions of TestForOldSnapshot
5582  *
5583  * Smaller/faster portions are put inline, but the entire set of logic is too
5584  * big for that.
5585  */
5586 void
5587 TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
5588 {
5589  if (RelationAllowsEarlyPruning(relation)
5590  && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
5591  ereport(ERROR,
5592  (errcode(ERRCODE_SNAPSHOT_TOO_OLD),
5593  errmsg("snapshot too old")));
5594 }
static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
Definition: bufmgr.c:2231
bool zero_damaged_pages
Definition: bufmgr.c:135
#define BUF_DROP_FULL_SCAN_THRESHOLD
Definition: bufmgr.c:82
static BlockNumber ExtendBufferedRelShared(ExtendBufferedWhat eb, ForkNumber fork, BufferAccessStrategy strategy, uint32 flags, uint32 extend_by, BlockNumber extend_upto, Buffer *buffers, uint32 *extended_by)
Definition: bufmgr.c:1826
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:2336
static uint32 WaitBufHdrUnlocked(BufferDesc *buf)
Definition: bufmgr.c:5330
static int buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
Definition: bufmgr.c:5354
#define LocalBufHdrGetBlock(bufHdr)
Definition: bufmgr.c:67
bool IsBufferCleanupOK(Buffer buffer)
Definition: bufmgr.c:5012
#define BufferGetLSN(bufHdr)
Definition: bufmgr.c:64
static BufferDesc * BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, BufferAccessStrategy strategy, bool *foundPtr, IOContext io_context)
Definition: bufmgr.c:1220
void AtEOXact_Buffers(bool isCommit)
Definition: bufmgr.c:3132
static void CheckForBufferLeaks(void)
Definition: bufmgr.c:3192
void CreateAndCopyRelationData(RelFileLocator src_rlocator, RelFileLocator dst_rlocator, bool permanent)
Definition: bufmgr.c:4348
Buffer ExtendBufferedRel(ExtendBufferedWhat eb, ForkNumber forkNum, BufferAccessStrategy strategy, uint32 flags)
Definition: bufmgr.c:812
void DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
De